2 * sgen-gc.c: Simple generational GC.
5 * Paolo Molaro (lupus@ximian.com)
6 * Rodrigo Kumpera (kumpera@gmail.com)
8 * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
9 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
11 * Thread start/stop adapted from Boehm's GC:
12 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
13 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
14 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
15 * Copyright (c) 2000-2004 by Hewlett-Packard Company. All rights reserved.
17 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
18 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
20 * Permission is hereby granted to use or copy this program
21 * for any purpose, provided the above notices are retained on all copies.
22 * Permission to modify the code and to distribute modified code is granted,
23 * provided the above notices are retained, and a notice that the code was
24 * modified is included with the above copyright notice.
27 * Copyright 2001-2003 Ximian, Inc
28 * Copyright 2003-2010 Novell, Inc.
29 * Copyright 2011 Xamarin, Inc.
31 * Permission is hereby granted, free of charge, to any person obtaining
32 * a copy of this software and associated documentation files (the
33 * "Software"), to deal in the Software without restriction, including
34 * without limitation the rights to use, copy, modify, merge, publish,
35 * distribute, sublicense, and/or sell copies of the Software, and to
36 * permit persons to whom the Software is furnished to do so, subject to
37 * the following conditions:
39 * The above copyright notice and this permission notice shall be
40 * included in all copies or substantial portions of the Software.
42 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
43 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
44 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
45 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
46 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
47 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
48 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 * Important: allocation always provides zeroed memory, so having to do
 * a memset after allocation is deadly for performance.
53 * Memory usage at startup is currently as follows:
55 * 64 KB internal space
57 * We should provide a small memory config with half the sizes
59 * We currently try to make as few mono assumptions as possible:
60 * 1) 2-word header with no GC pointers in it (first vtable, second to store the
62 * 2) gc descriptor is the second word in the vtable (first word in the class)
63 * 3) 8 byte alignment is the minimum and enough (not true for special structures (SIMD), FIXME)
64 * 4) there is a function to get an object's size and the number of
65 * elements in an array.
66 * 5) we know the special way bounds are allocated for complex arrays
67 * 6) we know about proxies and how to treat them when domains are unloaded
69 * Always try to keep stack usage to a minimum: no recursive behaviour
70 * and no large stack allocs.
72 * General description.
73 * Objects are initially allocated in a nursery using a fast bump-pointer technique.
74 * When the nursery is full we start a nursery collection: this is performed with a
76 * When the old generation is full we start a copying GC of the old generation as well:
 * this will be changed to mark&sweep with copying when fragmentation becomes too severe
78 * in the future. Maybe we'll even do both during the same collection like IMMIX.
80 * The things that complicate this description are:
81 * *) pinned objects: we can't move them so we need to keep track of them
82 * *) no precise info of the thread stacks and registers: we need to be able to
83 * quickly find the objects that may be referenced conservatively and pin them
84 * (this makes the first issues more important)
85 * *) large objects are too expensive to be dealt with using copying GC: we handle them
86 * with mark/sweep during major collections
87 * *) some objects need to not move even if they are small (interned strings, Type handles):
88 * we use mark/sweep for them, too: they are not allocated in the nursery, but inside
89 * PinnedChunks regions
95 *) we could have a function pointer in MonoClass to implement
96 customized write barriers for value types
98 *) investigate the stuff needed to advance a thread to a GC-safe
99 point (single-stepping, read from unmapped memory etc) and implement it.
100 This would enable us to inline allocations and write barriers, for example,
101 or at least parts of them, like the write barrier checks.
102 We may need this also for handling precise info on stacks, even simple things
103 as having uninitialized data on the stack and having to wait for the prolog
104 to zero it. Not an issue for the last frame that we scan conservatively.
105 We could always not trust the value in the slots anyway.
107 *) modify the jit to save info about references in stack locations:
108 this can be done just for locals as a start, so that at least
109 part of the stack is handled precisely.
 *) test/fix endianness issues
113 *) Implement a card table as the write barrier instead of remembered
114 sets? Card tables are not easy to implement with our current
115 memory layout. We have several different kinds of major heap
116 objects: Small objects in regular blocks, small objects in pinned
117 chunks and LOS objects. If we just have a pointer we have no way
118 to tell which kind of object it points into, therefore we cannot
119 know where its card table is. The least we have to do to make
120 this happen is to get rid of write barriers for indirect stores.
123 *) Get rid of write barriers for indirect stores. We can do this by
124 telling the GC to wbarrier-register an object once we do an ldloca
125 or ldelema on it, and to unregister it once it's not used anymore
126 (it can only travel downwards on the stack). The problem with
127 unregistering is that it needs to happen eventually no matter
128 what, even if exceptions are thrown, the thread aborts, etc.
129 Rodrigo suggested that we could do only the registering part and
130 let the collector find out (pessimistically) when it's safe to
131 unregister, namely when the stack pointer of the thread that
132 registered the object is higher than it was when the registering
133 happened. This might make for a good first implementation to get
134 some data on performance.
136 *) Some sort of blacklist support? Blacklists is a concept from the
137 Boehm GC: if during a conservative scan we find pointers to an
138 area which we might use as heap, we mark that area as unusable, so
139 pointer retention by random pinning pointers is reduced.
141 *) experiment with max small object size (very small right now - 2kb,
142 because it's tied to the max freelist size)
144 *) add an option to mmap the whole heap in one chunk: it makes for many
145 simplifications in the checks (put the nursery at the top and just use a single
146 check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
147 not flexible (too much of the address space may be used by default or we can't
148 increase the heap as needed) and we'd need a race-free mechanism to return memory
149 back to the system (mprotect(PROT_NONE) will still keep the memory allocated if it
150 was written to, munmap is needed, but the following mmap may not find the same segment
153 *) memzero the major fragments after restarting the world and optionally a smaller
156 *) investigate having fragment zeroing threads
158 *) separate locks for finalization and other minor stuff to reduce
161 *) try a different copying order to improve memory locality
163 *) a thread abort after a store but before the write barrier will
164 prevent the write barrier from executing
166 *) specialized dynamically generated markers/copiers
168 *) Dynamically adjust TLAB size to the number of threads. If we have
169 too many threads that do allocation, we might need smaller TLABs,
170 and we might get better performance with larger TLABs if we only
171 have a handful of threads. We could sum up the space left in all
172 assigned TLABs and if that's more than some percentage of the
173 nursery size, reduce the TLAB size.
175 *) Explore placing unreachable objects on unused nursery memory.
    Instead of memset'ing a region to zero, place an int[] covering it.
177 A good place to start is add_nursery_frag. The tricky thing here is
178 placing those objects atomically outside of a collection.
180 *) Allocation should use asymmetric Dekker synchronization:
181 http://blogs.oracle.com/dave/resource/Asymmetric-Dekker-Synchronization.txt
    This should help on weakly-consistent architectures.
189 #define _XOPEN_SOURCE
190 #define _DARWIN_C_SOURCE
196 #ifdef HAVE_PTHREAD_H
199 #ifdef HAVE_SEMAPHORE_H
200 #include <semaphore.h>
208 #include "metadata/sgen-gc.h"
209 #include "metadata/metadata-internals.h"
210 #include "metadata/class-internals.h"
211 #include "metadata/gc-internal.h"
212 #include "metadata/object-internals.h"
213 #include "metadata/threads.h"
214 #include "metadata/sgen-cardtable.h"
215 #include "metadata/sgen-ssb.h"
216 #include "metadata/sgen-protocol.h"
217 #include "metadata/sgen-archdep.h"
218 #include "metadata/sgen-bridge.h"
219 #include "metadata/sgen-memory-governor.h"
220 #include "metadata/mono-gc.h"
221 #include "metadata/method-builder.h"
222 #include "metadata/profiler-private.h"
223 #include "metadata/monitor.h"
224 #include "metadata/threadpool-internals.h"
225 #include "metadata/mempool-internals.h"
226 #include "metadata/marshal.h"
227 #include "metadata/runtime.h"
228 #include "metadata/sgen-cardtable.h"
229 #include "metadata/sgen-pinning.h"
230 #include "metadata/sgen-workers.h"
231 #include "utils/mono-mmap.h"
232 #include "utils/mono-time.h"
233 #include "utils/mono-semaphore.h"
234 #include "utils/mono-counters.h"
235 #include "utils/mono-proclib.h"
236 #include "utils/mono-memory-model.h"
237 #include "utils/mono-logger-internal.h"
239 #include <mono/utils/mono-logger-internal.h>
240 #include <mono/utils/memcheck.h>
242 #if defined(__MACH__)
243 #include "utils/mach-support.h"
246 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
250 #include "mono/cil/opcode.def"
256 #undef pthread_create
258 #undef pthread_detach
261 * ######################################################################
262 * ######## Types and constants used by the GC.
263 * ######################################################################
/* 0 means not initialized, 1 is initialized, -1 means in progress */
static int gc_initialized = 0;
/* If set, check if we need to do something every X allocations */
gboolean has_per_allocation_action;
/* If set, do a heap check every X allocations */
guint32 verify_before_allocs = 0;
/* If set, do a minor collection before every X allocations */
guint32 collect_before_allocs = 0;
/* If set, do a whole heap check before each collection */
static gboolean whole_heap_check_before_collection = FALSE;
/* If set, do a heap consistency check before each minor collection */
static gboolean consistency_check_at_minor_collection = FALSE;
/* If set, check that there are no references to the domain left at domain unload */
static gboolean xdomain_checks = FALSE;
/* If not null, dump the heap after each collection into this file */
static FILE *heap_dump_file = NULL;
/* If set, mark stacks conservatively, even if precise marking is possible */
static gboolean conservative_stack_mark = FALSE;
/* If set, do a plausibility check on the scan_starts before and after
   each collection */
static gboolean do_scan_starts_check = FALSE;
/* If set, the nursery is collected by the parallel minor collector */
static gboolean nursery_collection_is_parallel = FALSE;
/* If set, minor (nursery) collections are not performed */
static gboolean disable_minor_collections = FALSE;
/* If set, major (old generation) collections are not performed */
static gboolean disable_major_collections = FALSE;
/* If set, gather statistics about object pinning (debugging aid) */
gboolean do_pin_stats = FALSE;
/* If set, verify the consistency of the nursery (debugging aid) */
static gboolean do_verify_nursery = FALSE;
/* If set, dump the contents of the nursery (debugging aid) */
static gboolean do_dump_nursery_content = FALSE;
#ifdef HEAVY_STATISTICS
/* Objects/bytes allocated in degraded mode (directly in the major heap) */
long long stat_objects_alloced_degraded = 0;
long long stat_bytes_alloced_degraded = 0;
/* copy_object invocations vs. objects actually copied, per generation */
long long stat_copy_object_called_nursery = 0;
long long stat_objects_copied_nursery = 0;
long long stat_copy_object_called_major = 0;
long long stat_objects_copied_major = 0;
/* scan_object invocations, per generation */
long long stat_scan_object_called_nursery = 0;
long long stat_scan_object_called_major = 0;
/* Allocated slots that ended up never being used */
long long stat_slots_allocated_in_vain;
/* Reasons a nursery object could not be copied during a minor collection */
long long stat_nursery_copy_object_failed_from_space = 0;
long long stat_nursery_copy_object_failed_forwarded = 0;
long long stat_nursery_copy_object_failed_pinned = 0;
long long stat_nursery_copy_object_failed_to_space = 0;
/* Invocation counters for the various write-barrier entry points */
static int stat_wbarrier_set_field = 0;
static int stat_wbarrier_set_arrayref = 0;
static int stat_wbarrier_arrayref_copy = 0;
static int stat_wbarrier_generic_store = 0;
static int stat_wbarrier_set_root = 0;
static int stat_wbarrier_value_copy = 0;
static int stat_wbarrier_object_copy = 0;
/* Number of minor/major collections performed so far */
int stat_minor_gcs = 0;
int stat_major_gcs = 0;
static long long stat_pinned_objects = 0;
/* Accumulated time spent in each phase of a minor collection */
static long long time_minor_pre_collection_fragment_clear = 0;
static long long time_minor_pinning = 0;
static long long time_minor_scan_remsets = 0;
static long long time_minor_scan_pinned = 0;
static long long time_minor_scan_registered_roots = 0;
static long long time_minor_scan_thread_data = 0;
static long long time_minor_finish_gray_stack = 0;
static long long time_minor_fragment_creation = 0;
/* Accumulated time spent in each phase of a major collection */
static long long time_major_pre_collection_fragment_clear = 0;
static long long time_major_pinning = 0;
static long long time_major_scan_pinned = 0;
static long long time_major_scan_registered_roots = 0;
static long long time_major_scan_thread_data = 0;
static long long time_major_scan_alloc_pinned = 0;
static long long time_major_scan_finalized = 0;
static long long time_major_scan_big_objects = 0;
static long long time_major_finish_gray_stack = 0;
static long long time_major_free_bigobjs = 0;
static long long time_major_los_sweep = 0;
static long long time_major_sweep = 0;
static long long time_major_fragment_creation = 0;
/* Verbosity of GC debugging output; higher levels print more */
int gc_debug_level = 0;
355 mono_gc_flush_info (void)
357 fflush (gc_debug_file);
361 #define TV_DECLARE SGEN_TV_DECLARE
362 #define TV_GETTIME SGEN_TV_GETTIME
363 #define TV_ELAPSED SGEN_TV_ELAPSED
364 #define TV_ELAPSED_MS SGEN_TV_ELAPSED_MS
366 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
368 NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
370 /* the runtime can register areas of memory as roots: we keep two lists of roots,
371 * a pinned root set for conservatively scanned roots and a normal one for
372 * precisely scanned roots (currently implemented as a single list).
374 typedef struct _RootRecord RootRecord;
380 #define object_is_forwarded SGEN_OBJECT_IS_FORWARDED
381 #define object_is_pinned SGEN_OBJECT_IS_PINNED
382 #define pin_object SGEN_PIN_OBJECT
383 #define unpin_object SGEN_UNPIN_OBJECT
385 #define ptr_in_nursery sgen_ptr_in_nursery
387 #define LOAD_VTABLE SGEN_LOAD_VTABLE
390 safe_name (void* obj)
392 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
393 return vt->klass->name;
396 #define safe_object_get_size sgen_safe_object_get_size
399 sgen_safe_name (void* obj)
401 return safe_name (obj);
405 * ######################################################################
406 * ######## Global data.
407 * ######################################################################
/* Global GC lock */
LOCK_DECLARE (gc_mutex);
/* Non-zero while the GC is disabled */
static int gc_disabled = 0;
/* If set, a card table is used as the write barrier — presumably instead of the SSB remembered set; confirm */
static gboolean use_cardtable;
#define SCAN_START_SIZE SGEN_SCAN_START_SIZE
/* OS page size in bytes; presumably updated from the OS at init — confirm */
static mword pagesize = 4096;
/* Non-zero when allocating in degraded mode */
int degraded_mode = 0;
static mword bytes_pinned_from_failed_allocation = 0;
/* The memory section backing the nursery */
GCMemSection *nursery_section = NULL;
/* Bounds of all memory known to the GC, for fast pointer filtering */
static mword lowest_heap_address = ~(mword)0;
static mword highest_heap_address = 0;
static LOCK_DECLARE (interruption_mutex);
static LOCK_DECLARE (pin_queue_mutex);
/* Serialize access to the pin queue */
#define LOCK_PIN_QUEUE mono_mutex_lock (&pin_queue_mutex)
#define UNLOCK_PIN_QUEUE mono_mutex_unlock (&pin_queue_mutex)
431 typedef struct _FinalizeReadyEntry FinalizeReadyEntry;
432 struct _FinalizeReadyEntry {
433 FinalizeReadyEntry *next;
437 typedef struct _EphemeronLinkNode EphemeronLinkNode;
439 struct _EphemeronLinkNode {
440 EphemeronLinkNode *next;
449 int current_collection_generation = -1;
452 * The link pointer is hidden by negating each bit. We use the lowest
453 * bit of the link (before negation) to store whether it needs
454 * resurrection tracking.
/*
 * The cast must be to an integer type as wide as a pointer: `gulong` is only
 * 32 bits on LLP64 platforms (e.g. Win64), which would truncate the pointer,
 * so we use `gsize`.  REVEAL_POINTER also clears the low tag bits, and the
 * mask must be pointer-width too (a plain `~3L` would be a 32-bit mask on
 * LLP64).
 */
#define HIDE_POINTER(p,t) ((gpointer)(~((gsize)(p)|((t)?1:0))))
#define REVEAL_POINTER(p) ((gpointer)((~(gsize)(p))&~(gsize)3))
459 /* objects that are ready to be finalized */
460 static FinalizeReadyEntry *fin_ready_list = NULL;
461 static FinalizeReadyEntry *critical_fin_list = NULL;
463 static EphemeronLinkNode *ephemeron_list;
465 static int num_ready_finalizers = 0;
466 static int no_finalize = 0;
469 ROOT_TYPE_NORMAL = 0, /* "normal" roots */
470 ROOT_TYPE_PINNED = 1, /* roots without a GC descriptor */
471 ROOT_TYPE_WBARRIER = 2, /* roots with a write barrier */
475 /* registered roots: the key to the hash is the root start address */
477 * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
479 static SgenHashTable roots_hash [ROOT_TYPE_NUM] = {
480 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
481 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
482 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL)
484 static mword roots_size = 0; /* amount of memory in the root set */
486 #define GC_ROOT_NUM 32
489 void *objects [GC_ROOT_NUM];
490 int root_types [GC_ROOT_NUM];
491 uintptr_t extra_info [GC_ROOT_NUM];
495 notify_gc_roots (GCRootReport *report)
499 mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
504 add_profile_gc_root (GCRootReport *report, void *object, int rtype, uintptr_t extra_info)
506 if (report->count == GC_ROOT_NUM)
507 notify_gc_roots (report);
508 report->objects [report->count] = object;
509 report->root_types [report->count] = rtype;
510 report->extra_info [report->count++] = (uintptr_t)((MonoVTable*)LOAD_VTABLE (object))->klass;
513 MonoNativeTlsKey thread_info_key;
515 #ifdef HAVE_KW_THREAD
516 __thread SgenThreadInfo *thread_info;
517 __thread gpointer *store_remset_buffer;
518 __thread long store_remset_buffer_index;
519 __thread char *stack_end;
520 __thread long *store_remset_buffer_index_addr;
/* The size of a TLAB (thread-local allocation buffer), in bytes. */
/* The bigger the value, the less often we have to go to the slow path to allocate a new
 * one, but the more space is wasted by threads not allocating much memory.
 *
 * FIXME: Make this self-tuning for each thread.
 */
guint32 tlab_size = (1024 * 4);
531 #define MAX_SMALL_OBJ_SIZE SGEN_MAX_SMALL_OBJ_SIZE
533 /* Functions supplied by the runtime to be called by the GC */
534 static MonoGCCallbacks gc_callbacks;
536 #define ALLOC_ALIGN SGEN_ALLOC_ALIGN
537 #define ALLOC_ALIGN_BITS SGEN_ALLOC_ALIGN_BITS
539 #define ALIGN_UP SGEN_ALIGN_UP
541 #define MOVED_OBJECTS_NUM 64
542 static void *moved_objects [MOVED_OBJECTS_NUM];
543 static int moved_objects_idx = 0;
545 /* Vtable of the objects used to fill out nursery fragments before a collection */
546 static MonoVTable *array_fill_vtable;
548 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
549 MonoNativeThreadId main_gc_thread = NULL;
552 /*Object was pinned during the current collection*/
553 static mword objects_pinned;
556 * ######################################################################
557 * ######## Macros and function declarations.
558 * ######################################################################
562 align_pointer (void *ptr)
564 mword p = (mword)ptr;
565 p += sizeof (gpointer) - 1;
566 p &= ~ (sizeof (gpointer) - 1);
570 typedef SgenGrayQueue GrayQueue;
572 /* forward declarations */
573 static int stop_world (int generation);
574 static int restart_world (int generation, GGTimingInfo *timing);
575 static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue);
576 static void scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, char *addr_start, char *addr_end, int root_type, GrayQueue *queue);
577 static void scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeReadyEntry *list, GrayQueue *queue);
578 static void report_finalizer_roots (void);
579 static void report_registered_roots (void);
580 static void find_pinning_ref_from_thread (char *obj, size_t size);
581 static void update_current_thread_stack (void *start);
582 static void collect_bridge_objects (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, GrayQueue *queue);
583 static void finalize_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, GrayQueue *queue);
584 static void process_fin_stage_entries (void);
585 static void null_link_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, gboolean before_finalization, GrayQueue *queue);
586 static void null_links_for_domain (MonoDomain *domain, int generation);
587 static void remove_finalizers_for_domain (MonoDomain *domain, int generation);
588 static void process_dislink_stage_entries (void);
590 static void pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue);
591 static int pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, GrayQueue *queue);
592 static void finish_gray_stack (char *start_addr, char *end_addr, int generation, GrayQueue *queue);
594 static void mono_gc_register_disappearing_link (MonoObject *obj, void **link, gboolean track, gboolean in_gc);
595 static gboolean mono_gc_is_critical_method (MonoMethod *method);
597 void mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise);
600 static void init_stats (void);
602 static int mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue);
603 static void clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue);
604 static void null_ephemerons_for_domain (MonoDomain *domain);
606 SgenObjectOperations current_object_ops;
607 SgenMajorCollector major_collector;
608 SgenMinorCollector sgen_minor_collector;
609 static GrayQueue gray_queue;
611 static SgenRemeberedSet remset;
614 #define WORKERS_DISTRIBUTE_GRAY_QUEUE (sgen_collection_is_parallel () ? sgen_workers_get_distribute_gray_queue () : &gray_queue)
616 static SgenGrayQueue*
617 sgen_workers_get_job_gray_queue (WorkerData *worker_data)
619 return worker_data ? &worker_data->private_gray_queue : WORKERS_DISTRIBUTE_GRAY_QUEUE;
623 is_xdomain_ref_allowed (gpointer *ptr, char *obj, MonoDomain *domain)
625 MonoObject *o = (MonoObject*)(obj);
626 MonoObject *ref = (MonoObject*)*(ptr);
627 int offset = (char*)(ptr) - (char*)o;
629 if (o->vtable->klass == mono_defaults.thread_class && offset == G_STRUCT_OFFSET (MonoThread, internal_thread))
631 if (o->vtable->klass == mono_defaults.internal_thread_class && offset == G_STRUCT_OFFSET (MonoInternalThread, current_appcontext))
633 if (mono_class_has_parent_fast (o->vtable->klass, mono_defaults.real_proxy_class) &&
634 offset == G_STRUCT_OFFSET (MonoRealProxy, unwrapped_server))
636 /* Thread.cached_culture_info */
637 if (!strcmp (ref->vtable->klass->name_space, "System.Globalization") &&
638 !strcmp (ref->vtable->klass->name, "CultureInfo") &&
639 !strcmp(o->vtable->klass->name_space, "System") &&
640 !strcmp(o->vtable->klass->name, "Object[]"))
643 * at System.IO.MemoryStream.InternalConstructor (byte[],int,int,bool,bool) [0x0004d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:121
644 * at System.IO.MemoryStream..ctor (byte[]) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:81
645 * at (wrapper remoting-invoke-with-check) System.IO.MemoryStream..ctor (byte[]) <IL 0x00020, 0xffffffff>
646 * at System.Runtime.Remoting.Messaging.CADMethodCallMessage.GetArguments () [0x0000d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/CADMessages.cs:327
647 * at System.Runtime.Remoting.Messaging.MethodCall..ctor (System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/MethodCall.cs:87
648 * at System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) [0x00018] in /home/schani/Work/novell/trunk/mcs/class/corlib/System/AppDomain.cs:1213
649 * at (wrapper remoting-invoke-with-check) System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) <IL 0x0003d, 0xffffffff>
650 * at System.Runtime.Remoting.Channels.CrossAppDomainSink.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00008] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Channels/CrossAppDomainChannel.cs:198
651 * at (wrapper runtime-invoke) object.runtime_invoke_CrossAppDomainSink/ProcessMessageRes_object_object (object,intptr,intptr,intptr) <IL 0x0004c, 0xffffffff>
653 if (!strcmp (ref->vtable->klass->name_space, "System") &&
654 !strcmp (ref->vtable->klass->name, "Byte[]") &&
655 !strcmp (o->vtable->klass->name_space, "System.IO") &&
656 !strcmp (o->vtable->klass->name, "MemoryStream"))
658 /* append_job() in threadpool.c */
659 if (!strcmp (ref->vtable->klass->name_space, "System.Runtime.Remoting.Messaging") &&
660 !strcmp (ref->vtable->klass->name, "AsyncResult") &&
661 !strcmp (o->vtable->klass->name_space, "System") &&
662 !strcmp (o->vtable->klass->name, "Object[]") &&
663 mono_thread_pool_is_queue_array ((MonoArray*) o))
669 check_reference_for_xdomain (gpointer *ptr, char *obj, MonoDomain *domain)
671 MonoObject *o = (MonoObject*)(obj);
672 MonoObject *ref = (MonoObject*)*(ptr);
673 int offset = (char*)(ptr) - (char*)o;
675 MonoClassField *field;
678 if (!ref || ref->vtable->domain == domain)
680 if (is_xdomain_ref_allowed (ptr, obj, domain))
684 for (class = o->vtable->klass; class; class = class->parent) {
687 for (i = 0; i < class->field.count; ++i) {
688 if (class->fields[i].offset == offset) {
689 field = &class->fields[i];
697 if (ref->vtable->klass == mono_defaults.string_class)
698 str = mono_string_to_utf8 ((MonoString*)ref);
701 g_print ("xdomain reference in %p (%s.%s) at offset %d (%s) to %p (%s.%s) (%s) - pointed to by:\n",
702 o, o->vtable->klass->name_space, o->vtable->klass->name,
703 offset, field ? field->name : "",
704 ref, ref->vtable->klass->name_space, ref->vtable->klass->name, str ? str : "");
705 mono_gc_scan_for_specific_ref (o, TRUE);
711 #define HANDLE_PTR(ptr,obj) check_reference_for_xdomain ((ptr), (obj), domain)
714 scan_object_for_xdomain_refs (char *start, mword size, void *data)
716 MonoDomain *domain = ((MonoObject*)start)->vtable->domain;
718 #include "sgen-scan-object.h"
721 static gboolean scan_object_for_specific_ref_precise = TRUE;
724 #define HANDLE_PTR(ptr,obj) do { \
725 if ((MonoObject*)*(ptr) == key) { \
726 g_print ("found ref to %p in object %p (%s) at offset %td\n", \
727 key, (obj), safe_name ((obj)), ((char*)(ptr) - (char*)(obj))); \
732 scan_object_for_specific_ref (char *start, MonoObject *key)
736 if ((forwarded = SGEN_OBJECT_IS_FORWARDED (start)))
739 if (scan_object_for_specific_ref_precise) {
740 #include "sgen-scan-object.h"
742 mword *words = (mword*)start;
743 size_t size = safe_object_get_size ((MonoObject*)start);
745 for (i = 0; i < size / sizeof (mword); ++i) {
746 if (words [i] == (mword)key) {
747 g_print ("found possible ref to %p in object %p (%s) at offset %td\n",
748 key, start, safe_name (start), i * sizeof (mword));
755 sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags)
757 while (start < end) {
761 if (!*(void**)start) {
762 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
767 if (!(obj = SGEN_OBJECT_IS_FORWARDED (start)))
773 size = ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
775 if ((MonoVTable*)SGEN_LOAD_VTABLE (obj) != array_fill_vtable)
776 callback (obj, size, data);
783 scan_object_for_specific_ref_callback (char *obj, size_t size, MonoObject *key)
785 scan_object_for_specific_ref (obj, key);
789 check_root_obj_specific_ref (RootRecord *root, MonoObject *key, MonoObject *obj)
793 g_print ("found ref to %p in root record %p\n", key, root);
796 static MonoObject *check_key = NULL;
797 static RootRecord *check_root = NULL;
800 check_root_obj_specific_ref_from_marker (void **obj)
802 check_root_obj_specific_ref (check_root, check_key, *obj);
806 scan_roots_for_specific_ref (MonoObject *key, int root_type)
812 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
813 mword desc = root->root_desc;
817 switch (desc & ROOT_DESC_TYPE_MASK) {
818 case ROOT_DESC_BITMAP:
819 desc >>= ROOT_DESC_TYPE_SHIFT;
822 check_root_obj_specific_ref (root, key, *start_root);
827 case ROOT_DESC_COMPLEX: {
828 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
829 int bwords = (*bitmap_data) - 1;
830 void **start_run = start_root;
832 while (bwords-- > 0) {
833 gsize bmap = *bitmap_data++;
834 void **objptr = start_run;
837 check_root_obj_specific_ref (root, key, *objptr);
841 start_run += GC_BITS_PER_WORD;
845 case ROOT_DESC_USER: {
846 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
847 marker (start_root, check_root_obj_specific_ref_from_marker);
850 case ROOT_DESC_RUN_LEN:
851 g_assert_not_reached ();
853 g_assert_not_reached ();
855 } SGEN_HASH_TABLE_FOREACH_END;
862 mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise)
867 scan_object_for_specific_ref_precise = precise;
869 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
870 (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key, TRUE);
872 major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
874 sgen_los_iterate_objects ((IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
876 scan_roots_for_specific_ref (key, ROOT_TYPE_NORMAL);
877 scan_roots_for_specific_ref (key, ROOT_TYPE_WBARRIER);
879 SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], ptr, root) {
880 while (ptr < (void**)root->end_root) {
881 check_root_obj_specific_ref (root, *ptr, key);
884 } SGEN_HASH_TABLE_FOREACH_END;
888 need_remove_object_for_domain (char *start, MonoDomain *domain)
890 if (mono_object_domain (start) == domain) {
891 DEBUG (4, fprintf (gc_debug_file, "Need to cleanup object %p\n", start));
892 binary_protocol_cleanup (start, (gpointer)LOAD_VTABLE (start), safe_object_get_size ((MonoObject*)start));
/*
 * Null out cross-domain pointers held by the object at @start before @domain
 * is unloaded.  Currently this handles remoting real proxies whose wrapped
 * server object lives in (or was already cleared from) the dying domain.
 */
899 process_object_for_domain_clearing (char *start, MonoDomain *domain)
901 GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
/* Threads are always root-domain objects; anything else is a bug. */
902 if (vt->klass == mono_defaults.internal_thread_class)
903 g_assert (mono_object_domain (start) == mono_get_root_domain ());
904 /* The object could be a proxy for an object in the domain
906 if (mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
907 MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
909 /* The server could already have been zeroed out, so
910 we need to check for that, too. */
911 if (server && (!LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
912 DEBUG (4, fprintf (gc_debug_file, "Cleaning up remote pointer in %p to object %p\n",
/* Drop the reference so the dying domain's object is not kept alive. */
914 ((MonoRealProxy*)start)->unwrapped_server = NULL;
/* Domain currently being checked by check_obj_not_in_domain (xdomain debugging). */
919 static MonoDomain *check_domain = NULL;
/* Root-marking callback: assert the referenced object is NOT in check_domain. */
922 check_obj_not_in_domain (void **o)
924 g_assert (((MonoObject*)(*o))->vtable->domain != check_domain);
/*
 * Debug check: walk every registered root of @root_type and assert that no
 * root slot points into @domain (used while unloading a domain).  Dispatches
 * on the root descriptor type, mirroring precisely_scan_objects_from.
 */
928 scan_for_registered_roots_in_domain (MonoDomain *domain, int root_type)
932 check_domain = domain;
933 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
934 mword desc = root->root_desc;
936 /* The MonoDomain struct is allowed to hold
937 references to objects in its own domain. */
938 if (start_root == (void**)domain)
941 switch (desc & ROOT_DESC_TYPE_MASK) {
942 case ROOT_DESC_BITMAP:
/* One bit per word slot: check each slot whose bit is set. */
943 desc >>= ROOT_DESC_TYPE_SHIFT;
945 if ((desc & 1) && *start_root)
946 check_obj_not_in_domain (*start_root);
951 case ROOT_DESC_COMPLEX: {
/* Multi-word bitmap descriptor: first word is the word count. */
952 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
953 int bwords = (*bitmap_data) - 1;
954 void **start_run = start_root;
956 while (bwords-- > 0) {
957 gsize bmap = *bitmap_data++;
958 void **objptr = start_run;
960 if ((bmap & 1) && *objptr)
961 check_obj_not_in_domain (*objptr);
965 start_run += GC_BITS_PER_WORD;
969 case ROOT_DESC_USER: {
/* User-supplied marker walks the root for us. */
970 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
971 marker (start_root, check_obj_not_in_domain);
974 case ROOT_DESC_RUN_LEN:
975 g_assert_not_reached ();
977 g_assert_not_reached ();
979 } SGEN_HASH_TABLE_FOREACH_END;
/*
 * Debug check: scan every object on the heap (nursery, major, LOS) for
 * cross-appdomain references via scan_object_for_xdomain_refs.
 */
985 check_for_xdomain_refs (void)
989 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
990 (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL, FALSE);
992 major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);
994 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
995 scan_object_for_xdomain_refs (bigobj->data, bigobj->size, NULL);
999 clear_domain_process_object (char *obj, MonoDomain *domain)
1003 process_object_for_domain_clearing (obj, domain);
1004 remove = need_remove_object_for_domain (obj, domain);
1006 if (remove && ((MonoObject*)obj)->synchronisation) {
1007 void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
1009 mono_gc_register_disappearing_link (NULL, dislink, FALSE, TRUE);
/* Nursery pass: zero out objects that belong to the dying domain. */
1016 clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
1018 if (clear_domain_process_object (obj, domain))
1019 memset (obj, 0, size);
/* Major-heap first pass: clear cross-domain pointers only; freeing happens
 * in a later pass (see the two-pass comment in mono_gc_clear_domain). */
1023 clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
1025 clear_domain_process_object (obj, domain);
/* Major-heap second pass: free non-pinned objects of the dying domain. */
1029 clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
1031 if (need_remove_object_for_domain (obj, domain))
1032 major_collector.free_non_pinned_object (obj, size);
/* Major-heap second pass: free pinned objects of the dying domain. */
1036 clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
1038 if (need_remove_object_for_domain (obj, domain))
1039 major_collector.free_pinned_object (obj, size);
1043 * When appdomains are unloaded we can easily remove objects that have finalizers,
1044 * but all the others could still be present in random places on the heap.
1045 * We need a sweep to get rid of them even though it's going to be costly
1047 * The reason we need to remove them is because we access the vtable and class
1048 * structures to know the object size and the reference bitmap: once the domain is
1049 * unloaded they point to random memory.
/*
 * Remove every object belonging to @domain from the heap as part of
 * appdomain unload: flush staged finalizer/dislink entries, null ephemerons
 * and weak links for the domain, drop its finalizers, then sweep nursery,
 * major heap and LOS.  See the header comment above for why a full sweep is
 * required.  NOTE(review): sparse extract — locking/STW lines are missing.
 */
1052 mono_gc_clear_domain (MonoDomain * domain)
1054 LOSObject *bigobj, *prev;
1059 process_fin_stage_entries ();
1060 process_dislink_stage_entries ();
1062 sgen_clear_nursery_fragments ();
/* Optional debugging: verify no other domain references this one. */
1064 if (xdomain_checks && domain != mono_get_root_domain ()) {
1065 scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
1066 scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
1067 check_for_xdomain_refs ();
1070 /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
1071 to memory returned to the OS.*/
1072 null_ephemerons_for_domain (domain);
1074 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
1075 null_links_for_domain (domain, i);
1077 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
1078 remove_finalizers_for_domain (domain, i);
1080 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
1081 (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE);
1083 /* We need two passes over major and large objects because
1084 freeing such objects might give their memory back to the OS
1085 (in the case of large objects) or obliterate its vtable
1086 (pinned objects with major-copying or pinned and non-pinned
1087 objects with major-mark&sweep), but we might need to
1088 dereference a pointer from an object to another object if
1089 the first object is a proxy. */
1090 major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
1091 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
1092 clear_domain_process_object (bigobj->data, domain);
/* Second pass over LOS: unlink and free large objects of the domain. */
1095 for (bigobj = los_object_list; bigobj;) {
1096 if (need_remove_object_for_domain (bigobj->data, domain)) {
1097 LOSObject *to_free = bigobj;
1099 prev->next = bigobj->next;
1101 los_object_list = bigobj->next;
1102 bigobj = bigobj->next;
1103 DEBUG (4, fprintf (gc_debug_file, "Freeing large object %p\n",
1105 sgen_los_free_object (to_free);
1109 bigobj = bigobj->next;
/* Second pass over the major heap: actually free the domain's objects. */
1111 major_collector.iterate_objects (TRUE, FALSE, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
1112 major_collector.iterate_objects (FALSE, TRUE, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
1114 if (G_UNLIKELY (do_pin_stats)) {
1115 if (domain == mono_get_root_domain ())
1116 sgen_pin_stats_print_class_stats ();
1123 * sgen_add_to_global_remset:
1125 * The global remset contains locations which point into newspace after
1126 * a minor collection. This can happen if the objects they point to are pinned.
1128 * LOCKING: If called from a parallel collector, the global remset
1129 * lock must be held. For serial collectors that is not necessary.
/* Record @ptr in the global remembered set (see comment above for locking). */
1132 sgen_add_to_global_remset (gpointer ptr)
1134 remset.record_pointer (ptr);
1138 * sgen_drain_gray_stack:
1140 * Scan objects in the gray stack until the stack is empty. This should be called
1141 * frequently after each object is copied, to achieve better locality and cache
/*
 * Pop objects off the gray stack and scan them with the current collector's
 * scan_object.  @max_objs == -1 drains the queue completely; otherwise the
 * queue is drained in batches of @max_objs.
 * NOTE(review): return statements fall in lines missing from this extract.
 */
1145 sgen_drain_gray_stack (GrayQueue *queue, int max_objs)
1148 ScanObjectFunc scan_func = current_object_ops.scan_object;
1150 if (max_objs == -1) {
/* Unbounded drain: loop until the queue is empty. */
1152 GRAY_OBJECT_DEQUEUE (queue, obj);
1155 DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", obj, safe_name (obj)));
1156 scan_func (obj, queue);
/* Bounded drain: scan up to max_objs objects per outer iteration. */
1162 for (i = 0; i != max_objs; ++i) {
1163 GRAY_OBJECT_DEQUEUE (queue, obj);
1166 DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", obj, safe_name (obj)));
1167 scan_func (obj, queue);
1169 } while (max_objs < 0);
1175 * Addresses from start to end are already sorted. This function finds
1176 * the object header for each address and pins the object. The
1177 * addresses must be inside the passed section. The (start of the)
1178 * address array is overwritten with the addresses of the actually
1179 * pinned objects. Return the number of pinned objects.
/*
 * Pin the objects covered by the sorted address array [start, end).  For each
 * conservative address we locate the containing object by walking from the
 * nearest scan-start, pin it, push it on the gray queue, and compact the
 * surviving addresses back into the array.  Returns the pinned count (the
 * return line is missing from this extract).
 */
1182 pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, GrayQueue *queue)
1187 void *last_obj = NULL;
1188 size_t last_obj_size = 0;
1191 void **definitely_pinned = start;
1193 sgen_nursery_allocator_prepare_for_pinning ();
1195 while (start < end) {
1197 /* the range check should be reduntant */
1198 if (addr != last && addr >= start_nursery && addr < end_nursery) {
1199 DEBUG (5, fprintf (gc_debug_file, "Considering pinning addr %p\n", addr));
1200 /* multiple pointers to the same object */
1201 if (addr >= last_obj && (char*)addr < (char*)last_obj + last_obj_size) {
/* Find the closest scan-start at or before addr to begin the walk. */
1205 idx = ((char*)addr - (char*)section->data) / SCAN_START_SIZE;
1206 g_assert (idx < section->num_scan_start);
1207 search_start = (void*)section->scan_starts [idx];
1208 if (!search_start || search_start > addr) {
1211 search_start = section->scan_starts [idx];
1212 if (search_start && search_start <= addr)
1215 if (!search_start || search_start > addr)
1216 search_start = start_nursery;
1218 if (search_start < last_obj)
1219 search_start = (char*)last_obj + last_obj_size;
1220 /* now addr should be in an object a short distance from search_start
1221 * Note that search_start must point to zeroed mem or point to an object.
/* Zeroed word: no object here, skip one pointer-sized step. */
1225 if (!*(void**)search_start) {
1226 /* Consistency check */
1228 for (frag = nursery_fragments; frag; frag = frag->next) {
1229 if (search_start >= frag->fragment_start && search_start < frag->fragment_end)
1230 g_assert_not_reached ();
1234 search_start = (void*)ALIGN_UP ((mword)search_start + sizeof (gpointer));
1237 last_obj = search_start;
1238 last_obj_size = ALIGN_UP (safe_object_get_size ((MonoObject*)search_start));
/* synchronisation == -1 marks the start of a nursery fragment. */
1240 if (((MonoObject*)last_obj)->synchronisation == GINT_TO_POINTER (-1)) {
1241 /* Marks the beginning of a nursery fragment, skip */
1243 DEBUG (8, fprintf (gc_debug_file, "Pinned try match %p (%s), size %zd\n", last_obj, safe_name (last_obj), last_obj_size));
1244 if (addr >= search_start && (char*)addr < (char*)last_obj + last_obj_size) {
1245 DEBUG (4, fprintf (gc_debug_file, "Pinned object %p, vtable %p (%s), count %d\n", search_start, *(void**)search_start, safe_name (search_start), count));
1246 binary_protocol_pin (search_start, (gpointer)LOAD_VTABLE (search_start), safe_object_get_size (search_start));
1247 pin_object (search_start);
1248 GRAY_OBJECT_ENQUEUE (queue, search_start);
1249 if (G_UNLIKELY (do_pin_stats))
1250 sgen_pin_stats_register_object (search_start, last_obj_size);
/* Compact: overwrite the address array with the pinned object. */
1251 definitely_pinned [count] = search_start;
1256 /* skip to the next object */
1257 search_start = (void*)((char*)search_start + last_obj_size);
1258 } while (search_start <= addr);
1259 /* we either pinned the correct object or we ignored the addr because
1260 * it points to unused zeroed memory.
1266 //printf ("effective pinned: %d (at the end: %d)\n", count, (char*)end_nursery - (char*)last);
/* Report pinned objects to the profiler as GC roots, if requested. */
1267 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
1268 GCRootReport report;
1270 for (idx = 0; idx < count; ++idx)
1271 add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
1272 notify_gc_roots (&report);
1274 stat_pinned_objects += count;
/*
 * Pin the objects referenced by the section's staged pin queue, then shrink
 * the queue to the entries that actually pinned something.
 */
1279 sgen_pin_objects_in_section (GCMemSection *section, GrayQueue *queue)
1281 int num_entries = section->pin_queue_num_entries;
1283 void **start = section->pin_queue_start;
1285 reduced_to = pin_objects_from_addresses (section, start, start + num_entries,
1286 section->data, section->next_data, queue);
1287 section->pin_queue_num_entries = reduced_to;
/* Missing line presumably resets pin_queue_start when the queue is empty. */
1289 section->pin_queue_start = NULL;
/*
 * Pin a single @object and record it on the gray queue.  In the parallel
 * collector the object arrives already pinned (via CAS in the caller), so
 * only the pin stage pointer is recorded; in the serial case we set the pin
 * bit here as well.
 */
1295 sgen_pin_object (void *object, GrayQueue *queue)
1297 if (sgen_collection_is_parallel ()) {
1299 /*object arrives pinned*/
1300 sgen_pin_stage_ptr (object);
1304 SGEN_PIN_OBJECT (object);
1305 sgen_pin_stage_ptr (object);
1307 if (G_UNLIKELY (do_pin_stats))
1308 sgen_pin_stats_register_object (object, safe_object_get_size (object));
1310 GRAY_OBJECT_ENQUEUE (queue, object);
1311 binary_protocol_pin (object, (gpointer)LOAD_VTABLE (object), safe_object_get_size (object));
/*
 * Parallel-collector helper: try to pin @obj (nursery objects via CAS on the
 * vtable word, major objects via the major collector); if another worker
 * forwarded the object first, update *ptr to the forwarding address instead.
 * NOTE(review): the retry-loop framing lines are missing from this extract.
 */
1315 sgen_parallel_pin_or_update (void **ptr, void *obj, MonoVTable *vt, SgenGrayQueue *queue)
1319 gboolean major_pinned = FALSE;
1321 if (sgen_ptr_in_nursery (obj)) {
/* CAS the pin bit into the vtable word; success means we own the pin. */
1322 if (SGEN_CAS_PTR (obj, (void*)((mword)vt | SGEN_PINNED_BIT), vt) == vt) {
1323 sgen_pin_object (obj, queue);
1327 major_collector.pin_major_object (obj, queue);
1328 major_pinned = TRUE;
1331 vtable_word = *(mword*)obj;
1332 /*someone else forwarded it, update the pointer and bail out*/
1333 if (vtable_word & SGEN_FORWARDED_BIT) {
1334 *ptr = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1338 /*someone pinned it, nothing to do.*/
1339 if (vtable_word & SGEN_PINNED_BIT || major_pinned)
1344 /* Sort the addresses in array in increasing order.
1345 * Done using a by-the-book heap sort, which has decent, stable performance and is pretty cache efficient.
/*
 * In-place heap sort of @array (ascending).  Phase 1 sifts each element up
 * to build a max-heap; phase 2 repeatedly swaps the root to the end and
 * sifts down.  NOTE(review): some loop-framing lines are missing here.
 */
1348 sgen_sort_addresses (void **array, int size)
/* Phase 1: build the max-heap by sifting each new element toward the root. */
1353 for (i = 1; i < size; ++i) {
1356 int parent = (child - 1) / 2;
1358 if (array [parent] >= array [child])
1361 tmp = array [parent];
1362 array [parent] = array [child];
1363 array [child] = tmp;
/* Phase 2: extract the maximum and restore the heap property. */
1369 for (i = size - 1; i > 0; --i) {
1372 array [i] = array [0];
1378 while (root * 2 + 1 <= end) {
1379 int child = root * 2 + 1;
/* Pick the larger of the two children. */
1381 if (child < end && array [child] < array [child + 1])
1383 if (array [root] >= array [child])
1387 array [root] = array [child];
1388 array [child] = tmp;
1396 * Scan the memory between start and end and queue values which could be pointers
1397 * to the area between start_nursery and end_nursery for later consideration.
1398 * Typically used for thread stacks.
/*
 * Conservative scan of [start, end) (typically a thread stack): every word
 * that looks like a pointer into [start_nursery, end_nursery) is aligned
 * down to ALLOC_ALIGN and staged for pinning.  Duplicates are handled later
 * by sorting/uniquing the staged array (see the long comment below).
 */
1401 conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type)
1404 while (start < end) {
1405 if (*start >= start_nursery && *start < end_nursery) {
1407 * *start can point to the middle of an object
1408 * note: should we handle pointing at the end of an object?
1409 * pinning in C# code disallows pointing at the end of an object
1410 * but there is some small chance that an optimizing C compiler
1411 * may keep the only reference to an object by pointing
1412 * at the end of it. We ignore this small chance for now.
1413 * Pointers to the end of an object are indistinguishable
1414 * from pointers to the start of the next object in memory
1415 * so if we allow that we'd need to pin two objects...
1416 * We queue the pointer in an array, the
1417 * array will then be sorted and uniqued. This way
1418 * we can coalesce several pinning pointers and it should
1419 * be faster since we'd do a memory scan with increasing
1420 * addresses. Note: we can align the address to the allocation
1421 * alignment, so the unique process is more effective.
1423 mword addr = (mword)*start;
1424 addr &= ~(ALLOC_ALIGN - 1);
/* Re-check after alignment: the aligned address may fall outside. */
1425 if (addr >= (mword)start_nursery && addr < (mword)end_nursery)
1426 sgen_pin_stage_ptr ((void*)addr);
1427 if (G_UNLIKELY (do_pin_stats)) {
1428 if (ptr_in_nursery ((void*)addr))
1429 sgen_pin_stats_register_address ((char*)addr, pin_type);
1431 DEBUG (6, if (count) fprintf (gc_debug_file, "Pinning address %p from %p\n", (void*)addr, start));
1436 DEBUG (7, if (count) fprintf (gc_debug_file, "found %d potential pinned heap pointers\n", count));
1440 * Debugging function: find in the conservative roots where @obj is being pinned.
/*
 * Debugging aid: search the conservative (descriptor-less) normal roots and
 * the thread stacks for any slot pointing into [obj, obj+size), i.e. the
 * reason @obj is pinned.
 */
1442 static G_GNUC_UNUSED void
1443 find_pinning_reference (char *obj, size_t size)
1447 char *endobj = obj + size;
1449 SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_NORMAL], start, root) {
1450 /* if desc is non-null it has precise info */
1451 if (!root->root_desc) {
1452 while (start < (char**)root->end_root) {
1453 if (*start >= obj && *start < endobj) {
1454 DEBUG (0, fprintf (gc_debug_file, "Object %p referenced in pinned roots %p-%p\n", obj, start, root->end_root));
1459 } SGEN_HASH_TABLE_FOREACH_END;
/* Also check each registered thread's stack and registers. */
1461 find_pinning_ref_from_thread (obj, size);
1465 * The first thing we do in a collection is to identify pinned objects.
1466 * This function considers all the areas of memory that need to be
1467 * conservatively scanned.
/*
 * First phase of a collection: conservatively stage pin candidates from the
 * API-pinned root set and from all thread stacks/registers.
 */
1470 pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue)
1474 DEBUG (2, fprintf (gc_debug_file, "Scanning pinned roots (%d bytes, %d/%d entries)\n", (int)roots_size, roots_hash [ROOT_TYPE_NORMAL].num_entries, roots_hash [ROOT_TYPE_PINNED].num_entries));
1475 /* objects pinned from the API are inside these roots */
1476 SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], start_root, root) {
1477 DEBUG (6, fprintf (gc_debug_file, "Pinned roots %p-%p\n", start_root, root->end_root));
1478 conservatively_pin_objects_from (start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
1479 } SGEN_HASH_TABLE_FOREACH_END;
1480 /* now deal with the thread stacks
1481 * in the future we should be able to conservatively scan only:
1482 * *) the cpu registers
1483 * *) the unmanaged stack frames
1484 * *) the _last_ managed stack frame
1485 * *) pointers slots in managed frames
1487 scan_thread_data (start_nursery, end_nursery, FALSE, queue);
/* Per-thread context for user-descriptor root marking: the copy/mark
 * function and its gray queue, stashed in TLS so the single-argument
 * marker callback can reach them.
 * NOTE(review): the struct's opening line is missing from this extract. */
1491 CopyOrMarkObjectFunc func;
1493 } UserCopyOrMarkData;
/* TLS slot holding the current UserCopyOrMarkData (or NULL). */
1495 static MonoNativeTlsKey user_copy_or_mark_key;
/* Allocate the TLS slot used by set/get of UserCopyOrMarkData. */
1498 init_user_copy_or_mark_key (void)
1500 mono_native_tls_alloc (&user_copy_or_mark_key, NULL);
/* Install (or clear, with NULL) the current thread's UserCopyOrMarkData. */
1504 set_user_copy_or_mark_data (UserCopyOrMarkData *data)
1506 mono_native_tls_set_value (user_copy_or_mark_key, data);
/* Adapter passed to user root markers: forwards @obj to the copy/mark
 * function stored in TLS by set_user_copy_or_mark_data. */
1510 single_arg_user_copy_or_mark (void **obj)
1512 UserCopyOrMarkData *data = mono_native_tls_get_value (user_copy_or_mark_key);
1514 data->func (obj, data->queue);
1518 * The memory area from start_root to end_root contains pointers to objects.
1519 * Their position is precisely described by @desc (this means that the pointer
1520 * can be either NULL or the pointer to the start of an object).
1521 * This function copies them to to_space and updates them.
1523 * This function is not thread-safe!
/*
 * Precisely scan the root area [start_root, end_root) according to @desc,
 * applying @copy_func to each non-NULL slot and draining the gray stack
 * after each update for locality.  Descriptor types: inline bitmap, complex
 * (out-of-line) bitmap, or a user-supplied marker routed through TLS.
 */
1526 precisely_scan_objects_from (CopyOrMarkObjectFunc copy_func, void** start_root, void** end_root, char* n_start, char *n_end, mword desc, GrayQueue *queue)
1528 switch (desc & ROOT_DESC_TYPE_MASK) {
1529 case ROOT_DESC_BITMAP:
1530 desc >>= ROOT_DESC_TYPE_SHIFT;
1532 if ((desc & 1) && *start_root) {
1533 copy_func (start_root, queue);
1534 DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", start_root, *start_root));
1535 sgen_drain_gray_stack (queue, -1);
1541 case ROOT_DESC_COMPLEX: {
/* First word of the out-of-line bitmap is its length in words. */
1542 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1543 int bwords = (*bitmap_data) - 1;
1544 void **start_run = start_root;
1546 while (bwords-- > 0) {
1547 gsize bmap = *bitmap_data++;
1548 void **objptr = start_run;
1550 if ((bmap & 1) && *objptr) {
1551 copy_func (objptr, queue);
1552 DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", objptr, *objptr));
1553 sgen_drain_gray_stack (queue, -1);
1558 start_run += GC_BITS_PER_WORD;
1562 case ROOT_DESC_USER: {
/* Hand copy_func+queue to the user marker through TLS. */
1563 UserCopyOrMarkData data = { copy_func, queue };
1564 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1565 set_user_copy_or_mark_data (&data);
1566 marker (start_root, single_arg_user_copy_or_mark);
1567 set_user_copy_or_mark_data (NULL);
1570 case ROOT_DESC_RUN_LEN:
1571 g_assert_not_reached ();
1573 g_assert_not_reached ();
/* Reset the tracked heap address range to "empty" (min > max). */
1578 reset_heap_boundaries (void)
1580 lowest_heap_address = ~(mword)0;
1581 highest_heap_address = 0;
/*
 * Widen the global [lowest_heap_address, highest_heap_address) range to
 * include [low, high).  Uses CAS retry loops so concurrent allocators can
 * update the bounds safely.
 */
1585 sgen_update_heap_boundaries (mword low, mword high)
1590 old = lowest_heap_address;
1593 } while (SGEN_CAS_PTR ((gpointer*)&lowest_heap_address, (gpointer)low, (gpointer)old) != (gpointer)old);
1596 old = highest_heap_address;
1599 } while (SGEN_CAS_PTR ((gpointer*)&highest_heap_address, (gpointer)high, (gpointer)old) != (gpointer)old);
1603 * Allocate and setup the data structures needed to be able to allocate objects
1604 * in the nursery. The nursery is stored in nursery_section.
/*
 * Allocate and initialize the nursery section (data, scan starts, bounds)
 * and register it with the memory governor and nursery allocator.  No-op if
 * the nursery already exists.
 */
1607 alloc_nursery (void)
1609 GCMemSection *section;
/* Already allocated: nothing to do. */
1614 if (nursery_section)
1616 DEBUG (2, fprintf (gc_debug_file, "Allocating nursery size: %lu\n", (unsigned long)sgen_nursery_size));
1617 /* later we will alloc a larger area for the nursery but only activate
1618 * what we need. The rest will be used as expansion if we have too many pinned
1619 * objects in the existing nursery.
1621 /* FIXME: handle OOM */
1622 section = sgen_alloc_internal (INTERNAL_MEM_SECTION);
1624 alloc_size = sgen_nursery_size;
1626 /* If there isn't enough space even for the nursery we should simply abort. */
1627 g_assert (sgen_memgov_try_alloc_space (alloc_size, SPACE_NURSERY));
1629 #ifdef SGEN_ALIGN_NURSERY
/* Aligned nursery lets ptr_in_nursery be a cheap mask test. */
1630 data = major_collector.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
1632 data = major_collector.alloc_heap (alloc_size, 0, DEFAULT_NURSERY_BITS);
1634 sgen_update_heap_boundaries ((mword)data, (mword)(data + sgen_nursery_size));
1635 DEBUG (4, fprintf (gc_debug_file, "Expanding nursery size (%p-%p): %lu, total: %lu\n", data, data + alloc_size, (unsigned long)sgen_nursery_size, (unsigned long)mono_gc_get_heap_size ()));
1636 section->data = section->next_data = data;
1637 section->size = alloc_size;
1638 section->end_data = data + sgen_nursery_size;
/* One scan-start slot per SCAN_START_SIZE bytes, rounded up. */
1639 scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
1640 section->scan_starts = sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
1641 section->num_scan_start = scan_starts;
1642 section->block.role = MEMORY_ROLE_GEN0;
1643 section->block.next = NULL;
1645 nursery_section = section;
1647 sgen_nursery_allocator_set_nursery_bounds (data, data + sgen_nursery_size);
/*
 * Public API: return the nursery start address, its size, and (when the
 * nursery is aligned) the shift usable for fast in-nursery checks.
 */
1651 mono_gc_get_nursery (int *shift_bits, size_t *size)
1653 *size = sgen_nursery_size;
1654 #ifdef SGEN_ALIGN_NURSERY
1655 *shift_bits = DEFAULT_NURSERY_BITS;
1659 return sgen_get_nursery_start ();
/* Record the appdomain the current thread is executing in (for STW scans). */
1663 mono_gc_set_current_thread_appdomain (MonoDomain *domain)
1665 SgenThreadInfo *info = mono_thread_info_current ();
1667 /* Could be called from sgen_thread_unregister () with a NULL info */
1670 info->stopped_domain = domain;
/* TRUE when managed stacks are scanned precisely rather than conservatively. */
1675 mono_gc_precise_stack_mark_enabled (void)
1677 return !conservative_stack_mark;
/* Return the GC debug log file handle. */
1681 mono_gc_get_logfile (void)
1683 return sgen_get_logfile ();
/* Report every object on a finalize-ready list to the profiler as a
 * finalizer GC root. */
1687 report_finalizer_roots_list (FinalizeReadyEntry *list)
1689 GCRootReport report;
1690 FinalizeReadyEntry *fin;
1693 for (fin = list; fin; fin = fin->next) {
1696 add_profile_gc_root (&report, fin->object, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
1698 notify_gc_roots (&report);
/* Report both the normal and critical finalizer queues to the profiler. */
1702 report_finalizer_roots (void)
1704 report_finalizer_roots_list (fin_ready_list);
1705 report_finalizer_roots_list (critical_fin_list);
/* Report being filled by precisely_report_roots_from (reached from the
 * single-argument user-marker callback, which has no context parameter). */
1708 static GCRootReport *root_report;
/* User-marker callback: add the referenced object to root_report. */
1711 single_arg_report_root (void **obj)
1714 add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
/*
 * Walk a precise root area per its descriptor and add every non-NULL slot's
 * referent to @report.  Mirrors precisely_scan_objects_from but reports
 * instead of copying/marking.
 */
1718 precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
1720 switch (desc & ROOT_DESC_TYPE_MASK) {
1721 case ROOT_DESC_BITMAP:
1722 desc >>= ROOT_DESC_TYPE_SHIFT;
1724 if ((desc & 1) && *start_root) {
1725 add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
1731 case ROOT_DESC_COMPLEX: {
1732 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1733 int bwords = (*bitmap_data) - 1;
1734 void **start_run = start_root;
1736 while (bwords-- > 0) {
1737 gsize bmap = *bitmap_data++;
1738 void **objptr = start_run;
1740 if ((bmap & 1) && *objptr) {
1741 add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
1746 start_run += GC_BITS_PER_WORD;
1750 case ROOT_DESC_USER: {
/* Pass @report to the callback via the root_report static. */
1751 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1752 root_report = report;
1753 marker (start_root, single_arg_report_root);
1756 case ROOT_DESC_RUN_LEN:
1757 g_assert_not_reached ();
1759 g_assert_not_reached ();
/* Report every registered root of @root_type to the profiler. */
1764 report_registered_roots_by_type (int root_type)
1766 GCRootReport report;
1770 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1771 DEBUG (6, fprintf (gc_debug_file, "Precise root scan %p-%p (desc: %p)\n", start_root, root->end_root, (void*)root->root_desc));
1772 precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
1773 } SGEN_HASH_TABLE_FOREACH_END;
1774 notify_gc_roots (&report);
/* Report both normal and write-barrier root sets to the profiler. */
1778 report_registered_roots (void)
1780 report_registered_roots_by_type (ROOT_TYPE_NORMAL);
1781 report_registered_roots_by_type (ROOT_TYPE_WBARRIER);
/* Treat finalize-ready objects as roots: copy/mark each entry's object so
 * everything it references survives until the finalizer runs. */
1785 scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeReadyEntry *list, GrayQueue *queue)
1787 FinalizeReadyEntry *fin;
1789 for (fin = list; fin; fin = fin->next) {
1792 DEBUG (5, fprintf (gc_debug_file, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object)));
1793 copy_func (&fin->object, queue);
/* Human-readable name of a GC generation, for log messages. */
1798 generation_name (int generation)
1800 switch (generation) {
1801 case GENERATION_NURSERY: return "nursery";
1802 case GENERATION_OLD: return "old";
1803 default: g_assert_not_reached ();
/* Run the stop-the-world part of bridge processing. */
1809 stw_bridge_process (void)
1811 sgen_bridge_processing_stw_step ();
/* Finish bridge processing (runs after the world is restarted). */
1815 bridge_process (void)
1817 sgen_bridge_processing_finish ();
/* Return the object operations (copy/mark, scan) for the collection in
 * progress.  Fixes a mis-encoded character: "&curr" had been mangled into
 * the HTML entity "&curren;" (U+00A4), turning "&current_object_ops" into
 * "¤t_object_ops". */
1820 SgenObjectOperations *
1821 sgen_get_current_object_ops (void){
1822 return &current_object_ops;
/*
 * Complete the marking phase for @generation: drain the gray stack, then
 * iterate ephemerons, toggle-refs, bridge objects, non-tracking weak links,
 * finalizable objects, a second ephemeron pass, and finally tracking weak
 * links, draining the gray stack after each step until it stays empty.
 * Fixes: "roundss" -> "rounds" in the DEBUG format string, plus comment
 * typos (trigged/hamdle/my lose).
 */
1827 finish_gray_stack (char *start_addr, char *end_addr, int generation, GrayQueue *queue)
1831 int done_with_ephemerons, ephemeron_rounds = 0;
1832 CopyOrMarkObjectFunc copy_func = current_object_ops.copy_or_mark_object;
1835 * We copied all the reachable objects. Now it's the time to copy
1836 * the objects that were not referenced by the roots, but by the copied objects.
1837 * we built a stack of objects pointed to by gray_start: they are
1838 * additional roots and we may add more items as we go.
1839 * We loop until gray_start == gray_objects which means no more objects have
1840 * been added. Note this is iterative: no recursion is involved.
1841 * We need to walk the LO list as well in search of marked big objects
1842 * (use a flag since this is needed only on major collections). We need to loop
1843 * here as well, so keep a counter of marked LO (increasing it in copy_object).
1844 * To achieve better cache locality and cache usage, we drain the gray stack
1845 * frequently, after each object is copied, and just finish the work here.
1847 sgen_drain_gray_stack (queue, -1);
1849 DEBUG (2, fprintf (gc_debug_file, "%s generation done\n", generation_name (generation)));
1852 Reset bridge data, we might have lingering data from a previous collection if this is a major
1853 collection triggered by minor overflow.
1855 We must reset the gathered bridges since their original block might be evacuated due to major
1856 fragmentation in the meanwhile and the bridge code should not have to deal with that.
1858 sgen_bridge_reset_data ();
1861 * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
1862 * before processing finalizable objects or non-tracking weak handles to avoid finalizing/clearing
1863 * objects that are in fact reachable.
1865 done_with_ephemerons = 0;
1867 done_with_ephemerons = mark_ephemerons_in_range (copy_func, start_addr, end_addr, queue);
1868 sgen_drain_gray_stack (queue, -1);
1870 } while (!done_with_ephemerons);
1872 sgen_scan_togglerefs (copy_func, start_addr, end_addr, queue);
1873 if (generation == GENERATION_OLD)
1874 sgen_scan_togglerefs (copy_func, sgen_get_nursery_start (), sgen_get_nursery_end (), queue);
1876 if (sgen_need_bridge_processing ()) {
1877 collect_bridge_objects (copy_func, start_addr, end_addr, generation, queue);
1878 if (generation == GENERATION_OLD)
1879 collect_bridge_objects (copy_func, sgen_get_nursery_start (), sgen_get_nursery_end (), GENERATION_NURSERY, queue);
1883 Make sure we drain the gray stack before processing disappearing links and finalizers.
1884 If we don't make sure it is empty we might wrongly see a live object as dead.
1886 sgen_drain_gray_stack (queue, -1);
1889 We must clear weak links that don't track resurrection before processing object ready for
1890 finalization so they can be cleared before that.
1892 null_link_in_range (copy_func, start_addr, end_addr, generation, TRUE, queue);
1893 if (generation == GENERATION_OLD)
1894 null_link_in_range (copy_func, start_addr, end_addr, GENERATION_NURSERY, TRUE, queue);
1897 /* walk the finalization queue and move also the objects that need to be
1898 * finalized: use the finalized objects as new roots so the objects they depend
1899 * on are also not reclaimed. As with the roots above, only objects in the nursery
1900 * are marked/copied.
1902 finalize_in_range (copy_func, start_addr, end_addr, generation, queue);
1903 if (generation == GENERATION_OLD)
1904 finalize_in_range (copy_func, sgen_get_nursery_start (), sgen_get_nursery_end (), GENERATION_NURSERY, queue);
1905 /* drain the new stack that might have been created */
1906 DEBUG (6, fprintf (gc_debug_file, "Precise scan of gray area post fin\n"));
1907 sgen_drain_gray_stack (queue, -1);
1910 * This must be done again after processing finalizable objects since CWL slots are cleared only after the key is finalized.
1912 done_with_ephemerons = 0;
1914 done_with_ephemerons = mark_ephemerons_in_range (copy_func, start_addr, end_addr, queue);
1915 sgen_drain_gray_stack (queue, -1);
1917 } while (!done_with_ephemerons);
1920 * Clear ephemeron pairs with unreachable keys.
1921 * We pass the copy func so we can figure out if an array was promoted or not.
1923 clear_unreachable_ephemerons (copy_func, start_addr, end_addr, queue);
1926 DEBUG (2, fprintf (gc_debug_file, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron rounds\n", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds));
1929 * handle disappearing links
1930 * Note we do this after checking the finalization queue because if an object
1931 * survives (at least long enough to be finalized) we don't clear the link.
1932 * This also deals with a possible issue with the monitor reclamation: with the Boehm
1933 * GC a finalized object may lose the monitor because it is cleared before the finalizer is
1936 g_assert (sgen_gray_object_queue_is_empty (queue));
1938 null_link_in_range (copy_func, start_addr, end_addr, generation, FALSE, queue);
1939 if (generation == GENERATION_OLD)
1940 null_link_in_range (copy_func, start_addr, end_addr, GENERATION_NURSERY, FALSE, queue);
/* Clearing links may have resurrected objects; loop until stable. */
1941 if (sgen_gray_object_queue_is_empty (queue))
1943 sgen_drain_gray_stack (queue, -1);
1946 g_assert (sgen_gray_object_queue_is_empty (queue));
/* Sanity check: every non-NULL scan-start slot must point at a plausibly
 * sized object (between sizeof(MonoObject) and MAX_SMALL_OBJ_SIZE). */
1950 sgen_check_section_scan_starts (GCMemSection *section)
1953 for (i = 0; i < section->num_scan_start; ++i) {
1954 if (section->scan_starts [i]) {
1955 guint size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
1956 g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
/* Run scan-start sanity checks on nursery and major heap, if enabled. */
1962 check_scan_starts (void)
1964 if (!do_scan_starts_check)
1966 sgen_check_section_scan_starts (nursery_section);
1967 major_collector.check_scan_starts ();
/* Precisely scan every registered root of @root_type with @copy_func. */
1971 scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, char *addr_start, char *addr_end, int root_type, GrayQueue *queue)
1975 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1976 DEBUG (6, fprintf (gc_debug_file, "Precise root scan %p-%p (desc: %p)\n", start_root, root->end_root, (void*)root->root_desc));
1977 precisely_scan_objects_from (copy_func, start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, queue);
1978 } SGEN_HASH_TABLE_FOREACH_END;
/* Emit an <occupied> element (offset/size relative to the section) into the
 * heap dump file. */
1982 sgen_dump_occupied (char *start, char *end, char *section_start)
1984 fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
/*
 * Dump a section to heap_dump_file as XML: walk the section word by word,
 * coalescing runs of live objects into <occupied> elements and emitting an
 * <object> element per object.  Zeroed words are unused space.
 */
1988 sgen_dump_section (GCMemSection *section, const char *type)
1990 char *start = section->data;
1991 char *end = section->data + section->size;
1992 char *occ_start = NULL;
1994 char *old_start = NULL; /* just for debugging */
1996 fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
1998 while (start < end) {
/* NULL vtable word: unused memory — close any open occupied run. */
2002 if (!*(void**)start) {
2004 sgen_dump_occupied (occ_start, start, section->data);
2007 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
2010 g_assert (start < section->next_data);
2015 vt = (GCVTable*)LOAD_VTABLE (start);
2018 size = ALIGN_UP (safe_object_get_size ((MonoObject*) start));
2021 fprintf (heap_dump_file, "<object offset=\"%d\" class=\"%s.%s\" size=\"%d\"/>\n",
2022 start - section->data,
2023 vt->klass->name_space, vt->klass->name,
/* Flush the final occupied run, if any, before closing the element. */
2031 sgen_dump_occupied (occ_start, start, section->data);
2033 fprintf (heap_dump_file, "</section>\n");
/*
 * dump_object:
 * Emit a single <object> element for @obj into the heap dump file.
 * The class name is copied into a static buffer with the characters
 * '<', '>' and '"' stripped (the downstream XML consumer cannot cope
 * with them).  When @dump_location is TRUE the element also carries a
 * location attribute: "nursery" for nursery objects, and (per the
 * elided else-branches) presumably "major"/"LOS" otherwise — the exact
 * strings are not visible in this excerpt.
 * NOTE(review): uses a static class_name buffer, so not thread-safe.
 */
2037 dump_object (MonoObject *obj, gboolean dump_location)
2039 static char class_name [1024];
2041 MonoClass *class = mono_object_class (obj);
2045 * Python's XML parser is too stupid to parse angle brackets
2046 * in strings, so we just ignore them;
2049 while (class->name [i] && j < sizeof (class_name) - 1) {
2050 if (!strchr ("<>\"", class->name [i]))
2051 class_name [j++] = class->name [i];
2054 g_assert (j < sizeof (class_name));
2057 fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%d\"",
2058 class->name_space, class_name,
2059 safe_object_get_size (obj));
2060 if (dump_location) {
2061 const char *location;
2062 if (ptr_in_nursery (obj))
2063 location = "nursery";
2064 else if (safe_object_get_size (obj) <= MAX_SMALL_OBJ_SIZE)
2068 fprintf (heap_dump_file, " location=\"%s\"", location);
2070 fprintf (heap_dump_file, "/>\n");
/*
 * dump_heap:
 * Write one <collection> record into the heap dump file: collection
 * @type and number, optional @reason, internal/mempool memory usage,
 * pinned-byte statistics, the list of pinned objects, the nursery
 * section, the major heap (via the collector vtable) and all large
 * objects (LOS list).
 */
2074 dump_heap (const char *type, int num, const char *reason)
2079 fprintf (heap_dump_file, "<collection type=\"%s\" num=\"%d\"", type, num);
2081 fprintf (heap_dump_file, " reason=\"%s\"", reason);
2082 fprintf (heap_dump_file, ">\n");
2083 fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
2084 sgen_dump_internal_mem_usage (heap_dump_file);
2085 fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_STACK));
2086 /* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
2087 fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_OTHER));
2089 fprintf (heap_dump_file, "<pinned-objects>\n");
2090 for (list = sgen_pin_stats_get_object_list (); list; list = list->next)
2091 dump_object (list->obj, TRUE);
2092 fprintf (heap_dump_file, "</pinned-objects>\n");
2094 sgen_dump_section (nursery_section, "nursery");
2096 major_collector.dump_heap (heap_dump_file);
2098 fprintf (heap_dump_file, "<los>\n");
2099 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
2100 dump_object ((MonoObject*)bigobj->data, FALSE);
2101 fprintf (heap_dump_file, "</los>\n");
2103 fprintf (heap_dump_file, "</collection>\n");
/*
 * sgen_register_moved_object:
 * Record an (old, new) address pair for the profiler's GC-moves event.
 * Pairs are buffered in moved_objects[]; when the buffer fills
 * (MOVED_OBJECTS_NUM entries) it is flushed to the profiler and reset.
 * Only valid when GC-move profiling is enabled and the collection is
 * serial — the buffer is unsynchronized (see the FIXME below).
 */
2107 sgen_register_moved_object (void *obj, void *destination)
2109 g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
2111 /* FIXME: handle this for parallel collector */
2112 g_assert (!sgen_collection_is_parallel ());
2114 if (moved_objects_idx == MOVED_OBJECTS_NUM) {
2115 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
2116 moved_objects_idx = 0;
2118 moved_objects [moved_objects_idx++] = obj;
2119 moved_objects [moved_objects_idx++] = destination;
/*
 * NOTE(review): the function header for this block is elided in the
 * excerpt; judging from the body it is the one-time statistics
 * initializer.  It registers timing counters for minor and major
 * collection phases, a pinned-object counter, and — when built with
 * HEAVY_STATISTICS — write-barrier, copy/scan and degraded-allocation
 * counters, then initializes the heavy-stats hooks of the nursery
 * allocator.  The `inited` guard presumably makes it idempotent
 * (guard logic itself is elided) — TODO confirm.
 */
2125 static gboolean inited = FALSE;
/* --- per-phase timing counters, minor collections --- */
2130 mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_pre_collection_fragment_clear);
2131 mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_pinning);
2132 mono_counters_register ("Minor scan remembered set", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_remsets);
2133 mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_pinned);
2134 mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_registered_roots);
2135 mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_thread_data);
2136 mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_finish_gray_stack);
2137 mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_fragment_creation);
/* --- per-phase timing counters, major collections --- */
2139 mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_pre_collection_fragment_clear);
2140 mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_pinning);
2141 mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_pinned);
2142 mono_counters_register ("Major scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_registered_roots);
2143 mono_counters_register ("Major scan thread data", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_thread_data);
2144 mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_alloc_pinned);
2145 mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_finalized);
2146 mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_big_objects);
2147 mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_finish_gray_stack);
2148 mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_free_bigobjs);
2149 mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_los_sweep);
2150 mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_sweep);
2151 mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_fragment_creation);
2153 mono_counters_register ("Number of pinned objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_pinned_objects);
/* --- detailed counters, compiled in only for heavy-stats builds --- */
2155 #ifdef HEAVY_STATISTICS
2156 mono_counters_register ("WBarrier set field", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_field);
2157 mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_arrayref);
2158 mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_arrayref_copy);
2159 mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store);
2160 mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_root);
2161 mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_value_copy);
2162 mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_object_copy);
2164 mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced_degraded);
2165 mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_degraded);
2167 mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_nursery);
2168 mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_nursery);
2169 mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_major);
2170 mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_major);
2172 mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_nursery);
2173 mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_major);
2175 mono_counters_register ("Slots allocated in vain", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_slots_allocated_in_vain);
2177 mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_from_space);
2178 mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_forwarded);
2179 mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_pinned);
2180 mono_counters_register ("# nursery copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_to_space);
2182 sgen_nursery_allocator_init_heavy_stats ();
2183 sgen_alloc_init_heavy_stats ();
/*
 * reset_pinned_from_failed_allocation:
 * Zero the running total of bytes pinned because an allocation failed;
 * called at the start of a collection cycle.
 */
2191 reset_pinned_from_failed_allocation (void)
2193 bytes_pinned_from_failed_allocation = 0;
/*
 * sgen_set_pinned_from_failed_allocation:
 * Accumulate @objsize bytes into the pinned-due-to-failed-allocation
 * total; a nonzero total after a major collection is used as a signal
 * to trigger follow-up collection work.
 */
2197 sgen_set_pinned_from_failed_allocation (mword objsize)
2199 bytes_pinned_from_failed_allocation += objsize;
/*
 * sgen_collection_is_parallel:
 * Return whether the collection currently in progress runs with
 * parallel workers: for nursery collections this is the
 * nursery_collection_is_parallel flag, for major collections the
 * collector's is_parallel property.  Aborts (g_error) if no collection
 * generation is current.
 */
2203 sgen_collection_is_parallel (void)
2205 switch (current_collection_generation) {
2206 case GENERATION_NURSERY:
2207 return nursery_collection_is_parallel;
2208 case GENERATION_OLD:
2209 return major_collector.is_parallel;
2211 g_error ("Invalid current generation %d", current_collection_generation);
2219 } FinishRememberedSetScanJobData;
/*
 * job_finish_remembered_set_scan:
 * Worker-job entry point: finish scanning the remembered sets for the
 * heap range carried in the job data, feeding results into the
 * worker's gray queue.
 */
2222 job_finish_remembered_set_scan (WorkerData *worker_data, void *job_data_untyped)
2224 FinishRememberedSetScanJobData *job_data = job_data_untyped;
2226 remset.finish_scan_remsets (job_data->heap_start, job_data->heap_end, sgen_workers_get_job_gray_queue (worker_data));
2231 CopyOrMarkObjectFunc func;
2235 } ScanFromRegisteredRootsJobData;
/*
 * job_scan_from_registered_roots:
 * Worker-job entry point: scan all registered roots of the job's root
 * type over the job's heap range with the job's copy/mark function,
 * pushing discovered objects onto the worker's gray queue.
 */
2238 job_scan_from_registered_roots (WorkerData *worker_data, void *job_data_untyped)
2240 ScanFromRegisteredRootsJobData *job_data = job_data_untyped;
2242 scan_from_registered_roots (job_data->func,
2243 job_data->heap_start, job_data->heap_end,
2244 job_data->root_type,
2245 sgen_workers_get_job_gray_queue (worker_data));
2252 } ScanThreadDataJobData;
/*
 * job_scan_thread_data:
 * Worker-job entry point: scan thread stacks/registers for references
 * into the job's heap range (the TRUE argument's meaning is defined by
 * scan_thread_data, not visible here), using the worker's gray queue.
 */
2255 job_scan_thread_data (WorkerData *worker_data, void *job_data_untyped)
2257 ScanThreadDataJobData *job_data = job_data_untyped;
2259 scan_thread_data (job_data->heap_start, job_data->heap_end, TRUE,
2260 sgen_workers_get_job_gray_queue (worker_data));
2265 FinalizeReadyEntry *list;
2266 } ScanFinalizerEntriesJobData;
/*
 * job_scan_finalizer_entries:
 * Worker-job entry point: mark/copy the objects referenced by the
 * job's finalizer-ready list using the current copy_or_mark_object
 * function and the worker's gray queue.  (The list argument itself is
 * on an elided line of this excerpt.)
 */
2269 job_scan_finalizer_entries (WorkerData *worker_data, void *job_data_untyped)
2271 ScanFinalizerEntriesJobData *job_data = job_data_untyped;
2273 scan_finalizer_entries (current_object_ops.copy_or_mark_object,
2275 sgen_workers_get_job_gray_queue (worker_data));
/*
 * verify_scan_starts:
 * Debug check used by verify_nursery(): report any nursery scan-start
 * entry that falls strictly inside the object [start, end) — a scan
 * start must never point into the middle of an object.
 */
2279 verify_scan_starts (char *start, char *end)
2283 for (i = 0; i < nursery_section->num_scan_start; ++i) {
2284 char *addr = nursery_section->scan_starts [i];
2285 if (addr > start && addr < end)
2286 fprintf (gc_debug_file, "NFC-BAD SCAN START [%d] %p for obj [%p %p]\n", i, addr, start, end);
/*
 * verify_nursery:
 * Debug walk over the whole nursery (gated on do_verify_nursery).
 * After trimming unused allocator fragments, it steps word by word
 * from nursery start to end: NULL words are holes; for each object it
 * reports forwarded/pinned state, validates that no scan start points
 * into the object's interior, and optionally (do_dump_nursery_content)
 * dumps holes and objects with raw and aligned sizes.
 * NOTE(review): the main loop's braces and the hole_start update are
 * elided in this excerpt.
 */
2291 verify_nursery (void)
2293 char *start, *end, *cur, *hole_start;
2295 if (!do_verify_nursery)
2298 /*This cleans up unused fragments */
2299 sgen_nursery_allocator_prepare_for_pinning ();
2301 hole_start = start = cur = sgen_get_nursery_start ();
2302 end = sgen_get_nursery_end ();
2307 if (!*(void**)cur) {
2308 cur += sizeof (void*);
2312 if (object_is_forwarded (cur))
2313 fprintf (gc_debug_file, "FORWARDED OBJ %p\n", cur);
2314 else if (object_is_pinned (cur))
2315 fprintf (gc_debug_file, "PINNED OBJ %p\n", cur);
2317 ss = safe_object_get_size ((MonoObject*)cur);
2318 size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2319 verify_scan_starts (cur, cur + size);
2320 if (do_dump_nursery_content) {
2321 if (cur > hole_start)
2322 fprintf (gc_debug_file, "HOLE [%p %p %d]\n", hole_start, cur, (int)(cur - hole_start));
2323 fprintf (gc_debug_file, "OBJ [%p %p %d %d %s %d]\n", cur, cur + size, (int)size, (int)ss, sgen_safe_name ((MonoObject*)cur), (gpointer)LOAD_VTABLE (cur) == sgen_get_array_fill_vtable ());
2328 fflush (gc_debug_file);
2332 * Collect objects in the nursery. Returns whether to trigger a major
/*
 * collect_nursery:
 * The minor (nursery) collection.  Must be called with the world
 * already stopped.  Phases visible below: choose serial/parallel
 * object ops, prepare the nursery allocator, clear fragments, pin from
 * roots, scan remembered sets + registered roots + thread data via
 * worker jobs, drain the gray stack, scan finalizer lists, finish the
 * gray stack, rebuild nursery fragments, clear TLABs, and wake the
 * finalizer thread if needed.  Returns TRUE ("needs major") when
 * objects were pinned late due to memory pressure.
 * NOTE(review): many lines (returns, braces, TV_GETTIME calls, some
 * bookkeeping) are elided from this excerpt; phase comments below
 * describe only the visible code.
 */
2336 collect_nursery (void)
2338 gboolean needs_major;
2339 size_t max_garbage_amount;
2341 FinishRememberedSetScanJobData frssjd;
2342 ScanFromRegisteredRootsJobData scrrjd_normal, scrrjd_wbarrier;
2343 ScanFinalizerEntriesJobData sfejd_fin_ready, sfejd_critical_fin;
2344 ScanThreadDataJobData stdjd;
2345 mword fragment_total;
2346 TV_DECLARE (all_atv);
2347 TV_DECLARE (all_btv);
2351 if (disable_minor_collections)
2356 mono_perfcounters->gc_collections0++;
2358 current_collection_generation = GENERATION_NURSERY;
/* pick parallel or serial copy/mark ops for this collection */
2359 if (sgen_collection_is_parallel ())
2360 current_object_ops = sgen_minor_collector.parallel_ops;
2362 current_object_ops = sgen_minor_collector.serial_ops;
2364 reset_pinned_from_failed_allocation ();
2366 binary_protocol_collection (GENERATION_NURSERY);
2367 check_scan_starts ();
2369 sgen_nursery_alloc_prepare_for_minor ();
2373 nursery_next = sgen_nursery_alloc_get_upper_alloc_bound ();
2374 /* FIXME: optimize later to use the higher address where an object can be present */
2375 nursery_next = MAX (nursery_next, sgen_get_nursery_end ());
2377 DEBUG (1, fprintf (gc_debug_file, "Start nursery collection %d %p-%p, size: %d\n", stat_minor_gcs, sgen_get_nursery_start (), nursery_next, (int)(nursery_next - sgen_get_nursery_start ())));
2378 max_garbage_amount = nursery_next - sgen_get_nursery_start ();
2379 g_assert (nursery_section->size >= max_garbage_amount);
2381 /* world must be stopped already */
2382 TV_GETTIME (all_atv);
2386 time_minor_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2388 if (xdomain_checks) {
2389 sgen_clear_nursery_fragments ();
2390 check_for_xdomain_refs ();
2393 nursery_section->next_data = nursery_next;
2395 major_collector.start_nursery_collection ();
2397 sgen_memgov_minor_collection_start ();
2399 sgen_gray_object_queue_init (&gray_queue);
2400 sgen_workers_init_distribute_gray_queue ();
2403 gc_stats.minor_gc_count ++;
2405 if (remset.prepare_for_minor_collection)
2406 remset.prepare_for_minor_collection ();
2408 process_fin_stage_entries ();
2409 process_dislink_stage_entries ();
2411 /* pin from pinned handles */
2412 sgen_init_pinning ();
2413 mono_profiler_gc_event (MONO_GC_EVENT_MARK_START, 0);
2414 pin_from_roots (sgen_get_nursery_start (), nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2415 /* identify pinned objects */
2416 sgen_optimize_pin_queue (0);
2417 sgen_pinning_setup_section (nursery_section);
2418 sgen_pin_objects_in_section (nursery_section, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2419 sgen_pinning_trim_queue_to_section (nursery_section);
2422 time_minor_pinning += TV_ELAPSED (btv, atv);
2423 DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", sgen_get_pinned_count (), TV_ELAPSED (btv, atv)));
2424 DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", sgen_get_pinned_count ()));
2426 if (whole_heap_check_before_collection)
2427 sgen_check_whole_heap ();
2428 if (consistency_check_at_minor_collection)
2429 sgen_check_consistency ();
2431 sgen_workers_start_all_workers ();
2434 * Perform the sequential part of remembered set scanning.
2435 * This usually involves scanning global information that might later be produced by evacuation.
2437 if (remset.begin_scan_remsets)
2438 remset.begin_scan_remsets (sgen_get_nursery_start (), nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2440 sgen_workers_start_marking ();
/* enqueue the parallel part of remembered-set scanning as a worker job */
2442 frssjd.heap_start = sgen_get_nursery_start ();
2443 frssjd.heap_end = nursery_next;
2444 sgen_workers_enqueue_job (job_finish_remembered_set_scan, &frssjd);
2446 /* we don't have complete write barrier yet, so we scan all the old generation sections */
2448 time_minor_scan_remsets += TV_ELAPSED (atv, btv);
2449 DEBUG (2, fprintf (gc_debug_file, "Old generation scan: %d usecs\n", TV_ELAPSED (atv, btv)));
2451 if (!sgen_collection_is_parallel ())
2452 sgen_drain_gray_stack (&gray_queue, -1);
2454 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2455 report_registered_roots ();
2456 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2457 report_finalizer_roots ();
2459 time_minor_scan_pinned += TV_ELAPSED (btv, atv);
2461 /* registered roots, this includes static fields */
2462 scrrjd_normal.func = current_object_ops.copy_or_mark_object;
2463 scrrjd_normal.heap_start = sgen_get_nursery_start ();
2464 scrrjd_normal.heap_end = nursery_next;
2465 scrrjd_normal.root_type = ROOT_TYPE_NORMAL;
2466 sgen_workers_enqueue_job (job_scan_from_registered_roots, &scrrjd_normal);
2468 scrrjd_wbarrier.func = current_object_ops.copy_or_mark_object;
2469 scrrjd_wbarrier.heap_start = sgen_get_nursery_start ();
2470 scrrjd_wbarrier.heap_end = nursery_next;
2471 scrrjd_wbarrier.root_type = ROOT_TYPE_WBARRIER;
2472 sgen_workers_enqueue_job (job_scan_from_registered_roots, &scrrjd_wbarrier);
2475 time_minor_scan_registered_roots += TV_ELAPSED (atv, btv);
/* thread stacks and registers */
2478 stdjd.heap_start = sgen_get_nursery_start ();
2479 stdjd.heap_end = nursery_next;
2480 sgen_workers_enqueue_job (job_scan_thread_data, &stdjd);
2483 time_minor_scan_thread_data += TV_ELAPSED (btv, atv);
/* parallel mode: keep distributing gray-queue sections until empty, then join */
2486 if (sgen_collection_is_parallel ()) {
2487 while (!sgen_gray_object_queue_is_empty (WORKERS_DISTRIBUTE_GRAY_QUEUE)) {
2488 sgen_workers_distribute_gray_queue_sections ();
2492 sgen_workers_join ();
2494 if (sgen_collection_is_parallel ())
2495 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2497 /* Scan the list of objects ready for finalization. If */
2498 sfejd_fin_ready.list = fin_ready_list;
2499 sgen_workers_enqueue_job (job_scan_finalizer_entries, &sfejd_fin_ready);
2501 sfejd_critical_fin.list = critical_fin_list;
2502 sgen_workers_enqueue_job (job_scan_finalizer_entries, &sfejd_critical_fin);
2504 finish_gray_stack (sgen_get_nursery_start (), nursery_next, GENERATION_NURSERY, &gray_queue);
2506 time_minor_finish_gray_stack += TV_ELAPSED (btv, atv);
2507 mono_profiler_gc_event (MONO_GC_EVENT_MARK_END, 0);
2510 * The (single-threaded) finalization code might have done
2511 * some copying/marking so we can only reset the GC thread's
2512 * worker data here instead of earlier when we joined the
2515 sgen_workers_reset_data ();
/* late pinning (e.g. from failed to-space allocation) forces re-sorting the pin queue */
2517 if (objects_pinned) {
2518 sgen_optimize_pin_queue (0);
2519 sgen_pinning_setup_section (nursery_section);
2522 /* walk the pin_queue, build up the fragment list of free memory, unmark
2523 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2526 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_START, 0);
2527 fragment_total = sgen_build_nursery_fragments (nursery_section, nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries);
2528 if (!fragment_total)
2531 /* Clear TLABs for all threads */
2532 sgen_clear_tlabs ();
2534 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_END, 0);
2536 time_minor_fragment_creation += TV_ELAPSED (atv, btv);
2537 DEBUG (2, fprintf (gc_debug_file, "Fragment creation: %d usecs, %lu bytes available\n", TV_ELAPSED (atv, btv), (unsigned long)fragment_total));
2539 if (consistency_check_at_minor_collection)
2540 sgen_check_major_refs ();
2542 major_collector.finish_nursery_collection ();
2544 TV_GETTIME (all_btv);
2545 gc_stats.minor_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
2548 dump_heap ("minor", stat_minor_gcs - 1, NULL);
2550 /* prepare the pin queue for the next collection */
2551 sgen_finish_pinning ();
2552 if (fin_ready_list || critical_fin_list) {
2553 DEBUG (4, fprintf (gc_debug_file, "Finalizer-thread wakeup: ready %d\n", num_ready_finalizers));
2554 mono_gc_finalize_notify ();
2556 sgen_pin_stats_reset ();
2558 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2560 if (remset.finish_minor_collection)
2561 remset.finish_minor_collection ();
2563 check_scan_starts ();
2565 binary_protocol_flush_buffers (FALSE);
2567 sgen_memgov_minor_collection_end ();
2569 /*objects are late pinned because of lack of memory, so a major is a good call*/
2570 needs_major = objects_pinned > 0;
2571 current_collection_generation = -1;
/*
 * major_do_collection:
 * The major (full-heap) collection.  Must be called with the world
 * already stopped.  Visible phases: clear nursery fragments, pin from
 * roots across the whole heap address range (two-pass pin queue, see
 * the comment block inside), scan registered roots / thread data /
 * finalizer lists via worker jobs, finish the gray stack, sweep the
 * LOS list (freeing unpinned big objects), sweep the major heap,
 * rebuild nursery fragments and clear TLABs.  Returns TRUE when bytes
 * were pinned due to failed allocations, signalling the caller to run
 * an overflow nursery collection.
 * NOTE(review): many lines (braces, TV_GETTIME calls, returns,
 * report bookkeeping) are elided from this excerpt.
 */
2578 major_do_collection (const char *reason)
2580 LOSObject *bigobj, *prevbo;
2581 TV_DECLARE (all_atv);
2582 TV_DECLARE (all_btv);
2585 /* FIXME: only use these values for the precise scan
2586 * note that to_space pointers should be excluded anyway...
2588 char *heap_start = NULL;
2589 char *heap_end = (char*)-1;
2590 int old_next_pin_slot;
2591 ScanFromRegisteredRootsJobData scrrjd_normal, scrrjd_wbarrier;
2592 ScanThreadDataJobData stdjd;
2593 ScanFinalizerEntriesJobData sfejd_fin_ready, sfejd_critical_fin;
2595 current_collection_generation = GENERATION_OLD;
2596 mono_perfcounters->gc_collections1++;
2598 current_object_ops = major_collector.major_ops;
2600 reset_pinned_from_failed_allocation ();
2602 sgen_memgov_major_collection_start ();
2604 //count_ref_nonref_objs ();
2605 //consistency_check ();
2607 binary_protocol_collection (GENERATION_OLD);
2608 check_scan_starts ();
2610 sgen_gray_object_queue_init (&gray_queue);
2611 sgen_workers_init_distribute_gray_queue ();
2612 sgen_nursery_alloc_prepare_for_major ();
2615 DEBUG (1, fprintf (gc_debug_file, "Start major collection %d\n", stat_major_gcs));
2617 gc_stats.major_gc_count ++;
2619 /* world must be stopped already */
2620 TV_GETTIME (all_atv);
2623 /* Pinning depends on this */
2624 sgen_clear_nursery_fragments ();
2626 if (whole_heap_check_before_collection)
2627 sgen_check_whole_heap ();
2630 time_major_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2632 nursery_section->next_data = sgen_get_nursery_end ();
2633 /* we should also coalesce scanning from sections close to each other
2634 * and deal with pointers outside of the sections later.
2637 if (major_collector.start_major_collection)
2638 major_collector.start_major_collection ();
/* mark the major heap as not yet swept for this cycle */
2641 *major_collector.have_swept = FALSE;
2643 if (xdomain_checks) {
2644 sgen_clear_nursery_fragments ();
2645 check_for_xdomain_refs ();
2648 /* Remsets are not useful for a major collection */
2649 remset.prepare_for_major_collection ();
2651 process_fin_stage_entries ();
2652 process_dislink_stage_entries ();
2655 sgen_init_pinning ();
2656 DEBUG (6, fprintf (gc_debug_file, "Collecting pinned addresses\n"));
2657 pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2658 sgen_optimize_pin_queue (0);
2661 * pin_queue now contains all candidate pointers, sorted and
2662 * uniqued. We must do two passes now to figure out which
2663 * objects are pinned.
2665 * The first is to find within the pin_queue the area for each
2666 * section. This requires that the pin_queue be sorted. We
2667 * also process the LOS objects and pinned chunks here.
2669 * The second, destructive, pass is to reduce the section
2670 * areas to pointers to the actually pinned objects.
2672 DEBUG (6, fprintf (gc_debug_file, "Pinning from sections\n"));
2673 /* first pass for the sections */
2674 sgen_find_section_pin_queue_start_end (nursery_section);
2675 major_collector.find_pin_queue_start_ends (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2676 /* identify possible pointers to the inside of large objects */
2677 DEBUG (6, fprintf (gc_debug_file, "Pinning from large objects\n"));
2678 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
2680 gboolean profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
2681 GCRootReport report;
2683 if (sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + bigobj->size, &dummy)) {
2684 binary_protocol_pin (bigobj->data, (gpointer)LOAD_VTABLE (bigobj->data), safe_object_get_size (bigobj->data));
2685 pin_object (bigobj->data);
2686 /* FIXME: only enqueue if object has references */
2687 GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data);
2688 if (G_UNLIKELY (do_pin_stats))
2689 sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
2690 DEBUG (6, fprintf (gc_debug_file, "Marked large object %p (%s) size: %lu from roots\n", bigobj->data, safe_name (bigobj->data), (unsigned long)bigobj->size));
2693 add_profile_gc_root (&report, bigobj->data, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
2696 notify_gc_roots (&report);
2698 /* second pass for the sections */
2699 sgen_pin_objects_in_section (nursery_section, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2700 major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2701 old_next_pin_slot = sgen_get_pinned_count ();
2704 time_major_pinning += TV_ELAPSED (atv, btv);
2705 DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", sgen_get_pinned_count (), TV_ELAPSED (atv, btv)));
2706 DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", sgen_get_pinned_count ()));
2708 major_collector.init_to_space ();
2710 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2711 main_gc_thread = mono_native_thread_self ();
2714 sgen_workers_start_all_workers ();
2715 sgen_workers_start_marking ();
2717 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2718 report_registered_roots ();
2720 time_major_scan_pinned += TV_ELAPSED (btv, atv);
2722 /* registered roots, this includes static fields */
2723 scrrjd_normal.func = current_object_ops.copy_or_mark_object;
2724 scrrjd_normal.heap_start = heap_start;
2725 scrrjd_normal.heap_end = heap_end;
2726 scrrjd_normal.root_type = ROOT_TYPE_NORMAL;
2727 sgen_workers_enqueue_job (job_scan_from_registered_roots, &scrrjd_normal);
2729 scrrjd_wbarrier.func = current_object_ops.copy_or_mark_object;
2730 scrrjd_wbarrier.heap_start = heap_start;
2731 scrrjd_wbarrier.heap_end = heap_end;
2732 scrrjd_wbarrier.root_type = ROOT_TYPE_WBARRIER;
2733 sgen_workers_enqueue_job (job_scan_from_registered_roots, &scrrjd_wbarrier);
2736 time_major_scan_registered_roots += TV_ELAPSED (atv, btv);
/* thread stacks and registers */
2739 stdjd.heap_start = heap_start;
2740 stdjd.heap_end = heap_end;
2741 sgen_workers_enqueue_job (job_scan_thread_data, &stdjd);
2744 time_major_scan_thread_data += TV_ELAPSED (btv, atv);
2747 time_major_scan_alloc_pinned += TV_ELAPSED (atv, btv);
2749 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2750 report_finalizer_roots ();
2752 /* scan the list of objects ready for finalization */
2753 sfejd_fin_ready.list = fin_ready_list;
2754 sgen_workers_enqueue_job (job_scan_finalizer_entries, &sfejd_fin_ready);
2756 sfejd_critical_fin.list = critical_fin_list;
2757 sgen_workers_enqueue_job (job_scan_finalizer_entries, &sfejd_critical_fin);
2760 time_major_scan_finalized += TV_ELAPSED (btv, atv);
2761 DEBUG (2, fprintf (gc_debug_file, "Root scan: %d usecs\n", TV_ELAPSED (btv, atv)));
2764 time_major_scan_big_objects += TV_ELAPSED (atv, btv);
/* parallel mode: keep distributing gray-queue sections until empty, then join */
2766 if (major_collector.is_parallel) {
2767 while (!sgen_gray_object_queue_is_empty (WORKERS_DISTRIBUTE_GRAY_QUEUE)) {
2768 sgen_workers_distribute_gray_queue_sections ();
2772 sgen_workers_join ();
2774 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2775 main_gc_thread = NULL;
2778 if (major_collector.is_parallel)
2779 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2781 /* all the objects in the heap */
2782 finish_gray_stack (heap_start, heap_end, GENERATION_OLD, &gray_queue);
2784 time_major_finish_gray_stack += TV_ELAPSED (btv, atv);
2787 * The (single-threaded) finalization code might have done
2788 * some copying/marking so we can only reset the GC thread's
2789 * worker data here instead of earlier when we joined the
2792 sgen_workers_reset_data ();
2794 if (objects_pinned) {
2795 /*This is slow, but we just OOM'd*/
2796 sgen_pin_queue_clear_discarded_entries (nursery_section, old_next_pin_slot);
2797 sgen_optimize_pin_queue (0);
2798 sgen_find_section_pin_queue_start_end (nursery_section);
2802 reset_heap_boundaries ();
2803 sgen_update_heap_boundaries ((mword)sgen_get_nursery_start (), (mword)sgen_get_nursery_end ());
2805 /* sweep the big objects list */
/* pinned LOS objects survive (and contribute to heap boundaries); the rest are freed */
2807 for (bigobj = los_object_list; bigobj;) {
2808 if (object_is_pinned (bigobj->data)) {
2809 unpin_object (bigobj->data);
2810 sgen_update_heap_boundaries ((mword)bigobj->data, (mword)bigobj->data + bigobj->size);
2813 /* not referenced anywhere, so we can free it */
2815 prevbo->next = bigobj->next;
2817 los_object_list = bigobj->next;
2819 bigobj = bigobj->next;
2820 sgen_los_free_object (to_free);
2824 bigobj = bigobj->next;
2828 time_major_free_bigobjs += TV_ELAPSED (atv, btv);
2833 time_major_los_sweep += TV_ELAPSED (btv, atv);
2835 major_collector.sweep ();
2838 time_major_sweep += TV_ELAPSED (atv, btv);
2840 /* walk the pin_queue, build up the fragment list of free memory, unmark
2841 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2844 if (!sgen_build_nursery_fragments (nursery_section, nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries))
2847 /* Clear TLABs for all threads */
2848 sgen_clear_tlabs ();
2851 time_major_fragment_creation += TV_ELAPSED (btv, atv);
2853 TV_GETTIME (all_btv);
2854 gc_stats.major_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
2857 dump_heap ("major", stat_major_gcs - 1, reason);
2859 /* prepare the pin queue for the next collection */
2860 sgen_finish_pinning ();
2862 if (fin_ready_list || critical_fin_list) {
2863 DEBUG (4, fprintf (gc_debug_file, "Finalizer-thread wakeup: ready %d\n", num_ready_finalizers));
2864 mono_gc_finalize_notify ();
2866 sgen_pin_stats_reset ();
2868 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2870 sgen_memgov_major_collection_end ();
2871 current_collection_generation = -1;
2873 major_collector.finish_major_collection ();
2875 check_scan_starts ();
2877 binary_protocol_flush_buffers (FALSE);
2879 //consistency_check ();
2881 return bytes_pinned_from_failed_allocation > 0;
2884 static gboolean major_do_collection (const char *reason);
2887 * Ensure an allocation request for @size will succeed by freeing enough memory.
2889 * LOCKING: The GC lock MUST be held.
/*
 * sgen_ensure_free_space:
 * Decide which generation (if any) to collect so that an allocation of
 * @size bytes can succeed, then run that collection.  Policy visible
 * below: LOS-sized requests and degraded mode trigger a major
 * collection when the memory governor says one is needed; otherwise
 * either a "Minor allowance" major collection or a plain "Nursery
 * full" minor collection.  Does nothing if no collection is needed.
 */
2892 sgen_ensure_free_space (size_t size)
2894 int generation_to_collect = -1;
2895 const char *reason = NULL;
2898 if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
2899 if (sgen_need_major_collection (size)) {
2900 reason = "LOS overflow";
2901 generation_to_collect = GENERATION_OLD;
2904 if (degraded_mode) {
2905 if (sgen_need_major_collection (size)) {
2906 reason = "Degraded mode overflow";
2907 generation_to_collect = GENERATION_OLD;
2909 } else if (sgen_need_major_collection (size)) {
2910 reason = "Minor allowance";
2911 generation_to_collect = GENERATION_OLD;
2913 generation_to_collect = GENERATION_NURSERY;
2914 reason = "Nursery full";
2918 if (generation_to_collect == -1)
2920 sgen_perform_collection (size, generation_to_collect, reason);
/*
 * sgen_perform_collection:
 * Stop the world, run the requested collection, and — if that
 * collection reports overflow (a minor that needs a major, or a major
 * with excessive pinning) — immediately run a second "overflow"
 * collection of the other generation.  Timing info for both
 * collections is recorded in infos[] and handed to restart_world();
 * profiler GC-start/end events are kept symmetric around each
 * collection.
 */
2924 sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason)
2926 TV_DECLARE (gc_end);
2927 GGTimingInfo infos [2];
2928 int overflow_generation_to_collect = -1;
2929 const char *overflow_reason = NULL;
2931 memset (infos, 0, sizeof (infos));
2932 mono_profiler_gc_event (MONO_GC_EVENT_START, generation_to_collect);
2934 infos [0].generation = generation_to_collect;
2935 infos [0].reason = reason;
2936 infos [0].is_overflow = FALSE;
2937 TV_GETTIME (infos [0].total_time);
2938 infos [1].generation = -1;
2940 stop_world (generation_to_collect);
2941 //FIXME extract overflow reason
2942 if (generation_to_collect == GENERATION_NURSERY) {
2943 if (collect_nursery ()) {
2944 overflow_generation_to_collect = GENERATION_OLD;
2945 overflow_reason = "Minor overflow";
2948 if (major_do_collection (reason)) {
2949 overflow_generation_to_collect = GENERATION_NURSERY;
2950 overflow_reason = "Excessive pinning";
2954 TV_GETTIME (gc_end);
2955 infos [0].total_time = SGEN_TV_ELAPSED (infos [0].total_time, gc_end);
/* run the follow-up collection of the other generation, if requested */
2958 if (overflow_generation_to_collect != -1) {
2959 mono_profiler_gc_event (MONO_GC_EVENT_START, overflow_generation_to_collect);
2960 infos [1].generation = overflow_generation_to_collect;
2961 infos [1].reason = overflow_reason;
2962 infos [1].is_overflow = TRUE;
2963 infos [1].total_time = gc_end;
2965 if (overflow_generation_to_collect == GENERATION_NURSERY)
2968 major_do_collection (overflow_reason);
2970 TV_GETTIME (gc_end);
2971 infos [1].total_time = SGEN_TV_ELAPSED (infos [1].total_time, gc_end);
2973 /* keep events symmetric */
2974 mono_profiler_gc_event (MONO_GC_EVENT_END, overflow_generation_to_collect);
2977 DEBUG (2, fprintf (gc_debug_file, "Heap size: %lu, LOS size: %lu\n", (unsigned long)mono_gc_get_heap_size (), (unsigned long)los_memory_usage));
2979 /* this also sets the proper pointers for the next allocation */
2980 if (generation_to_collect == GENERATION_NURSERY && !sgen_can_alloc_size (requested_size)) {
2981 /* TypeBuilder and MonoMethod are killing mcs with fragmentation */
2982 DEBUG (1, fprintf (gc_debug_file, "nursery collection didn't find enough room for %zd alloc (%d pinned)\n", requested_size, sgen_get_pinned_count ()));
2983 sgen_dump_pin_queue ();
2987 restart_world (generation_to_collect, infos);
2989 mono_profiler_gc_event (MONO_GC_EVENT_END, generation_to_collect);
2993 * ######################################################################
2994 * ######## Memory allocation from the OS
2995 * ######################################################################
2996 * This section of code deals with getting memory from the OS and
2997 * allocating memory for GC-internal data structures.
2998 * Internal memory can be handled with a freelist for small objects.
/* Debug helper: print internal and pinned memory usage to stdout.
 * Marked G_GNUC_UNUSED — intended to be called manually (e.g. from gdb). */
3004 G_GNUC_UNUSED static void
3005 report_internal_mem_usage (void)
3007 printf ("Internal memory usage:\n");
3008 sgen_report_internal_mem_usage ();
3009 printf ("Pinned memory usage:\n");
3010 major_collector.report_pinned_memory_usage ();
3014 * ######################################################################
3015 * ######## Finalization support
3016 * ######################################################################
3020 * If the object has been forwarded it means it's still referenced from a root.
3021 * If it is pinned it's still alive as well.
3022 * A LOS object is only alive if we have pinned it.
3023 * Return TRUE if @obj is ready to be finalized.
/*
 * Liveness test used by finalization: nursery objects delegate to the
 * nursery collector; old-gen objects are alive if pinned or forwarded,
 * otherwise the major collector decides.
 * NOTE(review): elided chunk — the TRUE-return for the pinned/forwarded
 * case is not visible here.
 */
3025 static inline gboolean
3026 sgen_is_object_alive (void *object)
3028 if (ptr_in_nursery (object))
3029 return sgen_nursery_is_object_alive (object);
3030 /* Oldgen objects can be pinned and forwarded too */
3031 if (SGEN_OBJECT_IS_PINNED (object) || SGEN_OBJECT_IS_FORWARDED (object))
3033 return major_collector.is_object_live (object);
/* An object is ready for finalization exactly when it is no longer alive. */
3037 sgen_gc_is_object_ready_for_finalization (void *object)
3039 return !sgen_is_object_alive (object);
/* TRUE if obj's class derives from CriticalFinalizerObject
 * (mono_defaults.critical_finalizer_object); such finalizers are queued
 * on the critical list and run after ordinary ones. */
3043 has_critical_finalizer (MonoObject *obj)
3047 if (!mono_defaults.critical_finalizer_object)
3050 class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;
3052 return mono_class_has_parent_fast (class, mono_defaults.critical_finalizer_object);
/* Push obj onto the appropriate ready-to-finalize list:
 * critical_fin_list for critical finalizers, fin_ready_list otherwise. */
3056 queue_finalization_entry (MonoObject *obj) {
3057 FinalizeReadyEntry *entry = sgen_alloc_internal (INTERNAL_MEM_FINALIZE_READY_ENTRY);
3058 entry->object = obj;
3059 if (has_critical_finalizer (obj)) {
3060 entry->next = critical_fin_list;
3061 critical_fin_list = entry;
3063 entry->next = fin_ready_list;
3064 fin_ready_list = entry;
/* Reachability test restricted to the [start, end) range being collected;
 * objects outside the range are treated as alive (see comment below). */
3069 object_is_reachable (char *object, char *start, char *end)
3071 /*This happens for non nursery objects during minor collections. We just treat all objects as alive.*/
3072 if (object < start || object >= end)
3075 return sgen_is_object_alive (object);
3078 #include "sgen-fin-weak-hash.c"
/* Public liveness query: nursery objects are live iff pinned; during a
 * nursery collection all tenured objects count as live (see FIXME). */
3081 sgen_object_is_live (void *obj)
3083 if (ptr_in_nursery (obj))
3084 return object_is_pinned (obj);
3085 /* FIXME This is semantically wrong! All tenured object are considered alive during a nursery collection. */
3086 if (current_collection_generation == GENERATION_NURSERY)
3088 return major_collector.is_object_live (obj);
3091 /* LOCKING: requires that the GC lock is held */
/* Unlink and free ephemeron list nodes whose array belongs to an unloaded
 * domain (detected by a NULL vtable on the array object). */
3093 null_ephemerons_for_domain (MonoDomain *domain)
3095 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3098 MonoObject *object = (MonoObject*)current->array;
/* a cleared vtable means the array's domain was unloaded */
3100 if (object && !object->vtable) {
3101 EphemeronLinkNode *tmp = current;
3104 prev->next = current->next;
3106 ephemeron_list = current->next;
3108 current = current->next;
3109 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
3112 current = current->next;
3117 /* LOCKING: requires that the GC lock is held */
/*
 * Walk the ephemeron list: drop entries whose array died, copy/forward the
 * surviving arrays, tombstone entries whose key is unreachable, and — when
 * an array was promoted out of the nursery — add global remsets for any
 * key/value still pointing into the nursery.
 * NOTE(review): elided chunk — loop/brace structure is incomplete here.
 */
3119 clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue)
3121 int was_in_nursery, was_promoted;
3122 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3124 Ephemeron *cur, *array_end;
3128 char *object = current->array;
/* dead array: unlink the node and free it */
3130 if (!object_is_reachable (object, start, end)) {
3131 EphemeronLinkNode *tmp = current;
3133 DEBUG (5, fprintf (gc_debug_file, "Dead Ephemeron array at %p\n", object));
3136 prev->next = current->next;
3138 ephemeron_list = current->next;
3140 current = current->next;
3141 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
/* live array: forward it and remember whether it left the nursery */
3146 was_in_nursery = ptr_in_nursery (object);
3147 copy_func ((void**)&object, queue);
3148 current->array = object;
3150 /*The array was promoted, add global remsets for key/values left behind in nursery.*/
3151 was_promoted = was_in_nursery && !ptr_in_nursery (object);
3153 DEBUG (5, fprintf (gc_debug_file, "Clearing unreachable entries for ephemeron array at %p\n", object));
3155 array = (MonoArray*)object;
3156 cur = mono_array_addr (array, Ephemeron, 0);
3157 array_end = cur + mono_array_length_fast (array);
3158 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3160 for (; cur < array_end; ++cur) {
3161 char *key = (char*)cur->key;
/* empty or already-tombstoned slot: nothing to do */
3163 if (!key || key == tombstone)
3166 DEBUG (5, fprintf (gc_debug_file, "[%td] key %p (%s) value %p (%s)\n", cur - mono_array_addr (array, Ephemeron, 0),
3167 key, object_is_reachable (key, start, end) ? "reachable" : "unreachable",
3168 cur->value, cur->value && object_is_reachable (cur->value, start, end) ? "reachable" : "unreachable"));
/* unreachable key: replace with the domain's tombstone */
3170 if (!object_is_reachable (key, start, end)) {
3171 cur->key = tombstone;
/* promoted array: entries still in the nursery need global remsets so
 * the next nursery collection sees them */
3177 if (ptr_in_nursery (key)) {/*key was not promoted*/
3178 DEBUG (5, fprintf (gc_debug_file, "\tAdded remset to key %p\n", key));
3179 sgen_add_to_global_remset (&cur->key);
3181 if (ptr_in_nursery (cur->value)) {/*value was not promoted*/
3182 DEBUG (5, fprintf (gc_debug_file, "\tAdded remset to value %p\n", cur->value));
3183 sgen_add_to_global_remset (&cur->value);
3188 current = current->next;
3192 /* LOCKING: requires that the GC lock is held */
/*
 * One pass of the ephemeron fixpoint: for each live ephemeron array, copy
 * reachable keys and, for a reachable key whose value is not yet reachable,
 * copy the value too.  Returns non-zero ("nothing_marked") when the pass
 * marked nothing new, i.e. the fixpoint has been reached.
 */
3194 mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue)
3196 int nothing_marked = 1;
3197 EphemeronLinkNode *current = ephemeron_list;
3199 Ephemeron *cur, *array_end;
3202 for (current = ephemeron_list; current; current = current->next) {
3203 char *object = current->array;
3204 DEBUG (5, fprintf (gc_debug_file, "Ephemeron array at %p\n", object));
3207 For now we process all ephemerons during all collections.
3208 Ideally we should use remset information to partially scan those
3210 We already emit write barriers for Ephemeron fields, it's
3211 just that we don't process them.
3213 /*if (object < start || object >= end)
3216 /*It has to be alive*/
3217 if (!object_is_reachable (object, start, end)) {
3218 DEBUG (5, fprintf (gc_debug_file, "\tnot reachable\n"));
3222 copy_func ((void**)&object, queue);
3224 array = (MonoArray*)object;
3225 cur = mono_array_addr (array, Ephemeron, 0);
3226 array_end = cur + mono_array_length_fast (array);
3227 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3229 for (; cur < array_end; ++cur) {
3230 char *key = cur->key;
/* skip empty and tombstoned slots */
3232 if (!key || key == tombstone)
3235 DEBUG (5, fprintf (gc_debug_file, "[%td] key %p (%s) value %p (%s)\n", cur - mono_array_addr (array, Ephemeron, 0),
3236 key, object_is_reachable (key, start, end) ? "reachable" : "unreachable",
3237 cur->value, cur->value && object_is_reachable (cur->value, start, end) ? "reachable" : "unreachable"));
/* a reachable key keeps its value alive: copy key, then value if the
 * value was not already reachable */
3239 if (object_is_reachable (key, start, end)) {
3240 char *value = cur->value;
3242 copy_func ((void**)&cur->key, queue);
3244 if (!object_is_reachable (value, start, end))
3246 copy_func ((void**)&cur->value, queue);
3252 DEBUG (5, fprintf (gc_debug_file, "Ephemeron run finished. Is it done %d\n", nothing_marked));
3253 return nothing_marked;
/*
 * Run pending finalizers one at a time: pop the first non-NULL entry from
 * fin_ready_list (falling back to critical_fin_list), clear its object
 * slot, and invoke the finalizer outside the list manipulation.  The entry
 * finalized in the previous iteration is unlinked and freed at the top of
 * the loop.
 * NOTE(review): elided chunk — locking and loop braces are incomplete here.
 */
3257 mono_gc_invoke_finalizers (void)
3259 FinalizeReadyEntry *entry = NULL;
3260 gboolean entry_is_critical = FALSE;
3263 /* FIXME: batch to reduce lock contention */
3264 while (fin_ready_list || critical_fin_list) {
3268 FinalizeReadyEntry **list = entry_is_critical ? &critical_fin_list : &fin_ready_list;
3270 /* We have finalized entry in the last
3271 iteration, now we need to remove it from
3274 *list = entry->next;
/* entry is not the list head: walk to its predecessor and splice it out */
3276 FinalizeReadyEntry *e = *list;
3277 while (e->next != entry)
3279 e->next = entry->next;
3281 sgen_free_internal (entry, INTERNAL_MEM_FINALIZE_READY_ENTRY);
3285 /* Now look for the first non-null entry. */
3286 for (entry = fin_ready_list; entry && !entry->object; entry = entry->next)
3289 entry_is_critical = FALSE;
/* ordinary list exhausted: fall back to the critical list */
3291 entry_is_critical = TRUE;
3292 for (entry = critical_fin_list; entry && !entry->object; entry = entry->next)
3297 g_assert (entry->object);
3298 num_ready_finalizers--;
3299 obj = entry->object;
/* clear the slot so a re-entrant call skips this entry */
3300 entry->object = NULL;
3301 DEBUG (7, fprintf (gc_debug_file, "Finalizing object %p (%s)\n", obj, safe_name (obj)));
3309 g_assert (entry->object == NULL);
3311 /* the object is on the stack so it is pinned */
3312 /*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
3313 mono_gc_run_finalize (obj, NULL);
/* TRUE if any finalizer (ordinary or critical) is queued to run. */
3320 mono_gc_pending_finalizers (void)
3322 return fin_ready_list || critical_fin_list;
3326 * ######################################################################
3327 * ######## registered roots support
3328 * ######################################################################
3332 * We do not coalesce roots.
/*
 * Register [start, start+size) as a GC root of root_type with descriptor
 * descr.  If start is already registered under any root type, the existing
 * record is updated in place (size/descriptor may change, e.g. for thread
 * statics); otherwise a new record is inserted into roots_hash[root_type].
 * roots_size tracks the total registered byte count.
 */
3335 mono_gc_register_root_inner (char *start, size_t size, void *descr, int root_type)
3337 RootRecord new_root;
3340 for (i = 0; i < ROOT_TYPE_NUM; ++i) {
3341 RootRecord *root = sgen_hash_table_lookup (&roots_hash [i], start);
3342 /* we allow changing the size and the descriptor (for thread statics etc) */
3344 size_t old_size = root->end_root - start;
3345 root->end_root = start + size;
/* a root may change descriptor but not switch between precise (descr != 0)
 * and conservative/pinned (descr == 0) */
3346 g_assert (((root->root_desc != 0) && (descr != NULL)) ||
3347 ((root->root_desc == 0) && (descr == NULL)));
3348 root->root_desc = (mword)descr;
3350 roots_size -= old_size;
3356 new_root.end_root = start + size;
3357 new_root.root_desc = (mword)descr;
3359 sgen_hash_table_replace (&roots_hash [root_type], start, &new_root, NULL);
3362 DEBUG (3, fprintf (gc_debug_file, "Added root for range: %p-%p, descr: %p (%d/%d bytes)\n", start, new_root.end_root, descr, (int)size, (int)roots_size));
/* Public root registration: a NULL descriptor means the root is scanned
 * conservatively/pinned, a non-NULL one means precise scanning. */
3369 mono_gc_register_root (char *start, size_t size, void *descr)
3371 return mono_gc_register_root_inner (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED);
/* Register a root whose stores go through write barriers. */
3375 mono_gc_register_root_wbarrier (char *start, size_t size, void *descr)
3377 return mono_gc_register_root_inner (start, size, descr, ROOT_TYPE_WBARRIER);
/* Remove addr from every root hash table it appears in and shrink
 * roots_size accordingly. */
3381 mono_gc_deregister_root (char* addr)
3387 for (root_type = 0; root_type < ROOT_TYPE_NUM; ++root_type) {
3388 if (sgen_hash_table_remove (&roots_hash [root_type], addr, &root))
3389 roots_size -= (root.end_root - addr);
3395 * ######################################################################
3396 * ######## Thread handling (stop/start code)
3397 * ######################################################################
3400 unsigned int sgen_global_stop_count = 0;
3403 static MonoContext cur_thread_ctx = {0};
3405 static mword cur_thread_regs [ARCH_NUM_REGS] = {0};
/*
 * Record the current thread's stack top and register state before it is
 * scanned: stack_start is set from a local's address (so everything above
 * is covered), and the registers are captured either as a MonoContext
 * (USE_MONO_CTX) or via ARCH_STORE_REGS.
 */
3409 update_current_thread_stack (void *start)
3411 int stack_guard = 0;
3412 #ifndef USE_MONO_CTX
3413 void *ptr = cur_thread_regs;
3415 SgenThreadInfo *info = mono_thread_info_current ();
/* the address of a local approximates the current stack top */
3417 info->stack_start = align_pointer (&stack_guard);
3418 g_assert (info->stack_start >= info->stack_start_limit && info->stack_start < info->stack_end);
3420 MONO_CONTEXT_GET_CURRENT (cur_thread_ctx);
3421 info->monoctx = &cur_thread_ctx;
3423 ARCH_STORE_REGS (ptr);
3424 info->stopped_regs = ptr;
/* give the runtime a chance to save per-thread state for the scan */
3426 if (gc_callbacks.thread_suspend_func)
3427 gc_callbacks.thread_suspend_func (info->runtime_data, NULL);
/* Delegate to the active remembered-set implementation, if it needs to
 * record per-thread data at suspend time. */
3431 sgen_fill_thread_info_for_suspend (SgenThreadInfo *info)
3433 if (remset.fill_thread_info_for_suspend)
3434 remset.fill_thread_info_for_suspend (info);
3438 is_ip_in_managed_allocator (MonoDomain *domain, gpointer ip);
/*
 * After the initial stop-the-world handshake, some threads may have been
 * suspended inside a managed allocator or a critical region, where their
 * state cannot be scanned.  Repeatedly restart exactly those threads, let
 * them run briefly (with growing sleep intervals), and suspend them again,
 * until none remains in such a region.  Returns the number of threads that
 * died during the process (restarted but never re-suspended).
 * NOTE(review): elided chunk — the outer retry loop's braces are not
 * visible here.
 */
3441 restart_threads_until_none_in_managed_allocator (void)
3443 SgenThreadInfo *info;
3444 int num_threads_died = 0;
3445 int sleep_duration = -1;
3448 int restart_count = 0, restarted_count = 0;
3449 /* restart all threads that stopped in the
3451 FOREACH_THREAD_SAFE (info) {
3453 if (info->skip || info->gc_disabled || !info->joined_stw)
3455 if (!info->thread_is_dying && (!info->stack_start || info->in_critical_region ||
3456 is_ip_in_managed_allocator (info->stopped_domain, info->stopped_ip))) {
3457 binary_protocol_thread_restart ((gpointer)mono_thread_info_get_tid (info));
3458 result = sgen_resume_thread (info);
3465 /* we set the stopped_ip to
3466 NULL for threads which
3467 we're not restarting so
3468 that we can easily identify
3470 info->stopped_ip = NULL;
3471 info->stopped_domain = NULL;
3473 } END_FOREACH_THREAD_SAFE
3474 /* if no threads were restarted, we're done */
3475 if (restart_count == 0)
3478 /* wait for the threads to signal their restart */
3479 sgen_wait_for_suspend_ack (restart_count);
/* back off: sleep a little longer on each retry */
3481 if (sleep_duration < 0) {
3489 g_usleep (sleep_duration);
3490 sleep_duration += 10;
3493 /* stop them again */
3494 FOREACH_THREAD (info) {
/* stopped_ip == NULL marks threads we did not restart above */
3496 if (info->skip || info->stopped_ip == NULL)
3498 result = sgen_suspend_thread (info);
3505 } END_FOREACH_THREAD
3506 /* some threads might have died */
3507 num_threads_died += restart_count - restarted_count;
3508 /* wait for the threads to signal their suspension
3510 sgen_wait_for_suspend_ack (restarted_count);
3513 return num_threads_died;
/* Take the locks needed while the world is stopped (paired with
 * release_gc_locks below). */
3517 acquire_gc_locks (void)
3520 mono_thread_info_suspend_lock ();
/* Release the locks taken by acquire_gc_locks, in reverse order. */
3524 release_gc_locks (void)
3526 mono_thread_info_suspend_unlock ();
3527 UNLOCK_INTERRUPTION;
3530 static TV_DECLARE (stop_world_time);
3531 static unsigned long max_pause_usec = 0;
3533 /* LOCKING: assumes the GC lock is held */
/*
 * Stop all managed threads for a collection of `generation`: process
 * toggle-refs, take the suspend locks, record our own stack, handshake all
 * other threads into suspension, and keep restarting/re-suspending any
 * thread caught inside a managed allocator.  Emits PRE/POST stop-world
 * profiler events and starts the pause timer used by restart_world.
 * NOTE(review): the g_error message below has typos ("that been initialy")
 * — it is a runtime string, deliberately left unchanged here.
 */
3535 stop_world (int generation)
3539 /*XXX this is the right stop, thought might not be the nicest place to put it*/
3540 sgen_process_togglerefs ();
3542 mono_profiler_gc_event (MONO_GC_EVENT_PRE_STOP_WORLD, generation);
3543 acquire_gc_locks ();
3545 update_current_thread_stack (&count);
3547 sgen_global_stop_count++;
3548 DEBUG (3, fprintf (gc_debug_file, "stopping world n %d from %p %p\n", sgen_global_stop_count, mono_thread_info_current (), (gpointer)mono_native_thread_id_get ()));
3549 TV_GETTIME (stop_world_time);
3550 count = sgen_thread_handshake (TRUE);
3551 dead = restart_threads_until_none_in_managed_allocator ();
/* sanity: no more threads can die than were suspended */
3553 g_error ("More threads have died (%d) that been initialy suspended %d", dead, count);
3556 DEBUG (3, fprintf (gc_debug_file, "world stopped %d thread(s)\n", count));
3557 mono_profiler_gc_event (MONO_GC_EVENT_POST_STOP_WORLD, generation);
3559 sgen_memgov_collection_start (generation);
3564 /* LOCKING: assumes the GC lock is held */
/*
 * Restart the world after a collection: flush buffered object-move events
 * to the profiler, clear per-thread scan state, run bridge processing,
 * release the suspend locks, and handshake threads back to running.
 * Records the pause time (and bridge time) into `timing` if non-NULL and
 * reports collection end to the memory governor.
 */
3566 restart_world (int generation, GGTimingInfo *timing)
3569 SgenThreadInfo *info;
3570 TV_DECLARE (end_sw);
3571 TV_DECLARE (end_bridge);
3572 unsigned long usec, bridge_usec;
3574 /* notify the profiler of the leftovers */
3575 if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_GC_MOVES)) {
3576 if (moved_objects_idx) {
3577 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
3578 moved_objects_idx = 0;
3581 mono_profiler_gc_event (MONO_GC_EVENT_PRE_START_WORLD, generation);
/* invalidate the stack/register snapshots taken at stop time */
3582 FOREACH_THREAD (info) {
3583 info->stack_start = NULL;
3585 info->monoctx = NULL;
3587 info->stopped_regs = NULL;
3589 } END_FOREACH_THREAD
3591 stw_bridge_process ();
3592 release_gc_locks ();
3594 count = sgen_thread_handshake (FALSE);
3595 TV_GETTIME (end_sw);
3596 usec = TV_ELAPSED (stop_world_time, end_sw);
3597 max_pause_usec = MAX (usec, max_pause_usec);
3598 DEBUG (2, fprintf (gc_debug_file, "restarted %d thread(s) (pause time: %d usec, max: %d)\n", count, (int)usec, (int)max_pause_usec));
3599 mono_profiler_gc_event (MONO_GC_EVENT_POST_START_WORLD, generation);
3603 TV_GETTIME (end_bridge);
3604 bridge_usec = TV_ELAPSED (end_sw, end_bridge);
3607 timing [0].stw_time = usec;
3608 timing [0].bridge_time = bridge_usec;
3611 sgen_memgov_collection_end (generation, timing, timing ? 2 : 0);
/* Accessor for the generation currently being collected. */
3617 sgen_get_current_collection_generation (void)
3619 return current_collection_generation;
/* Install the runtime's GC callback table (copied by value). */
3623 mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
3625 gc_callbacks = *callbacks;
/* Return a pointer to the installed GC callback table. */
3629 mono_gc_get_gc_callbacks ()
3631 return &gc_callbacks;
3634 /* Variables holding start/end nursery so it won't have to be passed at every call */
3635 static void *scan_area_arg_start, *scan_area_arg_end;
/* Conservatively pin nursery objects referenced from [start, end); the
 * nursery bounds come from the scan_area_arg_* globals set by
 * scan_thread_data. */
3638 mono_gc_conservatively_scan_area (void *start, void *end)
3640 conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
/* Precise-stack-scan callback: copy/mark obj using the per-thread
 * UserCopyOrMarkData installed around the thread_mark_func call. */
3644 mono_gc_scan_object (void *obj)
3646 UserCopyOrMarkData *data = mono_native_tls_get_value (user_copy_or_mark_key);
3647 current_object_ops.copy_or_mark_object (&obj, data->queue);
3652 * Mark from thread stacks and registers.
/*
 * Mark from every registered thread's stack and saved registers.  Threads
 * that are dead, GC-disabled, or not seen in the stop-the-world handshake
 * are skipped.  Stacks are scanned precisely via the runtime's
 * thread_mark_func when available (and precise marking is enabled),
 * otherwise pinned conservatively; registers are always scanned
 * conservatively on the non-precise pass.
 */
3655 scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue)
3657 SgenThreadInfo *info;
/* publish the nursery bounds for mono_gc_conservatively_scan_area */
3659 scan_area_arg_start = start_nursery;
3660 scan_area_arg_end = end_nursery;
3662 FOREACH_THREAD (info) {
3664 DEBUG (3, fprintf (gc_debug_file, "Skipping dead thread %p, range: %p-%p, size: %td\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start));
3667 if (info->gc_disabled) {
3668 DEBUG (3, fprintf (gc_debug_file, "GC disabled for thread %p, range: %p-%p, size: %td\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start));
3672 if (!info->joined_stw) {
3673 DEBUG (3, fprintf (gc_debug_file, "Skipping thread not seen in STW %p, range: %p-%p, size: %td\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start));
3677 DEBUG (3, fprintf (gc_debug_file, "Scanning thread %p, range: %p-%p, size: %td, pinned=%d\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, sgen_get_pinned_count ()));
3678 if (!info->thread_is_dying) {
3679 if (gc_callbacks.thread_mark_func && !conservative_stack_mark) {
3680 UserCopyOrMarkData data = { NULL, queue };
3681 set_user_copy_or_mark_data (&data);
3682 gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise);
3683 set_user_copy_or_mark_data (NULL);
3684 } else if (!precise) {
3685 conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
/* registers: MonoContext variant ... */
3690 if (!info->thread_is_dying && !precise)
3691 conservatively_pin_objects_from ((void**)info->monoctx, (void**)info->monoctx + ARCH_NUM_REGS,
3692 start_nursery, end_nursery, PIN_TYPE_STACK);
/* ... or raw stopped_regs variant */
3694 if (!info->thread_is_dying && !precise)
3695 conservatively_pin_objects_from (info->stopped_regs, info->stopped_regs + ARCH_NUM_REGS,
3696 start_nursery, end_nursery, PIN_TYPE_STACK);
3698 } END_FOREACH_THREAD
/*
 * Debug helper: report (via DEBUG level 0) every thread-stack slot or saved
 * register whose value points into [obj, obj+size) — i.e. what is pinning
 * the object.
 */
3702 find_pinning_ref_from_thread (char *obj, size_t size)
3705 SgenThreadInfo *info;
3706 char *endobj = obj + size;
3708 FOREACH_THREAD (info) {
3709 char **start = (char**)info->stack_start;
3712 while (start < (char**)info->stack_end) {
3713 if (*start >= obj && *start < endobj) {
3714 DEBUG (0, fprintf (gc_debug_file, "Object %p referenced in thread %p (id %p) at %p, stack: %p-%p\n", obj, info, (gpointer)mono_thread_info_get_tid (info), start, info->stack_start, info->stack_end));
/* also check the saved register snapshot */
3719 for (j = 0; j < ARCH_NUM_REGS; ++j) {
3721 mword w = ((mword*)info->monoctx) [j];
3723 mword w = (mword)info->stopped_regs [j];
3726 if (w >= (mword)obj && w < (mword)obj + size)
3727 DEBUG (0, fprintf (gc_debug_file, "Object %p referenced in saved reg %d of thread %p (id %p)\n", obj, j, info, (gpointer)mono_thread_info_get_tid (info)));
3728 } END_FOREACH_THREAD
/* TRUE if ptr lies on the current thread's stack, between the address of a
 * local (approximate stack top) and the recorded stack end. */
3733 ptr_on_stack (void *ptr)
3735 gpointer stack_start = &stack_start;
3736 SgenThreadInfo *info = mono_thread_info_current ();
3738 if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
/*
 * Register a newly attached thread with the GC: initialize its
 * SgenThreadInfo fields, publish it in TLS, determine its stack bounds
 * (pthread attributes where available, otherwise the np getters, otherwise
 * page-aligning `addr` assuming a downward-growing stack), hook it into the
 * remembered set, and invoke the runtime's thread_attach callback.
 * NOTE(review): elided chunk — several #ifdef branches are incomplete here.
 */
3744 sgen_thread_register (SgenThreadInfo* info, void *addr)
3746 #ifndef HAVE_KW_THREAD
3747 SgenThreadInfo *__thread_info__ = info;
3751 #ifndef HAVE_KW_THREAD
3752 info->tlab_start = info->tlab_next = info->tlab_temp_end = info->tlab_real_end = NULL;
/* a thread must not be registered twice */
3754 g_assert (!mono_native_tls_get_value (thread_info_key));
3755 mono_native_tls_set_value (thread_info_key, info);
3760 #if !defined(__MACH__)
3761 info->stop_count = -1;
3765 info->joined_stw = FALSE;
3766 info->doing_handshake = FALSE;
3767 info->thread_is_dying = FALSE;
3768 info->stack_start = NULL;
3769 info->store_remset_buffer_addr = &STORE_REMSET_BUFFER;
3770 info->store_remset_buffer_index_addr = &STORE_REMSET_BUFFER_INDEX;
3771 info->stopped_ip = NULL;
3772 info->stopped_domain = NULL;
3774 info->monoctx = NULL;
3776 info->stopped_regs = NULL;
3779 sgen_init_tlab_info (info);
3781 binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));
3783 #ifdef HAVE_KW_THREAD
3784 store_remset_buffer_index_addr = &store_remset_buffer_index;
/* Mach suspension needs the thread's mach port */
3787 #if defined(__MACH__)
3788 info->mach_port = mach_thread_self ();
3791 /* try to get it with attributes first */
3792 #if defined(HAVE_PTHREAD_GETATTR_NP) && defined(HAVE_PTHREAD_ATTR_GETSTACK)
3796 pthread_attr_t attr;
3797 pthread_getattr_np (pthread_self (), &attr);
3798 pthread_attr_getstack (&attr, &sstart, &size);
3799 info->stack_start_limit = sstart;
3800 info->stack_end = (char*)sstart + size;
3801 pthread_attr_destroy (&attr);
3803 #elif defined(HAVE_PTHREAD_GET_STACKSIZE_NP) && defined(HAVE_PTHREAD_GET_STACKADDR_NP)
3804 info->stack_end = (char*)pthread_get_stackaddr_np (pthread_self ());
3805 info->stack_start_limit = (char*)info->stack_end - pthread_get_stacksize_np (pthread_self ());
3808 /* FIXME: we assume the stack grows down */
/* fallback: round the caller-supplied address up to a page boundary */
3809 gsize stack_bottom = (gsize)addr;
3810 stack_bottom += 4095;
3811 stack_bottom &= ~4095;
3812 info->stack_end = (char*)stack_bottom;
3816 #ifdef HAVE_KW_THREAD
3817 stack_end = info->stack_end;
3820 if (remset.register_thread)
3821 remset.register_thread (info);
3823 DEBUG (3, fprintf (gc_debug_file, "registered thread %p (%p) stack end %p\n", info, (gpointer)mono_thread_info_get_tid (info), info->stack_end));
3825 if (gc_callbacks.thread_attach_func)
3826 info->runtime_data = gc_callbacks.thread_attach_func ();
/* Let the remembered-set implementation release per-thread state. */
3833 sgen_wbarrier_cleanup_thread (SgenThreadInfo *p)
3835 if (remset.cleanup_thread)
3836 remset.cleanup_thread (p);
/*
 * Detach a dying thread from the GC: detach it from the runtime if its
 * domain is still registered, mark it dying, work around the
 * TLS-teardown/STW race described below, then release its mach port,
 * runtime data, write-barrier state, and thread-info record.
 */
3840 sgen_thread_unregister (SgenThreadInfo *p)
3842 /* If a delegate is passed to native code and invoked on a thread we dont
3843 * know about, the jit will register it with mono_jit_thread_attach, but
3844 * we have no way of knowing when that thread goes away. SGen has a TSD
3845 * so we assume that if the domain is still registered, we can detach
3848 if (mono_domain_get ())
3849 mono_thread_detach (mono_thread_current ());
3851 p->thread_is_dying = TRUE;
/*
 There is a race condition between a thread finishing executing and being
 removed from the GC thread set.
 This happens on posix systems when TLS data is being cleaned up; libpthread
 will set the thread_info slot to NULL before calling the cleanup function.
 This opens a window in which the thread is registered but has a NULL TLS.
 The suspend signal handler needs TLS data to know where to store thread
 state data, or otherwise it will simply ignore the thread.
 This solution works because the thread doing STW will wait until all
 suspended threads handshake back, so there is no race between the
 doing_handshake test and the suspend_thread call.
 This is not required on systems that do synchronous STW, as those can deal
 with the above race at suspend time.
 FIXME: I believe we could avoid this by using mono_thread_info_lookup when
 mono_thread_info_current returns NULL. Or fix mono_thread_info_lookup to do so.
*/
3873 #if (defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED) || !defined(HAVE_PTHREAD_KILL)
/* spin until the GC lock is free, parking if a handshake is in progress */
3876 while (!TRYLOCK_GC) {
3877 if (!sgen_park_current_thread_if_doing_handshake (p))
3882 binary_protocol_thread_unregister ((gpointer)mono_thread_info_get_tid (p));
3883 DEBUG (3, fprintf (gc_debug_file, "unregister thread %p (%p)\n", p, (gpointer)mono_thread_info_get_tid (p)));
3885 #if defined(__MACH__)
3886 mach_port_deallocate (current_task (), p->mach_port);
3889 if (gc_callbacks.thread_detach_func) {
3890 gc_callbacks.thread_detach_func (p->runtime_data);
3891 p->runtime_data = NULL;
3893 sgen_wbarrier_cleanup_thread (p);
3895 mono_threads_unregister_current_thread (p);
/* Late attach hook: make sure the runtime's per-thread data exists. */
3901 sgen_thread_attach (SgenThreadInfo *info)
3904 /*this is odd, can we get attached before the gc is inited?*/
3908 if (gc_callbacks.thread_attach_func && !info->runtime_data)
3909 info->runtime_data = gc_callbacks.thread_attach_func ();
/* Public entry point: attach the current thread, TRUE on success. */
3912 mono_gc_register_thread (void *baseptr)
3914 return mono_thread_info_attach (baseptr) != NULL;
3918 * mono_gc_set_stack_end:
3920 * Set the end of the current threads stack to STACK_END. The stack space between
3921 * STACK_END and the real end of the threads stack will not be scanned during collections.
/* See doc comment above: shrink the scanned stack range to end at
 * stack_end.  The new end must be strictly below the current one. */
3924 mono_gc_set_stack_end (void *stack_end)
3926 SgenThreadInfo *info;
3929 info = mono_thread_info_current ();
3931 g_assert (stack_end < info->stack_end);
3932 info->stack_end = stack_end;
3937 #if USE_PTHREAD_INTERCEPT
/* Pass-through pthread_create wrapper (SGen needs no interception here). */
3941 mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
3943 return pthread_create (new_thread, attr, start_routine, arg);
/* Pass-through pthread_join wrapper. */
3947 mono_gc_pthread_join (pthread_t thread, void **retval)
3949 return pthread_join (thread, retval);
/* Pass-through pthread_detach wrapper. */
3953 mono_gc_pthread_detach (pthread_t thread)
3955 return pthread_detach (thread);
/* Pass-through pthread_exit wrapper. */
3959 mono_gc_pthread_exit (void *retval)
3961 pthread_exit (retval);
3964 #endif /* USE_PTHREAD_INTERCEPT */
3967 * ######################################################################
3968 * ######## Write barriers
3969 * ######################################################################
3973 * Note: the write barriers first do the needed GC work and then do the actual store:
3974 * this way the value is visible to the conservative GC scan after the write barrier
3975 * itself. If a GC interrupts the barrier in the middle, value will be kept alive by
3976 * the conservative scan, otherwise by the remembered set scan.
/* Write barrier for an object field store.  Stores into the nursery need
 * no remset (the nursery is scanned anyway); otherwise delegate to the
 * remembered-set implementation, which also performs the store. */
3979 mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
3981 HEAVY_STAT (++stat_wbarrier_set_field);
3982 if (ptr_in_nursery (field_ptr)) {
3983 *(void**)field_ptr = value;
3986 DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p\n", field_ptr));
3988 binary_protocol_wbarrier (field_ptr, value, value->vtable);
3990 remset.wbarrier_set_field (obj, field_ptr, value);
/* Write barrier for storing a reference into an array slot; same nursery
 * short-circuit as mono_gc_wbarrier_set_field. */
3994 mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
3996 HEAVY_STAT (++stat_wbarrier_set_arrayref);
3997 if (ptr_in_nursery (slot_ptr)) {
3998 *(void**)slot_ptr = value;
4001 DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p\n", slot_ptr));
4003 binary_protocol_wbarrier (slot_ptr, value, value->vtable);
4005 remset.wbarrier_set_arrayref (arr, slot_ptr, value);
/* Write barrier for copying `count` reference slots from src_ptr to
 * dest_ptr.  A nursery destination (or empty copy) needs no remset and is
 * done with a plain memmove; otherwise the remset implementation copies
 * and records.  Under SGEN_BINARY_PROTOCOL each slot is logged first. */
4009 mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
4011 HEAVY_STAT (++stat_wbarrier_arrayref_copy);
4012 /*This check can be done without taking a lock since dest_ptr array is pinned*/
4013 if (ptr_in_nursery (dest_ptr) || count <= 0) {
4014 mono_gc_memmove (dest_ptr, src_ptr, count * sizeof (gpointer));
4018 #ifdef SGEN_BINARY_PROTOCOL
4021 for (i = 0; i < count; ++i) {
4022 gpointer dest = (gpointer*)dest_ptr + i;
4023 gpointer obj = *((gpointer*)src_ptr + i);
4025 binary_protocol_wbarrier (dest, obj, (gpointer)LOAD_VTABLE (obj));
4030 remset.wbarrier_arrayref_copy (dest_ptr, src_ptr, count);
4033 static char *found_obj;
/* Iteration callback for find_object_for_ptr: record in found_obj the
 * object whose extent contains the pointer passed as user_data; asserts
 * that at most one match exists. */
4036 find_object_for_ptr_callback (char *obj, size_t size, void *user_data)
4038 char *ptr = user_data;
4040 if (ptr >= obj && ptr < obj + size) {
4041 g_assert (!found_obj);
4046 /* for use in the debugger */
/* Debugger helper: locate the object containing ptr by scanning the
 * nursery, the LOS, and finally (slowly) the entire major heap. */
4047 char* find_object_for_ptr (char *ptr);
4049 find_object_for_ptr (char *ptr)
4051 if (ptr >= nursery_section->data && ptr < nursery_section->end_data) {
4053 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
4054 find_object_for_ptr_callback, ptr, TRUE);
4060 sgen_los_iterate_objects (find_object_for_ptr_callback, ptr);
4065 * Very inefficient, but this is debugging code, supposed to
4066 * be called from gdb, so we don't care.
4069 major_collector.iterate_objects (TRUE, TRUE, find_object_for_ptr_callback, ptr);
/*
 * Generic write barrier for a pointer-sized slot that has ALREADY been
 * stored to.  Skips the remset when the slot is in the nursery or on the
 * stack, or when the stored value is not a nursery pointer; otherwise
 * records the slot via the remset implementation.  The XDOMAIN block is
 * extra cross-domain-reference checking for debugging.
 */
4074 mono_gc_wbarrier_generic_nostore (gpointer ptr)
4076 HEAVY_STAT (++stat_wbarrier_generic_store);
4078 #ifdef XDOMAIN_CHECKS_IN_WBARRIER
4079 /* FIXME: ptr_in_heap must be called with the GC lock held */
4080 if (xdomain_checks && *(MonoObject**)ptr && ptr_in_heap (ptr)) {
4081 char *start = find_object_for_ptr (ptr);
4082 MonoObject *value = *(MonoObject**)ptr;
4086 MonoObject *obj = (MonoObject*)start;
4087 if (obj->vtable->domain != value->vtable->domain)
4088 g_assert (is_xdomain_ref_allowed (ptr, start, obj->vtable->domain));
4094 if (*(gpointer*)ptr)
4095 binary_protocol_wbarrier (ptr, *(gpointer*)ptr, (gpointer)LOAD_VTABLE (*(gpointer*)ptr));
4097 if (ptr_in_nursery (ptr) || ptr_on_stack (ptr) || !ptr_in_nursery (*(gpointer*)ptr)) {
4098 DEBUG (8, fprintf (gc_debug_file, "Skipping remset at %p\n", ptr));
4102 DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p\n", ptr));
4104 remset.wbarrier_generic_nostore (ptr);
/* Store value into *ptr, then run the generic barrier if the value lives
 * in the nursery.  sgen_dummy_use keeps `value` live across the barrier
 * for the benefit of the conservative scan. */
4108 mono_gc_wbarrier_generic_store (gpointer ptr, MonoObject* value)
4110 DEBUG (8, fprintf (gc_debug_file, "Wbarrier store at %p to %p (%s)\n", ptr, value, value ? safe_name (value) : "null"));
4111 *(void**)ptr = value;
4112 if (ptr_in_nursery (value))
4113 mono_gc_wbarrier_generic_nostore (ptr);
4114 sgen_dummy_use (value);
/* Copy a value-type word by word, routing the words flagged in `bitmap`
 * (reference slots) through mono_gc_wbarrier_generic_store.
 * NOTE(review): elided chunk — the loop and the plain-copy branch are not
 * visible here. */
4117 void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
4119 mword *dest = _dest;
4124 mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);
4129 size -= SIZEOF_VOID_P;
4134 #ifdef SGEN_BINARY_PROTOCOL
/* Binary-protocol logging for a value copy: HANDLE_PTR logs each reference
 * slot, translating the slot's offset within the source object to the
 * corresponding destination address; sgen-scan-object.h expands the scan
 * loop (SCAN_OBJECT_NOVTABLE: `start` points at raw data, no vtable). */
4136 #define HANDLE_PTR(ptr,obj) do { \
4137 gpointer o = *(gpointer*)(ptr); \
4139 gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
4140 binary_protocol_wbarrier (d, o, (gpointer) LOAD_VTABLE (o)); \
4145 scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
4147 #define SCAN_OBJECT_NOVTABLE
4148 #include "sgen-scan-object.h"
/*
 * Write barrier for copying `count` value-type instances of `klass` from
 * src to dest.  When the destination is in the nursery/on the stack, or
 * the type holds no references, a plain memmove suffices; otherwise the
 * remset implementation performs the copy and records the slots.
 */
4153 mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
4155 HEAVY_STAT (++stat_wbarrier_value_copy);
4156 g_assert (klass->valuetype);
4158 DEBUG (8, fprintf (gc_debug_file, "Adding value remset at %p, count %d, descr %p for class %s (%p)\n", dest, count, klass->gc_descr, klass->name, klass));
4160 if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !SGEN_CLASS_HAS_REFERENCES (klass)) {
4161 size_t element_size = mono_class_value_size (klass, NULL);
4162 size_t size = count * element_size;
4163 mono_gc_memmove (dest, src, size);
4167 #ifdef SGEN_BINARY_PROTOCOL
/* src points past a (conceptual) MonoObject header, hence the offset */
4170 for (i = 0; i < count; ++i) {
4171 scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
4172 (char*)src + i * element_size - sizeof (MonoObject),
4173 (mword) klass->gc_descr);
4178 remset.wbarrier_value_copy (dest, src, count, klass);
4182 * mono_gc_wbarrier_object_copy:
4184 * Write barrier to call when obj is the result of a clone or copy of an object.
/* See doc comment above: barrier for a whole-object clone.  A nursery or
 * stack destination gets a plain field copy (skipping the MonoObject
 * header); otherwise the remset implementation copies and records. */
4187 mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
4191 HEAVY_STAT (++stat_wbarrier_object_copy);
4193 if (ptr_in_nursery (obj) || ptr_on_stack (obj)) {
4194 size = mono_object_class (obj)->instance_size;
4195 mono_gc_memmove ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
4196 size - sizeof (MonoObject));
4200 #ifdef SGEN_BINARY_PROTOCOL
4201 scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
4204 remset.wbarrier_object_copy (obj, src);
4209 * ######################################################################
4210 * ######## Other mono public interface functions.
4211 * ######################################################################
/* Batch size for buffered reference reporting during heap walks. */
#define REFS_SIZE 128

/* Fields of the heap-walk state (HeapWalkInfo; the struct header is
 * elided in this extract).  `refs'/`offsets' buffer up to REFS_SIZE
 * referenced objects per callback invocation. */
MonoGCReferences callback;
MonoObject *refs [REFS_SIZE];
uintptr_t offsets [REFS_SIZE];

/* Per-reference hook for the scan loop: when the buffer is full, flush
 * it through the user callback, then record the reference and its
 * offset from the object start. */
#define HANDLE_PTR(ptr,obj) do { \
	if (hwi->count == REFS_SIZE) { \
		hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data); \
	hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start; \
	hwi->refs [hwi->count++] = *(ptr); \

/* Collects the references of the object at `start' into hwi via the
 * HANDLE_PTR hook; the scan loop comes from sgen-scan-object.h. */
collect_references (HeapWalkInfo *hwi, char *start, size_t size)

#include "sgen-scan-object.h"

/* Object-iteration callback: scan one object and report any buffered
 * references (the object size is reported only on the first call). */
walk_references (char *start, size_t size, void *data)

	HeapWalkInfo *hwi = data;

	collect_references (hwi, start, size);
	if (hwi->count || !hwi->called)
		hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
/**
 * mono_gc_walk_heap:
 * @flags: flags for future use
 * @callback: a function pointer called for each object in the heap
 * @data: a user data pointer that is passed to callback
 *
 * This function can be used to iterate over all the live objects in the heap:
 * for each object, @callback is invoked, providing info about the object's
 * location in memory, its class, its size and the objects it references.
 * For each referenced object its offset from the object address is
 * reported in the offsets array.
 * The object references may be buffered, so the callback may be invoked
 * multiple times for the same object: in all but the first call, the size
 * argument will be zero.
 * Note that this function can be only called in the #MONO_GC_EVENT_PRE_START_WORLD
 * profiler event handler.
 *
 * Returns: a non-zero value if the GC doesn't support heap walking
 */
mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)

	hwi.callback = callback;

	/* Walk the nursery, the major heap and the large-object space in
	 * turn; fragments are cleared first so the nursery scans linearly. */
	sgen_clear_nursery_fragments ();
	sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, walk_references, &hwi, FALSE);

	major_collector.iterate_objects (TRUE, TRUE, walk_references, &hwi);
	sgen_los_iterate_objects (walk_references, &hwi);
/* Triggers a collection of the given generation on behalf of user code. */
mono_gc_collect (int generation)

	sgen_perform_collection (0, generation, "user request");

/* Index of the oldest generation (body elided in this extract). */
mono_gc_max_generation (void)

/* Number of collections run so far for `generation' (0 = minor). */
mono_gc_collection_count (int generation)

	if (generation == 0)
		return stat_minor_gcs;
	return stat_major_gcs;

/* Approximate live heap size: LOS usage + nursery allocation pointer +
 * major-heap usage.  Pinned objects are not accounted for (see FIXME). */
mono_gc_get_used_size (void)

	tot = los_memory_usage;
	tot += nursery_section->next_data - nursery_section->data;
	tot += major_collector.get_used_size ();
	/* FIXME: account for pinned objects */

mono_gc_disable (void)

mono_gc_enable (void)

/* Largest object size handled by the small-object allocator; anything
 * bigger goes to the large-object space. */
mono_gc_get_los_limit (void)

	return MAX_SMALL_OBJ_SIZE;

mono_gc_user_markers_supported (void)

mono_object_is_alive (MonoObject* o)

/* Returns 0 for nursery objects; older-generation path elided here. */
mono_gc_get_generation (MonoObject *obj)

	if (ptr_in_nursery (obj))

mono_gc_enable_events (void)

/* Registers a weak link for obj (track = resurrection tracking). */
mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track)

	mono_gc_register_disappearing_link (obj, link_addr, track, FALSE);

mono_gc_weak_link_remove (void **link_addr)

	mono_gc_register_disappearing_link (NULL, link_addr, FALSE, FALSE);

/* Weak link slots store hidden pointers; reveal on read. */
mono_gc_weak_link_get (void **link_addr)

	return (MonoObject*) REVEAL_POINTER (*link_addr);

/* Registers an ephemeron array with the collector so its entries get
 * the special key/value liveness treatment during marking. */
mono_gc_ephemeron_array_add (MonoObject *obj)

	EphemeronLinkNode *node;

	node = sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);

	/* Prepend to the global ephemeron list. */
	node->array = (char*)obj;
	node->next = ephemeron_list;
	ephemeron_list = node;

	DEBUG (5, fprintf (gc_debug_file, "Registered ephemeron array %p\n", obj));

/* Runs func while holding the GC interruption lock. */
mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)

	result = func (data);
	UNLOCK_INTERRUPTION;

mono_gc_is_gc_thread (void)

	result = mono_thread_info_current () != NULL;

/* A method is critical if the GC must not interrupt it mid-execution. */
is_critical_method (MonoMethod *method)

	return mono_runtime_is_critical_method (method) || mono_gc_is_critical_method (method);
/*
 * One-time initialization of the SGen GC: threading callbacks, locks,
 * environment-variable option parsing, collector selection and
 * remembered-set setup.  Safe to call from multiple threads; exactly
 * one performs the init.
 */
mono_gc_base_init (void)

	MonoThreadInfoCallbacks cb;

	char *major_collector_opt = NULL;
	char *minor_collector_opt = NULL;

	glong soft_limit = 0;

	gboolean debug_print_allowance = FALSE;
	double allowance_ratio = 0, save_target = 0;

	/* CAS gc_initialized 0 -> -1: the winner initializes, other
	 * threads wait (loop body partly elided in this extract). */
	result = InterlockedCompareExchange (&gc_initialized, -1, 0);

	/* already inited */

	/* being inited by another thread */

	/* we will init it */

	g_assert_not_reached ();

	} while (result != 0);

	LOCK_INIT (gc_mutex);

	pagesize = mono_pagesize ();
	gc_debug_file = stderr;

	/* Hook SGen into the generic thread-info machinery. */
	cb.thread_register = sgen_thread_register;
	cb.thread_unregister = sgen_thread_unregister;
	cb.thread_attach = sgen_thread_attach;
	cb.mono_method_is_critical = (gpointer)is_critical_method;

	cb.mono_gc_pthread_create = (gpointer)mono_gc_pthread_create;

	mono_threads_init (&cb, sizeof (SgenThreadInfo));

	LOCK_INIT (interruption_mutex);
	LOCK_INIT (pin_queue_mutex);

	init_user_copy_or_mark_key ();
	/* First pass over MONO_GC_PARAMS: only extract the collector
	 * choices, which must be known before allocators are configured. */
	if ((env = getenv ("MONO_GC_PARAMS"))) {
		opts = g_strsplit (env, ",", -1);
		for (ptr = opts; *ptr; ++ptr) {
			if (g_str_has_prefix (opt, "major=")) {
				opt = strchr (opt, '=') + 1;
				major_collector_opt = g_strdup (opt);
			} else if (g_str_has_prefix (opt, "minor=")) {
				opt = strchr (opt, '=') + 1;
				minor_collector_opt = g_strdup (opt);

	sgen_init_internal_allocator ();
	sgen_init_nursery_allocator ();

	/* Register the fixed-size internal allocation classes. */
	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION, SGEN_SIZEOF_GC_MEM_SECTION);
	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FINALIZE_READY_ENTRY, sizeof (FinalizeReadyEntry));
	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE, sizeof (GrayQueueSection));
	g_assert (sizeof (GenericStoreRememberedSet) == sizeof (gpointer) * STORE_REMSET_BUFFER_SIZE);
	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_STORE_REMSET, sizeof (GenericStoreRememberedSet));
	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));

#ifndef HAVE_KW_THREAD
	mono_native_tls_alloc (&thread_info_key, NULL);

	/*
	 * This needs to happen before any internal allocations because
	 * it inits the small id which is required for hazard pointer
	 */
	mono_thread_info_attach (&dummy);
	/* Minor collector selection: default is the simple nursery. */
	if (!minor_collector_opt) {
		sgen_simple_nursery_init (&sgen_minor_collector);

	if (!strcmp (minor_collector_opt, "simple"))
		sgen_simple_nursery_init (&sgen_minor_collector);
	else if (!strcmp (minor_collector_opt, "split"))
		sgen_split_nursery_init (&sgen_minor_collector);

	fprintf (stderr, "Unknown minor collector `%s'.\n", minor_collector_opt);

	/* Major collector selection: default is marksweep.
	 * NOTE(review): the `!major_collector_opt ||' tests on the second
	 * and later branches are dead -- a NULL option is always consumed
	 * by the first branch. */
	if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep")) {
		sgen_marksweep_init (&major_collector);
	} else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-fixed")) {
		sgen_marksweep_fixed_init (&major_collector);
	} else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-par")) {
		sgen_marksweep_par_init (&major_collector);
	} else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-fixed-par")) {
		sgen_marksweep_fixed_par_init (&major_collector);
	} else if (!strcmp (major_collector_opt, "copying")) {
		sgen_copying_init (&major_collector);

	fprintf (stderr, "Unknown major collector `%s'.\n", major_collector_opt);

	/* Card-table availability depends on both platform and collector. */
#ifdef SGEN_HAVE_CARDTABLE
	use_cardtable = major_collector.supports_cardtable;

	use_cardtable = FALSE;

	/* Worker-thread count for parallel collectors, capped at 16. */
	num_workers = mono_cpu_count ();
	g_assert (num_workers > 0);
	if (num_workers > 16)

	///* Keep this the default for now */
	/* Precise marking is broken on all supported targets. Disable until fixed. */
	conservative_stack_mark = TRUE;

	sgen_nursery_size = DEFAULT_NURSERY_SIZE;
4581 sgen_nursery_size = DEFAULT_NURSERY_SIZE;
	/* Second pass over MONO_GC_PARAMS: everything except the major=/
	 * minor= options, which were consumed in the first pass. */
	for (ptr = opts; *ptr; ++ptr) {

		if (g_str_has_prefix (opt, "major="))

		if (g_str_has_prefix (opt, "minor="))

		if (g_str_has_prefix (opt, "wbarrier=")) {
			opt = strchr (opt, '=') + 1;
			if (strcmp (opt, "remset") == 0) {
				use_cardtable = FALSE;
			} else if (strcmp (opt, "cardtable") == 0) {
				if (!use_cardtable) {
					/* Distinguish platform vs. collector limitation
					 * in the diagnostic. */
					if (major_collector.supports_cardtable)
						fprintf (stderr, "The cardtable write barrier is not supported on this platform.\n");

					fprintf (stderr, "The major collector does not support the cardtable write barrier.\n");

			fprintf (stderr, "wbarrier must either be `remset' or `cardtable'.");

		if (g_str_has_prefix (opt, "max-heap-size=")) {
			opt = strchr (opt, '=') + 1;
			if (*opt && mono_gc_parse_environment_string_extract_number (opt, &max_heap)) {
				/* The heap limit must be page aligned. */
				if ((max_heap & (mono_pagesize () - 1))) {
					fprintf (stderr, "max-heap-size size must be a multiple of %d.\n", mono_pagesize ());

				fprintf (stderr, "max-heap-size must be an integer.\n");

		if (g_str_has_prefix (opt, "soft-heap-limit=")) {
			opt = strchr (opt, '=') + 1;
			if (*opt && mono_gc_parse_environment_string_extract_number (opt, &soft_limit)) {
				if (soft_limit <= 0) {
					fprintf (stderr, "soft-heap-limit must be positive.\n");

				fprintf (stderr, "soft-heap-limit must be an integer.\n");

		if (g_str_has_prefix (opt, "workers=")) {

			if (!major_collector.is_parallel) {
				fprintf (stderr, "The workers= option can only be used for parallel collectors.");

			opt = strchr (opt, '=') + 1;
			val = strtol (opt, &endptr, 10);
			/* Reject empty values and trailing garbage. */
			if (!*opt || *endptr) {
				fprintf (stderr, "Cannot parse the workers= option value.");

			if (val <= 0 || val > 16) {
				fprintf (stderr, "The number of workers must be in the range 1 to 16.");

			num_workers = (int)val;

		if (g_str_has_prefix (opt, "stack-mark=")) {
			opt = strchr (opt, '=') + 1;
			if (!strcmp (opt, "precise")) {
				conservative_stack_mark = FALSE;
			} else if (!strcmp (opt, "conservative")) {
				conservative_stack_mark = TRUE;

			fprintf (stderr, "Invalid value '%s' for stack-mark= option, possible values are: 'precise', 'conservative'.\n", opt);

		if (g_str_has_prefix (opt, "bridge=")) {
			opt = strchr (opt, '=') + 1;
			sgen_register_test_bridge_callbacks (g_strdup (opt));
		if (g_str_has_prefix (opt, "nursery-size=")) {

			opt = strchr (opt, '=') + 1;
			if (*opt && mono_gc_parse_environment_string_extract_number (opt, &val)) {
				sgen_nursery_size = val;
#ifdef SGEN_ALIGN_NURSERY
				/* Aligned nurseries require a power-of-two size... */
				if ((val & (val - 1))) {
					fprintf (stderr, "The nursery size must be a power of two.\n");

				/* ...large enough to absorb the maximum waste. */
				if (val < SGEN_MAX_NURSERY_WASTE) {
					fprintf (stderr, "The nursery size must be at least %d bytes.\n", SGEN_MAX_NURSERY_WASTE);

				/* Derive log2 of the nursery size. */
				sgen_nursery_bits = 0;
				while (1 << (++ sgen_nursery_bits) != sgen_nursery_size)

				fprintf (stderr, "nursery-size must be an integer.\n");

		if (g_str_has_prefix (opt, "save-target-ratio=")) {

			opt = strchr (opt, '=') + 1;
			save_target = strtod (opt, &endptr);
			if (endptr == opt) {
				fprintf (stderr, "save-target-ratio must be a number.");

			if (save_target < SGEN_MIN_SAVE_TARGET_RATIO || save_target > SGEN_MAX_SAVE_TARGET_RATIO) {
				fprintf (stderr, "save-target-ratio must be between %.2f - %.2f.", SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO);
4713 if (g_str_has_prefix (opt, "default-allowance-ratio=")) {
4715 opt = strchr (opt, '=') + 1;
4717 allowance_ratio = strtod (opt, &endptr);
4718 if (endptr == opt) {
4719 fprintf (stderr, "save-target-ratio must be a number.");
4722 if (allowance_ratio < SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO || allowance_ratio > SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO) {
4723 fprintf (stderr, "default-allowance-ratio must be between %.2f - %.2f.", SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO);
		/* Unrecognized option: give each collector a chance to parse
		 * it before printing usage. */
		if (major_collector.handle_gc_param && major_collector.handle_gc_param (opt))

		if (sgen_minor_collector.handle_gc_param && sgen_minor_collector.handle_gc_param (opt))

		fprintf (stderr, "MONO_GC_PARAMS must be a comma-delimited list of one or more of the following:\n");
		fprintf (stderr, " max-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
		fprintf (stderr, " soft-heap-limit=n (where N is an integer, possibly with a k, m or a g suffix)\n");
		fprintf (stderr, " nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
		fprintf (stderr, " major=COLLECTOR (where COLLECTOR is `marksweep', `marksweep-par' or `copying')\n");
		fprintf (stderr, " minor=COLLECTOR (where COLLECTOR is `simple' or `split')\n");
		fprintf (stderr, " wbarrier=WBARRIER (where WBARRIER is `remset' or `cardtable')\n");
		fprintf (stderr, " stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
		if (major_collector.print_gc_param_usage)
			major_collector.print_gc_param_usage ();
		if (sgen_minor_collector.print_gc_param_usage)
			sgen_minor_collector.print_gc_param_usage ();
		fprintf (stderr, " Experimental options:\n");
		fprintf (stderr, " save-target-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO);
		fprintf (stderr, " default-allowance-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO);

	if (major_collector.is_parallel)
		sgen_workers_init (num_workers);

	/* NOTE(review): g_free (NULL) is a no-op, so these guards are
	 * redundant but harmless. */
	if (major_collector_opt)
		g_free (major_collector_opt);

	if (minor_collector_opt)
		g_free (minor_collector_opt);
	/* MONO_GC_DEBUG: comma-separated list of a numeric debug level
	 * (optionally with a log-file name) and/or named debug options. */
	if ((env = getenv ("MONO_GC_DEBUG"))) {
		opts = g_strsplit (env, ",", -1);
		for (ptr = opts; ptr && *ptr; ptr ++) {
			/* A leading digit selects the debug level. */
			if (opt [0] >= '0' && opt [0] <= '9') {
				gc_debug_level = atoi (opt);

				/* Per-process log file; fall back to stderr on failure. */
				char *rf = g_strdup_printf ("%s.%d", opt, GetCurrentProcessId ());

				char *rf = g_strdup_printf ("%s.%d", opt, getpid ());

				gc_debug_file = fopen (rf, "wb");

				gc_debug_file = stderr;

			} else if (!strcmp (opt, "print-allowance")) {
				debug_print_allowance = TRUE;
			} else if (!strcmp (opt, "print-pinning")) {
				do_pin_stats = TRUE;
			} else if (!strcmp (opt, "verify-before-allocs")) {
				verify_before_allocs = 1;
				has_per_allocation_action = TRUE;
			} else if (g_str_has_prefix (opt, "verify-before-allocs=")) {
				char *arg = strchr (opt, '=') + 1;
				verify_before_allocs = atoi (arg);
				has_per_allocation_action = TRUE;
			} else if (!strcmp (opt, "collect-before-allocs")) {
				collect_before_allocs = 1;
				has_per_allocation_action = TRUE;
			} else if (g_str_has_prefix (opt, "collect-before-allocs=")) {
				char *arg = strchr (opt, '=') + 1;
				has_per_allocation_action = TRUE;
				collect_before_allocs = atoi (arg);
			} else if (!strcmp (opt, "verify-before-collections")) {
				whole_heap_check_before_collection = TRUE;
			} else if (!strcmp (opt, "check-at-minor-collections")) {
				consistency_check_at_minor_collection = TRUE;
				nursery_clear_policy = CLEAR_AT_GC;
			} else if (!strcmp (opt, "xdomain-checks")) {
				xdomain_checks = TRUE;
			} else if (!strcmp (opt, "clear-at-gc")) {
				nursery_clear_policy = CLEAR_AT_GC;
			} else if (!strcmp (opt, "clear-nursery-at-gc")) {
				nursery_clear_policy = CLEAR_AT_GC;
			} else if (!strcmp (opt, "check-scan-starts")) {
				do_scan_starts_check = TRUE;
			} else if (!strcmp (opt, "verify-nursery-at-minor-gc")) {
				do_verify_nursery = TRUE;
			} else if (!strcmp (opt, "dump-nursery-at-minor-gc")) {
				do_dump_nursery_content = TRUE;
			} else if (!strcmp (opt, "disable-minor")) {
				disable_minor_collections = TRUE;
			} else if (!strcmp (opt, "disable-major")) {
				disable_major_collections = TRUE;
			} else if (g_str_has_prefix (opt, "heap-dump=")) {
				char *filename = strchr (opt, '=') + 1;
				nursery_clear_policy = CLEAR_AT_GC;
				/* NOTE(review): the fopen-failure path is elided in
				 * this extract -- confirm it is handled. */
				heap_dump_file = fopen (filename, "w");
				if (heap_dump_file) {
					fprintf (heap_dump_file, "<sgen-dump>\n");
					do_pin_stats = TRUE;
#ifdef SGEN_BINARY_PROTOCOL
			} else if (g_str_has_prefix (opt, "binary-protocol=")) {
				char *filename = strchr (opt, '=') + 1;
				binary_protocol_init (filename);

				fprintf (stderr, "Warning: Cardtable write barriers will not be binary-protocolled.\n");

				fprintf (stderr, "Invalid format for the MONO_GC_DEBUG env variable: '%s'\n", env);
				fprintf (stderr, "The format is: MONO_GC_DEBUG=[l[:filename]|<option>]+ where l is a debug level 0-9.\n");
				fprintf (stderr, "Valid options are:\n");
				fprintf (stderr, " collect-before-allocs[=<n>]\n");
				fprintf (stderr, " verify-before-allocs[=<n>]\n");
				fprintf (stderr, " check-at-minor-collections\n");
				fprintf (stderr, " verify-before-collections\n");
				fprintf (stderr, " disable-minor\n");
				fprintf (stderr, " disable-major\n");
				fprintf (stderr, " xdomain-checks\n");
				fprintf (stderr, " clear-at-gc\n");
				fprintf (stderr, " print-allowance\n");
				fprintf (stderr, " print-pinning\n");
	/* The parallel collector cannot gather per-object diagnostics. */
	if (major_collector.is_parallel) {
		if (heap_dump_file) {
			fprintf (stderr, "Error: Cannot do heap dump with the parallel collector.\n");

		fprintf (stderr, "Error: Cannot gather pinning statistics with the parallel collector.\n");

	/* Give the collector a chance to react to the parsed parameters. */
	if (major_collector.post_param_init)
		major_collector.post_param_init ();

	sgen_memgov_init (max_heap, soft_limit, debug_print_allowance, allowance_ratio, save_target);

	/* Pick the remembered-set implementation: card table when built in,
	 * otherwise sequential store buffers. */
	memset (&remset, 0, sizeof (remset));

#ifdef SGEN_HAVE_CARDTABLE
	sgen_card_table_init (&remset);

	sgen_ssb_init (&remset);

	if (remset.register_thread)
		remset.register_thread (mono_thread_info_current ());
mono_gc_get_gc_name (void)

/* The managed write-barrier wrapper, built lazily by
 * mono_gc_get_write_barrier () below. */
static MonoMethod *write_barrier_method;

/* Critical methods must not be interrupted mid-execution by the GC. */
mono_gc_is_critical_method (MonoMethod *method)

	return (method == write_barrier_method || sgen_is_managed_allocator (method));

/* Whether `ip' falls inside a critical (allocator/write-barrier)
 * managed method of `domain'. */
is_ip_in_managed_allocator (MonoDomain *domain, gpointer ip)

	if (!mono_thread_internal_current ())
		/* Happens during thread attach */

	ji = mono_jit_info_table_find (domain, ip);

	return mono_gc_is_critical_method (ji->method);
/*
 * Emits the "is this store interesting?" fast-path IL for the managed
 * write barrier.  Fills nursery_check_return_labels with up to three
 * forward branches that the caller must patch to its return point.
 * With an aligned nursery the checks are two shift-and-compare tests;
 * otherwise explicit start/end range comparisons are emitted.
 */
emit_nursery_check (MonoMethodBuilder *mb, int *nursery_check_return_labels)

	memset (nursery_check_return_labels, 0, sizeof (int) * 3);
#ifdef SGEN_ALIGN_NURSERY
	// if (ptr_in_nursery (ptr)) return;
	/*
	 * Masking out the bits might be faster, but we would have to use 64 bit
	 * immediates, which might be slower.
	 */
	mono_mb_emit_ldarg (mb, 0);
	mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
	mono_mb_emit_byte (mb, CEE_SHR_UN);
	mono_mb_emit_icon (mb, (mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
	nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BEQ);

	// if (!ptr_in_nursery (*ptr)) return;
	mono_mb_emit_ldarg (mb, 0);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
	mono_mb_emit_byte (mb, CEE_SHR_UN);
	mono_mb_emit_icon (mb, (mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
	nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BNE_UN);
4947 int label_continue1, label_continue2;
4948 int dereferenced_var;
	/* Unaligned nursery: explicit range checks against start/end. */
	// if (ptr < (sgen_get_nursery_start ())) goto continue;
	mono_mb_emit_ldarg (mb, 0);
	mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
	label_continue_1 = mono_mb_emit_branch (mb, CEE_BLT);

	// if (ptr >= sgen_get_nursery_end ())) goto continue;
	mono_mb_emit_ldarg (mb, 0);
	mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
	label_continue_2 = mono_mb_emit_branch (mb, CEE_BGE);

	/* Destination is inside the nursery: no barrier needed. */
	nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BR);

	mono_mb_patch_branch (mb, label_continue_1);
	mono_mb_patch_branch (mb, label_continue_2);

	// Dereference and store in local var
	dereferenced_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
	mono_mb_emit_ldarg (mb, 0);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	mono_mb_emit_stloc (mb, dereferenced_var);

	// if (*ptr < sgen_get_nursery_start ()) return;
	mono_mb_emit_ldloc (mb, dereferenced_var);
	mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
	nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BLT);

	// if (*ptr >= sgen_get_nursery_end ()) return;
	mono_mb_emit_ldloc (mb, dereferenced_var);
	mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
	nursery_check_return_labels [2] = mono_mb_emit_branch (mb, CEE_BGE);
/*
 * Creates (lazily, once) the managed write-barrier wrapper invoked on
 * reference stores.  Three variants: inlined card-table marking,
 * inlined TLS store-remset buffering, or a plain icall to
 * mono_gc_wbarrier_generic_nostore.
 */
mono_gc_get_write_barrier (void)

	MonoMethodBuilder *mb;
	MonoMethodSignature *sig;
#ifdef MANAGED_WBARRIER
	int i, nursery_check_labels [3];
	int label_no_wb_3, label_no_wb_4, label_need_wb, label_slow_path;
	int buffer_var, buffer_index_var, dummy_var;

#ifdef HAVE_KW_THREAD
	int stack_end_offset = -1, store_remset_buffer_offset = -1;
	int store_remset_buffer_index_offset = -1, store_remset_buffer_index_addr_offset = -1;

	/* Resolve the TLS offsets needed by the inlined fast path. */
	MONO_THREAD_VAR_OFFSET (stack_end, stack_end_offset);
	g_assert (stack_end_offset != -1);
	MONO_THREAD_VAR_OFFSET (store_remset_buffer, store_remset_buffer_offset);
	g_assert (store_remset_buffer_offset != -1);
	MONO_THREAD_VAR_OFFSET (store_remset_buffer_index, store_remset_buffer_index_offset);
	g_assert (store_remset_buffer_index_offset != -1);
	MONO_THREAD_VAR_OFFSET (store_remset_buffer_index_addr, store_remset_buffer_index_addr_offset);
	g_assert (store_remset_buffer_index_addr_offset != -1);

	// FIXME: Maybe create a separate version for ctors (the branch would be
	// correctly predicted more times)
	if (write_barrier_method)
		return write_barrier_method;

	/* Create the IL version of mono_gc_barrier_generic_store () */
	sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
	sig->ret = &mono_defaults.void_class->byval_arg;
	sig->params [0] = &mono_defaults.int_class->byval_arg;

	mb = mono_mb_new (mono_defaults.object_class, "wbarrier", MONO_WRAPPER_WRITE_BARRIER);

#ifdef MANAGED_WBARRIER
	if (use_cardtable) {
		emit_nursery_check (mb, nursery_check_labels);
		/*
		addr = sgen_cardtable + ((address >> CARD_BITS) & CARD_MASK)
		LDC_PTR sgen_cardtable
		address >> CARD_BITS
		if (SGEN_HAVE_OVERLAPPING_CARDS) {
		LDC_PTR card_table_mask
		*/
		mono_mb_emit_ptr (mb, sgen_cardtable);
		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_icon (mb, CARD_BITS);
		mono_mb_emit_byte (mb, CEE_SHR_UN);
#ifdef SGEN_HAVE_OVERLAPPING_CARDS
		mono_mb_emit_ptr (mb, (gpointer)CARD_MASK);
		mono_mb_emit_byte (mb, CEE_AND);

		/* Mark the card: store 1 into the computed card address. */
		mono_mb_emit_byte (mb, CEE_ADD);
		mono_mb_emit_icon (mb, 1);
		mono_mb_emit_byte (mb, CEE_STIND_I1);

		/* Patch the early-return branches from the nursery check. */
		for (i = 0; i < 3; ++i) {
			if (nursery_check_labels [i])
				mono_mb_patch_branch (mb, nursery_check_labels [i]);
		mono_mb_emit_byte (mb, CEE_RET);
	} else if (mono_runtime_has_tls_get ()) {
		emit_nursery_check (mb, nursery_check_labels);

		// if (ptr >= stack_end) goto need_wb;
		mono_mb_emit_ldarg (mb, 0);
		EMIT_TLS_ACCESS (mb, stack_end, stack_end_offset);
		label_need_wb = mono_mb_emit_branch (mb, CEE_BGE_UN);

		// if (ptr >= stack_start) return;
		dummy_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_ldloc_addr (mb, dummy_var);
		label_no_wb_3 = mono_mb_emit_branch (mb, CEE_BGE_UN);

		mono_mb_patch_branch (mb, label_need_wb);

		// buffer = STORE_REMSET_BUFFER;
		buffer_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
		EMIT_TLS_ACCESS (mb, store_remset_buffer, store_remset_buffer_offset);
		mono_mb_emit_stloc (mb, buffer_var);

		// buffer_index = STORE_REMSET_BUFFER_INDEX;
		buffer_index_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
		EMIT_TLS_ACCESS (mb, store_remset_buffer_index, store_remset_buffer_index_offset);
		mono_mb_emit_stloc (mb, buffer_index_var);

		// if (buffer [buffer_index] == ptr) return;
		mono_mb_emit_ldloc (mb, buffer_var);
		mono_mb_emit_ldloc (mb, buffer_index_var);
		g_assert (sizeof (gpointer) == 4 || sizeof (gpointer) == 8);
		/* Scale the index to a byte offset (shift by 2 or 3). */
		mono_mb_emit_icon (mb, sizeof (gpointer) == 4 ? 2 : 3);
		mono_mb_emit_byte (mb, CEE_SHL);
		mono_mb_emit_byte (mb, CEE_ADD);
		mono_mb_emit_byte (mb, CEE_LDIND_I);
		mono_mb_emit_ldarg (mb, 0);
		label_no_wb_4 = mono_mb_emit_branch (mb, CEE_BEQ);

		/* ++buffer_index */
		mono_mb_emit_ldloc (mb, buffer_index_var);
		mono_mb_emit_icon (mb, 1);
		mono_mb_emit_byte (mb, CEE_ADD);
		mono_mb_emit_stloc (mb, buffer_index_var);

		// if (buffer_index >= STORE_REMSET_BUFFER_SIZE) goto slow_path;
		mono_mb_emit_ldloc (mb, buffer_index_var);
		mono_mb_emit_icon (mb, STORE_REMSET_BUFFER_SIZE);
		label_slow_path = mono_mb_emit_branch (mb, CEE_BGE);

		// buffer [buffer_index] = ptr;
		mono_mb_emit_ldloc (mb, buffer_var);
		mono_mb_emit_ldloc (mb, buffer_index_var);
		g_assert (sizeof (gpointer) == 4 || sizeof (gpointer) == 8);
		mono_mb_emit_icon (mb, sizeof (gpointer) == 4 ? 2 : 3);
		mono_mb_emit_byte (mb, CEE_SHL);
		mono_mb_emit_byte (mb, CEE_ADD);
		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_byte (mb, CEE_STIND_I);

		// STORE_REMSET_BUFFER_INDEX = buffer_index;
		EMIT_TLS_ACCESS (mb, store_remset_buffer_index_addr, store_remset_buffer_index_addr_offset);
		mono_mb_emit_ldloc (mb, buffer_index_var);
		mono_mb_emit_byte (mb, CEE_STIND_I);

		/* Return point for all the "no barrier needed" branches. */
		for (i = 0; i < 3; ++i) {
			if (nursery_check_labels [i])
				mono_mb_patch_branch (mb, nursery_check_labels [i]);
		mono_mb_patch_branch (mb, label_no_wb_3);
		mono_mb_patch_branch (mb, label_no_wb_4);
		mono_mb_emit_byte (mb, CEE_RET);

		/* Slow path: buffer full, call out to the runtime. */
		mono_mb_patch_branch (mb, label_slow_path);

		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
		mono_mb_emit_byte (mb, CEE_RET);

	/* No managed fast path available: always call the runtime. */
	mono_mb_emit_ldarg (mb, 0);
	mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
	mono_mb_emit_byte (mb, CEE_RET);

	res = mono_mb_create_method (mb, sig, 16);

	/* Publish under the loader lock; discard ours if we lost the race. */
	mono_loader_lock ();
	if (write_barrier_method) {
		/* Already created */
		mono_free_method (res);

		/* double-checked locking */
		mono_memory_barrier ();
		write_barrier_method = res;

	mono_loader_unlock ();

	return write_barrier_method;
/* Human-readable GC name, heap-allocated for the caller. */
mono_gc_get_description (void)

	return g_strdup ("sgen");

mono_gc_set_desktop_mode (void)

mono_gc_is_moving (void)

mono_gc_is_disabled (void)

/* Debug printf gated on gc_debug_level; levels above the configured
 * threshold are dropped.
 * NOTE(review): no matching va_end is visible in this extract -- confirm
 * it exists in the elided lines. */
sgen_debug_printf (int level, const char *format, ...)

	if (level > gc_debug_level)

	va_start (ap, format);
	vfprintf (gc_debug_file, format, ap);

/* The stream debug output goes to (stderr or a configured file). */
sgen_get_logfile (void)

	return gc_debug_file;

BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)

sgen_get_nursery_clear_policy (void)

	return nursery_clear_policy;
/*
 * Returns the fake vtable used to overwrite dead memory ranges with a
 * byte-array filler object.  Built lazily from static storage on first
 * use.
 * NOTE(review): the lazy init is not visibly synchronized here --
 * confirm callers hold the GC lock.
 */
sgen_get_array_fill_vtable (void)

	if (!array_fill_vtable) {
		static MonoClass klass;
		static MonoVTable vtable;

		MonoDomain *domain = mono_get_root_domain ();

		/* A minimal byte-array class: element size 1, header only. */
		klass.element_class = mono_defaults.byte_class;

		klass.instance_size = sizeof (MonoArray);
		klass.sizes.element_size = 1;
		klass.name = "array_filler_type";

		vtable.klass = &klass;

		vtable.gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 1);

		array_fill_vtable = &vtable;

	return array_fill_vtable;
sgen_gc_unlock (void)

/* Forwarder into the configured major collector. */
sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback)

	major_collector.iterate_live_block_ranges (callback);

/* Forwarder into the configured major collector. */
sgen_major_collector_scan_card_table (SgenGrayQueue *queue)

	major_collector.scan_card_table (queue);

sgen_get_major_collector (void)

	return &major_collector;

/* Marks the current thread as skipped (not suspended) by the GC. */
void mono_gc_set_skip_thread (gboolean skip)

	SgenThreadInfo *info = mono_thread_info_current ();

	info->gc_disabled = skip;

sgen_get_remset (void)

/* Extra vtable flag bits for `class' (bridge marking). */
mono_gc_get_vtable_bits (MonoClass *class)

	if (sgen_need_bridge_processing () && sgen_is_bridge_class (class))
		return SGEN_GC_BIT_BRIDGE_OBJECT;

mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)

/* Debugging aid: stop the world and verify whole-heap consistency. */
sgen_check_whole_heap_stw (void)

	sgen_clear_nursery_fragments ();
	sgen_check_whole_heap ();
	restart_world (0, NULL);

#endif /* HAVE_SGEN_GC */