2 * sgen-gc.c: Simple generational GC.
5 * Paolo Molaro (lupus@ximian.com)
6 * Rodrigo Kumpera (kumpera@gmail.com)
8 * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
9 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
11 * Thread start/stop adapted from Boehm's GC:
12 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
13 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
14 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
15 * Copyright (c) 2000-2004 by Hewlett-Packard Company. All rights reserved.
16 * Copyright 2001-2003 Ximian, Inc
17 * Copyright 2003-2010 Novell, Inc.
18 * Copyright 2011 Xamarin, Inc.
19 * Copyright (C) 2012 Xamarin Inc
21 * This library is free software; you can redistribute it and/or
22 * modify it under the terms of the GNU Library General Public
23 * License 2.0 as published by the Free Software Foundation;
25 * This library is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
28 * Library General Public License for more details.
30 * You should have received a copy of the GNU Library General Public
31 * License 2.0 along with this library; if not, write to the Free
32 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
34 * Important: allocation always provides zeroed memory; having to do
35 * a memset after allocation is deadly for performance.
36 * Memory usage at startup is currently as follows:
38 * 64 KB internal space
40 * We should provide a small memory config with half the sizes
42 * We currently try to make as few mono assumptions as possible:
43 * 1) 2-word header with no GC pointers in it (first vtable, second to store the
45 * 2) gc descriptor is the second word in the vtable (first word in the class)
46 * 3) 8 byte alignment is the minimum and enough (not true for special structures (SIMD), FIXME)
47 * 4) there is a function to get an object's size and the number of
48 * elements in an array.
49 * 5) we know the special way bounds are allocated for complex arrays
50 * 6) we know about proxies and how to treat them when domains are unloaded
52 * Always try to keep stack usage to a minimum: no recursive behaviour
53 * and no large stack allocs.
55 * General description.
56 * Objects are initially allocated in a nursery using a fast bump-pointer technique.
57 * When the nursery is full we start a nursery collection: this is performed with a
59 * When the old generation is full we start a copying GC of the old generation as well:
60 * this will be changed to mark&sweep with copying when fragmentation becomes too severe
61 * in the future. Maybe we'll even do both during the same collection like IMMIX.
63 * The things that complicate this description are:
64 * *) pinned objects: we can't move them so we need to keep track of them
65 * *) no precise info of the thread stacks and registers: we need to be able to
66 * quickly find the objects that may be referenced conservatively and pin them
67 * (this makes the first issues more important)
68 * *) large objects are too expensive to be dealt with using copying GC: we handle them
69 * with mark/sweep during major collections
70 * *) some objects need to not move even if they are small (interned strings, Type handles):
71 * we use mark/sweep for them, too: they are not allocated in the nursery, but inside
72 * PinnedChunks regions
78 *) we could have a function pointer in MonoClass to implement
79 customized write barriers for value types
81 *) investigate the stuff needed to advance a thread to a GC-safe
82 point (single-stepping, read from unmapped memory etc) and implement it.
83 This would enable us to inline allocations and write barriers, for example,
84 or at least parts of them, like the write barrier checks.
85 We may need this also for handling precise info on stacks, even simple things
86 as having uninitialized data on the stack and having to wait for the prolog
87 to zero it. Not an issue for the last frame that we scan conservatively.
88 We could always not trust the value in the slots anyway.
90 *) modify the jit to save info about references in stack locations:
91 this can be done just for locals as a start, so that at least
92 part of the stack is handled precisely.
94 *) test/fix endianness issues
96 *) Implement a card table as the write barrier instead of remembered
97 sets? Card tables are not easy to implement with our current
98 memory layout. We have several different kinds of major heap
99 objects: Small objects in regular blocks, small objects in pinned
100 chunks and LOS objects. If we just have a pointer we have no way
101 to tell which kind of object it points into, therefore we cannot
102 know where its card table is. The least we have to do to make
103 this happen is to get rid of write barriers for indirect stores.
106 *) Get rid of write barriers for indirect stores. We can do this by
107 telling the GC to wbarrier-register an object once we do an ldloca
108 or ldelema on it, and to unregister it once it's not used anymore
109 (it can only travel downwards on the stack). The problem with
110 unregistering is that it needs to happen eventually no matter
111 what, even if exceptions are thrown, the thread aborts, etc.
112 Rodrigo suggested that we could do only the registering part and
113 let the collector find out (pessimistically) when it's safe to
114 unregister, namely when the stack pointer of the thread that
115 registered the object is higher than it was when the registering
116 happened. This might make for a good first implementation to get
117 some data on performance.
119 *) Some sort of blacklist support? Blacklists is a concept from the
120 Boehm GC: if during a conservative scan we find pointers to an
121 area which we might use as heap, we mark that area as unusable, so
122 pointer retention by random pinning pointers is reduced.
124 *) experiment with max small object size (very small right now - 2kb,
125 because it's tied to the max freelist size)
127 *) add an option to mmap the whole heap in one chunk: it makes for many
128 simplifications in the checks (put the nursery at the top and just use a single
129 check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
130 not flexible (too much of the address space may be used by default or we can't
131 increase the heap as needed) and we'd need a race-free mechanism to return memory
132 back to the system (mprotect(PROT_NONE) will still keep the memory allocated if it
133 was written to, munmap is needed, but the following mmap may not find the same segment
136 *) memzero the major fragments after restarting the world and optionally a smaller
139 *) investigate having fragment zeroing threads
141 *) separate locks for finalization and other minor stuff to reduce
144 *) try a different copying order to improve memory locality
146 *) a thread abort after a store but before the write barrier will
147 prevent the write barrier from executing
149 *) specialized dynamically generated markers/copiers
151 *) Dynamically adjust TLAB size to the number of threads. If we have
152 too many threads that do allocation, we might need smaller TLABs,
153 and we might get better performance with larger TLABs if we only
154 have a handful of threads. We could sum up the space left in all
155 assigned TLABs and if that's more than some percentage of the
156 nursery size, reduce the TLAB size.
158 *) Explore placing unreachable objects on unused nursery memory.
159 Instead of memset'ng a region to zero, place an int[] covering it.
160 A good place to start is add_nursery_frag. The tricky thing here is
161 placing those objects atomically outside of a collection.
163 *) Allocation should use asymmetric Dekker synchronization:
164 http://blogs.oracle.com/dave/resource/Asymmetric-Dekker-Synchronization.txt
165 This should help weak consistency archs.
172 #define _XOPEN_SOURCE
173 #define _DARWIN_C_SOURCE
179 #ifdef HAVE_PTHREAD_H
182 #ifdef HAVE_SEMAPHORE_H
183 #include <semaphore.h>
191 #include "metadata/sgen-gc.h"
192 #include "metadata/metadata-internals.h"
193 #include "metadata/class-internals.h"
194 #include "metadata/gc-internal.h"
195 #include "metadata/object-internals.h"
196 #include "metadata/threads.h"
197 #include "metadata/sgen-cardtable.h"
198 #include "metadata/sgen-ssb.h"
199 #include "metadata/sgen-protocol.h"
200 #include "metadata/sgen-archdep.h"
201 #include "metadata/sgen-bridge.h"
202 #include "metadata/sgen-memory-governor.h"
203 #include "metadata/sgen-hash-table.h"
204 #include "metadata/mono-gc.h"
205 #include "metadata/method-builder.h"
206 #include "metadata/profiler-private.h"
207 #include "metadata/monitor.h"
208 #include "metadata/threadpool-internals.h"
209 #include "metadata/mempool-internals.h"
210 #include "metadata/marshal.h"
211 #include "metadata/runtime.h"
212 #include "metadata/sgen-cardtable.h"
213 #include "metadata/sgen-pinning.h"
214 #include "metadata/sgen-workers.h"
215 #include "utils/mono-mmap.h"
216 #include "utils/mono-time.h"
217 #include "utils/mono-semaphore.h"
218 #include "utils/mono-counters.h"
219 #include "utils/mono-proclib.h"
220 #include "utils/mono-memory-model.h"
221 #include "utils/mono-logger-internal.h"
222 #include "utils/dtrace.h"
224 #include <mono/utils/mono-logger-internal.h>
225 #include <mono/utils/memcheck.h>
227 #if defined(__MACH__)
228 #include "utils/mach-support.h"
231 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
235 #include "mono/cil/opcode.def"
241 #undef pthread_create
243 #undef pthread_detach
246 * ######################################################################
247 * ######## Types and constants used by the GC.
248 * ######################################################################
251 /* 0 means not initialized, 1 is initialized, -1 means in progress */
252 static int gc_initialized = 0;
253 /* If set, check if we need to do something every X allocations */
254 gboolean has_per_allocation_action;
255 /* If set, do a heap check every X allocation */
256 guint32 verify_before_allocs = 0;
257 /* If set, do a minor collection before every X allocation */
258 guint32 collect_before_allocs = 0;
259 /* If set, do a whole heap check before each collection */
260 static gboolean whole_heap_check_before_collection = FALSE;
261 /* If set, do a heap consistency check before each minor collection */
262 static gboolean consistency_check_at_minor_collection = FALSE;
263 /* If set, check that there are no references to the domain left at domain unload */
264 static gboolean xdomain_checks = FALSE;
265 /* If not null, dump the heap after each collection into this file */
266 static FILE *heap_dump_file = NULL;
267 /* If set, mark stacks conservatively, even if precise marking is possible */
268 static gboolean conservative_stack_mark = FALSE;
269 /* If set, do a plausibility check on the scan_starts before and after
271 static gboolean do_scan_starts_check = FALSE;
272 static gboolean nursery_collection_is_parallel = FALSE;
273 static gboolean disable_minor_collections = FALSE;
274 static gboolean disable_major_collections = FALSE;
275 gboolean do_pin_stats = FALSE;
276 static gboolean do_verify_nursery = FALSE;
277 static gboolean do_dump_nursery_content = FALSE;
279 #ifdef HEAVY_STATISTICS
280 long long stat_objects_alloced_degraded = 0;
281 long long stat_bytes_alloced_degraded = 0;
283 long long stat_copy_object_called_nursery = 0;
284 long long stat_objects_copied_nursery = 0;
285 long long stat_copy_object_called_major = 0;
286 long long stat_objects_copied_major = 0;
288 long long stat_scan_object_called_nursery = 0;
289 long long stat_scan_object_called_major = 0;
291 long long stat_slots_allocated_in_vain;
293 long long stat_nursery_copy_object_failed_from_space = 0;
294 long long stat_nursery_copy_object_failed_forwarded = 0;
295 long long stat_nursery_copy_object_failed_pinned = 0;
296 long long stat_nursery_copy_object_failed_to_space = 0;
298 static int stat_wbarrier_set_field = 0;
299 static int stat_wbarrier_set_arrayref = 0;
300 static int stat_wbarrier_arrayref_copy = 0;
301 static int stat_wbarrier_generic_store = 0;
302 static int stat_wbarrier_set_root = 0;
303 static int stat_wbarrier_value_copy = 0;
304 static int stat_wbarrier_object_copy = 0;
307 int stat_minor_gcs = 0;
308 int stat_major_gcs = 0;
310 static long long stat_pinned_objects = 0;
312 static long long time_minor_pre_collection_fragment_clear = 0;
313 static long long time_minor_pinning = 0;
314 static long long time_minor_scan_remsets = 0;
315 static long long time_minor_scan_pinned = 0;
316 static long long time_minor_scan_registered_roots = 0;
317 static long long time_minor_scan_thread_data = 0;
318 static long long time_minor_finish_gray_stack = 0;
319 static long long time_minor_fragment_creation = 0;
321 static long long time_major_pre_collection_fragment_clear = 0;
322 static long long time_major_pinning = 0;
323 static long long time_major_scan_pinned = 0;
324 static long long time_major_scan_registered_roots = 0;
325 static long long time_major_scan_thread_data = 0;
326 static long long time_major_scan_alloc_pinned = 0;
327 static long long time_major_scan_finalized = 0;
328 static long long time_major_scan_big_objects = 0;
329 static long long time_major_finish_gray_stack = 0;
330 static long long time_major_free_bigobjs = 0;
331 static long long time_major_los_sweep = 0;
332 static long long time_major_sweep = 0;
333 static long long time_major_fragment_creation = 0;
335 int gc_debug_level = 0;
/* Flush any buffered GC debug output to gc_debug_file. */
340 mono_gc_flush_info (void)
342 fflush (gc_debug_file);
346 #define TV_DECLARE SGEN_TV_DECLARE
347 #define TV_GETTIME SGEN_TV_GETTIME
348 #define TV_ELAPSED SGEN_TV_ELAPSED
349 #define TV_ELAPSED_MS SGEN_TV_ELAPSED_MS
351 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
353 NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
355 #define object_is_forwarded SGEN_OBJECT_IS_FORWARDED
356 #define object_is_pinned SGEN_OBJECT_IS_PINNED
357 #define pin_object SGEN_PIN_OBJECT
358 #define unpin_object SGEN_UNPIN_OBJECT
360 #define ptr_in_nursery sgen_ptr_in_nursery
362 #define LOAD_VTABLE SGEN_LOAD_VTABLE
/* Debug helper: class name of OBJ, read straight from its vtable. */
365 safe_name (void* obj)
367 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
368 return vt->klass->name;
371 #define safe_object_get_size sgen_safe_object_get_size
/* Exported wrapper around safe_name () for other sgen modules. */
374 sgen_safe_name (void* obj)
376 return safe_name (obj);
380 * ######################################################################
381 * ######## Global data.
382 * ######################################################################
384 LOCK_DECLARE (gc_mutex);
385 static int gc_disabled = 0;
387 static gboolean use_cardtable;
389 #define SCAN_START_SIZE SGEN_SCAN_START_SIZE
391 static mword pagesize = 4096;
392 int degraded_mode = 0;
394 static mword bytes_pinned_from_failed_allocation = 0;
396 GCMemSection *nursery_section = NULL;
397 static mword lowest_heap_address = ~(mword)0;
398 static mword highest_heap_address = 0;
400 LOCK_DECLARE (sgen_interruption_mutex);
401 static LOCK_DECLARE (pin_queue_mutex);
403 #define LOCK_PIN_QUEUE mono_mutex_lock (&pin_queue_mutex)
404 #define UNLOCK_PIN_QUEUE mono_mutex_unlock (&pin_queue_mutex)
406 typedef struct _FinalizeReadyEntry FinalizeReadyEntry;
407 struct _FinalizeReadyEntry {
408 FinalizeReadyEntry *next;
412 typedef struct _EphemeronLinkNode EphemeronLinkNode;
414 struct _EphemeronLinkNode {
415 EphemeronLinkNode *next;
424 int current_collection_generation = -1;
425 volatile gboolean concurrent_collection_in_progress = FALSE;
427 /* objects that are ready to be finalized */
428 static FinalizeReadyEntry *fin_ready_list = NULL;
429 static FinalizeReadyEntry *critical_fin_list = NULL;
431 static EphemeronLinkNode *ephemeron_list;
433 /* registered roots: the key to the hash is the root start address */
435 * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
437 SgenHashTable roots_hash [ROOT_TYPE_NUM] = {
438 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
439 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
440 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL)
442 static mword roots_size = 0; /* amount of memory in the root set */
444 #define GC_ROOT_NUM 32
446 int count; /* must be the first field */
447 void *objects [GC_ROOT_NUM];
448 int root_types [GC_ROOT_NUM];
449 uintptr_t extra_info [GC_ROOT_NUM];
/* Report the roots batched in REPORT to the profiler. */
453 notify_gc_roots (GCRootReport *report)
457 mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
/* Append one root entry to REPORT, flushing it to the profiler first
 * when the fixed-size buffer (GC_ROOT_NUM entries) is already full. */
462 add_profile_gc_root (GCRootReport *report, void *object, int rtype, uintptr_t extra_info)
464 if (report->count == GC_ROOT_NUM)
465 notify_gc_roots (report);
466 report->objects [report->count] = object;
467 report->root_types [report->count] = rtype;
/* The stored extra info is the object's class pointer; NOTE(review):
 * the extra_info parameter is never read in the visible code — confirm
 * against the full source whether it is intentionally ignored. */
468 report->extra_info [report->count++] = (uintptr_t)((MonoVTable*)LOAD_VTABLE (object))->klass;
471 MonoNativeTlsKey thread_info_key;
473 #ifdef HAVE_KW_THREAD
474 __thread SgenThreadInfo *sgen_thread_info;
475 __thread gpointer *store_remset_buffer;
476 __thread long store_remset_buffer_index;
477 __thread char *stack_end;
478 __thread long *store_remset_buffer_index_addr;
481 /* The size of a TLAB */
482 /* The bigger the value, the less often we have to go to the slow path to allocate a new
483 * one, but the more space is wasted by threads not allocating much memory.
485 * FIXME: Make this self-tuning for each thread.
487 guint32 tlab_size = (1024 * 4);
489 #define MAX_SMALL_OBJ_SIZE SGEN_MAX_SMALL_OBJ_SIZE
491 /* Functions supplied by the runtime to be called by the GC */
492 static MonoGCCallbacks gc_callbacks;
494 #define ALLOC_ALIGN SGEN_ALLOC_ALIGN
495 #define ALLOC_ALIGN_BITS SGEN_ALLOC_ALIGN_BITS
497 #define ALIGN_UP SGEN_ALIGN_UP
499 #define MOVED_OBJECTS_NUM 64
500 static void *moved_objects [MOVED_OBJECTS_NUM];
501 static int moved_objects_idx = 0;
503 /* Vtable of the objects used to fill out nursery fragments before a collection */
504 static MonoVTable *array_fill_vtable;
506 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
507 MonoNativeThreadId main_gc_thread = NULL;
510 /*Object was pinned during the current collection*/
511 static mword objects_pinned;
514 * ######################################################################
515 * ######## Macros and function declarations.
516 * ######################################################################
/* Round PTR up to the next gpointer-size-aligned address. */
520 align_pointer (void *ptr)
522 mword p = (mword)ptr;
/* Classic align-up: add (alignment - 1), then mask off the low bits. */
523 p += sizeof (gpointer) - 1;
524 p &= ~ (sizeof (gpointer) - 1);
528 typedef SgenGrayQueue GrayQueue;
530 /* forward declarations */
531 static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue);
532 static void scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, ScanObjectFunc scan_func, char *addr_start, char *addr_end, int root_type, GrayQueue *queue);
533 static void scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeReadyEntry *list, GrayQueue *queue);
534 static void report_finalizer_roots (void);
535 static void report_registered_roots (void);
537 static void pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue);
538 static int pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, GrayQueue *queue, gboolean only_enqueue);
539 static void finish_gray_stack (char *start_addr, char *end_addr, int generation, GrayQueue *queue);
541 void mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise);
544 static void init_stats (void);
546 static int mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue);
547 static void clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue);
548 static void null_ephemerons_for_domain (MonoDomain *domain);
550 SgenObjectOperations current_object_ops;
551 SgenMajorCollector major_collector;
552 SgenMinorCollector sgen_minor_collector;
553 static GrayQueue gray_queue;
555 static SgenRemeberedSet remset;
557 /* The gray queue to use from the main collection thread. */
558 static SgenGrayQueue*
559 sgen_workers_get_main_thread_queue (void)
/* In parallel/concurrent collections the main thread hands objects to
 * worker threads via the distribute gray queue; the fallback return for
 * the serial case is not visible in this extract. */
561 if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ())
562 return sgen_workers_get_distribute_gray_queue ();
566 #define WORKERS_DISTRIBUTE_GRAY_QUEUE (sgen_workers_get_main_thread_queue ())
569 * The gray queue a worker job must use. If we're not parallel or
570 * concurrent, we use the main gray queue.
/* Gray queue a worker job must use: the worker's private queue when run
 * on a worker thread, else the main-thread queue (see macro above). */
572 static SgenGrayQueue*
573 sgen_workers_get_job_gray_queue (WorkerData *worker_data)
575 return worker_data ? &worker_data->private_gray_queue : WORKERS_DISTRIBUTE_GRAY_QUEUE;
578 static LOCK_DECLARE (workers_distribute_gray_queue_mutex);
/* Push a major-heap object onto the concurrent collector's distribute
 * gray queue so the running concurrent mark will (re)scan it. */
581 sgen_remember_major_object_for_concurrent_mark (char *obj)
/* Outside a nursery collection other threads may also touch the
 * distribute queue, so the enqueue then needs the lock. */
583 gboolean need_lock = current_collection_generation != GENERATION_NURSERY;
/* Only meaningful for a concurrent-capable major collector. */
585 if (!major_collector.is_concurrent)
588 g_assert (current_collection_generation == GENERATION_NURSERY || current_collection_generation == -1);
/* Nothing to remember when no concurrent mark is in progress. */
590 if (!concurrent_collection_in_progress)
/* NOTE(review): the lock/unlock calls are presumably guarded by
 * need_lock in the full source — the guard lines are not visible here. */
594 mono_mutex_lock (&workers_distribute_gray_queue_mutex);
596 sgen_gray_object_enqueue (sgen_workers_get_distribute_gray_queue (), obj);
599 mono_mutex_unlock (&workers_distribute_gray_queue_mutex);
/* Whitelist of cross-domain references created by the runtime itself:
 * returns TRUE when the reference stored at PTR inside OBJ is allowed
 * to point into a domain other than DOMAIN. Matches are identified
 * either by field offset in known runtime classes or by class-name
 * string comparison for specific corlib patterns. */
603 is_xdomain_ref_allowed (gpointer *ptr, char *obj, MonoDomain *domain)
605 MonoObject *o = (MonoObject*)(obj);
606 MonoObject *ref = (MonoObject*)*(ptr);
/* Byte offset of the reference slot inside the referencing object. */
607 int offset = (char*)(ptr) - (char*)o;
/* Runtime-internal fields that legitimately cross domains. */
609 if (o->vtable->klass == mono_defaults.thread_class && offset == G_STRUCT_OFFSET (MonoThread, internal_thread))
611 if (o->vtable->klass == mono_defaults.internal_thread_class && offset == G_STRUCT_OFFSET (MonoInternalThread, current_appcontext))
613 if (mono_class_has_parent_fast (o->vtable->klass, mono_defaults.real_proxy_class) &&
614 offset == G_STRUCT_OFFSET (MonoRealProxy, unwrapped_server)
616 /* Thread.cached_culture_info */
617 if (!strcmp (ref->vtable->klass->name_space, "System.Globalization") &&
618 !strcmp (ref->vtable->klass->name, "CultureInfo") &&
619 !strcmp(o->vtable->klass->name_space, "System") &&
620 !strcmp(o->vtable->klass->name, "Object[]"))
/* The stack trace below documents the remoting path that produces the
 * MemoryStream -> Byte[] cross-domain reference allowed next. */
623 * at System.IO.MemoryStream.InternalConstructor (byte[],int,int,bool,bool) [0x0004d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:121
624 * at System.IO.MemoryStream..ctor (byte[]) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:81
625 * at (wrapper remoting-invoke-with-check) System.IO.MemoryStream..ctor (byte[]) <IL 0x00020, 0xffffffff>
626 * at System.Runtime.Remoting.Messaging.CADMethodCallMessage.GetArguments () [0x0000d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/CADMessages.cs:327
627 * at System.Runtime.Remoting.Messaging.MethodCall..ctor (System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/MethodCall.cs:87
628 * at System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) [0x00018] in /home/schani/Work/novell/trunk/mcs/class/corlib/System/AppDomain.cs:1213
629 * at (wrapper remoting-invoke-with-check) System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) <IL 0x0003d, 0xffffffff>
630 * at System.Runtime.Remoting.Channels.CrossAppDomainSink.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00008] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Channels/CrossAppDomainChannel.cs:198
631 * at (wrapper runtime-invoke) object.runtime_invoke_CrossAppDomainSink/ProcessMessageRes_object_object (object,intptr,intptr,intptr) <IL 0x0004c, 0xffffffff>
633 if (!strcmp (ref->vtable->klass->name_space, "System") &&
634 !strcmp (ref->vtable->klass->name, "Byte[]") &&
635 !strcmp (o->vtable->klass->name_space, "System.IO") &&
636 !strcmp (o->vtable->klass->name, "MemoryStream"))
638 /* append_job() in threadpool.c */
639 if (!strcmp (ref->vtable->klass->name_space, "System.Runtime.Remoting.Messaging") &&
640 !strcmp (ref->vtable->klass->name, "AsyncResult") &&
641 !strcmp (o->vtable->klass->name_space, "System") &&
642 !strcmp (o->vtable->klass->name, "Object[]") &&
643 mono_thread_pool_is_queue_array ((MonoArray*) o))
/* Diagnostic: if the reference stored at PTR inside OBJ points to an
 * object of another domain and is not whitelisted, print the offending
 * object/field and then scan the whole heap for everything pointing to
 * the referencing object. */
649 check_reference_for_xdomain (gpointer *ptr, char *obj, MonoDomain *domain)
651 MonoObject *o = (MonoObject*)(obj);
652 MonoObject *ref = (MonoObject*)*(ptr);
653 int offset = (char*)(ptr) - (char*)o;
655 MonoClassField *field;
/* Null or same-domain references are always fine. */
658 if (!ref || ref->vtable->domain == domain)
660 if (is_xdomain_ref_allowed (ptr, obj, domain))
/* Walk the class hierarchy to find the field matching the slot offset,
 * so the report can name it. */
664 for (class = o->vtable->klass; class; class = class->parent) {
667 for (i = 0; i < class->field.count; ++i) {
668 if (class->fields[i].offset == offset) {
669 field = &class->fields[i];
/* For string targets, include the string contents in the report. */
677 if (ref->vtable->klass == mono_defaults.string_class)
678 str = mono_string_to_utf8 ((MonoString*)ref);
681 g_print ("xdomain reference in %p (%s.%s) at offset %d (%s) to %p (%s.%s) (%s) - pointed to by:\n",
682 o, o->vtable->klass->name_space, o->vtable->klass->name,
683 offset, field ? field->name : "",
684 ref, ref->vtable->klass->name_space, ref->vtable->klass->name, str ? str : "");
685 mono_gc_scan_for_specific_ref (o, TRUE);
/* HANDLE_PTR callback + sgen-scan-object.h expansion: visit every
 * reference in the object at START and check it for disallowed
 * cross-domain references against the object's own domain. */
691 #define HANDLE_PTR(ptr,obj) check_reference_for_xdomain ((ptr), (obj), domain)
694 scan_object_for_xdomain_refs (char *start, mword size, void *data)
696 MonoDomain *domain = ((MonoObject*)start)->vtable->domain;
698 #include "sgen-scan-object.h"
/* If TRUE, references are located precisely via the GC descriptor;
 * otherwise every word of the object is compared against the key. */
701 static gboolean scan_object_for_specific_ref_precise = TRUE;
/* Precise case: report each descriptor-located reference equal to `key`. */
704 #define HANDLE_PTR(ptr,obj) do { \
705 if ((MonoObject*)*(ptr) == key) { \
706 g_print ("found ref to %p in object %p (%s) at offset %td\n", \
707 key, (obj), safe_name ((obj)), ((char*)(ptr) - (char*)(obj))); \
/* Scan one object for references to KEY, precisely or word-by-word. */
712 scan_object_for_specific_ref (char *start, MonoObject *key)
/* Follow a forwarding pointer so the up-to-date copy is scanned. */
716 if ((forwarded = SGEN_OBJECT_IS_FORWARDED (start)))
719 if (scan_object_for_specific_ref_precise) {
720 #include "sgen-scan-object.h"
/* Conservative fallback: compare every word of the object. */
722 mword *words = (mword*)start;
723 size_t size = safe_object_get_size ((MonoObject*)start);
725 for (i = 0; i < size / sizeof (mword); ++i) {
726 if (words [i] == (mword)key) {
727 g_print ("found possible ref to %p in object %p (%s) at offset %td\n",
728 key, start, safe_name (start), i * sizeof (mword));
/* Walk all objects in the area [start, end), invoking CALLBACK on each.
 * Null words (holes/padding) are skipped one word at a time; when
 * ALLOW_FLAGS is set, forwarding pointers are followed. Objects whose
 * vtable is the array-fill vtable are nursery padding and are never
 * reported to the callback. */
735 sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags)
737 while (start < end) {
741 if (!*(void**)start) {
742 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
747 if (!(obj = SGEN_OBJECT_IS_FORWARDED (start)))
753 size = ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
755 if ((MonoVTable*)SGEN_LOAD_VTABLE (obj) != array_fill_vtable)
756 callback (obj, size, data);
/* IterateObjectCallbackFunc adapter for scan_object_for_specific_ref (). */
763 scan_object_for_specific_ref_callback (char *obj, size_t size, MonoObject *key)
765 scan_object_for_specific_ref (obj, key);
/* Report ROOT when the object it references is the key being searched
 * for (the comparison itself is not visible in this extract). */
769 check_root_obj_specific_ref (RootRecord *root, MonoObject *key, MonoObject *obj)
773 g_print ("found ref to %p in root record %p\n", key, root);
/* Search key and current root for the user-marker callback below. */
776 static MonoObject *check_key = NULL;
777 static RootRecord *check_root = NULL;
/* MonoGCRootMarkFunc adapter: forwards each visited root slot to
 * check_root_obj_specific_ref () using the static key/root above. */
780 check_root_obj_specific_ref_from_marker (void **obj)
782 check_root_obj_specific_ref (check_root, check_key, *obj);
/* Search every registered root of ROOT_TYPE for references to KEY,
 * decoding each root's descriptor (bitmap, complex, or user-marked). */
786 scan_roots_for_specific_ref (MonoObject *key, int root_type)
792 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
793 mword desc = root->root_desc;
797 switch (desc & ROOT_DESC_TYPE_MASK) {
798 case ROOT_DESC_BITMAP:
/* Bitmap root: one descriptor bit per pointer-sized slot. */
799 desc >>= ROOT_DESC_TYPE_SHIFT;
802 check_root_obj_specific_ref (root, key, *start_root);
807 case ROOT_DESC_COMPLEX: {
/* Complex root: a sequence of bitmap words; the first word holds the
 * word count, each subsequent word covers GC_BITS_PER_WORD slots. */
808 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
809 int bwords = (*bitmap_data) - 1;
810 void **start_run = start_root;
812 while (bwords-- > 0) {
813 gsize bmap = *bitmap_data++;
814 void **objptr = start_run;
817 check_root_obj_specific_ref (root, key, *objptr);
821 start_run += GC_BITS_PER_WORD;
825 case ROOT_DESC_USER: {
/* User roots are enumerated via their registered marker function. */
826 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
827 marker (start_root, check_root_obj_specific_ref_from_marker);
830 case ROOT_DESC_RUN_LEN:
831 g_assert_not_reached ();
833 g_assert_not_reached ();
835 } SGEN_HASH_TABLE_FOREACH_END;
/* Debug entry point: report every location in the whole heap (nursery,
 * major heap, LOS, and all root sets) that references KEY. PRECISE
 * selects descriptor-based vs. word-by-word object scanning. */
842 mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise)
847 scan_object_for_specific_ref_precise = precise;
849 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
850 (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key, TRUE);
852 major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
854 sgen_los_iterate_objects ((IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
856 scan_roots_for_specific_ref (key, ROOT_TYPE_NORMAL);
857 scan_roots_for_specific_ref (key, ROOT_TYPE_WBARRIER);
/* Pinned roots carry no descriptor, so each slot is checked directly.
 * NOTE(review): arguments here are (root, *ptr, key) while the helper's
 * signature is (root, key, obj) — harmless if the check is a plain
 * equality, but verify against the full source. */
859 SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], ptr, root) {
860 while (ptr < (void**)root->end_root) {
861 check_root_obj_specific_ref (root, *ptr, key);
864 } SGEN_HASH_TABLE_FOREACH_END;
/* TRUE when START belongs to the domain being unloaded and therefore
 * must be removed from the heap; logs the removal to the binary protocol. */
868 need_remove_object_for_domain (char *start, MonoDomain *domain)
870 if (mono_object_domain (start) == domain) {
871 SGEN_LOG (4, "Need to cleanup object %p", start);
872 binary_protocol_cleanup (start, (gpointer)LOAD_VTABLE (start), safe_object_get_size ((MonoObject*)start));
/* Per-object fixups before a domain is torn down: sanity-check thread
 * objects and null out remoting proxies' cross-domain server pointers. */
879 process_object_for_domain_clearing (char *start, MonoDomain *domain)
881 GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
/* Thread objects must always live in the root domain. */
882 if (vt->klass == mono_defaults.internal_thread_class)
883 g_assert (mono_object_domain (start) == mono_get_root_domain ());
884 /* The object could be a proxy for an object in the domain
886 if (mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
887 MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
889 /* The server could already have been zeroed out, so
890 we need to check for that, too. */
891 if (server && (!LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
892 SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
893 ((MonoRealProxy*)start)->unwrapped_server = NULL;
/* Domain currently being verified by the root-marker callback below. */
898 static MonoDomain *check_domain = NULL;
/* Assert that the object in a root slot is not from check_domain. */
901 check_obj_not_in_domain (void **o)
903 g_assert (((MonoObject*)(*o))->vtable->domain != check_domain);
/* Verify at domain unload that no registered root of ROOT_TYPE still
 * references an object of DOMAIN. The descriptor decoding mirrors
 * scan_roots_for_specific_ref () above. */
907 scan_for_registered_roots_in_domain (MonoDomain *domain, int root_type)
911 check_domain = domain;
912 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
913 mword desc = root->root_desc;
915 /* The MonoDomain struct is allowed to hold
916 references to objects in its own domain. */
917 if (start_root == (void**)domain)
920 switch (desc & ROOT_DESC_TYPE_MASK) {
921 case ROOT_DESC_BITMAP:
922 desc >>= ROOT_DESC_TYPE_SHIFT;
/* Check only slots whose descriptor bit is set and that are non-null. */
924 if ((desc & 1) && *start_root)
925 check_obj_not_in_domain (*start_root);
930 case ROOT_DESC_COMPLEX: {
/* Multi-word bitmap; first word is the word count. */
931 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
932 int bwords = (*bitmap_data) - 1;
933 void **start_run = start_root;
935 while (bwords-- > 0) {
936 gsize bmap = *bitmap_data++;
937 void **objptr = start_run;
939 if ((bmap & 1) && *objptr)
940 check_obj_not_in_domain (*objptr);
944 start_run += GC_BITS_PER_WORD;
948 case ROOT_DESC_USER: {
949 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
950 marker (start_root, check_obj_not_in_domain);
953 case ROOT_DESC_RUN_LEN:
954 g_assert_not_reached ();
956 g_assert_not_reached ();
958 } SGEN_HASH_TABLE_FOREACH_END;
/* Scan the nursery, the major heap, and all LOS objects for disallowed
 * cross-domain references (debugging aid, see xdomain_checks). */
964 check_for_xdomain_refs (void)
968 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
969 (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL, FALSE);
971 major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);
/* LOS objects are walked directly via the big-object list. */
973 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
974 scan_object_for_xdomain_refs (bigobj->data, sgen_los_object_size (bigobj), NULL);
/*
 * Per-object callbacks used by mono_gc_clear_domain () while sweeping the
 * heap for a dying appdomain:
 *
 *  - clear_domain_process_object (): runs proxy cleanup on @obj, decides
 *    whether it must be removed because it belongs to @domain, and if so
 *    unregisters its monitor weak link so the sync block is not left
 *    dangling.
 *  - *_minor_object_callback: for nursery objects, additionally zeroes
 *    the memory (the nursery must stay walkable/zeroed).
 *  - *_major_object_callback: first pass over major objects — cleanup
 *    only; freeing happens in the second pass.
 *  - *_free_major_{non_pinned,pinned}_object_callback: second pass —
 *    actually releases major-heap objects of the dead domain.
 */
978 clear_domain_process_object (char *obj, MonoDomain *domain)
982 process_object_for_domain_clearing (obj, domain);
983 remove = need_remove_object_for_domain (obj, domain);
985 if (remove && ((MonoObject*)obj)->synchronisation) {
/* Drop the monitor's weak link before the object goes away. */
986 void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
988 sgen_register_disappearing_link (NULL, dislink, FALSE, TRUE);
995 clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
997 if (clear_domain_process_object (obj, domain))
998 memset (obj, 0, size);
1002 clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
1004 clear_domain_process_object (obj, domain);
1008 clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
1010 if (need_remove_object_for_domain (obj, domain))
1011 major_collector.free_non_pinned_object (obj, size);
1015 clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
1017 if (need_remove_object_for_domain (obj, domain))
1018 major_collector.free_pinned_object (obj, size);
/*
 * When appdomains are unloaded we can easily remove objects that have finalizers,
 * but all the others could still be present in random places on the heap.
 * We need a sweep to get rid of them even though it's going to be costly
 * The reason we need to remove them is because we access the vtable and class
 * structures to know the object size and the reference bitmap: once the domain is
 * unloaded the point to random memory.
 */
1022 * When appdomains are unloaded we can easily remove objects that have finalizers,
1023 * but all the others could still be present in random places on the heap.
1024 * We need a sweep to get rid of them even though it's going to be costly
1026 * The reason we need to remove them is because we access the vtable and class
1027 * structures to know the object size and the reference bitmap: once the domain is
1028 * unloaded the point to random memory.
1031 mono_gc_clear_domain (MonoDomain * domain)
1033 LOSObject *bigobj, *prev;
/* Flush pending finalizer/dislink registrations so the tables are
 * complete before we null entries for this domain. */
1038 sgen_process_fin_stage_entries ();
1039 sgen_process_dislink_stage_entries ();
1041 sgen_clear_nursery_fragments ();
1043 if (xdomain_checks && domain != mono_get_root_domain ()) {
1044 scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
1045 scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
1046 check_for_xdomain_refs ();
1049 /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
1050 to memory returned to the OS.*/
1051 null_ephemerons_for_domain (domain);
1053 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
1054 sgen_null_links_for_domain (domain, i);
1056 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
1057 sgen_remove_finalizers_for_domain (domain, i);
1059 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
1060 (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE);
1062 /* We need two passes over major and large objects because
1063 freeing such objects might give their memory back to the OS
1064 (in the case of large objects) or obliterate its vtable
1065 (pinned objects with major-copying or pinned and non-pinned
1066 objects with major-mark&sweep), but we might need to
1067 dereference a pointer from an object to another object if
1068 the first object is a proxy. */
1069 major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
1070 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
1071 clear_domain_process_object (bigobj->data, domain);
/* Second pass: unlink and free LOS objects of the dead domain. */
1074 for (bigobj = los_object_list; bigobj;) {
1075 if (need_remove_object_for_domain (bigobj->data, domain)) {
1076 LOSObject *to_free = bigobj;
1078 prev->next = bigobj->next;
1080 los_object_list = bigobj->next;
1081 bigobj = bigobj->next;
/* NOTE(review): this logs bigobj->data *after* bigobj was advanced —
 * presumably to_free->data was intended, and bigobj may be NULL here.
 * Verify against the upstream source before changing. */
1082 SGEN_LOG (4, "Freeing large object %p", bigobj->data);
1083 sgen_los_free_object (to_free);
1087 bigobj = bigobj->next;
1089 major_collector.iterate_objects (TRUE, FALSE, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
1090 major_collector.iterate_objects (FALSE, TRUE, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
1092 if (G_UNLIKELY (do_pin_stats)) {
/* Pin statistics only make sense for the root domain's lifetime. */
1093 if (domain == mono_get_root_domain ())
1094 sgen_pin_stats_print_class_stats ();
/*
 * sgen_add_to_global_remset:
 *
 * The global remset contains locations which point into newspace after
 * a minor collection. This can happen if the objects they point to are pinned.
 *
 * LOCKING: If called from a parallel collector, the global remset
 * lock must be held. For serial collectors that is not necessary.
 */
1101 * sgen_add_to_global_remset:
1103 * The global remset contains locations which point into newspace after
1104 * a minor collection. This can happen if the objects they point to are pinned.
1106 * LOCKING: If called from a parallel collector, the global remset
1107 * lock must be held. For serial collectors that is not necessary.
1110 sgen_add_to_global_remset (gpointer ptr)
/* Delegate to the active remembered-set implementation. */
1112 remset.record_pointer (ptr);
1116 * sgen_drain_gray_stack:
1118 * Scan objects in the gray stack until the stack is empty. This should be called
1119 * frequently after each object is copied, to achieve better locality and cache
/* @max_objs == -1 means "drain completely"; otherwise scan at most
 * @max_objs objects per round. */
1123 sgen_drain_gray_stack (GrayQueue *queue, ScanObjectFunc scan_func, int max_objs)
1127 if (max_objs == -1) {
1129 GRAY_OBJECT_DEQUEUE (queue, obj);
1132 SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, safe_name (obj));
1133 scan_func (obj, queue);
1139 for (i = 0; i != max_objs; ++i) {
1140 GRAY_OBJECT_DEQUEUE (queue, obj);
1143 SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, safe_name (obj));
1144 scan_func (obj, queue);
/* NOTE(review): loop condition max_objs < 0 with max_objs >= 0 on this
 * path means a single round — confirm against the full source. */
1146 } while (max_objs < 0);
/*
 * Addresses from start to end are already sorted. This function finds
 * the object header for each address and pins the object. The
 * addresses must be inside the passed section. The (start of the)
 * address array is overwritten with the addresses of the actually
 * pinned objects. Return the number of pinned objects.
 */
1152 * Addresses from start to end are already sorted. This function finds
1153 * the object header for each address and pins the object. The
1154 * addresses must be inside the passed section. The (start of the)
1155 * address array is overwritten with the addresses of the actually
1156 * pinned objects. Return the number of pinned objects.
1159 pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, GrayQueue *queue, gboolean only_enqueue)
1164 void *last_obj = NULL;
1165 size_t last_obj_size = 0;
1168 void **definitely_pinned = start;
1170 sgen_nursery_allocator_prepare_for_pinning ();
1172 while (start < end) {
1174 /* the range check should be reduntant */
1175 if (addr != last && addr >= start_nursery && addr < end_nursery) {
1176 SGEN_LOG (5, "Considering pinning addr %p", addr);
1177 /* multiple pointers to the same object */
1178 if (addr >= last_obj && (char*)addr < (char*)last_obj + last_obj_size) {
/* Use the per-section scan-start table to find a nearby known
 * object boundary to start walking from. */
1182 idx = ((char*)addr - (char*)section->data) / SCAN_START_SIZE;
1183 g_assert (idx < section->num_scan_start);
1184 search_start = (void*)section->scan_starts [idx];
1185 if (!search_start || search_start > addr) {
1188 search_start = section->scan_starts [idx];
1189 if (search_start && search_start <= addr)
1192 if (!search_start || search_start > addr)
1193 search_start = start_nursery;
1195 if (search_start < last_obj)
1196 search_start = (char*)last_obj + last_obj_size;
1197 /* now addr should be in an object a short distance from search_start
1198 * Note that search_start must point to zeroed mem or point to an object.
 */
1202 if (!*(void**)search_start) {
1203 /* Consistency check */
1205 for (frag = nursery_fragments; frag; frag = frag->next) {
1206 if (search_start >= frag->fragment_start && search_start < frag->fragment_end)
1207 g_assert_not_reached ();
/* Zeroed word: skip forward one pointer-aligned slot. */
1211 search_start = (void*)ALIGN_UP ((mword)search_start + sizeof (gpointer));
1214 last_obj = search_start;
1215 last_obj_size = ALIGN_UP (safe_object_get_size ((MonoObject*)search_start));
/* synchronisation == -1 is the sentinel for a nursery fragment. */
1217 if (((MonoObject*)last_obj)->synchronisation == GINT_TO_POINTER (-1)) {
1218 /* Marks the beginning of a nursery fragment, skip */
1220 SGEN_LOG (8, "Pinned try match %p (%s), size %zd", last_obj, safe_name (last_obj), last_obj_size);
1221 if (addr >= search_start && (char*)addr < (char*)last_obj + last_obj_size) {
1222 SGEN_LOG (4, "Pinned object %p, vtable %p (%s), count %d\n", search_start, *(void**)search_start, safe_name (search_start), count);
1223 binary_protocol_pin (search_start, (gpointer)LOAD_VTABLE (search_start), safe_object_get_size (search_start));
1224 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
/* DTrace probe for object pinning. */
1225 int gen = sgen_ptr_in_nursery (search_start) ? GENERATION_NURSERY : GENERATION_OLD;
1226 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (search_start);
1227 MONO_GC_OBJ_PINNED ((mword)search_start, sgen_safe_object_get_size (search_start), vt->klass->name_space, vt->klass->name, gen);
1230 pin_object (search_start);
1231 GRAY_OBJECT_ENQUEUE (queue, search_start);
1232 if (G_UNLIKELY (do_pin_stats))
1233 sgen_pin_stats_register_object (search_start, last_obj_size);
/* Compact the actually-pinned addresses into the front of the array. */
1234 definitely_pinned [count] = search_start;
1239 /* skip to the next object */
1240 search_start = (void*)((char*)search_start + last_obj_size);
1241 } while (search_start <= addr);
1242 /* we either pinned the correct object or we ignored the addr because
1243 * it points to unused zeroed memory.
 */
1249 //printf ("effective pinned: %d (at the end: %d)\n", count, (char*)end_nursery - (char*)last);
1250 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
1251 GCRootReport report;
1253 for (idx = 0; idx < count; ++idx)
1254 add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
1255 notify_gc_roots (&report);
1257 stat_pinned_objects += count;
/*
 * sgen_pin_objects_in_section:
 *
 * Resolves the section's staged pin addresses to actual objects via
 * pin_objects_from_addresses () and shrinks the pin queue to the
 * surviving entries.
 */
1262 sgen_pin_objects_in_section (GCMemSection *section, GrayQueue *queue, gboolean only_enqueue)
1264 int num_entries = section->pin_queue_num_entries;
1266 void **start = section->pin_queue_start;
1268 reduced_to = pin_objects_from_addresses (section, start, start + num_entries,
1269 section->data, section->next_data, queue, only_enqueue);
1270 section->pin_queue_num_entries = reduced_to;
1272 section->pin_queue_start = NULL;
/*
 * sgen_pin_object:
 *
 * Pins @object and pushes it on the gray queue.  In the parallel case the
 * object arrives with the pinned bit already set (CAS'd by the caller),
 * so only staging is needed; serial collectors set the bit here.
 */
1278 sgen_pin_object (void *object, GrayQueue *queue)
1280 g_assert (!concurrent_collection_in_progress);
1282 if (sgen_collection_is_parallel ()) {
1284 /*object arrives pinned*/
1285 sgen_pin_stage_ptr (object);
1289 SGEN_PIN_OBJECT (object);
1290 sgen_pin_stage_ptr (object);
1292 if (G_UNLIKELY (do_pin_stats))
1293 sgen_pin_stats_register_object (object, safe_object_get_size (object));
1295 GRAY_OBJECT_ENQUEUE (queue, object);
1296 binary_protocol_pin (object, (gpointer)LOAD_VTABLE (object), safe_object_get_size (object));
1297 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
/* DTrace probe for object pinning. */
1298 int gen = sgen_ptr_in_nursery (object) ? GENERATION_NURSERY : GENERATION_OLD;
1299 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (object);
1300 MONO_GC_OBJ_PINNED ((mword)object, sgen_safe_object_get_size (object), vt->klass->name_space, vt->klass->name, gen);
/*
 * sgen_parallel_pin_or_update:
 *
 * Parallel-collector helper: tries to atomically pin @obj by CAS'ing the
 * pinned bit into its vtable word.  If the CAS loses, another worker has
 * either forwarded the object (then *ptr is updated to the new location)
 * or already pinned it (nothing to do).
 */
1305 sgen_parallel_pin_or_update (void **ptr, void *obj, MonoVTable *vt, SgenGrayQueue *queue)
1309 gboolean major_pinned = FALSE;
1311 if (sgen_ptr_in_nursery (obj)) {
/* Winning the CAS makes us responsible for the actual pinning. */
1312 if (SGEN_CAS_PTR (obj, (void*)((mword)vt | SGEN_PINNED_BIT), vt) == vt) {
1313 sgen_pin_object (obj, queue);
1317 major_collector.pin_major_object (obj, queue);
1318 major_pinned = TRUE;
1321 vtable_word = *(mword*)obj;
1322 /*someone else forwarded it, update the pointer and bail out*/
1323 if (vtable_word & SGEN_FORWARDED_BIT) {
1324 *ptr = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1328 /*someone pinned it, nothing to do.*/
1329 if (vtable_word & SGEN_PINNED_BIT || major_pinned)
/* Sort the addresses in array in increasing order.
 * Done using a by-the book heap sort. Which has decent and stable performance, is pretty cache efficient.
 */
1334 /* Sort the addresses in array in increasing order.
1335 * Done using a by-the book heap sort. Which has decent and stable performance, is pretty cache efficient.
 */
1338 sgen_sort_addresses (void **array, int size)
/* Phase 1: sift each element up to build a max-heap in place. */
1343 for (i = 1; i < size; ++i) {
1346 int parent = (child - 1) / 2;
1348 if (array [parent] >= array [child])
1351 tmp = array [parent];
1352 array [parent] = array [child];
1353 array [child] = tmp;
/* Phase 2: repeatedly move the max to the end and re-heapify. */
1359 for (i = size - 1; i > 0; --i) {
1362 array [i] = array [0];
1368 while (root * 2 + 1 <= end) {
1369 int child = root * 2 + 1;
/* Pick the larger of the two children. */
1371 if (child < end && array [child] < array [child + 1])
1373 if (array [root] >= array [child])
1377 array [root] = array [child];
1378 array [child] = tmp;
/*
 * Scan the memory between start and end and queue values which could be pointers
 * to the area between start_nursery and end_nursery for later consideration.
 * Typically used for thread stacks.
 */
1386 * Scan the memory between start and end and queue values which could be pointers
1387 * to the area between start_nursery and end_nursery for later consideration.
1388 * Typically used for thread stacks.
1391 conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type)
1395 #ifdef VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE
/* Tell Valgrind the (possibly uninitialized) stack range is OK to read. */
1396 VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE (start, (char*)end - (char*)start);
1399 while (start < end) {
1400 if (*start >= start_nursery && *start < end_nursery) {
1402 * *start can point to the middle of an object
1403 * note: should we handle pointing at the end of an object?
1404 * pinning in C# code disallows pointing at the end of an object
1405 * but there is some small chance that an optimizing C compiler
1406 * may keep the only reference to an object by pointing
1407 * at the end of it. We ignore this small chance for now.
1408 * Pointers to the end of an object are indistinguishable
1409 * from pointers to the start of the next object in memory
1410 * so if we allow that we'd need to pin two objects...
1411 * We queue the pointer in an array, the
1412 * array will then be sorted and uniqued. This way
1413 * we can coalesce several pinning pointers and it should
1414 * be faster since we'd do a memory scan with increasing
1415 * addresses. Note: we can align the address to the allocation
1416 * alignment, so the unique process is more effective.
 */
1418 mword addr = (mword)*start;
1419 addr &= ~(ALLOC_ALIGN - 1);
1420 if (addr >= (mword)start_nursery && addr < (mword)end_nursery) {
1421 SGEN_LOG (6, "Pinning address %p from %p", (void*)addr, start);
1422 sgen_pin_stage_ptr ((void*)addr);
1425 if (G_UNLIKELY (do_pin_stats)) {
1426 if (ptr_in_nursery ((void*)addr))
1427 sgen_pin_stats_register_address ((char*)addr, pin_type);
1433 SGEN_LOG (7, "found %d potential pinned heap pointers", count);
/*
 * The first thing we do in a collection is to identify pinned objects.
 * This function considers all the areas of memory that need to be
 * conservatively scanned.
 */
1437 * The first thing we do in a collection is to identify pinned objects.
1438 * This function considers all the areas of memory that need to be
1439 * conservatively scanned.
1442 pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue)
1446 SGEN_LOG (2, "Scanning pinned roots (%d bytes, %d/%d entries)", (int)roots_size, roots_hash [ROOT_TYPE_NORMAL].num_entries, roots_hash [ROOT_TYPE_PINNED].num_entries);
1447 /* objects pinned from the API are inside these roots */
1448 SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], start_root, root) {
1449 SGEN_LOG (6, "Pinned roots %p-%p", start_root, root->end_root);
1450 conservatively_pin_objects_from (start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
1451 } SGEN_HASH_TABLE_FOREACH_END;
1452 /* now deal with the thread stacks
1453 * in the future we should be able to conservatively scan only:
1454 * *) the cpu registers
1455 * *) the unmanaged stack frames
1456 * *) the _last_ managed stack frame
1457 * *) pointers slots in managed frames
 */
1459 scan_thread_data (start_nursery, end_nursery, FALSE, queue);
/*
 * Thread-local plumbing that lets a single-argument user root-mark
 * callback reach the (copy_func, queue) pair for the current scan:
 * set_user_copy_or_mark_data () stashes the pair in TLS, and
 * single_arg_user_copy_or_mark () retrieves it and forwards the slot.
 */
1463 CopyOrMarkObjectFunc func;
1465 } UserCopyOrMarkData;
1467 static MonoNativeTlsKey user_copy_or_mark_key;
1470 init_user_copy_or_mark_key (void)
1472 mono_native_tls_alloc (&user_copy_or_mark_key, NULL);
1476 set_user_copy_or_mark_data (UserCopyOrMarkData *data)
1478 mono_native_tls_set_value (user_copy_or_mark_key, data);
1482 single_arg_user_copy_or_mark (void **obj)
1484 UserCopyOrMarkData *data = mono_native_tls_get_value (user_copy_or_mark_key);
1486 data->func (obj, data->queue);
/*
 * The memory area from start_root to end_root contains pointers to objects.
 * Their position is precisely described by @desc (this means that the pointer
 * can be either NULL or the pointer to the start of an object).
 * This functions copies them to to_space updates them.
 *
 * This function is not thread-safe!
 */
1490 * The memory area from start_root to end_root contains pointers to objects.
1491 * Their position is precisely described by @desc (this means that the pointer
1492 * can be either NULL or the pointer to the start of an object).
1493 * This functions copies them to to_space updates them.
1495 * This function is not thread-safe!
1498 precisely_scan_objects_from (CopyOrMarkObjectFunc copy_func, ScanObjectFunc scan_func, void** start_root, void** end_root, char* n_start, char *n_end, mword desc, GrayQueue *queue)
1500 switch (desc & ROOT_DESC_TYPE_MASK) {
1501 case ROOT_DESC_BITMAP:
/* Inline bitmap: each set bit is a reference slot. */
1502 desc >>= ROOT_DESC_TYPE_SHIFT;
1504 if ((desc & 1) && *start_root) {
1505 copy_func (start_root, queue);
1506 SGEN_LOG (9, "Overwrote root at %p with %p", start_root, *start_root);
/* Drain eagerly for cache locality — see sgen_drain_gray_stack. */
1507 sgen_drain_gray_stack (queue, scan_func, -1);
1513 case ROOT_DESC_COMPLEX: {
/* Out-of-line bitmap: first word is (count + 1) of bitmap words. */
1514 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1515 int bwords = (*bitmap_data) - 1;
1516 void **start_run = start_root;
1518 while (bwords-- > 0) {
1519 gsize bmap = *bitmap_data++;
1520 void **objptr = start_run;
1522 if ((bmap & 1) && *objptr) {
1523 copy_func (objptr, queue);
1524 SGEN_LOG (9, "Overwrote root at %p with %p", objptr, *objptr);
1525 sgen_drain_gray_stack (queue, scan_func, -1);
1530 start_run += GC_BITS_PER_WORD;
1534 case ROOT_DESC_USER: {
/* Hand the copy_func/queue pair to the user marker via TLS. */
1535 UserCopyOrMarkData data = { copy_func, queue };
1536 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1537 set_user_copy_or_mark_data (&data);
1538 marker (start_root, single_arg_user_copy_or_mark);
1539 set_user_copy_or_mark_data (NULL);
1542 case ROOT_DESC_RUN_LEN:
1543 g_assert_not_reached ();
1545 g_assert_not_reached ();
/*
 * reset_heap_boundaries () re-initializes the cached [lowest, highest)
 * heap address range; sgen_update_heap_boundaries () widens it to cover
 * [low, high) using CAS loops so it is safe against concurrent updaters.
 */
1550 reset_heap_boundaries (void)
1552 lowest_heap_address = ~(mword)0;
1553 highest_heap_address = 0;
1557 sgen_update_heap_boundaries (mword low, mword high)
/* Lower the minimum with a CAS loop; retry if another thread raced us. */
1562 old = lowest_heap_address;
1565 } while (SGEN_CAS_PTR ((gpointer*)&lowest_heap_address, (gpointer)low, (gpointer)old) != (gpointer)old);
/* Same for the maximum. */
1568 old = highest_heap_address;
1571 } while (SGEN_CAS_PTR ((gpointer*)&highest_heap_address, (gpointer)high, (gpointer)old) != (gpointer)old);
/*
 * Allocate and setup the data structures needed to be able to allocate objects
 * in the nursery. The nursery is stored in nursery_section.
 */
1575 * Allocate and setup the data structures needed to be able to allocate objects
1576 * in the nursery. The nursery is stored in nursery_section.
1579 alloc_nursery (void)
1581 GCMemSection *section;
/* Idempotent: bail out if the nursery was already set up. */
1586 if (nursery_section)
1588 SGEN_LOG (2, "Allocating nursery size: %lu", (unsigned long)sgen_nursery_size);
1589 /* later we will alloc a larger area for the nursery but only activate
1590 * what we need. The rest will be used as expansion if we have too many pinned
1591 * objects in the existing nursery.
 */
1593 /* FIXME: handle OOM */
1594 section = sgen_alloc_internal (INTERNAL_MEM_SECTION);
1596 alloc_size = sgen_nursery_size;
1598 /* If there isn't enough space even for the nursery we should simply abort. */
1599 g_assert (sgen_memgov_try_alloc_space (alloc_size, SPACE_NURSERY));
1601 #ifdef SGEN_ALIGN_NURSERY
/* Aligned nursery enables the fast ptr_in_nursery mask check. */
1602 data = major_collector.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
1604 data = major_collector.alloc_heap (alloc_size, 0, DEFAULT_NURSERY_BITS);
1606 sgen_update_heap_boundaries ((mword)data, (mword)(data + sgen_nursery_size));
1607 SGEN_LOG (4, "Expanding nursery size (%p-%p): %lu, total: %lu", data, data + alloc_size, (unsigned long)sgen_nursery_size, (unsigned long)mono_gc_get_heap_size ());
1608 section->data = section->next_data = data;
1609 section->size = alloc_size;
1610 section->end_data = data + sgen_nursery_size;
/* One scan-start slot per SCAN_START_SIZE bytes, used by pinning. */
1611 scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
1612 section->scan_starts = sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
1613 section->num_scan_start = scan_starts;
1614 section->block.role = MEMORY_ROLE_GEN0;
1615 section->block.next = NULL;
1617 nursery_section = section;
1619 sgen_nursery_allocator_set_nursery_bounds (data, data + sgen_nursery_size);
/*
 * Small public accessors:
 *  - mono_gc_get_nursery (): returns the nursery start and reports its
 *    size; shift_bits is the alignment shift when the nursery is aligned.
 *  - mono_gc_set_current_thread_appdomain (): records the domain the
 *    current thread stopped in (tolerates a NULL thread info during
 *    unregister).
 *  - mono_gc_precise_stack_mark_enabled (): TRUE unless conservative
 *    stack marking was requested.
 *  - mono_gc_get_logfile (): the GC debug output file.
 */
1623 mono_gc_get_nursery (int *shift_bits, size_t *size)
1625 *size = sgen_nursery_size;
1626 #ifdef SGEN_ALIGN_NURSERY
1627 *shift_bits = DEFAULT_NURSERY_BITS;
1631 return sgen_get_nursery_start ();
1635 mono_gc_set_current_thread_appdomain (MonoDomain *domain)
1637 SgenThreadInfo *info = mono_thread_info_current ();
1639 /* Could be called from sgen_thread_unregister () with a NULL info */
1642 info->stopped_domain = domain;
1647 mono_gc_precise_stack_mark_enabled (void)
1649 return !conservative_stack_mark;
1653 mono_gc_get_logfile (void)
1655 return gc_debug_file;
/*
 * Profiler root reporting for finalizable objects: each entry in a
 * finalization list is reported as a MONO_PROFILE_GC_ROOT_FINALIZER root.
 * report_finalizer_roots () covers both the ready and the critical lists.
 */
1659 report_finalizer_roots_list (FinalizeReadyEntry *list)
1661 GCRootReport report;
1662 FinalizeReadyEntry *fin;
1665 for (fin = list; fin; fin = fin->next) {
1668 add_profile_gc_root (&report, fin->object, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
1670 notify_gc_roots (&report);
1674 report_finalizer_roots (void)
1676 report_finalizer_roots_list (fin_ready_list);
1677 report_finalizer_roots_list (critical_fin_list);
/* Current report for single_arg_report_root (); set by
 * precisely_report_roots_from () before invoking a user marker. */
1680 static GCRootReport *root_report;
1683 single_arg_report_root (void **obj)
1686 add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
/*
 * precisely_report_roots_from:
 *
 * Profiler counterpart of precisely_scan_objects_from (): decodes the
 * root descriptor the same way but, instead of copying/marking, reports
 * every non-NULL reference slot into @report.
 */
1690 precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
1692 switch (desc & ROOT_DESC_TYPE_MASK) {
1693 case ROOT_DESC_BITMAP:
1694 desc >>= ROOT_DESC_TYPE_SHIFT;
1696 if ((desc & 1) && *start_root) {
1697 add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
1703 case ROOT_DESC_COMPLEX: {
1704 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1705 int bwords = (*bitmap_data) - 1;
1706 void **start_run = start_root;
1708 while (bwords-- > 0) {
1709 gsize bmap = *bitmap_data++;
1710 void **objptr = start_run;
1712 if ((bmap & 1) && *objptr) {
1713 add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
1718 start_run += GC_BITS_PER_WORD;
1722 case ROOT_DESC_USER: {
/* Pass the report to the marker via the root_report global. */
1723 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1724 root_report = report;
1725 marker (start_root, single_arg_report_root);
1728 case ROOT_DESC_RUN_LEN:
1729 g_assert_not_reached ();
1731 g_assert_not_reached ();
/*
 * Walk all registered roots of @root_type and report their references to
 * the profiler; report_registered_roots () covers both root types.
 */
1736 report_registered_roots_by_type (int root_type)
1738 GCRootReport report;
1742 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1743 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1744 precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
1745 } SGEN_HASH_TABLE_FOREACH_END;
1746 notify_gc_roots (&report);
1750 report_registered_roots (void)
1752 report_registered_roots_by_type (ROOT_TYPE_NORMAL);
1753 report_registered_roots_by_type (ROOT_TYPE_WBARRIER);
/*
 * scan_finalizer_entries:
 *
 * Copies/marks every object in a finalization list so that objects
 * queued for finalization (and everything they reference) survive the
 * collection.
 */
1757 scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeReadyEntry *list, GrayQueue *queue)
1759 FinalizeReadyEntry *fin;
1761 for (fin = list; fin; fin = fin->next) {
1764 SGEN_LOG (5, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object));
1765 copy_func (&fin->object, queue);
/* Human-readable generation name for logging; asserts on bad input. */
1770 generation_name (int generation)
1772 switch (generation) {
1773 case GENERATION_NURSERY: return "nursery";
1774 case GENERATION_OLD: return "old";
1775 default: g_assert_not_reached ();
/* Public wrapper around generation_name (). */
1780 sgen_generation_name (int generation)
1782 return generation_name (generation);
1785 SgenObjectOperations *
1786 sgen_get_current_object_ops (void){
1787 return ¤t_object_ops;
/*
 * finish_gray_stack:
 *
 * Completes the marking phase for one generation: drains the gray stack,
 * then interleaves ephemeron marking, bridge processing, weak-link
 * nulling and finalizer scanning — each followed by another drain — until
 * the gray stack is provably empty.  For a major collection
 * (GENERATION_OLD) the nursery range is processed as well.
 */
1792 finish_gray_stack (char *start_addr, char *end_addr, int generation, GrayQueue *queue)
1796 int done_with_ephemerons, ephemeron_rounds = 0;
1797 CopyOrMarkObjectFunc copy_func = current_object_ops.copy_or_mark_object;
1798 ScanObjectFunc scan_func = current_object_ops.scan_object;
1801 * We copied all the reachable objects. Now it's the time to copy
1802 * the objects that were not referenced by the roots, but by the copied objects.
1803 * we built a stack of objects pointed to by gray_start: they are
1804 * additional roots and we may add more items as we go.
1805 * We loop until gray_start == gray_objects which means no more objects have
1806 * been added. Note this is iterative: no recursion is involved.
1807 * We need to walk the LO list as well in search of marked big objects
1808 * (use a flag since this is needed only on major collections). We need to loop
1809 * here as well, so keep a counter of marked LO (increasing it in copy_object).
1810 * To achieve better cache locality and cache usage, we drain the gray stack
1811 * frequently, after each object is copied, and just finish the work here.
 */
1813 sgen_drain_gray_stack (queue, scan_func, -1);
1815 SGEN_LOG (2, "%s generation done", generation_name (generation));
1818 Reset bridge data, we might have lingering data from a previous collection if this is a major
1819 collection trigged by minor overflow.
1821 We must reset the gathered bridges since their original block might be evacuated due to major
1822 fragmentation in the meanwhile and the bridge code should not have to deal with that.
1824 sgen_bridge_reset_data ();
1827 * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
1828 * before processing finalizable objects or non-tracking weak hamdle to avoid finalizing/clearing
1829 * objects that are in fact reachable.
 */
1831 done_with_ephemerons = 0;
/* Iterate to a fixed point: marking a value can make another key reachable. */
1833 done_with_ephemerons = mark_ephemerons_in_range (copy_func, start_addr, end_addr, queue);
1834 sgen_drain_gray_stack (queue, scan_func, -1);
1836 } while (!done_with_ephemerons);
1838 sgen_scan_togglerefs (copy_func, start_addr, end_addr, queue);
1839 if (generation == GENERATION_OLD)
1840 sgen_scan_togglerefs (copy_func, sgen_get_nursery_start (), sgen_get_nursery_end (), queue);
1842 if (sgen_need_bridge_processing ()) {
1843 sgen_collect_bridge_objects (copy_func, start_addr, end_addr, generation, queue);
1844 if (generation == GENERATION_OLD)
1845 sgen_collect_bridge_objects (copy_func, sgen_get_nursery_start (), sgen_get_nursery_end (), GENERATION_NURSERY, queue);
1849 Make sure we drain the gray stack before processing disappearing links and finalizers.
1850 If we don't make sure it is empty we might wrongly see a live object as dead.
1852 sgen_drain_gray_stack (queue, scan_func, -1);
1855 We must clear weak links that don't track resurrection before processing object ready for
1856 finalization so they can be cleared before that.
1858 sgen_null_link_in_range (copy_func, start_addr, end_addr, generation, TRUE, queue);
1859 if (generation == GENERATION_OLD)
1860 sgen_null_link_in_range (copy_func, start_addr, end_addr, GENERATION_NURSERY, TRUE, queue);
1863 /* walk the finalization queue and move also the objects that need to be
1864 * finalized: use the finalized objects as new roots so the objects they depend
1865 * on are also not reclaimed. As with the roots above, only objects in the nursery
1866 * are marked/copied.
 */
1868 sgen_finalize_in_range (copy_func, start_addr, end_addr, generation, queue);
1869 if (generation == GENERATION_OLD)
1870 sgen_finalize_in_range (copy_func, sgen_get_nursery_start (), sgen_get_nursery_end (), GENERATION_NURSERY, queue);
1871 /* drain the new stack that might have been created */
1872 SGEN_LOG (6, "Precise scan of gray area post fin");
1873 sgen_drain_gray_stack (queue, scan_func, -1);
1876 * This must be done again after processing finalizable objects since CWL slots are cleared only after the key is finalized.
 */
1878 done_with_ephemerons = 0;
1880 done_with_ephemerons = mark_ephemerons_in_range (copy_func, start_addr, end_addr, queue);
1881 sgen_drain_gray_stack (queue, scan_func, -1);
1883 } while (!done_with_ephemerons);
1886 * Clear ephemeron pairs with unreachable keys.
1887 * We pass the copy func so we can figure out if an array was promoted or not.
 */
1889 clear_unreachable_ephemerons (copy_func, start_addr, end_addr, queue);
1892 SGEN_LOG (2, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron rounds", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds);
1895 * handle disappearing links
1896 * Note we do this after checking the finalization queue because if an object
1897 * survives (at least long enough to be finalized) we don't clear the link.
1898 * This also deals with a possible issue with the monitor reclamation: with the Boehm
1899 * GC a finalized object my lose the monitor because it is cleared before the finalizer is
 */
1902 g_assert (sgen_gray_object_queue_is_empty (queue));
1904 sgen_null_link_in_range (copy_func, start_addr, end_addr, generation, FALSE, queue);
1905 if (generation == GENERATION_OLD)
1906 sgen_null_link_in_range (copy_func, start_addr, end_addr, GENERATION_NURSERY, FALSE, queue);
1907 if (sgen_gray_object_queue_is_empty (queue))
1909 sgen_drain_gray_stack (queue, scan_func, -1);
/* Marking must be complete at this point. */
1912 g_assert (sgen_gray_object_queue_is_empty (queue));
/*
 * sgen_check_section_scan_starts () sanity-checks every non-NULL
 * scan-start entry of a section: each must point at an object with a
 * plausible size.  check_scan_starts () runs it over the nursery and the
 * major heap when do_scan_starts_check is enabled.
 */
1916 sgen_check_section_scan_starts (GCMemSection *section)
1919 for (i = 0; i < section->num_scan_start; ++i) {
1920 if (section->scan_starts [i]) {
1921 guint size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
1922 g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
1928 check_scan_starts (void)
1930 if (!do_scan_starts_check)
1932 sgen_check_section_scan_starts (nursery_section);
1933 major_collector.check_scan_starts ();
/*
 * scan_from_registered_roots:
 *
 * Precisely scans every registered root of @root_type through
 * precisely_scan_objects_from (), copying/marking the referenced objects.
 */
1937 scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, ScanObjectFunc scan_func, char *addr_start, char *addr_end, int root_type, GrayQueue *queue)
1941 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1942 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1943 precisely_scan_objects_from (copy_func, scan_func, start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, queue);
1944 } SGEN_HASH_TABLE_FOREACH_END;
/*
 * Heap-dump helpers: sgen_dump_occupied () emits one <occupied> XML
 * element for a contiguous run of live data; sgen_dump_section () walks a
 * section word by word, coalescing zeroed gaps and emitting an <object>
 * element for each object found.
 */
1948 sgen_dump_occupied (char *start, char *end, char *section_start)
1950 fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
1954 sgen_dump_section (GCMemSection *section, const char *type)
1956 char *start = section->data;
1957 char *end = section->data + section->size;
1958 char *occ_start = NULL;
1960 char *old_start = NULL; /* just for debugging */
1962 fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
1964 while (start < end) {
/* A zero vtable word means unallocated/zeroed space — close any
 * open occupied run and skip one pointer slot. */
1968 if (!*(void**)start) {
1970 sgen_dump_occupied (occ_start, start, section->data);
1973 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
1976 g_assert (start < section->next_data);
1981 vt = (GCVTable*)LOAD_VTABLE (start);
1984 size = ALIGN_UP (safe_object_get_size ((MonoObject*) start));
1987 fprintf (heap_dump_file, "<object offset=\"%d\" class=\"%s.%s\" size=\"%d\"/>\n",
1988 start - section->data,
1989 vt->klass->name_space, vt->klass->name,
/* Flush any trailing occupied run before closing the section. */
1997 sgen_dump_occupied (occ_start, start, section->data);
1999 fprintf (heap_dump_file, "</section>\n");
/*
 * dump_object:
 *
 * Emits one <object> XML element for @obj into heap_dump_file.  The class
 * name is copied with '<', '>' and '"' stripped (see comment below), and
 * when @dump_location is set a location attribute (nursery / major / LOS)
 * is added based on where the object lives.
 */
2003 dump_object (MonoObject *obj, gboolean dump_location)
2005 static char class_name [1024];
2007 MonoClass *class = mono_object_class (obj);
2011 * Python's XML parser is too stupid to parse angle brackets
2012 * in strings, so we just ignore them;
 */
2015 while (class->name [i] && j < sizeof (class_name) - 1) {
2016 if (!strchr ("<>\"", class->name [i]))
2017 class_name [j++] = class->name [i];
2020 g_assert (j < sizeof (class_name));
2023 fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%d\"",
2024 class->name_space, class_name,
2025 safe_object_get_size (obj));
2026 if (dump_location) {
2027 const char *location;
2028 if (ptr_in_nursery (obj))
2029 location = "nursery";
2030 else if (safe_object_get_size (obj) <= MAX_SMALL_OBJ_SIZE)
2034 fprintf (heap_dump_file, " location=\"%s\"", location);
2036 fprintf (heap_dump_file, "/>\n");
/*
 * dump_heap:
 * @type: collection type string ("minor"/"major")
 * @num: collection index
 * @reason: optional textual reason, or NULL
 *
 * Writes a full <collection> XML record to heap_dump_file: memory-usage
 * summaries, pin statistics, every pinned object, the nursery section,
 * the major heap and the large-object space.
 */
2040 dump_heap (const char *type, int num, const char *reason)
2045 fprintf (heap_dump_file, "<collection type=\"%s\" num=\"%d\"", type, num);
2047 fprintf (heap_dump_file, " reason=\"%s\"", reason);
2048 fprintf (heap_dump_file, ">\n");
2049 fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
2050 sgen_dump_internal_mem_usage (heap_dump_file);
2051 fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_STACK));
2052 /* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
2053 fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_OTHER));
2055 fprintf (heap_dump_file, "<pinned-objects>\n");
2056 for (list = sgen_pin_stats_get_object_list (); list; list = list->next)
2057 dump_object (list->obj, TRUE);
2058 fprintf (heap_dump_file, "</pinned-objects>\n");
2060 sgen_dump_section (nursery_section, "nursery");
2062 major_collector.dump_heap (heap_dump_file);
2064 fprintf (heap_dump_file, "<los>\n");
2065 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
2066 dump_object ((MonoObject*)bigobj->data, FALSE);
2067 fprintf (heap_dump_file, "</los>\n");
2069 fprintf (heap_dump_file, "</collection>\n");
2073 sgen_register_moved_object (void *obj, void *destination)
2075 g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
2077 /* FIXME: handle this for parallel collector */
2078 g_assert (!sgen_collection_is_parallel ());
2080 if (moved_objects_idx == MOVED_OBJECTS_NUM) {
2081 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
2082 moved_objects_idx = 0;
2084 moved_objects [moved_objects_idx++] = obj;
2085 moved_objects [moved_objects_idx++] = destination;
2091 static gboolean inited = FALSE;
2096 mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_pre_collection_fragment_clear);
2097 mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_pinning);
2098 mono_counters_register ("Minor scan remembered set", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_remsets);
2099 mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_pinned);
2100 mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_registered_roots);
2101 mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_thread_data);
2102 mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_finish_gray_stack);
2103 mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_fragment_creation);
2105 mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_pre_collection_fragment_clear);
2106 mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_pinning);
2107 mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_pinned);
2108 mono_counters_register ("Major scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_registered_roots);
2109 mono_counters_register ("Major scan thread data", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_thread_data);
2110 mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_alloc_pinned);
2111 mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_finalized);
2112 mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_big_objects);
2113 mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_finish_gray_stack);
2114 mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_free_bigobjs);
2115 mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_los_sweep);
2116 mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_sweep);
2117 mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_fragment_creation);
2119 mono_counters_register ("Number of pinned objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_pinned_objects);
2121 #ifdef HEAVY_STATISTICS
2122 mono_counters_register ("WBarrier set field", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_field);
2123 mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_arrayref);
2124 mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_arrayref_copy);
2125 mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store);
2126 mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_root);
2127 mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_value_copy);
2128 mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_object_copy);
2130 mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced_degraded);
2131 mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_degraded);
2133 mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_nursery);
2134 mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_nursery);
2135 mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_major);
2136 mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_major);
2138 mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_nursery);
2139 mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_major);
2141 mono_counters_register ("Slots allocated in vain", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_slots_allocated_in_vain);
2143 mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_from_space);
2144 mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_forwarded);
2145 mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_pinned);
2146 mono_counters_register ("# nursery copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_to_space);
2148 sgen_nursery_allocator_init_heavy_stats ();
2149 sgen_alloc_init_heavy_stats ();
2157 reset_pinned_from_failed_allocation (void)
2159 bytes_pinned_from_failed_allocation = 0;
2163 sgen_set_pinned_from_failed_allocation (mword objsize)
2165 bytes_pinned_from_failed_allocation += objsize;
2169 sgen_collection_is_parallel (void)
2171 switch (current_collection_generation) {
2172 case GENERATION_NURSERY:
2173 return nursery_collection_is_parallel;
2174 case GENERATION_OLD:
2175 return major_collector.is_parallel;
2177 g_error ("Invalid current generation %d", current_collection_generation);
2182 sgen_collection_is_concurrent (void)
2184 switch (current_collection_generation) {
2185 case GENERATION_NURSERY:
2187 case GENERATION_OLD:
2188 return major_collector.is_concurrent;
2190 g_error ("Invalid current generation %d", current_collection_generation);
2195 sgen_concurrent_collection_in_progress (void)
2197 return concurrent_collection_in_progress;
2204 } FinishRememberedSetScanJobData;
2207 job_finish_remembered_set_scan (WorkerData *worker_data, void *job_data_untyped)
2209 FinishRememberedSetScanJobData *job_data = job_data_untyped;
2211 remset.finish_scan_remsets (job_data->heap_start, job_data->heap_end, sgen_workers_get_job_gray_queue (worker_data));
2212 sgen_free_internal_dynamic (job_data, sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA);
2217 CopyOrMarkObjectFunc copy_or_mark_func;
2218 ScanObjectFunc scan_func;
2222 } ScanFromRegisteredRootsJobData;
2225 job_scan_from_registered_roots (WorkerData *worker_data, void *job_data_untyped)
2227 ScanFromRegisteredRootsJobData *job_data = job_data_untyped;
2229 scan_from_registered_roots (job_data->copy_or_mark_func, job_data->scan_func,
2230 job_data->heap_start, job_data->heap_end,
2231 job_data->root_type,
2232 sgen_workers_get_job_gray_queue (worker_data));
2233 sgen_free_internal_dynamic (job_data, sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA);
2240 } ScanThreadDataJobData;
2243 job_scan_thread_data (WorkerData *worker_data, void *job_data_untyped)
2245 ScanThreadDataJobData *job_data = job_data_untyped;
2247 scan_thread_data (job_data->heap_start, job_data->heap_end, TRUE,
2248 sgen_workers_get_job_gray_queue (worker_data));
2249 sgen_free_internal_dynamic (job_data, sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA);
2254 FinalizeReadyEntry *list;
2255 } ScanFinalizerEntriesJobData;
2258 job_scan_finalizer_entries (WorkerData *worker_data, void *job_data_untyped)
2260 ScanFinalizerEntriesJobData *job_data = job_data_untyped;
2262 scan_finalizer_entries (current_object_ops.copy_or_mark_object,
2264 sgen_workers_get_job_gray_queue (worker_data));
2265 sgen_free_internal_dynamic (job_data, sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA);
2269 job_scan_major_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2271 g_assert (concurrent_collection_in_progress);
2272 major_collector.scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
2276 job_scan_los_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2278 g_assert (concurrent_collection_in_progress);
2279 sgen_los_scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
2283 verify_scan_starts (char *start, char *end)
2287 for (i = 0; i < nursery_section->num_scan_start; ++i) {
2288 char *addr = nursery_section->scan_starts [i];
2289 if (addr > start && addr < end)
2290 SGEN_LOG (1, "NFC-BAD SCAN START [%d] %p for obj [%p %p]", i, addr, start, end);
2295 verify_nursery (void)
2297 char *start, *end, *cur, *hole_start;
2299 if (!do_verify_nursery)
2302 /*This cleans up unused fragments */
2303 sgen_nursery_allocator_prepare_for_pinning ();
2305 hole_start = start = cur = sgen_get_nursery_start ();
2306 end = sgen_get_nursery_end ();
2311 if (!*(void**)cur) {
2312 cur += sizeof (void*);
2316 if (object_is_forwarded (cur))
2317 SGEN_LOG (1, "FORWARDED OBJ %p", cur);
2318 else if (object_is_pinned (cur))
2319 SGEN_LOG (1, "PINNED OBJ %p", cur);
2321 ss = safe_object_get_size ((MonoObject*)cur);
2322 size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2323 verify_scan_starts (cur, cur + size);
2324 if (do_dump_nursery_content) {
2325 if (cur > hole_start)
2326 SGEN_LOG (1, "HOLE [%p %p %d]", hole_start, cur, (int)(cur - hole_start));
2327 SGEN_LOG (1, "OBJ [%p %p %d %d %s %d]", cur, cur + size, (int)size, (int)ss, sgen_safe_name ((MonoObject*)cur), (gpointer)LOAD_VTABLE (cur) == sgen_get_array_fill_vtable ());
2335 * Checks that no objects in the nursery are fowarded or pinned. This
2336 * is a precondition to restarting the mutator while doing a
2337 * concurrent collection. Note that we don't clear fragments because
2338 * we depend on that having happened earlier.
2341 check_nursery_is_clean (void)
2343 char *start, *end, *cur;
2345 start = cur = sgen_get_nursery_start ();
2346 end = sgen_get_nursery_end ();
2351 if (!*(void**)cur) {
2352 cur += sizeof (void*);
2356 g_assert (!object_is_forwarded (cur));
2357 g_assert (!object_is_pinned (cur));
2359 ss = safe_object_get_size ((MonoObject*)cur);
2360 size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2361 verify_scan_starts (cur, cur + size);
2368 init_gray_queue (void)
2370 if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ()) {
2371 sgen_gray_object_queue_init_invalid (&gray_queue);
2372 sgen_workers_init_distribute_gray_queue ();
2374 sgen_gray_object_queue_init (&gray_queue);
2379 * Collect objects in the nursery. Returns whether to trigger a major
2383 collect_nursery (void)
2385 gboolean needs_major;
2386 size_t max_garbage_amount;
2388 FinishRememberedSetScanJobData *frssjd;
2389 ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2390 ScanFinalizerEntriesJobData *sfejd_fin_ready, *sfejd_critical_fin;
2391 ScanThreadDataJobData *stdjd;
2392 mword fragment_total;
2393 TV_DECLARE (all_atv);
2394 TV_DECLARE (all_btv);
2398 if (disable_minor_collections)
2401 MONO_GC_BEGIN (GENERATION_NURSERY);
2405 #ifndef DISABLE_PERFCOUNTERS
2406 mono_perfcounters->gc_collections0++;
2409 current_collection_generation = GENERATION_NURSERY;
2410 if (sgen_collection_is_parallel ())
2411 current_object_ops = sgen_minor_collector.parallel_ops;
2413 current_object_ops = sgen_minor_collector.serial_ops;
2415 reset_pinned_from_failed_allocation ();
2417 binary_protocol_collection (stat_minor_gcs, GENERATION_NURSERY);
2418 check_scan_starts ();
2420 sgen_nursery_alloc_prepare_for_minor ();
2424 nursery_next = sgen_nursery_alloc_get_upper_alloc_bound ();
2425 /* FIXME: optimize later to use the higher address where an object can be present */
2426 nursery_next = MAX (nursery_next, sgen_get_nursery_end ());
2428 SGEN_LOG (1, "Start nursery collection %d %p-%p, size: %d", stat_minor_gcs, sgen_get_nursery_start (), nursery_next, (int)(nursery_next - sgen_get_nursery_start ()));
2429 max_garbage_amount = nursery_next - sgen_get_nursery_start ();
2430 g_assert (nursery_section->size >= max_garbage_amount);
2432 /* world must be stopped already */
2433 TV_GETTIME (all_atv);
2437 time_minor_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2439 if (xdomain_checks) {
2440 sgen_clear_nursery_fragments ();
2441 check_for_xdomain_refs ();
2444 nursery_section->next_data = nursery_next;
2446 major_collector.start_nursery_collection ();
2448 sgen_memgov_minor_collection_start ();
2453 gc_stats.minor_gc_count ++;
2455 if (remset.prepare_for_minor_collection)
2456 remset.prepare_for_minor_collection ();
2458 sgen_process_fin_stage_entries ();
2459 sgen_process_dislink_stage_entries ();
2461 /* pin from pinned handles */
2462 sgen_init_pinning ();
2463 mono_profiler_gc_event (MONO_GC_EVENT_MARK_START, 0);
2464 pin_from_roots (sgen_get_nursery_start (), nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2465 /* identify pinned objects */
2466 sgen_optimize_pin_queue (0);
2467 sgen_pinning_setup_section (nursery_section);
2468 sgen_pin_objects_in_section (nursery_section, WORKERS_DISTRIBUTE_GRAY_QUEUE, FALSE);
2469 sgen_pinning_trim_queue_to_section (nursery_section);
2472 time_minor_pinning += TV_ELAPSED (btv, atv);
2473 SGEN_LOG (2, "Finding pinned pointers: %d in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (btv, atv));
2474 SGEN_LOG (4, "Start scan with %d pinned objects", sgen_get_pinned_count ());
2476 if (whole_heap_check_before_collection)
2477 sgen_check_whole_heap ();
2478 if (consistency_check_at_minor_collection)
2479 sgen_check_consistency ();
2481 sgen_workers_start_all_workers ();
2484 * Perform the sequential part of remembered set scanning.
2485 * This usually involves scanning global information that might later be produced by evacuation.
2487 if (remset.begin_scan_remsets)
2488 remset.begin_scan_remsets (sgen_get_nursery_start (), nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2490 sgen_workers_start_marking ();
2492 frssjd = sgen_alloc_internal_dynamic (sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2493 frssjd->heap_start = sgen_get_nursery_start ();
2494 frssjd->heap_end = nursery_next;
2495 sgen_workers_enqueue_job (job_finish_remembered_set_scan, frssjd);
2497 /* we don't have complete write barrier yet, so we scan all the old generation sections */
2499 time_minor_scan_remsets += TV_ELAPSED (atv, btv);
2500 SGEN_LOG (2, "Old generation scan: %d usecs", TV_ELAPSED (atv, btv));
2502 if (!sgen_collection_is_parallel ())
2503 sgen_drain_gray_stack (&gray_queue, current_object_ops.scan_object, -1);
2505 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2506 report_registered_roots ();
2507 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2508 report_finalizer_roots ();
2510 time_minor_scan_pinned += TV_ELAPSED (btv, atv);
2512 /* registered roots, this includes static fields */
2513 scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2514 scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2515 scrrjd_normal->scan_func = current_object_ops.scan_object;
2516 scrrjd_normal->heap_start = sgen_get_nursery_start ();
2517 scrrjd_normal->heap_end = nursery_next;
2518 scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
2519 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
2521 scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2522 scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2523 scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
2524 scrrjd_wbarrier->heap_start = sgen_get_nursery_start ();
2525 scrrjd_wbarrier->heap_end = nursery_next;
2526 scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
2527 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
2530 time_minor_scan_registered_roots += TV_ELAPSED (atv, btv);
2533 stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2534 stdjd->heap_start = sgen_get_nursery_start ();
2535 stdjd->heap_end = nursery_next;
2536 sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
2539 time_minor_scan_thread_data += TV_ELAPSED (btv, atv);
2542 if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ()) {
2543 while (!sgen_gray_object_queue_is_empty (WORKERS_DISTRIBUTE_GRAY_QUEUE)) {
2544 sgen_workers_distribute_gray_queue_sections ();
2548 sgen_workers_join ();
2550 if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ())
2551 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2553 /* Scan the list of objects ready for finalization. If */
2554 sfejd_fin_ready = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2555 sfejd_fin_ready->list = fin_ready_list;
2556 sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_fin_ready);
2558 sfejd_critical_fin = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2559 sfejd_critical_fin->list = critical_fin_list;
2560 sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_critical_fin);
2562 finish_gray_stack (sgen_get_nursery_start (), nursery_next, GENERATION_NURSERY, &gray_queue);
2564 time_minor_finish_gray_stack += TV_ELAPSED (btv, atv);
2565 mono_profiler_gc_event (MONO_GC_EVENT_MARK_END, 0);
2568 * The (single-threaded) finalization code might have done
2569 * some copying/marking so we can only reset the GC thread's
2570 * worker data here instead of earlier when we joined the
2573 sgen_workers_reset_data ();
2575 if (objects_pinned) {
2576 sgen_optimize_pin_queue (0);
2577 sgen_pinning_setup_section (nursery_section);
2580 /* walk the pin_queue, build up the fragment list of free memory, unmark
2581 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2584 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_START, 0);
2585 fragment_total = sgen_build_nursery_fragments (nursery_section, nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries);
2586 if (!fragment_total)
2589 /* Clear TLABs for all threads */
2590 sgen_clear_tlabs ();
2592 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_END, 0);
2594 time_minor_fragment_creation += TV_ELAPSED (atv, btv);
2595 SGEN_LOG (2, "Fragment creation: %d usecs, %lu bytes available", TV_ELAPSED (atv, btv), (unsigned long)fragment_total);
2597 if (consistency_check_at_minor_collection)
2598 sgen_check_major_refs ();
2600 major_collector.finish_nursery_collection ();
2602 TV_GETTIME (all_btv);
2603 gc_stats.minor_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
2606 dump_heap ("minor", stat_minor_gcs - 1, NULL);
2608 /* prepare the pin queue for the next collection */
2609 sgen_finish_pinning ();
2610 if (fin_ready_list || critical_fin_list) {
2611 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
2612 mono_gc_finalize_notify ();
2614 sgen_pin_stats_reset ();
2616 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2618 if (remset.finish_minor_collection)
2619 remset.finish_minor_collection ();
2621 check_scan_starts ();
2623 binary_protocol_flush_buffers (FALSE);
2625 sgen_memgov_minor_collection_end ();
2627 /*objects are late pinned because of lack of memory, so a major is a good call*/
2628 needs_major = objects_pinned > 0;
2629 current_collection_generation = -1;
2632 MONO_GC_END (GENERATION_NURSERY);
2638 major_copy_or_mark_from_roots (int *old_next_pin_slot, gboolean finish_up_concurrent_mark)
2643 /* FIXME: only use these values for the precise scan
2644 * note that to_space pointers should be excluded anyway...
2646 char *heap_start = NULL;
2647 char *heap_end = (char*)-1;
2648 gboolean profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
2649 GCRootReport root_report = { 0 };
2650 ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2651 ScanThreadDataJobData *stdjd;
2652 ScanFinalizerEntriesJobData *sfejd_fin_ready, *sfejd_critical_fin;
2654 if (major_collector.is_concurrent) {
2655 /*This cleans up unused fragments */
2656 sgen_nursery_allocator_prepare_for_pinning ();
2658 check_nursery_is_clean ();
2660 /* The concurrent collector doesn't touch the nursery. */
2661 sgen_nursery_alloc_prepare_for_major ();
2668 /* Pinning depends on this */
2669 sgen_clear_nursery_fragments ();
2671 if (whole_heap_check_before_collection)
2672 sgen_check_whole_heap ();
2675 time_major_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2677 if (!sgen_collection_is_concurrent ())
2678 nursery_section->next_data = sgen_get_nursery_end ();
2679 /* we should also coalesce scanning from sections close to each other
2680 * and deal with pointers outside of the sections later.
2684 *major_collector.have_swept = FALSE;
2686 if (xdomain_checks) {
2687 sgen_clear_nursery_fragments ();
2688 check_for_xdomain_refs ();
2691 if (!finish_up_concurrent_mark) {
2692 /* Remsets are not useful for a major collection */
2693 remset.prepare_for_major_collection ();
2696 sgen_process_fin_stage_entries ();
2697 sgen_process_dislink_stage_entries ();
2700 sgen_init_pinning ();
2701 SGEN_LOG (6, "Collecting pinned addresses");
2702 pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2703 sgen_optimize_pin_queue (0);
2706 * pin_queue now contains all candidate pointers, sorted and
2707 * uniqued. We must do two passes now to figure out which
2708 * objects are pinned.
2710 * The first is to find within the pin_queue the area for each
2711 * section. This requires that the pin_queue be sorted. We
2712 * also process the LOS objects and pinned chunks here.
2714 * The second, destructive, pass is to reduce the section
2715 * areas to pointers to the actually pinned objects.
2717 SGEN_LOG (6, "Pinning from sections");
2718 /* first pass for the sections */
2719 sgen_find_section_pin_queue_start_end (nursery_section);
2720 major_collector.find_pin_queue_start_ends (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2721 /* identify possible pointers to the insize of large objects */
2722 SGEN_LOG (6, "Pinning from large objects");
2723 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
2725 if (sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + sgen_los_object_size (bigobj), &dummy)) {
2726 binary_protocol_pin (bigobj->data, (gpointer)LOAD_VTABLE (bigobj->data), safe_object_get_size (((MonoObject*)(bigobj->data))));
2727 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
2728 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (bigobj->data);
2729 MONO_GC_OBJ_PINNED ((mword)bigobj->data, sgen_safe_object_get_size ((MonoObject*)bigobj->data), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
2731 if (sgen_los_object_is_pinned (bigobj->data)) {
2732 g_assert (finish_up_concurrent_mark);
2735 sgen_los_pin_object (bigobj->data);
2736 /* FIXME: only enqueue if object has references */
2737 GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data);
2738 if (G_UNLIKELY (do_pin_stats))
2739 sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
2740 SGEN_LOG (6, "Marked large object %p (%s) size: %lu from roots", bigobj->data, safe_name (bigobj->data), (unsigned long)sgen_los_object_size (bigobj));
2743 add_profile_gc_root (&root_report, bigobj->data, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
2747 notify_gc_roots (&root_report);
2748 /* second pass for the sections */
2749 sgen_pin_objects_in_section (nursery_section, WORKERS_DISTRIBUTE_GRAY_QUEUE, concurrent_collection_in_progress);
2750 major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2751 if (old_next_pin_slot)
2752 *old_next_pin_slot = sgen_get_pinned_count ();
2755 time_major_pinning += TV_ELAPSED (atv, btv);
2756 SGEN_LOG (2, "Finding pinned pointers: %d in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (atv, btv));
2757 SGEN_LOG (4, "Start scan with %d pinned objects", sgen_get_pinned_count ());
2759 major_collector.init_to_space ();
2761 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2762 main_gc_thread = mono_native_thread_self ();
2765 sgen_workers_start_all_workers ();
2766 sgen_workers_start_marking ();
2768 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2769 report_registered_roots ();
2771 time_major_scan_pinned += TV_ELAPSED (btv, atv);
2773 /* registered roots, this includes static fields */
2774 scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2775 scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2776 scrrjd_normal->scan_func = current_object_ops.scan_object;
2777 scrrjd_normal->heap_start = heap_start;
2778 scrrjd_normal->heap_end = heap_end;
2779 scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
2780 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
2782 scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2783 scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2784 scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
2785 scrrjd_wbarrier->heap_start = heap_start;
2786 scrrjd_wbarrier->heap_end = heap_end;
2787 scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
2788 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
2791 time_major_scan_registered_roots += TV_ELAPSED (atv, btv);
2794 stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2795 stdjd->heap_start = heap_start;
2796 stdjd->heap_end = heap_end;
2797 sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
2800 time_major_scan_thread_data += TV_ELAPSED (btv, atv);
2803 time_major_scan_alloc_pinned += TV_ELAPSED (atv, btv);
2805 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2806 report_finalizer_roots ();
2808 /* scan the list of objects ready for finalization */
2809 sfejd_fin_ready = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2810 sfejd_fin_ready->list = fin_ready_list;
2811 sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_fin_ready);
2813 sfejd_critical_fin = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2814 sfejd_critical_fin->list = critical_fin_list;
2815 sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_critical_fin);
2817 if (finish_up_concurrent_mark) {
2818 /* Mod union card table */
2819 sgen_workers_enqueue_job (job_scan_major_mod_union_cardtable, NULL);
2820 sgen_workers_enqueue_job (job_scan_los_mod_union_cardtable, NULL);
2824 time_major_scan_finalized += TV_ELAPSED (btv, atv);
2825 SGEN_LOG (2, "Root scan: %d usecs", TV_ELAPSED (btv, atv));
2828 time_major_scan_big_objects += TV_ELAPSED (atv, btv);
2830 if (major_collector.is_concurrent) {
2831 /* prepare the pin queue for the next collection */
2832 sgen_finish_pinning ();
2834 sgen_pin_stats_reset ();
2836 check_nursery_is_clean ();
2841 major_start_collection (int *old_next_pin_slot)
2843 MONO_GC_BEGIN (GENERATION_OLD);
2845 current_collection_generation = GENERATION_OLD;
2846 #ifndef DISABLE_PERFCOUNTERS
2847 mono_perfcounters->gc_collections1++;
2850 if (major_collector.is_concurrent)
2851 concurrent_collection_in_progress = TRUE;
2853 current_object_ops = major_collector.major_ops;
2855 reset_pinned_from_failed_allocation ();
2857 sgen_memgov_major_collection_start ();
2859 //count_ref_nonref_objs ();
2860 //consistency_check ();
2862 binary_protocol_collection (stat_major_gcs, GENERATION_OLD);
2863 check_scan_starts ();
2866 SGEN_LOG (1, "Start major collection %d", stat_major_gcs);
2868 gc_stats.major_gc_count ++;
2870 if (major_collector.start_major_collection)
2871 major_collector.start_major_collection ();
2873 major_copy_or_mark_from_roots (old_next_pin_slot, FALSE);
2877 wait_for_workers_to_finish (void)
2879 if (major_collector.is_parallel || major_collector.is_concurrent) {
2880 while (!sgen_gray_object_queue_is_empty (WORKERS_DISTRIBUTE_GRAY_QUEUE)) {
2881 sgen_workers_distribute_gray_queue_sections ();
2885 sgen_workers_join ();
2887 if (major_collector.is_parallel || major_collector.is_concurrent)
2888 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2890 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2891 main_gc_thread = NULL;
/*
 * major_finish_collection:
 *
 *   Second half of a major collection: finishes marking (draining the gray
 * stack), sweeps the large-object space (LOS) and the major heap, rebuilds
 * nursery fragments and wakes the finalizer thread if needed.
 * @old_next_pin_slot is the pin-queue cursor saved by major_start_collection.
 *
 * NOTE(review): this listing is a sampled excerpt -- the numeric prefixes are
 * the original source line numbers and they jump, so some statements, braces
 * and else-arms are elided here.
 */
2896 major_finish_collection (const char *reason, int old_next_pin_slot)
2898 LOSObject *bigobj, *prevbo;
2901 char *heap_start = NULL;
2902 char *heap_end = (char*)-1;
2906 wait_for_workers_to_finish ();
2908 current_object_ops = major_collector.major_ops;
/* Concurrent mode: bring the mod-union card tables up to date and do a
 * final STW marking pass from the roots before finishing. */
2910 if (major_collector.is_concurrent) {
2911 major_collector.update_cardtable_mod_union ();
2912 sgen_los_update_cardtable_mod_union ();
2914 major_copy_or_mark_from_roots (NULL, TRUE);
2915 wait_for_workers_to_finish ();
2917 check_nursery_is_clean ();
2920 /* all the objects in the heap */
2921 finish_gray_stack (heap_start, heap_end, GENERATION_OLD, &gray_queue);
2923 time_major_finish_gray_stack += TV_ELAPSED (btv, atv);
2926 * The (single-threaded) finalization code might have done
2927 * some copying/marking so we can only reset the GC thread's
2928 * worker data here instead of earlier when we joined the
2931 sgen_workers_reset_data ();
/* Objects were pinned by a failed allocation (OOM recovery path); the
 * pin queue must be re-optimized before fragment building. */
2933 if (objects_pinned) {
2934 g_assert (!major_collector.is_concurrent);
2936 /*This is slow, but we just OOM'd*/
2937 sgen_pin_queue_clear_discarded_entries (nursery_section, old_next_pin_slot);
2938 sgen_optimize_pin_queue (0);
2939 sgen_find_section_pin_queue_start_end (nursery_section);
2943 reset_heap_boundaries ();
2944 sgen_update_heap_boundaries ((mword)sgen_get_nursery_start (), (mword)sgen_get_nursery_end ());
2946 /* sweep the big objects list */
2948 for (bigobj = los_object_list; bigobj;) {
2949 g_assert (!object_is_pinned (bigobj->data));
2950 if (sgen_los_object_is_pinned (bigobj->data)) {
2951 sgen_los_unpin_object (bigobj->data);
2952 sgen_update_heap_boundaries ((mword)bigobj->data, (mword)bigobj->data + sgen_los_object_size (bigobj));
2955 /* not referenced anywhere, so we can free it */
/* Unlink the dead LOS object from the singly-linked list (prevbo is the
 * previous node, or NULL at the head) and free it. */
2957 prevbo->next = bigobj->next;
2959 los_object_list = bigobj->next;
2961 bigobj = bigobj->next;
2962 sgen_los_free_object (to_free);
2966 bigobj = bigobj->next;
2970 time_major_free_bigobjs += TV_ELAPSED (atv, btv);
2975 time_major_los_sweep += TV_ELAPSED (btv, atv);
2977 major_collector.sweep ();
2980 time_major_sweep += TV_ELAPSED (atv, btv);
/* Non-concurrent mode rebuilds the nursery allocation fragments here;
 * the concurrent collector does not touch the nursery. */
2982 if (!major_collector.is_concurrent) {
2983 /* walk the pin_queue, build up the fragment list of free memory, unmark
2984 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2987 if (!sgen_build_nursery_fragments (nursery_section, nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries))
2990 /* prepare the pin queue for the next collection */
2991 sgen_finish_pinning ();
2993 /* Clear TLABs for all threads */
2994 sgen_clear_tlabs ();
2996 sgen_pin_stats_reset ();
3000 time_major_fragment_creation += TV_ELAPSED (btv, atv);
3003 dump_heap ("major", stat_major_gcs - 1, reason);
/* Wake the finalizer thread if any objects became ready for finalization. */
3005 if (fin_ready_list || critical_fin_list) {
3006 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
3007 mono_gc_finalize_notify ();
3010 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3012 sgen_memgov_major_collection_end ();
3013 current_collection_generation = -1;
3015 major_collector.finish_major_collection ();
3017 if (major_collector.is_concurrent)
3018 concurrent_collection_in_progress = FALSE;
3020 check_scan_starts ();
3022 binary_protocol_flush_buffers (FALSE);
3024 //consistency_check ();
3026 MONO_GC_END (GENERATION_OLD);
3030 major_do_collection (const char *reason)
3032 TV_DECLARE (all_atv);
3033 TV_DECLARE (all_btv);
3034 int old_next_pin_slot;
3036 /* world must be stopped already */
3037 TV_GETTIME (all_atv);
3039 major_start_collection (&old_next_pin_slot);
3040 major_finish_collection (reason, old_next_pin_slot);
3042 TV_GETTIME (all_btv);
3043 gc_stats.major_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
3045 return bytes_pinned_from_failed_allocation > 0;
3048 static gboolean major_do_collection (const char *reason);
3051 major_start_concurrent_collection (const char *reason)
3053 // FIXME: store reason and pass it when finishing
3054 major_start_collection (NULL);
3056 sgen_workers_distribute_gray_queue_sections ();
3057 g_assert (sgen_gray_object_queue_is_empty (WORKERS_DISTRIBUTE_GRAY_QUEUE));
3059 sgen_workers_wait_for_jobs ();
3061 current_collection_generation = -1;
3065 major_finish_concurrent_collection (void)
3067 current_collection_generation = GENERATION_OLD;
3068 major_finish_collection ("finishing", -1);
3069 current_collection_generation = -1;
/*
 * sgen_ensure_free_space:
 *
 *   Decide which generation (if any) must be collected so that an
 * allocation of @size can succeed, then trigger that collection.
 * NOTE(review): the numeric prefixes (original line numbers) jump, so some
 * braces/else-arms of the decision tree are elided in this listing.
 */
3073 * Ensure an allocation request for @size will succeed by freeing enough memory.
3075 * LOCKING: The GC lock MUST be held.
3078 sgen_ensure_free_space (size_t size)
3080 int generation_to_collect = -1;
3081 const char *reason = NULL;
/* Objects above the small-object threshold live in LOS; only a major
 * collection can free LOS space. */
3084 if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
3085 if (sgen_need_major_collection (size)) {
3086 reason = "LOS overflow";
3087 generation_to_collect = GENERATION_OLD;
3090 if (degraded_mode) {
3091 if (sgen_need_major_collection (size)) {
3092 reason = "Degraded mode overflow";
3093 generation_to_collect = GENERATION_OLD;
3095 } else if (sgen_need_major_collection (size)) {
3096 reason = "Minor allowance";
3097 generation_to_collect = GENERATION_OLD;
/* Default case: a plain nursery collection suffices. */
3099 generation_to_collect = GENERATION_NURSERY;
3100 reason = "Nursery full";
/* No collection needed: enough free space is already available. */
3104 if (generation_to_collect == -1)
/* Nursery collections don't wait for a concurrent major to finish. */
3106 sgen_perform_collection (size, generation_to_collect, reason, generation_to_collect == GENERATION_NURSERY);
/*
 * sgen_perform_collection:
 *
 *   Top-level entry for any collection: stops the world, runs a nursery or
 * major collection (possibly starting/finishing a concurrent major), runs an
 * "overflow" follow-up collection if the first one pinned too much or
 * overflowed, emits profiler events, and restarts the world.
 *
 * @requested_size: the allocation that triggered the collection (0 for user
 *   requests); used at the end to decide whether to fall into degraded mode.
 * @generation_to_collect: GENERATION_NURSERY or GENERATION_OLD.
 * @reason: human-readable trigger description for logging/profiling.
 * @wait_to_finish: if TRUE a concurrent major is not started; the collection
 *   completes synchronously.
 *
 * NOTE(review): sampled listing -- some statements/braces are elided where
 * the numeric line prefixes jump.
 */
3110 sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish)
3112 TV_DECLARE (gc_end);
3113 GGTimingInfo infos [2];
3114 int overflow_generation_to_collect = -1;
3115 const char *overflow_reason = NULL;
/* infos[0] is the requested collection, infos[1] the optional overflow one. */
3117 memset (infos, 0, sizeof (infos));
3118 mono_profiler_gc_event (MONO_GC_EVENT_START, generation_to_collect);
3120 infos [0].generation = generation_to_collect;
3121 infos [0].reason = reason;
3122 infos [0].is_overflow = FALSE;
3123 TV_GETTIME (infos [0].total_time);
3124 infos [1].generation = -1;
3126 sgen_stop_world (generation_to_collect);
/* A running concurrent major must be finished before anything else; it can
 * only coexist with nursery collection requests. */
3128 if (concurrent_collection_in_progress) {
3129 g_assert (generation_to_collect == GENERATION_NURSERY);
3130 major_finish_concurrent_collection ();
3133 //FIXME extract overflow reason
3134 if (generation_to_collect == GENERATION_NURSERY) {
/* collect_nursery() returning TRUE means the minor collection overflowed:
 * schedule a follow-up major collection. */
3135 if (collect_nursery ()) {
3136 overflow_generation_to_collect = GENERATION_OLD;
3137 overflow_reason = "Minor overflow";
3140 if (major_collector.is_concurrent)
3143 if (major_collector.is_concurrent && !wait_to_finish) {
3144 major_start_concurrent_collection (reason);
3145 // FIXME: set infos[0] properly
/* Synchronous major: excessive pinning triggers a follow-up minor. */
3148 if (major_do_collection (reason)) {
3149 overflow_generation_to_collect = GENERATION_NURSERY;
3150 overflow_reason = "Excessive pinning";
3155 TV_GETTIME (gc_end);
3156 infos [0].total_time = SGEN_TV_ELAPSED (infos [0].total_time, gc_end);
/* Run the overflow collection (never while a concurrent major is active). */
3159 if (!major_collector.is_concurrent && overflow_generation_to_collect != -1) {
3160 mono_profiler_gc_event (MONO_GC_EVENT_START, overflow_generation_to_collect);
3161 infos [1].generation = overflow_generation_to_collect;
3162 infos [1].reason = overflow_reason;
3163 infos [1].is_overflow = TRUE;
3164 infos [1].total_time = gc_end;
3166 if (overflow_generation_to_collect == GENERATION_NURSERY)
3169 major_do_collection (overflow_reason);
3171 TV_GETTIME (gc_end);
3172 infos [1].total_time = SGEN_TV_ELAPSED (infos [1].total_time, gc_end);
3174 /* keep events symmetric */
3175 mono_profiler_gc_event (MONO_GC_EVENT_END, overflow_generation_to_collect);
3178 SGEN_LOG (2, "Heap size: %lu, LOS size: %lu", (unsigned long)mono_gc_get_heap_size (), (unsigned long)los_memory_usage);
3180 /* this also sets the proper pointers for the next allocation */
/* If even after collecting the nursery the request cannot be satisfied,
 * the allocator falls back to degraded (major-heap) allocation. */
3181 if (generation_to_collect == GENERATION_NURSERY && !sgen_can_alloc_size (requested_size)) {
3182 /* TypeBuilder and MonoMethod are killing mcs with fragmentation */
3183 SGEN_LOG (1, "nursery collection didn't find enough room for %zd alloc (%d pinned)", requested_size, sgen_get_pinned_count ());
3184 sgen_dump_pin_queue ();
3189 sgen_restart_world (generation_to_collect, infos);
3191 mono_profiler_gc_event (MONO_GC_EVENT_END, generation_to_collect);
3195 * ######################################################################
3196 * ######## Memory allocation from the OS
3197 * ######################################################################
3198 * This section of code deals with getting memory from the OS and
3199 * allocating memory for GC-internal data structures.
3200 * Internal memory can be handled with a freelist for small objects.
3206 G_GNUC_UNUSED static void
3207 report_internal_mem_usage (void)
3209 printf ("Internal memory usage:\n");
3210 sgen_report_internal_mem_usage ();
3211 printf ("Pinned memory usage:\n");
3212 major_collector.report_pinned_memory_usage ();
3216 * ######################################################################
3217 * ######## Finalization support
3218 * ######################################################################
3222 * If the object has been forwarded it means it's still referenced from a root.
3223 * If it is pinned it's still alive as well.
3224 * A LOS object is only alive if we have pinned it.
3225 * Return TRUE if @obj is ready to be finalized.
3227 static inline gboolean
3228 sgen_is_object_alive (void *object)
3232 if (ptr_in_nursery (object))
3233 return sgen_nursery_is_object_alive (object);
3234 /* Oldgen objects can be pinned and forwarded too */
3235 if (SGEN_OBJECT_IS_PINNED (object) || SGEN_OBJECT_IS_FORWARDED (object))
3239 * FIXME: major_collector.is_object_live() also calculates the
3240 * size. Avoid the double calculation.
3242 objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)object));
3243 if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
3244 return sgen_los_object_is_pinned (object);
3246 return major_collector.is_object_live (object);
3250 sgen_gc_is_object_ready_for_finalization (void *object)
3252 return !sgen_is_object_alive (object);
3256 has_critical_finalizer (MonoObject *obj)
3260 if (!mono_defaults.critical_finalizer_object)
3263 class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;
3265 return mono_class_has_parent_fast (class, mono_defaults.critical_finalizer_object);
3269 sgen_queue_finalization_entry (MonoObject *obj)
3271 FinalizeReadyEntry *entry = sgen_alloc_internal (INTERNAL_MEM_FINALIZE_READY_ENTRY);
3272 entry->object = obj;
3273 if (has_critical_finalizer (obj)) {
3274 entry->next = critical_fin_list;
3275 critical_fin_list = entry;
3277 entry->next = fin_ready_list;
3278 fin_ready_list = entry;
3283 object_is_reachable (char *object, char *start, char *end)
3285 /*This happens for non nursery objects during minor collections. We just treat all objects as alive.*/
3286 if (object < start || object >= end)
3289 return sgen_is_object_alive (object);
3293 sgen_object_is_live (void *obj)
3295 if (ptr_in_nursery (obj))
3296 return object_is_pinned (obj);
3297 /* FIXME This is semantically wrong! All tenured object are considered alive during a nursery collection. */
3298 if (current_collection_generation == GENERATION_NURSERY)
3300 return major_collector.is_object_live (obj);
/*
 * null_ephemerons_for_domain:
 *
 *   Unlink from the global ephemeron list every entry whose array belongs to
 * an object already swept as part of unloading @domain (recognized by a NULL
 * vtable).  Standard singly-linked-list removal with a `prev' cursor.
 * NOTE(review): the loop header/else-arm are elided in this sampled listing.
 */
3303 /* LOCKING: requires that the GC lock is held */
3305 null_ephemerons_for_domain (MonoDomain *domain)
3307 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3310 MonoObject *object = (MonoObject*)current->array;
/* A cleared vtable means the holder object was collected with its domain. */
3312 if (object && !object->vtable) {
3313 EphemeronLinkNode *tmp = current;
3316 prev->next = current->next;
3318 ephemeron_list = current->next;
3320 current = current->next;
3321 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
3324 current = current->next;
/*
 * clear_unreachable_ephemerons:
 *
 *   Walk every registered ephemeron array: drop arrays that are themselves
 * unreachable, tombstone entries whose key died, and add global remembered-set
 * entries for keys/values left behind in the nursery when an array was
 * promoted.  Runs after marking, under the GC lock.
 * NOTE(review): sampled listing -- some braces/`continue' statements are
 * elided where the numeric prefixes jump.
 */
3329 /* LOCKING: requires that the GC lock is held */
3331 clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue)
3333 int was_in_nursery, was_promoted;
3334 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3336 Ephemeron *cur, *array_end;
3340 char *object = current->array;
/* Dead ephemeron array: unlink its node and free it. */
3342 if (!object_is_reachable (object, start, end)) {
3343 EphemeronLinkNode *tmp = current;
3345 SGEN_LOG (5, "Dead Ephemeron array at %p", object);
3348 prev->next = current->next;
3350 ephemeron_list = current->next;
3352 current = current->next;
3353 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
/* Live array: update our pointer to its (possibly copied) location. */
3358 was_in_nursery = ptr_in_nursery (object);
3359 copy_func ((void**)&object, queue);
3360 current->array = object;
3362 /*The array was promoted, add global remsets for key/values left behind in nursery.*/
3363 was_promoted = was_in_nursery && !ptr_in_nursery (object);
3365 SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", object);
3367 array = (MonoArray*)object;
3368 cur = mono_array_addr (array, Ephemeron, 0);
3369 array_end = cur + mono_array_length_fast (array);
3370 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3372 for (; cur < array_end; ++cur) {
3373 char *key = (char*)cur->key;
/* Empty or already-tombstoned slot: nothing to do. */
3375 if (!key || key == tombstone)
3378 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3379 key, object_is_reachable (key, start, end) ? "reachable" : "unreachable",
3380 cur->value, cur->value && object_is_reachable (cur->value, start, end) ? "reachable" : "unreachable");
/* Dead key: tombstone the slot so the pair is dropped. */
3382 if (!object_is_reachable (key, start, end)) {
3383 cur->key = tombstone;
/* Promoted array, un-promoted key/value: record old->new references in
 * the global remembered set. */
3389 if (ptr_in_nursery (key)) {/*key was not promoted*/
3390 SGEN_LOG (5, "\tAdded remset to key %p", key);
3391 sgen_add_to_global_remset (&cur->key);
3393 if (ptr_in_nursery (cur->value)) {/*value was not promoted*/
3394 SGEN_LOG (5, "\tAdded remset to value %p", cur->value);
3395 sgen_add_to_global_remset (&cur->value);
3400 current = current->next;
/*
 * mark_ephemerons_in_range:
 *
 *   One iteration of the ephemeron fixpoint: for every reachable ephemeron
 * array, copy/mark the value of each entry whose key is reachable.  Returns
 * non-zero ("nothing marked") when a pass makes no progress, which ends the
 * fixpoint loop in the caller.
 * NOTE(review): sampled listing -- `continue' statements and the
 * `nothing_marked = 0' update are elided where the numeric prefixes jump.
 */
3404 /* LOCKING: requires that the GC lock is held */
3406 mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue)
3408 int nothing_marked = 1;
3409 EphemeronLinkNode *current = ephemeron_list;
3411 Ephemeron *cur, *array_end;
3414 for (current = ephemeron_list; current; current = current->next) {
3415 char *object = current->array;
3416 SGEN_LOG (5, "Ephemeron array at %p", object);
3419 For now we process all ephemerons during all collections.
3420 Ideally we should use remset information to partially scan those
3422 We already emit write barriers for Ephemeron fields, it's
3423 just that we don't process them.
3425 /*if (object < start || object >= end)
3428 /*It has to be alive*/
3429 if (!object_is_reachable (object, start, end)) {
3430 SGEN_LOG (5, "\tnot reachable");
/* Keep the array itself (and our pointer to it) up to date. */
3434 copy_func ((void**)&object, queue);
3436 array = (MonoArray*)object;
3437 cur = mono_array_addr (array, Ephemeron, 0);
3438 array_end = cur + mono_array_length_fast (array);
3439 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3441 for (; cur < array_end; ++cur) {
3442 char *key = cur->key;
3444 if (!key || key == tombstone)
3447 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3448 key, object_is_reachable (key, start, end) ? "reachable" : "unreachable",
3449 cur->value, cur->value && object_is_reachable (cur->value, start, end) ? "reachable" : "unreachable");
/* Reachable key keeps its value alive: mark both; a newly reachable value
 * means this pass made progress. */
3451 if (object_is_reachable (key, start, end)) {
3452 char *value = cur->value;
3454 copy_func ((void**)&cur->key, queue);
3456 if (!object_is_reachable (value, start, end))
3458 copy_func ((void**)&cur->value, queue);
3464 SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
3465 return nothing_marked;
/*
 * mono_gc_invoke_finalizers:
 *
 *   Run pending finalizers one at a time.  Each iteration: unlink the entry
 * finalized on the previous iteration, pick the next non-null entry
 * (ordinary list first, then critical), detach its object, and invoke the
 * finalizer.  The object pointer lives on the stack while the finalizer
 * runs, so it is conservatively pinned.
 * NOTE(review): sampled listing -- the loop's lock acquire/release and some
 * braces are elided; the list surgery assumes the GC lock is held around it.
 */
3469 mono_gc_invoke_finalizers (void)
3471 FinalizeReadyEntry *entry = NULL;
3472 gboolean entry_is_critical = FALSE;
3475 /* FIXME: batch to reduce lock contention */
3476 while (fin_ready_list || critical_fin_list) {
/* Remove the entry finalized on the previous iteration from whichever
 * list it came from. */
3480 FinalizeReadyEntry **list = entry_is_critical ? &critical_fin_list : &fin_ready_list;
3482 /* We have finalized entry in the last
3483 iteration, now we need to remove it from
3486 *list = entry->next;
3488 FinalizeReadyEntry *e = *list;
3489 while (e->next != entry)
3491 e->next = entry->next;
3493 sgen_free_internal (entry, INTERNAL_MEM_FINALIZE_READY_ENTRY);
3497 /* Now look for the first non-null entry. */
3498 for (entry = fin_ready_list; entry && !entry->object; entry = entry->next)
/* Ordinary finalizers exhausted: fall back to the critical list. */
3501 entry_is_critical = FALSE;
3503 entry_is_critical = TRUE;
3504 for (entry = critical_fin_list; entry && !entry->object; entry = entry->next)
/* Claim the object: clearing entry->object marks the entry as consumed. */
3509 g_assert (entry->object);
3510 num_ready_finalizers--;
3511 obj = entry->object;
3512 entry->object = NULL;
3513 SGEN_LOG (7, "Finalizing object %p (%s)", obj, safe_name (obj));
3521 g_assert (entry->object == NULL);
3523 /* the object is on the stack so it is pinned */
3524 /*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
3525 mono_gc_run_finalize (obj, NULL);
3532 mono_gc_pending_finalizers (void)
3534 return fin_ready_list || critical_fin_list;
3538 * ######################################################################
3539 * ######## registered roots support
3540 * ######################################################################
/*
 * mono_gc_register_root_inner:
 *
 *   Register the memory range [start, start+size) as a GC root of
 * @root_type with scanning descriptor @descr.  If the range start is
 * already registered under any root type, its size and descriptor are
 * updated in place (roots are never coalesced).
 * NOTE(review): sampled listing -- the early-return after the update path
 * and the lock handling are elided.
 */
3544 * We do not coalesce roots.
3547 mono_gc_register_root_inner (char *start, size_t size, void *descr, int root_type)
3549 RootRecord new_root;
3552 for (i = 0; i < ROOT_TYPE_NUM; ++i) {
3553 RootRecord *root = sgen_hash_table_lookup (&roots_hash [i], start);
3554 /* we allow changing the size and the descriptor (for thread statics etc) */
3556 size_t old_size = root->end_root - start;
3557 root->end_root = start + size;
/* A root either has a descriptor or is scanned conservatively -- an
 * existing root must not switch category. */
3558 g_assert (((root->root_desc != 0) && (descr != NULL)) ||
3559 ((root->root_desc == 0) && (descr == NULL)));
3560 root->root_desc = (mword)descr;
3562 roots_size -= old_size;
/* Not previously registered: insert a fresh record. */
3568 new_root.end_root = start + size;
3569 new_root.root_desc = (mword)descr;
3571 sgen_hash_table_replace (&roots_hash [root_type], start, &new_root, NULL);
3574 SGEN_LOG (3, "Added root for range: %p-%p, descr: %p (%d/%d bytes)", start, new_root.end_root, descr, (int)size, (int)roots_size);
3581 mono_gc_register_root (char *start, size_t size, void *descr)
3583 return mono_gc_register_root_inner (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED);
3587 mono_gc_register_root_wbarrier (char *start, size_t size, void *descr)
3589 return mono_gc_register_root_inner (start, size, descr, ROOT_TYPE_WBARRIER);
/*
 * mono_gc_deregister_root:
 *
 *   Remove the root starting at @addr from every root-type table it appears
 * in, and shrink the global roots_size accounting accordingly.
 * NOTE(review): sampled listing -- the local declarations (root_type, root)
 * and the GC lock acquire/release are elided here.
 */
3593 mono_gc_deregister_root (char* addr)
3599 for (root_type = 0; root_type < ROOT_TYPE_NUM; ++root_type) {
3600 if (sgen_hash_table_remove (&roots_hash [root_type], addr, &root))
3601 roots_size -= (root.end_root - addr);
3607 * ######################################################################
3608 * ######## Thread handling (stop/start code)
3609 * ######################################################################
3612 unsigned int sgen_global_stop_count = 0;
3615 sgen_fill_thread_info_for_suspend (SgenThreadInfo *info)
3617 if (remset.fill_thread_info_for_suspend)
3618 remset.fill_thread_info_for_suspend (info);
3622 sgen_get_current_collection_generation (void)
3624 return current_collection_generation;
3628 mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
3630 gc_callbacks = *callbacks;
3634 mono_gc_get_gc_callbacks ()
3636 return &gc_callbacks;
3639 /* Variables holding start/end nursery so it won't have to be passed at every call */
/* Set by scan_thread_data before conservative stack scanning; only
 * meaningful while a collection's thread scan is in progress. */
3640 static void *scan_area_arg_start, *scan_area_arg_end;
3643 mono_gc_conservatively_scan_area (void *start, void *end)
3645 conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
3649 mono_gc_scan_object (void *obj)
3651 UserCopyOrMarkData *data = mono_native_tls_get_value (user_copy_or_mark_key);
3652 current_object_ops.copy_or_mark_object (&obj, data->queue);
/*
 * scan_thread_data:
 *
 *   Mark from thread stacks and registers.  Skips dead, GC-disabled and
 * not-yet-STW-joined threads; uses the runtime's precise mark callback when
 * available, otherwise pins stack words conservatively.  Saved register
 * state is always scanned conservatively during the !precise pass.
 * NOTE(review): sampled listing -- `continue' statements and some braces
 * are elided where the numeric prefixes jump.
 */
3657 * Mark from thread stacks and registers.
3660 scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue)
3662 SgenThreadInfo *info;
/* Publish the nursery bounds for mono_gc_conservatively_scan_area. */
3664 scan_area_arg_start = start_nursery;
3665 scan_area_arg_end = end_nursery;
3667 FOREACH_THREAD (info) {
3669 SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3672 if (info->gc_disabled) {
3673 SGEN_LOG (3, "GC disabled for thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3677 if (!info->joined_stw) {
3678 SGEN_LOG (3, "Skipping thread not seen in STW %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3682 SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %td, pinned=%d", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, sgen_get_pinned_count ());
3683 if (!info->thread_is_dying) {
/* Precise scan: hand the frame walk to the runtime callback, with a TLS
 * cookie so mono_gc_scan_object can find the right queue. */
3684 if (gc_callbacks.thread_mark_func && !conservative_stack_mark) {
3685 UserCopyOrMarkData data = { NULL, queue };
3686 set_user_copy_or_mark_data (&data);
3687 gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise);
3688 set_user_copy_or_mark_data (NULL);
3689 } else if (!precise) {
3690 conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
/* Saved register state (ctx or raw regs, depending on platform) is
 * always scanned conservatively. */
3694 if (!info->thread_is_dying && !precise) {
3696 conservatively_pin_objects_from ((void**)&info->ctx, (void**)&info->ctx + ARCH_NUM_REGS,
3697 start_nursery, end_nursery, PIN_TYPE_STACK);
3699 conservatively_pin_objects_from (&info->regs, &info->regs + ARCH_NUM_REGS,
3700 start_nursery, end_nursery, PIN_TYPE_STACK);
3703 } END_FOREACH_THREAD
3707 ptr_on_stack (void *ptr)
3709 gpointer stack_start = &stack_start;
3710 SgenThreadInfo *info = mono_thread_info_current ();
3712 if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
/*
 * sgen_thread_register:
 *
 *   Per-thread GC initialization: publish @info via TLS, reset TLAB and
 * remset state, determine the thread's stack bounds (via pthread attributes
 * where available, otherwise from @addr rounded to a page), and notify the
 * remset backend and runtime attach callback.
 * NOTE(review): sampled listing -- several #ifdef arms, braces and
 * assignments are elided where the numeric prefixes jump.
 */
3718 sgen_thread_register (SgenThreadInfo* info, void *addr)
3720 #ifndef HAVE_KW_THREAD
3721 SgenThreadInfo *__thread_info__ = info;
3725 #ifndef HAVE_KW_THREAD
3726 info->tlab_start = info->tlab_next = info->tlab_temp_end = info->tlab_real_end = NULL;
3728 g_assert (!mono_native_tls_get_value (thread_info_key));
3729 mono_native_tls_set_value (thread_info_key, info);
3731 sgen_thread_info = info;
3734 #if !defined(__MACH__)
3735 info->stop_count = -1;
/* Fresh thread: clear all STW/handshake bookkeeping and remset buffers. */
3739 info->joined_stw = FALSE;
3740 info->doing_handshake = FALSE;
3741 info->thread_is_dying = FALSE;
3742 info->stack_start = NULL;
3743 info->store_remset_buffer_addr = &STORE_REMSET_BUFFER;
3744 info->store_remset_buffer_index_addr = &STORE_REMSET_BUFFER_INDEX;
3745 info->stopped_ip = NULL;
3746 info->stopped_domain = NULL;
3748 memset (&info->ctx, 0, sizeof (MonoContext));
3750 memset (&info->regs, 0, sizeof (info->regs));
3753 sgen_init_tlab_info (info);
3755 binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));
3757 #ifdef HAVE_KW_THREAD
3758 store_remset_buffer_index_addr = &store_remset_buffer_index;
3761 /* try to get it with attributes first */
/* Preferred: query the exact stack bounds from the threading library. */
3762 #if defined(HAVE_PTHREAD_GETATTR_NP) && defined(HAVE_PTHREAD_ATTR_GETSTACK)
3766 pthread_attr_t attr;
3767 pthread_getattr_np (pthread_self (), &attr);
3768 pthread_attr_getstack (&attr, &sstart, &size);
3769 info->stack_start_limit = sstart;
3770 info->stack_end = (char*)sstart + size;
3771 pthread_attr_destroy (&attr);
3773 #elif defined(HAVE_PTHREAD_GET_STACKSIZE_NP) && defined(HAVE_PTHREAD_GET_STACKADDR_NP)
3774 info->stack_end = (char*)pthread_get_stackaddr_np (pthread_self ());
3775 info->stack_start_limit = (char*)info->stack_end - pthread_get_stacksize_np (pthread_self ());
/* Fallback: round the caller-supplied address up to a 4 KiB page boundary. */
3778 /* FIXME: we assume the stack grows down */
3779 gsize stack_bottom = (gsize)addr;
3780 stack_bottom += 4095;
3781 stack_bottom &= ~4095;
3782 info->stack_end = (char*)stack_bottom;
3786 #ifdef HAVE_KW_THREAD
3787 stack_end = info->stack_end;
3790 if (remset.register_thread)
3791 remset.register_thread (info);
3793 SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->stack_end);
3795 if (gc_callbacks.thread_attach_func)
3796 info->runtime_data = gc_callbacks.thread_attach_func ();
3803 sgen_wbarrier_cleanup_thread (SgenThreadInfo *p)
3805 if (remset.cleanup_thread)
3806 remset.cleanup_thread (p);
/*
 * sgen_thread_unregister:
 *
 *   Tear down per-thread GC state as a thread exits: detach it from the
 * runtime, mark it dying, take the GC lock (parking if an STW handshake is
 * in flight), run detach callbacks and remset cleanup, and remove it from
 * the GC thread set.
 * NOTE(review): sampled listing -- lock release and some braces are elided
 * where the numeric prefixes jump.
 */
3810 sgen_thread_unregister (SgenThreadInfo *p)
3812 /* If a delegate is passed to native code and invoked on a thread we dont
3813 * know about, the jit will register it with mono_jit_thread_attach, but
3814 * we have no way of knowing when that thread goes away. SGen has a TSD
3815 * so we assume that if the domain is still registered, we can detach
3818 if (mono_domain_get ())
3819 mono_thread_detach (mono_thread_current ());
3821 p->thread_is_dying = TRUE;
3824 There is a race condition between a thread finishing executing and being removed
3825 from the GC thread set.
3826 This happens on posix systems when TLS data is being cleaned up: libpthread will
3827 set the thread_info slot to NULL before calling the cleanup function. This
3828 opens a window in which the thread is registered but has a NULL TLS.
3830 The suspend signal handler needs TLS data to know where to store thread state
3831 data or otherwise it will simply ignore the thread.
3833 This solution works because the thread doing STW will wait until all suspended
3834 threads have handshaked back, so there is no race between the doing_handshake test
3835 and the suspend_thread call.
3837 This is not required on systems that do synchronous STW as those can deal with
3838 the above race at suspend time.
3840 FIXME: I believe we could avoid this by using mono_thread_info_lookup when
3841 mono_thread_info_current returns NULL. Or fix mono_thread_info_lookup to do so.
3843 #if (defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED) || !defined(HAVE_PTHREAD_KILL)
/* Spin on the GC lock, yielding to an in-progress handshake if needed. */
3846 while (!TRYLOCK_GC) {
3847 if (!sgen_park_current_thread_if_doing_handshake (p))
3853 binary_protocol_thread_unregister ((gpointer)mono_thread_info_get_tid (p));
3854 SGEN_LOG (3, "unregister thread %p (%p)", p, (gpointer)mono_thread_info_get_tid (p));
3856 if (gc_callbacks.thread_detach_func) {
3857 gc_callbacks.thread_detach_func (p->runtime_data);
3858 p->runtime_data = NULL;
3860 sgen_wbarrier_cleanup_thread (p);
3862 mono_threads_unregister_current_thread (p);
/*
 * sgen_thread_attach:
 *
 *   Late-attach hook: ensure an already-registered thread has its runtime
 * data initialized via the attach callback.
 * NOTE(review): sampled listing -- lines between the comment and the `if'
 * are elided (the numeric prefixes jump from 3871 to 3875).
 */
3868 sgen_thread_attach (SgenThreadInfo *info)
3871 /*this is odd, can we get attached before the gc is inited?*/
3875 if (gc_callbacks.thread_attach_func && !info->runtime_data)
3876 info->runtime_data = gc_callbacks.thread_attach_func ();
3879 mono_gc_register_thread (void *baseptr)
3881 return mono_thread_info_attach (baseptr) != NULL;
/*
 * NOTE(review): sampled listing -- the lines between the function header and
 * the assert (numeric prefixes 3894-3897) are elided; they presumably fetch
 * the thread info under the GC lock -- confirm against the full source.
 */
3885 * mono_gc_set_stack_end:
3887 * Set the end of the current threads stack to STACK_END. The stack space between
3888 * STACK_END and the real end of the threads stack will not be scanned during collections.
3891 mono_gc_set_stack_end (void *stack_end)
3893 SgenThreadInfo *info;
3896 info = mono_thread_info_current ();
/* Only ever shrink the scanned range: the new end must be below the old. */
3898 g_assert (stack_end < info->stack_end);
3899 info->stack_end = stack_end;
3904 #if USE_PTHREAD_INTERCEPT
/*
 * mono_gc_pthread_create:
 *
 *   GC interposition point for pthread_create; with SGen no extra
 * bookkeeping is needed at creation time, so this is a plain pass-through.
 */
int
mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
{
	return pthread_create (new_thread, attr, start_routine, arg);
}
/*
 * mono_gc_pthread_join:
 *
 *   GC interposition point for pthread_join; pass-through under SGen.
 */
int
mono_gc_pthread_join (pthread_t thread, void **retval)
{
	return pthread_join (thread, retval);
}
/*
 * mono_gc_pthread_detach:
 *
 *   GC interposition point for pthread_detach; pass-through under SGen.
 */
int
mono_gc_pthread_detach (pthread_t thread)
{
	return pthread_detach (thread);
}
/*
 * mono_gc_pthread_exit:
 *
 *   GC interposition point for pthread_exit; pass-through under SGen.
 * Does not return.
 */
void
mono_gc_pthread_exit (void *retval)
{
	pthread_exit (retval);
}
3931 #endif /* USE_PTHREAD_INTERCEPT */
3934 * ######################################################################
3935 * ######## Write barriers
3936 * ######################################################################
3940 * Note: the write barriers first do the needed GC work and then do the actual store:
3941 * this way the value is visible to the conservative GC scan after the write barrier
3942 * itself. If a GC interrupts the barrier in the middle, value will be kept alive by
3943 * the conservative scan, otherwise by the remembered set scan.
3946 mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
3948 HEAVY_STAT (++stat_wbarrier_set_field);
3949 if (ptr_in_nursery (field_ptr)) {
3950 *(void**)field_ptr = value;
3953 SGEN_LOG (8, "Adding remset at %p", field_ptr);
3955 binary_protocol_wbarrier (field_ptr, value, value->vtable);
3957 remset.wbarrier_set_field (obj, field_ptr, value);
3961 mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
3963 HEAVY_STAT (++stat_wbarrier_set_arrayref);
3964 if (ptr_in_nursery (slot_ptr)) {
3965 *(void**)slot_ptr = value;
3968 SGEN_LOG (8, "Adding remset at %p", slot_ptr);
3970 binary_protocol_wbarrier (slot_ptr, value, value->vtable);
3972 remset.wbarrier_set_arrayref (arr, slot_ptr, value);
/*
 * mono_gc_wbarrier_arrayref_copy:
 *
 *   Write barrier for copying @count object references from @src_ptr to
 * @dest_ptr.  A destination inside the nursery needs no remset entries, so
 * a plain memmove suffices; otherwise the remset backend records and
 * performs the copy.  The binary-protocol loop logs each slot when
 * SGEN_BINARY_PROTOCOL is enabled.
 * NOTE(review): sampled listing -- the `return;`, loop declarations and
 * closing braces are elided where the numeric prefixes jump.
 */
3976 mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
3978 HEAVY_STAT (++stat_wbarrier_arrayref_copy);
3979 /*This check can be done without taking a lock since dest_ptr array is pinned*/
3980 if (ptr_in_nursery (dest_ptr) || count <= 0) {
3981 mono_gc_memmove (dest_ptr, src_ptr, count * sizeof (gpointer));
3985 #ifdef SGEN_BINARY_PROTOCOL
3988 for (i = 0; i < count; ++i) {
3989 gpointer dest = (gpointer*)dest_ptr + i;
3990 gpointer obj = *((gpointer*)src_ptr + i);
3992 binary_protocol_wbarrier (dest, obj, (gpointer)LOAD_VTABLE (obj));
3997 remset.wbarrier_arrayref_copy (dest_ptr, src_ptr, count);
4000 static char *found_obj;
4003 find_object_for_ptr_callback (char *obj, size_t size, void *user_data)
4005 char *ptr = user_data;
4007 if (ptr >= obj && ptr < obj + size) {
4008 g_assert (!found_obj);
/*
 * find_object_for_ptr:
 *
 *   Debugger helper: return the object containing address @ptr, searching
 * the nursery first, then the LOS, then (slowly) the whole major heap.
 * NOTE(review): sampled listing -- the `found_obj = NULL;' resets and
 * `return found_obj;' statements are elided where the numeric prefixes jump.
 */
4013 /* for use in the debugger */
4014 char* find_object_for_ptr (char *ptr);
4016 find_object_for_ptr (char *ptr)
4018 if (ptr >= nursery_section->data && ptr < nursery_section->end_data) {
4020 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
4021 find_object_for_ptr_callback, ptr, TRUE);
4027 sgen_los_iterate_objects (find_object_for_ptr_callback, ptr);
4032 * Very inefficient, but this is debugging code, supposed to
4033 * be called from gdb, so we don't care.
4036 major_collector.iterate_objects (TRUE, TRUE, find_object_for_ptr_callback, ptr);
/*
 * mono_gc_wbarrier_generic_nostore:
 *
 *   Record location @ptr in the remembered set after the caller has already
 * performed the store.  Skips locations in the nursery or on the stack, and
 * skips old->old references unless a concurrent collection is in progress
 * (the concurrent collector needs old->old locations too).
 * NOTE(review): sampled listing -- the `obj' declaration, `return;'
 * statements and some braces are elided where the numeric prefixes jump.
 */
4041 mono_gc_wbarrier_generic_nostore (gpointer ptr)
4045 HEAVY_STAT (++stat_wbarrier_generic_store);
4047 #ifdef XDOMAIN_CHECKS_IN_WBARRIER
4048 /* FIXME: ptr_in_heap must be called with the GC lock held */
/* Debug build: verify cross-appdomain references are allowed. */
4049 if (xdomain_checks && *(MonoObject**)ptr && ptr_in_heap (ptr)) {
4050 char *start = find_object_for_ptr (ptr);
4051 MonoObject *value = *(MonoObject**)ptr;
4055 MonoObject *obj = (MonoObject*)start;
4056 if (obj->vtable->domain != value->vtable->domain)
4057 g_assert (is_xdomain_ref_allowed (ptr, start, obj->vtable->domain));
4063 obj = *(gpointer*)ptr;
4065 binary_protocol_wbarrier (ptr, obj, (gpointer)LOAD_VTABLE (obj));
/* Nursery and stack locations are always scanned; no remset needed. */
4067 if (ptr_in_nursery (ptr) || ptr_on_stack (ptr)) {
4068 SGEN_LOG (8, "Skipping remset at %p", ptr);
4073 * We need to record old->old pointer locations for the
4074 * concurrent collector.
4076 if (!ptr_in_nursery (obj) && !concurrent_collection_in_progress) {
4077 SGEN_LOG (8, "Skipping remset at %p", ptr);
4081 SGEN_LOG (8, "Adding remset at %p", ptr);
4083 remset.wbarrier_generic_nostore (ptr);
4087 mono_gc_wbarrier_generic_store (gpointer ptr, MonoObject* value)
4089 SGEN_LOG (8, "Wbarrier store at %p to %p (%s)", ptr, value, value ? safe_name (value) : "null");
4090 *(void**)ptr = value;
4091 if (ptr_in_nursery (value))
4092 mono_gc_wbarrier_generic_nostore (ptr);
4093 sgen_dummy_use (value);
/*
 * mono_gc_wbarrier_value_copy_bitmap:
 *
 *   Copy @size bytes from @_src to @_dest word by word; words whose bit is
 * set in @bitmap are object references and go through the generic-store
 * write barrier.
 * NOTE(review): sampled listing -- the loop header, the plain-word copy arm
 * and the bitmap shift are elided where the numeric prefixes jump.
 */
4096 void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
4098 mword *dest = _dest;
/* Reference word: barriered store. */
4103 mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);
4108 size -= SIZEOF_VOID_P;
/*
 * scan_object_for_binary_protocol_copy_wbarrier:
 *
 *   Log a binary-protocol write-barrier record for every reference slot of
 * the object at @start as if it had been copied to @dest.  The body is
 * generated by sgen-scan-object.h using the HANDLE_PTR macro below; only
 * compiled with SGEN_BINARY_PROTOCOL.
 */
4113 #ifdef SGEN_BINARY_PROTOCOL
/* For each reference slot: translate its offset into @dest and log it. */
4115 #define HANDLE_PTR(ptr,obj) do { \
4116 gpointer o = *(gpointer*)(ptr); \
4118 gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
4119 binary_protocol_wbarrier (d, o, (gpointer) LOAD_VTABLE (o)); \
4124 scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
4126 #define SCAN_OBJECT_NOVTABLE
4127 #include "sgen-scan-object.h"
/*
 * mono_gc_wbarrier_value_copy:
 *
 *   Write barrier for copying @count value-type instances of @klass from
 * @src to @dest.  A destination in the nursery/on the stack, or a class
 * without reference fields, needs no remset: plain memmove.  Otherwise the
 * remset backend records and performs the copy.
 * NOTE(review): sampled listing -- the `return;' and some braces are elided
 * where the numeric prefixes jump.
 */
4132 mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
4134 HEAVY_STAT (++stat_wbarrier_value_copy);
4135 g_assert (klass->valuetype);
4137 SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, klass->gc_descr, klass->name, klass);
4139 if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !SGEN_CLASS_HAS_REFERENCES (klass)) {
4140 size_t element_size = mono_class_value_size (klass, NULL);
4141 size_t size = count * element_size;
4142 mono_gc_memmove (dest, src, size);
4146 #ifdef SGEN_BINARY_PROTOCOL
4148 size_t element_size = mono_class_value_size (klass, NULL);
4150 for (i = 0; i < count; ++i) {
/* The scan helper expects an object start, hence the -sizeof(MonoObject)
 * bias applied to the unboxed source element. */
4151 scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
4152 (char*)src + i * element_size - sizeof (MonoObject),
4153 (mword) klass->gc_descr);
4158 remset.wbarrier_value_copy (dest, src, count, klass);
/*
 * NOTE(review): sampled listing -- the `size' declaration's type line, a
 * `return;' and some braces are elided where the numeric prefixes jump.
 */
4162 * mono_gc_wbarrier_object_copy:
4164 * Write barrier to call when obj is the result of a clone or copy of an object.
4167 mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
4171 HEAVY_STAT (++stat_wbarrier_object_copy);
/* Nursery/stack destination: copy the payload (past the header) directly,
 * no remset needed. */
4173 if (ptr_in_nursery (obj) || ptr_on_stack (obj)) {
4174 size = mono_object_class (obj)->instance_size;
4175 mono_gc_memmove ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
4176 size - sizeof (MonoObject));
4180 #ifdef SGEN_BINARY_PROTOCOL
4181 scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
/* Otherwise the remset backend records the copy and performs it. */
4184 remset.wbarrier_object_copy (obj, src);
4189 * ######################################################################
4190 * ######## Other mono public interface functions.
4191 * ######################################################################
/* Buffered heap-walk state for mono_gc_walk_heap: references found in the
 * current object are batched in refs/offsets (up to REFS_SIZE) before the
 * user callback is invoked.
 * NOTE(review): the struct's opening/closing lines and the count/called/data
 * fields are elided in this sampled listing. */
4194 #define REFS_SIZE 128
4197 MonoGCReferences callback;
4201 MonoObject *refs [REFS_SIZE];
4202 uintptr_t offsets [REFS_SIZE];
4206 #define HANDLE_PTR(ptr,obj) do { \
4208 if (hwi->count == REFS_SIZE) { \
4209 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data); \
4213 hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start; \
4214 hwi->refs [hwi->count++] = *(ptr); \
/* Instantiates the generic object scanner with the HANDLE_PTR hook above,
 * gathering the outgoing references of the object at `start'. */
4219 collect_references (HeapWalkInfo *hwi, char *start, size_t size)
4221 #include "sgen-scan-object.h"
/* Callback invoked for every live object during the heap walk: collect its
 * references, then flush whatever remains in the buffer to the user
 * callback (also called when the callback has not fired yet for this
 * object, so objects with no references are still reported once). */
4225 walk_references (char *start, size_t size, void *data)
4227 HeapWalkInfo *hwi = data;
4230 collect_references (hwi, start, size);
4231 if (hwi->count || !hwi->called)
4232 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
4236 * mono_gc_walk_heap:
4237 * @flags: flags for future use
4238 * @callback: a function pointer called for each object in the heap
4239 * @data: a user data pointer that is passed to callback
4241 * This function can be used to iterate over all the live objects in the heap:
4242 * for each object, @callback is invoked, providing info about the object's
4243 * location in memory, its class, its size and the objects it references.
4244 * For each referenced object it's offset from the object address is
4245 * reported in the offsets array.
4246 * The object references may be buffered, so the callback may be invoked
4247 * multiple times for the same object: in all but the first call, the size
4248 * argument will be zero.
4249 * Note that this function can be only called in the #MONO_GC_EVENT_PRE_START_WORLD
4250 * profiler event handler.
4252 * Returns: a non-zero value if the GC doesn't support heap walking
4255 mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
4260 hwi.callback = callback;
/* Walk the three spaces in turn: nursery, major heap, LOS.  Nursery
 * fragments are cleared first so the linear scan sees no free gaps. */
4263 sgen_clear_nursery_fragments ();
4264 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, walk_references, &hwi, FALSE);
4266 major_collector.iterate_objects (TRUE, TRUE, walk_references, &hwi);
4267 sgen_los_iterate_objects (walk_references, &hwi);
/* Trigger a synchronous collection of `generation' at user request. */
4273 mono_gc_collect (int generation)
4278 sgen_perform_collection (0, generation, "user request", TRUE);
/* Highest generation number this GC exposes. */
4283 mono_gc_max_generation (void)
/* Number of collections performed so far: minor GCs for generation 0,
 * major GCs for any other generation. */
4289 mono_gc_collection_count (int generation)
4291 if (generation == 0)
4292 return stat_minor_gcs;
4293 return stat_major_gcs;
/* Total bytes currently in use: LOS, the occupied part of the nursery,
 * and the major heap. */
4297 mono_gc_get_used_size (void)
4301 tot = los_memory_usage;
4302 tot += nursery_section->next_data - nursery_section->data;
4303 tot += major_collector.get_used_size ();
4304 /* FIXME: account for pinned objects */
/* Suppress collections until mono_gc_enable () is called. */
4310 mono_gc_disable (void)
/* Re-enable collections after mono_gc_disable (). */
4318 mono_gc_enable (void)
/* Size threshold above which allocations go to the large object space. */
4326 mono_gc_get_los_limit (void)
4328 return MAX_SMALL_OBJ_SIZE;
/* Whether user-defined mark routines are supported. */
4332 mono_gc_user_markers_supported (void)
4338 mono_object_is_alive (MonoObject* o)
/* Generation of `obj': 0 while it lives in the nursery. */
4344 mono_gc_get_generation (MonoObject *obj)
4346 if (ptr_in_nursery (obj))
4352 mono_gc_enable_events (void)
/* Register a weak link to `obj' at `link_addr'; `track' selects
 * track-resurrection semantics. */
4357 mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track)
4359 sgen_register_disappearing_link (obj, link_addr, track, FALSE);
/* Unregister the weak link at `link_addr' (a NULL object clears it). */
4363 mono_gc_weak_link_remove (void **link_addr)
4365 sgen_register_disappearing_link (NULL, link_addr, FALSE, FALSE);
/* Dereference a weak link, unmasking the stored pointer. */
4369 mono_gc_weak_link_get (void **link_addr)
4372 * We must only load *link_addr once because it might change
4373 * under our feet, and REVEAL_POINTER (NULL) results in an
4374 * invalid reference.
4376 void *ptr = *link_addr;
4381 * During the second bridge processing step the world is
4382 * running again. That step processes all weak links once
4383 * more to null those that refer to dead objects. Before that
4384 * is completed, those links must not be followed, so we
4385 * conservatively wait for bridge processing when any weak
4386 * link is dereferenced.
4388 if (G_UNLIKELY (bridge_processing_in_progress))
4389 mono_gc_wait_for_bridge_processing ();
4391 return (MonoObject*) REVEAL_POINTER (ptr);
/* Register an ephemeron array: prepend a link node to the global
 * ephemeron_list so the collector can find and process its entries. */
4395 mono_gc_ephemeron_array_add (MonoObject *obj)
4397 EphemeronLinkNode *node;
4401 node = sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
4406 node->array = (char*)obj;
4407 node->next = ephemeron_list;
4408 ephemeron_list = node;
4410 SGEN_LOG (5, "Registered ephemeron array %p", obj);
/* Run `func (data)' while holding the GC interruption lock and return its
 * result. */
4417 mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
4421 result = func (data);
4422 UNLOCK_INTERRUPTION;
/* TRUE if the calling thread is registered with the thread machinery. */
4427 mono_gc_is_gc_thread (void)
4431 result = mono_thread_info_current () != NULL;
/* A method is critical if either the runtime or sgen considers it so
 * (sgen's critical methods are the write barrier wrapper and the managed
 * allocators, see sgen_is_critical_method). */
4437 is_critical_method (MonoMethod *method)
4439 return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
/*
 * mono_gc_base_init:
 *
 * One-time initialization of the sgen GC: thread machinery, internal
 * allocators, collector selection, and parsing of the MONO_GC_PARAMS and
 * MONO_GC_DEBUG environment variables.  Safe to call from multiple
 * threads; only the first caller performs the work.
 *
 * Fixes in this revision:
 *  - default-allowance-ratio upper-bound check and its error message
 *    compared/printed SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO twice; the
 *    upper bound is SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO.
 *  - the parse-error message for default-allowance-ratio wrongly named
 *    save-target-ratio.
 *  - dead `!major_collector_opt' tests removed from the else-if chain of
 *    the major-collector selection (the first branch already handles NULL).
 */
4443 mono_gc_base_init (void)
4445 MonoThreadInfoCallbacks cb;
4448 char *major_collector_opt = NULL;
4449 char *minor_collector_opt = NULL;
4451 glong soft_limit = 0;
4455 gboolean debug_print_allowance = FALSE;
4456 double allowance_ratio = 0, save_target = 0;
/* Only the first caller initializes; racing callers spin until done. */
4459 result = InterlockedCompareExchange (&gc_initialized, -1, 0);
4462 /* already inited */
4465 /* being inited by another thread */
4469 /* we will init it */
4472 g_assert_not_reached ();
4474 } while (result != 0);
4476 LOCK_INIT (gc_mutex);
4478 pagesize = mono_pagesize ();
4479 gc_debug_file = stderr;
4481 cb.thread_register = sgen_thread_register;
4482 cb.thread_unregister = sgen_thread_unregister;
4483 cb.thread_attach = sgen_thread_attach;
4484 cb.mono_method_is_critical = (gpointer)is_critical_method;
4486 cb.mono_gc_pthread_create = (gpointer)mono_gc_pthread_create;
4489 mono_threads_init (&cb, sizeof (SgenThreadInfo));
4491 LOCK_INIT (sgen_interruption_mutex);
4492 LOCK_INIT (pin_queue_mutex);
4494 init_user_copy_or_mark_key ();
/* First pass over MONO_GC_PARAMS: only the collector choices, which must
 * be known before the remaining options can be validated. */
4496 if ((env = getenv ("MONO_GC_PARAMS"))) {
4497 opts = g_strsplit (env, ",", -1);
4498 for (ptr = opts; *ptr; ++ptr) {
4500 if (g_str_has_prefix (opt, "major=")) {
4501 opt = strchr (opt, '=') + 1;
4502 major_collector_opt = g_strdup (opt);
4503 } else if (g_str_has_prefix (opt, "minor=")) {
4504 opt = strchr (opt, '=') + 1;
4505 minor_collector_opt = g_strdup (opt);
4513 sgen_init_internal_allocator ();
4514 sgen_init_nursery_allocator ();
4516 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION, SGEN_SIZEOF_GC_MEM_SECTION);
4517 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FINALIZE_READY_ENTRY, sizeof (FinalizeReadyEntry));
4518 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE, sizeof (GrayQueueSection));
4519 g_assert (sizeof (GenericStoreRememberedSet) == sizeof (gpointer) * STORE_REMSET_BUFFER_SIZE);
4520 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_STORE_REMSET, sizeof (GenericStoreRememberedSet));
4521 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));
4523 #ifndef HAVE_KW_THREAD
4524 mono_native_tls_alloc (&thread_info_key, NULL);
4528 * This needs to happen before any internal allocations because
4529 * it inits the small id which is required for hazard pointer
4534 mono_thread_info_attach (&dummy);
/* Instantiate the chosen minor collector (default: simple). */
4536 if (!minor_collector_opt) {
4537 sgen_simple_nursery_init (&sgen_minor_collector);
4539 if (!strcmp (minor_collector_opt, "simple"))
4540 sgen_simple_nursery_init (&sgen_minor_collector);
4541 else if (!strcmp (minor_collector_opt, "split"))
4542 sgen_split_nursery_init (&sgen_minor_collector);
4544 fprintf (stderr, "Unknown minor collector `%s'.\n", minor_collector_opt);
/* Instantiate the chosen major collector (default: marksweep).  The NULL
 * case is fully handled by the first branch, so the later branches only
 * need the string comparison. */
4549 if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep")) {
4550 sgen_marksweep_init (&major_collector);
4551 } else if (!strcmp (major_collector_opt, "marksweep-fixed")) {
4552 sgen_marksweep_fixed_init (&major_collector);
4553 } else if (!strcmp (major_collector_opt, "marksweep-par")) {
4554 sgen_marksweep_par_init (&major_collector);
4555 } else if (!strcmp (major_collector_opt, "marksweep-fixed-par")) {
4556 sgen_marksweep_fixed_par_init (&major_collector);
4557 } else if (!strcmp (major_collector_opt, "marksweep-conc")) {
4558 sgen_marksweep_conc_init (&major_collector);
4559 } else if (!strcmp (major_collector_opt, "copying")) {
4560 sgen_copying_init (&major_collector);
4562 fprintf (stderr, "Unknown major collector `%s'.\n", major_collector_opt);
4566 #ifdef SGEN_HAVE_CARDTABLE
4567 use_cardtable = major_collector.supports_cardtable;
4569 use_cardtable = FALSE;
/* Default worker count: one per CPU, capped at 16. */
4572 num_workers = mono_cpu_count ();
4573 g_assert (num_workers > 0);
4574 if (num_workers > 16)
4577 /* Keep this the default for now */
4578 /* Precise marking is broken on all supported targets. Disable until fixed. */
4579 conservative_stack_mark = TRUE;
4581 sgen_nursery_size = DEFAULT_NURSERY_SIZE;
/* Second pass over MONO_GC_PARAMS: every option except major=/minor=,
 * which were consumed in the first pass. */
4584 for (ptr = opts; *ptr; ++ptr) {
4586 if (g_str_has_prefix (opt, "major="))
4588 if (g_str_has_prefix (opt, "minor="))
4590 if (g_str_has_prefix (opt, "wbarrier=")) {
4591 opt = strchr (opt, '=') + 1;
4592 if (strcmp (opt, "remset") == 0) {
4593 if (major_collector.is_concurrent) {
4594 fprintf (stderr, "The concurrent collector does not support the SSB write barrier.\n");
4597 use_cardtable = FALSE;
4598 } else if (strcmp (opt, "cardtable") == 0) {
4599 if (!use_cardtable) {
4600 if (major_collector.supports_cardtable)
4601 fprintf (stderr, "The cardtable write barrier is not supported on this platform.\n");
4603 fprintf (stderr, "The major collector does not support the cardtable write barrier.\n");
4607 fprintf (stderr, "wbarrier must either be `remset' or `cardtable'.");
4612 if (g_str_has_prefix (opt, "max-heap-size=")) {
4613 opt = strchr (opt, '=') + 1;
4614 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &max_heap)) {
4615 if ((max_heap & (mono_pagesize () - 1))) {
4616 fprintf (stderr, "max-heap-size size must be a multiple of %d.\n", mono_pagesize ());
4620 fprintf (stderr, "max-heap-size must be an integer.\n");
4625 if (g_str_has_prefix (opt, "soft-heap-limit=")) {
4626 opt = strchr (opt, '=') + 1;
4627 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &soft_limit)) {
4628 if (soft_limit <= 0) {
4629 fprintf (stderr, "soft-heap-limit must be positive.\n");
4633 fprintf (stderr, "soft-heap-limit must be an integer.\n");
4638 if (g_str_has_prefix (opt, "workers=")) {
4641 if (!major_collector.is_parallel) {
4642 fprintf (stderr, "The workers= option can only be used for parallel collectors.");
4645 opt = strchr (opt, '=') + 1;
4646 val = strtol (opt, &endptr, 10);
4647 if (!*opt || *endptr) {
4648 fprintf (stderr, "Cannot parse the workers= option value.");
4651 if (val <= 0 || val > 16) {
4652 fprintf (stderr, "The number of workers must be in the range 1 to 16.");
4655 num_workers = (int)val;
4658 if (g_str_has_prefix (opt, "stack-mark=")) {
4659 opt = strchr (opt, '=') + 1;
4660 if (!strcmp (opt, "precise")) {
4661 conservative_stack_mark = FALSE;
4662 } else if (!strcmp (opt, "conservative")) {
4663 conservative_stack_mark = TRUE;
4665 fprintf (stderr, "Invalid value '%s' for stack-mark= option, possible values are: 'precise', 'conservative'.\n", opt);
4670 if (g_str_has_prefix (opt, "bridge=")) {
4671 opt = strchr (opt, '=') + 1;
4672 sgen_register_test_bridge_callbacks (g_strdup (opt));
4676 if (g_str_has_prefix (opt, "nursery-size=")) {
4678 opt = strchr (opt, '=') + 1;
4679 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &val)) {
4680 sgen_nursery_size = val;
4681 #ifdef SGEN_ALIGN_NURSERY
4682 if ((val & (val - 1))) {
4683 fprintf (stderr, "The nursery size must be a power of two.\n");
4687 if (val < SGEN_MAX_NURSERY_WASTE) {
4688 fprintf (stderr, "The nursery size must be at least %d bytes.\n", SGEN_MAX_NURSERY_WASTE);
4692 sgen_nursery_bits = 0;
4693 while (1 << (++ sgen_nursery_bits) != sgen_nursery_size)
4697 fprintf (stderr, "nursery-size must be an integer.\n");
4703 if (g_str_has_prefix (opt, "save-target-ratio=")) {
4705 opt = strchr (opt, '=') + 1;
4706 save_target = strtod (opt, &endptr);
4707 if (endptr == opt) {
4708 fprintf (stderr, "save-target-ratio must be a number.");
4711 if (save_target < SGEN_MIN_SAVE_TARGET_RATIO || save_target > SGEN_MAX_SAVE_TARGET_RATIO) {
4712 fprintf (stderr, "save-target-ratio must be between %.2f - %.2f.", SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO);
4717 if (g_str_has_prefix (opt, "default-allowance-ratio=")) {
4719 opt = strchr (opt, '=') + 1;
4721 allowance_ratio = strtod (opt, &endptr);
4722 if (endptr == opt) {
/* FIX: message previously named save-target-ratio. */
4723 fprintf (stderr, "default-allowance-ratio must be a number.");
/* FIX: upper bound previously compared (and printed) the MIN macro. */
4726 if (allowance_ratio < SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO || allowance_ratio > SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO) {
4727 fprintf (stderr, "default-allowance-ratio must be between %.2f - %.2f.", SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO);
/* Unrecognized option: give the collectors a chance before erroring. */
4733 if (major_collector.handle_gc_param && major_collector.handle_gc_param (opt))
4736 if (sgen_minor_collector.handle_gc_param && sgen_minor_collector.handle_gc_param (opt))
4739 fprintf (stderr, "MONO_GC_PARAMS must be a comma-delimited list of one or more of the following:\n");
4740 fprintf (stderr, " max-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
4741 fprintf (stderr, " soft-heap-limit=n (where N is an integer, possibly with a k, m or a g suffix)\n");
4742 fprintf (stderr, " nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
4743 fprintf (stderr, " major=COLLECTOR (where COLLECTOR is `marksweep', `marksweep-par', 'marksweep-fixed', 'marksweep-fixed-par' or `copying')\n");
4744 fprintf (stderr, " minor=COLLECTOR (where COLLECTOR is `simple' or `split')\n");
4745 fprintf (stderr, " wbarrier=WBARRIER (where WBARRIER is `remset' or `cardtable')\n");
4746 fprintf (stderr, " stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
4747 if (major_collector.print_gc_param_usage)
4748 major_collector.print_gc_param_usage ();
4749 if (sgen_minor_collector.print_gc_param_usage)
4750 sgen_minor_collector.print_gc_param_usage ();
4751 fprintf (stderr, " Experimental options:\n");
4752 fprintf (stderr, " save-target-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO);
4753 fprintf (stderr, " default-allowance-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO);
4759 if (major_collector.is_parallel)
4760 sgen_workers_init (num_workers);
4761 else if (major_collector.is_concurrent)
4762 sgen_workers_init (1);
4764 if (major_collector_opt)
4765 g_free (major_collector_opt);
4767 if (minor_collector_opt)
4768 g_free (minor_collector_opt);
4770 if (major_collector.is_concurrent)
4771 LOCK_INIT (workers_distribute_gray_queue_mutex);
/* Parse MONO_GC_DEBUG: a leading digit sets the debug level, optionally
 * followed by :filename for the log; the rest are named debug switches. */
4775 if ((env = getenv ("MONO_GC_DEBUG"))) {
4776 opts = g_strsplit (env, ",", -1);
4777 for (ptr = opts; ptr && *ptr; ptr ++) {
4779 if (opt [0] >= '0' && opt [0] <= '9') {
4780 gc_debug_level = atoi (opt);
4786 char *rf = g_strdup_printf ("%s.%d", opt, GetCurrentProcessId ());
4788 char *rf = g_strdup_printf ("%s.%d", opt, getpid ());
4790 gc_debug_file = fopen (rf, "wb");
4792 gc_debug_file = stderr;
4795 } else if (!strcmp (opt, "print-allowance")) {
4796 debug_print_allowance = TRUE;
4797 } else if (!strcmp (opt, "print-pinning")) {
4798 do_pin_stats = TRUE;
4799 } else if (!strcmp (opt, "verify-before-allocs")) {
4800 verify_before_allocs = 1;
4801 has_per_allocation_action = TRUE;
4802 } else if (g_str_has_prefix (opt, "verify-before-allocs=")) {
4803 char *arg = strchr (opt, '=') + 1;
4804 verify_before_allocs = atoi (arg);
4805 has_per_allocation_action = TRUE;
4806 } else if (!strcmp (opt, "collect-before-allocs")) {
4807 collect_before_allocs = 1;
4808 has_per_allocation_action = TRUE;
4809 } else if (g_str_has_prefix (opt, "collect-before-allocs=")) {
4810 char *arg = strchr (opt, '=') + 1;
4811 has_per_allocation_action = TRUE;
4812 collect_before_allocs = atoi (arg);
4813 } else if (!strcmp (opt, "verify-before-collections")) {
4814 whole_heap_check_before_collection = TRUE;
4815 } else if (!strcmp (opt, "check-at-minor-collections")) {
4816 consistency_check_at_minor_collection = TRUE;
4817 nursery_clear_policy = CLEAR_AT_GC;
4818 } else if (!strcmp (opt, "xdomain-checks")) {
4819 xdomain_checks = TRUE;
4820 } else if (!strcmp (opt, "clear-at-gc")) {
4821 nursery_clear_policy = CLEAR_AT_GC;
4822 } else if (!strcmp (opt, "clear-nursery-at-gc")) {
4823 nursery_clear_policy = CLEAR_AT_GC;
4824 } else if (!strcmp (opt, "check-scan-starts")) {
4825 do_scan_starts_check = TRUE;
4826 } else if (!strcmp (opt, "verify-nursery-at-minor-gc")) {
4827 do_verify_nursery = TRUE;
4828 } else if (!strcmp (opt, "dump-nursery-at-minor-gc")) {
4829 do_dump_nursery_content = TRUE;
4830 } else if (!strcmp (opt, "no-managed-allocator")) {
4831 sgen_set_use_managed_allocator (FALSE);
4832 } else if (!strcmp (opt, "disable-minor")) {
4833 disable_minor_collections = TRUE;
4834 } else if (!strcmp (opt, "disable-major")) {
4835 disable_major_collections = TRUE;
4836 } else if (g_str_has_prefix (opt, "heap-dump=")) {
4837 char *filename = strchr (opt, '=') + 1;
4838 nursery_clear_policy = CLEAR_AT_GC;
4839 heap_dump_file = fopen (filename, "w");
4840 if (heap_dump_file) {
4841 fprintf (heap_dump_file, "<sgen-dump>\n");
4842 do_pin_stats = TRUE;
4844 #ifdef SGEN_BINARY_PROTOCOL
4845 } else if (g_str_has_prefix (opt, "binary-protocol=")) {
4846 char *filename = strchr (opt, '=') + 1;
4847 binary_protocol_init (filename);
4849 fprintf (stderr, "Warning: Cardtable write barriers will not be binary-protocolled.\n");
4852 fprintf (stderr, "Invalid format for the MONO_GC_DEBUG env variable: '%s'\n", env);
4853 fprintf (stderr, "The format is: MONO_GC_DEBUG=[l[:filename]|<option>]+ where l is a debug level 0-9.\n");
4854 fprintf (stderr, "Valid options are:\n");
4855 fprintf (stderr, " collect-before-allocs[=<n>]\n");
4856 fprintf (stderr, " verify-before-allocs[=<n>]\n");
4857 fprintf (stderr, " check-at-minor-collections\n");
4858 fprintf (stderr, " verify-before-collections\n");
4859 fprintf (stderr, " verify-nursery-at-minor-gc\n");
4860 fprintf (stderr, " dump-nursery-at-minor-gc\n");
4861 fprintf (stderr, " disable-minor\n");
4862 fprintf (stderr, " disable-major\n");
4863 fprintf (stderr, " xdomain-checks\n");
4864 fprintf (stderr, " clear-at-gc\n");
4865 fprintf (stderr, " clear-nursery-at-gc\n");
4866 fprintf (stderr, " check-scan-starts\n");
4867 fprintf (stderr, " no-managed-allocator\n");
4868 fprintf (stderr, " print-allowance\n");
4869 fprintf (stderr, " print-pinning\n");
4870 fprintf (stderr, " heap-dump=<filename>\n");
4871 #ifdef SGEN_BINARY_PROTOCOL
4872 fprintf (stderr, " binary-protocol=<filename>\n");
/* Some debug facilities are incompatible with the parallel collector. */
4880 if (major_collector.is_parallel) {
4881 if (heap_dump_file) {
4882 fprintf (stderr, "Error: Cannot do heap dump with the parallel collector.\n");
4886 fprintf (stderr, "Error: Cannot gather pinning statistics with the parallel collector.\n");
4891 if (major_collector.post_param_init)
4892 major_collector.post_param_init ();
4894 sgen_memgov_init (max_heap, soft_limit, debug_print_allowance, allowance_ratio, save_target);
/* Set up the remembered set: card table when supported/selected,
 * otherwise the sequential store buffer. */
4896 memset (&remset, 0, sizeof (remset));
4898 #ifdef SGEN_HAVE_CARDTABLE
4900 sgen_card_table_init (&remset);
4903 sgen_ssb_init (&remset);
4905 if (remset.register_thread)
4906 remset.register_thread (mono_thread_info_current ());
/* Short name of this GC implementation. */
4912 mono_gc_get_gc_name (void)
/* Lazily created managed write-barrier wrapper, built by
 * mono_gc_get_write_barrier (). */
4917 static MonoMethod *write_barrier_method;
/* TRUE for sgen's critical methods: the managed write barrier wrapper and
 * the managed allocators. */
4920 sgen_is_critical_method (MonoMethod *method)
4922 return (method == write_barrier_method || sgen_is_managed_allocator (method));
/* TRUE if any critical method exists yet (write barrier wrapper created or
 * a managed allocator present). */
4926 sgen_has_critical_method (void)
4928 return write_barrier_method || sgen_has_managed_allocator ();
/*
 * emit_nursery_check:
 *
 * Emit the fast-path IL that filters out stores needing no write barrier.
 * Fills nursery_check_return_labels (up to 3 entries, zero-initialized)
 * with forward branches the caller must patch to its RET.  With an
 * aligned nursery the check is a shift-and-compare against the nursery
 * prefix; otherwise explicit start/end range compares are emitted.
 *
 * FIX: the locals were declared `label_continue1'/`label_continue2' while
 * every use spells `label_continue_1'/`label_continue_2' — the declaration
 * is corrected to the underscored names actually used below.
 */
4932 emit_nursery_check (MonoMethodBuilder *mb, int *nursery_check_return_labels)
4934 memset (nursery_check_return_labels, 0, sizeof (int) * 3);
4935 #ifdef SGEN_ALIGN_NURSERY
4936 // if (ptr_in_nursery (ptr)) return;
4938 * Masking out the bits might be faster, but we would have to use 64 bit
4939 * immediates, which might be slower.
4941 mono_mb_emit_ldarg (mb, 0);
4942 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
4943 mono_mb_emit_byte (mb, CEE_SHR_UN);
4944 mono_mb_emit_icon (mb, (mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
4945 nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BEQ);
4947 if (!major_collector.is_concurrent) {
4948 // if (!ptr_in_nursery (*ptr)) return;
4949 mono_mb_emit_ldarg (mb, 0);
4950 mono_mb_emit_byte (mb, CEE_LDIND_I);
4951 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
4952 mono_mb_emit_byte (mb, CEE_SHR_UN);
4953 mono_mb_emit_icon (mb, (mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
4954 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BNE_UN);
4957 int label_continue_1, label_continue_2;
4958 int dereferenced_var;
4960 // if (ptr < (sgen_get_nursery_start ())) goto continue;
4961 mono_mb_emit_ldarg (mb, 0);
4962 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
4963 label_continue_1 = mono_mb_emit_branch (mb, CEE_BLT);
4965 // if (ptr >= sgen_get_nursery_end ())) goto continue;
4966 mono_mb_emit_ldarg (mb, 0);
4967 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
4968 label_continue_2 = mono_mb_emit_branch (mb, CEE_BGE);
4971 nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BR);
4974 mono_mb_patch_branch (mb, label_continue_1);
4975 mono_mb_patch_branch (mb, label_continue_2);
4977 // Dereference and store in local var
4978 dereferenced_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
4979 mono_mb_emit_ldarg (mb, 0);
4980 mono_mb_emit_byte (mb, CEE_LDIND_I);
4981 mono_mb_emit_stloc (mb, dereferenced_var);
4983 if (!major_collector.is_concurrent) {
4984 // if (*ptr < sgen_get_nursery_start ()) return;
4985 mono_mb_emit_ldloc (mb, dereferenced_var);
4986 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
4987 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BLT);
4989 // if (*ptr >= sgen_get_nursery_end ()) return;
4990 mono_mb_emit_ldloc (mb, dereferenced_var);
4991 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
4992 nursery_check_return_labels [2] = mono_mb_emit_branch (mb, CEE_BGE);
/*
 * mono_gc_get_write_barrier:
 *
 * Return (creating on first call) the managed IL wrapper implementing the
 * generic-store write barrier.  Three variants are emitted: a cardtable
 * marker, a TLS-based sequential-store-buffer fast path, or a plain icall
 * to mono_gc_wbarrier_generic_nostore.  Creation is guarded with
 * double-checked locking on `write_barrier_method'.
 */
4998 mono_gc_get_write_barrier (void)
5001 MonoMethodBuilder *mb;
5002 MonoMethodSignature *sig;
5003 #ifdef MANAGED_WBARRIER
5004 int i, nursery_check_labels [3];
5005 int label_no_wb_3, label_no_wb_4, label_need_wb, label_slow_path;
5006 int buffer_var, buffer_index_var, dummy_var;
5008 #ifdef HAVE_KW_THREAD
5009 int stack_end_offset = -1, store_remset_buffer_offset = -1;
5010 int store_remset_buffer_index_offset = -1, store_remset_buffer_index_addr_offset = -1;
/* Resolve the TLS offsets needed by the SSB fast path; all must exist. */
5012 MONO_THREAD_VAR_OFFSET (stack_end, stack_end_offset);
5013 g_assert (stack_end_offset != -1);
5014 MONO_THREAD_VAR_OFFSET (store_remset_buffer, store_remset_buffer_offset);
5015 g_assert (store_remset_buffer_offset != -1);
5016 MONO_THREAD_VAR_OFFSET (store_remset_buffer_index, store_remset_buffer_index_offset);
5017 g_assert (store_remset_buffer_index_offset != -1);
5018 MONO_THREAD_VAR_OFFSET (store_remset_buffer_index_addr, store_remset_buffer_index_addr_offset);
5019 g_assert (store_remset_buffer_index_addr_offset != -1);
5023 // FIXME: Maybe create a separate version for ctors (the branch would be
5024 // correctly predicted more times)
5025 if (write_barrier_method)
5026 return write_barrier_method;
5028 /* Create the IL version of mono_gc_barrier_generic_store () */
5029 sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
5030 sig->ret = &mono_defaults.void_class->byval_arg;
5031 sig->params [0] = &mono_defaults.int_class->byval_arg;
5033 mb = mono_mb_new (mono_defaults.object_class, "wbarrier", MONO_WRAPPER_WRITE_BARRIER);
5035 #ifdef MANAGED_WBARRIER
/* Cardtable variant: mark the card covering the stored-to address. */
5036 if (use_cardtable) {
5037 emit_nursery_check (mb, nursery_check_labels);
5039 addr = sgen_cardtable + ((address >> CARD_BITS) & CARD_MASK)
5043 LDC_PTR sgen_cardtable
5045 address >> CARD_BITS
5049 if (SGEN_HAVE_OVERLAPPING_CARDS) {
5050 LDC_PTR card_table_mask
5057 mono_mb_emit_ptr (mb, sgen_cardtable);
5058 mono_mb_emit_ldarg (mb, 0);
5059 mono_mb_emit_icon (mb, CARD_BITS);
5060 mono_mb_emit_byte (mb, CEE_SHR_UN);
5061 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
5062 mono_mb_emit_ptr (mb, (gpointer)CARD_MASK);
5063 mono_mb_emit_byte (mb, CEE_AND);
5065 mono_mb_emit_byte (mb, CEE_ADD);
5066 mono_mb_emit_icon (mb, 1);
5067 mono_mb_emit_byte (mb, CEE_STIND_I1);
/* Patch all early-return branches emitted by the nursery check. */
5070 for (i = 0; i < 3; ++i) {
5071 if (nursery_check_labels [i])
5072 mono_mb_patch_branch (mb, nursery_check_labels [i]);
5074 mono_mb_emit_byte (mb, CEE_RET);
/* SSB variant: append to the per-thread store remset buffer via TLS. */
5075 } else if (mono_runtime_has_tls_get ()) {
5076 emit_nursery_check (mb, nursery_check_labels);
5078 // if (ptr >= stack_end) goto need_wb;
5079 mono_mb_emit_ldarg (mb, 0);
5080 EMIT_TLS_ACCESS (mb, stack_end, stack_end_offset);
5081 label_need_wb = mono_mb_emit_branch (mb, CEE_BGE_UN);
5083 // if (ptr >= stack_start) return;
5084 dummy_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
5085 mono_mb_emit_ldarg (mb, 0);
5086 mono_mb_emit_ldloc_addr (mb, dummy_var);
5087 label_no_wb_3 = mono_mb_emit_branch (mb, CEE_BGE_UN);
5090 mono_mb_patch_branch (mb, label_need_wb);
5092 // buffer = STORE_REMSET_BUFFER;
5093 buffer_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
5094 EMIT_TLS_ACCESS (mb, store_remset_buffer, store_remset_buffer_offset);
5095 mono_mb_emit_stloc (mb, buffer_var);
5097 // buffer_index = STORE_REMSET_BUFFER_INDEX;
5098 buffer_index_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
5099 EMIT_TLS_ACCESS (mb, store_remset_buffer_index, store_remset_buffer_index_offset);
5100 mono_mb_emit_stloc (mb, buffer_index_var);
5102 // if (buffer [buffer_index] == ptr) return;
5103 mono_mb_emit_ldloc (mb, buffer_var);
5104 mono_mb_emit_ldloc (mb, buffer_index_var);
5105 g_assert (sizeof (gpointer) == 4 || sizeof (gpointer) == 8);
/* Scale the index by pointer size: shift by 2 (32-bit) or 3 (64-bit). */
5106 mono_mb_emit_icon (mb, sizeof (gpointer) == 4 ? 2 : 3);
5107 mono_mb_emit_byte (mb, CEE_SHL);
5108 mono_mb_emit_byte (mb, CEE_ADD);
5109 mono_mb_emit_byte (mb, CEE_LDIND_I);
5110 mono_mb_emit_ldarg (mb, 0);
5111 label_no_wb_4 = mono_mb_emit_branch (mb, CEE_BEQ);
5114 mono_mb_emit_ldloc (mb, buffer_index_var);
5115 mono_mb_emit_icon (mb, 1);
5116 mono_mb_emit_byte (mb, CEE_ADD);
5117 mono_mb_emit_stloc (mb, buffer_index_var);
5119 // if (buffer_index >= STORE_REMSET_BUFFER_SIZE) goto slow_path;
5120 mono_mb_emit_ldloc (mb, buffer_index_var);
5121 mono_mb_emit_icon (mb, STORE_REMSET_BUFFER_SIZE);
5122 label_slow_path = mono_mb_emit_branch (mb, CEE_BGE);
5124 // buffer [buffer_index] = ptr;
5125 mono_mb_emit_ldloc (mb, buffer_var);
5126 mono_mb_emit_ldloc (mb, buffer_index_var);
5127 g_assert (sizeof (gpointer) == 4 || sizeof (gpointer) == 8);
5128 mono_mb_emit_icon (mb, sizeof (gpointer) == 4 ? 2 : 3);
5129 mono_mb_emit_byte (mb, CEE_SHL);
5130 mono_mb_emit_byte (mb, CEE_ADD);
5131 mono_mb_emit_ldarg (mb, 0);
5132 mono_mb_emit_byte (mb, CEE_STIND_I);
5134 // STORE_REMSET_BUFFER_INDEX = buffer_index;
5135 EMIT_TLS_ACCESS (mb, store_remset_buffer_index_addr, store_remset_buffer_index_addr_offset);
5136 mono_mb_emit_ldloc (mb, buffer_index_var);
5137 mono_mb_emit_byte (mb, CEE_STIND_I);
5140 for (i = 0; i < 3; ++i) {
5141 if (nursery_check_labels [i])
5142 mono_mb_patch_branch (mb, nursery_check_labels [i]);
5144 mono_mb_patch_branch (mb, label_no_wb_3);
5145 mono_mb_patch_branch (mb, label_no_wb_4);
5146 mono_mb_emit_byte (mb, CEE_RET);
/* Slow path: full buffer — fall back to the unmanaged barrier. */
5149 mono_mb_patch_branch (mb, label_slow_path);
5151 mono_mb_emit_ldarg (mb, 0);
5152 mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
5153 mono_mb_emit_byte (mb, CEE_RET);
/* Fallback variant: always call the unmanaged barrier. */
5157 mono_mb_emit_ldarg (mb, 0);
5158 mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
5159 mono_mb_emit_byte (mb, CEE_RET);
5162 res = mono_mb_create_method (mb, sig, 16);
/* Publish with double-checked locking; discard if another thread won. */
5165 mono_loader_lock ();
5166 if (write_barrier_method) {
5167 /* Already created */
5168 mono_free_method (res);
5170 /* double-checked locking */
5171 mono_memory_barrier ();
5172 write_barrier_method = res;
5174 mono_loader_unlock ();
5176 return write_barrier_method;
/* Heap-allocated, human-readable GC description (caller frees). */
5180 mono_gc_get_description (void)
5182 return g_strdup ("sgen");
/* Desktop-mode tuning hint. */
5186 mono_gc_set_desktop_mode (void)
/* Whether this GC moves objects. */
5191 mono_gc_is_moving (void)
/* Whether collections are currently disabled. */
5197 mono_gc_is_disabled (void)
/* Windows DLL entry-point hook. */
5203 BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
/* Current policy controlling when nursery memory is cleared. */
5210 sgen_get_nursery_clear_policy (void)
5212 return nursery_clear_policy;
/* Lazily build a fake byte-array vtable (class name "array_filler_type",
 * element size 1) backed by static storage.  Presumably used to stamp
 * filler objects over unused heap space — confirm against callers. */
5216 sgen_get_array_fill_vtable (void)
5218 if (!array_fill_vtable) {
5219 static MonoClass klass;
5220 static MonoVTable vtable;
5223 MonoDomain *domain = mono_get_root_domain ();
5226 klass.element_class = mono_defaults.byte_class;
5228 klass.instance_size = sizeof (MonoArray);
5229 klass.sizes.element_size = 1;
5230 klass.name = "array_filler_type";
5232 vtable.klass = &klass;
5234 vtable.gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 1);
5237 array_fill_vtable = &vtable;
5239 return array_fill_vtable;
/* Release the global GC lock. */
5249 sgen_gc_unlock (void)
/* Forward live-block-range iteration to the active major collector. */
5255 sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
5257 major_collector.iterate_live_block_ranges (callback);
/* Have the major collector scan its card table into `queue'. */
5261 sgen_major_collector_scan_card_table (SgenGrayQueue *queue)
5263 major_collector.scan_card_table (FALSE, queue);
/* Accessor for the active major collector. */
5267 sgen_get_major_collector (void)
5269 return &major_collector;
/* Set or clear the per-thread gc_disabled flag for the calling thread. */
5272 void mono_gc_set_skip_thread (gboolean skip)
5274 SgenThreadInfo *info = mono_thread_info_current ();
5277 info->gc_disabled = skip;
/* Accessor for the active remembered-set implementation. */
5282 sgen_get_remset (void)
/* Extra vtable flag bits for `class': marks bridge classes so their
 * objects can be recognized during bridge processing. */
5288 mono_gc_get_vtable_bits (MonoClass *class)
5290 if (sgen_need_bridge_processing () && sgen_is_bridge_class (class))
5291 return SGEN_GC_BIT_BRIDGE_OBJECT;
/* Register a thread's signal altstack with the GC. */
5296 mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)
/* Debug helper: stop the world, clear nursery fragments, run a whole-heap
 * consistency check, then restart the world. */
5303 sgen_check_whole_heap_stw (void)
5305 sgen_stop_world (0);
5306 sgen_clear_nursery_fragments ();
5307 sgen_check_whole_heap ();
5308 sgen_restart_world (0, NULL);
/* Flush any buffered object-move events to the profiler and reset the
 * buffer index. */
5312 sgen_gc_event_moves (void)
5314 if (moved_objects_idx) {
5315 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
5316 moved_objects_idx = 0;
5320 #endif /* HAVE_SGEN_GC */