2 * sgen-gc.c: Simple generational GC.
5 * Paolo Molaro (lupus@ximian.com)
7 * Copyright 2005-2010 Novell, Inc (http://www.novell.com)
9 * Thread start/stop adapted from Boehm's GC:
10 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
11 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
12 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
13 * Copyright (c) 2000-2004 by Hewlett-Packard Company. All rights reserved.
15 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
16 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
18 * Permission is hereby granted to use or copy this program
19 * for any purpose, provided the above notices are retained on all copies.
20 * Permission to modify the code and to distribute modified code is granted,
21 * provided the above notices are retained, and a notice that the code was
22 * modified is included with the above copyright notice.
25 * Copyright 2001-2003 Ximian, Inc
26 * Copyright 2003-2010 Novell, Inc.
28 * Permission is hereby granted, free of charge, to any person obtaining
29 * a copy of this software and associated documentation files (the
30 * "Software"), to deal in the Software without restriction, including
31 * without limitation the rights to use, copy, modify, merge, publish,
32 * distribute, sublicense, and/or sell copies of the Software, and to
33 * permit persons to whom the Software is furnished to do so, subject to
34 * the following conditions:
36 * The above copyright notice and this permission notice shall be
37 * included in all copies or substantial portions of the Software.
39 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
40 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
41 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
42 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
43 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
44 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
45 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
48 * Important: allocation always provides zeroed memory, having to do
49 * a memset after allocation is deadly for performance.
50 * Memory usage at startup is currently as follows:
52 * 64 KB internal space
54 * We should provide a small memory config with half the sizes
56 * We currently try to make as few mono assumptions as possible:
57 * 1) 2-word header with no GC pointers in it (first vtable, second to store the
59 * 2) gc descriptor is the second word in the vtable (first word in the class)
60 * 3) 8 byte alignment is the minimum and enough (not true for special structures (SIMD), FIXME)
61 * 4) there is a function to get an object's size and the number of
62 * elements in an array.
63 * 5) we know the special way bounds are allocated for complex arrays
64 * 6) we know about proxies and how to treat them when domains are unloaded
66 * Always try to keep stack usage to a minimum: no recursive behaviour
67 * and no large stack allocs.
69 * General description.
70 * Objects are initially allocated in a nursery using a fast bump-pointer technique.
71 * When the nursery is full we start a nursery collection: this is performed with a
73 * When the old generation is full we start a copying GC of the old generation as well:
74 * this will be changed to mark&sweep with copying when fragmentation becomes too severe
75 * in the future. Maybe we'll even do both during the same collection like IMMIX.
77 * The things that complicate this description are:
78 * *) pinned objects: we can't move them so we need to keep track of them
79 * *) no precise info of the thread stacks and registers: we need to be able to
80 * quickly find the objects that may be referenced conservatively and pin them
81 * (this makes the first issues more important)
82 * *) large objects are too expensive to be dealt with using copying GC: we handle them
83 * with mark/sweep during major collections
84 * *) some objects need to not move even if they are small (interned strings, Type handles):
85 * we use mark/sweep for them, too: they are not allocated in the nursery, but inside
86 * PinnedChunks regions
92 *) we could have a function pointer in MonoClass to implement
93 customized write barriers for value types
95 *) investigate the stuff needed to advance a thread to a GC-safe
96 point (single-stepping, read from unmapped memory etc) and implement it.
97 This would enable us to inline allocations and write barriers, for example,
98 or at least parts of them, like the write barrier checks.
99 We may need this also for handling precise info on stacks, even simple things
100 as having uninitialized data on the stack and having to wait for the prolog
101 to zero it. Not an issue for the last frame that we scan conservatively.
102 We could always not trust the value in the slots anyway.
104 *) modify the jit to save info about references in stack locations:
105 this can be done just for locals as a start, so that at least
106 part of the stack is handled precisely.
108 *) test/fix endianness issues
110 *) Implement a card table as the write barrier instead of remembered
111 sets? Card tables are not easy to implement with our current
112 memory layout. We have several different kinds of major heap
113 objects: Small objects in regular blocks, small objects in pinned
114 chunks and LOS objects. If we just have a pointer we have no way
115 to tell which kind of object it points into, therefore we cannot
116 know where its card table is. The least we have to do to make
117 this happen is to get rid of write barriers for indirect stores.
120 *) Get rid of write barriers for indirect stores. We can do this by
121 telling the GC to wbarrier-register an object once we do an ldloca
122 or ldelema on it, and to unregister it once it's not used anymore
123 (it can only travel downwards on the stack). The problem with
124 unregistering is that it needs to happen eventually no matter
125 what, even if exceptions are thrown, the thread aborts, etc.
126 Rodrigo suggested that we could do only the registering part and
127 let the collector find out (pessimistically) when it's safe to
128 unregister, namely when the stack pointer of the thread that
129 registered the object is higher than it was when the registering
130 happened. This might make for a good first implementation to get
131 some data on performance.
133 *) Some sort of blacklist support? Blacklists is a concept from the
134 Boehm GC: if during a conservative scan we find pointers to an
135 area which we might use as heap, we mark that area as unusable, so
136 pointer retention by random pinning pointers is reduced.
138 *) experiment with max small object size (very small right now - 2kb,
139 because it's tied to the max freelist size)
141 *) add an option to mmap the whole heap in one chunk: it makes for many
142 simplifications in the checks (put the nursery at the top and just use a single
143 check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
144 not flexible (too much of the address space may be used by default or we can't
145 increase the heap as needed) and we'd need a race-free mechanism to return memory
146 back to the system (mprotect(PROT_NONE) will still keep the memory allocated if it
147 was written to, munmap is needed, but the following mmap may not find the same segment
150 *) memzero the major fragments after restarting the world and optionally a smaller
153 *) investigate having fragment zeroing threads
155 *) separate locks for finalization and other minor stuff to reduce
158 *) try a different copying order to improve memory locality
160 *) a thread abort after a store but before the write barrier will
161 prevent the write barrier from executing
163 *) specialized dynamically generated markers/copiers
165 *) Dynamically adjust TLAB size to the number of threads. If we have
166 too many threads that do allocation, we might need smaller TLABs,
167 and we might get better performance with larger TLABs if we only
168 have a handful of threads. We could sum up the space left in all
169 assigned TLABs and if that's more than some percentage of the
170 nursery size, reduce the TLAB size.
172 *) Explore placing unreachable objects on unused nursery memory.
173 Instead of memset'ng a region to zero, place an int[] covering it.
174 A good place to start is add_nursery_frag. The tricky thing here is
175 placing those objects atomically outside of a collection.
185 #include <semaphore.h>
194 #define _XOPEN_SOURCE
196 #include "metadata/metadata-internals.h"
197 #include "metadata/class-internals.h"
198 #include "metadata/gc-internal.h"
199 #include "metadata/object-internals.h"
200 #include "metadata/threads.h"
201 #include "metadata/sgen-gc.h"
202 #include "metadata/sgen-cardtable.h"
203 #include "metadata/sgen-protocol.h"
204 #include "metadata/sgen-archdep.h"
205 #include "metadata/sgen-bridge.h"
206 #include "metadata/mono-gc.h"
207 #include "metadata/method-builder.h"
208 #include "metadata/profiler-private.h"
209 #include "metadata/monitor.h"
210 #include "metadata/threadpool-internals.h"
211 #include "metadata/mempool-internals.h"
212 #include "metadata/marshal.h"
213 #include "utils/mono-mmap.h"
214 #include "utils/mono-time.h"
215 #include "utils/mono-semaphore.h"
216 #include "utils/mono-counters.h"
217 #include "utils/mono-proclib.h"
219 #include <mono/utils/memcheck.h>
221 #if defined(__MACH__)
222 #include "utils/mach-support.h"
225 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
229 #include "mono/cil/opcode.def"
235 #undef pthread_create
237 #undef pthread_detach
240 * ######################################################################
241 * ######## Types and constants used by the GC.
242 * ######################################################################
245 static int gc_initialized = 0;
246 /* If set, do a minor collection before every X allocation */
247 static guint32 collect_before_allocs = 0;
248 /* If set, do a heap consistency check before each minor collection */
249 static gboolean consistency_check_at_minor_collection = FALSE;
250 /* If set, check that there are no references to the domain left at domain unload */
251 static gboolean xdomain_checks = FALSE;
252 /* If not null, dump the heap after each collection into this file */
253 static FILE *heap_dump_file = NULL;
254 /* If set, mark stacks conservatively, even if precise marking is possible */
255 static gboolean conservative_stack_mark = FALSE;
256 /* If set, do a plausibility check on the scan_starts before and after
258 static gboolean do_scan_starts_check = FALSE;
260 #ifdef HEAVY_STATISTICS
261 static long long stat_objects_alloced = 0;
262 static long long stat_bytes_alloced = 0;
263 long long stat_objects_alloced_degraded = 0;
264 long long stat_bytes_alloced_degraded = 0;
265 static long long stat_bytes_alloced_los = 0;
267 long long stat_copy_object_called_nursery = 0;
268 long long stat_objects_copied_nursery = 0;
269 long long stat_copy_object_called_major = 0;
270 long long stat_objects_copied_major = 0;
272 long long stat_scan_object_called_nursery = 0;
273 long long stat_scan_object_called_major = 0;
275 long long stat_nursery_copy_object_failed_from_space = 0;
276 long long stat_nursery_copy_object_failed_forwarded = 0;
277 long long stat_nursery_copy_object_failed_pinned = 0;
279 static long long stat_store_remsets = 0;
280 static long long stat_store_remsets_unique = 0;
281 static long long stat_saved_remsets_1 = 0;
282 static long long stat_saved_remsets_2 = 0;
283 static long long stat_local_remsets_processed = 0;
284 static long long stat_global_remsets_added = 0;
285 static long long stat_global_remsets_readded = 0;
286 static long long stat_global_remsets_processed = 0;
287 static long long stat_global_remsets_discarded = 0;
289 static long long stat_wasted_fragments_used = 0;
290 static long long stat_wasted_fragments_bytes = 0;
292 static int stat_wbarrier_set_field = 0;
293 static int stat_wbarrier_set_arrayref = 0;
294 static int stat_wbarrier_arrayref_copy = 0;
295 static int stat_wbarrier_generic_store = 0;
296 static int stat_wbarrier_generic_store_remset = 0;
297 static int stat_wbarrier_set_root = 0;
298 static int stat_wbarrier_value_copy = 0;
299 static int stat_wbarrier_object_copy = 0;
302 static long long stat_pinned_objects = 0;
304 static long long time_minor_pre_collection_fragment_clear = 0;
305 static long long time_minor_pinning = 0;
306 static long long time_minor_scan_remsets = 0;
307 static long long time_minor_scan_card_table = 0;
308 static long long time_minor_scan_pinned = 0;
309 static long long time_minor_scan_registered_roots = 0;
310 static long long time_minor_scan_thread_data = 0;
311 static long long time_minor_finish_gray_stack = 0;
312 static long long time_minor_fragment_creation = 0;
314 static long long time_major_pre_collection_fragment_clear = 0;
315 static long long time_major_pinning = 0;
316 static long long time_major_scan_pinned = 0;
317 static long long time_major_scan_registered_roots = 0;
318 static long long time_major_scan_thread_data = 0;
319 static long long time_major_scan_alloc_pinned = 0;
320 static long long time_major_scan_finalized = 0;
321 static long long time_major_scan_big_objects = 0;
322 static long long time_major_finish_gray_stack = 0;
323 static long long time_major_free_bigobjs = 0;
324 static long long time_major_los_sweep = 0;
325 static long long time_major_sweep = 0;
326 static long long time_major_fragment_creation = 0;
328 #define DEBUG(level,a) do {if (G_UNLIKELY ((level) <= SGEN_MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) a;} while (0)
330 int gc_debug_level = 0;
/* Flush the GC debug log stream so any buffered diagnostic output
 * reaches gc_debug_file immediately (useful when attaching a debugger
 * or inspecting the log after a crash). */
335 mono_gc_flush_info (void)
337 	fflush (gc_debug_file);
342 * Define this to allow the user to change the nursery size by
343 * specifying its value in the MONO_GC_PARAMS environmental
344 * variable. See mono_gc_base_init for details.
346 #define USER_CONFIG 1
348 #define TV_DECLARE SGEN_TV_DECLARE
349 #define TV_GETTIME SGEN_TV_GETTIME
350 #define TV_ELAPSED SGEN_TV_ELAPSED
351 #define TV_ELAPSED_MS SGEN_TV_ELAPSED_MS
353 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
355 /* The method used to clear the nursery */
356 /* Clearing at nursery collections is the safest, but has bad interactions with caches.
357 * Clearing at TLAB creation is much faster, but more complex and it might expose hard
362 CLEAR_AT_TLAB_CREATION
363 } NurseryClearPolicy;
365 static NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
368 * The young generation is divided into fragments. This is because
369 * we can hand one fragment to a thread for lock-less fast alloc and
370 * because the young generation ends up fragmented anyway by pinned objects.
371 * Once a collection is done, a list of fragments is created. When doing
372 * thread local alloc we use smallish nurseries so we allow new threads to
373 * allocate memory from gen0 without triggering a collection. Threads that
374 * are found to allocate lots of memory are given bigger fragments. This
375 * should make the finalizer thread use little nursery memory after a while.
376 * We should start assigning threads very small fragments: if there are many
377 * threads the nursery will be full of reserved space that the threads may not
378 * use at all, slowing down allocation speed.
379 * Thread local allocation is done from areas of memory Hotspot calls Thread Local
380 * Allocation Buffers (TLABs).
382 typedef struct _Fragment Fragment;
386 char *fragment_start;
387 char *fragment_limit; /* the current soft limit for allocation */
391 /* the runtime can register areas of memory as roots: we keep two lists of roots,
392 * a pinned root set for conservatively scanned roots and a normal one for
393 * precisely scanned roots (currently implemented as a single list).
395 typedef struct _RootRecord RootRecord;
404 * We're never actually using the first element. It's always set to
405 * NULL to simplify the elimination of consecutive duplicate
408 #define STORE_REMSET_BUFFER_SIZE 1024
410 typedef struct _GenericStoreRememberedSet GenericStoreRememberedSet;
411 struct _GenericStoreRememberedSet {
412 GenericStoreRememberedSet *next;
413 /* We need one entry less because the first entry of store
414 remset buffers is always a dummy and we don't copy it. */
415 gpointer data [STORE_REMSET_BUFFER_SIZE - 1];
418 /* we have 4 possible values in the low 2 bits */
420 REMSET_LOCATION, /* just a pointer to the exact location */
421 REMSET_RANGE, /* range of pointer fields */
422 REMSET_OBJECT, /* mark all the object for scanning */
423 REMSET_VTYPE, /* a valuetype array described by a gc descriptor and a count */
424 REMSET_TYPE_MASK = 0x3
427 #ifdef HAVE_KW_THREAD
428 static __thread RememberedSet *remembered_set MONO_TLS_FAST;
430 static pthread_key_t remembered_set_key;
431 static RememberedSet *global_remset;
432 static RememberedSet *freed_thread_remsets;
433 static GenericStoreRememberedSet *generic_store_remsets = NULL;
435 /*A two slots cache for recently inserted remsets */
436 static gpointer global_remset_cache [2];
438 /* FIXME: later choose a size that takes into account the RememberedSet struct
439 * and doesn't waste any alloc padding space.
441 #define DEFAULT_REMSET_SIZE 1024
442 static RememberedSet* alloc_remset (int size, gpointer id);
444 #define object_is_forwarded SGEN_OBJECT_IS_FORWARDED
445 #define object_is_pinned SGEN_OBJECT_IS_PINNED
446 #define pin_object SGEN_PIN_OBJECT
447 #define unpin_object SGEN_UNPIN_OBJECT
449 #define ptr_in_nursery(p) (SGEN_PTR_IN_NURSERY ((p), DEFAULT_NURSERY_BITS, nursery_start, nursery_real_end))
451 #define LOAD_VTABLE SGEN_LOAD_VTABLE
/* Return the class name of OBJ for debugging/diagnostic output.
 * Loads the vtable via LOAD_VTABLE, which masks off any forwarding/pin
 * bits, so this is safe to call during a collection.
 * NOTE(review): assumes OBJ is a valid object with a vtable — must not
 * be called on free nursery space. */
454 safe_name (void* obj)
456 	MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
457 	return vt->klass->name;
460 #define safe_object_get_size mono_sgen_safe_object_get_size
/* Public (non-static) wrapper around safe_name () so other sgen
 * translation units can obtain an object's class name for debugging. */
463 mono_sgen_safe_name (void* obj)
465 	return safe_name (obj);
469 * ######################################################################
470 * ######## Global data.
471 * ######################################################################
473 static LOCK_DECLARE (gc_mutex);
474 static int gc_disabled = 0;
475 static int num_minor_gcs = 0;
476 static int num_major_gcs = 0;
478 static gboolean use_cardtable;
482 /* good sizes are 512KB-1MB: larger ones increase a lot memzeroing time */
483 #define DEFAULT_NURSERY_SIZE (default_nursery_size)
484 static int default_nursery_size = (1 << 22);
485 #ifdef SGEN_ALIGN_NURSERY
486 /* The number of trailing 0 bits in DEFAULT_NURSERY_SIZE */
487 #define DEFAULT_NURSERY_BITS (default_nursery_bits)
488 static int default_nursery_bits = 22;
493 #define DEFAULT_NURSERY_SIZE (4*1024*1024)
494 #ifdef SGEN_ALIGN_NURSERY
495 #define DEFAULT_NURSERY_BITS 22
500 #ifndef SGEN_ALIGN_NURSERY
501 #define DEFAULT_NURSERY_BITS -1
504 #define MIN_MINOR_COLLECTION_ALLOWANCE (DEFAULT_NURSERY_SIZE * 4)
506 #define SCAN_START_SIZE SGEN_SCAN_START_SIZE
508 /* the minimum size of a fragment that we consider useful for allocation */
509 #define FRAGMENT_MIN_SIZE (512)
511 static mword pagesize = 4096;
512 static mword nursery_size;
513 static int degraded_mode = 0;
515 static mword total_alloc = 0;
516 /* use this to tune when to do a major/minor collection */
517 static mword memory_pressure = 0;
518 static mword minor_collection_allowance;
519 static int minor_collection_sections_alloced = 0;
521 static GCMemSection *nursery_section = NULL;
522 static mword lowest_heap_address = ~(mword)0;
523 static mword highest_heap_address = 0;
525 static LOCK_DECLARE (interruption_mutex);
526 static LOCK_DECLARE (global_remset_mutex);
527 static LOCK_DECLARE (pin_queue_mutex);
529 #define LOCK_GLOBAL_REMSET pthread_mutex_lock (&global_remset_mutex)
530 #define UNLOCK_GLOBAL_REMSET pthread_mutex_unlock (&global_remset_mutex)
532 #define LOCK_PIN_QUEUE pthread_mutex_lock (&pin_queue_mutex)
533 #define UNLOCK_PIN_QUEUE pthread_mutex_unlock (&pin_queue_mutex)
535 typedef struct _FinalizeEntry FinalizeEntry;
536 struct _FinalizeEntry {
541 typedef struct _FinalizeEntryHashTable FinalizeEntryHashTable;
542 struct _FinalizeEntryHashTable {
543 FinalizeEntry **table;
548 typedef struct _DisappearingLink DisappearingLink;
549 struct _DisappearingLink {
550 DisappearingLink *next;
554 typedef struct _DisappearingLinkHashTable DisappearingLinkHashTable;
555 struct _DisappearingLinkHashTable {
556 DisappearingLink **table;
561 typedef struct _EphemeronLinkNode EphemeronLinkNode;
563 struct _EphemeronLinkNode {
564 EphemeronLinkNode *next;
573 int current_collection_generation = -1;
576 * The link pointer is hidden by negating each bit. We use the lowest
577 * bit of the link (before negation) to store whether it needs
578 * resurrection tracking.
580 #define HIDE_POINTER(p,t) ((gpointer)(~((gulong)(p)|((t)?1:0))))
581 #define REVEAL_POINTER(p) ((gpointer)((~(gulong)(p))&~3L))
583 #define DISLINK_OBJECT(d) (REVEAL_POINTER (*(d)->link))
584 #define DISLINK_TRACK(d) ((~(gulong)(*(d)->link)) & 1)
587 * The finalizable hash has the object as the key, the
588 * disappearing_link hash, has the link address as key.
590 static FinalizeEntryHashTable minor_finalizable_hash;
591 static FinalizeEntryHashTable major_finalizable_hash;
592 /* objects that are ready to be finalized */
593 static FinalizeEntry *fin_ready_list = NULL;
594 static FinalizeEntry *critical_fin_list = NULL;
596 static DisappearingLinkHashTable minor_disappearing_link_hash;
597 static DisappearingLinkHashTable major_disappearing_link_hash;
599 static EphemeronLinkNode *ephemeron_list;
601 static int num_ready_finalizers = 0;
602 static int no_finalize = 0;
605 ROOT_TYPE_NORMAL = 0, /* "normal" roots */
606 ROOT_TYPE_PINNED = 1, /* roots without a GC descriptor */
607 ROOT_TYPE_WBARRIER = 2, /* roots with a write barrier */
611 /* registered roots: the key to the hash is the root start address */
613 * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
615 static RootRecord **roots_hash [ROOT_TYPE_NUM] = { NULL, NULL };
616 static int roots_hash_size [ROOT_TYPE_NUM] = { 0, 0, 0 };
617 static mword roots_size = 0; /* amount of memory in the root set */
618 static int num_roots_entries [ROOT_TYPE_NUM] = { 0, 0, 0 };
620 #define GC_ROOT_NUM 32
623 void *objects [GC_ROOT_NUM];
624 int root_types [GC_ROOT_NUM];
625 uintptr_t extra_info [GC_ROOT_NUM];
/* Hand the batched root records in REPORT to the profiler.
 * NOTE(review): callers rely on this making room for further entries
 * (see add_profile_gc_root) — presumably report->count is reset here;
 * the reset is not visible in this view, confirm in full source. */
629 notify_gc_roots (GCRootReport *report)
633 	mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
/* Append one root entry (object, root type, extra info) to REPORT for
 * later delivery to the profiler. When the fixed-size buffer
 * (GC_ROOT_NUM entries) is full, flush it first via notify_gc_roots ().
 * The extra_info slot records the object's class pointer, overriding
 * the EXTRA_INFO argument. */
638 add_profile_gc_root (GCRootReport *report, void *object, int rtype, uintptr_t extra_info)
640 	if (report->count == GC_ROOT_NUM)
641 	notify_gc_roots (report);
642 	report->objects [report->count] = object;
643 	report->root_types [report->count] = rtype;
644 	report->extra_info [report->count++] = (uintptr_t)((MonoVTable*)LOAD_VTABLE (object))->klass;
648 * The current allocation cursors
649 * We allocate objects in the nursery.
650 * The nursery is the area between nursery_start and nursery_real_end.
651 * Allocation is done from a Thread Local Allocation Buffer (TLAB). TLABs are allocated
652 * from nursery fragments.
653 * tlab_next is the pointer to the space inside the TLAB where the next object will
655 * tlab_temp_end is the pointer to the end of the temporary space reserved for
656 * the allocation: it allows us to set the scan starts at reasonable intervals.
657 * tlab_real_end points to the end of the TLAB.
658 * nursery_frag_real_end points to the end of the currently used nursery fragment.
659 * nursery_first_pinned_start points to the start of the first pinned object in the nursery
660 * nursery_last_pinned_end points to the end of the last pinned object in the nursery
661 * At the next allocation, the area of the nursery where objects can be present is
662 * between MIN(nursery_first_pinned_start, first_fragment_start) and
663 * MAX(nursery_last_pinned_end, nursery_frag_real_end)
665 static char *nursery_start = NULL;
667 #ifdef HAVE_KW_THREAD
668 #define TLAB_ACCESS_INIT
669 #define TLAB_START tlab_start
670 #define TLAB_NEXT tlab_next
671 #define TLAB_TEMP_END tlab_temp_end
672 #define TLAB_REAL_END tlab_real_end
673 #define REMEMBERED_SET remembered_set
674 #define STORE_REMSET_BUFFER store_remset_buffer
675 #define STORE_REMSET_BUFFER_INDEX store_remset_buffer_index
676 #define IN_CRITICAL_REGION thread_info->in_critical_region
678 static pthread_key_t thread_info_key;
679 #define TLAB_ACCESS_INIT SgenThreadInfo *__thread_info__ = pthread_getspecific (thread_info_key)
680 #define TLAB_START (__thread_info__->tlab_start)
681 #define TLAB_NEXT (__thread_info__->tlab_next)
682 #define TLAB_TEMP_END (__thread_info__->tlab_temp_end)
683 #define TLAB_REAL_END (__thread_info__->tlab_real_end)
684 #define REMEMBERED_SET (__thread_info__->remset)
685 #define STORE_REMSET_BUFFER (__thread_info__->store_remset_buffer)
686 #define STORE_REMSET_BUFFER_INDEX (__thread_info__->store_remset_buffer_index)
687 #define IN_CRITICAL_REGION (__thread_info__->in_critical_region)
690 /* we use the memory barrier only to prevent compiler reordering (a memory constraint may be enough) */
691 #define ENTER_CRITICAL_REGION do {IN_CRITICAL_REGION = 1;mono_memory_barrier ();} while (0)
692 #define EXIT_CRITICAL_REGION do {IN_CRITICAL_REGION = 0;mono_memory_barrier ();} while (0)
695 * FIXME: What is faster, a TLS variable pointing to a structure, or separate TLS
696 * variables for next+temp_end ?
698 #ifdef HAVE_KW_THREAD
699 static __thread SgenThreadInfo *thread_info;
700 static __thread char *tlab_start;
701 static __thread char *tlab_next;
702 static __thread char *tlab_temp_end;
703 static __thread char *tlab_real_end;
704 static __thread gpointer *store_remset_buffer;
705 static __thread long store_remset_buffer_index;
706 /* Used by the managed allocator/wbarrier */
707 static __thread char **tlab_next_addr;
708 static __thread char *stack_end;
709 static __thread long *store_remset_buffer_index_addr;
711 static char *nursery_next = NULL;
712 static char *nursery_frag_real_end = NULL;
713 static char *nursery_real_end = NULL;
714 static char *nursery_last_pinned_end = NULL;
716 /* The size of a TLAB */
717 /* The bigger the value, the less often we have to go to the slow path to allocate a new
718 * one, but the more space is wasted by threads not allocating much memory.
720 * FIXME: Make this self-tuning for each thread.
722 static guint32 tlab_size = (1024 * 4);
724 /*How much space is tolerable to be wasted from the current fragment when allocating a new TLAB*/
725 #define MAX_NURSERY_TLAB_WASTE 512
727 /* fragments that are free and ready to be used for allocation */
728 static Fragment *nursery_fragments = NULL;
730 /* freelist of fragment structures */
730 static Fragment *fragment_freelist = NULL;
732 #define MAX_SMALL_OBJ_SIZE SGEN_MAX_SMALL_OBJ_SIZE
734 /* Functions supplied by the runtime to be called by the GC */
735 static MonoGCCallbacks gc_callbacks;
737 #define ALLOC_ALIGN SGEN_ALLOC_ALIGN
738 #define ALLOC_ALIGN_BITS SGEN_ALLOC_ALIGN_BITS
740 #define ALIGN_UP SGEN_ALIGN_UP
742 #define MOVED_OBJECTS_NUM 64
743 static void *moved_objects [MOVED_OBJECTS_NUM];
744 static int moved_objects_idx = 0;
746 /* Vtable of the objects used to fill out nursery fragments before a collection */
747 static MonoVTable *array_fill_vtable;
750 * ######################################################################
751 * ######## Heap size accounting
752 * ######################################################################
755 static mword max_heap_size = ((mword)0)- ((mword)1);
756 static mword allocated_heap;
758 /*Object was pinned during the current collection*/
759 static mword objects_pinned;
/* Account for SIZE bytes of heap memory being returned to the system.
 * SPACE identifies which heap space the memory belonged to; it is not
 * used by the accounting visible here. */
762 mono_sgen_release_space (mword size, int space)
764 	allocated_heap -= size;
/* Bytes still available under the configured max_heap_size budget.
 * The MIN () guards against allocated_heap transiently exceeding the
 * limit, which would otherwise underflow the unsigned subtraction. */
768 available_free_space (void)
770 	return max_heap_size - MIN (allocated_heap, max_heap_size);
/* Try to reserve SIZE bytes against the heap budget. Fails (the early
 * exit is not visible in this view) when the reservation would exceed
 * max_heap_size; otherwise charges the bytes to allocated_heap.
 * SPACE identifies the requesting heap space. */
774 mono_sgen_try_alloc_space (mword size, int space)
776 	if (available_free_space () < size)
779 	allocated_heap += size;
/* Apply the user-configured maximum heap size (from MONO_GC_PARAMS).
 * Rejects values smaller than 4x the nursery size, since the major heap
 * budget is MAX_HEAP minus the nursery reservation.
 * NOTE(review): error-path control flow after the fprintf is not
 * visible in this view — presumably the process exits or the setting
 * is ignored; confirm in full source. */
784 init_heap_size_limits (glong max_heap)
789 	if (max_heap < nursery_size * 4) {
790 	fprintf (stderr, "max-heap-size must be at least 4 times larger than nursery size.\n");
793 	max_heap_size = max_heap - nursery_size;
797 * ######################################################################
798 * ######## Macros and function declarations.
799 * ######################################################################
/* Round PTR up to the next pointer-size (sizeof (gpointer)) boundary
 * using the classic add-then-mask alignment trick. */
803 align_pointer (void *ptr)
805 	mword p = (mword)ptr;
806 	p += sizeof (gpointer) - 1;
807 	p &= ~ (sizeof (gpointer) - 1);
811 typedef SgenGrayQueue GrayQueue;
813 typedef void (*CopyOrMarkObjectFunc) (void**, GrayQueue*);
814 typedef char* (*ScanObjectFunc) (char*, GrayQueue*);
816 /* forward declarations */
817 static int stop_world (int generation);
818 static int restart_world (int generation);
819 static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise);
820 static void scan_from_remsets (void *start_nursery, void *end_nursery, GrayQueue *queue);
821 static void scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, char *addr_start, char *addr_end, int root_type, GrayQueue *queue);
822 static void scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeEntry *list, GrayQueue *queue);
823 static void report_finalizer_roots (void);
824 static void report_registered_roots (void);
825 static void find_pinning_ref_from_thread (char *obj, size_t size);
826 static void update_current_thread_stack (void *start);
827 static void finalize_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, GrayQueue *queue);
828 static void add_or_remove_disappearing_link (MonoObject *obj, void **link, gboolean track, int generation);
829 static void null_link_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, gboolean before_finalization, GrayQueue *queue);
830 static void null_links_for_domain (MonoDomain *domain, int generation);
831 static gboolean search_fragment_for_size (size_t size);
832 static int search_fragment_for_size_range (size_t desired_size, size_t minimum_size);
833 static void clear_nursery_fragments (char *next);
834 static void pin_from_roots (void *start_nursery, void *end_nursery);
835 static int pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, GrayQueue *queue);
836 static void optimize_pin_queue (int start_slot);
837 static void clear_remsets (void);
838 static void clear_tlabs (void);
839 static void sort_addresses (void **array, int size);
840 static gboolean drain_gray_stack (GrayQueue *queue, int max_objs);
841 static void finish_gray_stack (char *start_addr, char *end_addr, int generation, GrayQueue *queue);
842 static gboolean need_major_collection (mword space_needed);
843 static void major_collection (const char *reason);
845 static void mono_gc_register_disappearing_link (MonoObject *obj, void **link, gboolean track);
847 void describe_ptr (char *ptr);
848 void check_object (char *start);
850 static void check_consistency (void);
851 static void check_major_refs (void);
852 static void check_scan_starts (void);
853 static void check_for_xdomain_refs (void);
854 static void dump_heap (const char *type, int num, const char *reason);
856 void mono_gc_scan_for_specific_ref (MonoObject *key);
858 static void init_stats (void);
860 static int mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue);
861 static void clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue);
862 static void null_ephemerons_for_domain (MonoDomain *domain);
864 SgenMajorCollector major_collector;
866 #include "sgen-pinning.c"
867 #include "sgen-pinning-stats.c"
868 #include "sgen-gray.c"
869 #include "sgen-workers.c"
870 #include "sgen-cardtable.c"
872 /* Root bitmap descriptors are simpler: the lower three bits describe the type
873 * and we either have 30/62 bitmap bits or nibble-based run-length,
874 * or a complex descriptor, or a user defined marker function.
877 ROOT_DESC_CONSERVATIVE, /* 0, so matches NULL value */
882 ROOT_DESC_TYPE_MASK = 0x7,
883 ROOT_DESC_TYPE_SHIFT = 3,
886 #define MAKE_ROOT_DESC(type,val) ((type) | ((val) << ROOT_DESC_TYPE_SHIFT))
888 #define MAX_USER_DESCRIPTORS 16
890 static gsize* complex_descriptors = NULL;
891 static int complex_descriptors_size = 0;
892 static int complex_descriptors_next = 0;
893 static MonoGCRootMarkFunc user_descriptors [MAX_USER_DESCRIPTORS];
894 static int user_descriptors_next = 0;
/*
 * alloc_complex_descriptor:
 *
 * Store the reference bitmap BITMAP (NUMBITS bits) in the global
 * complex_descriptors table, reusing an identical existing entry when one
 * is found, and record the slot index in `res` (the return statement is
 * outside this excerpt; presumably `res` is returned).
 */
897 alloc_complex_descriptor (gsize *bitmap, int numbits)
/* Round up to whole bitmap words; each entry's first word holds its total size. */
901 numbits = ALIGN_TO (numbits, GC_BITS_PER_WORD);
902 nwords = numbits / GC_BITS_PER_WORD + 1;
905 res = complex_descriptors_next;
906 /* linear search, so we don't have duplicates with domain load/unload
907 * this should not be performance critical or we'd have bigger issues
908 * (the number and size of complex descriptors should be small).
910 for (i = 0; i < complex_descriptors_next; ) {
911 if (complex_descriptors [i] == nwords) {
913 for (j = 0; j < nwords - 1; ++j) {
914 if (complex_descriptors [i + 1 + j] != bitmap [j]) {
/* advance to the next stored entry: its size is in its first word */
924 i += complex_descriptors [i];
/* grow the table geometrically when the new entry does not fit */
926 if (complex_descriptors_next + nwords > complex_descriptors_size) {
927 int new_size = complex_descriptors_size * 2 + nwords;
928 complex_descriptors = g_realloc (complex_descriptors, new_size * sizeof (gsize));
929 complex_descriptors_size = new_size;
931 DEBUG (6, fprintf (gc_debug_file, "Complex descriptor %d, size: %d (total desc memory: %d)\n", res, nwords, complex_descriptors_size));
932 complex_descriptors_next += nwords;
/* entry layout: [nwords][bitmap word 0] ... [bitmap word nwords-2] */
933 complex_descriptors [res] = nwords;
934 for (i = 0; i < nwords - 1; ++i) {
935 complex_descriptors [res + 1 + i] = bitmap [i];
936 DEBUG (6, fprintf (gc_debug_file, "\tvalue: %p\n", (void*)complex_descriptors [res + 1 + i]));
/* Return a pointer into complex_descriptors for the complex descriptor
 * whose index is encoded in VT's desc field above the low type bits. */
943 mono_sgen_get_complex_descriptor (GCVTable *vt)
945 return complex_descriptors + (vt->desc >> LOW_TYPE_BITS);
949 * Descriptor builders.
/* Build the GC descriptor for string objects: a bare run-length descriptor
 * with no reference runs encoded.  BITMAP/NUMBITS are unused here. */
952 mono_gc_make_descr_for_string (gsize *bitmap, int numbits)
954 return (void*) DESC_TYPE_RUN_LENGTH;
/*
 * mono_gc_make_descr_for_object:
 *
 * Build a GC descriptor for an object of OBJ_SIZE bytes whose reference
 * fields are given by BITMAP (NUMBITS bits).  Picks the cheapest encoding
 * that fits: run-length, small/large inline bitmap, or an out-of-line
 * complex descriptor.
 */
958 mono_gc_make_descr_for_object (gsize *bitmap, int numbits, size_t obj_size)
960 int first_set = -1, num_set = 0, last_set = -1, i;
962 size_t stored_size = obj_size;
/* find the first/last set bits and count them (loop body elided in this excerpt) */
963 for (i = 0; i < numbits; ++i) {
964 if (bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) {
972 * We don't encode the size of types that don't contain
973 * references because they might not be aligned, i.e. the
974 * bottom two bits might be set, which would clash with the
975 * bits we need to encode the descriptor type. Since we don't
976 * use the encoded size to skip objects, other than for
977 * processing remsets, in which case only the positions of
978 * references are relevant, this is not a problem.
981 return (void*)DESC_TYPE_RUN_LENGTH;
/* the size must leave the low two bits free for the descriptor type tag */
982 g_assert (!(stored_size & 0x3));
983 if (stored_size <= MAX_SMALL_OBJ_SIZE) {
984 /* check run-length encoding first: one byte offset, one byte number of pointers
985 * on 64 bit archs, we can have 3 runs, just one on 32.
986 * It may be better to use nibbles.
/* no references at all: encode just the size */
989 desc = DESC_TYPE_RUN_LENGTH | (stored_size << 1);
990 DEBUG (6, fprintf (gc_debug_file, "Ptrfree descriptor %p, size: %zd\n", (void*)desc, stored_size));
/* a single contiguous run of references: offset in bits 16-23, count in 24-31 */
992 } else if (first_set < 256 && num_set < 256 && (first_set + num_set == last_set + 1)) {
993 desc = DESC_TYPE_RUN_LENGTH | (stored_size << 1) | (first_set << 16) | (num_set << 24);
994 DEBUG (6, fprintf (gc_debug_file, "Runlen descriptor %p, size: %zd, first set: %d, num set: %d\n", (void*)desc, stored_size, first_set, num_set));
997 /* we know the 2-word header is ptr-free */
998 if (last_set < SMALL_BITMAP_SIZE + OBJECT_HEADER_WORDS) {
999 desc = DESC_TYPE_SMALL_BITMAP | (stored_size << 1) | ((*bitmap >> OBJECT_HEADER_WORDS) << SMALL_BITMAP_SHIFT);
1000 DEBUG (6, fprintf (gc_debug_file, "Smallbitmap descriptor %p, size: %zd, last set: %d\n", (void*)desc, stored_size, last_set));
1001 return (void*) desc;
1004 /* we know the 2-word header is ptr-free */
/* large bitmap: no size is encoded, only the bitmap itself */
1005 if (last_set < LARGE_BITMAP_SIZE + OBJECT_HEADER_WORDS) {
1006 desc = DESC_TYPE_LARGE_BITMAP | ((*bitmap >> OBJECT_HEADER_WORDS) << LOW_TYPE_BITS);
1007 DEBUG (6, fprintf (gc_debug_file, "Largebitmap descriptor %p, size: %zd, last set: %d\n", (void*)desc, stored_size, last_set));
1008 return (void*) desc;
1010 /* it's a complex object ... */
1011 desc = DESC_TYPE_COMPLEX | (alloc_complex_descriptor (bitmap, last_set + 1) << LOW_TYPE_BITS);
1012 return (void*) desc;
1015 /* If the array holds references, numbits == 1 and the first bit is set in elem_bitmap */
/*
 * mono_gc_make_descr_for_array:
 *
 * Build a GC descriptor for an array (VECTOR selects vector vs. array
 * layout) whose element reference layout is ELEM_BITMAP/NUMBITS with
 * elements of ELEM_SIZE bytes.
 */
1017 mono_gc_make_descr_for_array (int vector, gsize *elem_bitmap, int numbits, size_t elem_size)
1019 int first_set = -1, num_set = 0, last_set = -1, i;
1020 mword desc = vector? DESC_TYPE_VECTOR: DESC_TYPE_ARRAY;
/* find first/last set bits in the element bitmap (loop body elided here) */
1021 for (i = 0; i < numbits; ++i) {
1022 if (elem_bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) {
1029 /* See comment at the definition of DESC_TYPE_RUN_LENGTH. */
1031 return (void*)DESC_TYPE_RUN_LENGTH;
1032 if (elem_size <= MAX_ELEMENT_SIZE) {
1033 desc |= elem_size << VECTOR_ELSIZE_SHIFT;
/* element has no references at all */
1035 return (void*)(desc | VECTOR_SUBTYPE_PTRFREE);
1037 /* Note: we also handle structs with just ref fields */
1038 if (num_set * sizeof (gpointer) == elem_size) {
1039 return (void*)(desc | VECTOR_SUBTYPE_REFS | ((gssize)(-1) << 16));
1041 /* FIXME: try run-len first */
1042 /* Note: we can't skip the object header here, because it's not present */
1043 if (last_set <= SMALL_BITMAP_SIZE) {
1044 return (void*)(desc | VECTOR_SUBTYPE_BITMAP | (*elem_bitmap << 16));
1047 /* it's an array of complex structs ... */
1048 desc = DESC_TYPE_COMPLEX_ARR;
1049 desc |= alloc_complex_descriptor (elem_bitmap, last_set + 1) << LOW_TYPE_BITS;
1050 return (void*) desc;
1053 /* Return the bitmap encoded by a descriptor */
/* Decode DESCR back into a freshly allocated reference bitmap; the number
 * of meaningful bits is stored through NUMBITS.  Caller owns the result. */
1055 mono_gc_get_bitmap_for_descr (void *descr, int *numbits)
1057 mword d = (mword)descr;
1061 case DESC_TYPE_RUN_LENGTH: {
/* run-length layout: first-set offset in bits 16-23, count in bits 24-31 */
1062 int first_set = (d >> 16) & 0xff;
1063 int num_set = (d >> 24) & 0xff;
/* NOTE(review): element count is computed in 8-bit units although elements
 * are gsize, so this over-allocates — harmless but worth confirming. */
1066 bitmap = g_new0 (gsize, (first_set + num_set + 7) / 8);
1068 for (i = first_set; i < first_set + num_set; ++i)
1069 bitmap [i / GC_BITS_PER_WORD] |= ((gsize)1 << (i % GC_BITS_PER_WORD));
1071 *numbits = first_set + num_set;
1075 case DESC_TYPE_SMALL_BITMAP:
1076 bitmap = g_new0 (gsize, 1);
/* shift the inline bitmap back over the (ptr-free) object header words */
1078 bitmap [0] = (d >> SMALL_BITMAP_SHIFT) << OBJECT_HEADER_WORDS;
1080 *numbits = GC_BITS_PER_WORD;
/* other descriptor types are not supported by this decoder */
1084 g_assert_not_reached ();
/*
 * is_xdomain_ref_allowed:
 *
 * Debugging helper: decide whether the cross-domain reference stored at
 * PTR inside object OBJ is one of the known, legitimate runtime-internal
 * cases (thread objects, remoting proxies, cached culture info, etc.).
 */
1089 is_xdomain_ref_allowed (gpointer *ptr, char *obj, MonoDomain *domain)
1091 MonoObject *o = (MonoObject*)(obj);
1092 MonoObject *ref = (MonoObject*)*(ptr);
1093 int offset = (char*)(ptr) - (char*)o;
/* Thread.internal_thread may legitimately cross domains */
1095 if (o->vtable->klass == mono_defaults.thread_class && offset == G_STRUCT_OFFSET (MonoThread, internal_thread))
1097 if (o->vtable->klass == mono_defaults.internal_thread_class && offset == G_STRUCT_OFFSET (MonoInternalThread, current_appcontext))
/* remoting proxies hold a reference into the target domain */
1099 if (mono_class_has_parent (o->vtable->klass, mono_defaults.real_proxy_class) &&
1100 offset == G_STRUCT_OFFSET (MonoRealProxy, unwrapped_server)
1102 /* Thread.cached_culture_info */
1103 if (!strcmp (ref->vtable->klass->name_space, "System.Globalization") &&
1104 !strcmp (ref->vtable->klass->name, "CultureInfo") &&
1105 !strcmp(o->vtable->klass->name_space, "System") &&
1106 !strcmp(o->vtable->klass->name, "Object[]"))
1109 * at System.IO.MemoryStream.InternalConstructor (byte[],int,int,bool,bool) [0x0004d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:121
1110 * at System.IO.MemoryStream..ctor (byte[]) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:81
1111 * at (wrapper remoting-invoke-with-check) System.IO.MemoryStream..ctor (byte[]) <IL 0x00020, 0xffffffff>
1112 * at System.Runtime.Remoting.Messaging.CADMethodCallMessage.GetArguments () [0x0000d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/CADMessages.cs:327
1113 * at System.Runtime.Remoting.Messaging.MethodCall..ctor (System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/MethodCall.cs:87
1114 * at System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) [0x00018] in /home/schani/Work/novell/trunk/mcs/class/corlib/System/AppDomain.cs:1213
1115 * at (wrapper remoting-invoke-with-check) System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) <IL 0x0003d, 0xffffffff>
1116 * at System.Runtime.Remoting.Channels.CrossAppDomainSink.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00008] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Channels/CrossAppDomainChannel.cs:198
1117 * at (wrapper runtime-invoke) object.runtime_invoke_CrossAppDomainSink/ProcessMessageRes_object_object (object,intptr,intptr,intptr) <IL 0x0004c, 0xffffffff>
1119 if (!strcmp (ref->vtable->klass->name_space, "System") &&
1120 !strcmp (ref->vtable->klass->name, "Byte[]") &&
1121 !strcmp (o->vtable->klass->name_space, "System.IO") &&
1122 !strcmp (o->vtable->klass->name, "MemoryStream"))
1124 /* append_job() in threadpool.c */
1125 if (!strcmp (ref->vtable->klass->name_space, "System.Runtime.Remoting.Messaging") &&
1126 !strcmp (ref->vtable->klass->name, "AsyncResult") &&
1127 !strcmp (o->vtable->klass->name_space, "System") &&
1128 !strcmp (o->vtable->klass->name, "Object[]") &&
1129 mono_thread_pool_is_queue_array ((MonoArray*) o))
/*
 * check_reference_for_xdomain:
 *
 * Debugging helper: if the reference stored at PTR in OBJ points into a
 * different domain than DOMAIN and is not a whitelisted case, print a
 * diagnostic (including the field name, found by offset lookup) and scan
 * the heap for everything pointing to OBJ.
 */
1135 check_reference_for_xdomain (gpointer *ptr, char *obj, MonoDomain *domain)
1137 MonoObject *o = (MonoObject*)(obj);
1138 MonoObject *ref = (MonoObject*)*(ptr);
1139 int offset = (char*)(ptr) - (char*)o;
1141 MonoClassField *field;
/* same-domain (or null) references are always fine */
1144 if (!ref || ref->vtable->domain == domain)
1146 if (is_xdomain_ref_allowed (ptr, obj, domain))
/* walk the class hierarchy to find the field at this offset, for the report */
1150 for (class = o->vtable->klass; class; class = class->parent) {
1153 for (i = 0; i < class->field.count; ++i) {
1154 if (class->fields[i].offset == offset) {
1155 field = &class->fields[i];
1163 if (ref->vtable->klass == mono_defaults.string_class)
1164 str = mono_string_to_utf8 ((MonoString*)ref);
1167 g_print ("xdomain reference in %p (%s.%s) at offset %d (%s) to %p (%s.%s) (%s) - pointed to by:\n",
1168 o, o->vtable->klass->name_space, o->vtable->klass->name,
1169 offset, field ? field->name : "",
1170 ref, ref->vtable->klass->name_space, ref->vtable->klass->name, str ? str : "");
1171 mono_gc_scan_for_specific_ref (o);
/* Scan one object's references, checking each for cross-domain pointers.
 * HANDLE_PTR is the per-reference hook expanded by sgen-scan-object.h. */
1177 #define HANDLE_PTR(ptr,obj) check_reference_for_xdomain ((ptr), (obj), domain)
1180 scan_object_for_xdomain_refs (char *start, mword size, void *data)
1182 MonoDomain *domain = ((MonoObject*)start)->vtable->domain;
1184 #include "sgen-scan-object.h"
/* Scan one object and report (via g_print) every slot that holds KEY.
 * The HANDLE_PTR hook below is expanded by sgen-scan-object.h; the object
 * is first unforwarded if it has been moved. */
1188 #define HANDLE_PTR(ptr,obj) do { \
1189 if ((MonoObject*)*(ptr) == key) { \
1190 g_print ("found ref to %p in object %p (%s) at offset %td\n", \
1191 key, (obj), safe_name ((obj)), ((char*)(ptr) - (char*)(obj))); \
1196 scan_object_for_specific_ref (char *start, MonoObject *key)
1200 if ((forwarded = SGEN_OBJECT_IS_FORWARDED (start)))
1203 #include "sgen-scan-object.h"
/*
 * mono_sgen_scan_area_with_callback:
 *
 * Walk the objects laid out contiguously in [START, END), invoking
 * CALLBACK (obj, size, data) for each.  Zeroed gaps are skipped one
 * pointer at a time; forwarded objects are resolved when ALLOW_FLAGS
 * permits (forwarding check visible at 1219).
 */
1207 mono_sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags)
1209 while (start < end) {
1213 if (!*(void**)start) {
1214 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
1219 if (!(obj = SGEN_OBJECT_IS_FORWARDED (start)))
1225 size = ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
1227 callback (obj, size, data);
/* IterateObjectCallbackFunc adapter: SIZE is unused, KEY rides in the data slot. */
1234 scan_object_for_specific_ref_callback (char *obj, size_t size, MonoObject *key)
1236 scan_object_for_specific_ref (obj, key);
/* Report ROOT if it references KEY (the comparison itself is elided in this
 * excerpt; only the report is visible). */
1240 check_root_obj_specific_ref (RootRecord *root, MonoObject *key, MonoObject *obj)
1244 g_print ("found ref to %p in root record %p\n", key, root);
/* State threaded to the marker callback below, which takes no user data. */
1247 static MonoObject *check_key = NULL;
1248 static RootRecord *check_root = NULL;
/* MonoGCRootMarkFunc-compatible thunk over check_root_obj_specific_ref. */
1251 check_root_obj_specific_ref_from_marker (void **obj)
1253 check_root_obj_specific_ref (check_root, check_key, *obj);
/*
 * scan_roots_for_specific_ref:
 *
 * Walk every registered root of ROOT_TYPE and report any root slot that
 * references KEY, decoding each root's descriptor (bitmap, complex, or
 * user-marker) to find the reference slots.
 */
1257 scan_roots_for_specific_ref (MonoObject *key, int root_type)
1262 for (i = 0; i < roots_hash_size [root_type]; ++i) {
1263 for (root = roots_hash [root_type][i]; root; root = root->next) {
1264 void **start_root = (void**)root->start_root;
1265 mword desc = root->root_desc;
1269 switch (desc & ROOT_DESC_TYPE_MASK) {
1270 case ROOT_DESC_BITMAP:
/* remaining bits are a 1-bit-per-word reference bitmap */
1271 desc >>= ROOT_DESC_TYPE_SHIFT;
1274 check_root_obj_specific_ref (root, key, *start_root);
1279 case ROOT_DESC_COMPLEX: {
/* out-of-line bitmap: first word is the entry size, rest are bitmap words */
1280 gsize *bitmap_data = complex_descriptors + (desc >> ROOT_DESC_TYPE_SHIFT);
1281 int bwords = (*bitmap_data) - 1;
1282 void **start_run = start_root;
1284 while (bwords-- > 0) {
1285 gsize bmap = *bitmap_data++;
1286 void **objptr = start_run;
1289 check_root_obj_specific_ref (root, key, *objptr);
1293 start_run += GC_BITS_PER_WORD;
1297 case ROOT_DESC_USER: {
/* user-supplied marker enumerates the slots for us */
1298 MonoGCRootMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
1299 marker (start_root, check_root_obj_specific_ref_from_marker);
1302 case ROOT_DESC_RUN_LEN:
1303 g_assert_not_reached ();
1305 g_assert_not_reached ();
/*
 * mono_gc_scan_for_specific_ref:
 *
 * Debugging entry point: search the entire heap (nursery, major heap,
 * LOS) and all registered roots for references to KEY, printing each hit.
 */
1314 mono_gc_scan_for_specific_ref (MonoObject *key)
1319 mono_sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
1320 (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key, TRUE);
1322 major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
1324 mono_sgen_los_iterate_objects ((IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
1326 scan_roots_for_specific_ref (key, ROOT_TYPE_NORMAL);
1327 scan_roots_for_specific_ref (key, ROOT_TYPE_WBARRIER);
/* pinned roots are plain address ranges: check every slot */
1329 for (i = 0; i < roots_hash_size [ROOT_TYPE_PINNED]; ++i) {
1330 for (root = roots_hash [ROOT_TYPE_PINNED][i]; root; root = root->next) {
1331 void **ptr = (void**)root->start_root;
1333 while (ptr < (void**)root->end_root) {
/* NOTE(review): key/obj arguments are in the opposite order from the other
 * call sites — harmless only if the check is a plain equality; confirm. */
1334 check_root_obj_specific_ref (root, *ptr, key);
/* Zero the unused tail [NEXT, nursery_frag_real_end) of the fragment
 * currently being allocated from, when the clear-at-TLAB policy is active. */
1342 clear_current_nursery_fragment (char *next)
1344 if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
1345 g_assert (next <= nursery_frag_real_end);
1346 DEBUG (4, fprintf (gc_debug_file, "Clear nursery frag %p-%p\n", next, nursery_frag_real_end));
1347 memset (next, 0, nursery_frag_real_end - next);
1351 /* Clear all remaining nursery fragments */
/* Zero the current fragment's tail (from NEXT) and every fragment still on
 * the free list, so later scans see zeroed memory instead of stale data. */
1353 clear_nursery_fragments (char *next)
1356 if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
1357 clear_current_nursery_fragment (next);
1358 for (frag = nursery_fragments; frag; frag = frag->next) {
1359 DEBUG (4, fprintf (gc_debug_file, "Clear nursery frag %p-%p\n", frag->fragment_start, frag->fragment_end));
1360 memset (frag->fragment_start, 0, frag->fragment_end - frag->fragment_start);
/* Decide whether the object at START belongs to the unloading DOMAIN and
 * must therefore be removed from the heap; logs the decision. */
1366 need_remove_object_for_domain (char *start, MonoDomain *domain)
1368 if (mono_object_domain (start) == domain) {
1369 DEBUG (4, fprintf (gc_debug_file, "Need to cleanup object %p\n", start));
1370 binary_protocol_cleanup (start, (gpointer)LOAD_VTABLE (start), safe_object_get_size ((MonoObject*)start));
/*
 * process_object_for_domain_clearing:
 *
 * Pre-pass for domain unload: null out remoting-proxy references into the
 * dying DOMAIN so later passes never dereference freed memory.  Internal
 * thread objects are asserted to live in the root domain.
 */
1377 process_object_for_domain_clearing (char *start, MonoDomain *domain)
1379 GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
1380 if (vt->klass == mono_defaults.internal_thread_class)
1381 g_assert (mono_object_domain (start) == mono_get_root_domain ());
1382 /* The object could be a proxy for an object in the domain
1384 if (mono_class_has_parent (vt->klass, mono_defaults.real_proxy_class)) {
1385 MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
1387 /* The server could already have been zeroed out, so
1388 we need to check for that, too. */
1389 if (server && (!LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
1390 DEBUG (4, fprintf (gc_debug_file, "Cleaning up remote pointer in %p to object %p\n",
1392 ((MonoRealProxy*)start)->unwrapped_server = NULL;
/* Domain being checked by check_obj_not_in_domain (set by the caller). */
1397 static MonoDomain *check_domain = NULL;
/* Marker callback: assert that the root slot O does not reference an object
 * in check_domain. */
1400 check_obj_not_in_domain (void **o)
1402 g_assert (((MonoObject*)(*o))->vtable->domain != check_domain);
/*
 * scan_for_registered_roots_in_domain:
 *
 * Debugging check for domain unload: assert that no registered root of
 * ROOT_TYPE still references an object in DOMAIN.  Mirrors the descriptor
 * decoding done in scan_roots_for_specific_ref.
 */
1406 scan_for_registered_roots_in_domain (MonoDomain *domain, int root_type)
1410 check_domain = domain;
1411 for (i = 0; i < roots_hash_size [root_type]; ++i) {
1412 for (root = roots_hash [root_type][i]; root; root = root->next) {
1413 void **start_root = (void**)root->start_root;
1414 mword desc = root->root_desc;
1416 /* The MonoDomain struct is allowed to hold
1417 references to objects in its own domain. */
1418 if (start_root == (void**)domain)
1421 switch (desc & ROOT_DESC_TYPE_MASK) {
1422 case ROOT_DESC_BITMAP:
1423 desc >>= ROOT_DESC_TYPE_SHIFT;
1425 if ((desc & 1) && *start_root)
1426 check_obj_not_in_domain (*start_root);
1431 case ROOT_DESC_COMPLEX: {
/* out-of-line bitmap: first word is the entry size, rest are bitmap words */
1432 gsize *bitmap_data = complex_descriptors + (desc >> ROOT_DESC_TYPE_SHIFT);
1433 int bwords = (*bitmap_data) - 1;
1434 void **start_run = start_root;
1436 while (bwords-- > 0) {
1437 gsize bmap = *bitmap_data++;
1438 void **objptr = start_run;
1440 if ((bmap & 1) && *objptr)
1441 check_obj_not_in_domain (*objptr);
1445 start_run += GC_BITS_PER_WORD;
1449 case ROOT_DESC_USER: {
1450 MonoGCRootMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
1451 marker (start_root, check_obj_not_in_domain);
1454 case ROOT_DESC_RUN_LEN:
1455 g_assert_not_reached ();
1457 g_assert_not_reached ();
/* reset shared state used by the marker callback */
1461 check_domain = NULL;
/* Debugging check: scan nursery, major heap and LOS for any cross-domain
 * references, reporting each via scan_object_for_xdomain_refs. */
1465 check_for_xdomain_refs (void)
1469 mono_sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
1470 (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL, FALSE);
1472 major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);
1474 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
1475 scan_object_for_xdomain_refs (bigobj->data, bigobj->size, NULL);
/* Per-object step of domain clearing: null proxy pointers, decide whether
 * OBJ must be removed, and if so drop its monitor's disappearing link
 * (presumably the decision is returned; the return is outside this excerpt). */
1479 clear_domain_process_object (char *obj, MonoDomain *domain)
1483 process_object_for_domain_clearing (obj, domain);
1484 remove = need_remove_object_for_domain (obj, domain);
1486 if (remove && ((MonoObject*)obj)->synchronisation) {
1487 void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
/* registering a NULL object clears the link */
1489 mono_gc_register_disappearing_link (NULL, dislink, FALSE);
/* Nursery variant: objects of the dying domain are simply zeroed in place. */
1496 clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
1498 if (clear_domain_process_object (obj, domain))
1499 memset (obj, 0, size);
/* Major-heap first pass: process only; actual freeing happens in a later pass. */
1503 clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
1505 clear_domain_process_object (obj, domain);
/* Major-heap second pass: free non-pinned objects of the dying domain. */
1509 clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
1511 if (need_remove_object_for_domain (obj, domain))
1512 major_collector.free_non_pinned_object (obj, size);
/* Major-heap second pass: free pinned objects of the dying domain. */
1516 clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
1518 if (need_remove_object_for_domain (obj, domain))
1519 major_collector.free_pinned_object (obj, size);
1523 * When appdomains are unloaded we can easily remove objects that have finalizers,
1524 * but all the others could still be present in random places on the heap.
1525 * We need a sweep to get rid of them even though it's going to be costly
1527 * The reason we need to remove them is because we access the vtable and class
1528 * structures to know the object size and the reference bitmap: once the domain is
1529 unloaded they point to random memory.
/*
 * mono_gc_clear_domain:
 *
 * Remove every object belonging to DOMAIN from the heap during appdomain
 * unload: clear nursery fragments, null ephemerons and dislinks first
 * (they may reference memory about to be returned to the OS), then do
 * two passes over major-heap and large objects — process/null proxies
 * first, free afterwards — because freeing can destroy vtables the
 * processing pass still needs.
 */
1532 mono_gc_clear_domain (MonoDomain * domain)
1534 LOSObject *bigobj, *prev;
1539 clear_nursery_fragments (nursery_next);
/* optional debugging checks when unloading a non-root domain */
1541 if (xdomain_checks && domain != mono_get_root_domain ()) {
1542 scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
1543 scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
1544 check_for_xdomain_refs ();
1547 mono_sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
1548 (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE);
1550 /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
1551 to memory returned to the OS.*/
1552 null_ephemerons_for_domain (domain);
1554 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
1555 null_links_for_domain (domain, i);
1557 /* We need two passes over major and large objects because
1558 freeing such objects might give their memory back to the OS
1559 (in the case of large objects) or obliterate its vtable
1560 (pinned objects with major-copying or pinned and non-pinned
1561 objects with major-mark&sweep), but we might need to
1562 dereference a pointer from an object to another object if
1563 the first object is a proxy. */
1564 major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
1565 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
1566 clear_domain_process_object (bigobj->data, domain);
/* second pass over LOS: unlink and free dead large objects */
1569 for (bigobj = los_object_list; bigobj;) {
1570 if (need_remove_object_for_domain (bigobj->data, domain)) {
1571 LOSObject *to_free = bigobj;
1573 prev->next = bigobj->next;
1575 los_object_list = bigobj->next;
1576 bigobj = bigobj->next;
1577 DEBUG (4, fprintf (gc_debug_file, "Freeing large object %p\n",
1579 mono_sgen_los_free_object (to_free);
1583 bigobj = bigobj->next;
1585 major_collector.iterate_objects (TRUE, FALSE, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
1586 major_collector.iterate_objects (FALSE, TRUE, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
/* Reset the 2-entry global-remset LRU cache to empty. */
1592 global_remset_cache_clear (void)
1594 memset (global_remset_cache, 0, sizeof (global_remset_cache));
1598 * Tries to check if a given remset location was already added to the global remset.
1601 * A 2-entry LRU cache of recently seen remset locations.
1603 * It's hand-coded instead of done using loops to reduce the number of memory references on cache hit.
1605 * Returns TRUE if the element was added.
/* Check PTR against the 2-entry LRU cache of recent global-remset
 * locations; a hit counts as a discard, a miss inserts PTR as most
 * recent and evicts the older entry. */
1608 global_remset_location_was_not_added (gpointer ptr)
1611 gpointer first = global_remset_cache [0], second;
/* hit on the most-recent entry: nothing to do */
1613 HEAVY_STAT (++stat_global_remsets_discarded);
1617 second = global_remset_cache [1];
1619 if (second == ptr) {
1620 /*Move the second to the front*/
1621 global_remset_cache [0] = second;
1622 global_remset_cache [1] = first;
1624 HEAVY_STAT (++stat_global_remsets_discarded);
/* miss: insert PTR, evicting the least-recently-used entry */
1628 global_remset_cache [0] = second;
1629 global_remset_cache [1] = ptr;
1634 * mono_sgen_add_to_global_remset:
1636 * The global remset contains locations which point into newspace after
1637 * a minor collection. This can happen if the objects they point to are pinned.
1639 * LOCKING: If called from a parallel collector, the global remset
1640 * lock must be held. For serial collectors that is not necessary.
/*
 * mono_sgen_add_to_global_remset:
 *
 * Record the old-generation location PTR (which must point into the
 * nursery) in the global remembered set.  With card tables the address is
 * simply marked; otherwise PTR is appended to the remset buffer chain,
 * growing it when full.  See the locking note in the comment above.
 */
1643 mono_sgen_add_to_global_remset (gpointer ptr)
1648 if (use_cardtable) {
1649 sgen_card_table_mark_address ((mword)ptr);
1653 g_assert (!ptr_in_nursery (ptr) && ptr_in_nursery (*(gpointer*)ptr));
/* the parallel major collector shares the global remset, so lock then */
1655 lock = (current_collection_generation == GENERATION_OLD && major_collector.is_parallel);
/* skip locations recently recorded (LRU cache hit) */
1659 if (!global_remset_location_was_not_added (ptr))
1662 DEBUG (8, fprintf (gc_debug_file, "Adding global remset for %p\n", ptr));
1663 binary_protocol_global_remset (ptr, *(gpointer*)ptr, (gpointer)LOAD_VTABLE (*(gpointer*)ptr));
1665 HEAVY_STAT (++stat_global_remsets_added);
1668 * FIXME: If an object remains pinned, we need to add it at every minor collection.
1669 * To avoid uncontrolled growth of the global remset, only add each pointer once.
1671 if (global_remset->store_next + 3 < global_remset->end_set) {
1672 *(global_remset->store_next++) = (mword)ptr;
/* current buffer full: chain a new remset block of the same capacity */
1675 rs = alloc_remset (global_remset->end_set - global_remset->data, NULL);
1676 rs->next = global_remset;
1678 *(global_remset->store_next++) = (mword)ptr;
1681 int global_rs_size = 0;
1683 for (rs = global_remset; rs; rs = rs->next) {
1684 global_rs_size += rs->store_next - rs->data;
1686 DEBUG (4, fprintf (gc_debug_file, "Global remset now has size %d\n", global_rs_size));
1691 UNLOCK_GLOBAL_REMSET;
1697 * Scan objects in the gray stack until the stack is empty. This should be called
1698 * frequently after each object is copied, to achieve better locality and cache
/*
 * drain_gray_stack:
 *
 * Pop and scan objects from QUEUE using the generation-appropriate scan
 * function.  MAX_OBJS bounds each batch in the major case; a negative
 * value loops until the queue is empty (see the do/while condition).
 */
1702 drain_gray_stack (GrayQueue *queue, int max_objs)
1706 if (current_collection_generation == GENERATION_NURSERY) {
1708 GRAY_OBJECT_DEQUEUE (queue, obj);
1711 DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", obj, safe_name (obj)));
1712 major_collector.minor_scan_object (obj, queue);
/* the workers' distribute queue is drained by the worker threads instead */
1717 if (major_collector.is_parallel && queue == &workers_distribute_gray_queue)
1721 for (i = 0; i != max_objs; ++i) {
1722 GRAY_OBJECT_DEQUEUE (queue, obj);
1725 DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", obj, safe_name (obj)));
1726 major_collector.major_scan_object (obj, queue);
1728 } while (max_objs < 0);
1734 * Addresses from start to end are already sorted. This function finds
1735 * the object header for each address and pins the object. The
1736 * addresses must be inside the passed section. The (start of the)
1737 * address array is overwritten with the addresses of the actually
1738 * pinned objects. Return the number of pinned objects.
/*
 * pin_objects_from_addresses:
 *
 * See the comment at 1734 above: the sorted addresses in [START, END) are
 * resolved to object starts within SECTION, each found object is pinned
 * and grayed, and the surviving addresses are compacted into the front of
 * the array (`definitely_pinned`); COUNT (declaration elided here) is the
 * number of pinned objects and is presumably the return value.
 */
1741 pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, GrayQueue *queue)
1746 void *last_obj = NULL;
1747 size_t last_obj_size = 0;
1750 void **definitely_pinned = start;
1754 * The code below starts the search from an entry in scan_starts, which might point into a nursery
1755 * fragment containing random data. Clearing the nursery fragments takes a lot of time, and searching
1756 * through them too, so lay arrays at each location inside a fragment where a search can start:
1757 * - scan_locations[i]
1759 * - the start of each fragment (the last_obj + last_obj case)
1760 * The third encompasses the first two, since scan_locations [i] can't point inside a nursery fragment.
/* turn every free fragment into a fake array object so the scan can walk over it */
1762 for (frag = nursery_fragments; frag; frag = frag->next) {
1765 g_assert (frag->fragment_end - frag->fragment_start >= sizeof (MonoArray));
1766 o = (MonoArray*)frag->fragment_start;
1767 memset (o, 0, sizeof (MonoArray));
1768 g_assert (array_fill_vtable);
1769 o->obj.vtable = array_fill_vtable;
1770 /* Mark this as not a real object */
1771 o->obj.synchronisation = GINT_TO_POINTER (-1);
1772 o->max_length = (frag->fragment_end - frag->fragment_start) - sizeof (MonoArray);
1773 g_assert (frag->fragment_start + safe_object_get_size ((MonoObject*)o) == frag->fragment_end);
1776 while (start < end) {
1778 /* the range check should be redundant */
1779 if (addr != last && addr >= start_nursery && addr < end_nursery) {
1780 DEBUG (5, fprintf (gc_debug_file, "Considering pinning addr %p\n", addr));
1781 /* multiple pointers to the same object */
1782 if (addr >= last_obj && (char*)addr < (char*)last_obj + last_obj_size) {
/* find the nearest scan start at or before addr */
1786 idx = ((char*)addr - (char*)section->data) / SCAN_START_SIZE;
1787 g_assert (idx < section->num_scan_start);
1788 search_start = (void*)section->scan_starts [idx];
1789 if (!search_start || search_start > addr) {
1792 search_start = section->scan_starts [idx];
1793 if (search_start && search_start <= addr)
1796 if (!search_start || search_start > addr)
1797 search_start = start_nursery;
1799 if (search_start < last_obj)
1800 search_start = (char*)last_obj + last_obj_size;
1801 /* now addr should be in an object a short distance from search_start
1802 * Note that search_start must point to zeroed mem or point to an object.
1806 if (!*(void**)search_start) {
1807 /* Consistency check */
1809 for (frag = nursery_fragments; frag; frag = frag->next) {
1810 if (search_start >= frag->fragment_start && search_start < frag->fragment_end)
1811 g_assert_not_reached ();
/* zeroed word: step forward one pointer and retry */
1815 search_start = (void*)ALIGN_UP ((mword)search_start + sizeof (gpointer));
1818 last_obj = search_start;
1819 last_obj_size = ALIGN_UP (safe_object_get_size ((MonoObject*)search_start));
1821 if (((MonoObject*)last_obj)->synchronisation == GINT_TO_POINTER (-1)) {
1822 /* Marks the beginning of a nursery fragment, skip */
1824 DEBUG (8, fprintf (gc_debug_file, "Pinned try match %p (%s), size %zd\n", last_obj, safe_name (last_obj), last_obj_size));
1825 if (addr >= search_start && (char*)addr < (char*)last_obj + last_obj_size) {
1826 DEBUG (4, fprintf (gc_debug_file, "Pinned object %p, vtable %p (%s), count %d\n", search_start, *(void**)search_start, safe_name (search_start), count));
1827 binary_protocol_pin (search_start, (gpointer)LOAD_VTABLE (search_start), safe_object_get_size (search_start));
1828 pin_object (search_start);
1829 GRAY_OBJECT_ENQUEUE (queue, search_start);
1831 mono_sgen_pin_stats_register_object (search_start, last_obj_size);
1832 definitely_pinned [count] = search_start;
1837 /* skip to the next object */
1838 search_start = (void*)((char*)search_start + last_obj_size);
1839 } while (search_start <= addr);
1840 /* we either pinned the correct object or we ignored the addr because
1841 * it points to unused zeroed memory.
1847 //printf ("effective pinned: %d (at the end: %d)\n", count, (char*)end_nursery - (char*)last);
/* report the pinned objects to any attached profiler */
1848 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
1849 GCRootReport report;
1851 for (idx = 0; idx < count; ++idx)
1852 add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING, 0);
1853 notify_gc_roots (&report);
1855 stat_pinned_objects += count;
/* Pin the objects referenced by SECTION's slice of the pin queue and shrink
 * the slice to the addresses that actually resolved to objects. */
1860 mono_sgen_pin_objects_in_section (GCMemSection *section, GrayQueue *queue)
1862 int num_entries = section->pin_queue_num_entries;
1864 void **start = section->pin_queue_start;
1866 reduced_to = pin_objects_from_addresses (section, start, start + num_entries,
1867 section->data, section->next_data, queue);
1868 section->pin_queue_num_entries = reduced_to;
/* nothing survived: detach the (now empty) slice */
1870 section->pin_queue_start = NULL;
/* Pin a single OBJECT and stage its address; in the parallel collector the
 * object arrives already pinned, so only staging is needed there. */
1876 mono_sgen_pin_object (void *object, GrayQueue *queue)
1878 if (major_collector.is_parallel) {
1880 /*object arrives pinned*/
1881 pin_stage_ptr (object);
1885 SGEN_PIN_OBJECT (object);
1886 pin_stage_ptr (object);
1889 GRAY_OBJECT_ENQUEUE (queue, object);
1892 /* Sort the addresses in array in increasing order.
1893 * Done using a by-the book heap sort. Which has decent and stable performance, is pretty cache efficient.
1896 sort_addresses (void **array, int size)
/* phase 1: sift each element up to build a max-heap in place */
1901 for (i = 1; i < size; ++i) {
1904 int parent = (child - 1) / 2;
1906 if (array [parent] >= array [child])
1909 tmp = array [parent];
1910 array [parent] = array [child];
1911 array [child] = tmp;
/* phase 2: repeatedly move the max to the end and sift the root down */
1917 for (i = size - 1; i > 0; --i) {
1920 array [i] = array [0];
1926 while (root * 2 + 1 <= end) {
1927 int child = root * 2 + 1;
/* pick the larger of the two children */
1929 if (child < end && array [child] < array [child + 1])
1931 if (array [root] >= array [child])
1935 array [root] = array [child];
1936 array [child] = tmp;
/* Debug helper: print the address ranges between consecutive pinned
 * addresses in the (sorted) pin queue across the nursery. */
1943 static G_GNUC_UNUSED void
1944 print_nursery_gaps (void* start_nursery, void *end_nursery)
1947 gpointer first = start_nursery;
1949 for (i = 0; i < next_pin_slot; ++i) {
1950 next = pin_queue [i];
1951 fprintf (gc_debug_file, "Nursery range: %p-%p, size: %td\n", first, next, (char*)next-(char*)first);
/* final gap up to the end of the nursery */
1955 fprintf (gc_debug_file, "Nursery range: %p-%p, size: %td\n", first, next, (char*)next-(char*)first);
1958 /* reduce the info in the pin queue, removing duplicate pointers and sorting them */
1960 optimize_pin_queue (int start_slot)
1962 void **start, **cur, **end;
1963 /* sort and uniq pin_queue: we just sort and we let the rest discard multiple values */
1964 /* it may be better to keep ranges of pinned memory instead of individually pinning objects */
1965 DEBUG (5, fprintf (gc_debug_file, "Sorting pin queue, size: %d\n", next_pin_slot));
1966 if ((next_pin_slot - start_slot) > 1)
1967 sort_addresses (pin_queue + start_slot, next_pin_slot - start_slot);
1968 start = cur = pin_queue + start_slot;
1969 end = pin_queue + next_pin_slot;
/* single pass: copy each run of equal values once (uniq on sorted input) */
1972 while (*start == *cur && cur < end)
1976 next_pin_slot = start - pin_queue;
1977 DEBUG (5, fprintf (gc_debug_file, "Pin queue reduced to size: %d\n", next_pin_slot));
1978 //DEBUG (6, print_nursery_gaps (start_nursery, end_nursery));
1983 * Scan the memory between start and end and queue values which could be pointers
1984 * to the area between start_nursery and end_nursery for later consideration.
1985 * Typically used for thread stacks.
/*
 * conservatively_pin_objects_from: every word in [start, end) that looks like
 * a pointer into [start_nursery, end_nursery) is aligned down to ALLOC_ALIGN
 * and staged for pinning. pin_type is only used for pin statistics.
 */
1988 conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type)
1991 while (start < end) {
/* quick rejection: the raw word must fall inside the nursery range */
1992 if (*start >= start_nursery && *start < end_nursery) {
1994 * *start can point to the middle of an object
1995 * note: should we handle pointing at the end of an object?
1996 * pinning in C# code disallows pointing at the end of an object
1997 * but there is some small chance that an optimizing C compiler
1998 * may keep the only reference to an object by pointing
1999 * at the end of it. We ignore this small chance for now.
2000 * Pointers to the end of an object are indistinguishable
2001 * from pointers to the start of the next object in memory
2002 * so if we allow that we'd need to pin two objects...
2003 * We queue the pointer in an array, the
2004 * array will then be sorted and uniqued. This way
2005 * we can coalesce several pinning pointers and it should
2006 * be faster since we'd do a memory scan with increasing
2007 * addresses. Note: we can align the address to the allocation
2008 * alignment, so the unique process is more effective.
2010 mword addr = (mword)*start;
/* round down to the allocation alignment so duplicates coalesce */
2011 addr &= ~(ALLOC_ALIGN - 1);
/* re-check: alignment may have moved the address below start_nursery */
2012 if (addr >= (mword)start_nursery && addr < (mword)end_nursery)
2013 pin_stage_ptr ((void*)addr);
2015 pin_stats_register_address ((char*)addr, pin_type);
2016 DEBUG (6, if (count) fprintf (gc_debug_file, "Pinning address %p from %p\n", (void*)addr, start));
2021 DEBUG (7, if (count) fprintf (gc_debug_file, "found %d potential pinned heap pointers\n", count));
2025 * Debugging function: find in the conservative roots where @obj is being pinned.
/*
 * find_pinning_reference: scans all conservative (desc-less) registered roots
 * for a word pointing into [obj, obj+size) and logs each hit, then delegates
 * to find_pinning_ref_from_thread for thread stacks. Debug-only helper.
 */
2027 static G_GNUC_UNUSED void
2028 find_pinning_reference (char *obj, size_t size)
2032 char *endobj = obj + size;
2033 for (i = 0; i < roots_hash_size [0]; ++i) {
2034 for (root = roots_hash [0][i]; root; root = root->next) {
2035 /* if desc is non-null it has precise info */
2036 if (!root->root_desc) {
2037 char ** start = (char**)root->start_root;
2038 while (start < (char**)root->end_root) {
2039 if (*start >= obj && *start < endobj) {
2040 DEBUG (0, fprintf (gc_debug_file, "Object %p referenced in pinned roots %p-%p (at %p in record %p)\n", obj, root->start_root, root->end_root, start, root));
/* also look through thread stacks/registers */
2047 find_pinning_ref_from_thread (obj, size);
2051 * The first thing we do in a collection is to identify pinned objects.
2052 * This function considers all the areas of memory that need to be
2053 * conservatively scanned.
/*
 * pin_from_roots: conservatively scans the API-pinned root ranges and all
 * thread data, staging candidate nursery pointers, then flushes the staging
 * area into the pin queue.
 */
2056 pin_from_roots (void *start_nursery, void *end_nursery)
2060 DEBUG (2, fprintf (gc_debug_file, "Scanning pinned roots (%d bytes, %d/%d entries)\n", (int)roots_size, num_roots_entries [ROOT_TYPE_NORMAL], num_roots_entries [ROOT_TYPE_PINNED]));
2061 /* objects pinned from the API are inside these roots */
2062 for (i = 0; i < roots_hash_size [ROOT_TYPE_PINNED]; ++i) {
2063 for (root = roots_hash [ROOT_TYPE_PINNED][i]; root; root = root->next) {
2064 DEBUG (6, fprintf (gc_debug_file, "Pinned roots %p-%p\n", root->start_root, root->end_root));
2065 conservatively_pin_objects_from ((void**)root->start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
2068 /* now deal with the thread stacks
2069 * in the future we should be able to conservatively scan only:
2070 * *) the cpu registers
2071 * *) the unmanaged stack frames
2072 * *) the _last_ managed stack frame
2073 * *) pointers slots in managed frames
2075 scan_thread_data (start_nursery, end_nursery, FALSE);
/* move the staged pointers into the real pin queue */
2077 evacuate_pin_staging_area ();
/*
 * Adapter state for user root descriptors: the user marker callback takes a
 * single argument, so the copy function and queue are smuggled through these
 * file-scope statics. Not thread-safe (see precisely_scan_objects_from).
 */
2080 static CopyOrMarkObjectFunc user_copy_or_mark_func;
2081 static GrayQueue *user_copy_or_mark_queue;
/* One-argument trampoline handed to user-supplied root markers. */
2084 single_arg_user_copy_or_mark (void **obj)
2086 user_copy_or_mark_func (obj, user_copy_or_mark_queue);
2090 * The memory area from start_root to end_root contains pointers to objects.
2091 * Their position is precisely described by @desc (this means that the pointer
2092 * can be either NULL or the pointer to the start of an object).
2093 * This functions copies them to to_space updates them.
2095 * This function is not thread-safe!
/*
 * precisely_scan_objects_from: dispatches on the root descriptor type and
 * invokes copy_func on every non-NULL slot the descriptor marks as a pointer,
 * draining the gray stack after each copy for cache locality.
 */
2098 precisely_scan_objects_from (CopyOrMarkObjectFunc copy_func, void** start_root, void** end_root, char* n_start, char *n_end, mword desc, GrayQueue *queue)
2100 switch (desc & ROOT_DESC_TYPE_MASK) {
/* small bitmap stored inline in the descriptor: one bit per slot */
2101 case ROOT_DESC_BITMAP:
2102 desc >>= ROOT_DESC_TYPE_SHIFT;
2104 if ((desc & 1) && *start_root) {
2105 copy_func (start_root, queue);
2106 DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", start_root, *start_root));
2107 drain_gray_stack (queue, -1);
/* large bitmap stored out-of-line in complex_descriptors */
2113 case ROOT_DESC_COMPLEX: {
2114 gsize *bitmap_data = complex_descriptors + (desc >> ROOT_DESC_TYPE_SHIFT);
/* first word is the word count (incl. itself), hence the -1 */
2115 int bwords = (*bitmap_data) - 1;
2116 void **start_run = start_root;
2118 while (bwords-- > 0) {
2119 gsize bmap = *bitmap_data++;
2120 void **objptr = start_run;
2122 if ((bmap & 1) && *objptr) {
2123 copy_func (objptr, queue);
2124 DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", objptr, *objptr));
2125 drain_gray_stack (queue, -1);
/* each bitmap word covers GC_BITS_PER_WORD slots */
2130 start_run += GC_BITS_PER_WORD;
/* user-supplied marker: route through the single-arg trampoline */
2134 case ROOT_DESC_USER: {
2135 MonoGCRootMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
2136 user_copy_or_mark_func = copy_func;
2137 user_copy_or_mark_queue = queue;
2138 marker (start_root, single_arg_user_copy_or_mark);
2139 user_copy_or_mark_func = NULL;
2140 user_copy_or_mark_queue = NULL;
/* run-length descriptors are not expected for registered roots */
2143 case ROOT_DESC_RUN_LEN:
2144 g_assert_not_reached ();
2146 g_assert_not_reached ();
/*
 * reset_heap_boundaries: resets the cached [lowest, highest) heap address
 * range to "empty" (lowest = all ones, highest = 0) so the next update
 * re-establishes it.
 */
2151 reset_heap_boundaries (void)
2153 lowest_heap_address = ~(mword)0;
2154 highest_heap_address = 0;
/*
 * mono_sgen_update_heap_boundaries: widens the global heap address range to
 * include [low, high). Uses CAS retry loops so it is safe against concurrent
 * updaters.
 */
2158 mono_sgen_update_heap_boundaries (mword low, mword high)
/* lower the minimum with a CAS loop; retried until no other thread races us */
2163 old = lowest_heap_address;
2166 } while (SGEN_CAS_PTR ((gpointer*)&lowest_heap_address, (gpointer)low, (gpointer)old) != (gpointer)old);
/* raise the maximum the same way */
2169 old = highest_heap_address;
2172 } while (SGEN_CAS_PTR ((gpointer*)&highest_heap_address, (gpointer)high, (gpointer)old) != (gpointer)old);
/*
 * alloc_fragment: returns a Fragment, reusing one from the free list when
 * available, otherwise allocating a fresh one from internal GC memory.
 */
2176 alloc_fragment (void)
2178 Fragment *frag = fragment_freelist;
2180 fragment_freelist = frag->next;
2184 frag = mono_sgen_alloc_internal (INTERNAL_MEM_FRAGMENT);
2189 /* size must be a power of 2 */
/*
 * mono_sgen_alloc_os_memory_aligned: returns @size bytes of OS memory whose
 * start address is a multiple of @alignment. Over-allocates by @alignment,
 * rounds the pointer up, then returns the unused head and tail to the OS.
 */
2191 mono_sgen_alloc_os_memory_aligned (mword size, mword alignment, gboolean activate)
2193 /* Allocate twice the memory to be able to put the block on an aligned address */
2194 char *mem = mono_sgen_alloc_os_memory (size + alignment, activate);
/* round up to the next alignment boundary */
2199 aligned = (char*)((mword)(mem + (alignment - 1)) & ~(alignment - 1));
2200 g_assert (aligned >= mem && aligned + size <= mem + size + alignment && !((mword)aligned & (alignment - 1)));
/* free the slack before the aligned start, if any */
2203 mono_sgen_free_os_memory (mem, aligned - mem);
/* and the slack after the aligned block, if any */
2204 if (aligned + size < mem + size + alignment)
2205 mono_sgen_free_os_memory (aligned + size, (mem + size + alignment) - (aligned + size));
2211 * Allocate and setup the data structures needed to be able to allocate objects
2212 * in the nursery. The nursery is stored in nursery_section.
/*
 * alloc_nursery: one-time setup of the nursery section, its scan-starts
 * table, the heap boundary bookkeeping, and the initial single free
 * fragment covering the whole nursery. No-op if already allocated.
 */
2215 alloc_nursery (void)
2217 GCMemSection *section;
/* already initialized: nothing to do */
2223 if (nursery_section)
2225 DEBUG (2, fprintf (gc_debug_file, "Allocating nursery size: %lu\n", (unsigned long)nursery_size));
2226 /* later we will alloc a larger area for the nursery but only activate
2227 * what we need. The rest will be used as expansion if we have too many pinned
2228 * objects in the existing nursery.
2230 /* FIXME: handle OOM */
2231 section = mono_sgen_alloc_internal (INTERNAL_MEM_SECTION);
2233 g_assert (nursery_size == DEFAULT_NURSERY_SIZE);
2234 alloc_size = nursery_size;
2235 #ifdef SGEN_ALIGN_NURSERY
/* aligned nursery: allocation must be aligned to its own size */
2236 data = major_collector.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
2238 data = major_collector.alloc_heap (alloc_size, 0, DEFAULT_NURSERY_BITS);
2240 nursery_start = data;
2241 nursery_real_end = nursery_start + nursery_size;
2242 mono_sgen_update_heap_boundaries ((mword)nursery_start, (mword)nursery_real_end);
2243 nursery_next = nursery_start;
2244 DEBUG (4, fprintf (gc_debug_file, "Expanding nursery size (%p-%p): %lu, total: %lu\n", data, data + alloc_size, (unsigned long)nursery_size, (unsigned long)total_alloc));
2245 section->data = section->next_data = data;
2246 section->size = alloc_size;
2247 section->end_data = nursery_real_end;
/* one scan-start slot per SCAN_START_SIZE chunk, rounded up */
2248 scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
2249 section->scan_starts = mono_sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS);
2250 section->num_scan_start = scan_starts;
2251 section->block.role = MEMORY_ROLE_GEN0;
2252 section->block.next = NULL;
2254 nursery_section = section;
2256 /* Setup the single first large fragment */
2257 frag = alloc_fragment ();
2258 frag->fragment_start = nursery_start;
2259 frag->fragment_limit = nursery_start;
2260 frag->fragment_end = nursery_real_end;
2261 nursery_frag_real_end = nursery_real_end;
2262 /* FIXME: frag here is lost */
/*
 * mono_gc_get_nursery: public accessor for the nursery start/size; also
 * reports the alignment shift when the nursery is size-aligned.
 */
2266 mono_gc_get_nursery (int *shift_bits, size_t *size)
2268 *size = nursery_size;
2269 #ifdef SGEN_ALIGN_NURSERY
2270 *shift_bits = DEFAULT_NURSERY_BITS;
2274 return nursery_start;
/* Returns TRUE when stacks are scanned precisely rather than conservatively. */
2278 mono_gc_precise_stack_mark_enabled (void)
2280 return !conservative_stack_mark;
/* Thin public wrapper over the internal GC log file accessor. */
2284 mono_gc_get_logfile (void)
2286 return mono_sgen_get_logfile ();
/*
 * report_finalizer_roots_list: reports every object on a finalization list
 * to the profiler as a MONO_PROFILE_GC_ROOT_FINALIZER root.
 */
2290 report_finalizer_roots_list (FinalizeEntry *list)
2292 GCRootReport report;
2296 for (fin = list; fin; fin = fin->next) {
2299 add_profile_gc_root (&report, fin->object, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
/* flush any entries still batched in the report */
2301 notify_gc_roots (&report);
/* Reports both the ready and the critical finalization queues to the profiler. */
2305 report_finalizer_roots (void)
2307 report_finalizer_roots_list (fin_ready_list);
2308 report_finalizer_roots_list (critical_fin_list);
/* Adapter state + trampoline so user root markers can feed the profiler
 * report; mirrors the single_arg_user_copy_or_mark pattern (not thread-safe). */
2311 static GCRootReport *root_report;
2314 single_arg_report_root (void **obj)
2317 add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
/*
 * precisely_report_roots_from: profiler analogue of
 * precisely_scan_objects_from — walks a root range per its descriptor and
 * reports each non-NULL reference instead of copying it.
 */
2321 precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
2323 switch (desc & ROOT_DESC_TYPE_MASK) {
/* inline bitmap: one descriptor bit per slot */
2324 case ROOT_DESC_BITMAP:
2325 desc >>= ROOT_DESC_TYPE_SHIFT;
2327 if ((desc & 1) && *start_root) {
2328 add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
/* out-of-line bitmap stored in complex_descriptors */
2334 case ROOT_DESC_COMPLEX: {
2335 gsize *bitmap_data = complex_descriptors + (desc >> ROOT_DESC_TYPE_SHIFT);
2336 int bwords = (*bitmap_data) - 1;
2337 void **start_run = start_root;
2339 while (bwords-- > 0) {
2340 gsize bmap = *bitmap_data++;
2341 void **objptr = start_run;
2343 if ((bmap & 1) && *objptr) {
2344 add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
2349 start_run += GC_BITS_PER_WORD;
/* user marker: report through the single-arg trampoline */
2353 case ROOT_DESC_USER: {
2354 MonoGCRootMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
2355 root_report = report;
2356 marker (start_root, single_arg_report_root);
2359 case ROOT_DESC_RUN_LEN:
2360 g_assert_not_reached ();
2362 g_assert_not_reached ();
/*
 * report_registered_roots_by_type: walks the root hash for one root type and
 * reports every precisely-described root range to the profiler.
 */
2367 report_registered_roots_by_type (int root_type)
2369 GCRootReport report;
2373 for (i = 0; i < roots_hash_size [root_type]; ++i) {
2374 for (root = roots_hash [root_type][i]; root; root = root->next) {
2375 DEBUG (6, fprintf (gc_debug_file, "Precise root scan %p-%p (desc: %p)\n", root->start_root, root->end_root, (void*)root->root_desc));
2376 precisely_report_roots_from (&report, (void**)root->start_root, (void**)root->end_root, root->root_desc);
2379 notify_gc_roots (&report);
/* Reports both normal and write-barrier registered roots to the profiler. */
2383 report_registered_roots (void)
2385 report_registered_roots_by_type (ROOT_TYPE_NORMAL);
2386 report_registered_roots_by_type (ROOT_TYPE_WBARRIER);
/*
 * scan_finalizer_entries: applies copy_func to every object on a finalizer
 * list so those objects (and later, what they reference) survive the
 * collection. May update fin->object in place.
 */
2390 scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeEntry *list, GrayQueue *queue)
2394 for (fin = list; fin; fin = fin->next) {
2397 DEBUG (5, fprintf (gc_debug_file, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object)));
2398 copy_func (&fin->object, queue);
/* running total of free-fragment bytes found during nursery rebuild */
2402 static mword fragment_total = 0;
2404 * We found a fragment of free memory in the nursery: memzero it and if
2405 * it is big enough, add it to the list of fragments that can be used for
/*
 * add_nursery_frag: records [frag_start, frag_end) as allocatable nursery
 * space when it is at least FRAGMENT_MIN_SIZE; smaller leftovers are only
 * zeroed (pinning relies on unused memory being cleared).
 */
2409 add_nursery_frag (size_t frag_size, char* frag_start, char* frag_end)
2412 DEBUG (4, fprintf (gc_debug_file, "Found empty fragment: %p-%p, size: %zd\n", frag_start, frag_end, frag_size));
2413 binary_protocol_empty (frag_start, frag_size);
2414 /* Not worth dealing with smaller fragments: need to tune */
2415 if (frag_size >= FRAGMENT_MIN_SIZE) {
2416 /* memsetting just the first chunk start is bound to provide better cache locality */
2417 if (nursery_clear_policy == CLEAR_AT_GC)
2418 memset (frag_start, 0, frag_size);
/* link the new fragment at the head of the nursery fragment list */
2420 fragment = alloc_fragment ();
2421 fragment->fragment_start = frag_start;
2422 fragment->fragment_limit = frag_start;
2423 fragment->fragment_end = frag_end;
2424 fragment->next = nursery_fragments;
2425 nursery_fragments = fragment;
2426 fragment_total += frag_size;
2428 /* Clear unused fragments, pinning depends on this */
2429 /*TODO place an int[] here instead of the memset if size justify it*/
2430 memset (frag_start, 0, frag_size);
/* Human-readable name of a GC generation, for debug/log output. */
2435 generation_name (int generation)
2437 switch (generation) {
2438 case GENERATION_NURSERY: return "nursery";
2439 case GENERATION_OLD: return "old";
2440 default: g_assert_not_reached ();
/* Selects the per-generation disappearing-link (weak reference) hash table. */
2444 static DisappearingLinkHashTable*
2445 get_dislink_hash_table (int generation)
2447 switch (generation) {
2448 case GENERATION_NURSERY: return &minor_disappearing_link_hash;
2449 case GENERATION_OLD: return &major_disappearing_link_hash;
2450 default: g_assert_not_reached ();
/* Selects the per-generation finalizable-object hash table. */
2454 static FinalizeEntryHashTable*
2455 get_finalize_entry_hash_table (int generation)
2457 switch (generation) {
2458 case GENERATION_NURSERY: return &minor_finalizable_hash;
2459 case GENERATION_OLD: return &major_finalizable_hash;
2460 default: g_assert_not_reached ();
/* Growable array collecting objects finalized in the current cycle so the
 * bridge processor can be handed them in one batch (see finish_gray_stack). */
2464 static MonoObject **finalized_array = NULL;
2465 static int finalized_array_capacity = 0;
2466 static int finalized_array_entries = 0;
/*
 * bridge_register_finalized_object: appends @object to finalized_array,
 * doubling the capacity when full. No-op when bridge processing is off
 * (finalized_array == NULL).
 */
2469 bridge_register_finalized_object (MonoObject *object)
2471 if (!finalized_array)
2474 if (finalized_array_entries >= finalized_array_capacity) {
2475 MonoObject **new_array;
2476 g_assert (finalized_array_entries == finalized_array_capacity);
/* grow by doubling: amortized O(1) appends */
2477 finalized_array_capacity *= 2;
2478 new_array = mono_sgen_alloc_internal_dynamic (sizeof (MonoObject*) * finalized_array_capacity, INTERNAL_MEM_BRIDGE_DATA);
2479 memcpy (new_array, finalized_array, sizeof (MonoObject*) * finalized_array_entries);
2480 mono_sgen_free_internal_dynamic (finalized_array, sizeof (MonoObject*) * finalized_array_entries, INTERNAL_MEM_BRIDGE_DATA);
2481 finalized_array = new_array;
2483 finalized_array [finalized_array_entries++] = object;
/*
 * finish_gray_stack: completes the marking phase for the given generation.
 * Drains the gray stack, then interleaves ephemeron marking, finalizer
 * promotion, bridge processing, and weak-link clearing until a fixpoint is
 * reached (nothing newly reachable). Ends with the gray queue empty.
 * NOTE(review): several lines are missing from this excerpt (stale line
 * numbers inline); comments describe only the visible code.
 */
2487 finish_gray_stack (char *start_addr, char *end_addr, int generation, GrayQueue *queue)
2492 int ephemeron_rounds = 0;
/* nursery collections use the plain copy function; major collections may mark in place */
2494 CopyOrMarkObjectFunc copy_func = current_collection_generation == GENERATION_NURSERY ? major_collector.copy_object : major_collector.copy_or_mark_object;
2497 * We copied all the reachable objects. Now it's the time to copy
2498 * the objects that were not referenced by the roots, but by the copied objects.
2499 * we built a stack of objects pointed to by gray_start: they are
2500 * additional roots and we may add more items as we go.
2501 * We loop until gray_start == gray_objects which means no more objects have
2502 * been added. Note this is iterative: no recursion is involved.
2503 * We need to walk the LO list as well in search of marked big objects
2504 * (use a flag since this is needed only on major collections). We need to loop
2505 * here as well, so keep a counter of marked LO (increasing it in copy_object).
2506 * To achieve better cache locality and cache usage, we drain the gray stack
2507 * frequently, after each object is copied, and just finish the work here.
2509 drain_gray_stack (queue, -1);
2511 DEBUG (2, fprintf (gc_debug_file, "%s generation done\n", generation_name (generation)));
2514 We must clear weak links that don't track resurrection before processing object ready for
2515 finalization so they can be cleared before that.
/* first pass over weak links: only those that track resurrection (TRUE flag) */
2517 null_link_in_range (copy_func, start_addr, end_addr, generation, TRUE, queue);
2518 if (generation == GENERATION_OLD)
2519 null_link_in_range (copy_func, start_addr, end_addr, GENERATION_NURSERY, TRUE, queue);
/* lazily allocate the bridge batch array on first use */
2521 if (finalized_array == NULL && mono_sgen_need_bridge_processing ()) {
2522 finalized_array_capacity = 32;
2523 finalized_array = mono_sgen_alloc_internal_dynamic (sizeof (MonoObject*) * finalized_array_capacity, INTERNAL_MEM_BRIDGE_DATA);
2525 finalized_array_entries = 0;
2527 /* walk the finalization queue and move also the objects that need to be
2528 * finalized: use the finalized objects as new roots so the objects they depend
2529 * on are also not reclaimed. As with the roots above, only objects in the nursery
2530 * are marked/copied.
2531 * We need a loop here, since objects ready for finalizers may reference other objects
2532 * that are fin-ready. Speedup with a flag?
2537 * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
2538 * before processing finalizable objects to avoid finalizing reachable values.
2540 * It must be done inside the finalizaters loop since objects must not be removed from CWT tables
2541 * while they are been finalized.
/* ephemeron fixpoint: marking a value can make another key reachable */
2543 int done_with_ephemerons = 0;
2545 done_with_ephemerons = mark_ephemerons_in_range (copy_func, start_addr, end_addr, queue);
2546 drain_gray_stack (queue, -1);
2548 } while (!done_with_ephemerons);
2550 fin_ready = num_ready_finalizers;
2551 finalize_in_range (copy_func, start_addr, end_addr, generation, queue);
2552 if (generation == GENERATION_OLD)
2553 finalize_in_range (copy_func, nursery_start, nursery_real_end, GENERATION_NURSERY, queue);
/* new finalizable objects appeared: hand the batch to the bridge processor */
2555 if (fin_ready != num_ready_finalizers) {
2557 if (finalized_array != NULL)
2558 mono_sgen_bridge_processing (finalized_array_entries, finalized_array);
2561 /* drain the new stack that might have been created */
2562 DEBUG (6, fprintf (gc_debug_file, "Precise scan of gray area post fin\n"));
2563 drain_gray_stack (queue, -1);
2564 } while (fin_ready != num_ready_finalizers);
2566 if (mono_sgen_need_bridge_processing ())
2567 g_assert (num_loops <= 1);
2570 * Clear ephemeron pairs with unreachable keys.
2571 * We pass the copy func so we can figure out if an array was promoted or not.
2573 clear_unreachable_ephemerons (copy_func, start_addr, end_addr, queue);
/* NOTE(review): "roundss" typo in the debug message below; left as-is here */
2576 DEBUG (2, fprintf (gc_debug_file, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron roundss\n", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds));
2579 * handle disappearing links
2580 * Note we do this after checking the finalization queue because if an object
2581 * survives (at least long enough to be finalized) we don't clear the link.
2582 * This also deals with a possible issue with the monitor reclamation: with the Boehm
2583 * GC a finalized object my lose the monitor because it is cleared before the finalizer is
2586 g_assert (gray_object_queue_is_empty (queue));
/* second pass over weak links: non-resurrection-tracking ones (FALSE flag) */
2588 null_link_in_range (copy_func, start_addr, end_addr, generation, FALSE, queue);
2589 if (generation == GENERATION_OLD)
2590 null_link_in_range (copy_func, start_addr, end_addr, GENERATION_NURSERY, FALSE, queue);
2591 if (gray_object_queue_is_empty (queue))
2593 drain_gray_stack (queue, -1);
/* invariant on exit: nothing left to scan */
2596 g_assert (gray_object_queue_is_empty (queue));
/*
 * mono_sgen_check_section_scan_starts: consistency check — every non-NULL
 * scan-start entry must point at a plausibly-sized object.
 */
2600 mono_sgen_check_section_scan_starts (GCMemSection *section)
2603 for (i = 0; i < section->num_scan_start; ++i) {
2604 if (section->scan_starts [i]) {
2605 guint size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
2606 g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
/* Runs the scan-starts consistency checks when do_scan_starts_check is set. */
2612 check_scan_starts (void)
2614 if (!do_scan_starts_check)
2616 mono_sgen_check_section_scan_starts (nursery_section);
2617 major_collector.check_scan_starts ();
2620 static int last_num_pinned = 0;
/*
 * build_nursery_fragments: after a collection, rebuilds the nursery free
 * list from the sorted pin queue (@start, @num_entries): the gaps between
 * pinned objects become allocatable fragments. Also unpins objects and
 * refreshes the scan-starts table.
 * NOTE(review): lines are missing from this excerpt; comments cover only
 * the visible code.
 */
2623 build_nursery_fragments (void **start, int num_entries)
2625 char *frag_start, *frag_end;
/* return all existing fragments to the free list before rebuilding */
2629 while (nursery_fragments) {
2630 Fragment *next = nursery_fragments->next;
2631 nursery_fragments->next = fragment_freelist;
2632 fragment_freelist = nursery_fragments;
2633 nursery_fragments = next;
2635 frag_start = nursery_start;
2637 /* clear scan starts */
2638 memset (nursery_section->scan_starts, 0, nursery_section->num_scan_start * sizeof (gpointer));
/* walk the sorted pinned entries; each gap before a pinned object is a fragment */
2639 for (i = 0; i < num_entries; ++i) {
2640 frag_end = start [i];
2641 /* remove the pin bit from pinned objects */
2642 unpin_object (frag_end);
2643 nursery_section->scan_starts [((char*)frag_end - (char*)nursery_section->data)/SCAN_START_SIZE] = frag_end;
2644 frag_size = frag_end - frag_start;
2646 add_nursery_frag (frag_size, frag_start, frag_end);
/* skip over the pinned object itself to find the next gap start */
2647 frag_size = ALIGN_UP (safe_object_get_size ((MonoObject*)start [i]));
2648 frag_start = (char*)start [i] + frag_size;
2650 nursery_last_pinned_end = frag_start;
/* trailing fragment: from the last pinned object to the nursery end */
2651 frag_end = nursery_real_end;
2652 frag_size = frag_end - frag_start;
2654 add_nursery_frag (frag_size, frag_start, frag_end);
2655 if (!nursery_fragments) {
2656 DEBUG (1, fprintf (gc_debug_file, "Nursery fully pinned (%d)\n", num_entries));
2657 for (i = 0; i < num_entries; ++i) {
2658 DEBUG (3, fprintf (gc_debug_file, "Bastard pinning obj %p (%s), size: %d\n", start [i], safe_name (start [i]), safe_object_get_size (start [i])));
2663 nursery_next = nursery_frag_real_end = NULL;
2665 /* Clear TLABs for all threads */
/*
 * scan_from_registered_roots: precisely scans every registered root of
 * @root_type, copying/marking referenced objects via @copy_func.
 */
2670 scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, char *addr_start, char *addr_end, int root_type, GrayQueue *queue)
2674 for (i = 0; i < roots_hash_size [root_type]; ++i) {
2675 for (root = roots_hash [root_type][i]; root; root = root->next) {
2676 DEBUG (6, fprintf (gc_debug_file, "Precise root scan %p-%p (desc: %p)\n", root->start_root, root->end_root, (void*)root->root_desc));
2677 precisely_scan_objects_from (copy_func, (void**)root->start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, queue);
/* Emits one <occupied/> element (offset relative to the section start). */
2683 mono_sgen_dump_occupied (char *start, char *end, char *section_start)
2685 fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
/*
 * mono_sgen_dump_section: writes an XML <section> describing a memory
 * section: runs of occupied memory and individual objects. NULL words are
 * treated as free space; everything else is read as an object header.
 * NOTE(review): lines are missing from this excerpt.
 */
2689 mono_sgen_dump_section (GCMemSection *section, const char *type)
2691 char *start = section->data;
2692 char *end = section->data + section->size;
2693 char *occ_start = NULL;
2695 char *old_start = NULL; /* just for debugging */
2697 fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
2699 while (start < end) {
/* a NULL word means free space: close any open occupied run */
2703 if (!*(void**)start) {
2705 mono_sgen_dump_occupied (occ_start, start, section->data);
2708 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
2711 g_assert (start < section->next_data);
/* non-NULL word: interpret as an object and advance by its aligned size */
2716 vt = (GCVTable*)LOAD_VTABLE (start);
2719 size = ALIGN_UP (safe_object_get_size ((MonoObject*) start));
2722 fprintf (heap_dump_file, "<object offset=\"%d\" class=\"%s.%s\" size=\"%d\"/>\n",
2723 start - section->data,
2724 vt->klass->name_space, vt->klass->name,
/* flush a trailing occupied run, if any */
2732 mono_sgen_dump_occupied (occ_start, start, section->data);
2734 fprintf (heap_dump_file, "</section>\n");
/*
 * dump_object: writes one <object/> XML element for @obj, optionally with a
 * location attribute (nursery / small / large object space). Angle brackets
 * and quotes are stripped from the class name for the XML consumer.
 */
2738 dump_object (MonoObject *obj, gboolean dump_location)
2740 static char class_name [1024];
2742 MonoClass *class = mono_object_class (obj);
2746 * Python's XML parser is too stupid to parse angle brackets
2747 * in strings, so we just ignore them;
/* copy the class name, dropping characters that would break the XML */
2750 while (class->name [i] && j < sizeof (class_name) - 1) {
2751 if (!strchr ("<>\"", class->name [i]))
2752 class_name [j++] = class->name [i];
2755 g_assert (j < sizeof (class_name));
2758 fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%d\"",
2759 class->name_space, class_name,
2760 safe_object_get_size (obj));
2761 if (dump_location) {
2762 const char *location;
2763 if (ptr_in_nursery (obj))
2764 location = "nursery";
/* small objects live in the major heap; the rest in the LOS */
2765 else if (safe_object_get_size (obj) <= MAX_SMALL_OBJ_SIZE)
2769 fprintf (heap_dump_file, " location=\"%s\"", location);
2771 fprintf (heap_dump_file, "/>\n");
/*
 * dump_heap: writes one <collection> XML record to heap_dump_file:
 * memory-usage summary, pinned byte counts, pinned objects, the nursery
 * section, the major heap, and the large-object list.
 */
2775 dump_heap (const char *type, int num, const char *reason)
2780 fprintf (heap_dump_file, "<collection type=\"%s\" num=\"%d\"", type, num);
2782 fprintf (heap_dump_file, " reason=\"%s\"", reason);
2783 fprintf (heap_dump_file, ">\n");
2784 fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
2785 mono_sgen_dump_internal_mem_usage (heap_dump_file);
2786 fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", pinned_byte_counts [PIN_TYPE_STACK]);
2787 /* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
2788 fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", pinned_byte_counts [PIN_TYPE_OTHER]);
2790 fprintf (heap_dump_file, "<pinned-objects>\n");
2791 for (list = pinned_objects; list; list = list->next)
2792 dump_object (list->obj, TRUE);
2793 fprintf (heap_dump_file, "</pinned-objects>\n");
2795 mono_sgen_dump_section (nursery_section, "nursery");
2797 major_collector.dump_heap (heap_dump_file);
2799 fprintf (heap_dump_file, "<los>\n");
2800 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
2801 dump_object ((MonoObject*)bigobj->data, FALSE);
2802 fprintf (heap_dump_file, "</los>\n");
2804 fprintf (heap_dump_file, "</collection>\n");
/*
 * mono_sgen_register_moved_object: records an (old, new) address pair for the
 * profiler's GC-moves event, flushing the batch when the buffer fills.
 * Entries are stored pairwise, so the buffer holds MOVED_OBJECTS_NUM/2 moves.
 */
2808 mono_sgen_register_moved_object (void *obj, void *destination)
2810 g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
2812 /* FIXME: handle this for parallel collector */
2813 g_assert (!major_collector.is_parallel);
/* buffer full: report the accumulated moves and start over */
2815 if (moved_objects_idx == MOVED_OBJECTS_NUM) {
2816 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
2817 moved_objects_idx = 0;
2819 moved_objects [moved_objects_idx++] = obj;
2820 moved_objects [moved_objects_idx++] = destination;
/*
 * One-time registration of GC timing and statistics counters with the
 * mono_counters subsystem: minor/major phase timings, pinned-object counts,
 * and (under HEAVY_STATISTICS) write-barrier/remset/copy counters.
 * NOTE(review): the enclosing function header is not visible in this
 * excerpt; the 'inited' flag presumably guards against double registration
 * — confirm against the full file.
 */
2826 static gboolean inited = FALSE;
/* --- minor (nursery) collection phase timings --- */
2831 mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_pre_collection_fragment_clear);
2832 mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_pinning);
2833 mono_counters_register ("Minor scan remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_remsets);
2834 mono_counters_register ("Minor scan cardtables", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_card_table);
2835 mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_pinned);
2836 mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_registered_roots);
2837 mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_thread_data);
2838 mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_finish_gray_stack);
2839 mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_fragment_creation);
/* --- major collection phase timings --- */
2841 mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_pre_collection_fragment_clear);
2842 mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_pinning);
2843 mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_pinned);
2844 mono_counters_register ("Major scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_registered_roots);
2845 mono_counters_register ("Major scan thread data", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_thread_data);
2846 mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_alloc_pinned);
2847 mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_finalized);
2848 mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_big_objects);
2849 mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_finish_gray_stack);
2850 mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_free_bigobjs);
2851 mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_los_sweep);
2852 mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_sweep);
2853 mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_fragment_creation);
2855 mono_counters_register ("Number of pinned objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_pinned_objects);
/* --- detailed counters, compiled in only with HEAVY_STATISTICS --- */
2857 #ifdef HEAVY_STATISTICS
2858 mono_counters_register ("WBarrier set field", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_field);
2859 mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_arrayref);
2860 mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_arrayref_copy);
2861 mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store);
2862 mono_counters_register ("WBarrier generic store stored", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store_remset);
2863 mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_root);
2864 mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_value_copy);
2865 mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_object_copy);
2867 mono_counters_register ("# objects allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced);
2868 mono_counters_register ("bytes allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced);
2869 mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced_degraded);
2870 mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_degraded);
2871 mono_counters_register ("bytes allocated in LOS", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_los);
2873 mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_nursery);
2874 mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_nursery);
2875 mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_major);
2876 mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_major);
2878 mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_nursery);
2879 mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_major);
2881 mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_from_space);
2882 mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_forwarded);
2883 mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_pinned);
2885 mono_counters_register ("# wasted fragments used", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_wasted_fragments_used);
2886 mono_counters_register ("bytes in wasted fragments", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_wasted_fragments_bytes);
2888 mono_counters_register ("Store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets);
2889 mono_counters_register ("Unique store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets_unique);
2890 mono_counters_register ("Saved remsets 1", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_saved_remsets_1);
2891 mono_counters_register ("Saved remsets 2", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_saved_remsets_2);
2892 mono_counters_register ("Non-global remsets processed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_local_remsets_processed);
2893 mono_counters_register ("Global remsets added", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_added);
2894 mono_counters_register ("Global remsets re-added", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_readded);
2895 mono_counters_register ("Global remsets processed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_processed);
2896 mono_counters_register ("Global remsets discarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_discarded);
/* State used by the minor-collection-allowance heuristic.
 * NOTE(review): this excerpt elides lines; comments describe only visible code. */
/* Set TRUE (by reset_minor_collection_allowance) when the allowance must be
 * recomputed; cleared by try_calculate_minor_collection_allowance. */
2902 static gboolean need_calculate_minor_collection_allowance;
/* Number of major sections recorded at the start of the last major collection. */
2904 static int last_collection_old_num_major_sections;
/* LOS memory usage captured at the end of the last major collection. */
2905 static mword last_collection_los_memory_usage = 0;
/* LOS memory usage captured at the start of the last major collection. */
2906 static mword last_collection_old_los_memory_usage;
/* LOS bytes allocated since the previous major collection (clamped to >= 0). */
2907 static mword last_collection_los_memory_alloced;
/* Mark the minor-collection allowance stale so it is lazily recomputed by
 * try_calculate_minor_collection_allowance() before it is next needed. */
2910 reset_minor_collection_allowance (void)
2912 need_calculate_minor_collection_allowance = TRUE;
/* Recompute minor_collection_allowance — the amount of major-heap/LOS
 * allocation tolerated before a major collection is triggered — from how much
 * memory the last major collection reclaimed.
 * @overwrite: presumably forces recomputation vs. asserting it is needed —
 * TODO(review) confirm; the branch structure is elided in this excerpt.
 * No-op path: if the major heap has not been swept yet, fall back to the
 * minimum allowance. */
2916 try_calculate_minor_collection_allowance (gboolean overwrite)
2918 int num_major_sections, num_major_sections_saved, save_target, allowance_target;
2919 mword los_memory_saved;
2922 g_assert (need_calculate_minor_collection_allowance);
2924 if (!need_calculate_minor_collection_allowance)
/* Sweep not finished: we cannot measure reclaimed memory yet, so use the
 * conservative minimum allowance. */
2927 if (!*major_collector.have_swept) {
2929 minor_collection_allowance = MIN_MINOR_COLLECTION_ALLOWANCE;
2933 num_major_sections = major_collector.get_num_major_sections ();
/* Sections/LOS bytes reclaimed by the last major collection (saved counts
 * clamped so the ratio below is well defined; los_memory_saved >= 1 avoids
 * division by zero). */
2935 num_major_sections_saved = MAX (last_collection_old_num_major_sections - num_major_sections, 0);
2936 los_memory_saved = MAX (last_collection_old_los_memory_usage - last_collection_los_memory_usage, 1);
/* Aim to reclaim half of the current major heap + reclaimed LOS. */
2938 save_target = ((num_major_sections * major_collector.section_size) + los_memory_saved) / 2;
2941 * We aim to allow the allocation of as many sections as is
2942 * necessary to reclaim save_target sections in the next
2943 * collection. We assume the collection pattern won't change.
2944 * In the last cycle, we had num_major_sections_saved for
2945 * minor_collection_sections_alloced. Assuming things won't
2946 * change, this must be the same ratio as save_target for
2947 * allowance_target, i.e.
2949 * num_major_sections_saved save_target
2950 * --------------------------------- == ----------------
2951 * minor_collection_sections_alloced allowance_target
2955 allowance_target = (mword)((double)save_target * (double)(minor_collection_sections_alloced * major_collector.section_size + last_collection_los_memory_alloced) / (double)(num_major_sections_saved * major_collector.section_size + los_memory_saved));
/* Clamp: never above the current heap size, never below the minimum. */
2957 minor_collection_allowance = MAX (MIN (allowance_target, num_major_sections * major_collector.section_size + los_memory_usage), MIN_MINOR_COLLECTION_ALLOWANCE);
2959 if (major_collector.have_computed_minor_collection_allowance)
2960 major_collector.have_computed_minor_collection_allowance ();
2962 need_calculate_minor_collection_allowance = FALSE;
/* Decide whether a major collection is warranted: either @space_needed cannot
 * be satisfied from free space, or allocations since the last major collection
 * (major sections + LOS) exceed the computed minor-collection allowance.
 * The MIN() guards against los_memory_usage having shrunk (e.g. after a
 * domain unload) below the recorded last_collection value. */
2966 need_major_collection (mword space_needed)
2968 mword los_alloced = los_memory_usage - MIN (last_collection_los_memory_usage, los_memory_usage);
2969 return (space_needed > available_free_space ()) ||
2970 minor_collection_sections_alloced * major_collector.section_size + los_alloced > minor_collection_allowance;
/* Public wrapper around the static need_major_collection() heuristic. */
2974 mono_sgen_need_major_collection (mword space_needed)
2976 return need_major_collection (space_needed);
2980 * Collect objects in the nursery. Returns whether to trigger a major
/* NOTE(review): lines are elided in this excerpt (braces/timer calls between
 * the visible statements); comments below describe only visible code.
 * The world must already be stopped when this is called. */
2984 collect_nursery (size_t requested_size)
2986 gboolean needs_major;
2987 size_t max_garbage_amount;
2988 char *orig_nursery_next;
2989 TV_DECLARE (all_atv);
2990 TV_DECLARE (all_btv);
2994 mono_perfcounters->gc_collections0++;
2996 current_collection_generation = GENERATION_NURSERY;
2998 binary_protocol_collection (GENERATION_NURSERY);
2999 check_scan_starts ();
/* Pin the end of the scanned region to the highest address that may contain
 * objects, so pinning/scanning cover the whole live nursery range. */
3003 orig_nursery_next = nursery_next;
3004 nursery_next = MAX (nursery_next, nursery_last_pinned_end);
3005 /* FIXME: optimize later to use the higher address where an object can be present */
3006 nursery_next = MAX (nursery_next, nursery_real_end);
3008 DEBUG (1, fprintf (gc_debug_file, "Start nursery collection %d %p-%p, size: %d\n", num_minor_gcs, nursery_start, nursery_next, (int)(nursery_next - nursery_start)));
3009 max_garbage_amount = nursery_next - nursery_start;
3010 g_assert (nursery_section->size >= max_garbage_amount);
3012 /* world must be stopped already */
3013 TV_GETTIME (all_atv);
3016 /* Pinning no longer depends on clearing all nursery fragments */
3017 clear_current_nursery_fragment (orig_nursery_next);
3020 time_minor_pre_collection_fragment_clear += TV_ELAPSED_MS (atv, btv);
3023 check_for_xdomain_refs ();
3025 nursery_section->next_data = nursery_next;
3027 major_collector.start_nursery_collection ();
/* Make sure the allowance is up to date before we decide on a follow-up major. */
3029 try_calculate_minor_collection_allowance (FALSE);
3031 gray_object_queue_init (&gray_queue, mono_sgen_get_unmanaged_allocator ());
3034 mono_stats.minor_gc_count ++;
3036 global_remset_cache_clear ();
3038 /* pin from pinned handles */
3040 mono_profiler_gc_event (MONO_GC_EVENT_MARK_START, 0);
/* Conservatively pin objects referenced from roots, then reduce the pin
 * queue to actual object addresses within the nursery. */
3041 pin_from_roots (nursery_start, nursery_next);
3042 /* identify pinned objects */
3043 optimize_pin_queue (0);
3044 next_pin_slot = pin_objects_from_addresses (nursery_section, pin_queue, pin_queue + next_pin_slot, nursery_start, nursery_next, &gray_queue);
3045 nursery_section->pin_queue_start = pin_queue;
3046 nursery_section->pin_queue_num_entries = next_pin_slot;
3048 time_minor_pinning += TV_ELAPSED_MS (btv, atv);
3049 DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", next_pin_slot, TV_ELAPSED (btv, atv)));
3050 DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", next_pin_slot));
3052 if (consistency_check_at_minor_collection)
3053 check_consistency ();
3056 * walk all the roots and copy the young objects to the old generation,
3057 * starting from to_space
/* Scan remembered sets (old-gen -> nursery pointers). */
3060 scan_from_remsets (nursery_start, nursery_next, &gray_queue);
3061 /* we don't have complete write barrier yet, so we scan all the old generation sections */
3063 time_minor_scan_remsets += TV_ELAPSED_MS (atv, btv);
3064 DEBUG (2, fprintf (gc_debug_file, "Old generation scan: %d usecs\n", TV_ELAPSED (atv, btv)));
/* Card-table write barrier, if enabled, replaces/augments remset scanning. */
3066 if (use_cardtable) {
3068 card_tables_collect_stats (TRUE);
3069 scan_from_card_tables (nursery_start, nursery_next, &gray_queue);
3071 time_minor_scan_card_table += TV_ELAPSED_MS (atv, btv);
3074 drain_gray_stack (&gray_queue, -1);
3076 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
3077 report_registered_roots ();
3078 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
3079 report_finalizer_roots ();
3081 time_minor_scan_pinned += TV_ELAPSED_MS (btv, atv);
3082 /* registered roots, this includes static fields */
3083 scan_from_registered_roots (major_collector.copy_object, nursery_start, nursery_next, ROOT_TYPE_NORMAL, &gray_queue);
3084 scan_from_registered_roots (major_collector.copy_object, nursery_start, nursery_next, ROOT_TYPE_WBARRIER, &gray_queue);
3086 time_minor_scan_registered_roots += TV_ELAPSED_MS (atv, btv);
/* Conservatively scan thread stacks/registers. */
3088 scan_thread_data (nursery_start, nursery_next, TRUE);
3090 time_minor_scan_thread_data += TV_ELAPSED_MS (btv, atv);
/* Process the gray stack to completion, including finalizer/weak-ref work. */
3093 finish_gray_stack (nursery_start, nursery_next, GENERATION_NURSERY, &gray_queue);
3095 time_minor_finish_gray_stack += TV_ELAPSED_MS (btv, atv);
3096 mono_profiler_gc_event (MONO_GC_EVENT_MARK_END, 0);
/* Objects pinned late (e.g. on OOM during copying) need the pin queue rebuilt. */
3098 if (objects_pinned) {
3099 evacuate_pin_staging_area ();
3100 optimize_pin_queue (0);
3101 nursery_section->pin_queue_start = pin_queue;
3102 nursery_section->pin_queue_num_entries = next_pin_slot;
3105 /* walk the pin_queue, build up the fragment list of free memory, unmark
3106 * pinned objects as we go, memzero() the empty fragments so they are ready for the
3109 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_START, 0);
3110 build_nursery_fragments (pin_queue, next_pin_slot);
3111 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_END, 0);
3113 time_minor_fragment_creation += TV_ELAPSED_MS (atv, btv);
3114 DEBUG (2, fprintf (gc_debug_file, "Fragment creation: %d usecs, %lu bytes available\n", TV_ELAPSED (atv, btv), (unsigned long)fragment_total));
3116 if (consistency_check_at_minor_collection)
3117 check_major_refs ();
3119 major_collector.finish_nursery_collection ();
3121 TV_GETTIME (all_btv);
3122 mono_stats.minor_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
3125 dump_heap ("minor", num_minor_gcs - 1, NULL);
3127 /* prepare the pin queue for the next collection */
3128 last_num_pinned = next_pin_slot;
/* Wake the finalizer thread if any objects became ready for finalization. */
3130 if (fin_ready_list || critical_fin_list) {
3131 DEBUG (4, fprintf (gc_debug_file, "Finalizer-thread wakeup: ready %d\n", num_ready_finalizers));
3132 mono_gc_finalize_notify ();
3136 g_assert (gray_object_queue_is_empty (&gray_queue));
3139 card_tables_collect_stats (FALSE);
3141 check_scan_starts ();
3143 binary_protocol_flush_buffers (FALSE);
3145 /*objects are late pinned because of lack of memory, so a major is a good call*/
3146 needs_major = need_major_collection (0) || objects_pinned;
3147 current_collection_generation = -1;
/* Perform a full major (old-generation + LOS) collection.
 * @reason: human-readable trigger description, passed to dump_heap.
 * The world must already be stopped.
 * NOTE(review): this excerpt elides lines (braces, timer calls, else
 * branches); comments describe only visible code. */
3154 major_do_collection (const char *reason)
3156 LOSObject *bigobj, *prevbo;
3157 TV_DECLARE (all_atv);
3158 TV_DECLARE (all_btv);
3161 /* FIXME: only use these values for the precise scan
3162 * note that to_space pointers should be excluded anyway...
3164 char *heap_start = NULL;
3165 char *heap_end = (char*)-1;
3166 int old_next_pin_slot;
3168 mono_perfcounters->gc_collections1++;
/* Snapshot heap sizes so the next allowance computation can measure what
 * this collection reclaimed. */
3170 last_collection_old_num_major_sections = major_collector.get_num_major_sections ();
3173 * A domain could have been freed, resulting in
3174 * los_memory_usage being less than last_collection_los_memory_usage.
3176 last_collection_los_memory_alloced = los_memory_usage - MIN (last_collection_los_memory_usage, los_memory_usage);
3177 last_collection_old_los_memory_usage = los_memory_usage;
3180 //count_ref_nonref_objs ();
3181 //consistency_check ();
3183 binary_protocol_collection (GENERATION_OLD);
3184 check_scan_starts ();
3185 gray_object_queue_init (&gray_queue, mono_sgen_get_unmanaged_allocator ());
/* Parallel collectors get a second queue used to hand work to workers. */
3186 if (major_collector.is_parallel)
3187 gray_object_queue_init (&workers_distribute_gray_queue, mono_sgen_get_unmanaged_allocator ());
3190 DEBUG (1, fprintf (gc_debug_file, "Start major collection %d\n", num_major_gcs));
3192 mono_stats.major_gc_count ++;
3194 /* world must be stopped already */
3195 TV_GETTIME (all_atv);
3198 /* Pinning depends on this */
3199 clear_nursery_fragments (nursery_next);
3202 time_major_pre_collection_fragment_clear += TV_ELAPSED_MS (atv, btv);
3204 nursery_section->next_data = nursery_real_end;
3205 /* we should also coalesce scanning from sections close to each other
3206 * and deal with pointers outside of the sections later.
3209 if (major_collector.start_major_collection)
3210 major_collector.start_major_collection ();
/* Sweep state is invalidated; the allowance must be recomputed after sweep. */
3212 *major_collector.have_swept = FALSE;
3213 reset_minor_collection_allowance ();
3216 check_for_xdomain_refs ();
3218 /* The remsets are not useful for a major collection */
3220 global_remset_cache_clear ();
3222 card_table_clear ();
3226 DEBUG (6, fprintf (gc_debug_file, "Collecting pinned addresses\n"));
/* Conservative pinning pass over the entire heap address range. */
3227 pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address);
3228 optimize_pin_queue (0);
3231 * pin_queue now contains all candidate pointers, sorted and
3232 * uniqued. We must do two passes now to figure out which
3233 * objects are pinned.
3235 * The first is to find within the pin_queue the area for each
3236 * section. This requires that the pin_queue be sorted. We
3237 * also process the LOS objects and pinned chunks here.
3239 * The second, destructive, pass is to reduce the section
3240 * areas to pointers to the actually pinned objects.
3242 DEBUG (6, fprintf (gc_debug_file, "Pinning from sections\n"));
3243 /* first pass for the sections */
3244 mono_sgen_find_section_pin_queue_start_end (nursery_section);
3245 major_collector.find_pin_queue_start_ends (WORKERS_DISTRIBUTE_GRAY_QUEUE);
3246 /* identify possible pointers to the inside of large objects */
3247 DEBUG (6, fprintf (gc_debug_file, "Pinning from large objects\n"));
3248 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
3250 if (mono_sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + bigobj->size, &dummy)) {
3251 pin_object (bigobj->data);
3252 /* FIXME: only enqueue if object has references */
3253 GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data);
3255 mono_sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
3256 DEBUG (6, fprintf (gc_debug_file, "Marked large object %p (%s) size: %lu from roots\n", bigobj->data, safe_name (bigobj->data), (unsigned long)bigobj->size));
3259 /* second pass for the sections */
3260 mono_sgen_pin_objects_in_section (nursery_section, WORKERS_DISTRIBUTE_GRAY_QUEUE);
3261 major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
3262 old_next_pin_slot = next_pin_slot;
3265 time_major_pinning += TV_ELAPSED_MS (atv, btv);
3266 DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", next_pin_slot, TV_ELAPSED (atv, btv)));
3267 DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", next_pin_slot));
3269 major_collector.init_to_space ();
3271 workers_start_all_workers ();
3273 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
3274 report_registered_roots ();
3276 time_major_scan_pinned += TV_ELAPSED_MS (btv, atv);
3278 /* registered roots, this includes static fields */
3279 scan_from_registered_roots (major_collector.copy_or_mark_object, heap_start, heap_end, ROOT_TYPE_NORMAL, WORKERS_DISTRIBUTE_GRAY_QUEUE);
3280 scan_from_registered_roots (major_collector.copy_or_mark_object, heap_start, heap_end, ROOT_TYPE_WBARRIER, WORKERS_DISTRIBUTE_GRAY_QUEUE);
3282 time_major_scan_registered_roots += TV_ELAPSED_MS (atv, btv);
3285 /* FIXME: This is the wrong place for this, because it does
3287 scan_thread_data (heap_start, heap_end, TRUE);
3289 time_major_scan_thread_data += TV_ELAPSED_MS (btv, atv);
3292 time_major_scan_alloc_pinned += TV_ELAPSED_MS (atv, btv);
3294 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
3295 report_finalizer_roots ();
3296 /* scan the list of objects ready for finalization */
3297 scan_finalizer_entries (major_collector.copy_or_mark_object, fin_ready_list, WORKERS_DISTRIBUTE_GRAY_QUEUE);
3298 scan_finalizer_entries (major_collector.copy_or_mark_object, critical_fin_list, WORKERS_DISTRIBUTE_GRAY_QUEUE);
3300 time_major_scan_finalized += TV_ELAPSED_MS (btv, atv);
3301 DEBUG (2, fprintf (gc_debug_file, "Root scan: %d usecs\n", TV_ELAPSED (btv, atv)));
3304 time_major_scan_big_objects += TV_ELAPSED_MS (atv, btv);
/* Hand remaining gray-queue sections to the worker threads. */
3306 if (major_collector.is_parallel) {
3307 while (!gray_object_queue_is_empty (WORKERS_DISTRIBUTE_GRAY_QUEUE)) {
3308 workers_distribute_gray_queue_sections ();
3314 if (major_collector.is_parallel)
3315 g_assert (gray_object_queue_is_empty (&gray_queue));
3317 /* all the objects in the heap */
3318 finish_gray_stack (heap_start, heap_end, GENERATION_OLD, &gray_queue);
3320 time_major_finish_gray_stack += TV_ELAPSED_MS (btv, atv);
3323 * The (single-threaded) finalization code might have done
3324 * some copying/marking so we can only reset the GC thread's
3325 * worker data here instead of earlier when we joined the
3328 if (major_collector.reset_worker_data)
3329 major_collector.reset_worker_data (workers_gc_thread_data.major_collector_data);
3331 if (objects_pinned) {
3332 /*This is slow, but we just OOM'd*/
3333 mono_sgen_pin_queue_clear_discarded_entries (nursery_section, old_next_pin_slot);
3334 evacuate_pin_staging_area ();
3335 optimize_pin_queue (0);
3336 mono_sgen_find_section_pin_queue_start_end (nursery_section);
3340 reset_heap_boundaries ();
3341 mono_sgen_update_heap_boundaries ((mword)nursery_start, (mword)nursery_real_end);
3343 /* sweep the big objects list */
/* Unpinned large objects are unreachable and freed; the list is unlinked
 * in place (prevbo tracks the previous node). */
3345 for (bigobj = los_object_list; bigobj;) {
3346 if (object_is_pinned (bigobj->data)) {
3347 unpin_object (bigobj->data);
3348 mono_sgen_update_heap_boundaries ((mword)bigobj->data, (mword)bigobj->data + bigobj->size);
3351 /* not referenced anywhere, so we can free it */
3353 prevbo->next = bigobj->next;
3355 los_object_list = bigobj->next;
3357 bigobj = bigobj->next;
3358 mono_sgen_los_free_object (to_free);
3362 bigobj = bigobj->next;
3366 time_major_free_bigobjs += TV_ELAPSED_MS (atv, btv);
3368 mono_sgen_los_sweep ();
3371 time_major_los_sweep += TV_ELAPSED_MS (btv, atv);
3373 major_collector.sweep ();
3376 time_major_sweep += TV_ELAPSED_MS (atv, btv);
3378 /* walk the pin_queue, build up the fragment list of free memory, unmark
3379 * pinned objects as we go, memzero() the empty fragments so they are ready for the
3382 build_nursery_fragments (nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries);
3385 time_major_fragment_creation += TV_ELAPSED_MS (btv, atv);
3387 TV_GETTIME (all_btv);
3388 mono_stats.major_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
3391 dump_heap ("major", num_major_gcs - 1, reason);
3393 /* prepare the pin queue for the next collection */
3395 if (fin_ready_list || critical_fin_list) {
3396 DEBUG (4, fprintf (gc_debug_file, "Finalizer-thread wakeup: ready %d\n", num_ready_finalizers));
3397 mono_gc_finalize_notify ();
3401 g_assert (gray_object_queue_is_empty (&gray_queue));
/* Recompute the allowance now that the heap has been swept, and reset the
 * per-cycle allocation accounting. */
3403 try_calculate_minor_collection_allowance (TRUE);
3405 minor_collection_sections_alloced = 0;
3406 last_collection_los_memory_usage = los_memory_usage;
3408 major_collector.finish_major_collection ();
3410 check_scan_starts ();
3412 binary_protocol_flush_buffers (FALSE);
3414 //consistency_check ();
/* Run a major collection, or a nursery collection instead when the
 * MONO_GC_NO_MAJOR environment variable is set (debugging escape hatch).
 * Maintains current_collection_generation around the collection. */
3418 major_collection (const char *reason)
3420 if (g_getenv ("MONO_GC_NO_MAJOR")) {
3421 collect_nursery (0);
3425 current_collection_generation = GENERATION_OLD;
3426 major_do_collection (reason);
3427 current_collection_generation = -1;
/* Trigger a major collection, emitting the profiler start/end events for
 * generation 1. Caller must already hold the GC lock (hence "_no_lock"
 * in the name — TODO(review) confirm against callers). */
3431 sgen_collect_major_no_lock (const char *reason)
3433 mono_profiler_gc_event (MONO_GC_EVENT_START, 1);
3435 major_collection (reason);
3437 mono_profiler_gc_event (MONO_GC_EVENT_END, 1);
3441 * When deciding if it's better to collect or to expand, keep track
3442 * of how much garbage was reclaimed with the last collection: if it's too
3444 * This is called when we could not allocate a small object.
/* Slow path when a small-object allocation cannot be satisfied: run a
 * nursery collection (escalating to a major collection if the nursery
 * collection says so), then retry fragment search; on failure, enter
 * degraded mode (handling elided in this excerpt). */
3446 static void __attribute__((noinline))
3447 minor_collect_or_expand_inner (size_t size)
/* Always 1 today; kept as a switch between collect vs. expand policies. */
3449 int do_minor_collection = 1;
3451 g_assert (nursery_section);
3452 if (do_minor_collection) {
3453 mono_profiler_gc_event (MONO_GC_EVENT_START, 0);
3455 if (collect_nursery (size)) {
3456 mono_profiler_gc_event (MONO_GC_EVENT_START, 1);
3457 major_collection ("minor overflow");
3458 /* keep events symmetric */
3459 mono_profiler_gc_event (MONO_GC_EVENT_END, 1);
3461 DEBUG (2, fprintf (gc_debug_file, "Heap size: %lu, LOS size: %lu\n", (unsigned long)total_alloc, (unsigned long)los_memory_usage));
3463 /* this also sets the proper pointers for the next allocation */
3464 if (!search_fragment_for_size (size)) {
/* Still no room after collecting: log each pinned object blocking reuse. */
3466 /* TypeBuilder and MonoMethod are killing mcs with fragmentation */
3467 DEBUG (1, fprintf (gc_debug_file, "nursery collection didn't find enough room for %zd alloc (%d pinned)\n", size, last_num_pinned));
3468 for (i = 0; i < last_num_pinned; ++i) {
3469 DEBUG (3, fprintf (gc_debug_file, "Bastard pinning obj %p (%s), size: %d\n", pin_queue [i], safe_name (pin_queue [i]), safe_object_get_size (pin_queue [i])));
3473 mono_profiler_gc_event (MONO_GC_EVENT_END, 0);
3475 //report_internal_mem_usage ();
3479 * ######################################################################
3480 * ######## Memory allocation from the OS
3481 * ######################################################################
3482 * This section of code deals with getting memory from the OS and
3483 * allocating memory for GC-internal data structures.
3484 * Internal memory can be handled with a freelist for small objects.
/* Debug helper: print GC-internal and pinned memory usage to stdout.
 * Unused in normal builds (G_GNUC_UNUSED); invoked manually when debugging. */
3490 G_GNUC_UNUSED static void
3491 report_internal_mem_usage (void)
3493 printf ("Internal memory usage:\n");
3494 mono_sgen_report_internal_mem_usage ();
3495 printf ("Pinned memory usage:\n");
3496 major_collector.report_pinned_memory_usage ();
3500 * Allocate a big chunk of memory from the OS (usually 64KB to several megabytes).
3501 * This must not require any lock.
/* @size is rounded up to a whole number of pages; @activate selects
 * read/write protection vs. reserved-only (MONO_MMAP_NONE).
 * total_alloc accounting uses the rounded size. */
3504 mono_sgen_alloc_os_memory (size_t size, int activate)
3507 unsigned long prot_flags = activate? MONO_MMAP_READ|MONO_MMAP_WRITE: MONO_MMAP_NONE;
3509 prot_flags |= MONO_MMAP_PRIVATE | MONO_MMAP_ANON;
/* Round size up to the page boundary. */
3510 size += pagesize - 1;
3511 size &= ~(pagesize - 1);
3512 ptr = mono_valloc (0, size, prot_flags);
3514 total_alloc += size;
3519 * Free the memory returned by mono_sgen_alloc_os_memory (), returning it to the OS.
/* @size is rounded up to page granularity to mirror the allocation-side
 * accounting before decrementing total_alloc. */
3522 mono_sgen_free_os_memory (void *addr, size_t size)
3524 mono_vfree (addr, size);
3526 size += pagesize - 1;
3527 size &= ~(pagesize - 1);
3529 total_alloc -= size;
3533 * ######################################################################
3534 * ######## Object allocation
3535 * ######################################################################
3536 * This section of code deals with allocating memory for objects.
3537 * There are several ways:
3538 * *) allocate large objects
3539 * *) allocate normal objects
3540 * *) fast lock-free allocation
3541 * *) allocation of pinned objects
/* Make nursery fragment @frag the current allocation region: unlink it from
 * nursery_fragments (@prev is its predecessor, or the list head case when
 * elided here), point nursery_next/nursery_frag_real_end at it, and return
 * the Fragment node to the freelist. */
3545 setup_fragment (Fragment *frag, Fragment *prev, size_t size)
3547 /* remove from the list */
3549 prev->next = frag->next;
3551 nursery_fragments = frag->next;
3552 nursery_next = frag->fragment_start;
3553 nursery_frag_real_end = frag->fragment_end;
3555 DEBUG (4, fprintf (gc_debug_file, "Using nursery fragment %p-%p, size: %td (req: %zd)\n", nursery_next, nursery_frag_real_end, nursery_frag_real_end - nursery_next, size));
/* Recycle the descriptor node. */
3556 frag->next = fragment_freelist;
3557 fragment_freelist = frag;
3560 /* check if we have a suitable fragment in nursery_fragments to be able to allocate
3561 * an object of size @size
3562 * Return FALSE if not found (which means we need a collection)
3565 search_fragment_for_size (size_t size)
3567 Fragment *frag, *prev;
3568 DEBUG (4, fprintf (gc_debug_file, "Searching nursery fragment %p, size: %zd\n", nursery_frag_real_end, size));
/* Zero the tail of the current fragment first: pinning relies on the
 * unused nursery space being cleared under CLEAR_AT_TLAB_CREATION. */
3570 if (nursery_frag_real_end > nursery_next && nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
3571 /* Clear the remaining space, pinning depends on this */
3572 memset (nursery_next, 0, nursery_frag_real_end - nursery_next);
/* First-fit walk of the fragment list. */
3576 for (frag = nursery_fragments; frag; frag = frag->next) {
3577 if (size <= (frag->fragment_end - frag->fragment_start)) {
3578 setup_fragment (frag, prev, size);
3587 * Same as search_fragment_for_size but if search for @desired_size fails, try to satisfy @minimum_size.
3588 * This improves nursery usage.
/* Returns the size actually obtained: @desired_size on an exact-class hit,
 * otherwise the (smaller) fragment size of a minimum-size fallback; the
 * not-found return path is elided in this excerpt. */
3591 search_fragment_for_size_range (size_t desired_size, size_t minimum_size)
3593 Fragment *frag, *prev, *min_prev;
3594 DEBUG (4, fprintf (gc_debug_file, "Searching nursery fragment %p, desired size: %zd minimum size %zd\n", nursery_frag_real_end, desired_size, minimum_size));
3596 if (nursery_frag_real_end > nursery_next && nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
3597 /* Clear the remaining space, pinning depends on this */
3598 memset (nursery_next, 0, nursery_frag_real_end - nursery_next);
/* Sentinel: -1 means "no minimum-size candidate found yet". */
3601 min_prev = GINT_TO_POINTER (-1);
3604 for (frag = nursery_fragments; frag; frag = frag->next) {
3605 int frag_size = frag->fragment_end - frag->fragment_start;
3606 if (desired_size <= frag_size) {
3607 setup_fragment (frag, prev, desired_size);
3608 return desired_size;
/* Remember the first fragment that at least satisfies the minimum. */
3610 if (minimum_size <= frag_size)
/* Fall back to the remembered minimum-size fragment, if any. */
3616 if (min_prev != GINT_TO_POINTER (-1)) {
3619 frag = min_prev->next;
3621 frag = nursery_fragments;
3623 frag_size = frag->fragment_end - frag->fragment_start;
3624 HEAVY_STAT (++stat_wasted_fragments_used);
3625 HEAVY_STAT (stat_wasted_fragments_bytes += frag_size);
3627 setup_fragment (frag, min_prev, minimum_size);
/* Degraded-mode allocation: allocate @size bytes for @vtable directly in the
 * major heap, running a major collection first if the heuristic says one is
 * due. Used when the nursery cannot satisfy allocations. */
3635 alloc_degraded (MonoVTable *vtable, size_t size)
3637 if (need_major_collection (0)) {
3638 mono_profiler_gc_event (MONO_GC_EVENT_START, 1);
3640 major_collection ("degraded overflow");
3642 mono_profiler_gc_event (MONO_GC_EVENT_END, 1);
3645 return major_collector.alloc_degraded (vtable, size);
3649 * Provide a variant that takes just the vtable for small fixed-size objects.
3650 * The aligned size is already computed and stored in vt->gc_descr.
3651 * Note: every SCAN_START_SIZE or so we are given the chance to do some special
3652 * processing. We can keep track of where objects start, for example,
3653 * so when we scan the thread stacks for pinned objects, we can start
3654 * a search for the pinned object in SCAN_START_SIZE chunks.
/* Core object allocator; caller must hold the GC lock (see comment at 3692).
 * Paths: LOS for large objects, TLAB bump-pointer fast path, then TLAB
 * retirement / fragment search / collection / degraded-mode slow paths.
 * NOTE(review): lines are elided in this excerpt; comments describe only
 * visible code. */
3657 mono_gc_alloc_obj_nolock (MonoVTable *vtable, size_t size)
3659 /* FIXME: handle OOM */
3664 HEAVY_STAT (++stat_objects_alloced);
3665 if (size <= MAX_SMALL_OBJ_SIZE)
3666 HEAVY_STAT (stat_bytes_alloced += size);
3668 HEAVY_STAT (stat_bytes_alloced_los += size);
3670 size = ALIGN_UP (size);
3672 g_assert (vtable->gc_descr);
/* Stress-testing hook: force a nursery collection every N allocations. */
3674 if (G_UNLIKELY (collect_before_allocs)) {
3675 static int alloc_count;
3677 InterlockedIncrement (&alloc_count);
3678 if (((alloc_count % collect_before_allocs) == 0) && nursery_section) {
3679 mono_profiler_gc_event (MONO_GC_EVENT_START, 0);
3681 collect_nursery (0);
3683 mono_profiler_gc_event (MONO_GC_EVENT_END, 0);
3684 if (!degraded_mode && !search_fragment_for_size (size) && size <= MAX_SMALL_OBJ_SIZE) {
3686 g_assert_not_reached ();
3692 * We must already have the lock here instead of after the
3693 * fast path because we might be interrupted in the fast path
3694 * (after confirming that new_next < TLAB_TEMP_END) by the GC,
3695 * and we'll end up allocating an object in a fragment which
3696 * no longer belongs to us.
3698 * The managed allocator does not do this, but it's treated
3699 * specially by the world-stopping code.
/* Large objects bypass the nursery entirely. */
3702 if (size > MAX_SMALL_OBJ_SIZE) {
3703 p = mono_sgen_los_alloc_large_inner (vtable, size);
3705 /* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */
/* Fast path: bump-pointer allocation within the current TLAB. */
3707 p = (void**)TLAB_NEXT;
3708 /* FIXME: handle overflow */
3709 new_next = (char*)p + size;
3710 TLAB_NEXT = new_next;
3712 if (G_LIKELY (new_next < TLAB_TEMP_END)) {
3716 * FIXME: We might need a memory barrier here so the change to tlab_next is
3717 * visible before the vtable store.
3720 DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
3721 binary_protocol_alloc (p , vtable, size);
3722 g_assert (*p == NULL);
3725 g_assert (TLAB_NEXT == new_next);
3732 /* there are two cases: the object is too big or we run out of space in the TLAB */
3733 /* we also reach here when the thread does its first allocation after a minor
3734 * collection, since the tlab_ variables are initialized to NULL.
3735 * there can be another case (from ORP), if we cooperate with the runtime a bit:
3736 * objects that need finalizers can have the high bit set in their size
3737 * so the above check fails and we can readily add the object to the queue.
3738 * This avoids taking again the GC lock when registering, but this is moot when
3739 * doing thread-local allocation, so it may not be a good idea.
3741 g_assert (TLAB_NEXT == new_next);
3742 if (TLAB_NEXT >= TLAB_REAL_END) {
3744 * Run out of space in the TLAB. When this happens, some amount of space
3745 * remains in the TLAB, but not enough to satisfy the current allocation
3746 * request. Currently, we retire the TLAB in all cases, later we could
3747 * keep it if the remaining space is above a threshold, and satisfy the
3748 * allocation directly from the nursery.
3751 /* when running in degraded mode, we continue allocing that way
3752 * for a while, to decrease the number of useless nursery collections.
3754 if (degraded_mode && degraded_mode < DEFAULT_NURSERY_SIZE) {
3755 p = alloc_degraded (vtable, size);
3756 binary_protocol_alloc_degraded (p, vtable, size);
3760 /*FIXME This codepath is current deadcode since tlab_size > MAX_SMALL_OBJ_SIZE*/
3761 if (size > tlab_size) {
3762 /* Allocate directly from the nursery */
3763 if (nursery_next + size >= nursery_frag_real_end) {
3764 if (!search_fragment_for_size (size)) {
3765 minor_collect_or_expand_inner (size);
3766 if (degraded_mode) {
3767 p = alloc_degraded (vtable, size);
3768 binary_protocol_alloc_degraded (p, vtable, size);
3774 p = (void*)nursery_next;
3775 nursery_next += size;
3776 if (nursery_next > nursery_frag_real_end) {
3781 if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
3782 memset (p, 0, size);
/* Retire the current TLAB and carve a new one out of the nursery. */
3785 int alloc_size = tlab_size;
3786 int available_in_nursery = nursery_frag_real_end - nursery_next;
3788 DEBUG (3, fprintf (gc_debug_file, "Retire TLAB: %p-%p [%ld]\n", TLAB_START, TLAB_REAL_END, (long)(TLAB_REAL_END - TLAB_NEXT - size)));
3790 if (alloc_size >= available_in_nursery) {
3791 if (available_in_nursery > MAX_NURSERY_TLAB_WASTE && available_in_nursery > size) {
3792 alloc_size = available_in_nursery;
3794 alloc_size = search_fragment_for_size_range (tlab_size, size);
3796 alloc_size = tlab_size;
3797 minor_collect_or_expand_inner (tlab_size);
3798 if (degraded_mode) {
3799 p = alloc_degraded (vtable, size);
3800 binary_protocol_alloc_degraded (p, vtable, size);
3807 /* Allocate a new TLAB from the current nursery fragment */
3808 TLAB_START = nursery_next;
3809 nursery_next += alloc_size;
3810 TLAB_NEXT = TLAB_START;
3811 TLAB_REAL_END = TLAB_START + alloc_size;
/* temp_end caps the fast path so scan starts get recorded every SCAN_START_SIZE. */
3812 TLAB_TEMP_END = TLAB_START + MIN (SCAN_START_SIZE, alloc_size);
3814 if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
3815 memset (TLAB_START, 0, alloc_size);
3818 /* Allocate from the TLAB */
3819 p = (void*)TLAB_NEXT;
3821 g_assert (TLAB_NEXT <= TLAB_REAL_END);
3823 nursery_section->scan_starts [((char*)p - (char*)nursery_section->data)/SCAN_START_SIZE] = (char*)p;
3826 /* Reached tlab_temp_end */
3828 /* record the scan start so we can find pinned objects more easily */
3829 nursery_section->scan_starts [((char*)p - (char*)nursery_section->data)/SCAN_START_SIZE] = (char*)p;
3830 /* we just bump tlab_temp_end as well */
3831 TLAB_TEMP_END = MIN (TLAB_REAL_END, TLAB_NEXT + SCAN_START_SIZE);
3832 DEBUG (5, fprintf (gc_debug_file, "Expanding local alloc: %p-%p\n", TLAB_NEXT, TLAB_TEMP_END));
3837 DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
3838 binary_protocol_alloc (p, vtable, size);
/* Lock-free attempt at a small-object allocation: only the TLAB bump-pointer
 * fast path. On any condition requiring the slow path (large object, TLAB
 * exhausted) it bails out (failure return elided here) so the caller can
 * retry under the GC lock via mono_gc_alloc_obj_nolock. */
3846 mono_gc_try_alloc_obj_nolock (MonoVTable *vtable, size_t size)
3852 size = ALIGN_UP (size);
3854 g_assert (vtable->gc_descr);
3855 if (size <= MAX_SMALL_OBJ_SIZE) {
3856 /* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */
3858 p = (void**)TLAB_NEXT;
3859 /* FIXME: handle overflow */
3860 new_next = (char*)p + size;
3861 TLAB_NEXT = new_next;
3863 if (G_LIKELY (new_next < TLAB_TEMP_END)) {
3867 * FIXME: We might need a memory barrier here so the change to tlab_next is
3868 * visible before the vtable store.
3871 HEAVY_STAT (++stat_objects_alloced);
3872 HEAVY_STAT (stat_bytes_alloced += size);
3874 DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
3875 binary_protocol_alloc (p, vtable, size);
3876 g_assert (*p == NULL);
3879 g_assert (TLAB_NEXT == new_next);
/* Public allocation entry point: try the lock-free fast path inside a
 * critical region first (when enabled), then fall back to the locked slow
 * path. Returns an OOM-handled result via mono_gc_out_of_memory on NULL. */
3888 mono_gc_alloc_obj (MonoVTable *vtable, size_t size)
3891 #ifndef DISABLE_CRITICAL_REGION
3893 ENTER_CRITICAL_REGION;
3894 res = mono_gc_try_alloc_obj_nolock (vtable, size);
3896 EXIT_CRITICAL_REGION;
3899 EXIT_CRITICAL_REGION;
3902 res = mono_gc_alloc_obj_nolock (vtable, size);
3904 if (G_UNLIKELY (!res))
3905 return mono_gc_out_of_memory (size);
/* Allocate a one-dimensional array (vector) of total @size bytes and set its
 * max_length. Same fast-path/slow-path structure as mono_gc_alloc_obj. */
3910 mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length)
3913 #ifndef DISABLE_CRITICAL_REGION
3915 ENTER_CRITICAL_REGION;
3916 arr = mono_gc_try_alloc_obj_nolock (vtable, size);
3918 arr->max_length = max_length;
3919 EXIT_CRITICAL_REGION;
3922 EXIT_CRITICAL_REGION;
3927 arr = mono_gc_alloc_obj_nolock (vtable, size);
3928 if (G_UNLIKELY (!arr)) {
3930 return mono_gc_out_of_memory (size);
3933 arr->max_length = max_length;
/*
 * Allocate a multi-dimensional managed array.  The bounds descriptor is
 * carved out of the tail end of the same allocation: the last
 * @bounds_size bytes of the @size-byte object hold the MonoArrayBounds.
 */
3941 mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uintptr_t bounds_size)
3944 MonoArrayBounds *bounds;
3948 arr = mono_gc_alloc_obj_nolock (vtable, size);
3949 if (G_UNLIKELY (!arr)) {
3951 return mono_gc_out_of_memory (size);
3954 arr->max_length = max_length;
/* bounds live inline at the end of the object — no separate allocation */
3956 bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
3957 arr->bounds = bounds;
/*
 * Allocate a managed string of @len characters.  Mirrors the
 * fast-path/slow-path structure of mono_gc_alloc_obj.
 * NOTE(review): the line storing @len into the string header is not
 * visible in this fragment — presumably between the sampled lines.
 */
3965 mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len)
3968 #ifndef DISABLE_CRITICAL_REGION
3970 ENTER_CRITICAL_REGION;
3971 str = mono_gc_try_alloc_obj_nolock (vtable, size);
3974 EXIT_CRITICAL_REGION;
3977 EXIT_CRITICAL_REGION;
3982 str = mono_gc_alloc_obj_nolock (vtable, size);
3983 if (G_UNLIKELY (!str)) {
3985 return mono_gc_out_of_memory (size);
3996 * To be used for interned strings and possibly MonoThread, reflection handles.
3997 * We may want to explicitly free these objects.
/*
 * Allocate an object that will never move: large objects go through the
 * LOS allocator (large objects are always pinned), small ones through
 * the major collector's pinned allocator.
 */
4000 mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size)
4003 size = ALIGN_UP (size);
4006 if (size > MAX_SMALL_OBJ_SIZE) {
4007 /* large objects are always pinned anyway */
4008 p = mono_sgen_los_alloc_large_inner (vtable, size);
4010 DEBUG (9, g_assert (vtable->klass->inited));
4011 p = major_collector.alloc_small_pinned_obj (size, SGEN_VTABLE_HAS_REFERENCES (vtable));
4014 DEBUG (6, fprintf (gc_debug_file, "Allocated pinned object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
4015 binary_protocol_alloc_pinned (p, vtable, size);
/*
 * Allocate an object directly in the old generation, bypassing the
 * nursery, via the degraded-allocation path.
 */
4023 mono_gc_alloc_mature (MonoVTable *vtable)
4026 size_t size = ALIGN_UP (vtable->klass->instance_size);
4028 res = alloc_degraded (vtable, size);
4035 * ######################################################################
4036 * ######## Finalization support
4037 * ######################################################################
4041 * this is valid for the nursery: if the object has been forwarded it means it's
4042 * still refrenced from a root. If it is pinned it's still alive as well.
4043 * Return TRUE if @obj is ready to be finalized.
/* An object is finalizable when it is neither pinned nor forwarded,
 * i.e. the collection found no live reference to it. */
4045 #define object_is_fin_ready(obj) (!object_is_pinned (obj) && !object_is_forwarded (obj))
/*
 * Return TRUE when @entry's object derives from
 * mono_defaults.critical_finalizer_object (CriticalFinalizerObject),
 * meaning its finalizer must run after all ordinary finalizers.
 */
4048 is_critical_finalizer (FinalizeEntry *entry)
4053 if (!mono_defaults.critical_finalizer_object)
4056 obj = entry->object;
4057 class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;
4059 return mono_class_has_parent (class, mono_defaults.critical_finalizer_object);
/* Push @entry on the appropriate ready list: critical finalizers go on
 * critical_fin_list, everything else on fin_ready_list. */
4063 queue_finalization_entry (FinalizeEntry *entry) {
4064 if (is_critical_finalizer (entry)) {
4065 entry->next = critical_fin_list;
4066 critical_fin_list = entry;
4068 entry->next = fin_ready_list;
4069 fin_ready_list = entry;
4073 /* LOCKING: requires that the GC lock is held */
/*
 * Grow the finalizable-object hash table to a prime near its current
 * registration count and rehash every entry, then free the old table.
 */
4075 rehash_fin_table (FinalizeEntryHashTable *hash_table)
4077 FinalizeEntry **finalizable_hash = hash_table->table;
4078 mword finalizable_hash_size = hash_table->size;
4081 FinalizeEntry **new_hash;
4082 FinalizeEntry *entry, *next;
4083 int new_size = g_spaced_primes_closest (hash_table->num_registered);
4085 new_hash = mono_sgen_alloc_internal_dynamic (new_size * sizeof (FinalizeEntry*), INTERNAL_MEM_FIN_TABLE);
4086 for (i = 0; i < finalizable_hash_size; ++i) {
4087 for (entry = finalizable_hash [i]; entry; entry = next) {
4088 hash = mono_object_hash (entry->object) % new_size;
4090 entry->next = new_hash [hash];
4091 new_hash [hash] = entry;
4094 mono_sgen_free_internal_dynamic (finalizable_hash, finalizable_hash_size * sizeof (FinalizeEntry*), INTERNAL_MEM_FIN_TABLE);
4095 hash_table->table = new_hash;
4096 hash_table->size = new_size;
4099 /* LOCKING: requires that the GC lock is held */
/* Rehash when the load factor exceeds 2 entries per bucket. */
4101 rehash_fin_table_if_necessary (FinalizeEntryHashTable *hash_table)
4103 if (hash_table->num_registered >= hash_table->size * 2)
4104 rehash_fin_table (hash_table);
4107 /* LOCKING: requires that the GC lock is held */
/*
 * Walk the finalizable hash for @generation and process every entry whose
 * object lies in [@start, @end) and is not live:
 *   - if the object is finalization-ready, unlink the entry, copy the
 *     object (so it survives until its finalizer runs) and queue it;
 *   - otherwise the object survived; update the entry to the copied
 *     address, and if a nursery object was promoted, move its entry from
 *     the minor to the major finalizable table.
 * NOTE(review): fragment — some branch/else lines are missing here.
 */
4109 finalize_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, GrayQueue *queue)
4111 FinalizeEntryHashTable *hash_table = get_finalize_entry_hash_table (generation);
4112 FinalizeEntry *entry, *prev;
4114 FinalizeEntry **finalizable_hash = hash_table->table;
4115 mword finalizable_hash_size = hash_table->size;
4119 for (i = 0; i < finalizable_hash_size; ++i) {
4121 for (entry = finalizable_hash [i]; entry;) {
4122 if ((char*)entry->object >= start && (char*)entry->object < end && !major_collector.is_object_live (entry->object)) {
4123 gboolean is_fin_ready = object_is_fin_ready (entry->object);
4124 char *copy = entry->object;
4125 copy_func ((void**)&copy, queue);
4128 FinalizeEntry *next;
4129 /* remove and put in fin_ready_list */
4131 prev->next = entry->next;
4133 finalizable_hash [i] = entry->next;
4135 num_ready_finalizers++;
4136 hash_table->num_registered--;
4137 queue_finalization_entry (entry);
4138 bridge_register_finalized_object ((MonoObject*)copy);
4139 /* Make it survive */
4140 from = entry->object;
4141 entry->object = copy;
4142 DEBUG (5, fprintf (gc_debug_file, "Queueing object for finalization: %p (%s) (was at %p) (%d/%d)\n", entry->object, safe_name (entry->object), from, num_ready_finalizers, hash_table->num_registered));
4146 char *from = entry->object;
4147 if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
4148 FinalizeEntry *next = entry->next;
4149 unsigned int major_hash;
4150 /* remove from the list */
4152 prev->next = entry->next;
4154 finalizable_hash [i] = entry->next;
4155 hash_table->num_registered--;
4157 entry->object = copy;
4159 /* insert it into the major hash */
4160 rehash_fin_table_if_necessary (&major_finalizable_hash);
4161 major_hash = mono_object_hash ((MonoObject*) copy) %
4162 major_finalizable_hash.size;
4163 entry->next = major_finalizable_hash.table [major_hash];
4164 major_finalizable_hash.table [major_hash] = entry;
4165 major_finalizable_hash.num_registered++;
4167 DEBUG (5, fprintf (gc_debug_file, "Promoting finalization of object %p (%s) (was at %p) to major table\n", copy, safe_name (copy), from));
4172 /* update pointer */
4173 DEBUG (5, fprintf (gc_debug_file, "Updating object for finalization: %p (%s) (was at %p)\n", entry->object, safe_name (entry->object), from));
4174 entry->object = copy;
4179 entry = entry->next;
/*
 * Reachability test scoped to [@start, @end): objects outside the range
 * are treated as alive (see comment below); inside the range an object
 * is reachable if it is pinned/forwarded or the major collector says so.
 */
4185 object_is_reachable (char *object, char *start, char *end)
4187 /*This happens for non nursery objects during minor collections. We just treat all objects as alive.*/
4188 if (object < start || object >= end)
4190 return !object_is_fin_ready (object) || major_collector.is_object_live (object);
/*
 * Liveness query usable during a collection: nursery objects are live
 * iff pinned; otherwise defer to the major collector (nursery-generation
 * collections treat non-nursery objects specially — middle line of the
 * branch is missing from this fragment).
 */
4194 mono_sgen_object_is_live (void *obj)
4196 if (ptr_in_nursery (obj))
4197 return object_is_pinned (obj);
4198 if (current_collection_generation == GENERATION_NURSERY)
4200 return major_collector.is_object_live (obj);
4203 /* LOCKING: requires that the GC lock is held */
/*
 * During appdomain unload, drop ephemeron list nodes whose array object
 * belongs to a dead domain (recognizable by a cleared vtable) and free
 * the node's internal memory.
 */
4205 null_ephemerons_for_domain (MonoDomain *domain)
4207 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
4210 MonoObject *object = (MonoObject*)current->array;
/* a NULLed vtable marks an object of an unloaded domain */
4212 if (object && !object->vtable) {
4213 EphemeronLinkNode *tmp = current;
4216 prev->next = current->next;
4218 ephemeron_list = current->next;
4220 current = current->next;
4221 mono_sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
4224 current = current->next;
4229 /* LOCKING: requires that the GC lock is held */
/*
 * Final ephemeron pass: unlink list nodes whose array died; for live
 * arrays, tombstone every entry whose key is unreachable, and — when the
 * array was promoted out of the nursery — register global remsets for
 * keys/values still in the nursery.
 * NOTE(review): fragment — some lines between the sampled ones are
 * missing.
 */
4231 clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue)
4233 int was_in_nursery, was_promoted;
4234 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
4236 Ephemeron *cur, *array_end;
4240 char *object = current->array;
4242 if (!object_is_reachable (object, start, end)) {
4243 EphemeronLinkNode *tmp = current;
4245 DEBUG (5, fprintf (gc_debug_file, "Dead Ephemeron array at %p\n", object));
4248 prev->next = current->next;
4250 ephemeron_list = current->next;
4252 current = current->next;
4253 mono_sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
4258 was_in_nursery = ptr_in_nursery (object);
4259 copy_func ((void**)&object, queue);
4260 current->array = object;
4262 /*The array was promoted, add global remsets for key/values left behind in nursery.*/
4263 was_promoted = was_in_nursery && !ptr_in_nursery (object);
4265 DEBUG (5, fprintf (gc_debug_file, "Clearing unreachable entries for ephemeron array at %p\n", object));
4267 array = (MonoArray*)object;
4268 cur = mono_array_addr (array, Ephemeron, 0);
4269 array_end = cur + mono_array_length_fast (array);
4270 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
4272 for (; cur < array_end; ++cur) {
4273 char *key = (char*)cur->key;
4275 if (!key || key == tombstone)
4278 DEBUG (5, fprintf (gc_debug_file, "[%td] key %p (%s) value %p (%s)\n", cur - mono_array_addr (array, Ephemeron, 0),
4279 key, object_is_reachable (key, start, end) ? "reachable" : "unreachable",
4280 cur->value, cur->value && object_is_reachable (cur->value, start, end) ? "reachable" : "unreachable"));
4282 if (!object_is_reachable (key, start, end)) {
4283 cur->key = tombstone;
4289 if (ptr_in_nursery (key)) {/*key was not promoted*/
4290 DEBUG (5, fprintf (gc_debug_file, "\tAdded remset to key %p\n", key));
4291 mono_sgen_add_to_global_remset (&cur->key);
4293 if (ptr_in_nursery (cur->value)) {/*value was not promoted*/
4294 DEBUG (5, fprintf (gc_debug_file, "\tAdded remset to value %p\n", cur->value));
4295 mono_sgen_add_to_global_remset (&cur->value);
4300 current = current->next;
4304 /* LOCKING: requires that the GC lock is held */
/*
 * Iterative ephemeron marking step: for every live ephemeron array in
 * [@start, @end), copy/mark each value whose key is reachable.  Returns
 * non-zero (nothing_marked) when a fixpoint has been reached, so callers
 * can stop iterating.
 * NOTE(review): fragment — the lines clearing nothing_marked are not
 * visible in this sample.
 */
4306 mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue)
4308 int nothing_marked = 1;
4309 EphemeronLinkNode *current = ephemeron_list;
4311 Ephemeron *cur, *array_end;
4314 for (current = ephemeron_list; current; current = current->next) {
4315 char *object = current->array;
4316 DEBUG (5, fprintf (gc_debug_file, "Ephemeron array at %p\n", object));
4318 /*We ignore arrays in old gen during minor collections since all objects are promoted by the remset machinery.*/
4319 if (object < start || object >= end)
4322 /*It has to be alive*/
4323 if (!object_is_reachable (object, start, end)) {
4324 DEBUG (5, fprintf (gc_debug_file, "\tnot reachable\n"));
4328 copy_func ((void**)&object, queue);
4330 array = (MonoArray*)object;
4331 cur = mono_array_addr (array, Ephemeron, 0);
4332 array_end = cur + mono_array_length_fast (array);
4333 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
4335 for (; cur < array_end; ++cur) {
4336 char *key = cur->key;
4338 if (!key || key == tombstone)
4341 DEBUG (5, fprintf (gc_debug_file, "[%td] key %p (%s) value %p (%s)\n", cur - mono_array_addr (array, Ephemeron, 0),
4342 key, object_is_reachable (key, start, end) ? "reachable" : "unreachable",
4343 cur->value, cur->value && object_is_reachable (cur->value, start, end) ? "reachable" : "unreachable"));
4345 if (object_is_reachable (key, start, end)) {
4346 char *value = cur->value;
4348 copy_func ((void**)&cur->key, queue);
4350 if (!object_is_reachable (value, start, end))
4352 copy_func ((void**)&cur->value, queue);
4358 DEBUG (5, fprintf (gc_debug_file, "Ephemeron run finished. Is it done %d\n", nothing_marked));
4359 return nothing_marked;
4362 /* LOCKING: requires that the GC lock is held */
/*
 * Process weak references (disappearing links) for @generation whose
 * target lies in [@start, @end) and died:
 *   - non-tracking links to finalization-ready objects are nulled and
 *     their entries freed;
 *   - links to surviving objects are updated to the copied address, with
 *     entries migrating from the minor to the major table when the
 *     target was promoted out of the nursery.
 * @before_finalization selects whether tracking or non-tracking links
 * are handled in this pass.
 * NOTE(review): fragment — several lines are missing between samples.
 */
4364 null_link_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, gboolean before_finalization, GrayQueue *queue)
4366 DisappearingLinkHashTable *hash = get_dislink_hash_table (generation);
4367 DisappearingLink **disappearing_link_hash = hash->table;
4368 int disappearing_link_hash_size = hash->size;
4369 DisappearingLink *entry, *prev;
4371 if (!hash->num_links)
4373 for (i = 0; i < disappearing_link_hash_size; ++i) {
4375 for (entry = disappearing_link_hash [i]; entry;) {
4377 gboolean track = DISLINK_TRACK (entry);
4378 if (track == before_finalization) {
4380 entry = entry->next;
4384 object = DISLINK_OBJECT (entry);
4386 if (object >= start && object < end && !major_collector.is_object_live (object)) {
4387 if (!track && object_is_fin_ready (object)) {
4388 void **p = entry->link;
4389 DisappearingLink *old;
4391 /* remove from list */
4393 prev->next = entry->next;
4395 disappearing_link_hash [i] = entry->next;
4396 DEBUG (5, fprintf (gc_debug_file, "Dislink nullified at %p to GCed object %p\n", p, object));
4398 mono_sgen_free_internal (entry, INTERNAL_MEM_DISLINK);
4403 char *copy = object;
4404 copy_func ((void**)&copy, queue);
4406 /* Update pointer if it's moved. If the object
4407 * has been moved out of the nursery, we need to
4408 * remove the link from the minor hash table to
4411 * FIXME: what if an object is moved earlier?
4414 if (hash == &minor_disappearing_link_hash && !ptr_in_nursery (copy)) {
4415 void **link = entry->link;
4416 DisappearingLink *old;
4417 /* remove from list */
4419 prev->next = entry->next;
4421 disappearing_link_hash [i] = entry->next;
4423 mono_sgen_free_internal (entry, INTERNAL_MEM_DISLINK);
4427 add_or_remove_disappearing_link ((MonoObject*)copy, link,
4428 track, GENERATION_OLD);
4430 DEBUG (5, fprintf (gc_debug_file, "Upgraded dislink at %p to major because object %p moved to %p\n", link, object, copy));
4434 /* We set the track resurrection bit to
4435 * FALSE if the object is to be finalized
4436 * so that the object can be collected in
4437 * the next cycle (i.e. after it was
4440 *entry->link = HIDE_POINTER (copy,
4441 object_is_fin_ready (object) ? FALSE : track);
4442 DEBUG (5, fprintf (gc_debug_file, "Updated dislink at %p to %p\n", entry->link, DISLINK_OBJECT (entry)));
4447 entry = entry->next;
4452 /* LOCKING: requires that the GC lock is held */
/*
 * Appdomain unload: null and free every disappearing link whose target
 * object belongs to an unloaded domain (cleared vtable).  Warns when a
 * link slot is still populated at this point.
 */
4454 null_links_for_domain (MonoDomain *domain, int generation)
4456 DisappearingLinkHashTable *hash = get_dislink_hash_table (generation);
4457 DisappearingLink **disappearing_link_hash = hash->table;
4458 int disappearing_link_hash_size = hash->size;
4459 DisappearingLink *entry, *prev;
4461 for (i = 0; i < disappearing_link_hash_size; ++i) {
4463 for (entry = disappearing_link_hash [i]; entry; ) {
4464 char *object = DISLINK_OBJECT (entry);
4465 if (object && !((MonoObject*)object)->vtable) {
4466 DisappearingLink *next = entry->next;
4471 disappearing_link_hash [i] = next;
4473 if (*(entry->link)) {
4474 *(entry->link) = NULL;
4475 g_warning ("Disappearing link %p not freed", entry->link);
4477 mono_sgen_free_internal (entry, INTERNAL_MEM_DISLINK);
4484 entry = entry->next;
4489 /* LOCKING: requires that the GC lock is held */
/*
 * Collect up to @out_size finalizable objects belonging to @domain from
 * @hash_table into @out_array, unlinking them from the table as they are
 * harvested.  Returns the number of objects stored (count — the return
 * line is outside this fragment's sampled lines).
 */
4491 finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size,
4492 FinalizeEntryHashTable *hash_table)
4494 FinalizeEntry **finalizable_hash = hash_table->table;
4495 mword finalizable_hash_size = hash_table->size;
4496 FinalizeEntry *entry, *prev;
4499 if (no_finalize || !out_size || !out_array)
4502 for (i = 0; i < finalizable_hash_size; ++i) {
4504 for (entry = finalizable_hash [i]; entry;) {
4505 if (mono_object_domain (entry->object) == domain) {
4506 FinalizeEntry *next;
4507 /* remove and put in out_array */
4509 prev->next = entry->next;
4511 finalizable_hash [i] = entry->next;
4513 hash_table->num_registered--;
4514 out_array [count ++] = entry->object;
4515 DEBUG (5, fprintf (gc_debug_file, "Collecting object for finalization: %p (%s) (%d/%d)\n", entry->object, safe_name (entry->object), num_ready_finalizers, hash_table->num_registered));
4517 if (count == out_size)
4522 entry = entry->next;
4529 * mono_gc_finalizers_for_domain:
4530 * @domain: the unloading appdomain
4531 * @out_array: output array
4532 * @out_size: size of output array
4534 * Store inside @out_array up to @out_size objects that belong to the unloading
4535 * appdomain @domain. Returns the number of stored items. Can be called repeteadly
4536 * until it returns 0.
4537 * The items are removed from the finalizer data structure, so the caller is supposed
4539 * @out_array should be on the stack to allow the GC to know the objects are still alive.
/* Harvests from the minor table first, then tops up from the major table. */
4542 mono_gc_finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size)
4547 result = finalizers_for_domain (domain, out_array, out_size, &minor_finalizable_hash);
4548 if (result < out_size) {
4549 result += finalizers_for_domain (domain, out_array + result, out_size - result,
4550 &major_finalizable_hash);
/*
 * Register (or, when the object is already present and user_data requests
 * removal, deregister) @obj in the finalizable hash table of @generation.
 * @user_data must be NULL or mono_gc_run_finalize — no arbitrary
 * callbacks are supported.
 * NOTE(review): fragment — some branch lines are missing between samples.
 */
4558 register_for_finalization (MonoObject *obj, void *user_data, int generation)
4560 FinalizeEntryHashTable *hash_table = get_finalize_entry_hash_table (generation);
4561 FinalizeEntry **finalizable_hash;
4562 mword finalizable_hash_size;
4563 FinalizeEntry *entry, *prev;
4567 g_assert (user_data == NULL || user_data == mono_gc_run_finalize);
4568 hash = mono_object_hash (obj);
4570 rehash_fin_table_if_necessary (hash_table);
4571 finalizable_hash = hash_table->table;
4572 finalizable_hash_size = hash_table->size;
4573 hash %= finalizable_hash_size;
4575 for (entry = finalizable_hash [hash]; entry; entry = entry->next) {
4576 if (entry->object == obj) {
4578 /* remove from the list */
4580 prev->next = entry->next;
4582 finalizable_hash [hash] = entry->next;
4583 hash_table->num_registered--;
4584 DEBUG (5, fprintf (gc_debug_file, "Removed finalizer %p for object: %p (%s) (%d)\n", entry, obj, obj->vtable->klass->name, hash_table->num_registered));
4585 mono_sgen_free_internal (entry, INTERNAL_MEM_FINALIZE_ENTRY);
4593 /* request to deregister, but already out of the list */
4597 entry = mono_sgen_alloc_internal (INTERNAL_MEM_FINALIZE_ENTRY);
4598 entry->object = obj;
4599 entry->next = finalizable_hash [hash];
4600 finalizable_hash [hash] = entry;
4601 hash_table->num_registered++;
4602 DEBUG (5, fprintf (gc_debug_file, "Added finalizer %p for object: %p (%s) (%d) to %s table\n", entry, obj, obj->vtable->klass->name, hash_table->num_registered, generation_name (generation)));
/* Public entry point: route the registration to the table matching the
 * object's current generation. */
4607 mono_gc_register_for_finalization (MonoObject *obj, void *user_data)
4609 if (ptr_in_nursery (obj))
4610 register_for_finalization (obj, user_data, GENERATION_NURSERY);
4612 register_for_finalization (obj, user_data, GENERATION_OLD);
/*
 * Grow the disappearing-link hash table to a prime near the current link
 * count, rehashing by link address, then free the old bucket array.
 */
4616 rehash_dislink (DisappearingLinkHashTable *hash_table)
4618 DisappearingLink **disappearing_link_hash = hash_table->table;
4619 int disappearing_link_hash_size = hash_table->size;
4622 DisappearingLink **new_hash;
4623 DisappearingLink *entry, *next;
4624 int new_size = g_spaced_primes_closest (hash_table->num_links);
4626 new_hash = mono_sgen_alloc_internal_dynamic (new_size * sizeof (DisappearingLink*), INTERNAL_MEM_DISLINK_TABLE);
4627 for (i = 0; i < disappearing_link_hash_size; ++i) {
4628 for (entry = disappearing_link_hash [i]; entry; entry = next) {
4629 hash = mono_aligned_addr_hash (entry->link) % new_size;
4631 entry->next = new_hash [hash];
4632 new_hash [hash] = entry;
4635 mono_sgen_free_internal_dynamic (disappearing_link_hash,
4636 disappearing_link_hash_size * sizeof (DisappearingLink*), INTERNAL_MEM_DISLINK_TABLE);
4637 hash_table->table = new_hash;
4638 hash_table->size = new_size;
4641 /* LOCKING: assumes the GC lock is held */
/*
 * Register, update, or (when @obj is NULL) remove the weak-reference
 * entry keyed by the link slot @link in @generation's table.  The target
 * pointer is stored hidden (HIDE_POINTER) together with the @track
 * (resurrection-tracking) bit.
 */
4643 add_or_remove_disappearing_link (MonoObject *obj, void **link, gboolean track, int generation)
4645 DisappearingLinkHashTable *hash_table = get_dislink_hash_table (generation);
4646 DisappearingLink *entry, *prev;
4648 DisappearingLink **disappearing_link_hash = hash_table->table;
4649 int disappearing_link_hash_size = hash_table->size;
4651 if (hash_table->num_links >= disappearing_link_hash_size * 2) {
4652 rehash_dislink (hash_table);
4653 disappearing_link_hash = hash_table->table;
4654 disappearing_link_hash_size = hash_table->size;
4656 /* FIXME: add check that link is not in the heap */
4657 hash = mono_aligned_addr_hash (link) % disappearing_link_hash_size;
4658 entry = disappearing_link_hash [hash];
4660 for (; entry; entry = entry->next) {
4661 /* link already added */
4662 if (link == entry->link) {
4663 /* NULL obj means remove */
4666 prev->next = entry->next;
4668 disappearing_link_hash [hash] = entry->next;
4669 hash_table->num_links--;
4670 DEBUG (5, fprintf (gc_debug_file, "Removed dislink %p (%d) from %s table\n", entry, hash_table->num_links, generation_name (generation)));
4671 mono_sgen_free_internal (entry, INTERNAL_MEM_DISLINK);
4674 *link = HIDE_POINTER (obj, track); /* we allow the change of object */
4682 entry = mono_sgen_alloc_internal (INTERNAL_MEM_DISLINK);
4683 *link = HIDE_POINTER (obj, track);
4685 entry->next = disappearing_link_hash [hash];
4686 disappearing_link_hash [hash] = entry;
4687 hash_table->num_links++;
4688 DEBUG (5, fprintf (gc_debug_file, "Added dislink %p for object: %p (%s) at %p to %s table\n", entry, obj, obj->vtable->klass->name, link, generation_name (generation)));
4691 /* LOCKING: assumes the GC lock is held */
/*
 * Public wrapper: first clears any prior registration of @link in both
 * generations, then registers it in the table matching @obj's current
 * location.
 */
4693 mono_gc_register_disappearing_link (MonoObject *obj, void **link, gboolean track)
4695 add_or_remove_disappearing_link (NULL, link, FALSE, GENERATION_NURSERY);
4696 add_or_remove_disappearing_link (NULL, link, FALSE, GENERATION_OLD);
4698 if (ptr_in_nursery (obj))
4699 add_or_remove_disappearing_link (obj, link, track, GENERATION_NURSERY);
4701 add_or_remove_disappearing_link (obj, link, track, GENERATION_OLD);
/*
 * Drain the ready-to-finalize lists (ordinary first, then critical),
 * running one finalizer per loop iteration.  The entry finalized in the
 * previous iteration is unlinked and freed at the top of the next one,
 * so the object stays reachable via the local while its finalizer runs.
 * NOTE(review): fragment — locking and return lines are missing between
 * the sampled lines.
 */
4706 mono_gc_invoke_finalizers (void)
4708 FinalizeEntry *entry = NULL;
4709 gboolean entry_is_critical = FALSE;
4712 /* FIXME: batch to reduce lock contention */
4713 while (fin_ready_list || critical_fin_list) {
4717 FinalizeEntry **list = entry_is_critical ? &critical_fin_list : &fin_ready_list;
4719 /* We have finalized entry in the last
4720 interation, now we need to remove it from
4723 *list = entry->next;
4725 FinalizeEntry *e = *list;
4726 while (e->next != entry)
4728 e->next = entry->next;
4730 mono_sgen_free_internal (entry, INTERNAL_MEM_FINALIZE_ENTRY);
4734 /* Now look for the first non-null entry. */
4735 for (entry = fin_ready_list; entry && !entry->object; entry = entry->next)
4738 entry_is_critical = FALSE;
4740 entry_is_critical = TRUE;
4741 for (entry = critical_fin_list; entry && !entry->object; entry = entry->next)
4746 g_assert (entry->object);
4747 num_ready_finalizers--;
4748 obj = entry->object;
/* clear the slot so the entry is skipped on the next scan */
4749 entry->object = NULL;
4750 DEBUG (7, fprintf (gc_debug_file, "Finalizing object %p (%s)\n", obj, safe_name (obj)));
4758 g_assert (entry->object == NULL);
4760 /* the object is on the stack so it is pinned */
4761 /*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
4762 mono_gc_run_finalize (obj, NULL);
/* TRUE when either ready list still has entries to finalize. */
4769 mono_gc_pending_finalizers (void)
4771 return fin_ready_list || critical_fin_list;
4774 /* Negative value to remove */
/* Adjust the externally-reported memory pressure counter (not
 * thread-safe yet — see FIXME below). */
4776 mono_gc_add_memory_pressure (gint64 value)
4778 /* FIXME: Use interlocked functions */
4780 memory_pressure += value;
/* Account major sections allocated since the last minor collection;
 * feeds the minor-collection allowance heuristic. */
4785 mono_sgen_register_major_sections_alloced (int num_sections)
4787 minor_collection_sections_alloced += num_sections;
/* Accessor for the current minor-collection allowance. */
4791 mono_sgen_get_minor_collection_allowance (void)
4793 return minor_collection_allowance;
4797 * ######################################################################
4798 * ######## registered roots support
4799 * ######################################################################
/*
 * Grow the root hash table for the given kind (@pinned is used as an
 * index into the per-kind tables) and rehash all records by their start
 * address, then free the old bucket array.
 */
4803 rehash_roots (gboolean pinned)
4807 RootRecord **new_hash;
4808 RootRecord *entry, *next;
4811 new_size = g_spaced_primes_closest (num_roots_entries [pinned]);
4812 new_hash = mono_sgen_alloc_internal_dynamic (new_size * sizeof (RootRecord*), INTERNAL_MEM_ROOTS_TABLE);
4813 for (i = 0; i < roots_hash_size [pinned]; ++i) {
4814 for (entry = roots_hash [pinned][i]; entry; entry = next) {
4815 hash = mono_aligned_addr_hash (entry->start_root) % new_size;
4817 entry->next = new_hash [hash];
4818 new_hash [hash] = entry;
4821 mono_sgen_free_internal_dynamic (roots_hash [pinned], roots_hash_size [pinned] * sizeof (RootRecord*), INTERNAL_MEM_ROOTS_TABLE);
4822 roots_hash [pinned] = new_hash;
4823 roots_hash_size [pinned] = new_size;
/*
 * Look up the root record registered at @start in the @root_type table,
 * using the precomputed @addr_hash.  Returns the matching record (the
 * return lines are outside this fragment's sampled lines).
 */
4827 find_root (int root_type, char *start, guint32 addr_hash)
4829 RootRecord *new_root;
4831 guint32 hash = addr_hash % roots_hash_size [root_type];
4832 for (new_root = roots_hash [root_type][hash]; new_root; new_root = new_root->next) {
4833 /* we allow changing the size and the descriptor (for thread statics etc) */
4834 if (new_root->start_root == start) {
4843 * We do not coalesce roots.
/*
 * Register the memory range [@start, @start+@size) as a GC root of
 * @root_type with scanning descriptor @descr.  An existing record for
 * the same start address is updated in place (size and descriptor may
 * change, but a descriptor cannot switch between NULL and non-NULL);
 * otherwise a fresh record is inserted into the hash table.
 */
4846 mono_gc_register_root_inner (char *start, size_t size, void *descr, int root_type)
4848 RootRecord *new_root;
4849 unsigned int hash, addr_hash = mono_aligned_addr_hash (start);
4852 for (i = 0; i < ROOT_TYPE_NUM; ++i) {
4853 if (num_roots_entries [i] >= roots_hash_size [i] * 2)
4856 for (i = 0; i < ROOT_TYPE_NUM; ++i) {
4857 new_root = find_root (i, start, addr_hash);
4858 /* we allow changing the size and the descriptor (for thread statics etc) */
4860 size_t old_size = new_root->end_root - new_root->start_root;
4861 new_root->end_root = new_root->start_root + size;
4862 g_assert (((new_root->root_desc != 0) && (descr != NULL)) ||
4863 ((new_root->root_desc == 0) && (descr == NULL)));
4864 new_root->root_desc = (mword)descr;
4866 roots_size -= old_size;
4871 new_root = mono_sgen_alloc_internal (INTERNAL_MEM_ROOT_RECORD);
4873 new_root->start_root = start;
4874 new_root->end_root = new_root->start_root + size;
4875 new_root->root_desc = (mword)descr;
4877 hash = addr_hash % roots_hash_size [root_type];
4878 num_roots_entries [root_type]++;
4879 new_root->next = roots_hash [root_type] [hash];
4880 roots_hash [root_type][hash] = new_root;
4881 DEBUG (3, fprintf (gc_debug_file, "Added root %p for range: %p-%p, descr: %p (%d/%d bytes)\n", new_root, new_root->start_root, new_root->end_root, descr, (int)size, (int)roots_size));
/* Register a root; precisely-described roots (non-NULL descr) are
 * NORMAL, descriptor-less ones are conservatively PINNED. */
4891 mono_gc_register_root (char *start, size_t size, void *descr)
4893 return mono_gc_register_root_inner (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED);
/* Register a root whose stores go through the write barrier. */
4897 mono_gc_register_root_wbarrier (char *start, size_t size, void *descr)
4899 return mono_gc_register_root_inner (start, size, descr, ROOT_TYPE_WBARRIER);
/*
 * Remove the root registered at @addr, searching all root-type tables,
 * unlinking the record and returning its memory to the internal
 * allocator.
 */
4903 mono_gc_deregister_root (char* addr)
4905 RootRecord *tmp, *prev;
4906 unsigned int hash, addr_hash = mono_aligned_addr_hash (addr);
4910 for (root_type = 0; root_type < ROOT_TYPE_NUM; ++root_type) {
4911 hash = addr_hash % roots_hash_size [root_type];
4912 tmp = roots_hash [root_type][hash];
4915 if (tmp->start_root == (char*)addr) {
4917 prev->next = tmp->next;
4919 roots_hash [root_type][hash] = tmp->next;
4920 roots_size -= (tmp->end_root - tmp->start_root);
4921 num_roots_entries [root_type]--;
4922 DEBUG (3, fprintf (gc_debug_file, "Removed root %p for range: %p-%p\n", tmp, tmp->start_root, tmp->end_root));
4923 mono_sgen_free_internal (tmp, INTERNAL_MEM_ROOT_RECORD);
4934 * ######################################################################
4935 * ######## Thread handling (stop/start code)
4936 * ######################################################################
4939 /* FIXME: handle large/small config */
/* Knuth multiplicative hash on the thread id (low nibble dropped). */
4940 #define HASH_PTHREAD_T(id) (((unsigned int)(id) >> 4) * 2654435761u)
/* Open-hashed table of all registered mutator threads. */
4942 static SgenThreadInfo* thread_table [THREAD_HASH_SIZE];
4944 #if USE_SIGNAL_BASED_START_STOP_WORLD
/* Semaphore suspended threads post to acknowledge suspend/resume. */
4946 static MonoSemType suspend_ack_semaphore;
4947 static MonoSemType *suspend_ack_semaphore_ptr;
/* Incremented once per stop-world; used to detect duplicate signals. */
4948 static unsigned int global_stop_count = 0;
4950 static sigset_t suspend_signal_mask;
/* Scratch register dump for the current (stopping) thread. */
4951 static mword cur_thread_regs [ARCH_NUM_REGS] = {0};
4953 /* LOCKING: assumes the GC lock is held */
/* Expose the thread table to other sgen modules. */
4955 mono_sgen_get_thread_table (void)
4957 return thread_table;
/* Find the SgenThreadInfo for thread @id by walking its hash chain
 * (the chain-advance line is outside this fragment's sampled lines). */
4961 mono_sgen_thread_info_lookup (ARCH_THREAD_TYPE id)
4963 unsigned int hash = HASH_PTHREAD_T (id) % THREAD_HASH_SIZE;
4964 SgenThreadInfo *info;
4966 info = thread_table [hash];
4967 while (info && !ARCH_THREAD_EQUALS (info->id, id)) {
/*
 * Record the current thread's stack top and a register snapshot in its
 * SgenThreadInfo, and notify the runtime's suspend callback.  Called by
 * the thread initiating a collection, which is not suspended by signal.
 */
4974 update_current_thread_stack (void *start)
4976 void *ptr = cur_thread_regs;
4977 SgenThreadInfo *info = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());
4979 info->stack_start = align_pointer (&ptr);
4980 g_assert (info->stack_start >= info->stack_start_limit && info->stack_start < info->stack_end);
4981 ARCH_STORE_REGS (ptr);
4982 info->stopped_regs = ptr;
4983 if (gc_callbacks.thread_suspend_func)
4984 gc_callbacks.thread_suspend_func (info->runtime_data, NULL);
4988 * Define this and use the "xdomain-checks" MONO_GC_DEBUG option to
4989 * have cross-domain checks in the write barrier.
4991 //#define XDOMAIN_CHECKS_IN_WBARRIER
/* Managed (JITted) allocators and write barriers are only usable when
 * neither binary-protocol logging, heavy statistics, nor xdomain checks
 * need to intercept every allocation/store. */
4993 #ifndef SGEN_BINARY_PROTOCOL
4994 #ifndef HEAVY_STATISTICS
4995 #define MANAGED_ALLOCATION
4996 #ifndef XDOMAIN_CHECKS_IN_WBARRIER
4997 #define MANAGED_WBARRIER
/* Forward declaration: tests whether @ip falls inside a managed allocator. */
5003 is_ip_in_managed_allocator (MonoDomain *domain, gpointer ip);
/*
 * Wait for @count threads to post the suspend-ack semaphore, retrying
 * waits interrupted by signals (EINTR); any other error is fatal.
 */
5006 mono_sgen_wait_for_suspend_ack (int count)
5010 for (i = 0; i < count; ++i) {
5011 while ((result = MONO_SEM_WAIT (suspend_ack_semaphore_ptr)) != 0) {
5012 if (errno != EINTR) {
5013 g_error ("sem_wait ()");
/*
 * After the initial stop-the-world, some threads may have been caught
 * inside a managed allocator or a critical region, where their state
 * cannot be scanned safely.  Repeatedly restart exactly those threads,
 * give them a growing grace period, and suspend them again, until none
 * remains in an allocator.  Returns the number of threads that died
 * while being restarted (restarted but never re-acked).
 * NOTE(review): fragment — some loop/bookkeeping lines are missing
 * between the sampled lines.
 */
5020 restart_threads_until_none_in_managed_allocator (void)
5022 SgenThreadInfo *info;
5023 int i, result, num_threads_died = 0;
5024 int sleep_duration = -1;
5027 int restart_count = 0, restarted_count = 0;
5028 /* restart all threads that stopped in the
5030 for (i = 0; i < THREAD_HASH_SIZE; ++i) {
5031 for (info = thread_table [i]; info; info = info->next) {
5034 if (!info->stack_start || info->in_critical_region ||
5035 is_ip_in_managed_allocator (info->stopped_domain, info->stopped_ip)) {
5036 binary_protocol_thread_restart ((gpointer)info->id);
5037 #if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
5038 result = thread_resume (pthread_mach_thread_np (info->id));
5040 result = pthread_kill (info->id, restart_signal_num);
5048 /* we set the stopped_ip to
5049 NULL for threads which
5050 we're not restarting so
5051 that we can easily identify
5053 info->stopped_ip = NULL;
5054 info->stopped_domain = NULL;
5058 /* if no threads were restarted, we're done */
5059 if (restart_count == 0)
5062 #if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
5063 /* mach thread_resume is synchronous so we dont need to wait for them */
5065 /* wait for the threads to signal their restart */
5066 mono_sgen_wait_for_suspend_ack (restart_count);
5069 if (sleep_duration < 0) {
5073 g_usleep (sleep_duration);
/* back off: give restarted threads progressively more time to leave
 * the allocator before stopping them again */
5074 sleep_duration += 10;
5077 /* stop them again */
5078 for (i = 0; i < THREAD_HASH_SIZE; ++i) {
5079 for (info = thread_table [i]; info; info = info->next) {
5080 if (info->skip || info->stopped_ip == NULL)
5082 #if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
5083 result = thread_suspend (pthread_mach_thread_np (info->id));
5085 result = pthread_kill (info->id, suspend_signal_num);
5094 /* some threads might have died */
5095 num_threads_died += restart_count - restarted_count;
5096 #if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
5097 /* mach thread_resume is synchronous so we dont need to wait for them */
5099 /* wait for the threads to signal their suspension
5101 mono_sgen_wait_for_suspend_ack (restart_count);
5105 return num_threads_died;
5108 /* LOCKING: assumes the GC lock is held (by the stopping thread) */
/*
 * Signal handler run in each mutator thread when the world is stopped.
 * Records the thread's stopped IP/domain, stack top and registers in its
 * SgenThreadInfo, notifies the JIT, acks the suspend via the semaphore,
 * then blocks in sigsuspend() until the restart signal arrives, acking
 * again on resume.  errno is preserved across the handler (restore lines
 * fall outside this fragment's sampled lines).
 */
5110 suspend_handler (int sig, siginfo_t *siginfo, void *context)
5112 SgenThreadInfo *info;
5115 int old_errno = errno;
5116 gpointer regs [ARCH_NUM_REGS];
5117 gpointer stack_start;
5119 id = pthread_self ();
5120 info = mono_sgen_thread_info_lookup (id);
5121 info->stopped_domain = mono_domain_get ();
5122 info->stopped_ip = (gpointer) ARCH_SIGCTX_IP (context);
5123 stop_count = global_stop_count;
5124 /* duplicate signal */
5125 if (0 && info->stop_count == stop_count) {
5129 #ifdef HAVE_KW_THREAD
5130 /* update the remset info in the thread data structure */
5131 info->remset = remembered_set;
5133 stack_start = (char*) ARCH_SIGCTX_SP (context) - REDZONE_SIZE;
5134 /* If stack_start is not within the limits, then don't set it
5135 in info and we will be restarted. */
5136 if (stack_start >= info->stack_start_limit && info->stack_start <= info->stack_end) {
5137 info->stack_start = stack_start;
5139 ARCH_COPY_SIGCTX_REGS (regs, context);
5140 info->stopped_regs = regs;
5142 g_assert (!info->stack_start);
5145 /* Notify the JIT */
5146 if (gc_callbacks.thread_suspend_func)
5147 gc_callbacks.thread_suspend_func (info->runtime_data, context);
5149 DEBUG (4, fprintf (gc_debug_file, "Posting suspend_ack_semaphore for suspend from %p %p\n", info, (gpointer)ARCH_GET_THREAD ()));
5150 /* notify the waiting thread */
5151 MONO_SEM_POST (suspend_ack_semaphore_ptr);
5152 info->stop_count = stop_count;
5154 /* wait until we receive the restart signal */
5157 sigsuspend (&suspend_signal_mask);
5158 } while (info->signal != restart_signal_num);
5160 DEBUG (4, fprintf (gc_debug_file, "Posting suspend_ack_semaphore for resume from %p %p\n", info, (gpointer)ARCH_GET_THREAD ()));
5161 /* notify the waiting thread */
5162 MONO_SEM_POST (suspend_ack_semaphore_ptr);
/*
 * restart_handler:
 * Signal handler for the restart signal.  Marks the thread's info with the
 * restart signal number so suspend_handler's sigsuspend loop can exit.
 * old_errno is saved here; the restore is in a line not visible in this
 * fragment.
 */
5168 restart_handler (int sig)
5170 SgenThreadInfo *info;
5171 int old_errno = errno;
5173 info = mono_sgen_thread_info_lookup (pthread_self ());
5174 info->signal = restart_signal_num;
5175 DEBUG (4, fprintf (gc_debug_file, "Restart handler in %p %p\n", info, (gpointer)ARCH_GET_THREAD ()));
/* Acquire/release the auxiliary locks held across a stop-the-world pause.
 * Only the interruption lock release is visible in this fragment; the
 * acquire body is in missing lines. */
5181 acquire_gc_locks (void)
5187 release_gc_locks (void)
5189 UNLOCK_INTERRUPTION;
/* Timestamp of the last world stop and the worst observed pause, for the
 * pause-time statistic printed in restart_world. */
5192 static TV_DECLARE (stop_world_time);
5193 static unsigned long max_pause_usec = 0;
/*
 * stop_world:
 * Suspend every mutator thread before collecting GENERATION: bump the
 * global stop counter, handshake the suspend signal to all threads, and
 * subtract any threads that had to be restarted because they were caught
 * inside the managed allocator.  Returns/records are partly in lines not
 * visible here.
 */
5195 /* LOCKING: assumes the GC lock is held */
5197 stop_world (int generation)
5201 mono_profiler_gc_event (MONO_GC_EVENT_PRE_STOP_WORLD, generation);
5202 acquire_gc_locks ();
5204 update_current_thread_stack (&count);
5206 global_stop_count++;
5207 DEBUG (3, fprintf (gc_debug_file, "stopping world n %d from %p %p\n", global_stop_count, mono_sgen_thread_info_lookup (ARCH_GET_THREAD ()), (gpointer)ARCH_GET_THREAD ()));
5208 TV_GETTIME (stop_world_time);
5209 count = mono_sgen_thread_handshake (suspend_signal_num);
5210 count -= restart_threads_until_none_in_managed_allocator ();
5211 g_assert (count >= 0);
5212 DEBUG (3, fprintf (gc_debug_file, "world stopped %d thread(s)\n", count));
5213 mono_profiler_gc_event (MONO_GC_EVENT_POST_STOP_WORLD, generation);
/*
 * restart_world:
 * Resume all mutator threads after collecting GENERATION: flush any pending
 * object-move events to the profiler, clear each thread's recorded stopped
 * state, release the GC-held locks, and handshake the restart signal.
 * Updates the max pause statistic from stop_world_time.
 */
5217 /* LOCKING: assumes the GC lock is held */
5219 restart_world (int generation)
5222 SgenThreadInfo *info;
5223 TV_DECLARE (end_sw);
5226 /* notify the profiler of the leftovers */
5227 if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_GC_MOVES)) {
5228 if (moved_objects_idx) {
5229 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
5230 moved_objects_idx = 0;
5233 mono_profiler_gc_event (MONO_GC_EVENT_PRE_START_WORLD, generation);
/* Invalidate per-thread suspend snapshots; they are refreshed at the next
 * suspend. */
5234 for (i = 0; i < THREAD_HASH_SIZE; ++i) {
5235 for (info = thread_table [i]; info; info = info->next) {
5236 info->stack_start = NULL;
5237 info->stopped_regs = NULL;
5241 release_gc_locks ();
5243 count = mono_sgen_thread_handshake (restart_signal_num);
5244 TV_GETTIME (end_sw);
5245 usec = TV_ELAPSED (stop_world_time, end_sw);
5246 max_pause_usec = MAX (usec, max_pause_usec);
5247 DEBUG (2, fprintf (gc_debug_file, "restarted %d thread(s) (pause time: %d usec, max: %d)\n", count, (int)usec, (int)max_pause_usec));
5248 mono_profiler_gc_event (MONO_GC_EVENT_POST_START_WORLD, generation);
5252 #endif /* USE_SIGNAL_BASED_START_STOP_WORLD */
/* Return the generation (nursery or major) currently being collected. */
5255 mono_sgen_get_current_collection_generation (void)
5257 return current_collection_generation;
/* Install / retrieve the runtime (JIT) GC callback table.  The struct is
 * copied on set, so the caller's copy need not outlive this call. */
5261 mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
5263 gc_callbacks = *callbacks;
5267 mono_gc_get_gc_callbacks ()
5269 return &gc_callbacks;
5272 /* Variables holding start/end nursery so it won't have to be passed at every call */
5273 static void *scan_area_arg_start, *scan_area_arg_end;
/* Conservatively pin anything in [start,end) that looks like a pointer into
 * the nursery range cached in scan_area_arg_start/end. */
5276 mono_gc_conservatively_scan_area (void *start, void *end)
5278 conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
/* Precisely process one object reference: forward/copy it with the routine
 * appropriate for the generation currently being collected. */
5282 mono_gc_scan_object (void *obj)
5284 if (current_collection_generation == GENERATION_NURSERY)
5285 major_collector.copy_object (&obj, &gray_queue);
5287 major_collector.copy_or_mark_object (&obj, &gray_queue);
/*
 * scan_thread_data:
 * Mark roots from every registered thread: use the JIT's precise mark
 * callback when available (and conservative marking is not forced),
 * otherwise conservatively pin from the thread's stack range; the saved
 * register snapshot is always scanned conservatively.
 */
5292 * Mark from thread stacks and registers.
5295 scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise)
5298 SgenThreadInfo *info;
5300 scan_area_arg_start = start_nursery;
5301 scan_area_arg_end = end_nursery;
5303 for (i = 0; i < THREAD_HASH_SIZE; ++i) {
5304 for (info = thread_table [i]; info; info = info->next) {
5306 DEBUG (3, fprintf (gc_debug_file, "Skipping dead thread %p, range: %p-%p, size: %td\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start));
5309 DEBUG (3, fprintf (gc_debug_file, "Scanning thread %p, range: %p-%p, size: %td, pinned=%d\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, next_pin_slot));
5310 if (gc_callbacks.thread_mark_func && !conservative_stack_mark)
5311 gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise);
5313 conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
5316 conservatively_pin_objects_from (info->stopped_regs, info->stopped_regs + ARCH_NUM_REGS,
5317 start_nursery, end_nursery, PIN_TYPE_STACK);
/*
 * find_pinning_ref_from_thread:
 * Debug helper: walk every thread's stack words and saved registers looking
 * for values pointing into [obj, obj+size) and report them — used to
 * explain why an object got pinned.
 */
5323 find_pinning_ref_from_thread (char *obj, size_t size)
5326 SgenThreadInfo *info;
5327 char *endobj = obj + size;
5329 for (i = 0; i < THREAD_HASH_SIZE; ++i) {
5330 for (info = thread_table [i]; info; info = info->next) {
5331 char **start = (char**)info->stack_start;
5334 while (start < (char**)info->stack_end) {
5335 if (*start >= obj && *start < endobj) {
5336 DEBUG (0, fprintf (gc_debug_file, "Object %p referenced in thread %p (id %p) at %p, stack: %p-%p\n", obj, info, (gpointer)info->id, start, info->stack_start, info->stack_end));
5341 for (j = 0; j < ARCH_NUM_REGS; ++j) {
5342 mword w = (mword)info->stopped_regs [j];
5344 if (w >= (mword)obj && w < (mword)obj + size)
5345 DEBUG (0, fprintf (gc_debug_file, "Object %p referenced in saved reg %d of thread %p (id %p)\n", obj, j, info, (gpointer)info->id));
/* Return whether PTR lies on the current thread's stack, between the
 * current stack frame (approximated by a local variable's address) and the
 * registered stack_end. */
5352 ptr_on_stack (void *ptr)
5354 gpointer stack_start = &stack_start;
5355 SgenThreadInfo *info = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());
5357 if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
/*
 * handle_remset:
 * Process one remembered-set entry at P, dispatching on the type bits
 * (REMSET_LOCATION / range / object / REMSET_VTYPE).  Entries pointing into
 * the nursery itself are skipped; surviving nursery references from local
 * remsets are promoted to the global remset.  Returns the address of the
 * next entry (return statements are in lines missing from this fragment).
 */
5363 handle_remset (mword *p, void *start_nursery, void *end_nursery, gboolean global, GrayQueue *queue)
5370 HEAVY_STAT (++stat_global_remsets_processed);
5372 HEAVY_STAT (++stat_local_remsets_processed);
5374 /* FIXME: exclude stack locations */
5375 switch ((*p) & REMSET_TYPE_MASK) {
5376 case REMSET_LOCATION:
5378 //__builtin_prefetch (ptr);
5379 if (((void*)ptr < start_nursery || (void*)ptr >= end_nursery)) {
5380 gpointer old = *ptr;
5381 major_collector.copy_object (ptr, queue);
5382 DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p\n", ptr, *ptr));
5384 binary_protocol_ptr_update (ptr, old, *ptr, (gpointer)LOAD_VTABLE (*ptr), safe_object_get_size (*ptr));
5385 if (!global && *ptr >= start_nursery && *ptr < end_nursery) {
5387 * If the object is pinned, each reference to it from nonpinned objects
5388 * becomes part of the global remset, which can grow very large.
5390 DEBUG (9, fprintf (gc_debug_file, "Add to global remset because of pinning %p (%p %s)\n", ptr, *ptr, safe_name (*ptr)));
5391 mono_sgen_add_to_global_remset (ptr);
5394 DEBUG (9, fprintf (gc_debug_file, "Skipping remset at %p holding %p\n", ptr, *ptr));
/* Range remset: COUNT consecutive slots starting at the unmasked pointer. */
5398 ptr = (void**)(*p & ~REMSET_TYPE_MASK);
5399 if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
5402 while (count-- > 0) {
5403 major_collector.copy_object (ptr, queue);
5404 DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p (count: %d)\n", ptr, *ptr, (int)count));
5405 if (!global && *ptr >= start_nursery && *ptr < end_nursery)
5406 mono_sgen_add_to_global_remset (ptr);
/* Object remset: scan the whole object the entry points at. */
5411 ptr = (void**)(*p & ~REMSET_TYPE_MASK);
5412 if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
5414 major_collector.minor_scan_object ((char*)ptr, queue);
5416 case REMSET_VTYPE: {
5417 ptr = (void**)(*p & ~REMSET_TYPE_MASK);
5418 if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
5423 ptr = (void**) major_collector.minor_scan_vtype ((char*)ptr, desc, start_nursery, end_nursery, queue);
5427 g_assert_not_reached ();
5432 #ifdef HEAVY_STATISTICS
/*
 * collect_store_remsets (HEAVY_STATISTICS only):
 * Copy the location-type entries of REMSET into the array at BUMPER while
 * counting repeated-store statistics (last1/last2 track the most recent
 * entries).  Presumably returns the advanced bumper — the return is in
 * lines missing from this fragment.
 */
5434 collect_store_remsets (RememberedSet *remset, mword *bumper)
5436 mword *p = remset->data;
5441 while (p < remset->store_next) {
5442 switch ((*p) & REMSET_TYPE_MASK) {
5443 case REMSET_LOCATION:
5446 ++stat_saved_remsets_1;
5448 if (*p == last1 || *p == last2) {
5449 ++stat_saved_remsets_2;
5466 g_assert_not_reached ();
/*
 * (HEAVY_STATISTICS) store-remset statistics pass — the function's name
 * line is missing from this fragment.  It sizes a scratch array over all
 * per-thread, freed-thread and global remsets, gathers their location
 * entries via collect_store_remsets, sorts them, and accumulates total and
 * unique store-remset counts.
 */
5476 RememberedSet *remset;
5478 SgenThreadInfo *info;
5480 mword *addresses, *bumper, *p, *r;
/* First pass: compute the total number of entries to allocate for. */
5482 for (i = 0; i < THREAD_HASH_SIZE; ++i) {
5483 for (info = thread_table [i]; info; info = info->next) {
5484 for (remset = info->remset; remset; remset = remset->next)
5485 size += remset->store_next - remset->data;
5488 for (remset = freed_thread_remsets; remset; remset = remset->next)
5489 size += remset->store_next - remset->data;
5490 for (remset = global_remset; remset; remset = remset->next)
5491 size += remset->store_next - remset->data;
5493 bumper = addresses = mono_sgen_alloc_internal_dynamic (sizeof (mword) * size, INTERNAL_MEM_STATISTICS);
/* Second pass: collect the entries into the scratch array. */
5495 for (i = 0; i < THREAD_HASH_SIZE; ++i) {
5496 for (info = thread_table [i]; info; info = info->next) {
5497 for (remset = info->remset; remset; remset = remset->next)
5498 bumper = collect_store_remsets (remset, bumper);
5501 for (remset = global_remset; remset; remset = remset->next)
5502 bumper = collect_store_remsets (remset, bumper);
5503 for (remset = freed_thread_remsets; remset; remset = remset->next)
5504 bumper = collect_store_remsets (remset, bumper);
5506 g_assert (bumper <= addresses + size);
5508 stat_store_remsets += bumper - addresses;
5510 sort_addresses ((void**)addresses, bumper - addresses);
5513 while (r < bumper) {
5519 stat_store_remsets_unique += p - addresses;
5521 mono_sgen_free_internal_dynamic (addresses, sizeof (mword) * size, INTERNAL_MEM_STATISTICS);
/* Reset INFO's generic-store remset buffer: zero the index and the buffer
 * contents so stale entries are never re-processed. */
5526 clear_thread_store_remset_buffer (SgenThreadInfo *info)
5528 *info->store_remset_buffer_index_addr = 0;
5529 memset (*info->store_remset_buffer_addr, 0, sizeof (gpointer) * STORE_REMSET_BUFFER_SIZE);
/* Total allocation size of REMSET in bytes: header plus its full data
 * capacity (end_set - data slots), used when freeing. */
5533 remset_byte_size (RememberedSet *remset)
5535 return sizeof (RememberedSet) + (remset->end_set - remset->data) * sizeof (gpointer);
/*
 * scan_from_remsets:
 * Process every remembered set during a minor collection: the global
 * remset (compacting away entries that no longer point into the nursery),
 * the generic-store buffers, each live thread's remset chain and store
 * buffer, and finally the remsets of threads that have exited.  Processed
 * per-thread remsets are reset and extra chain links freed.
 */
5539 scan_from_remsets (void *start_nursery, void *end_nursery, GrayQueue *queue)
5542 SgenThreadInfo *info;
5543 RememberedSet *remset;
5544 GenericStoreRememberedSet *store_remset;
5545 mword *p, *next_p, *store_pos;
5547 #ifdef HEAVY_STATISTICS
5551 /* the global one */
5552 for (remset = global_remset; remset; remset = remset->next) {
5553 DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
5554 store_pos = remset->data;
5555 for (p = remset->data; p < remset->store_next; p = next_p) {
5556 void **ptr = (void**)p [0];
5558 /*Ignore previously processed remset.*/
5559 if (!global_remset_location_was_not_added (ptr)) {
5564 next_p = handle_remset (p, start_nursery, end_nursery, TRUE, queue);
5567 * Clear global remsets of locations which no longer point to the
5568 * nursery. Otherwise, they could grow indefinitely between major
5571 * Since all global remsets are location remsets, we don't need to unmask the pointer.
5573 if (ptr_in_nursery (*ptr)) {
5574 *store_pos ++ = p [0];
5575 HEAVY_STAT (++stat_global_remsets_readded);
5579 /* Truncate the remset */
5580 remset->store_next = store_pos;
5583 /* the generic store ones */
5584 store_remset = generic_store_remsets;
5585 while (store_remset) {
5586 GenericStoreRememberedSet *next = store_remset->next;
5588 for (i = 0; i < STORE_REMSET_BUFFER_SIZE - 1; ++i) {
5589 gpointer addr = store_remset->data [i];
5591 handle_remset ((mword*)&addr, start_nursery, end_nursery, FALSE, queue);
5594 mono_sgen_free_internal (store_remset, INTERNAL_MEM_STORE_REMSET);
5596 store_remset = next;
5598 generic_store_remsets = NULL;
5600 /* the per-thread ones */
5601 for (i = 0; i < THREAD_HASH_SIZE; ++i) {
5602 for (info = thread_table [i]; info; info = info->next) {
5603 RememberedSet *next;
5605 for (remset = info->remset; remset; remset = next) {
5606 DEBUG (4, fprintf (gc_debug_file, "Scanning remset for thread %p, range: %p-%p, size: %td\n", info, remset->data, remset->store_next, remset->store_next - remset->data));
5607 for (p = remset->data; p < remset->store_next;)
5608 p = handle_remset (p, start_nursery, end_nursery, FALSE, queue);
5609 remset->store_next = remset->data;
5610 next = remset->next;
5611 remset->next = NULL;
/* Keep only the head of the chain; free the rest. */
5612 if (remset != info->remset) {
5613 DEBUG (4, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
5614 mono_sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
5617 for (j = 0; j < *info->store_remset_buffer_index_addr; ++j)
5618 handle_remset ((mword*)*info->store_remset_buffer_addr + j + 1, start_nursery, end_nursery, FALSE, queue);
5619 clear_thread_store_remset_buffer (info);
5623 /* the freed thread ones */
5624 while (freed_thread_remsets) {
5625 RememberedSet *next;
5626 remset = freed_thread_remsets;
5627 DEBUG (4, fprintf (gc_debug_file, "Scanning remset for freed thread, range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
5628 for (p = remset->data; p < remset->store_next;)
5629 p = handle_remset (p, start_nursery, end_nursery, FALSE, queue);
5630 next = remset->next;
5631 DEBUG (4, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
5632 mono_sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
5633 freed_thread_remsets = next;
/*
 * clear_remsets:
 * Discard all remembered-set contents before a major collection: the
 * per-thread sets are unnecessary (everything is scanned) and the global
 * set is rebuilt.  Heads of chains are reset in place; extra links and all
 * generic-store / freed-thread sets are freed outright.
 */
5638 * Clear the info in the remembered sets: we're doing a major collection, so
5639 * the per-thread ones are not needed and the global ones will be reconstructed
5643 clear_remsets (void)
5646 SgenThreadInfo *info;
5647 RememberedSet *remset, *next;
5649 /* the global list */
5650 for (remset = global_remset; remset; remset = next) {
5651 remset->store_next = remset->data;
5652 next = remset->next;
5653 remset->next = NULL;
5654 if (remset != global_remset) {
5655 DEBUG (4, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
5656 mono_sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
5659 /* the generic store ones */
5660 while (generic_store_remsets) {
5661 GenericStoreRememberedSet *gs_next = generic_store_remsets->next;
5662 mono_sgen_free_internal (generic_store_remsets, INTERNAL_MEM_STORE_REMSET);
5663 generic_store_remsets = gs_next;
5665 /* the per-thread ones */
5666 for (i = 0; i < THREAD_HASH_SIZE; ++i) {
5667 for (info = thread_table [i]; info; info = info->next) {
5668 for (remset = info->remset; remset; remset = next) {
5669 remset->store_next = remset->data;
5670 next = remset->next;
5671 remset->next = NULL;
5672 if (remset != info->remset) {
5673 DEBUG (3, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
5674 mono_sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
5677 clear_thread_store_remset_buffer (info);
5681 /* the freed thread ones */
5682 while (freed_thread_remsets) {
5683 next = freed_thread_remsets->next;
5684 DEBUG (4, fprintf (gc_debug_file, "Freed remset at %p\n", freed_thread_remsets->data));
5685 mono_sgen_free_internal_dynamic (freed_thread_remsets, remset_byte_size (freed_thread_remsets), INTERNAL_MEM_REMSET);
5686 freed_thread_remsets = next;
/*
 * TLAB-clearing helper (the function name line is missing from this
 * fragment): reset every thread's TLAB pointers so each thread allocates a
 * fresh TLAB on its next allocation.
 */
5691 * Clear the thread local TLAB variables for all threads.
5696 SgenThreadInfo *info;
5699 for (i = 0; i < THREAD_HASH_SIZE; ++i) {
5700 for (info = thread_table [i]; info; info = info->next) {
5701 /* A new TLAB will be allocated when the thread does its first allocation */
5702 *info->tlab_start_addr = NULL;
5703 *info->tlab_next_addr = NULL;
5704 *info->tlab_temp_end_addr = NULL;
5705 *info->tlab_real_end_addr = NULL;
/*
 * gc_register_current_thread:
 * Register the calling thread with SGen: allocate and zero its
 * SgenThreadInfo, wire up TLAB/remset TLS addresses, determine the stack
 * bounds (pthread attributes where available, otherwise a page-aligned
 * guess from ADDR assuming a downward-growing stack), insert the info into
 * the thread hash, allocate its first remembered set and store buffer, and
 * invoke the JIT attach callback.  Returns the new info.
 * NOTE(review): malloc result is used without a NULL check in the visible
 * lines — confirm against the full source.
 */
5710 /* LOCKING: assumes the GC lock is held */
5711 static SgenThreadInfo*
5712 gc_register_current_thread (void *addr)
5715 SgenThreadInfo* info = malloc (sizeof (SgenThreadInfo));
5716 #ifndef HAVE_KW_THREAD
5717 SgenThreadInfo *__thread_info__ = info;
5723 memset (info, 0, sizeof (SgenThreadInfo));
5724 #ifndef HAVE_KW_THREAD
5725 info->tlab_start = info->tlab_next = info->tlab_temp_end = info->tlab_real_end = NULL;
5727 g_assert (!pthread_getspecific (thread_info_key));
5728 pthread_setspecific (thread_info_key, info);
5733 info->id = ARCH_GET_THREAD ();
5734 info->stop_count = -1;
5737 info->stack_start = NULL;
5738 info->tlab_start_addr = &TLAB_START;
5739 info->tlab_next_addr = &TLAB_NEXT;
5740 info->tlab_temp_end_addr = &TLAB_TEMP_END;
5741 info->tlab_real_end_addr = &TLAB_REAL_END;
5742 info->store_remset_buffer_addr = &STORE_REMSET_BUFFER;
5743 info->store_remset_buffer_index_addr = &STORE_REMSET_BUFFER_INDEX;
5744 info->stopped_ip = NULL;
5745 info->stopped_domain = NULL;
5746 info->stopped_regs = NULL;
5748 binary_protocol_thread_register ((gpointer)info->id);
5750 #ifdef HAVE_KW_THREAD
5751 tlab_next_addr = &tlab_next;
5752 store_remset_buffer_index_addr = &store_remset_buffer_index;
5755 /* try to get it with attributes first */
5756 #if defined(HAVE_PTHREAD_GETATTR_NP) && defined(HAVE_PTHREAD_ATTR_GETSTACK)
5760 pthread_attr_t attr;
5761 pthread_getattr_np (pthread_self (), &attr);
5762 pthread_attr_getstack (&attr, &sstart, &size);
5763 info->stack_start_limit = sstart;
5764 info->stack_end = (char*)sstart + size;
5765 pthread_attr_destroy (&attr);
5767 #elif defined(HAVE_PTHREAD_GET_STACKSIZE_NP) && defined(HAVE_PTHREAD_GET_STACKADDR_NP)
5768 info->stack_end = (char*)pthread_get_stackaddr_np (pthread_self ());
5769 info->stack_start_limit = (char*)info->stack_end - pthread_get_stacksize_np (pthread_self ());
5772 /* FIXME: we assume the stack grows down */
5773 gsize stack_bottom = (gsize)addr;
5774 stack_bottom += 4095;
5775 stack_bottom &= ~4095;
5776 info->stack_end = (char*)stack_bottom;
5780 #ifdef HAVE_KW_THREAD
5781 stack_end = info->stack_end;
5784 /* hash into the table */
5785 hash = HASH_PTHREAD_T (info->id) % THREAD_HASH_SIZE;
5786 info->next = thread_table [hash];
5787 thread_table [hash] = info;
5789 info->remset = alloc_remset (DEFAULT_REMSET_SIZE, info);
5790 pthread_setspecific (remembered_set_key, info->remset);
5791 #ifdef HAVE_KW_THREAD
5792 remembered_set = info->remset;
5795 STORE_REMSET_BUFFER = mono_sgen_alloc_internal (INTERNAL_MEM_STORE_REMSET);
5796 STORE_REMSET_BUFFER_INDEX = 0;
5798 DEBUG (3, fprintf (gc_debug_file, "registered thread %p (%p) (hash: %d)\n", info, (gpointer)info->id, hash));
5800 if (gc_callbacks.thread_attach_func)
5801 info->runtime_data = gc_callbacks.thread_attach_func ();
/* Evacuate a thread-local store buffer into a newly allocated
 * GenericStoreRememberedSet and push it on the global list.  Slot 0 is
 * skipped (buffer + 1) — it is reserved, matching the `j + 1` indexing in
 * scan_from_remsets. */
5807 add_generic_store_remset_from_buffer (gpointer *buffer)
5809 GenericStoreRememberedSet *remset = mono_sgen_alloc_internal (INTERNAL_MEM_STORE_REMSET);
5810 memcpy (remset->data, buffer + 1, sizeof (gpointer) * (STORE_REMSET_BUFFER_SIZE - 1));
5811 remset->next = generic_store_remsets;
5812 generic_store_remsets = remset;
/*
 * unregister_current_thread:
 * Remove the calling thread from the thread hash, run the JIT detach
 * callback, and move its remembered sets onto the freed_thread_remsets
 * list (appending if that list is non-empty) so they are still processed
 * at the next collection; any pending store buffer is evacuated first.
 */
5816 unregister_current_thread (void)
5819 SgenThreadInfo *prev = NULL;
5821 RememberedSet *rset;
5822 ARCH_THREAD_TYPE id = ARCH_GET_THREAD ();
5824 binary_protocol_thread_unregister ((gpointer)id);
5826 hash = HASH_PTHREAD_T (id) % THREAD_HASH_SIZE;
5827 p = thread_table [hash];
5829 DEBUG (3, fprintf (gc_debug_file, "unregister thread %p (%p)\n", p, (gpointer)p->id));
5830 while (!ARCH_THREAD_EQUALS (p->id, id)) {
/* Unlink from the hash bucket (head vs. interior cases). */
5835 thread_table [hash] = p->next;
5837 prev->next = p->next;
5840 if (gc_callbacks.thread_detach_func) {
5841 gc_callbacks.thread_detach_func (p->runtime_data);
5842 p->runtime_data = NULL;
/* Append this thread's remset chain to the freed list (walk to its tail
 * first) or install it as the new list if empty. */
5846 if (freed_thread_remsets) {
5847 for (rset = p->remset; rset->next; rset = rset->next)
5849 rset->next = freed_thread_remsets;
5850 freed_thread_remsets = p->remset;
5852 freed_thread_remsets = p->remset;
5855 if (*p->store_remset_buffer_index_addr)
5856 add_generic_store_remset_from_buffer (*p->store_remset_buffer_addr);
5857 mono_sgen_free_internal (*p->store_remset_buffer_addr, INTERNAL_MEM_STORE_REMSET);
/*
 * unregister_thread:
 * pthread TSD destructor: detach the Mono thread (if its domain is still
 * registered) and then unregister it from the GC.
 */
5862 unregister_thread (void *k)
5864 /* If a delegate is passed to native code and invoked on a thread we dont
5865 * know about, the jit will register it with mono_jit_thead_attach, but
5866 * we have no way of knowing when that thread goes away. SGen has a TSD
5867 * so we assume that if the domain is still registered, we can detach
5870 if (mono_domain_get ())
5871 mono_thread_detach (mono_thread_current ());
5874 unregister_current_thread ();
/*
 * mono_gc_register_thread:
 * Public entry point: ensure the current thread is registered with the GC
 * (registering it with BASEPTR as the stack hint if not), (re)run the JIT
 * attach callback if it was registered before callbacks were installed,
 * and lazily initialize the array-fill vtable.  Returns non-zero on
 * success.
 */
5879 mono_gc_register_thread (void *baseptr)
5881 SgenThreadInfo *info;
5885 info = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());
5887 info = gc_register_current_thread (baseptr);
5889 /* The main thread might get registered before callbacks are set */
5890 if (gc_callbacks.thread_attach_func && !info->runtime_data)
5891 info->runtime_data = gc_callbacks.thread_attach_func ();
5895 /* Need a better place to initialize this */
5896 if (!array_fill_vtable && mono_get_root_domain ()) {
5897 array_fill_vtable = mono_class_vtable (mono_get_root_domain (), mono_array_class_get (mono_defaults.byte_class, 1));
5900 return info != NULL;
5904 * mono_gc_set_stack_end:
5906 * Set the end of the current threads stack to STACK_END. The stack space between
5907 * STACK_END and the real end of the threads stack will not be scanned during collections.
5910 mono_gc_set_stack_end (void *stack_end)
5912 SgenThreadInfo *info;
5915 info = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());
/* Only allowed to shrink the scanned range, never extend it. */
5917 g_assert (stack_end < info->stack_end);
5918 info->stack_end = stack_end;
5923 #if USE_PTHREAD_INTERCEPT
/* Trampoline data passed from mono_gc_pthread_create to gc_start_thread:
 * the user's start routine/argument plus a semaphore the child posts once
 * it has registered with the GC. */
5926 void *(*start_routine) (void *);
5929 MonoSemType registered;
5930 } SgenThreadStartInfo;
/*
 * gc_start_thread:
 * Thread entry trampoline installed by mono_gc_pthread_create: register
 * the new thread with the GC, post the creator's semaphore, run the user's
 * start routine, and unregister on the way out (normally the pthread key
 * destructor handles unregistration).
 */
5933 gc_start_thread (void *arg)
5935 SgenThreadStartInfo *start_info = arg;
5936 SgenThreadInfo* info;
5937 void *t_arg = start_info->arg;
5938 void *(*start_func) (void*) = start_info->start_routine;
5943 info = gc_register_current_thread (&result);
5945 post_result = MONO_SEM_POST (&(start_info->registered));
5946 g_assert (!post_result);
5947 result = start_func (t_arg);
5948 g_assert (!mono_domain_get ());
5950 * this is done by the pthread key dtor
5952 unregister_current_thread ();
/*
 * mono_gc_pthread_create:
 * pthread_create interposer: spawn the thread through gc_start_thread and
 * block on the semaphore until the child has registered with the GC, so
 * the creator never observes an unregistered running thread.  The
 * MONO_SEM_WAIT loop retries on EINTR.
 * NOTE(review): start_info's malloc result is used without a visible NULL
 * check — confirm against the full source.
 */
5960 mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
5962 SgenThreadStartInfo *start_info;
5965 start_info = malloc (sizeof (SgenThreadStartInfo));
5968 MONO_SEM_INIT (&(start_info->registered), 0);
5969 start_info->arg = arg;
5970 start_info->start_routine = start_routine;
5972 result = pthread_create (new_thread, attr, gc_start_thread, start_info);
5974 while (MONO_SEM_WAIT (&(start_info->registered)) != 0) {
5975 /*if (EINTR != errno) ABORT("sem_wait failed"); */
5978 MONO_SEM_DESTROY (&(start_info->registered));
/* pthread_join interposer — straight pass-through. */
5984 mono_gc_pthread_join (pthread_t thread, void **retval)
5986 return pthread_join (thread, retval);
/* pthread_detach interposer — straight pass-through. */
5990 mono_gc_pthread_detach (pthread_t thread)
5992 return pthread_detach (thread);
5995 #endif /* USE_PTHREAD_INTERCEPT */
5998 * ######################################################################
5999 * ######## Write barriers
6000 * ######################################################################
/* Empty asm that names V as both input and output: forces the compiler to
 * keep V live (and conservatively scannable) up to this point. */
6004 * This causes the compile to extend the liveness of 'v' till the call to dummy_use
6007 dummy_use (gpointer v) {
6008 __asm__ volatile ("" : "=r"(v) : "r"(v));
/* Allocate a RememberedSet with room for SIZE pointer-sized entries;
 * store_next starts at data (empty) and end_set marks capacity.  ID is
 * only used for the debug message. */
6012 static RememberedSet*
6013 alloc_remset (int size, gpointer id) {
6014 RememberedSet* res = mono_sgen_alloc_internal_dynamic (sizeof (RememberedSet) + (size * sizeof (gpointer)), INTERNAL_MEM_REMSET);
6015 res->store_next = res->data;
6016 res->end_set = res->data + size;
6018 DEBUG (4, fprintf (gc_debug_file, "Allocated remset size %d at %p for %p\n", size, res->data, id));
6023 * Note: the write barriers first do the needed GC work and then do the actual store:
6024 * this way the value is visible to the conservative GC scan after the write barrier
6025 * itself. If a GC interrupts the barrier in the middle, value will be kept alive by
6026 * the conservative scan, otherwise by the remembered set scan.
/*
 * mono_gc_wbarrier_set_field:
 * Write barrier for storing VALUE into FIELD_PTR of OBJ.  Nursery targets
 * need no remset; with the card table the card for the slot is marked when
 * VALUE is in the nursery; otherwise the slot is appended to the thread's
 * remembered set, growing the set (size-doubling chain via alloc_remset)
 * when full.
 */
6029 mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
6031 HEAVY_STAT (++stat_wbarrier_set_field);
6032 if (ptr_in_nursery (field_ptr)) {
6033 *(void**)field_ptr = value;
6036 DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p\n", field_ptr));
6037 if (use_cardtable) {
6038 *(void**)field_ptr = value;
6039 if (ptr_in_nursery (value))
6040 sgen_card_table_mark_address ((mword)field_ptr);
6047 rs = REMEMBERED_SET;
6048 if (rs->store_next < rs->end_set) {
6049 *(rs->store_next++) = (mword)field_ptr;
6050 *(void**)field_ptr = value;
/* Remset full: chain a new one of the same capacity and retry the append. */
6054 rs = alloc_remset (rs->end_set - rs->data, (void*)1);
6055 rs->next = REMEMBERED_SET;
6056 REMEMBERED_SET = rs;
6057 #ifdef HAVE_KW_THREAD
6058 mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
6060 *(rs->store_next++) = (mword)field_ptr;
6061 *(void**)field_ptr = value;
/*
 * mono_gc_wbarrier_set_arrayref:
 * Write barrier for storing VALUE into array slot SLOT_PTR — same policy
 * as mono_gc_wbarrier_set_field: skip for nursery targets, mark the card
 * under the card table, otherwise append the slot to the thread remset.
 */
6067 mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
6069 HEAVY_STAT (++stat_wbarrier_set_arrayref);
6070 if (ptr_in_nursery (slot_ptr)) {
6071 *(void**)slot_ptr = value;
6074 DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p\n", slot_ptr));
6075 if (use_cardtable) {
6076 *(void**)slot_ptr = value;
6077 if (ptr_in_nursery (value))
6078 sgen_card_table_mark_address ((mword)slot_ptr);
6085 rs = REMEMBERED_SET;
6086 if (rs->store_next < rs->end_set) {
6087 *(rs->store_next++) = (mword)slot_ptr;
6088 *(void**)slot_ptr = value;
/* Remset full: chain a new one and retry the append. */
6092 rs = alloc_remset (rs->end_set - rs->data, (void*)1);
6093 rs->next = REMEMBERED_SET;
6094 REMEMBERED_SET = rs;
6095 #ifdef HAVE_KW_THREAD
6096 mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
6098 *(rs->store_next++) = (mword)slot_ptr;
6099 *(void**)slot_ptr = value;
/*
 * mono_gc_wbarrier_arrayref_copy:
 * Write barrier for copying COUNT object references from SRC_PTR to
 * DEST_PTR.  Nursery destinations (or empty copies) just memmove.  Under
 * the card table the copy is done element-wise — backwards when the
 * regions overlap with src below dest — marking the destination card for
 * each nursery value.  Otherwise a single REMSET_RANGE entry (pointer +
 * count) is appended to the thread remset.
 */
6105 mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
6107 HEAVY_STAT (++stat_wbarrier_arrayref_copy);
6108 /*This check can be done without taking a lock since dest_ptr array is pinned*/
6109 if (ptr_in_nursery (dest_ptr) || count <= 0) {
6110 memmove (dest_ptr, src_ptr, count * sizeof (gpointer));
6114 if (use_cardtable) {
6115 gpointer *dest = dest_ptr;
6116 gpointer *src = src_ptr;
6118 /*overlapping that required backward copying*/
6119 if (src < dest && (src + count) > dest) {
6120 gpointer *start = dest;
6124 for (; dest >= start; --src, --dest) {
6125 gpointer value = *src;
6127 if (ptr_in_nursery (value))
6128 sgen_card_table_mark_address ((mword)dest);
6132 gpointer *end = dest + count;
6133 for (; dest < end; ++src, ++dest) {
6134 gpointer value = *src;
6136 if (ptr_in_nursery (value))
6137 sgen_card_table_mark_address ((mword)dest);
6145 memmove (dest_ptr, src_ptr, count * sizeof (gpointer));
6147 rs = REMEMBERED_SET;
6148 DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p, %d\n", dest_ptr, count));
/* Range entries take two slots, hence the +1 capacity check. */
6149 if (rs->store_next + 1 < rs->end_set) {
6150 *(rs->store_next++) = (mword)dest_ptr | REMSET_RANGE;
6151 *(rs->store_next++) = count;
6155 rs = alloc_remset (rs->end_set - rs->data, (void*)1);
6156 rs->next = REMEMBERED_SET;
6157 REMEMBERED_SET = rs;
6158 #ifdef HAVE_KW_THREAD
6159 mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
6161 *(rs->store_next++) = (mword)dest_ptr | REMSET_RANGE;
6162 *(rs->store_next++) = count;
/* Scratch result slot for find_object_for_ptr and its iteration callback:
 * the callback records the (unique) object whose extent contains the
 * pointer passed as user_data. */
6168 static char *found_obj;
6171 find_object_for_ptr_callback (char *obj, size_t size, void *user_data)
6173 char *ptr = user_data;
6175 if (ptr >= obj && ptr < obj + size) {
6176 g_assert (!found_obj);
/*
 * find_object_for_ptr:
 * Debugger helper (intended to be called from gdb): locate the object that
 * contains PTR by scanning the nursery, then LOS, then — slowly — every
 * major-heap object.  Result is communicated through found_obj.
 */
6181 /* for use in the debugger */
6182 char* find_object_for_ptr (char *ptr);
6184 find_object_for_ptr (char *ptr)
6186 if (ptr >= nursery_section->data && ptr < nursery_section->end_data) {
6188 mono_sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
6189 find_object_for_ptr_callback, ptr, TRUE);
6195 mono_sgen_los_iterate_objects (find_object_for_ptr_callback, ptr);
6200 * Very inefficient, but this is debugging code, supposed to
6201 * be called from gdb, so we don't care.
6204 major_collector.iterate_objects (TRUE, TRUE, find_object_for_ptr_callback, ptr);
/* Flush the current thread's store buffer into a GenericStoreRememberedSet
 * and reset the buffer and its index to empty. */
6209 evacuate_remset_buffer (void)
6214 buffer = STORE_REMSET_BUFFER;
6216 add_generic_store_remset_from_buffer (buffer);
6217 memset (buffer, 0, sizeof (gpointer) * STORE_REMSET_BUFFER_SIZE);
6219 STORE_REMSET_BUFFER_INDEX = 0;
/*
 * mono_gc_wbarrier_generic_nostore:
 * Record a store that has ALREADY been performed at PTR.  Skips nursery
 * and stack locations and stores whose value is not in the nursery; with
 * the card table it just marks the card.  Otherwise the address goes into
 * the thread's store buffer, which is evacuated to a generic remset when
 * full; a same-as-last-entry check filters consecutive duplicate stores.
 */
6223 mono_gc_wbarrier_generic_nostore (gpointer ptr)
6229 HEAVY_STAT (++stat_wbarrier_generic_store);
6231 #ifdef XDOMAIN_CHECKS_IN_WBARRIER
6232 /* FIXME: ptr_in_heap must be called with the GC lock held */
6233 if (xdomain_checks && *(MonoObject**)ptr && ptr_in_heap (ptr)) {
6234 char *start = find_object_for_ptr (ptr);
6235 MonoObject *value = *(MonoObject**)ptr;
6239 MonoObject *obj = (MonoObject*)start;
6240 if (obj->vtable->domain != value->vtable->domain)
6241 g_assert (is_xdomain_ref_allowed (ptr, start, obj->vtable->domain));
6247 if (*(gpointer*)ptr)
6248 binary_protocol_wbarrier (ptr, *(gpointer*)ptr, (gpointer)LOAD_VTABLE (*(gpointer*)ptr));
6250 if (ptr_in_nursery (ptr) || ptr_on_stack (ptr) || !ptr_in_nursery (*(gpointer*)ptr)) {
6251 DEBUG (8, fprintf (gc_debug_file, "Skipping remset at %p\n", ptr));
6255 if (use_cardtable) {
6256 if (ptr_in_nursery(*(gpointer*)ptr))
6257 sgen_card_table_mark_address ((mword)ptr);
6263 buffer = STORE_REMSET_BUFFER;
6264 index = STORE_REMSET_BUFFER_INDEX;
6265 /* This simple optimization eliminates a sizable portion of
6266 entries. Comparing it to the last but one entry as well
6267 doesn't eliminate significantly more entries. */
6268 if (buffer [index] == ptr) {
6273 DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p\n", ptr));
6274 HEAVY_STAT (++stat_wbarrier_generic_store_remset);
6277 if (index >= STORE_REMSET_BUFFER_SIZE) {
6278 evacuate_remset_buffer ();
6279 index = STORE_REMSET_BUFFER_INDEX;
6280 g_assert (index == 0);
6283 buffer [index] = ptr;
6284 STORE_REMSET_BUFFER_INDEX = index;
/* Store VALUE at PTR and run the generic write barrier afterwards when
 * VALUE is a nursery object (store-first ordering — see the barrier note
 * earlier in this file). */
6290 mono_gc_wbarrier_generic_store (gpointer ptr, MonoObject* value)
6292 DEBUG (8, fprintf (gc_debug_file, "Wbarrier store at %p to %p (%s)\n", ptr, value, value ? safe_name (value) : "null"));
6293 *(void**)ptr = value;
6294 if (ptr_in_nursery (value))
6295 mono_gc_wbarrier_generic_nostore (ptr);
/* Copy SIZE bytes from _SRC to _DEST word by word, applying the generic
 * store barrier for each word whose bit is set in BITMAP (reference slots).
 * Loop structure is partly in lines missing from this fragment. */
6299 void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
6301 mword *dest = _dest;
6306 mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);
6311 size -= SIZEOF_VOID_P;
/*
 * mono_gc_wbarrier_value_copy:
 * Write barrier for copying COUNT value-type instances of KLASS from SRC
 * to DEST.  After the memmove: card-table marks the whole range; the
 * plain remset path appends a three-slot REMSET_VTYPE entry (dest, the
 * class's GC descriptor, count) unless the destination is in the nursery,
 * on the stack, or the class holds no references.
 */
6318 mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
6321 size_t size = count * mono_class_value_size (klass, NULL);
6323 HEAVY_STAT (++stat_wbarrier_value_copy);
6324 g_assert (klass->valuetype);
6326 memmove (dest, src, size);
6327 if (use_cardtable) {
6328 sgen_card_table_mark_range ((mword)dest, size);
6330 rs = REMEMBERED_SET;
6331 if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !SGEN_CLASS_HAS_REFERENCES (klass)) {
6335 g_assert (klass->gc_descr_inited);
6336 DEBUG (8, fprintf (gc_debug_file, "Adding value remset at %p, count %d, descr %p for class %s (%p)\n", dest, count, klass->gc_descr, klass->name, klass));
/* VTYPE entries take three slots, hence the +3 capacity check. */
6338 if (rs->store_next + 3 < rs->end_set) {
6339 *(rs->store_next++) = (mword)dest | REMSET_VTYPE;
6340 *(rs->store_next++) = (mword)klass->gc_descr;
6341 *(rs->store_next++) = (mword)count;
6345 rs = alloc_remset (rs->end_set - rs->data, (void*)1);
6346 rs->next = REMEMBERED_SET;
6347 REMEMBERED_SET = rs;
6348 #ifdef HAVE_KW_THREAD
6349 mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
6351 *(rs->store_next++) = (mword)dest | REMSET_VTYPE;
6352 *(rs->store_next++) = (mword)klass->gc_descr;
6353 *(rs->store_next++) = (mword)count;
6359 * mono_gc_wbarrier_object_copy:
6361 * Write barrier to call when obj is the result of a clone or copy of an object.
/* Copies SRC's fields into OBJ (skipping the MonoObject header / sync
 * state) and appends a single REMSET_OBJECT entry for OBJ unless it is in
 * the nursery or on the stack. */
6364 mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
6370 HEAVY_STAT (++stat_wbarrier_object_copy);
6371 rs = REMEMBERED_SET;
6372 DEBUG (6, fprintf (gc_debug_file, "Adding object remset for %p\n", obj));
6373 size = mono_object_class (obj)->instance_size;
6375 /* do not copy the sync state */
6376 memcpy ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
6377 size - sizeof (MonoObject));
6378 if (ptr_in_nursery (obj) || ptr_on_stack (obj)) {
6382 if (rs->store_next < rs->end_set) {
6383 *(rs->store_next++) = (mword)obj | REMSET_OBJECT;
6387 rs = alloc_remset (rs->end_set - rs->data, (void*)1);
6388 rs->next = REMEMBERED_SET;
6389 REMEMBERED_SET = rs;
6390 #ifdef HAVE_KW_THREAD
6391 mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
6393 *(rs->store_next++) = (mword)obj | REMSET_OBJECT;
6398 * ######################################################################
6399 * ######## Collector debugging
6400 * ######################################################################
6403 const char*descriptor_types [] = {
/*
 * Debugging aid: print to stdout everything the collector knows about PTR —
 * which space it is in (nursery, LOS, major, pinned), whether the object is
 * pinned or forwarded, and its vtable/class/descriptor if they look valid.
 * Fix: the forwarded-object message read "forwared".
 */
6415 describe_ptr (char *ptr)
6422 if (ptr_in_nursery (ptr)) {
6423 printf ("Pointer inside nursery.\n");
6425 if (mono_sgen_ptr_is_in_los (ptr, &start)) {
6427 printf ("Pointer is the start of object %p in LOS space.\n", start);
6429 printf ("Pointer is at offset 0x%x of object %p in LOS space.\n", (int)(ptr - start), start);
6431 } else if (major_collector.ptr_is_in_non_pinned_space (ptr)) {
6432 printf ("Pointer inside oldspace.\n");
6433 } else if (major_collector.obj_is_from_pinned_alloc (ptr)) {
6434 printf ("Pointer is inside a pinned chunk.\n");
6436 printf ("Pointer unknown.\n");
6441 if (object_is_pinned (ptr))
6442 printf ("Object is pinned.\n");
6444 if (object_is_forwarded (ptr))
6445 printf ("Object is forwarded.\n");
6447 // FIXME: Handle pointers to the inside of objects
6448 vtable = (MonoVTable*)LOAD_VTABLE (ptr);
6450 printf ("VTable: %p\n", vtable);
/* a NULL or nursery-resident vtable means PTR is not a valid object */
6451 if (vtable == NULL) {
6452 printf ("VTable is invalid (empty).\n");
6455 if (ptr_in_nursery (vtable)) {
6456 printf ("VTable is invalid (points inside nursery).\n");
6459 printf ("Class: %s\n", vtable->klass->name);
6461 desc = ((GCVTable*)vtable)->desc;
6462 printf ("Descriptor: %lx\n", (long)desc);
6465 printf ("Descriptor type: %d (%s)\n", type, descriptor_types [type]);
/*
 * Scan one remembered-set record starting at P and set *FOUND if it covers
 * ADDR. Returns a pointer just past the record so the caller can iterate.
 * Record kinds: single location, range, whole object, and value type
 * (descriptor-driven). NOTE(review): case labels, `return` statements and
 * the REMSET_RANGE/REMSET_OBJECT markers are on lines elided here.
 */
6469 find_in_remset_loc (mword *p, char *addr, gboolean *found)
6475 switch ((*p) & REMSET_TYPE_MASK) {
6476 case REMSET_LOCATION:
/* exact single-slot match */
6477 if (*p == (mword)addr)
6481 ptr = (void**)(*p & ~REMSET_TYPE_MASK);
/* range record: [ptr, ptr + count) */
6483 if ((void**)addr >= ptr && (void**)addr < ptr + count)
6487 ptr = (void**)(*p & ~REMSET_TYPE_MASK);
/* whole-object record: covered span is the object's aligned size in mwords */
6488 count = safe_object_get_size ((MonoObject*)ptr);
6489 count = ALIGN_UP (count);
6490 count /= sizeof (mword);
6491 if ((void**)addr >= ptr && (void**)addr < ptr + count)
6495 ptr = (void**)(*p & ~REMSET_TYPE_MASK);
/* value-type record: element size comes from the GC descriptor */
6499 switch (desc & 0x7) {
6500 case DESC_TYPE_RUN_LENGTH:
6501 OBJ_RUN_LEN_SIZE (skip_size, desc, ptr);
6503 case DESC_TYPE_SMALL_BITMAP:
/* NOTE(review): this passes `start` where the run-length case passes
 * `ptr` — verify against the full file that `start` is intended here */
6504 OBJ_BITMAP_SIZE (skip_size, desc, start);
6508 g_assert_not_reached ();
6511 /* The descriptor includes the size of MonoObject */
6512 skip_size -= sizeof (MonoObject);
6514 if ((void**)addr >= ptr && (void**)addr < ptr + (skip_size / sizeof (gpointer)))
6519 g_assert_not_reached ();
6525 * Return whether ADDR occurs in the remembered sets
/*
 * Searches every remembered set the collector knows about: the global one,
 * the generic-store buffers, each live thread's remsets and store buffer,
 * and the remsets left behind by exited threads. Used only by the
 * consistency checker below. NOTE(review): early-return statements inside
 * the loops are on lines elided from this chunk.
 */
6528 find_in_remsets (char *addr)
6531 SgenThreadInfo *info;
6532 RememberedSet *remset;
6533 GenericStoreRememberedSet *store_remset;
6535 gboolean found = FALSE;
6537 /* the global one */
6538 for (remset = global_remset; remset; remset = remset->next) {
6539 DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
6540 for (p = remset->data; p < remset->store_next;) {
6541 p = find_in_remset_loc (p, addr, &found);
6547 /* the generic store ones */
6548 for (store_remset = generic_store_remsets; store_remset; store_remset = store_remset->next) {
6549 for (i = 0; i < STORE_REMSET_BUFFER_SIZE - 1; ++i) {
6550 if (store_remset->data [i] == addr)
6555 /* the per-thread ones */
6556 for (i = 0; i < THREAD_HASH_SIZE; ++i) {
6557 for (info = thread_table [i]; info; info = info->next) {
6559 for (remset = info->remset; remset; remset = remset->next) {
6560 DEBUG (4, fprintf (gc_debug_file, "Scanning remset for thread %p, range: %p-%p, size: %td\n", info, remset->data, remset->store_next, remset->store_next - remset->data));
6561 for (p = remset->data; p < remset->store_next;) {
6562 p = find_in_remset_loc (p, addr, &found);
/* NOTE(review): the `j + 1` offset into the store buffer presumably skips a
 * reserved slot 0 — confirm against the buffer layout in the full file */
6567 for (j = 0; j < *info->store_remset_buffer_index_addr; ++j) {
6568 if ((*info->store_remset_buffer_addr) [j + 1] == addr)
6574 /* the freed thread ones */
6575 for (remset = freed_thread_remsets; remset; remset = remset->next) {
6576 DEBUG (4, fprintf (gc_debug_file, "Scanning remset for freed thread, range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
6577 for (p = remset->data; p < remset->store_next;) {
6578 p = find_in_remset_loc (p, addr, &found);
/* Set by the HANDLE_PTR check below when an old->new reference has no remset
 * entry; check_consistency () asserts on it after the scan. */
6587 static gboolean missing_remsets;
6590 * We let a missing remset slide if the target object is pinned,
6591 * because the store might have happened but the remset not yet added,
6592 * but in that case the target must be pinned. We might theoretically
6593 * miss some missing remsets this way, but it's very unlikely.
/* Per-slot check used via sgen-scan-object.h: for every pointer into the
 * nursery, verify it is covered by a remset entry or a marked card. */
6596 #define HANDLE_PTR(ptr,obj) do { \
6597 if (*(ptr) && (char*)*(ptr) >= nursery_start && (char*)*(ptr) < nursery_next) { \
6598 if (!find_in_remsets ((char*)(ptr)) && (!use_cardtable || !sgen_card_table_address_is_marked ((mword)ptr))) { \
6599 fprintf (gc_debug_file, "Oldspace->newspace reference %p at offset %td in object %p (%s.%s) not found in remsets.\n", *(ptr), (char*)(ptr) - (char*)(obj), (obj), ((MonoObject*)(obj))->vtable->klass->name_space, ((MonoObject*)(obj))->vtable->klass->name); \
6600 binary_protocol_missing_remset ((obj), (gpointer)LOAD_VTABLE ((obj)), (char*)(ptr) - (char*)(obj), *(ptr), (gpointer)LOAD_VTABLE(*(ptr)), object_is_pinned (*(ptr))); \
6601 if (!object_is_pinned (*(ptr))) \
6602 missing_remsets = TRUE; \
6608 * Check that each object reference which points into the nursery can
6609 * be found in the remembered sets.
/* Per-object callback: scans one heap object's reference slots with the
 * HANDLE_PTR defined above (body supplied by sgen-scan-object.h). */
6612 check_consistency_callback (char *start, size_t size, void *dummy)
6614 GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
6615 DEBUG (8, fprintf (gc_debug_file, "Scanning object %p, vtable: %p (%s)\n", start, vt, vt->klass->name));
6617 #define SCAN_OBJECT_ACTION
6618 #include "sgen-scan-object.h"
6622 * Perform consistency check of the heap.
6624 * Assumes the world is stopped.
/* Walks every major-heap and LOS object with check_consistency_callback and
 * asserts that no old->new reference is missing from the remembered sets
 * (unless the binary protocol is recording, where we only log). */
6627 check_consistency (void)
6629 // Need to add more checks
6631 missing_remsets = FALSE;
6633 DEBUG (1, fprintf (gc_debug_file, "Begin heap consistency check...\n"));
6635 // Check that oldspace->newspace pointers are registered with the collector
6636 major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)check_consistency_callback, NULL);
6638 mono_sgen_los_iterate_objects ((IterateObjectCallbackFunc)check_consistency_callback, NULL);
6640 DEBUG (1, fprintf (gc_debug_file, "Heap consistency check done.\n"));
6642 if (!binary_protocol_is_enabled ())
6643 g_assert (!missing_remsets);
/* Per-slot check: abort if any reference in a major-heap or LOS object has a
 * vtable that cannot be loaded (i.e. points at garbage). */
6648 #define HANDLE_PTR(ptr,obj) do { \
6649 if (*(ptr) && !LOAD_VTABLE (*(ptr))) \
6650 g_error ("Could not load vtable for obj %p slot %d (size %d)", obj, (char*)ptr - (char*)obj, safe_object_get_size ((MonoObject*)obj)); \
/* Scans one object with the HANDLE_PTR above (via sgen-scan-object.h). */
6654 check_major_refs_callback (char *start, size_t size, void *dummy)
6656 #define SCAN_OBJECT_ACTION
6657 #include "sgen-scan-object.h"
/* Runs the vtable-validity check over the whole major heap and LOS. */
6661 check_major_refs (void)
6663 major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)check_major_refs_callback, NULL);
6664 mono_sgen_los_iterate_objects ((IterateObjectCallbackFunc)check_major_refs_callback, NULL);
6667 /* Check that the reference is valid */
6669 #define HANDLE_PTR(ptr,obj) do { \
6671 g_assert (safe_name (*(ptr)) != NULL); \
6678 * Perform consistency check on an object. Currently we only check that the
6679 * reference fields are valid.
/* Scans one object's reference slots, asserting each target has a
 * resolvable class name (body supplied by sgen-scan-object.h). */
6682 check_object (char *start)
6687 #include "sgen-scan-object.h"
6691 * ######################################################################
6692 * ######## Other mono public interface functions.
6693 * ######################################################################
/* Heap-walk support: references found while scanning an object are buffered
 * in batches of REFS_SIZE and flushed to the user callback when full.
 * NOTE(review): the enclosing HeapWalkInfo struct declaration is on lines
 * elided from this chunk. */
6696 #define REFS_SIZE 128
6699 MonoGCReferences callback;
6703 MonoObject *refs [REFS_SIZE];
6704 uintptr_t offsets [REFS_SIZE];
/* Per-slot action: record (offset, target) for each reference, flushing the
 * buffer through hwi->callback when it fills up. */
6708 #define HANDLE_PTR(ptr,obj) do { \
6710 if (hwi->count == REFS_SIZE) { \
6711 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data); \
6715 hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start; \
6716 hwi->refs [hwi->count++] = *(ptr); \
/* Scans one object, collecting its references into HWI's buffers. */
6721 collect_references (HeapWalkInfo *hwi, char *start, size_t size)
6723 #include "sgen-scan-object.h"
/* IterateObjectCallbackFunc adapter: gather one object's references and
 * invoke the user callback with whatever remains buffered (size is reported
 * only on the first call for a given object — later calls pass 0). */
6727 walk_references (char *start, size_t size, void *data)
6729 HeapWalkInfo *hwi = data;
6732 collect_references (hwi, start, size);
/* flush leftovers; also call once for objects with no references at all */
6733 if (hwi->count || !hwi->called)
6734 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
6738 * mono_gc_walk_heap:
6739 * @flags: flags for future use
6740 * @callback: a function pointer called for each object in the heap
6741 * @data: a user data pointer that is passed to callback
6743 * This function can be used to iterate over all the live objects in the heap:
6744 * for each object, @callback is invoked, providing info about the object's
6745 * location in memory, its class, its size and the objects it references.
6746 * For each referenced object its offset from the object address is
6747 * reported in the offsets array.
6748 * The object references may be buffered, so the callback may be invoked
6749 * multiple times for the same object: in all but the first call, the size
6750 * argument will be zero.
6751 * Note that this function can be only called in the #MONO_GC_EVENT_PRE_START_WORLD
6752 * profiler event handler.
6754 * Returns: a non-zero value if the GC doesn't support heap walking
6757 mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
6762 hwi.callback = callback;
/* make nursery objects contiguous before scanning it linearly */
6765 clear_nursery_fragments (nursery_next);
6766 mono_sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, walk_references, &hwi, FALSE);
6768 major_collector.iterate_objects (TRUE, TRUE, walk_references, &hwi);
6769 mono_sgen_los_iterate_objects (walk_references, &hwi);
/*
 * Public entry point: force a collection of GENERATION (0 = nursery only,
 * otherwise a major collection), stopping and restarting the world around it
 * and emitting profiler start/end events.
 */
6775 mono_gc_collect (int generation)
6780 mono_profiler_gc_event (MONO_GC_EVENT_START, generation);
6781 stop_world (generation);
6782 if (generation == 0) {
6783 collect_nursery (0);
6785 major_collection ("user request");
6787 restart_world (generation);
6788 mono_profiler_gc_event (MONO_GC_EVENT_END, generation);
/* Highest generation index this collector supports (body elided here). */
6793 mono_gc_max_generation (void)
/* Number of collections performed for GENERATION: minor count for 0,
 * major count otherwise. */
6799 mono_gc_collection_count (int generation)
6801 if (generation == 0)
6802 return num_minor_gcs;
6803 return num_major_gcs;
/* Approximate bytes in use: LOS usage + allocated nursery span + whatever
 * the major collector reports. Pinned objects are not yet counted. */
6807 mono_gc_get_used_size (void)
6811 tot = los_memory_usage;
6812 tot += nursery_section->next_data - nursery_section->data;
6813 tot += major_collector.get_used_size ();
6814 /* FIXME: account for pinned objects */
/* Total heap size (body elided from this chunk). */
6820 mono_gc_get_heap_size (void)
/* Disable/enable collections (bodies elided from this chunk). */
6826 mono_gc_disable (void)
6834 mono_gc_enable (void)
/* Objects larger than this threshold go to the large-object space. */
6842 mono_gc_get_los_limit (void)
6844 return MAX_SMALL_OBJ_SIZE;
/* Liveness query for O (body elided from this chunk). */
6848 mono_object_is_alive (MonoObject* o)
/* Generation of OBJ: 0 while it is still in the nursery. */
6854 mono_gc_get_generation (MonoObject *obj)
6856 if (ptr_in_nursery (obj))
/* Enable profiler GC events (body elided from this chunk). */
6862 mono_gc_enable_events (void)
/* Weak-reference support: (de)register disappearing links on LINK_ADDR.
 * TRACK selects long (resurrection-tracking) semantics. */
6867 mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track)
6870 mono_gc_register_disappearing_link (obj, link_addr, track);
6875 mono_gc_weak_link_remove (void **link_addr)
6878 mono_gc_register_disappearing_link (NULL, link_addr, FALSE);
/* Read a weak link; the stored pointer is hidden and must be revealed. */
6883 mono_gc_weak_link_get (void **link_addr)
6887 return (MonoObject*) REVEAL_POINTER (*link_addr);
/*
 * Register OBJ (an ephemeron array) by pushing a node onto the global
 * ephemeron list so the collector can process its key/value pairs.
 * NOTE(review): locking around the list update is on lines elided here.
 */
6891 mono_gc_ephemeron_array_add (MonoObject *obj)
6893 EphemeronLinkNode *node;
6897 node = mono_sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
6902 node->array = (char*)obj;
/* push onto the singly-linked global list */
6903 node->next = ephemeron_list;
6904 ephemeron_list = node;
6906 DEBUG (5, fprintf (gc_debug_file, "Registered ephemeron array %p\n", obj));
/*
 * Build a root descriptor from a reference bitmap: small bitmaps are encoded
 * inline (ROOT_DESC_BITMAP), larger ones go through the complex-descriptor
 * table (ROOT_DESC_COMPLEX). A zero-bit descriptor is also inline.
 */
6913 mono_gc_make_descr_from_bitmap (gsize *bitmap, int numbits)
6916 return (void*)MAKE_ROOT_DESC (ROOT_DESC_BITMAP, 0);
/* bitmap fits in one word after reserving the type-tag bits */
6917 } else if (numbits < ((sizeof (*bitmap) * 8) - ROOT_DESC_TYPE_SHIFT)) {
6918 return (void*)MAKE_ROOT_DESC (ROOT_DESC_BITMAP, bitmap [0]);
6920 mword complex = alloc_complex_descriptor (bitmap, numbits);
6921 return (void*)MAKE_ROOT_DESC (ROOT_DESC_COMPLEX, complex);
/* Cache of all-references root descriptors, indexed by bit count (< 32). */
6925 static void *all_ref_root_descrs [32];
/*
 * Descriptor for a root whose first NUMBITS slots are all references:
 * builds an all-ones bitmap and reuses the cached result for small counts.
 * NOTE(review): freeing of the temporary bitmap is on lines elided here.
 */
6928 mono_gc_make_root_descr_all_refs (int numbits)
6933 if (numbits < 32 && all_ref_root_descrs [numbits])
6934 return all_ref_root_descrs [numbits];
6936 gc_bitmap = g_malloc0 (ALIGN_TO (numbits, 8) + 1)
6937 memset (gc_bitmap, 0xff, numbits / 8);
/* set the remaining numbits % 8 bits in the trailing partial byte */
6939 gc_bitmap [numbits / 8] = (1 << (numbits % 8)) - 1;
6940 descr = mono_gc_make_descr_from_bitmap (gc_bitmap, numbits);
6944 all_ref_root_descrs [numbits] = descr;
/*
 * Register MARKER as a user root-marking callback and return a
 * ROOT_DESC_USER descriptor that indexes it in the user_descriptors table.
 */
6950 mono_gc_make_root_descr_user (MonoGCRootMarkFunc marker)
6954 g_assert (user_descriptors_next < MAX_USER_DESCRIPTORS);
6955 descr = (void*)MAKE_ROOT_DESC (ROOT_DESC_USER, (mword)user_descriptors_next);
6956 user_descriptors [user_descriptors_next ++] = marker;
/*
 * Allocate SIZE zeroed bytes of non-moving memory and register it as a GC
 * root described by DESCR. NOTE(review): the failure path after a failed
 * root registration is on lines elided from this chunk.
 */
6962 mono_gc_alloc_fixed (size_t size, void *descr)
6964 /* FIXME: do a single allocation */
6965 void *res = calloc (1, size);
6968 if (!mono_gc_register_root (res, size, descr)) {
/* Counterpart of mono_gc_alloc_fixed: deregister the root, then free. */
6976 mono_gc_free_fixed (void* addr)
6978 mono_gc_deregister_root (addr);
/* Run FUNC(DATA) while holding the GC interruption lock.
 * NOTE(review): the matching LOCK_INTERRUPTION is on a line elided here. */
6983 mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
6987 result = func (data);
6988 UNLOCK_INTERRUPTION;
/* TRUE when the calling thread is registered with the collector. */
6993 mono_gc_is_gc_thread (void)
6997 result = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ()) != NULL;
/*
 * One-time collector initialization: locks, internal allocator, major
 * collector selection, MONO_GC_PARAMS / MONO_GC_DEBUG parsing, suspend /
 * restart signal handlers, the global remembered set and TLS keys.
 * Idempotent via the gc_initialized guard. Statement order here is
 * load-bearing (option parsing feeds later setup); do not reorder.
 * NOTE(review): many braces, `break`/`continue` statements and error exits
 * are on lines elided from this chunk.
 */
7003 mono_gc_base_init (void)
7007 char *major_collector_opt = NULL;
7008 struct sigaction sinfo;
7012 /* the gc_initialized guard seems to imply this method is
7013 idempotent, but LOCK_INIT(gc_mutex) might not be. It's
7014 defined in sgen-gc.h as nothing, so there's no danger at
7016 LOCK_INIT (gc_mutex);
7018 if (gc_initialized) {
7022 pagesize = mono_pagesize ();
7023 gc_debug_file = stdout;
7025 LOCK_INIT (interruption_mutex);
7026 LOCK_INIT (global_remset_mutex);
7027 LOCK_INIT (pin_queue_mutex);
/* first pass over MONO_GC_PARAMS: only extract the major= choice, since the
 * collector must exist before the remaining options can be validated */
7029 if ((env = getenv ("MONO_GC_PARAMS"))) {
7030 opts = g_strsplit (env, ",", -1);
7031 for (ptr = opts; *ptr; ++ptr) {
7033 if (g_str_has_prefix (opt, "major=")) {
7034 opt = strchr (opt, '=') + 1;
7035 major_collector_opt = g_strdup (opt);
/* register the fixed-size internal memory types used throughout sgen */
7043 mono_sgen_init_internal_allocator ();
7045 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FRAGMENT, sizeof (Fragment));
7046 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION, SGEN_SIZEOF_GC_MEM_SECTION);
7047 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FINALIZE_ENTRY, sizeof (FinalizeEntry));
7048 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_DISLINK, sizeof (DisappearingLink));
7049 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord));
7050 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE, sizeof (GrayQueueSection));
7051 g_assert (sizeof (GenericStoreRememberedSet) == sizeof (gpointer) * STORE_REMSET_BUFFER_SIZE);
7052 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_STORE_REMSET, sizeof (GenericStoreRememberedSet));
7053 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));
/* select the major collector; marksweep is the default */
7055 if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep")) {
7056 mono_sgen_marksweep_init (&major_collector);
7057 } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-fixed")) {
7058 mono_sgen_marksweep_fixed_init (&major_collector);
7059 } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-par")) {
7060 mono_sgen_marksweep_par_init (&major_collector);
7061 } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-fixed-par")) {
7062 mono_sgen_marksweep_fixed_par_init (&major_collector);
7063 } else if (!strcmp (major_collector_opt, "copying")) {
7064 mono_sgen_copying_init (&major_collector);
7066 fprintf (stderr, "Unknown major collector `%s'.\n", major_collector_opt);
7070 #ifdef SGEN_HAVE_CARDTABLE
7071 use_cardtable = major_collector.supports_cardtable;
7073 use_cardtable = FALSE;
/* default worker count: one per CPU, capped (cap value on an elided line) */
7076 num_workers = mono_cpu_count ();
7077 g_assert (num_workers > 0);
7078 if (num_workers > 16)
7081 /* Keep this the default for now */
7082 conservative_stack_mark = TRUE;
/* second pass over MONO_GC_PARAMS: everything except major= */
7085 for (ptr = opts; *ptr; ++ptr) {
7087 if (g_str_has_prefix (opt, "major="))
7089 if (g_str_has_prefix (opt, "wbarrier=")) {
7090 opt = strchr (opt, '=') + 1;
7091 if (strcmp (opt, "remset") == 0) {
7092 use_cardtable = FALSE;
7093 } else if (strcmp (opt, "cardtable") == 0) {
7094 if (!use_cardtable) {
7095 if (major_collector.supports_cardtable)
7096 fprintf (stderr, "The cardtable write barrier is not supported on this platform.\n");
7098 fprintf (stderr, "The major collector does not support the cardtable write barrier.\n");
7104 if (g_str_has_prefix (opt, "max-heap-size=")) {
7105 opt = strchr (opt, '=') + 1;
7106 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &max_heap)) {
7107 if ((max_heap & (mono_pagesize () - 1))) {
7108 fprintf (stderr, "max-heap-size size must be a multiple of %d.\n", mono_pagesize ());
7112 fprintf (stderr, "max-heap-size must be an integer.\n");
7117 if (g_str_has_prefix (opt, "workers=")) {
7120 if (!major_collector.is_parallel) {
7121 fprintf (stderr, "The workers= option can only be used for parallel collectors.");
7124 opt = strchr (opt, '=') + 1;
7125 val = strtol (opt, &endptr, 10);
7126 if (!*opt || *endptr) {
7127 fprintf (stderr, "Cannot parse the workers= option value.");
7130 if (val <= 0 || val > 16) {
7131 fprintf (stderr, "The number of workers must be in the range 1 to 16.");
7134 num_workers = (int)val;
7137 if (g_str_has_prefix (opt, "stack-mark=")) {
7138 opt = strchr (opt, '=') + 1;
7139 if (!strcmp (opt, "precise")) {
7140 conservative_stack_mark = FALSE;
7141 } else if (!strcmp (opt, "conservative")) {
7142 conservative_stack_mark = TRUE;
7144 fprintf (stderr, "Invalid value '%s' for stack-mark= option, possible values are: 'precise', 'conservative'.\n", opt);
7150 if (g_str_has_prefix (opt, "nursery-size=")) {
7152 opt = strchr (opt, '=') + 1;
7153 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &val)) {
7154 default_nursery_size = val;
7155 #ifdef SGEN_ALIGN_NURSERY
7156 if ((val & (val - 1))) {
7157 fprintf (stderr, "The nursery size must be a power of two.\n");
/* derive log2 of the nursery size for aligned-nursery pointer checks */
7161 default_nursery_bits = 0;
7162 while (1 << (++ default_nursery_bits) != default_nursery_size)
7166 fprintf (stderr, "nursery-size must be an integer.\n");
/* unknown option: give the collector a chance, then print usage */
7172 if (!(major_collector.handle_gc_param && major_collector.handle_gc_param (opt))) {
7173 fprintf (stderr, "MONO_GC_PARAMS must be a comma-delimited list of one or more of the following:\n");
7174 fprintf (stderr, "  max-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
7175 fprintf (stderr, "  nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
7176 fprintf (stderr, "  major=COLLECTOR (where COLLECTOR is `marksweep', `marksweep-par' or `copying')\n");
7177 fprintf (stderr, "  wbarrier=WBARRIER (where WBARRIER is `remset' or `cardtable')\n");
7178 fprintf (stderr, "  stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
7179 if (major_collector.print_gc_param_usage)
7180 major_collector.print_gc_param_usage ();
7187 if (major_collector.is_parallel)
7188 workers_init (num_workers);
7190 if (major_collector_opt)
7191 g_free (major_collector_opt);
7193 nursery_size = DEFAULT_NURSERY_SIZE;
7194 minor_collection_allowance = MIN_MINOR_COLLECTION_ALLOWANCE;
7195 init_heap_size_limits (max_heap);
/* parse MONO_GC_DEBUG: a debug level plus assorted debugging switches */
7199 if ((env = getenv ("MONO_GC_DEBUG"))) {
7200 opts = g_strsplit (env, ",", -1);
7201 for (ptr = opts; ptr && *ptr; ptr ++) {
7203 if (opt [0] >= '0' && opt [0] <= '9') {
7204 gc_debug_level = atoi (opt);
/* optional ":filename" suffix redirects debug output (pid appended) */
7209 char *rf = g_strdup_printf ("%s.%d", opt, getpid ());
7210 gc_debug_file = fopen (rf, "wb");
7212 gc_debug_file = stderr;
7215 } else if (!strcmp (opt, "collect-before-allocs")) {
7216 collect_before_allocs = 1;
7217 } else if (g_str_has_prefix (opt, "collect-before-allocs=")) {
7218 char *arg = strchr (opt, '=') + 1;
7219 collect_before_allocs = atoi (arg);
7220 } else if (!strcmp (opt, "check-at-minor-collections")) {
7221 consistency_check_at_minor_collection = TRUE;
7222 nursery_clear_policy = CLEAR_AT_GC;
7223 } else if (!strcmp (opt, "xdomain-checks")) {
7224 xdomain_checks = TRUE;
7225 } else if (!strcmp (opt, "clear-at-gc")) {
7226 nursery_clear_policy = CLEAR_AT_GC;
7227 } else if (!strcmp (opt, "clear-nursery-at-gc")) {
7228 nursery_clear_policy = CLEAR_AT_GC;
7229 } else if (!strcmp (opt, "check-scan-starts")) {
7230 do_scan_starts_check = TRUE;
7231 } else if (g_str_has_prefix (opt, "heap-dump=")) {
7232 char *filename = strchr (opt, '=') + 1;
7233 nursery_clear_policy = CLEAR_AT_GC;
7234 heap_dump_file = fopen (filename, "w");
7236 fprintf (heap_dump_file, "<sgen-dump>\n");
7237 #ifdef SGEN_BINARY_PROTOCOL
7238 } else if (g_str_has_prefix (opt, "binary-protocol=")) {
7239 char *filename = strchr (opt, '=') + 1;
7240 binary_protocol_init (filename);
7243 fprintf (stderr, "Invalid format for the MONO_GC_DEBUG env variable: '%s'\n", env);
7244 fprintf (stderr, "The format is: MONO_GC_DEBUG=[l[:filename]|<option>]+ where l is a debug level 0-9.\n");
7245 fprintf (stderr, "Valid options are: collect-before-allocs[=<n>], check-at-minor-collections, xdomain-checks, clear-at-gc.\n");
7252 if (major_collector.post_param_init)
7253 major_collector.post_param_init ();
7255 suspend_ack_semaphore_ptr = &suspend_ack_semaphore;
7256 MONO_SEM_INIT (&suspend_ack_semaphore, 0);
/* install the thread suspend/restart signal handlers */
7258 sigfillset (&sinfo.sa_mask);
7259 sinfo.sa_flags = SA_RESTART | SA_SIGINFO;
7260 sinfo.sa_sigaction = suspend_handler;
7261 if (sigaction (suspend_signal_num, &sinfo, NULL) != 0) {
7262 g_error ("failed sigaction");
7265 sinfo.sa_handler = restart_handler;
7266 if (sigaction (restart_signal_num, &sinfo, NULL) != 0) {
7267 g_error ("failed sigaction");
/* suspended threads block everything except the restart signal */
7270 sigfillset (&suspend_signal_mask);
7271 sigdelset (&suspend_signal_mask, restart_signal_num);
7273 global_remset = alloc_remset (1024, NULL);
7274 global_remset->next = NULL;
/* TLS destructor unregisters the thread from the collector on exit */
7276 pthread_key_create (&remembered_set_key, unregister_thread);
7278 #ifndef HAVE_KW_THREAD
7279 pthread_key_create (&thread_info_key, NULL);
7285 gc_initialized = TRUE;
/* finally register the initializing thread itself */
7287 mono_gc_register_thread (&sinfo);
/* Signal number used to suspend threads (needed by debugger/embedders). */
7291 mono_gc_get_suspend_signal (void)
7293 return suspend_signal_num;
7303 #ifdef HAVE_KW_THREAD
/* __thread variant: emit IL that reads a TLS variable by its offset. */
7304 #define EMIT_TLS_ACCESS(mb,dummy,offset) do { \
7305 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
7306 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
7307 mono_mb_emit_i4 ((mb), (offset)); \
7312 * CEE_MONO_TLS requires the tls offset, not the key, so the code below only works on darwin,
7313 * where the two are the same.
/* pthread-key variant: load the SgenThreadInfo from TLS, then indirect to
 * the requested struct member. */
7316 #define EMIT_TLS_ACCESS(mb,member,dummy) do { \
7317 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
7318 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
7319 mono_mb_emit_i4 ((mb), thread_info_key); \
7320 mono_mb_emit_icon ((mb), G_STRUCT_OFFSET (SgenThreadInfo, member)); \
7321 mono_mb_emit_byte ((mb), CEE_ADD); \
7322 mono_mb_emit_byte ((mb), CEE_LDIND_I); \
/* unsupported TLS configuration: fail loudly at wrapper-generation time */
7325 #define EMIT_TLS_ACCESS(mb,member,dummy) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
7330 #ifdef MANAGED_ALLOCATION
7331 /* FIXME: Do this in the JIT, where specialized allocation sequences can be created
7332 * for each class. This is currently not easy to do, as it is hard to generate basic
7333 * blocks + branches, but it is easy with the linear IL codebase.
7335 * For this to work we'd need to solve the TLAB race, first. Now we
7336 * require the allocator to be in a few known methods to make sure
7337 * that they are executed atomically via the restart mechanism.
/*
 * Build the managed fast-path allocator wrapper for ATYPE (small object,
 * normal object, or vector). The generated IL computes the aligned size,
 * bumps tlab_next, and falls back to the C allocator (mono_gc_alloc_obj /
 * mono_gc_alloc_vector) when the TLAB is exhausted or the object is large.
 * The IL emission order below is the method body — do not reorder.
 * NOTE(review): several lines (registration guard braces, locals, the
 * final return) are elided from this chunk.
 */
7340 create_allocator (int atype)
7342 int p_var, size_var;
7343 guint32 slowpath_branch, max_size_branch;
7344 MonoMethodBuilder *mb;
7346 MonoMethodSignature *csig;
7347 static gboolean registered = FALSE;
7348 int tlab_next_addr_var, new_next_var;
7350 const char *name = NULL;
7351 AllocatorWrapperInfo *info;
7353 #ifdef HAVE_KW_THREAD
7354 int tlab_next_addr_offset = -1;
7355 int tlab_temp_end_offset = -1;
7357 MONO_THREAD_VAR_OFFSET (tlab_next_addr, tlab_next_addr_offset);
7358 MONO_THREAD_VAR_OFFSET (tlab_temp_end, tlab_temp_end_offset);
7360 g_assert (tlab_next_addr_offset != -1);
7361 g_assert (tlab_temp_end_offset != -1);
/* register the slow-path icalls once per process */
7365 mono_register_jit_icall (mono_gc_alloc_obj, "mono_gc_alloc_obj", mono_create_icall_signature ("object ptr int"), FALSE);
7366 mono_register_jit_icall (mono_gc_alloc_vector, "mono_gc_alloc_vector", mono_create_icall_signature ("object ptr int int"), FALSE);
7370 if (atype == ATYPE_SMALL) {
7372 name = "AllocSmall";
7373 } else if (atype == ATYPE_NORMAL) {
7376 } else if (atype == ATYPE_VECTOR) {
7378 name = "AllocVector";
7380 g_assert_not_reached ();
/* signature: object (intptr vtable[, intptr length for vectors]) */
7383 csig = mono_metadata_signature_alloc (mono_defaults.corlib, num_params);
7384 csig->ret = &mono_defaults.object_class->byval_arg;
7385 for (i = 0; i < num_params; ++i)
7386 csig->params [i] = &mono_defaults.int_class->byval_arg;
7388 mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_ALLOC);
7389 size_var = mono_mb_add_local (mb, &mono_defaults.int32_class->byval_arg);
7390 if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
7391 /* size = vtable->klass->instance_size; */
7392 mono_mb_emit_ldarg (mb, 0);
7393 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoVTable, klass));
7394 mono_mb_emit_byte (mb, CEE_ADD);
7395 mono_mb_emit_byte (mb, CEE_LDIND_I);
7396 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoClass, instance_size));
7397 mono_mb_emit_byte (mb, CEE_ADD);
7398 /* FIXME: assert instance_size stays a 4 byte integer */
7399 mono_mb_emit_byte (mb, CEE_LDIND_U4);
7400 mono_mb_emit_stloc (mb, size_var);
7401 } else if (atype == ATYPE_VECTOR) {
7402 MonoExceptionClause *clause;
7404 MonoClass *oom_exc_class;
7407 /* n > MONO_ARRAY_MAX_INDEX -> OverflowException */
7408 mono_mb_emit_ldarg (mb, 1);
7409 mono_mb_emit_icon (mb, MONO_ARRAY_MAX_INDEX);
7410 pos = mono_mb_emit_short_branch (mb, CEE_BLE_UN_S);
7411 mono_mb_emit_exception (mb, "OverflowException", NULL);
7412 mono_mb_patch_short_branch (mb, pos);
/* size computation is wrapped in a try so multiply/add overflow becomes
 * OutOfMemoryException via the handler emitted below */
7414 clause = mono_image_alloc0 (mono_defaults.corlib, sizeof (MonoExceptionClause));
7415 clause->try_offset = mono_mb_get_label (mb);
7417 /* vtable->klass->sizes.element_size */
7418 mono_mb_emit_ldarg (mb, 0);
7419 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoVTable, klass));
7420 mono_mb_emit_byte (mb, CEE_ADD);
7421 mono_mb_emit_byte (mb, CEE_LDIND_I);
7422 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoClass, sizes.element_size));
7423 mono_mb_emit_byte (mb, CEE_ADD);
7424 mono_mb_emit_byte (mb, CEE_LDIND_U4);
/* element_size * length, with overflow checking */
7427 mono_mb_emit_ldarg (mb, 1);
7428 mono_mb_emit_byte (mb, CEE_MUL_OVF_UN);
7429 /* + sizeof (MonoArray) */
7430 mono_mb_emit_icon (mb, sizeof (MonoArray));
7431 mono_mb_emit_byte (mb, CEE_ADD_OVF_UN);
7432 mono_mb_emit_stloc (mb, size_var);
7434 pos_leave = mono_mb_emit_branch (mb, CEE_LEAVE);
/* catch OverflowException -> throw OutOfMemoryException */
7437 clause->flags = MONO_EXCEPTION_CLAUSE_NONE;
7438 clause->try_len = mono_mb_get_pos (mb) - clause->try_offset;
7439 clause->data.catch_class = mono_class_from_name (mono_defaults.corlib,
7440 "System", "OverflowException");
7441 g_assert (clause->data.catch_class);
7442 clause->handler_offset = mono_mb_get_label (mb);
7444 oom_exc_class = mono_class_from_name (mono_defaults.corlib,
7445 "System", "OutOfMemoryException");
7446 g_assert (oom_exc_class);
7447 ctor = mono_class_get_method_from_name (oom_exc_class, ".ctor", 0);
7450 mono_mb_emit_byte (mb, CEE_POP);
7451 mono_mb_emit_op (mb, CEE_NEWOBJ, ctor);
7452 mono_mb_emit_byte (mb, CEE_THROW);
7454 clause->handler_len = mono_mb_get_pos (mb) - clause->handler_offset;
7455 mono_mb_set_clauses (mb, 1, clause);
7456 mono_mb_patch_branch (mb, pos_leave);
7459 g_assert_not_reached ();
7462 /* size += ALLOC_ALIGN - 1; */
7463 mono_mb_emit_ldloc (mb, size_var);
7464 mono_mb_emit_icon (mb, ALLOC_ALIGN - 1);
7465 mono_mb_emit_byte (mb, CEE_ADD);
7466 /* size &= ~(ALLOC_ALIGN - 1); */
7467 mono_mb_emit_icon (mb, ~(ALLOC_ALIGN - 1));
7468 mono_mb_emit_byte (mb, CEE_AND);
7469 mono_mb_emit_stloc (mb, size_var);
7471 /* if (size > MAX_SMALL_OBJ_SIZE) goto slowpath */
7472 if (atype != ATYPE_SMALL) {
7473 mono_mb_emit_ldloc (mb, size_var);
7474 mono_mb_emit_icon (mb, MAX_SMALL_OBJ_SIZE);
7475 max_size_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BGT_S);
7479 * We need to modify tlab_next, but the JIT only supports reading, so we read
7480 * another tls var holding its address instead.
7483 /* tlab_next_addr (local) = tlab_next_addr (TLS var) */
7484 tlab_next_addr_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
7485 EMIT_TLS_ACCESS (mb, tlab_next_addr, tlab_next_addr_offset);
7486 mono_mb_emit_stloc (mb, tlab_next_addr_var);
7488 /* p = (void**)tlab_next; */
7489 p_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
7490 mono_mb_emit_ldloc (mb, tlab_next_addr_var);
7491 mono_mb_emit_byte (mb, CEE_LDIND_I);
7492 mono_mb_emit_stloc (mb, p_var);
7494 /* new_next = (char*)p + size; */
7495 new_next_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
7496 mono_mb_emit_ldloc (mb, p_var);
7497 mono_mb_emit_ldloc (mb, size_var);
7498 mono_mb_emit_byte (mb, CEE_CONV_I);
7499 mono_mb_emit_byte (mb, CEE_ADD);
7500 mono_mb_emit_stloc (mb, new_next_var);
7502 /* tlab_next = new_next */
7503 mono_mb_emit_ldloc (mb, tlab_next_addr_var);
7504 mono_mb_emit_ldloc (mb, new_next_var);
7505 mono_mb_emit_byte (mb, CEE_STIND_I);
7507 /* if (G_LIKELY (new_next < tlab_temp_end)) */
7508 mono_mb_emit_ldloc (mb, new_next_var);
7509 EMIT_TLS_ACCESS (mb, tlab_temp_end, tlab_temp_end_offset);
7510 slowpath_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BLT_UN_S);
/* slow path: TLAB exhausted or object too large — call the C allocator */
7513 if (atype != ATYPE_SMALL)
7514 mono_mb_patch_short_branch (mb, max_size_branch);
7516 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
7517 mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
7519 /* FIXME: mono_gc_alloc_obj takes a 'size_t' as an argument, not an int32 */
7520 mono_mb_emit_ldarg (mb, 0);
7521 mono_mb_emit_ldloc (mb, size_var);
7522 if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
7523 mono_mb_emit_icall (mb, mono_gc_alloc_obj);
7524 } else if (atype == ATYPE_VECTOR) {
7525 mono_mb_emit_ldarg (mb, 1);
7526 mono_mb_emit_icall (mb, mono_gc_alloc_vector);
7528 g_assert_not_reached ();
7530 mono_mb_emit_byte (mb, CEE_RET);
/* fast path resumes here: initialize the object header */
7533 mono_mb_patch_short_branch (mb, slowpath_branch);
7535 /* FIXME: Memory barrier */
/* *p = vtable; */
7538 mono_mb_emit_ldloc (mb, p_var);
7539 mono_mb_emit_ldarg (mb, 0);
7540 mono_mb_emit_byte (mb, CEE_STIND_I);
7542 if (atype == ATYPE_VECTOR) {
7543 /* arr->max_length = max_length; */
7544 mono_mb_emit_ldloc (mb, p_var);
7545 mono_mb_emit_ldflda (mb, G_STRUCT_OFFSET (MonoArray, max_length));
7546 mono_mb_emit_ldarg (mb, 1);
7547 mono_mb_emit_byte (mb, CEE_STIND_I);
/* return p */
7551 mono_mb_emit_ldloc (mb, p_var);
7552 mono_mb_emit_byte (mb, CEE_RET);
7554 res = mono_mb_create_method (mb, csig, 8);
/* locals are fully written before use; skip the zero-init prologue */
7556 mono_method_get_header (res)->init_locals = FALSE;
7558 info = mono_image_alloc0 (mono_defaults.corlib, sizeof (AllocatorWrapperInfo));
7559 info->gc_name = "sgen";
7560 info->alloc_type = atype;
7561 mono_marshal_set_wrapper_info (res, info);
7568 mono_gc_get_gc_name (void)
/* Cache of generated allocator wrappers, one per allocator type, plus the
 * single generated write-barrier wrapper. */
7573 static MonoMethod* alloc_method_cache [ATYPE_NUM];
7574 static MonoMethod *write_barrier_method;
/*
 * TRUE if IP is inside one of the generated allocator wrappers or the
 * write barrier — used by the suspend machinery to decide whether a thread
 * was interrupted mid-allocation. NOTE(review): return statements are on
 * lines elided from this chunk.
 */
7577 is_ip_in_managed_allocator (MonoDomain *domain, gpointer ip)
7583 if (!mono_thread_internal_current ())
7584 /* Happens during thread attach */
7589 ji = mono_jit_info_table_find (domain, ip);
7592 method = ji->method;
7594 if (method == write_barrier_method)
7596 for (i = 0; i < ATYPE_NUM; ++i)
7597 if (method == alloc_method_cache [i])
7603 * Generate an allocator method implementing the fast path of mono_gc_alloc_obj ().
7604 * The signature of the called method is:
7605 * object allocate (MonoVTable *vtable)
/*
 * mono_gc_get_managed_allocator:
 * Returns the managed fast-path allocator appropriate for KLASS, or falls
 * through (elided lines presumably return NULL) when the fast path cannot
 * be used and allocation must go through the C slow path.
 */
7608 mono_gc_get_managed_allocator (MonoVTable *vtable, gboolean for_box)
7610 #ifdef MANAGED_ALLOCATION
7611 MonoClass *klass = vtable->klass;
7613 #ifdef HAVE_KW_THREAD
7614 int tlab_next_offset = -1;
7615 int tlab_temp_end_offset = -1;
7616 MONO_THREAD_VAR_OFFSET (tlab_next, tlab_next_offset);
7617 MONO_THREAD_VAR_OFFSET (tlab_temp_end, tlab_temp_end_offset);
/* Without resolvable TLAB TLS offsets the fast path cannot be emitted. */
7619 if (tlab_next_offset == -1 || tlab_temp_end_offset == -1)
7623 if (!mono_runtime_has_tls_get ())
/* Objects bigger than a TLAB can never be bump-allocated. */
7625 if (klass->instance_size > tlab_size)
/* Finalizable or remoting-proxied classes, and allocation profiling,
 * all require the C allocation path. */
7627 if (klass->has_finalize || klass->marshalbyref || (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS))
7631 if (klass->byval_arg.type == MONO_TYPE_STRING)
/* Debug option: force a collection before every allocation. */
7633 if (collect_before_allocs)
/* Small instances get the cheaper SMALL variant (no max-size check). */
7636 if (ALIGN_TO (klass->instance_size, ALLOC_ALIGN) < MAX_SMALL_OBJ_SIZE)
7637 return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL);
7639 return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL);
/*
 * mono_gc_get_managed_array_allocator:
 * Returns the managed fast-path allocator for arrays of KLASS, or falls
 * through (elided lines presumably return NULL) when the fast path is
 * unavailable.  Only vectors are handled; RANK is presumably checked on
 * an elided line — confirm against the full source.
 */
7646 mono_gc_get_managed_array_allocator (MonoVTable *vtable, int rank)
7648 #ifdef MANAGED_ALLOCATION
7649 MonoClass *klass = vtable->klass;
7651 #ifdef HAVE_KW_THREAD
7652 int tlab_next_offset = -1;
7653 int tlab_temp_end_offset = -1;
7654 MONO_THREAD_VAR_OFFSET (tlab_next, tlab_next_offset);
7655 MONO_THREAD_VAR_OFFSET (tlab_temp_end, tlab_temp_end_offset);
/* Without resolvable TLAB TLS offsets the fast path cannot be emitted. */
7657 if (tlab_next_offset == -1 || tlab_temp_end_offset == -1)
7663 if (!mono_runtime_has_tls_get ())
/* Allocation profiling forces the C path. */
7665 if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
/* Debug option: force a collection before every allocation. */
7667 if (collect_before_allocs)
/* Arrays are never finalizable or remoting proxies. */
7669 g_assert (!mono_class_has_finalizer (klass) && !klass->marshalbyref);
7671 return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR);
/*
 * mono_gc_get_managed_allocator_by_type:
 * Returns the cached managed allocator wrapper for ATYPE, creating it on
 * first use.  Creation and cache publication happen under the loader lock.
 */
7678 mono_gc_get_managed_allocator_by_type (int atype)
7680 #ifdef MANAGED_ALLOCATION
/* The emitted fast path depends on TLS access; bail out without it. */
7683 if (!mono_runtime_has_tls_get ())
7686 mono_loader_lock ();
7687 res = alloc_method_cache [atype];
/* Cache miss: build the wrapper and publish it while still holding the lock. */
7689 res = alloc_method_cache [atype] = create_allocator (atype);
7690 mono_loader_unlock ();
/* Number of distinct managed allocator types (presumably returns ATYPE_NUM —
 * body elided in this chunk). */
7698 mono_gc_get_managed_allocator_types (void)
/*
 * mono_gc_get_write_barrier:
 * Returns (creating on first use, double-checked under the loader lock) the
 * managed wrapper implementing the fast path of the generic-store write
 * barrier.  The emitted IL returns early for stores that need no remembering:
 * target slot already in the nursery, stored value not in the nursery, target
 * on the current thread's stack, or duplicate of the last remembered slot.
 * Otherwise it appends the slot address to the per-thread store remset
 * buffer, calling mono_gc_wbarrier_generic_nostore () when the buffer fills.
 */
7705 mono_gc_get_write_barrier (void)
7708 MonoMethodBuilder *mb;
7709 MonoMethodSignature *sig;
7710 #ifdef MANAGED_WBARRIER
7711 int label_no_wb_1, label_no_wb_2, label_no_wb_3, label_no_wb_4, label_need_wb, label_slow_path;
7712 #ifndef SGEN_ALIGN_NURSERY
7713 int label_continue_1, label_continue_2, label_no_wb_5;
7714 int dereferenced_var;
7716 int buffer_var, buffer_index_var, dummy_var;
7718 #ifdef HAVE_KW_THREAD
7719 int stack_end_offset = -1, store_remset_buffer_offset = -1;
7720 int store_remset_buffer_index_offset = -1, store_remset_buffer_index_addr_offset = -1;
/* The managed barrier hard-requires these TLS offsets to be resolvable. */
7722 MONO_THREAD_VAR_OFFSET (stack_end, stack_end_offset);
7723 g_assert (stack_end_offset != -1);
7724 MONO_THREAD_VAR_OFFSET (store_remset_buffer, store_remset_buffer_offset);
7725 g_assert (store_remset_buffer_offset != -1);
7726 MONO_THREAD_VAR_OFFSET (store_remset_buffer_index, store_remset_buffer_index_offset);
7727 g_assert (store_remset_buffer_index_offset != -1);
7728 MONO_THREAD_VAR_OFFSET (store_remset_buffer_index_addr, store_remset_buffer_index_addr_offset);
7729 g_assert (store_remset_buffer_index_addr_offset != -1);
/* This remset-buffer barrier is incompatible with the card table. */
7733 g_assert (!use_cardtable);
7735 // FIXME: Maybe create a separate version for ctors (the branch would be
7736 // correctly predicted more times)
7737 if (write_barrier_method)
7738 return write_barrier_method;
7740 /* Create the IL version of mono_gc_barrier_generic_store () */
7741 sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
7742 sig->ret = &mono_defaults.void_class->byval_arg;
7743 sig->params [0] = &mono_defaults.int_class->byval_arg;
7745 mb = mono_mb_new (mono_defaults.object_class, "wbarrier", MONO_WRAPPER_WRITE_BARRIER);
7747 #ifdef MANAGED_WBARRIER
7748 if (mono_runtime_has_tls_get ()) {
7749 #ifdef SGEN_ALIGN_NURSERY
7750 // if (ptr_in_nursery (ptr)) return;
7752 * Masking out the bits might be faster, but we would have to use 64 bit
7753 * immediates, which might be slower.
7755 mono_mb_emit_ldarg (mb, 0);
7756 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
7757 mono_mb_emit_byte (mb, CEE_SHR_UN);
7758 mono_mb_emit_icon (mb, (mword)nursery_start >> DEFAULT_NURSERY_BITS);
7759 label_no_wb_1 = mono_mb_emit_branch (mb, CEE_BEQ);
7761 // if (!ptr_in_nursery (*ptr)) return;
7762 mono_mb_emit_ldarg (mb, 0);
7763 mono_mb_emit_byte (mb, CEE_LDIND_I);
7764 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
7765 mono_mb_emit_byte (mb, CEE_SHR_UN);
7766 mono_mb_emit_icon (mb, (mword)nursery_start >> DEFAULT_NURSERY_BITS);
7767 label_no_wb_2 = mono_mb_emit_branch (mb, CEE_BNE_UN);
/* Unaligned-nursery variant: explicit range compares instead of shifts. */
7770 // if (ptr < (nursery_start)) goto continue;
7771 mono_mb_emit_ldarg (mb, 0);
7772 mono_mb_emit_ptr (mb, (gpointer) nursery_start);
7773 label_continue_1 = mono_mb_emit_branch (mb, CEE_BLT);
7775 // if (ptr >= nursery_real_end)) goto continue;
7776 mono_mb_emit_ldarg (mb, 0);
7777 mono_mb_emit_ptr (mb, (gpointer) nursery_real_end);
7778 label_continue_2 = mono_mb_emit_branch (mb, CEE_BGE);
/* ptr is inside the nursery: no barrier needed. */
7781 label_no_wb_1 = mono_mb_emit_branch (mb, CEE_BR);
7784 mono_mb_patch_branch (mb, label_continue_1);
7785 mono_mb_patch_branch (mb, label_continue_2);
7787 // Dereference and store in local var
7788 dereferenced_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
7789 mono_mb_emit_ldarg (mb, 0);
7790 mono_mb_emit_byte (mb, CEE_LDIND_I);
7791 mono_mb_emit_stloc (mb, dereferenced_var);
7793 // if (*ptr < nursery_start) return;
7794 mono_mb_emit_ldloc (mb, dereferenced_var);
7795 mono_mb_emit_ptr (mb, (gpointer) nursery_start);
7796 label_no_wb_2 = mono_mb_emit_branch (mb, CEE_BLT);
7798 // if (*ptr >= nursery_end) return;
7799 mono_mb_emit_ldloc (mb, dereferenced_var);
7800 mono_mb_emit_ptr (mb, (gpointer) nursery_real_end);
7801 label_no_wb_5 = mono_mb_emit_branch (mb, CEE_BGE);
7804 // if (ptr >= stack_end) goto need_wb;
7805 mono_mb_emit_ldarg (mb, 0);
7806 EMIT_TLS_ACCESS (mb, stack_end, stack_end_offset);
7807 label_need_wb = mono_mb_emit_branch (mb, CEE_BGE_UN);
7809 // if (ptr >= stack_start) return;
/* The address of a fresh local approximates the current stack top. */
7810 dummy_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
7811 mono_mb_emit_ldarg (mb, 0);
7812 mono_mb_emit_ldloc_addr (mb, dummy_var);
7813 label_no_wb_3 = mono_mb_emit_branch (mb, CEE_BGE_UN);
7816 mono_mb_patch_branch (mb, label_need_wb);
7818 // buffer = STORE_REMSET_BUFFER;
7819 buffer_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
7820 EMIT_TLS_ACCESS (mb, store_remset_buffer, store_remset_buffer_offset);
7821 mono_mb_emit_stloc (mb, buffer_var);
7823 // buffer_index = STORE_REMSET_BUFFER_INDEX;
7824 buffer_index_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
7825 EMIT_TLS_ACCESS (mb, store_remset_buffer_index, store_remset_buffer_index_offset);
7826 mono_mb_emit_stloc (mb, buffer_index_var);
7828 // if (buffer [buffer_index] == ptr) return;
7829 mono_mb_emit_ldloc (mb, buffer_var);
7830 mono_mb_emit_ldloc (mb, buffer_index_var);
7831 g_assert (sizeof (gpointer) == 4 || sizeof (gpointer) == 8);
/* Scale the index by pointer size via a shift (2 for 32-bit, 3 for 64-bit). */
7832 mono_mb_emit_icon (mb, sizeof (gpointer) == 4 ? 2 : 3);
7833 mono_mb_emit_byte (mb, CEE_SHL);
7834 mono_mb_emit_byte (mb, CEE_ADD);
7835 mono_mb_emit_byte (mb, CEE_LDIND_I);
7836 mono_mb_emit_ldarg (mb, 0);
7837 label_no_wb_4 = mono_mb_emit_branch (mb, CEE_BEQ);
/* ++buffer_index */
7840 mono_mb_emit_ldloc (mb, buffer_index_var);
7841 mono_mb_emit_icon (mb, 1);
7842 mono_mb_emit_byte (mb, CEE_ADD);
7843 mono_mb_emit_stloc (mb, buffer_index_var);
7845 // if (buffer_index >= STORE_REMSET_BUFFER_SIZE) goto slow_path;
7846 mono_mb_emit_ldloc (mb, buffer_index_var);
7847 mono_mb_emit_icon (mb, STORE_REMSET_BUFFER_SIZE);
7848 label_slow_path = mono_mb_emit_branch (mb, CEE_BGE);
7850 // buffer [buffer_index] = ptr;
7851 mono_mb_emit_ldloc (mb, buffer_var);
7852 mono_mb_emit_ldloc (mb, buffer_index_var);
7853 g_assert (sizeof (gpointer) == 4 || sizeof (gpointer) == 8);
7854 mono_mb_emit_icon (mb, sizeof (gpointer) == 4 ? 2 : 3);
7855 mono_mb_emit_byte (mb, CEE_SHL);
7856 mono_mb_emit_byte (mb, CEE_ADD);
7857 mono_mb_emit_ldarg (mb, 0);
7858 mono_mb_emit_byte (mb, CEE_STIND_I);
7860 // STORE_REMSET_BUFFER_INDEX = buffer_index;
7861 EMIT_TLS_ACCESS (mb, store_remset_buffer_index_addr, store_remset_buffer_index_addr_offset);
7862 mono_mb_emit_ldloc (mb, buffer_index_var);
7863 mono_mb_emit_byte (mb, CEE_STIND_I);
/* All "no write barrier needed" exits converge on a single RET. */
7866 mono_mb_patch_branch (mb, label_no_wb_1);
7867 mono_mb_patch_branch (mb, label_no_wb_2);
7868 mono_mb_patch_branch (mb, label_no_wb_3);
7869 mono_mb_patch_branch (mb, label_no_wb_4);
7870 #ifndef SGEN_ALIGN_NURSERY
7871 mono_mb_patch_branch (mb, label_no_wb_5);
7873 mono_mb_emit_byte (mb, CEE_RET);
/* Slow path: buffer full (or no TLS fast path) — call the C barrier. */
7876 mono_mb_patch_branch (mb, label_slow_path);
7880 mono_mb_emit_ldarg (mb, 0);
7881 mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
7882 mono_mb_emit_byte (mb, CEE_RET);
7884 res = mono_mb_create_method (mb, sig, 16);
/* Publish under the loader lock; another thread may have won the race. */
7887 mono_loader_lock ();
7888 if (write_barrier_method) {
7889 /* Already created */
7890 mono_free_method (res);
7892 /* double-checked locking */
7893 mono_memory_barrier ();
7894 write_barrier_method = res;
7896 mono_loader_unlock ();
7898 return write_barrier_method;
/* Returns a newly allocated, caller-freed description of this collector. */
7902 mono_gc_get_description (void)
7904 return g_strdup ("sgen");
/* Hint that the runtime is an interactive desktop app (body elided here;
 * presumably a no-op or tuning tweak for sgen — confirm in full source). */
7908 mono_gc_set_desktop_mode (void)
/* Whether this collector may move objects (body elided in this chunk). */
7913 mono_gc_is_moving (void)
/* Whether garbage collection is currently disabled (body elided here). */
7919 mono_gc_is_disabled (void)
/*
 * mono_sgen_debug_printf:
 * printf-style logging to the GC debug file; messages whose LEVEL exceeds
 * the configured gc_debug_level are dropped.
 */
7925 mono_sgen_debug_printf (int level, const char *format, ...)
7929 if (level > gc_debug_level)
7932 va_start (ap, format);
7933 vfprintf (gc_debug_file, format, ap);
/* Returns the stream used for GC debug output (shared; do not fclose). */
7938 mono_sgen_get_logfile (void)
7940 return gc_debug_file;
/* Windows DllMain hook for GC thread attach/detach notifications
 * (body elided in this chunk). */
7944 BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
7950 #endif /* HAVE_SGEN_GC */