2 * sgen-gc.c: Simple generational GC.
5 * Paolo Molaro (lupus@ximian.com)
7 * Copyright 2005-2010 Novell, Inc (http://www.novell.com)
9 * Thread start/stop adapted from Boehm's GC:
10 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
11 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
12 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
13 * Copyright (c) 2000-2004 by Hewlett-Packard Company. All rights reserved.
15 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
16 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
18 * Permission is hereby granted to use or copy this program
19 * for any purpose, provided the above notices are retained on all copies.
20 * Permission to modify the code and to distribute modified code is granted,
21 * provided the above notices are retained, and a notice that the code was
22 * modified is included with the above copyright notice.
25 * Copyright 2001-2003 Ximian, Inc
26 * Copyright 2003-2010 Novell, Inc.
28 * Permission is hereby granted, free of charge, to any person obtaining
29 * a copy of this software and associated documentation files (the
30 * "Software"), to deal in the Software without restriction, including
31 * without limitation the rights to use, copy, modify, merge, publish,
32 * distribute, sublicense, and/or sell copies of the Software, and to
33 * permit persons to whom the Software is furnished to do so, subject to
34 * the following conditions:
36 * The above copyright notice and this permission notice shall be
37 * included in all copies or substantial portions of the Software.
39 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
40 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
41 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
42 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
43 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
44 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
45 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
48 * Important: allocation always provides zeroed memory, having to do
49 * a memset after allocation is deadly for performance.
50 * Memory usage at startup is currently as follows:
52 * 64 KB internal space
54 * We should provide a small memory config with half the sizes
56 * We currently try to make as few mono assumptions as possible:
57 * 1) 2-word header with no GC pointers in it (first vtable, second to store the
59 * 2) gc descriptor is the second word in the vtable (first word in the class)
60 * 3) 8 byte alignment is the minimum and enough (not true for special structures (SIMD), FIXME)
61 * 4) there is a function to get an object's size and the number of
62 * elements in an array.
63 * 5) we know the special way bounds are allocated for complex arrays
64 * 6) we know about proxies and how to treat them when domains are unloaded
66 * Always try to keep stack usage to a minimum: no recursive behaviour
67 * and no large stack allocs.
69 * General description.
70 * Objects are initially allocated in a nursery using a fast bump-pointer technique.
71 * When the nursery is full we start a nursery collection: this is performed with a
73 * When the old generation is full we start a copying GC of the old generation as well:
74 * this will be changed to mark&sweep with copying when fragmentation becomes too severe
75 * in the future. Maybe we'll even do both during the same collection like IMMIX.
77 * The things that complicate this description are:
78 * *) pinned objects: we can't move them so we need to keep track of them
79 * *) no precise info of the thread stacks and registers: we need to be able to
80 * quickly find the objects that may be referenced conservatively and pin them
81 * (this makes the first issues more important)
82 * *) large objects are too expensive to be dealt with using copying GC: we handle them
83 * with mark/sweep during major collections
84 * *) some objects need to not move even if they are small (interned strings, Type handles):
85 * we use mark/sweep for them, too: they are not allocated in the nursery, but inside
86 * PinnedChunks regions
92 *) we could have a function pointer in MonoClass to implement
93 customized write barriers for value types
95 *) investigate the stuff needed to advance a thread to a GC-safe
96 point (single-stepping, read from unmapped memory etc) and implement it.
97 This would enable us to inline allocations and write barriers, for example,
98 or at least parts of them, like the write barrier checks.
99 We may need this also for handling precise info on stacks, even simple things
100 as having uninitialized data on the stack and having to wait for the prolog
101 to zero it. Not an issue for the last frame that we scan conservatively.
102 We could always not trust the value in the slots anyway.
104 *) modify the jit to save info about references in stack locations:
105 this can be done just for locals as a start, so that at least
106 part of the stack is handled precisely.
108 *) test/fix endianness issues
110 *) Implement a card table as the write barrier instead of remembered
111 sets? Card tables are not easy to implement with our current
112 memory layout. We have several different kinds of major heap
113 objects: Small objects in regular blocks, small objects in pinned
114 chunks and LOS objects. If we just have a pointer we have no way
115 to tell which kind of object it points into, therefore we cannot
116 know where its card table is. The least we have to do to make
117 this happen is to get rid of write barriers for indirect stores.
120 *) Get rid of write barriers for indirect stores. We can do this by
121 telling the GC to wbarrier-register an object once we do an ldloca
122 or ldelema on it, and to unregister it once it's not used anymore
123 (it can only travel downwards on the stack). The problem with
124 unregistering is that it needs to happen eventually no matter
125 what, even if exceptions are thrown, the thread aborts, etc.
126 Rodrigo suggested that we could do only the registering part and
127 let the collector find out (pessimistically) when it's safe to
128 unregister, namely when the stack pointer of the thread that
129 registered the object is higher than it was when the registering
130 happened. This might make for a good first implementation to get
131 some data on performance.
133 *) Some sort of blacklist support? Blacklisting is a concept from the
134 Boehm GC: if during a conservative scan we find pointers to an
135 area which we might use as heap, we mark that area as unusable, so
136 pointer retention by random pinning pointers is reduced.
138 *) experiment with max small object size (very small right now - 2kb,
139 because it's tied to the max freelist size)
141 *) add an option to mmap the whole heap in one chunk: it makes for many
142 simplifications in the checks (put the nursery at the top and just use a single
143 check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
144 not flexible (too much of the address space may be used by default or we can't
145 increase the heap as needed) and we'd need a race-free mechanism to return memory
146 back to the system (mprotect(PROT_NONE) will still keep the memory allocated if it
147 was written to, munmap is needed, but the following mmap may not find the same segment
150 *) memzero the major fragments after restarting the world and optionally a smaller
153 *) investigate having fragment zeroing threads
155 *) separate locks for finalization and other minor stuff to reduce
158 *) try a different copying order to improve memory locality
160 *) a thread abort after a store but before the write barrier will
161 prevent the write barrier from executing
163 *) specialized dynamically generated markers/copiers
165 *) Dynamically adjust TLAB size to the number of threads. If we have
166 too many threads that do allocation, we might need smaller TLABs,
167 and we might get better performance with larger TLABs if we only
168 have a handful of threads. We could sum up the space left in all
169 assigned TLABs and if that's more than some percentage of the
170 nursery size, reduce the TLAB size.
172 *) Explore placing unreachable objects on unused nursery memory.
173 Instead of memset'ng a region to zero, place an int[] covering it.
174 A good place to start is add_nursery_frag. The tricky thing here is
175 placing those objects atomically outside of a collection.
185 #include <semaphore.h>
194 #define _XOPEN_SOURCE
196 #include "metadata/metadata-internals.h"
197 #include "metadata/class-internals.h"
198 #include "metadata/gc-internal.h"
199 #include "metadata/object-internals.h"
200 #include "metadata/threads.h"
201 #include "metadata/sgen-gc.h"
202 #include "metadata/sgen-cardtable.h"
203 #include "metadata/sgen-archdep.h"
204 #include "metadata/mono-gc.h"
205 #include "metadata/method-builder.h"
206 #include "metadata/profiler-private.h"
207 #include "metadata/monitor.h"
208 #include "metadata/threadpool-internals.h"
209 #include "metadata/mempool-internals.h"
210 #include "metadata/marshal.h"
211 #include "utils/mono-mmap.h"
212 #include "utils/mono-time.h"
213 #include "utils/mono-semaphore.h"
214 #include "utils/mono-counters.h"
215 #include "utils/mono-proclib.h"
217 #include <mono/utils/memcheck.h>
219 #if defined(__MACH__)
220 #include "utils/mach-support.h"
223 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
227 #include "mono/cil/opcode.def"
233 #undef pthread_create
235 #undef pthread_detach
238 * ######################################################################
239 * ######## Types and constants used by the GC.
240 * ######################################################################
243 static int gc_initialized = 0;
244 /* If set, do a minor collection before every allocation */
245 static gboolean collect_before_allocs = FALSE;
246 /* If set, do a heap consistency check before each minor collection */
247 static gboolean consistency_check_at_minor_collection = FALSE;
248 /* If set, check that there are no references to the domain left at domain unload */
249 static gboolean xdomain_checks = FALSE;
250 /* If not null, dump the heap after each collection into this file */
251 static FILE *heap_dump_file = NULL;
252 /* If set, mark stacks conservatively, even if precise marking is possible */
253 static gboolean conservative_stack_mark = TRUE;
254 /* If set, do a plausibility check on the scan_starts before and after
256 static gboolean do_scan_starts_check = FALSE;
258 #ifdef HEAVY_STATISTICS
259 static long long stat_objects_alloced = 0;
260 static long long stat_bytes_alloced = 0;
261 long long stat_objects_alloced_degraded = 0;
262 long long stat_bytes_alloced_degraded = 0;
263 static long long stat_bytes_alloced_los = 0;
265 long long stat_copy_object_called_nursery = 0;
266 long long stat_objects_copied_nursery = 0;
267 long long stat_copy_object_called_major = 0;
268 long long stat_objects_copied_major = 0;
270 long long stat_scan_object_called_nursery = 0;
271 long long stat_scan_object_called_major = 0;
273 long long stat_nursery_copy_object_failed_from_space = 0;
274 long long stat_nursery_copy_object_failed_forwarded = 0;
275 long long stat_nursery_copy_object_failed_pinned = 0;
277 static long long stat_store_remsets = 0;
278 static long long stat_store_remsets_unique = 0;
279 static long long stat_saved_remsets_1 = 0;
280 static long long stat_saved_remsets_2 = 0;
281 static long long stat_local_remsets_processed = 0;
282 static long long stat_global_remsets_added = 0;
283 static long long stat_global_remsets_readded = 0;
284 static long long stat_global_remsets_processed = 0;
285 static long long stat_global_remsets_discarded = 0;
287 static long long stat_wasted_fragments_used = 0;
288 static long long stat_wasted_fragments_bytes = 0;
290 static int stat_wbarrier_set_field = 0;
291 static int stat_wbarrier_set_arrayref = 0;
292 static int stat_wbarrier_arrayref_copy = 0;
293 static int stat_wbarrier_generic_store = 0;
294 static int stat_wbarrier_generic_store_remset = 0;
295 static int stat_wbarrier_set_root = 0;
296 static int stat_wbarrier_value_copy = 0;
297 static int stat_wbarrier_object_copy = 0;
300 static long long time_minor_pre_collection_fragment_clear = 0;
301 static long long time_minor_pinning = 0;
302 static long long time_minor_scan_remsets = 0;
303 static long long time_minor_scan_card_table = 0;
304 static long long time_minor_scan_pinned = 0;
305 static long long time_minor_scan_registered_roots = 0;
306 static long long time_minor_scan_thread_data = 0;
307 static long long time_minor_finish_gray_stack = 0;
308 static long long time_minor_fragment_creation = 0;
310 static long long time_major_pre_collection_fragment_clear = 0;
311 static long long time_major_pinning = 0;
312 static long long time_major_scan_pinned = 0;
313 static long long time_major_scan_registered_roots = 0;
314 static long long time_major_scan_thread_data = 0;
315 static long long time_major_scan_alloc_pinned = 0;
316 static long long time_major_scan_finalized = 0;
317 static long long time_major_scan_big_objects = 0;
318 static long long time_major_finish_gray_stack = 0;
319 static long long time_major_free_bigobjs = 0;
320 static long long time_major_los_sweep = 0;
321 static long long time_major_sweep = 0;
322 static long long time_major_fragment_creation = 0;
324 #define DEBUG(level,a) do {if (G_UNLIKELY ((level) <= SGEN_MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) a;} while (0)
326 static int gc_debug_level = 0;
327 static FILE* gc_debug_file;
331 mono_gc_flush_info (void)
333 fflush (gc_debug_file);
338 * Define this to allow the user to change the nursery size by
339 * specifying its value in the MONO_GC_PARAMS environmental
340 * variable. See mono_gc_base_init for details.
342 #define USER_CONFIG 1
344 #define TV_DECLARE(name) gint64 name
345 #define TV_GETTIME(tv) tv = mono_100ns_ticks ()
/* Elapsed time between two 100ns tick counts, converted to microseconds.
 * Both arguments are fully parenthesized so that compound expressions
 * (not just plain identifiers) expand correctly. */
#define TV_ELAPSED(start,end) (int)(((end) - (start)) / 10)
347 #define TV_ELAPSED_MS(start,end) ((TV_ELAPSED((start),(end)) + 500) / 1000)
/* Round val up to the next multiple of align (align must be a power of two).
 * val is parenthesized before the cast so compound expressions are converted
 * as a whole rather than having the cast bind to their first operand only. */
#define ALIGN_TO(val,align) ((((guint64)(val)) + ((align) - 1)) & ~((align) - 1))
351 /* The method used to clear the nursery */
352 /* Clearing at nursery collections is the safest, but has bad interactions with caches.
353 * Clearing at TLAB creation is much faster, but more complex and it might expose hard
358 CLEAR_AT_TLAB_CREATION
359 } NurseryClearPolicy;
361 static NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
364 * The young generation is divided into fragments. This is because
365 * we can hand one fragment to a thread for lock-less fast alloc and
366 * because the young generation ends up fragmented anyway by pinned objects.
367 * Once a collection is done, a list of fragments is created. When doing
368 * thread local alloc we use smallish nurseries so we allow new threads to
369 * allocate memory from gen0 without triggering a collection. Threads that
370 * are found to allocate lots of memory are given bigger fragments. This
371 * should make the finalizer thread use little nursery memory after a while.
372 * We should start assigning threads very small fragments: if there are many
373 * threads the nursery will be full of reserved space that the threads may not
374 * use at all, slowing down allocation speed.
375 * Thread local allocation is done from areas of memory Hotspot calls Thread Local
376 * Allocation Buffers (TLABs).
378 typedef struct _Fragment Fragment;
382 char *fragment_start;
383 char *fragment_limit; /* the current soft limit for allocation */
387 /* the runtime can register areas of memory as roots: we keep two lists of roots,
388 * a pinned root set for conservatively scanned roots and a normal one for
389 * precisely scanned roots (currently implemented as a single list).
391 typedef struct _RootRecord RootRecord;
400 * We're never actually using the first element. It's always set to
401 * NULL to simplify the elimination of consecutive duplicate
404 #define STORE_REMSET_BUFFER_SIZE 1024
406 typedef struct _GenericStoreRememberedSet GenericStoreRememberedSet;
407 struct _GenericStoreRememberedSet {
408 GenericStoreRememberedSet *next;
409 /* We need one entry less because the first entry of store
410 remset buffers is always a dummy and we don't copy it. */
411 gpointer data [STORE_REMSET_BUFFER_SIZE - 1];
414 /* we have 4 possible values in the low 2 bits */
416 REMSET_LOCATION, /* just a pointer to the exact location */
417 REMSET_RANGE, /* range of pointer fields */
418 REMSET_OBJECT, /* mark all the object for scanning */
419 REMSET_VTYPE, /* a valuetype array described by a gc descriptor and a count */
420 REMSET_TYPE_MASK = 0x3
423 #ifdef HAVE_KW_THREAD
424 static __thread RememberedSet *remembered_set MONO_TLS_FAST;
426 static pthread_key_t remembered_set_key;
427 static RememberedSet *global_remset;
428 static RememberedSet *freed_thread_remsets;
429 static GenericStoreRememberedSet *generic_store_remsets = NULL;
431 /*A two slots cache for recently inserted remsets */
432 static gpointer global_remset_cache [2];
434 /* FIXME: later choose a size that takes into account the RememberedSet struct
435 * and doesn't waste any alloc padding space.
437 #define DEFAULT_REMSET_SIZE 1024
438 static RememberedSet* alloc_remset (int size, gpointer id);
440 #define object_is_forwarded SGEN_OBJECT_IS_FORWARDED
441 #define object_is_pinned SGEN_OBJECT_IS_PINNED
442 #define pin_object SGEN_PIN_OBJECT
443 #define unpin_object SGEN_UNPIN_OBJECT
445 #define ptr_in_nursery(p) (SGEN_PTR_IN_NURSERY ((p), DEFAULT_NURSERY_BITS, nursery_start, nursery_real_end))
447 #define LOAD_VTABLE SGEN_LOAD_VTABLE
450 safe_name (void* obj)
452 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
453 return vt->klass->name;
456 #define safe_object_get_size mono_sgen_safe_object_get_size
459 * ######################################################################
460 * ######## Global data.
461 * ######################################################################
463 static LOCK_DECLARE (gc_mutex);
464 static int gc_disabled = 0;
465 static int num_minor_gcs = 0;
466 static int num_major_gcs = 0;
468 static gboolean use_cardtable;
472 /* good sizes are 512KB-1MB: larger ones increase a lot memzeroing time */
473 #define DEFAULT_NURSERY_SIZE (default_nursery_size)
474 static int default_nursery_size = (1 << 22);
475 #ifdef SGEN_ALIGN_NURSERY
476 /* The number of trailing 0 bits in DEFAULT_NURSERY_SIZE */
477 #define DEFAULT_NURSERY_BITS (default_nursery_bits)
478 static int default_nursery_bits = 22;
483 #define DEFAULT_NURSERY_SIZE (4*1024*1024)
484 #ifdef SGEN_ALIGN_NURSERY
485 #define DEFAULT_NURSERY_BITS 22
490 #ifndef SGEN_ALIGN_NURSERY
491 #define DEFAULT_NURSERY_BITS -1
494 #define MIN_MINOR_COLLECTION_ALLOWANCE (DEFAULT_NURSERY_SIZE * 4)
496 #define SCAN_START_SIZE SGEN_SCAN_START_SIZE
498 /* the minimum size of a fragment that we consider useful for allocation */
499 #define FRAGMENT_MIN_SIZE (512)
501 static mword pagesize = 4096;
502 static mword nursery_size;
503 static int degraded_mode = 0;
505 static mword total_alloc = 0;
506 /* use this to tune when to do a major/minor collection */
507 static mword memory_pressure = 0;
508 static mword minor_collection_allowance;
509 static int minor_collection_sections_alloced = 0;
511 static GCMemSection *nursery_section = NULL;
512 static mword lowest_heap_address = ~(mword)0;
513 static mword highest_heap_address = 0;
515 static LOCK_DECLARE (interruption_mutex);
516 static LOCK_DECLARE (global_remset_mutex);
518 #define LOCK_GLOBAL_REMSET pthread_mutex_lock (&global_remset_mutex)
519 #define UNLOCK_GLOBAL_REMSET pthread_mutex_unlock (&global_remset_mutex)
521 typedef struct _FinalizeEntry FinalizeEntry;
522 struct _FinalizeEntry {
527 typedef struct _FinalizeEntryHashTable FinalizeEntryHashTable;
528 struct _FinalizeEntryHashTable {
529 FinalizeEntry **table;
534 typedef struct _DisappearingLink DisappearingLink;
535 struct _DisappearingLink {
536 DisappearingLink *next;
540 typedef struct _DisappearingLinkHashTable DisappearingLinkHashTable;
541 struct _DisappearingLinkHashTable {
542 DisappearingLink **table;
547 typedef struct _EphemeronLinkNode EphemeronLinkNode;
549 struct _EphemeronLinkNode {
550 EphemeronLinkNode *next;
565 int current_collection_generation = -1;
568 * The link pointer is hidden by negating each bit. We use the lowest
569 * bit of the link (before negation) to store whether it needs
570 * resurrection tracking.
572 #define HIDE_POINTER(p,t) ((gpointer)(~((gulong)(p)|((t)?1:0))))
573 #define REVEAL_POINTER(p) ((gpointer)((~(gulong)(p))&~3L))
575 #define DISLINK_OBJECT(d) (REVEAL_POINTER (*(d)->link))
576 #define DISLINK_TRACK(d) ((~(gulong)(*(d)->link)) & 1)
579 * The finalizable hash has the object as the key, the
580 * disappearing_link hash, has the link address as key.
582 static FinalizeEntryHashTable minor_finalizable_hash;
583 static FinalizeEntryHashTable major_finalizable_hash;
584 /* objects that are ready to be finalized */
585 static FinalizeEntry *fin_ready_list = NULL;
586 static FinalizeEntry *critical_fin_list = NULL;
588 static DisappearingLinkHashTable minor_disappearing_link_hash;
589 static DisappearingLinkHashTable major_disappearing_link_hash;
591 static EphemeronLinkNode *ephemeron_list;
593 static int num_ready_finalizers = 0;
594 static int no_finalize = 0;
597 ROOT_TYPE_NORMAL = 0, /* "normal" roots */
598 ROOT_TYPE_PINNED = 1, /* roots without a GC descriptor */
599 ROOT_TYPE_WBARRIER = 2, /* roots with a write barrier */
603 /* registered roots: the key to the hash is the root start address */
605 * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
607 static RootRecord **roots_hash [ROOT_TYPE_NUM] = { NULL, NULL };
608 static int roots_hash_size [ROOT_TYPE_NUM] = { 0, 0, 0 };
609 static mword roots_size = 0; /* amount of memory in the root set */
610 static int num_roots_entries [ROOT_TYPE_NUM] = { 0, 0, 0 };
613 * The current allocation cursors
614 * We allocate objects in the nursery.
615 * The nursery is the area between nursery_start and nursery_real_end.
616 * Allocation is done from a Thread Local Allocation Buffer (TLAB). TLABs are allocated
617 * from nursery fragments.
618 * tlab_next is the pointer to the space inside the TLAB where the next object will
620 * tlab_temp_end is the pointer to the end of the temporary space reserved for
621 * the allocation: it allows us to set the scan starts at reasonable intervals.
622 * tlab_real_end points to the end of the TLAB.
623 * nursery_frag_real_end points to the end of the currently used nursery fragment.
624 * nursery_first_pinned_start points to the start of the first pinned object in the nursery
625 * nursery_last_pinned_end points to the end of the last pinned object in the nursery
626 * At the next allocation, the area of the nursery where objects can be present is
627 * between MIN(nursery_first_pinned_start, first_fragment_start) and
628 * MAX(nursery_last_pinned_end, nursery_frag_real_end)
630 static char *nursery_start = NULL;
632 #ifdef HAVE_KW_THREAD
633 #define TLAB_ACCESS_INIT
634 #define TLAB_START tlab_start
635 #define TLAB_NEXT tlab_next
636 #define TLAB_TEMP_END tlab_temp_end
637 #define TLAB_REAL_END tlab_real_end
638 #define REMEMBERED_SET remembered_set
639 #define STORE_REMSET_BUFFER store_remset_buffer
640 #define STORE_REMSET_BUFFER_INDEX store_remset_buffer_index
641 #define IN_CRITICAL_REGION thread_info->in_critical_region
643 static pthread_key_t thread_info_key;
644 #define TLAB_ACCESS_INIT SgenThreadInfo *__thread_info__ = pthread_getspecific (thread_info_key)
645 #define TLAB_START (__thread_info__->tlab_start)
646 #define TLAB_NEXT (__thread_info__->tlab_next)
647 #define TLAB_TEMP_END (__thread_info__->tlab_temp_end)
648 #define TLAB_REAL_END (__thread_info__->tlab_real_end)
649 #define REMEMBERED_SET (__thread_info__->remset)
650 #define STORE_REMSET_BUFFER (__thread_info__->store_remset_buffer)
651 #define STORE_REMSET_BUFFER_INDEX (__thread_info__->store_remset_buffer_index)
652 #define IN_CRITICAL_REGION (__thread_info__->in_critical_region)
655 /* we use the memory barrier only to prevent compiler reordering (a memory constraint may be enough) */
656 #define ENTER_CRITICAL_REGION do {IN_CRITICAL_REGION = 1;mono_memory_barrier ();} while (0)
657 #define EXIT_CRITICAL_REGION do {IN_CRITICAL_REGION = 0;mono_memory_barrier ();} while (0)
660 * FIXME: What is faster, a TLS variable pointing to a structure, or separate TLS
661 * variables for next+temp_end ?
663 #ifdef HAVE_KW_THREAD
664 static __thread SgenThreadInfo *thread_info;
665 static __thread char *tlab_start;
666 static __thread char *tlab_next;
667 static __thread char *tlab_temp_end;
668 static __thread char *tlab_real_end;
669 static __thread gpointer *store_remset_buffer;
670 static __thread long store_remset_buffer_index;
671 /* Used by the managed allocator/wbarrier */
672 static __thread char **tlab_next_addr;
673 static __thread char *stack_end;
674 static __thread long *store_remset_buffer_index_addr;
676 static char *nursery_next = NULL;
677 static char *nursery_frag_real_end = NULL;
678 static char *nursery_real_end = NULL;
679 static char *nursery_last_pinned_end = NULL;
681 /* The size of a TLAB */
682 /* The bigger the value, the less often we have to go to the slow path to allocate a new
683 * one, but the more space is wasted by threads not allocating much memory.
685 * FIXME: Make this self-tuning for each thread.
687 static guint32 tlab_size = (1024 * 4);
689 /*How much space is tolerable to be wasted from the current fragment when allocating a new TLAB*/
690 #define MAX_NURSERY_TLAB_WASTE 512
692 /* fragments that are free and ready to be used for allocation */
693 static Fragment *nursery_fragments = NULL;
694 /* freelist of fragment structures */
695 static Fragment *fragment_freelist = NULL;
697 #define MAX_SMALL_OBJ_SIZE SGEN_MAX_SMALL_OBJ_SIZE
699 /* Functions supplied by the runtime to be called by the GC */
700 static MonoGCCallbacks gc_callbacks;
702 #define ALLOC_ALIGN SGEN_ALLOC_ALIGN
703 #define ALLOC_ALIGN_BITS SGEN_ALLOC_ALIGN_BITS
705 #define ALIGN_UP SGEN_ALIGN_UP
707 #define MOVED_OBJECTS_NUM 64
708 static void *moved_objects [MOVED_OBJECTS_NUM];
709 static int moved_objects_idx = 0;
712 * ######################################################################
713 * ######## Macros and function declarations.
714 * ######################################################################
/* TRUE if addr lies inside the known heap address range.
 * BUG FIX: the body previously referenced `p' instead of the `addr'
 * parameter, silently depending on a variable named `p' existing at every
 * expansion site.  The explicit mword cast lets both pointer and integral
 * arguments be compared against the mword boundary globals. */
#define ADDR_IN_HEAP_BOUNDARIES(addr) (((mword)(addr)) >= lowest_heap_address && ((mword)(addr)) < highest_heap_address)
720 align_pointer (void *ptr)
722 mword p = (mword)ptr;
723 p += sizeof (gpointer) - 1;
724 p &= ~ (sizeof (gpointer) - 1);
728 typedef SgenGrayQueue GrayQueue;
730 typedef void (*CopyOrMarkObjectFunc) (void**, GrayQueue*);
731 typedef char* (*ScanObjectFunc) (char*, GrayQueue*);
733 /* forward declarations */
734 static int stop_world (void);
735 static int restart_world (void);
736 static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise);
737 static void scan_from_remsets (void *start_nursery, void *end_nursery, GrayQueue *queue);
738 static void scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, char *addr_start, char *addr_end, int root_type, GrayQueue *queue);
739 static void scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeEntry *list, GrayQueue *queue);
740 static void find_pinning_ref_from_thread (char *obj, size_t size);
741 static void update_current_thread_stack (void *start);
742 static void finalize_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, GrayQueue *queue);
743 static void add_or_remove_disappearing_link (MonoObject *obj, void **link, gboolean track, int generation);
744 static void null_link_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, GrayQueue *queue);
745 static void null_links_for_domain (MonoDomain *domain, int generation);
746 static gboolean search_fragment_for_size (size_t size);
747 static int search_fragment_for_size_range (size_t desired_size, size_t minimum_size);
748 static void clear_nursery_fragments (char *next);
749 static void pin_from_roots (void *start_nursery, void *end_nursery);
750 static int pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, GrayQueue *queue);
751 static void optimize_pin_queue (int start_slot);
752 static void clear_remsets (void);
753 static void clear_tlabs (void);
754 static void sort_addresses (void **array, int size);
755 static void drain_gray_stack (GrayQueue *queue);
756 static void finish_gray_stack (char *start_addr, char *end_addr, int generation, GrayQueue *queue);
757 static gboolean need_major_collection (void);
758 static void major_collection (const char *reason);
760 static void mono_gc_register_disappearing_link (MonoObject *obj, void **link, gboolean track);
762 void describe_ptr (char *ptr);
763 void check_object (char *start);
765 static void check_consistency (void);
766 static void check_major_refs (void);
767 static void check_scan_starts (void);
768 static void check_for_xdomain_refs (void);
769 static void dump_heap (const char *type, int num, const char *reason);
771 void mono_gc_scan_for_specific_ref (MonoObject *key);
773 static void init_stats (void);
775 static int mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue);
776 static void clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue);
777 static void null_ephemerons_for_domain (MonoDomain *domain);
779 SgenMajorCollector major;
781 #include "sgen-protocol.c"
782 #include "sgen-pinning.c"
783 #include "sgen-pinning-stats.c"
784 #include "sgen-gray.c"
785 #include "sgen-workers.c"
786 #include "sgen-los.c"
787 #include "sgen-cardtable.c"
789 /* Root bitmap descriptors are simpler: the lower three bits describe the type
790 * and we either have 30/62 bitmap bits or nibble-based run-length,
791 * or a complex descriptor, or a user defined marker function.
794 ROOT_DESC_CONSERVATIVE, /* 0, so matches NULL value */
799 ROOT_DESC_TYPE_MASK = 0x7,
800 ROOT_DESC_TYPE_SHIFT = 3,
803 #define MAKE_ROOT_DESC(type,val) ((type) | ((val) << ROOT_DESC_TYPE_SHIFT))
805 #define MAX_USER_DESCRIPTORS 16
807 static gsize* complex_descriptors = NULL;
808 static int complex_descriptors_size = 0;
809 static int complex_descriptors_next = 0;
810 static MonoGCRootMarkFunc user_descriptors [MAX_USER_DESCRIPTORS];
811 static int user_descriptors_next = 0;
814 alloc_complex_descriptor (gsize *bitmap, int numbits)
818 numbits = ALIGN_TO (numbits, GC_BITS_PER_WORD);
819 nwords = numbits / GC_BITS_PER_WORD + 1;
822 res = complex_descriptors_next;
823 /* linear search, so we don't have duplicates with domain load/unload
824 * this should not be performance critical or we'd have bigger issues
825 * (the number and size of complex descriptors should be small).
827 for (i = 0; i < complex_descriptors_next; ) {
828 if (complex_descriptors [i] == nwords) {
830 for (j = 0; j < nwords - 1; ++j) {
831 if (complex_descriptors [i + 1 + j] != bitmap [j]) {
841 i += complex_descriptors [i];
843 if (complex_descriptors_next + nwords > complex_descriptors_size) {
844 int new_size = complex_descriptors_size * 2 + nwords;
845 complex_descriptors = g_realloc (complex_descriptors, new_size * sizeof (gsize));
846 complex_descriptors_size = new_size;
848 DEBUG (6, fprintf (gc_debug_file, "Complex descriptor %d, size: %d (total desc memory: %d)\n", res, nwords, complex_descriptors_size));
849 complex_descriptors_next += nwords;
850 complex_descriptors [res] = nwords;
851 for (i = 0; i < nwords - 1; ++i) {
852 complex_descriptors [res + 1 + i] = bitmap [i];
853 DEBUG (6, fprintf (gc_debug_file, "\tvalue: %p\n", (void*)complex_descriptors [res + 1 + i]));
/* Returns a pointer into complex_descriptors for the complex descriptor
 * whose index is encoded in the vtable's desc field above LOW_TYPE_BITS. */
860 mono_sgen_get_complex_descriptor (GCVTable *vt)
862 	return complex_descriptors + (vt->desc >> LOW_TYPE_BITS);
866  * Descriptor builders.
/* Strings contain no references, so a plain run-length descriptor with no
 * encoded size or run suffices. */
869 mono_gc_make_descr_for_string (gsize *bitmap, int numbits)
871 	return (void*) DESC_TYPE_RUN_LENGTH;
/*
 * mono_gc_make_descr_for_object:
 * Builds a GC descriptor for an object type from its reference bitmap and
 * size.  Chooses the most compact encoding that fits: ptr-free/run-length,
 * small bitmap, large bitmap, or (as a last resort) a complex descriptor
 * interned via alloc_complex_descriptor().
 * NOTE(review): this listing is elided; several body lines are not shown.
 */
875 mono_gc_make_descr_for_object (gsize *bitmap, int numbits, size_t obj_size)
877 	int first_set = -1, num_set = 0, last_set = -1, i;
879 	size_t stored_size = obj_size;
/* scan the bitmap to find the first/last set bit and count references */
880 	for (i = 0; i < numbits; ++i) {
881 		if (bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) {
889 	 * We don't encode the size of types that don't contain
890 	 * references because they might not be aligned, i.e. the
891 	 * bottom two bits might be set, which would clash with the
892 	 * bits we need to encode the descriptor type. Since we don't
893 	 * use the encoded size to skip objects, other than for
894 	 * processing remsets, in which case only the positions of
895 	 * references are relevant, this is not a problem.
898 		return (void*)DESC_TYPE_RUN_LENGTH;
/* the size is stored shifted by 1, so the low two bits must be clear */
899 	g_assert (!(stored_size & 0x3));
900 	if (stored_size <= MAX_SMALL_OBJ_SIZE) {
901 		/* check run-length encoding first: one byte offset, one byte number of pointers
902 		 * on 64 bit archs, we can have 3 runs, just one on 32.
903 		 * It may be better to use nibbles.
906 			desc = DESC_TYPE_RUN_LENGTH | (stored_size << 1);
907 			DEBUG (6, fprintf (gc_debug_file, "Ptrfree descriptor %p, size: %zd\n", (void*)desc, stored_size));
/* a single contiguous run of references: encode offset and length */
909 		} else if (first_set < 256 && num_set < 256 && (first_set + num_set == last_set + 1)) {
910 			desc = DESC_TYPE_RUN_LENGTH | (stored_size << 1) | (first_set << 16) | (num_set << 24);
911 			DEBUG (6, fprintf (gc_debug_file, "Runlen descriptor %p, size: %zd, first set: %d, num set: %d\n", (void*)desc, stored_size, first_set, num_set));
914 	/* we know the 2-word header is ptr-free */
915 	if (last_set < SMALL_BITMAP_SIZE + OBJECT_HEADER_WORDS) {
916 		desc = DESC_TYPE_SMALL_BITMAP | (stored_size << 1) | ((*bitmap >> OBJECT_HEADER_WORDS) << SMALL_BITMAP_SHIFT);
917 		DEBUG (6, fprintf (gc_debug_file, "Smallbitmap descriptor %p, size: %zd, last set: %d\n", (void*)desc, stored_size, last_set));
921 	/* we know the 2-word header is ptr-free */
922 	if (last_set < LARGE_BITMAP_SIZE + OBJECT_HEADER_WORDS) {
923 		desc = DESC_TYPE_LARGE_BITMAP | ((*bitmap >> OBJECT_HEADER_WORDS) << LOW_TYPE_BITS);
924 		DEBUG (6, fprintf (gc_debug_file, "Largebitmap descriptor %p, size: %zd, last set: %d\n", (void*)desc, stored_size, last_set));
927 	/* it's a complex object ... */
928 	desc = DESC_TYPE_COMPLEX | (alloc_complex_descriptor (bitmap, last_set + 1) << LOW_TYPE_BITS);
932 /* If the array holds references, numbits == 1 and the first bit is set in elem_bitmap */
/*
 * mono_gc_make_descr_for_array:
 * Builds a GC descriptor for an array/vector type from its element bitmap
 * and element size.  Encodings, in order of preference: ptr-free, all-refs,
 * small per-element bitmap, or a complex-array descriptor.
 * NOTE(review): this listing is elided; several body lines are not shown.
 */
934 mono_gc_make_descr_for_array (int vector, gsize *elem_bitmap, int numbits, size_t elem_size)
936 	int first_set = -1, num_set = 0, last_set = -1, i;
937 	mword desc = vector? DESC_TYPE_VECTOR: DESC_TYPE_ARRAY;
/* scan the element bitmap for first/last set bit and reference count */
938 	for (i = 0; i < numbits; ++i) {
939 		if (elem_bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) {
946 	/* See comment at the definition of DESC_TYPE_RUN_LENGTH. */
948 		return (void*)DESC_TYPE_RUN_LENGTH;
949 	if (elem_size <= MAX_ELEMENT_SIZE) {
950 		desc |= elem_size << VECTOR_ELSIZE_SHIFT;
952 			return (void*)(desc | VECTOR_SUBTYPE_PTRFREE);
954 		/* Note: we also handle structs with just ref fields */
955 		if (num_set * sizeof (gpointer) == elem_size) {
956 			return (void*)(desc | VECTOR_SUBTYPE_REFS | ((gssize)(-1) << 16));
958 		/* FIXME: try run-len first */
959 		/* Note: we can't skip the object header here, because it's not present */
960 		if (last_set <= SMALL_BITMAP_SIZE) {
961 			return (void*)(desc | VECTOR_SUBTYPE_BITMAP | (*elem_bitmap << 16));
964 	/* it's an array of complex structs ... */
965 	desc = DESC_TYPE_COMPLEX_ARR;
966 	desc |= alloc_complex_descriptor (elem_bitmap, last_set + 1) << LOW_TYPE_BITS;
970 /* Return the bitmap encoded by a descriptor */
/*
 * mono_gc_get_bitmap_for_descr:
 * Decodes a descriptor back into a freshly allocated reference bitmap
 * (caller owns the returned memory; *numbits receives the bit count).
 * Only run-length and small-bitmap descriptors are handled here; other
 * types hit g_assert_not_reached().
 * NOTE(review): this listing is elided; several body lines are not shown.
 */
972 mono_gc_get_bitmap_for_descr (void *descr, int *numbits)
974 	mword d = (mword)descr;
978 	case DESC_TYPE_RUN_LENGTH: {
/* run encoding: byte 2 is the first-set offset, byte 3 the run length */
979 		int first_set = (d >> 16) & 0xff;
980 		int num_set = (d >> 24) & 0xff;
983 		bitmap = g_new0 (gsize, (first_set + num_set + 7) / 8);
985 		for (i = first_set; i < first_set + num_set; ++i)
986 			bitmap [i / GC_BITS_PER_WORD] |= ((gsize)1 << (i % GC_BITS_PER_WORD));
988 		*numbits = first_set + num_set;
992 	case DESC_TYPE_SMALL_BITMAP:
993 		bitmap = g_new0 (gsize, 1);
/* shift the stored bitmap back up past the (ptr-free) object header */
995 		bitmap [0] = (d >> SMALL_BITMAP_SHIFT) << OBJECT_HEADER_WORDS;
997 		*numbits = GC_BITS_PER_WORD;
1001 		g_assert_not_reached ();
/*
 * is_xdomain_ref_allowed:
 * Whitelist of known-benign cross-appdomain references used by the
 * xdomain-checks debugging mode: thread internals, real-proxy unwrapped
 * servers, cached CultureInfo arrays, remoting byte[] buffers and
 * threadpool AsyncResult queues.
 * NOTE(review): this listing is elided; the return statements between the
 * visible conditions are not shown.
 */
1006 is_xdomain_ref_allowed (gpointer *ptr, char *obj, MonoDomain *domain)
1008 	MonoObject *o = (MonoObject*)(obj);
1009 	MonoObject *ref = (MonoObject*)*(ptr);
1010 	int offset = (char*)(ptr) - (char*)o;
1012 	if (o->vtable->klass == mono_defaults.thread_class && offset == G_STRUCT_OFFSET (MonoThread, internal_thread))
1014 	if (o->vtable->klass == mono_defaults.internal_thread_class && offset == G_STRUCT_OFFSET (MonoInternalThread, current_appcontext))
1016 	if (mono_class_has_parent (o->vtable->klass, mono_defaults.real_proxy_class) &&
1017 			offset == G_STRUCT_OFFSET (MonoRealProxy, unwrapped_server))
1019 	/* Thread.cached_culture_info */
1020 	if (!strcmp (ref->vtable->klass->name_space, "System.Globalization") &&
1021 			!strcmp (ref->vtable->klass->name, "CultureInfo") &&
1022 			!strcmp(o->vtable->klass->name_space, "System") &&
1023 			!strcmp(o->vtable->klass->name, "Object[]"))
1026 	 * at System.IO.MemoryStream.InternalConstructor (byte[],int,int,bool,bool) [0x0004d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:121
1027 	 * at System.IO.MemoryStream..ctor (byte[]) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:81
1028 	 * at (wrapper remoting-invoke-with-check) System.IO.MemoryStream..ctor (byte[]) <IL 0x00020, 0xffffffff>
1029 	 * at System.Runtime.Remoting.Messaging.CADMethodCallMessage.GetArguments () [0x0000d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/CADMessages.cs:327
1030 	 * at System.Runtime.Remoting.Messaging.MethodCall..ctor (System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/MethodCall.cs:87
1031 	 * at System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) [0x00018] in /home/schani/Work/novell/trunk/mcs/class/corlib/System/AppDomain.cs:1213
1032 	 * at (wrapper remoting-invoke-with-check) System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) <IL 0x0003d, 0xffffffff>
1033 	 * at System.Runtime.Remoting.Channels.CrossAppDomainSink.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00008] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Channels/CrossAppDomainChannel.cs:198
1034 	 * at (wrapper runtime-invoke) object.runtime_invoke_CrossAppDomainSink/ProcessMessageRes_object_object (object,intptr,intptr,intptr) <IL 0x0004c, 0xffffffff>
1036 	if (!strcmp (ref->vtable->klass->name_space, "System") &&
1037 			!strcmp (ref->vtable->klass->name, "Byte[]") &&
1038 			!strcmp (o->vtable->klass->name_space, "System.IO") &&
1039 			!strcmp (o->vtable->klass->name, "MemoryStream"))
1041 	/* append_job() in threadpool.c */
1042 	if (!strcmp (ref->vtable->klass->name_space, "System.Runtime.Remoting.Messaging") &&
1043 			!strcmp (ref->vtable->klass->name, "AsyncResult") &&
1044 			!strcmp (o->vtable->klass->name_space, "System") &&
1045 			!strcmp (o->vtable->klass->name, "Object[]") &&
1046 			mono_thread_pool_is_queue_array ((MonoArray*) o))
/*
 * check_reference_for_xdomain:
 * Debug helper: reports a reference stored in @obj at @ptr that points to
 * an object in a different appdomain, unless whitelisted by
 * is_xdomain_ref_allowed().  Prints the owning field (found by offset) and
 * scans for other referrers.
 * NOTE(review): this listing is elided; several body lines are not shown.
 */
1052 check_reference_for_xdomain (gpointer *ptr, char *obj, MonoDomain *domain)
1054 	MonoObject *o = (MonoObject*)(obj);
1055 	MonoObject *ref = (MonoObject*)*(ptr);
1056 	int offset = (char*)(ptr) - (char*)o;
1058 	MonoClassField *field;
/* same-domain or null references are fine */
1061 	if (!ref || ref->vtable->domain == domain)
1063 	if (is_xdomain_ref_allowed (ptr, obj, domain))
/* find the field that holds this offset, walking up the class hierarchy */
1067 	for (class = o->vtable->klass; class; class = class->parent) {
1070 		for (i = 0; i < class->field.count; ++i) {
1071 			if (class->fields[i].offset == offset) {
1072 				field = &class->fields[i];
1080 	if (ref->vtable->klass == mono_defaults.string_class)
1081 		str = mono_string_to_utf8 ((MonoString*)ref);
1084 	g_print ("xdomain reference in %p (%s.%s) at offset %d (%s) to %p (%s.%s) (%s)  -  pointed to by:\n",
1085 			o, o->vtable->klass->name_space, o->vtable->klass->name,
1086 			offset, field ? field->name : "",
1087 			ref, ref->vtable->klass->name_space, ref->vtable->klass->name, str ? str : "");
1088 	mono_gc_scan_for_specific_ref (o);
/* Instantiate the generic object scanner (sgen-scan-object.h) so that every
 * reference slot in the object is checked for cross-domain pointers. */
1094 #define HANDLE_PTR(ptr,obj)	check_reference_for_xdomain ((ptr), (obj), domain)
1097 scan_object_for_xdomain_refs (char *start, mword size, void *data)
1099 	MonoDomain *domain = ((MonoObject*)start)->vtable->domain;
1101 #include "sgen-scan-object.h"
/* Instantiate the generic object scanner to report every slot in an object
 * that points to @key (debugging aid). */
1105 #define HANDLE_PTR(ptr,obj) do {		\
1106 	if ((MonoObject*)*(ptr) == key) {	\
1107 	g_print ("found ref to %p in object %p (%s) at offset %td\n",	\
1108 			key, (obj), safe_name ((obj)), ((char*)(ptr) - (char*)(obj))); \
1113 scan_object_for_specific_ref (char *start, MonoObject *key)
1115 #include "sgen-scan-object.h"
/*
 * mono_sgen_scan_area_with_callback:
 * Walks a contiguous heap area object by object, skipping zeroed (unused)
 * words, and invokes @callback (obj, size, data) for each object found.
 * NOTE(review): this listing is elided; some body lines are not shown.
 */
1119 mono_sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data)
1121 	while (start < end) {
1123 		if (!*(void**)start) {
1124 			start += sizeof (void*); /* should be ALLOC_ALIGN, really */
1128 		size = ALIGN_UP (safe_object_get_size ((MonoObject*) start));
1130 		callback (start, size, data);
/* Adapter so scan_object_for_specific_ref can be used as an
 * IterateObjectCallbackFunc (the size argument is ignored). */
1137 scan_object_for_specific_ref_callback (char *obj, size_t size, MonoObject *key)
1139 	scan_object_for_specific_ref (obj, key);
/* Reports a root record that contains a reference to @key
 * (debugging aid; the comparison itself is in an elided line). */
1143 check_root_obj_specific_ref (RootRecord *root, MonoObject *key, MonoObject *obj)
1147 	g_print ("found ref to %p in root record %p\n", key, root);
/* Scratch state used to pass the key/root being checked into the
 * user-marker callback below (not thread-safe; debug-only path). */
1150 static MonoObject *check_key = NULL;
1151 static RootRecord *check_root = NULL;
/* MonoGCRootMarkFunc-compatible wrapper around check_root_obj_specific_ref. */
1154 check_root_obj_specific_ref_from_marker (void **obj)
1156 	check_root_obj_specific_ref (check_root, check_key, *obj);
/*
 * scan_roots_for_specific_ref:
 * Walks every registered root of @root_type, decoding each root descriptor
 * (bitmap, complex or user-marker), and reports any slot that references
 * @key.  Debugging aid used by mono_gc_scan_for_specific_ref.
 * NOTE(review): this listing is elided; some body lines are not shown.
 */
1160 scan_roots_for_specific_ref (MonoObject *key, int root_type)
1165 	for (i = 0; i < roots_hash_size [root_type]; ++i) {
1166 		for (root = roots_hash [root_type][i]; root; root = root->next) {
1167 			void **start_root = (void**)root->start_root;
1168 			mword desc = root->root_desc;
1172 			switch (desc & ROOT_DESC_TYPE_MASK) {
1173 			case ROOT_DESC_BITMAP:
1174 				desc >>= ROOT_DESC_TYPE_SHIFT;
1177 						check_root_obj_specific_ref (root, key, *start_root);
1182 			case ROOT_DESC_COMPLEX: {
1183 				gsize *bitmap_data = complex_descriptors + (desc >> ROOT_DESC_TYPE_SHIFT);
/* first word of a complex descriptor is its length; the rest are bitmap words */
1184 				int bwords = (*bitmap_data) - 1;
1185 				void **start_run = start_root;
1187 				while (bwords-- > 0) {
1188 					gsize bmap = *bitmap_data++;
1189 					void **objptr = start_run;
1192 							check_root_obj_specific_ref (root, key, *objptr);
1196 					start_run += GC_BITS_PER_WORD;
1200 			case ROOT_DESC_USER: {
1201 				MonoGCRootMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
1202 				marker (start_root, check_root_obj_specific_ref_from_marker);
1205 			case ROOT_DESC_RUN_LEN:
1206 				g_assert_not_reached ();
1208 				g_assert_not_reached ();
/*
 * mono_gc_scan_for_specific_ref:
 * Debugging entry point: scans the nursery, the major heap, the LOS list
 * and all registered roots, printing every location that references @key.
 * NOTE(review): this listing is elided; some body lines are not shown.
 */
1217 mono_gc_scan_for_specific_ref (MonoObject *key)
1223 	mono_sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
1224 			(IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
1226 	major.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
1228 	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
1229 		scan_object_for_specific_ref (bigobj->data, key);
1231 	scan_roots_for_specific_ref (key, ROOT_TYPE_NORMAL);
1232 	scan_roots_for_specific_ref (key, ROOT_TYPE_WBARRIER);
/* pinned roots have no descriptor: scan them word by word */
1234 	for (i = 0; i < roots_hash_size [ROOT_TYPE_PINNED]; ++i) {
1235 		for (root = roots_hash [ROOT_TYPE_PINNED][i]; root; root = root->next) {
1236 			void **ptr = (void**)root->start_root;
1238 			while (ptr < (void**)root->end_root) {
1239 				check_root_obj_specific_ref (root, *ptr, key);
1246 /* Clear all remaining nursery fragments */
/* Zeroes the unused tail of the current allocation area (from @next) and
 * every free nursery fragment, when the clear-at-TLAB-creation policy is
 * not in effect (i.e. memory must be zeroed eagerly here). */
1248 clear_nursery_fragments (char *next)
1251 	if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
1252 		g_assert (next <= nursery_frag_real_end);
1253 		memset (next, 0, nursery_frag_real_end - next);
1254 		for (frag = nursery_fragments; frag; frag = frag->next) {
1255 			memset (frag->fragment_start, 0, frag->fragment_end - frag->fragment_start);
/* Returns whether the object at @start belongs to the domain being
 * unloaded and must therefore be removed from the heap. */
1261 need_remove_object_for_domain (char *start, MonoDomain *domain)
1263 	if (mono_object_domain (start) == domain) {
1264 		DEBUG (4, fprintf (gc_debug_file, "Need to cleanup object %p\n", start));
1265 		binary_protocol_cleanup (start, (gpointer)LOAD_VTABLE (start), safe_object_get_size ((MonoObject*)start));
/*
 * process_object_for_domain_clearing:
 * Pre-pass over a surviving object during appdomain unload: asserts that
 * thread objects live in the root domain, and nulls out a real proxy's
 * unwrapped_server when the server belongs to the dying domain (or was
 * already zeroed out).
 */
1272 process_object_for_domain_clearing (char *start, MonoDomain *domain)
1274 	GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
1275 	if (vt->klass == mono_defaults.internal_thread_class)
1276 		g_assert (mono_object_domain (start) == mono_get_root_domain ());
1277 	/* The object could be a proxy for an object in the domain
1279 	if (mono_class_has_parent (vt->klass, mono_defaults.real_proxy_class)) {
1280 		MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
1282 		/* The server could already have been zeroed out, so
1283 		   we need to check for that, too. */
1284 		if (server && (!LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
1285 			DEBUG (4, fprintf (gc_debug_file, "Cleaning up remote pointer in %p to object %p\n",
1287 			((MonoRealProxy*)start)->unwrapped_server = NULL;
/* Domain currently being checked by scan_for_registered_roots_in_domain
 * (scratch state; set around the scan, cleared afterwards). */
1292 static MonoDomain *check_domain = NULL;
/* Asserts that the object referenced by root slot @o is not in check_domain. */
1295 check_obj_not_in_domain (void **o)
1297 	g_assert (((MonoObject*)(*o))->vtable->domain != check_domain);
/*
 * scan_for_registered_roots_in_domain:
 * Debug check used during appdomain unload: walks every registered root of
 * @root_type and asserts (via check_obj_not_in_domain) that no root still
 * references an object of @domain.  The MonoDomain struct itself is exempt.
 * NOTE(review): this listing is elided; some body lines are not shown.
 */
1301 scan_for_registered_roots_in_domain (MonoDomain *domain, int root_type)
1305 	check_domain = domain;
1306 	for (i = 0; i < roots_hash_size [root_type]; ++i) {
1307 		for (root = roots_hash [root_type][i]; root; root = root->next) {
1308 			void **start_root = (void**)root->start_root;
1309 			mword desc = root->root_desc;
1311 			/* The MonoDomain struct is allowed to hold
1312 			   references to objects in its own domain. */
1313 			if (start_root == (void**)domain)
1316 			switch (desc & ROOT_DESC_TYPE_MASK) {
1317 			case ROOT_DESC_BITMAP:
1318 				desc >>= ROOT_DESC_TYPE_SHIFT;
1320 					if ((desc & 1) && *start_root)
1321 						check_obj_not_in_domain (*start_root);
1326 			case ROOT_DESC_COMPLEX: {
1327 				gsize *bitmap_data = complex_descriptors + (desc >> ROOT_DESC_TYPE_SHIFT);
/* first word of a complex descriptor is its length; the rest are bitmap words */
1328 				int bwords = (*bitmap_data) - 1;
1329 				void **start_run = start_root;
1331 				while (bwords-- > 0) {
1332 					gsize bmap = *bitmap_data++;
1333 					void **objptr = start_run;
1335 						if ((bmap & 1) && *objptr)
1336 							check_obj_not_in_domain (*objptr);
1340 					start_run += GC_BITS_PER_WORD;
1344 			case ROOT_DESC_USER: {
1345 				MonoGCRootMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
1346 				marker (start_root, check_obj_not_in_domain);
1349 			case ROOT_DESC_RUN_LEN:
1350 				g_assert_not_reached ();
1352 				g_assert_not_reached ();
1356 	check_domain = NULL;
/* Debug check: scans nursery, major heap and LOS objects for references
 * that cross appdomain boundaries (see scan_object_for_xdomain_refs). */
1360 check_for_xdomain_refs (void)
1364 	mono_sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
1365 			(IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);
1367 	major.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);
1369 	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
1370 		scan_object_for_xdomain_refs (bigobj->data, bigobj->size, NULL);
/*
 * clear_domain_process_object:
 * Per-object step of domain clearing: runs the proxy/thread pre-pass,
 * decides whether the object must be removed, and if so unregisters the
 * monitor weak link of a locked object.  Returns the removal decision.
 * NOTE(review): this listing is elided; some body lines are not shown.
 */
1374 clear_domain_process_object (char *obj, MonoDomain *domain)
1378 	process_object_for_domain_clearing (obj, domain);
1379 	remove = need_remove_object_for_domain (obj, domain);
1381 	if (remove && ((MonoObject*)obj)->synchronisation) {
1382 		void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
1384 			mono_gc_register_disappearing_link (NULL, dislink, FALSE);
/* Nursery callback: objects of the dying domain are zeroed in place
 * (nursery memory is not returned to the OS). */
1391 clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
1393 	if (clear_domain_process_object (obj, domain))
1394 		memset (obj, 0, size);
/* Major-heap first pass: only runs the processing step; actual freeing
 * happens in the dedicated second-pass callbacks below. */
1398 clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
1400 	clear_domain_process_object (obj, domain);
/* Major-heap second pass: frees non-pinned objects of the dying domain. */
1404 clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
1406 	if (need_remove_object_for_domain (obj, domain))
1407 		major.free_non_pinned_object (obj, size);
/* Major-heap second pass: frees pinned objects of the dying domain. */
1411 clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
1413 	if (need_remove_object_for_domain (obj, domain))
1414 		major.free_pinned_object (obj, size);
1418  * When appdomains are unloaded we can easily remove objects that have finalizers,
1419  * but all the others could still be present in random places on the heap.
1420  * We need a sweep to get rid of them even though it's going to be costly
1422  * The reason we need to remove them is because we access the vtable and class
1423  * structures to know the object size and the reference bitmap: once the domain is
1424  * unloaded they point to random memory.
/*
 * mono_gc_clear_domain:
 * Sweeps all heaps (nursery, major, LOS) and removes every object that
 * belongs to @domain; also nulls ephemerons and disappearing links for the
 * domain before LOS memory can be returned to the OS.
 * NOTE(review): this listing is elided; some body lines are not shown.
 */
1427 mono_gc_clear_domain (MonoDomain * domain)
1429 	LOSObject *bigobj, *prev;
1434 	clear_nursery_fragments (nursery_next);
1436 	if (xdomain_checks && domain != mono_get_root_domain ()) {
1437 		scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
1438 		scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
1439 		check_for_xdomain_refs ();
1442 	mono_sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
1443 			(IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain);
1445 	/*Ephemerons and dislinks must be processed before LOS since they might end up pointing
1446 	to memory returned to the OS.*/
1447 	null_ephemerons_for_domain (domain);
1449 	for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
1450 		null_links_for_domain (domain, i);
1452 	/* We need two passes over major and large objects because
1453 	   freeing such objects might give their memory back to the OS
1454 	   (in the case of large objects) or obliterate its vtable
1455 	   (pinned objects with major-copying or pinned and non-pinned
1456 	   objects with major-mark&sweep), but we might need to
1457 	   dereference a pointer from an object to another object if
1458 	   the first object is a proxy. */
1459 	major.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
1460 	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
1461 		clear_domain_process_object (bigobj->data, domain);
/* second pass: unlink and free LOS objects of the dying domain */
1464 	for (bigobj = los_object_list; bigobj;) {
1465 		if (need_remove_object_for_domain (bigobj->data, domain)) {
1466 			LOSObject *to_free = bigobj;
1468 				prev->next = bigobj->next;
1470 				los_object_list = bigobj->next;
1471 			bigobj = bigobj->next;
1472 			DEBUG (4, fprintf (gc_debug_file, "Freeing large object %p\n",
1474 			free_large_object (to_free);
1478 		bigobj = bigobj->next;
1480 	major.iterate_objects (TRUE, FALSE, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
1481 	major.iterate_objects (FALSE, TRUE, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
/* Resets the small LRU cache of recently added global remset locations. */
1487 global_remset_cache_clear (void)
1489 	memset (global_remset_cache, 0, sizeof (global_remset_cache));
1493  * Tries to check if a given remset location was already added to the global remset.
1496  * A 2 entry, LRU cache of recently saw location remsets.
1498  * It's hand-coded instead of done using loops to reduce the number of memory references on cache hit.
1500  * Returns TRUE if the element was added.
/* NOTE(review): the visible code handles the cache-hit paths; the early
 * returns between the checks are elided in this listing. */
1503 global_remset_location_was_not_added (gpointer ptr)
1506 	gpointer first = global_remset_cache [0], second;
1508 		HEAVY_STAT (++stat_global_remsets_discarded);
1512 	second = global_remset_cache [1];
1514 	if (second == ptr) {
1515 		/*Move the second to the front*/
1516 		global_remset_cache [0] = second;
1517 		global_remset_cache [1] = first;
1519 		HEAVY_STAT (++stat_global_remsets_discarded);
/* cache miss: install @ptr as most-recently-used, evicting the oldest */
1523 	global_remset_cache [0] = second;
1524 	global_remset_cache [1] = ptr;
1529  * mono_sgen_add_to_global_remset:
1531  * The global remset contains locations which point into newspace after
1532  * a minor collection. This can happen if the objects they point to are pinned.
1534  * LOCKING: If called from a parallel collector, the global remset
1535  * lock must be held. For serial collectors that is not necessary.
/* NOTE(review): this listing is elided; some body lines are not shown. */
1538 mono_sgen_add_to_global_remset (gpointer ptr)
/* card tables replace the explicit remset entirely */
1543 	if (use_cardtable) {
1544 		sgen_card_table_mark_address ((mword)ptr);
1548 	g_assert (!ptr_in_nursery (ptr) && ptr_in_nursery (*(gpointer*)ptr));
1550 	lock = (current_collection_generation == GENERATION_OLD && major.is_parallel);
/* skip locations we already recorded (small LRU cache) */
1554 	if (!global_remset_location_was_not_added (ptr))
1557 	DEBUG (8, fprintf (gc_debug_file, "Adding global remset for %p\n", ptr));
1558 	binary_protocol_global_remset (ptr, *(gpointer*)ptr, (gpointer)LOAD_VTABLE (*(gpointer*)ptr));
1560 	HEAVY_STAT (++stat_global_remsets_added);
1563 	 * FIXME: If an object remains pinned, we need to add it at every minor collection.
1564 	 * To avoid uncontrolled growth of the global remset, only add each pointer once.
1566 	if (global_remset->store_next + 3 < global_remset->end_set) {
1567 		*(global_remset->store_next++) = (mword)ptr;
/* current remset buffer is full: chain a new one of the same size */
1570 	rs = alloc_remset (global_remset->end_set - global_remset->data, NULL);
1571 	rs->next = global_remset;
1573 	*(global_remset->store_next++) = (mword)ptr;
1576 		int global_rs_size = 0;
1578 		for (rs = global_remset; rs; rs = rs->next) {
1579 			global_rs_size += rs->store_next - rs->data;
1581 		DEBUG (4, fprintf (gc_debug_file, "Global remset now has size %d\n", global_rs_size));
1586 		UNLOCK_GLOBAL_REMSET;
1592  * Scan objects in the gray stack until the stack is empty. This should be called
1593  * frequently after each object is copied, to achieve better locality and cache
/* NOTE(review): this listing is elided; the dequeue loops' bodies are
 * partially shown. */
1597 drain_gray_stack (GrayQueue *queue)
1601 	if (current_collection_generation == GENERATION_NURSERY) {
1603 			GRAY_OBJECT_DEQUEUE (queue, obj);
1606 			DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", obj, safe_name (obj)));
1607 			major.minor_scan_object (obj, queue);
/* in parallel mode the distribute queue is drained by the workers */
1610 		if (major.is_parallel && queue == &workers_distribute_gray_queue)
1614 			GRAY_OBJECT_DEQUEUE (queue, obj);
1617 			DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", obj, safe_name (obj)));
1618 			major.major_scan_object (obj, queue);
1624  * Addresses from start to end are already sorted. This function finds
1625  * the object header for each address and pins the object. The
1626  * addresses must be inside the passed section. The (start of the)
1627  * address array is overwritten with the addresses of the actually
1628  * pinned objects. Return the number of pinned objects.
/* NOTE(review): this listing is elided; some body lines are not shown. */
1631 pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, GrayQueue *queue)
1636 	void *last_obj = NULL;
1637 	size_t last_obj_size = 0;
1640 	void **definitely_pinned = start;
1641 	while (start < end) {
1643 		/* the range check should be redundant */
1644 		if (addr != last && addr >= start_nursery && addr < end_nursery) {
1645 			DEBUG (5, fprintf (gc_debug_file, "Considering pinning addr %p\n", addr));
1646 			/* multiple pointers to the same object */
1647 			if (addr >= last_obj && (char*)addr < (char*)last_obj + last_obj_size) {
/* use the per-section scan_starts table to find a nearby earlier object */
1651 			idx = ((char*)addr - (char*)section->data) / SCAN_START_SIZE;
1652 			g_assert (idx < section->num_scan_start);
1653 			search_start = (void*)section->scan_starts [idx];
1654 			if (!search_start || search_start > addr) {
1657 					search_start = section->scan_starts [idx];
1658 					if (search_start && search_start <= addr)
1661 				if (!search_start || search_start > addr)
1662 					search_start = start_nursery;
1664 			if (search_start < last_obj)
1665 				search_start = (char*)last_obj + last_obj_size;
1666 			/* now addr should be in an object a short distance from search_start
1667 			 * Note that search_start must point to zeroed mem or point to an object.
/* walk object-by-object toward addr, skipping zeroed (unused) words */
1670 				if (!*(void**)search_start) {
1671 					search_start = (void*)ALIGN_UP ((mword)search_start + sizeof (gpointer));
1674 				last_obj = search_start;
1675 				last_obj_size = ALIGN_UP (safe_object_get_size ((MonoObject*)search_start));
1676 				DEBUG (8, fprintf (gc_debug_file, "Pinned try match %p (%s), size %zd\n", last_obj, safe_name (last_obj), last_obj_size));
1677 				if (addr >= search_start && (char*)addr < (char*)last_obj + last_obj_size) {
1678 					DEBUG (4, fprintf (gc_debug_file, "Pinned object %p, vtable %p (%s), count %d\n", search_start, *(void**)search_start, safe_name (search_start), count));
1679 					binary_protocol_pin (search_start, (gpointer)LOAD_VTABLE (search_start), safe_object_get_size (search_start));
1680 					pin_object (search_start);
1681 					GRAY_OBJECT_ENQUEUE (queue, search_start);
1683 						mono_sgen_pin_stats_register_object (search_start, last_obj_size);
1684 					definitely_pinned [count] = search_start;
1688 				/* skip to the next object */
1689 				search_start = (void*)((char*)search_start + last_obj_size);
1690 			} while (search_start <= addr);
1691 			/* we either pinned the correct object or we ignored the addr because
1692 			 * it points to unused zeroed memory.
1698 	//printf ("effective pinned: %d (at the end: %d)\n", count, (char*)end_nursery - (char*)last);
/* Pins all objects addressed by the section's pin queue and shrinks the
 * queue to the entries that actually matched an object. */
1703 mono_sgen_pin_objects_in_section (GCMemSection *section, GrayQueue *queue)
1705 	int num_entries = section->pin_queue_num_entries;
1707 	void **start = section->pin_queue_start;
1709 	reduced_to = pin_objects_from_addresses (section, start, start + num_entries,
1710 			section->data, section->next_data, queue);
1711 	section->pin_queue_num_entries = reduced_to;
1713 		section->pin_queue_start = NULL;
1717 /* Sort the addresses in array in increasing order.
1718  * Done using a by-the book heap sort. Which has decent and stable performance, is pretty cache efficient.
/* NOTE(review): this listing is elided; some lines of the heapify/sift
 * phases are not shown. */
1721 sort_addresses (void **array, int size)
/* phase 1: build a max-heap by sifting each element up */
1726 	for (i = 1; i < size; ++i) {
1729 			int parent = (child - 1) / 2;
1731 			if (array [parent] >= array [child])
1734 			tmp = array [parent];
1735 			array [parent] = array [child];
1736 			array [child] = tmp;
/* phase 2: repeatedly move the max to the end and sift the root down */
1742 	for (i = size - 1; i > 0; --i) {
1745 		array [i] = array [0];
1751 		while (root * 2 + 1 <= end) {
1752 			int child = root * 2 + 1;
1754 			if (child < end && array [child] < array [child + 1])
1756 			if (array [root] >= array [child])
1760 			array [root] = array [child];
1761 			array [child] = tmp;
/* Debug helper: prints the free ranges between consecutive pinned
 * addresses in the nursery (unused in normal builds). */
1768 static G_GNUC_UNUSED void
1769 print_nursery_gaps (void* start_nursery, void *end_nursery)
1772 	gpointer first = start_nursery;
1774 	for (i = 0; i < next_pin_slot; ++i) {
1775 		next = pin_queue [i];
1776 		fprintf (gc_debug_file, "Nursery range: %p-%p, size: %td\n", first, next, (char*)next-(char*)first);
1780 	fprintf (gc_debug_file, "Nursery range: %p-%p, size: %td\n", first, next, (char*)next-(char*)first);
1783 /* reduce the info in the pin queue, removing duplicate pointers and sorting them */
/* NOTE(review): this listing is elided; the dedup loop is partially shown. */
1785 optimize_pin_queue (int start_slot)
1787 	void **start, **cur, **end;
1788 	/* sort and uniq pin_queue: we just sort and we let the rest discard multiple values */
1789 	/* it may be better to keep ranges of pinned memory instead of individually pinning objects */
1790 	DEBUG (5, fprintf (gc_debug_file, "Sorting pin queue, size: %d\n", next_pin_slot));
1791 	if ((next_pin_slot - start_slot) > 1)
1792 		sort_addresses (pin_queue + start_slot, next_pin_slot - start_slot);
1793 	start = cur = pin_queue + start_slot;
1794 	end = pin_queue + next_pin_slot;
/* skip over runs of equal (duplicate) addresses */
1797 		while (*start == *cur && cur < end)
1801 	next_pin_slot = start - pin_queue;
1802 	DEBUG (5, fprintf (gc_debug_file, "Pin queue reduced to size: %d\n", next_pin_slot));
1803 	//DEBUG (6, print_nursery_gaps (start_nursery, end_nursery));
1808  * Scan the memory between start and end and queue values which could be pointers
1809  * to the area between start_nursery and end_nursery for later consideration.
1810  * Typically used for thread stacks.
/* NOTE(review): this listing is elided; some body lines are not shown. */
1813 conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type)
1816 	while (start < end) {
1817 		if (*start >= start_nursery && *start < end_nursery) {
1819 			 * *start can point to the middle of an object
1820 			 * note: should we handle pointing at the end of an object?
1821 			 * pinning in C# code disallows pointing at the end of an object
1822 			 * but there is some small chance that an optimizing C compiler
1823 			 * may keep the only reference to an object by pointing
1824 			 * at the end of it. We ignore this small chance for now.
1825 			 * Pointers to the end of an object are indistinguishable
1826 			 * from pointers to the start of the next object in memory
1827 			 * so if we allow that we'd need to pin two objects...
1828 			 * We queue the pointer in an array, the
1829 			 * array will then be sorted and uniqued. This way
1830 			 * we can coalesce several pinning pointers and it should
1831 			 * be faster since we'd do a memory scan with increasing
1832 			 * addresses. Note: we can align the address to the allocation
1833 			 * alignment, so the unique process is more effective.
1835 			mword addr = (mword)*start;
1836 			addr &= ~(ALLOC_ALIGN - 1);
1837 			if (addr >= (mword)start_nursery && addr < (mword)end_nursery)
1838 				pin_stage_ptr ((void*)addr);
1840 				pin_stats_register_address ((char*)addr, pin_type);
1841 			DEBUG (6, if (count) fprintf (gc_debug_file, "Pinning address %p\n", (void*)addr));
1846 	DEBUG (7, if (count) fprintf (gc_debug_file, "found %d potential pinned heap pointers\n", count));
1850  * Debugging function: find in the conservative roots where @obj is being pinned.
1852 static G_GNUC_UNUSED void
1853 find_pinning_reference (char *obj, size_t size)
1857 	char *endobj = obj + size;
/* only conservative (descriptor-less) roots in bucket 0 are scanned here */
1858 	for (i = 0; i < roots_hash_size [0]; ++i) {
1859 		for (root = roots_hash [0][i]; root; root = root->next) {
1860 			/* if desc is non-null it has precise info */
1861 			if (!root->root_desc) {
1862 				char ** start = (char**)root->start_root;
1863 				while (start < (char**)root->end_root) {
1864 					if (*start >= obj && *start < endobj) {
1865 						DEBUG (0, fprintf (gc_debug_file, "Object %p referenced in pinned roots %p-%p (at %p in record %p)\n", obj, root->start_root, root->end_root, start, root));
/* also check the suspended thread stacks/registers */
1872 	find_pinning_ref_from_thread (obj, size);
1876  * The first thing we do in a collection is to identify pinned objects.
1877  * This function considers all the areas of memory that need to be
1878  * conservatively scanned.
/* NOTE(review): this listing is elided; some body lines are not shown. */
1881 pin_from_roots (void *start_nursery, void *end_nursery)
1885 	DEBUG (2, fprintf (gc_debug_file, "Scanning pinned roots (%d bytes, %d/%d entries)\n", (int)roots_size, num_roots_entries [ROOT_TYPE_NORMAL], num_roots_entries [ROOT_TYPE_PINNED]));
1886 	/* objects pinned from the API are inside these roots */
1887 	for (i = 0; i < roots_hash_size [ROOT_TYPE_PINNED]; ++i) {
1888 		for (root = roots_hash [ROOT_TYPE_PINNED][i]; root; root = root->next) {
1889 			DEBUG (6, fprintf (gc_debug_file, "Pinned roots %p-%p\n", root->start_root, root->end_root));
1890 			conservatively_pin_objects_from ((void**)root->start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
1893 	/* now deal with the thread stacks
1894 	 * in the future we should be able to conservatively scan only:
1895 	 * *) the cpu registers
1896 	 * *) the unmanaged stack frames
1897 	 * *) the _last_ managed stack frame
1898 	 * *) pointers slots in managed frames
1900 	scan_thread_data (start_nursery, end_nursery, FALSE);
1902 	evacuate_pin_staging_area ();
/* Scratch state so a two-argument copy/mark function can be invoked through
 * the single-argument MonoGCRootMarkFunc interface (not thread-safe). */
1905 static CopyOrMarkObjectFunc user_copy_or_mark_func;
1906 static GrayQueue *user_copy_or_mark_queue;
/* MonoGCRootMarkFunc-compatible trampoline to user_copy_or_mark_func. */
1909 single_arg_user_copy_or_mark (void **obj)
1911 	user_copy_or_mark_func (obj, user_copy_or_mark_queue);
1915  * The memory area from start_root to end_root contains pointers to objects.
1916  * Their position is precisely described by @desc (this means that the pointer
1917  * can be either NULL or the pointer to the start of an object).
1918  * This function copies them to to_space and updates them.
1920  * This function is not thread-safe!
/* NOTE(review): this listing is elided; some body lines are not shown. */
1923 precisely_scan_objects_from (CopyOrMarkObjectFunc copy_func, void** start_root, void** end_root, char* n_start, char *n_end, mword desc, GrayQueue *queue)
1925 	switch (desc & ROOT_DESC_TYPE_MASK) {
1926 	case ROOT_DESC_BITMAP:
1927 		desc >>= ROOT_DESC_TYPE_SHIFT;
1929 			if ((desc & 1) && *start_root) {
1930 				copy_func (start_root, queue);
1931 				DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", start_root, *start_root));
1932 				drain_gray_stack (queue);
1938 	case ROOT_DESC_COMPLEX: {
1939 		gsize *bitmap_data = complex_descriptors + (desc >> ROOT_DESC_TYPE_SHIFT);
/* first word of a complex descriptor is its length; the rest are bitmap words */
1940 		int bwords = (*bitmap_data) - 1;
1941 		void **start_run = start_root;
1943 		while (bwords-- > 0) {
1944 			gsize bmap = *bitmap_data++;
1945 			void **objptr = start_run;
1947 				if ((bmap & 1) && *objptr) {
1948 					copy_func (objptr, queue);
1949 					DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", objptr, *objptr));
1950 					drain_gray_stack (queue);
1955 			start_run += GC_BITS_PER_WORD;
1959 	case ROOT_DESC_USER: {
1960 		MonoGCRootMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
1961 		user_copy_or_mark_func = copy_func;
1962 		user_copy_or_mark_queue = queue;
1963 		marker (start_root, single_arg_user_copy_or_mark);
1964 		user_copy_or_mark_func = NULL;
1965 		user_copy_or_mark_queue = NULL;
1968 	case ROOT_DESC_RUN_LEN:
1969 		g_assert_not_reached ();
1971 		g_assert_not_reached ();
/*
 * mono_sgen_update_heap_boundaries:
 * Widen the global [lowest_heap_address, highest_heap_address] range to
 * include [low, high].  Each bound is updated with a CAS retry loop so
 * concurrent updaters cannot lose each other's extensions.
 */
1976 mono_sgen_update_heap_boundaries (mword low, mword high)
1981 old = lowest_heap_address;
1984 } while (SGEN_CAS_PTR ((gpointer*)&lowest_heap_address, (gpointer)low, (gpointer)old) != (gpointer)old);
1987 old = highest_heap_address;
1990 } while (SGEN_CAS_PTR ((gpointer*)&highest_heap_address, (gpointer)high, (gpointer)old) != (gpointer)old);
/* Return a Fragment taken from the freelist if one is available, otherwise
 * allocate a fresh one from the internal allocator. */
1994 alloc_fragment (void)
1996 Fragment *frag = fragment_freelist;
1998 fragment_freelist = frag->next;
2002 frag = mono_sgen_alloc_internal (INTERNAL_MEM_FRAGMENT);
2007 /* size must be a power of 2 */
/*
 * mono_sgen_alloc_os_memory_aligned:
 * Allocate @size bytes from the OS at an address aligned to @alignment,
 * by over-allocating and returning the unused head and tail to the OS.
 */
2009 mono_sgen_alloc_os_memory_aligned (mword size, mword alignment, gboolean activate)
2011 /* Over-allocate by the alignment so an aligned block of the requested size is guaranteed to fit */
2012 char *mem = mono_sgen_alloc_os_memory (size + alignment, activate);
/* round up to the next alignment boundary */
2017 aligned = (char*)((mword)(mem + (alignment - 1)) & ~(alignment - 1));
2018 g_assert (aligned >= mem && aligned + size <= mem + size + alignment && !((mword)aligned & (alignment - 1)));
/* give back the unaligned head before the aligned start... */
2021 mono_sgen_free_os_memory (mem, aligned - mem);
/* ...and the leftover tail after aligned + size, if any */
2022 if (aligned + size < mem + size + alignment)
2023 mono_sgen_free_os_memory (aligned + size, (mem + size + alignment) - (aligned + size));
2029 * Allocate and setup the data structures needed to be able to allocate objects
2030 * in the nursery. The nursery is stored in nursery_section.
2033 alloc_nursery (void)
2035 GCMemSection *section;
/* already initialized — the nursery is allocated only once */
2041 if (nursery_section)
2043 DEBUG (2, fprintf (gc_debug_file, "Allocating nursery size: %lu\n", (unsigned long)nursery_size));
2044 /* later we will alloc a larger area for the nursery but only activate
2045 * what we need. The rest will be used as expansion if we have too many pinned
2046 * objects in the existing nursery.
2048 /* FIXME: handle OOM */
2049 section = mono_sgen_alloc_internal (INTERNAL_MEM_SECTION);
2051 g_assert (nursery_size == DEFAULT_NURSERY_SIZE);
2052 alloc_size = nursery_size;
2053 #ifdef SGEN_ALIGN_NURSERY
/* aligned nursery: request the heap block aligned to its own size so
 * ptr_in_nursery-style checks can use masking */
2054 data = major.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
2056 data = major.alloc_heap (alloc_size, 0, DEFAULT_NURSERY_BITS);
2058 nursery_start = data;
2059 nursery_real_end = nursery_start + nursery_size;
2060 mono_sgen_update_heap_boundaries ((mword)nursery_start, (mword)nursery_real_end);
2061 nursery_next = nursery_start;
2062 DEBUG (4, fprintf (gc_debug_file, "Expanding nursery size (%p-%p): %lu, total: %lu\n", data, data + alloc_size, (unsigned long)nursery_size, (unsigned long)total_alloc));
2063 section->data = section->next_data = data;
2064 section->size = alloc_size;
2065 section->end_data = nursery_real_end;
/* one scan-start slot per SCAN_START_SIZE bytes, rounded up */
2066 scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
2067 section->scan_starts = mono_sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS);
2068 section->num_scan_start = scan_starts;
2069 section->block.role = MEMORY_ROLE_GEN0;
2070 section->block.next = NULL;
2072 nursery_section = section;
2074 /* Setup the single first large fragment */
2075 frag = alloc_fragment ();
2076 frag->fragment_start = nursery_start;
2077 frag->fragment_limit = nursery_start;
2078 frag->fragment_end = nursery_real_end;
2079 nursery_frag_real_end = nursery_real_end;
2080 /* FIXME: frag here is lost */
/*
 * mono_gc_get_nursery:
 * Return the nursery start address; report the nursery size through @size
 * and, when the nursery is alignment-allocated, the alignment shift
 * (DEFAULT_NURSERY_BITS) through @shift_bits.
 */
2084 mono_gc_get_nursery (int *shift_bits, size_t *size)
2086 *size = nursery_size;
2087 #ifdef SGEN_ALIGN_NURSERY
2088 *shift_bits = DEFAULT_NURSERY_BITS;
2092 return nursery_start;
/* Apply @copy_func to every object in the finalizer entry list @list,
 * updating each entry's object pointer in place so finalizable objects
 * (and what they reference, via the gray queue) survive the collection. */
2096 scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeEntry *list, GrayQueue *queue)
2100 for (fin = list; fin; fin = fin->next) {
2103 DEBUG (5, fprintf (gc_debug_file, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object)));
2104 copy_func (&fin->object, queue);
/* running total of free fragment bytes found in the nursery (debug/statistics) */
2108 static mword fragment_total = 0;
2110 * We found a fragment of free memory in the nursery: memzero it and if
2111 * it is big enough, add it to the list of fragments that can be used for
2115 add_nursery_frag (size_t frag_size, char* frag_start, char* frag_end)
2118 DEBUG (4, fprintf (gc_debug_file, "Found empty fragment: %p-%p, size: %zd\n", frag_start, frag_end, frag_size));
2119 binary_protocol_empty (frag_start, frag_size);
2120 /* memsetting just the first chunk start is bound to provide better cache locality */
2121 if (nursery_clear_policy == CLEAR_AT_GC)
2122 memset (frag_start, 0, frag_size);
2123 /* Not worth dealing with smaller fragments: need to tune */
2124 if (frag_size >= FRAGMENT_MIN_SIZE) {
2125 fragment = alloc_fragment ();
2126 fragment->fragment_start = frag_start;
2127 fragment->fragment_limit = frag_start;
2128 fragment->fragment_end = frag_end;
/* push onto the nursery fragment list used by the allocator */
2129 fragment->next = nursery_fragments;
2130 nursery_fragments = fragment;
2131 fragment_total += frag_size;
2133 /* Clear unused fragments, pinning depends on this */
2134 /*TODO place an int[] here instead of the memset if size justify it*/
2135 memset (frag_start, 0, frag_size);
/* Human-readable name of a GENERATION_* constant, for debug output. */
2140 generation_name (int generation)
2142 switch (generation) {
2143 case GENERATION_NURSERY: return "nursery";
2144 case GENERATION_OLD: return "old";
2145 default: g_assert_not_reached ();
/* Select the disappearing-link hash table for the given generation. */
2149 static DisappearingLinkHashTable*
2150 get_dislink_hash_table (int generation)
2152 switch (generation) {
2153 case GENERATION_NURSERY: return &minor_disappearing_link_hash;
2154 case GENERATION_OLD: return &major_disappearing_link_hash;
2155 default: g_assert_not_reached ();
/* Select the finalizable-object hash table for the given generation. */
2159 static FinalizeEntryHashTable*
2160 get_finalize_entry_hash_table (int generation)
2162 switch (generation) {
2163 case GENERATION_NURSERY: return &minor_finalizable_hash;
2164 case GENERATION_OLD: return &major_finalizable_hash;
2165 default: g_assert_not_reached ();
/*
 * finish_gray_stack:
 * Complete the marking/copying phase for the given generation: drain the
 * gray stack, process ephemerons, finalizable objects and disappearing
 * links until a fixed point is reached (no new gray objects).
 */
2170 finish_gray_stack (char *start_addr, char *end_addr, int generation, GrayQueue *queue)
2175 int ephemeron_rounds = 0;
2176 CopyOrMarkObjectFunc copy_func = current_collection_generation == GENERATION_NURSERY ? major.copy_object : major.copy_or_mark_object;
2179 * We copied all the reachable objects. Now it's the time to copy
2180 * the objects that were not referenced by the roots, but by the copied objects.
2181 * we built a stack of objects pointed to by gray_start: they are
2182 * additional roots and we may add more items as we go.
2183 * We loop until gray_start == gray_objects which means no more objects have
2184 * been added. Note this is iterative: no recursion is involved.
2185 * We need to walk the LO list as well in search of marked big objects
2186 * (use a flag since this is needed only on major collections). We need to loop
2187 * here as well, so keep a counter of marked LO (increasing it in copy_object).
2188 * To achieve better cache locality and cache usage, we drain the gray stack
2189 * frequently, after each object is copied, and just finish the work here.
2191 drain_gray_stack (queue);
2193 DEBUG (2, fprintf (gc_debug_file, "%s generation done\n", generation_name (generation)));
2194 /* walk the finalization queue and move also the objects that need to be
2195 * finalized: use the finalized objects as new roots so the objects they depend
2196 * on are also not reclaimed. As with the roots above, only objects in the nursery
2197 * are marked/copied.
2198 * We need a loop here, since objects ready for finalizers may reference other objects
2199 * that are fin-ready. Speedup with a flag?
2203 * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
2204 * before processing finalizable objects to avoid finalizing reachable values.
2206 * It must be done inside the finalizers loop since objects must not be removed from CWT tables
2207 * while they are being finalized.
2209 int done_with_ephemerons = 0;
/* iterate until no ephemeron round marks anything new */
2211 done_with_ephemerons = mark_ephemerons_in_range (copy_func, start_addr, end_addr, queue);
2212 drain_gray_stack (queue);
2214 } while (!done_with_ephemerons);
/* loop until processing finalizers stops producing new fin-ready objects */
2216 fin_ready = num_ready_finalizers;
2217 finalize_in_range (copy_func, start_addr, end_addr, generation, queue);
2218 if (generation == GENERATION_OLD)
2219 finalize_in_range (copy_func, nursery_start, nursery_real_end, GENERATION_NURSERY, queue);
2221 /* drain the new stack that might have been created */
2222 DEBUG (6, fprintf (gc_debug_file, "Precise scan of gray area post fin\n"));
2223 drain_gray_stack (queue);
2224 } while (fin_ready != num_ready_finalizers);
2227 * Clear ephemeron pairs with unreachable keys.
2228 * We pass the copy func so we can figure out if an array was promoted or not.
2230 clear_unreachable_ephemerons (copy_func, start_addr, end_addr, queue);
2233 DEBUG (2, fprintf (gc_debug_file, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron roundss\n", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds));
2236 * handle disappearing links
2237 * Note we do this after checking the finalization queue because if an object
2238 * survives (at least long enough to be finalized) we don't clear the link.
2239 * This also deals with a possible issue with the monitor reclamation: with the Boehm
2240 * GC a finalized object may lose the monitor because it is cleared before the finalizer is
2243 g_assert (gray_object_queue_is_empty (queue));
2245 null_link_in_range (copy_func, start_addr, end_addr, generation, queue);
2246 if (generation == GENERATION_OLD)
2247 null_link_in_range (copy_func, start_addr, end_addr, GENERATION_NURSERY, queue);
/* nulling links may have resurrected nothing new — done once the queue stays empty */
2248 if (gray_object_queue_is_empty (queue))
2250 drain_gray_stack (queue);
2253 g_assert (gray_object_queue_is_empty (queue));
/* Sanity check: every non-NULL scan-start entry of @section must point at
 * an object with a plausible small-object size. */
2257 mono_sgen_check_section_scan_starts (GCMemSection *section)
2260 for (i = 0; i < section->num_scan_start; ++i) {
2261 if (section->scan_starts [i]) {
2262 guint size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
2263 g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
/* Run the scan-start consistency checks (nursery + major heap) when the
 * do_scan_starts_check debug option is enabled. */
2269 check_scan_starts (void)
2271 if (!do_scan_starts_check)
2273 mono_sgen_check_section_scan_starts (nursery_section);
2274 major.check_scan_starts ();
/* number of pinned objects found by the previous collection */
2277 static int last_num_pinned = 0;
/*
 * build_nursery_fragments:
 * Walk the sorted pin queue (@start, @num_entries) and rebuild the list of
 * free nursery fragments from the gaps between pinned objects, unpinning
 * the objects and resetting the nursery scan starts as we go.
 */
2280 build_nursery_fragments (void **start, int num_entries)
2282 char *frag_start, *frag_end;
/* recycle the previous collection's fragments onto the freelist */
2286 while (nursery_fragments) {
2287 Fragment *next = nursery_fragments->next;
2288 nursery_fragments->next = fragment_freelist;
2289 fragment_freelist = nursery_fragments;
2290 nursery_fragments = next;
2292 frag_start = nursery_start;
2294 /* clear scan starts */
2295 memset (nursery_section->scan_starts, 0, nursery_section->num_scan_start * sizeof (gpointer));
2296 for (i = 0; i < num_entries; ++i) {
2297 frag_end = start [i];
2298 /* remove the pin bit from pinned objects */
2299 unpin_object (frag_end);
/* pinned objects make natural scan-start anchors for their chunk */
2300 nursery_section->scan_starts [((char*)frag_end - (char*)nursery_section->data)/SCAN_START_SIZE] = frag_end;
2301 frag_size = frag_end - frag_start;
2303 add_nursery_frag (frag_size, frag_start, frag_end);
/* next free gap starts after this pinned object */
2304 frag_size = ALIGN_UP (safe_object_get_size ((MonoObject*)start [i]));
2305 frag_start = (char*)start [i] + frag_size;
2307 nursery_last_pinned_end = frag_start;
/* trailing gap between the last pinned object and the nursery end */
2308 frag_end = nursery_real_end;
2309 frag_size = frag_end - frag_start;
2311 add_nursery_frag (frag_size, frag_start, frag_end);
2312 if (!nursery_fragments) {
2313 DEBUG (1, fprintf (gc_debug_file, "Nursery fully pinned (%d)\n", num_entries));
2314 for (i = 0; i < num_entries; ++i) {
2315 DEBUG (3, fprintf (gc_debug_file, "Bastard pinning obj %p (%s), size: %d\n", start [i], safe_name (start [i]), safe_object_get_size (start [i])));
2320 nursery_next = nursery_frag_real_end = NULL;
2322 /* Clear TLABs for all threads */
/* Precisely scan every registered root of @root_type in
 * [addr_start, addr_end) using its stored root descriptor. */
2327 scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, char *addr_start, char *addr_end, int root_type, GrayQueue *queue)
2331 for (i = 0; i < roots_hash_size [root_type]; ++i) {
2332 for (root = roots_hash [root_type][i]; root; root = root->next) {
2333 DEBUG (6, fprintf (gc_debug_file, "Precise root scan %p-%p (desc: %p)\n", root->start_root, root->end_root, (void*)root->root_desc));
2334 precisely_scan_objects_from (copy_func, (void**)root->start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, queue);
/* Emit one <occupied> element to the heap-dump file for the live run
 * [start, end), with its offset relative to @section_start. */
2340 mono_sgen_dump_occupied (char *start, char *end, char *section_start)
2342 fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
/*
 * mono_sgen_dump_section:
 * Write an XML <section> description of @section to the heap dump file,
 * walking its memory word by word and emitting <object>/<occupied>
 * elements for live data.
 */
2346 mono_sgen_dump_section (GCMemSection *section, const char *type)
2348 char *start = section->data;
2349 char *end = section->data + section->size;
2350 char *occ_start = NULL;
2352 char *old_start = NULL; /* just for debugging */
2354 fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
2356 while (start < end) {
/* a NULL word means unoccupied space: flush any pending occupied run */
2360 if (!*(void**)start) {
2362 mono_sgen_dump_occupied (occ_start, start, section->data);
2365 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
2368 g_assert (start < section->next_data);
2373 vt = (GCVTable*)LOAD_VTABLE (start);
2376 size = ALIGN_UP (safe_object_get_size ((MonoObject*) start));
2379 fprintf (heap_dump_file, "<object offset=\"%d\" class=\"%s.%s\" size=\"%d\"/>\n",
2380 start - section->data,
2381 vt->klass->name_space, vt->klass->name,
/* flush the final occupied run, if the section ended inside one */
2389 mono_sgen_dump_occupied (occ_start, start, section->data);
2391 fprintf (heap_dump_file, "</section>\n");
/*
 * dump_object:
 * Write an XML <object> element for @obj to the heap dump file, with its
 * class name (stripped of characters that break the dump consumer) and,
 * when @dump_location is set, where it lives (nursery / major / LOS).
 */
2395 dump_object (MonoObject *obj, gboolean dump_location)
2397 static char class_name [1024];
2399 MonoClass *class = mono_object_class (obj);
2403 * Python's XML parser is too stupid to parse angle brackets
2404 * in strings, so we just ignore them;
/* copy the class name, dropping '<', '>' and '"' characters */
2407 while (class->name [i] && j < sizeof (class_name) - 1) {
2408 if (!strchr ("<>\"", class->name [i]))
2409 class_name [j++] = class->name [i];
2412 g_assert (j < sizeof (class_name));
2415 fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%d\"",
2416 class->name_space, class_name,
2417 safe_object_get_size (obj));
2418 if (dump_location) {
2419 const char *location;
2420 if (ptr_in_nursery (obj))
2421 location = "nursery";
2422 else if (safe_object_get_size (obj) <= MAX_SMALL_OBJ_SIZE)
2426 fprintf (heap_dump_file, " location=\"%s\"", location);
2428 fprintf (heap_dump_file, "/>\n");
/*
 * dump_heap:
 * Write a complete XML <collection> record to the heap dump file: memory
 * usage, pinned-byte statistics, pinned objects, the nursery section, the
 * major heap and the large-object space.
 */
2432 dump_heap (const char *type, int num, const char *reason)
2437 fprintf (heap_dump_file, "<collection type=\"%s\" num=\"%d\"", type, num);
2439 fprintf (heap_dump_file, " reason=\"%s\"", reason);
2440 fprintf (heap_dump_file, ">\n");
2441 fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
2442 mono_sgen_dump_internal_mem_usage (heap_dump_file);
2443 fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", pinned_byte_counts [PIN_TYPE_STACK]);
2444 /* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
2445 fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", pinned_byte_counts [PIN_TYPE_OTHER]);
2447 fprintf (heap_dump_file, "<pinned-objects>\n");
2448 for (list = pinned_objects; list; list = list->next)
2449 dump_object (list->obj, TRUE);
2450 fprintf (heap_dump_file, "</pinned-objects>\n");
2452 mono_sgen_dump_section (nursery_section, "nursery");
2454 major.dump_heap (heap_dump_file);
2456 fprintf (heap_dump_file, "<los>\n");
2457 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
2458 dump_object ((MonoObject*)bigobj->data, FALSE);
2459 fprintf (heap_dump_file, "</los>\n");
2461 fprintf (heap_dump_file, "</collection>\n");
/*
 * mono_sgen_register_moved_object:
 * Record an (old address, new address) pair for the profiler's GC-moves
 * event, flushing the buffer to the profiler when it fills up.  Only valid
 * when MONO_PROFILE_GC_MOVES is active, and not for the parallel collector.
 */
2465 mono_sgen_register_moved_object (void *obj, void *destination)
2467 g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
2469 /* FIXME: handle this for parallel collector */
2470 g_assert (!major.is_parallel);
/* buffer full: hand the accumulated pairs to the profiler and restart */
2472 if (moved_objects_idx == MOVED_OBJECTS_NUM) {
2473 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
2474 moved_objects_idx = 0;
/* entries are stored as consecutive (source, destination) pairs */
2476 moved_objects [moved_objects_idx++] = obj;
2477 moved_objects [moved_objects_idx++] = destination;
/* NOTE(review): this excerpt is the body of the statistics-initialization
 * function (its header lies outside this chunk).  It registers every GC
 * timing and (under HEAVY_STATISTICS) event counter with mono_counters;
 * `inited` presumably guards against double registration — confirm against
 * the surrounding code. */
2483 static gboolean inited = FALSE;
/* minor (nursery) collection phase timings */
2488 mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_pre_collection_fragment_clear);
2489 mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_pinning);
2490 mono_counters_register ("Minor scan remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_remsets);
2491 mono_counters_register ("Minor scan cardtables", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_card_table);
2492 mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_pinned);
2493 mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_registered_roots);
2494 mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_thread_data);
2495 mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_finish_gray_stack);
2496 mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_fragment_creation);
/* major collection phase timings */
2498 mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_pre_collection_fragment_clear);
2499 mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_pinning);
2500 mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_pinned);
2501 mono_counters_register ("Major scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_registered_roots);
2502 mono_counters_register ("Major scan thread data", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_thread_data);
2503 mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_alloc_pinned);
2504 mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_finalized);
2505 mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_big_objects);
2506 mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_finish_gray_stack);
2507 mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_free_bigobjs);
2508 mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_los_sweep);
2509 mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_sweep);
2510 mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_fragment_creation);
/* detailed event counters, only compiled in with HEAVY_STATISTICS */
2513 #ifdef HEAVY_STATISTICS
2514 mono_counters_register ("WBarrier set field", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_field);
2515 mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_arrayref);
2516 mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_arrayref_copy);
2517 mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store);
2518 mono_counters_register ("WBarrier generic store stored", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store_remset);
2519 mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_root);
2520 mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_value_copy);
2521 mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_object_copy);
2523 mono_counters_register ("# objects allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced);
2524 mono_counters_register ("bytes allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced);
2525 mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced_degraded);
2526 mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_degraded);
2527 mono_counters_register ("bytes allocated in LOS", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_los);
2529 mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_nursery);
2530 mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_nursery);
2531 mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_major);
2532 mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_major);
2534 mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_nursery);
2535 mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_major);
2537 mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_from_space);
2538 mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_forwarded);
2539 mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_pinned);
2541 mono_counters_register ("# wasted fragments used", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_wasted_fragments_used);
2542 mono_counters_register ("bytes in wasted fragments", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_wasted_fragments_bytes);
2544 mono_counters_register ("Store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets);
2545 mono_counters_register ("Unique store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets_unique);
2546 mono_counters_register ("Saved remsets 1", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_saved_remsets_1);
2547 mono_counters_register ("Saved remsets 2", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_saved_remsets_2);
2548 mono_counters_register ("Non-global remsets processed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_local_remsets_processed);
2549 mono_counters_register ("Global remsets added", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_added);
2550 mono_counters_register ("Global remsets re-added", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_readded);
2551 mono_counters_register ("Global remsets processed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_processed);
2552 mono_counters_register ("Global remsets discarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_discarded);
/*
 * need_major_collection:
 * Heuristic trigger for a major GC: promoted minor-collection sections plus
 * LOS growth since the last major collection exceed the current allowance.
 * The MIN() guards against los_memory_usage having shrunk below
 * last_los_memory_usage (e.g. after large objects were freed).
 */
2559 need_major_collection (void)
2561 mword los_alloced = los_memory_usage - MIN (last_los_memory_usage, los_memory_usage);
2562 return minor_collection_sections_alloced * major.section_size + los_alloced > minor_collection_allowance;
2566 * Collect objects in the nursery. Returns whether to trigger a major
2570 collect_nursery (size_t requested_size)
2572 size_t max_garbage_amount;
2573 char *orig_nursery_next;
2574 TV_DECLARE (all_atv);
2575 TV_DECLARE (all_btv);
2579 current_collection_generation = GENERATION_NURSERY;
2581 binary_protocol_collection (GENERATION_NURSERY);
2582 check_scan_starts ();
2585 orig_nursery_next = nursery_next;
2586 nursery_next = MAX (nursery_next, nursery_last_pinned_end);
2587 /* FIXME: optimize later to use the higher address where an object can be present */
2588 nursery_next = MAX (nursery_next, nursery_real_end);
2590 DEBUG (1, fprintf (gc_debug_file, "Start nursery collection %d %p-%p, size: %d\n", num_minor_gcs, nursery_start, nursery_next, (int)(nursery_next - nursery_start)));
2591 max_garbage_amount = nursery_next - nursery_start;
2592 g_assert (nursery_section->size >= max_garbage_amount);
2594 /* world must be stopped already */
2595 TV_GETTIME (all_atv);
2598 /* Pinning depends on this */
2599 clear_nursery_fragments (orig_nursery_next);
2602 time_minor_pre_collection_fragment_clear += TV_ELAPSED_MS (atv, btv);
2605 check_for_xdomain_refs ();
2607 nursery_section->next_data = nursery_next;
2609 major.start_nursery_collection ();
2611 gray_object_queue_init (&gray_queue, mono_sgen_get_unmanaged_allocator ());
2614 mono_stats.minor_gc_count ++;
2616 global_remset_cache_clear ();
2618 /* pin from pinned handles */
2620 pin_from_roots (nursery_start, nursery_next);
2621 /* identify pinned objects */
2622 optimize_pin_queue (0);
2623 next_pin_slot = pin_objects_from_addresses (nursery_section, pin_queue, pin_queue + next_pin_slot, nursery_start, nursery_next, &gray_queue);
2624 nursery_section->pin_queue_start = pin_queue;
2625 nursery_section->pin_queue_num_entries = next_pin_slot;
2627 time_minor_pinning += TV_ELAPSED_MS (btv, atv);
2628 DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", next_pin_slot, TV_ELAPSED (btv, atv)));
2629 DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", next_pin_slot));
2631 if (consistency_check_at_minor_collection)
2632 check_consistency ();
2635 * walk all the roots and copy the young objects to the old generation,
2636 * starting from to_space
2639 scan_from_remsets (nursery_start, nursery_next, &gray_queue);
2640 /* we don't have complete write barrier yet, so we scan all the old generation sections */
2642 time_minor_scan_remsets += TV_ELAPSED_MS (atv, btv);
2643 DEBUG (2, fprintf (gc_debug_file, "Old generation scan: %d usecs\n", TV_ELAPSED (atv, btv)));
2645 if (use_cardtable) {
2646 card_tables_collect_starts (TRUE);
2648 scan_from_card_tables (nursery_start, nursery_next, &gray_queue);
2650 time_minor_scan_card_table += TV_ELAPSED_MS (atv, btv);
2653 drain_gray_stack (&gray_queue);
2656 time_minor_scan_pinned += TV_ELAPSED_MS (btv, atv);
2657 /* registered roots, this includes static fields */
2658 scan_from_registered_roots (major.copy_object, nursery_start, nursery_next, ROOT_TYPE_NORMAL, &gray_queue);
2659 scan_from_registered_roots (major.copy_object, nursery_start, nursery_next, ROOT_TYPE_WBARRIER, &gray_queue);
2661 time_minor_scan_registered_roots += TV_ELAPSED_MS (atv, btv);
/* thread stacks and registers, scanned precisely where possible */
2663 scan_thread_data (nursery_start, nursery_next, TRUE);
2665 time_minor_scan_thread_data += TV_ELAPSED_MS (btv, atv);
/* finish marking: gray stack, ephemerons, finalizers, disappearing links */
2668 finish_gray_stack (nursery_start, nursery_next, GENERATION_NURSERY, &gray_queue);
2670 time_minor_finish_gray_stack += TV_ELAPSED_MS (btv, atv);
2672 /* walk the pin_queue, build up the fragment list of free memory, unmark
2673 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2676 build_nursery_fragments (pin_queue, next_pin_slot);
2678 time_minor_fragment_creation += TV_ELAPSED_MS (atv, btv);
2679 DEBUG (2, fprintf (gc_debug_file, "Fragment creation: %d usecs, %lu bytes available\n", TV_ELAPSED (atv, btv), (unsigned long)fragment_total));
2681 if (consistency_check_at_minor_collection)
2682 check_major_refs ();
2684 major.finish_nursery_collection ();
2686 TV_GETTIME (all_btv);
2687 mono_stats.minor_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
2690 dump_heap ("minor", num_minor_gcs - 1, NULL);
2692 /* prepare the pin queue for the next collection */
2693 last_num_pinned = next_pin_slot;
/* wake the finalizer thread if this collection produced fin-ready objects */
2695 if (fin_ready_list || critical_fin_list) {
2696 DEBUG (4, fprintf (gc_debug_file, "Finalizer-thread wakeup: ready %d\n", num_ready_finalizers));
2697 mono_gc_finalize_notify ();
2701 g_assert (gray_object_queue_is_empty (&gray_queue));
2704 card_tables_collect_starts (FALSE);
2706 check_scan_starts ();
2708 binary_protocol_flush_buffers (FALSE);
2710 current_collection_generation = -1;
/* tell the caller whether a major collection should follow */
2712 return need_major_collection ();
2716 major_do_collection (const char *reason)
2718 LOSObject *bigobj, *prevbo;
2719 TV_DECLARE (all_atv);
2720 TV_DECLARE (all_btv);
2723 /* FIXME: only use these values for the precise scan
2724 * note that to_space pointers should be excluded anyway...
2726 char *heap_start = NULL;
2727 char *heap_end = (char*)-1;
2728 int old_num_major_sections = major.get_num_major_sections ();
2729 int num_major_sections, num_major_sections_saved, save_target, allowance_target;
2730 mword los_memory_saved, los_memory_alloced, old_los_memory_usage;
2733 * A domain could have been freed, resulting in
2734 * los_memory_usage being less than last_los_memory_usage.
2736 los_memory_alloced = los_memory_usage - MIN (last_los_memory_usage, los_memory_usage);
2737 old_los_memory_usage = los_memory_usage;
2739 //count_ref_nonref_objs ();
2740 //consistency_check ();
2742 binary_protocol_collection (GENERATION_OLD);
2743 check_scan_starts ();
2744 gray_object_queue_init (&gray_queue, mono_sgen_get_unmanaged_allocator ());
2745 if (major.is_parallel)
2746 gray_object_queue_init (&workers_distribute_gray_queue, mono_sgen_get_unmanaged_allocator ());
2749 DEBUG (1, fprintf (gc_debug_file, "Start major collection %d\n", num_major_gcs));
2751 mono_stats.major_gc_count ++;
2753 /* world must be stopped already */
2754 TV_GETTIME (all_atv);
2757 /* Pinning depends on this */
2758 clear_nursery_fragments (nursery_next);
2761 time_major_pre_collection_fragment_clear += TV_ELAPSED_MS (atv, btv);
2764 check_for_xdomain_refs ();
2766 nursery_section->next_data = nursery_real_end;
2767 /* we should also coalesce scanning from sections close to each other
2768 * and deal with pointers outside of the sections later.
2770 /* The remsets are not useful for a major collection */
2772 global_remset_cache_clear ();
2774 card_table_clear ();
2778 DEBUG (6, fprintf (gc_debug_file, "Collecting pinned addresses\n"));
2779 pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address);
2780 optimize_pin_queue (0);
2783 * pin_queue now contains all candidate pointers, sorted and
2784 * uniqued. We must do two passes now to figure out which
2785 * objects are pinned.
2787 * The first is to find within the pin_queue the area for each
2788 * section. This requires that the pin_queue be sorted. We
2789 * also process the LOS objects and pinned chunks here.
2791 * The second, destructive, pass is to reduce the section
2792 * areas to pointers to the actually pinned objects.
2794 DEBUG (6, fprintf (gc_debug_file, "Pinning from sections\n"));
2795 /* first pass for the sections */
2796 mono_sgen_find_section_pin_queue_start_end (nursery_section);
2797 major.find_pin_queue_start_ends (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2798 /* identify possible pointers to the insize of large objects */
2799 DEBUG (6, fprintf (gc_debug_file, "Pinning from large objects\n"));
2800 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
2802 if (mono_sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + bigobj->size, &dummy)) {
2803 pin_object (bigobj->data);
2804 /* FIXME: only enqueue if object has references */
2805 GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data);
2807 mono_sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
2808 DEBUG (6, fprintf (gc_debug_file, "Marked large object %p (%s) size: %lu from roots\n", bigobj->data, safe_name (bigobj->data), (unsigned long)bigobj->size));
2811 /* second pass for the sections */
2812 mono_sgen_pin_objects_in_section (nursery_section, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2813 major.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2816 time_major_pinning += TV_ELAPSED_MS (atv, btv);
2817 DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", next_pin_slot, TV_ELAPSED (atv, btv)));
2818 DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", next_pin_slot));
2820 major.init_to_space ();
2822 workers_start_all_workers (1);
2825 time_major_scan_pinned += TV_ELAPSED_MS (btv, atv);
2827 /* registered roots, this includes static fields */
2828 scan_from_registered_roots (major.copy_or_mark_object, heap_start, heap_end, ROOT_TYPE_NORMAL, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2829 scan_from_registered_roots (major.copy_or_mark_object, heap_start, heap_end, ROOT_TYPE_WBARRIER, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2831 time_major_scan_registered_roots += TV_ELAPSED_MS (atv, btv);
2834 /* FIXME: This is the wrong place for this, because it does
2836 scan_thread_data (heap_start, heap_end, TRUE);
2838 time_major_scan_thread_data += TV_ELAPSED_MS (btv, atv);
2841 time_major_scan_alloc_pinned += TV_ELAPSED_MS (atv, btv);
2843 /* scan the list of objects ready for finalization */
2844 scan_finalizer_entries (major.copy_or_mark_object, fin_ready_list, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2845 scan_finalizer_entries (major.copy_or_mark_object, critical_fin_list, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2847 time_major_scan_finalized += TV_ELAPSED_MS (btv, atv);
2848 DEBUG (2, fprintf (gc_debug_file, "Root scan: %d usecs\n", TV_ELAPSED (btv, atv)));
2851 time_major_scan_big_objects += TV_ELAPSED_MS (atv, btv);
2853 if (major.is_parallel) {
2854 while (!gray_object_queue_is_empty (WORKERS_DISTRIBUTE_GRAY_QUEUE)) {
2855 workers_distribute_gray_queue_sections ();
2859 workers_change_num_working (-1);
2862 if (major.is_parallel)
2863 g_assert (gray_object_queue_is_empty (&gray_queue));
2865 /* all the objects in the heap */
2866 finish_gray_stack (heap_start, heap_end, GENERATION_OLD, &gray_queue);
2868 time_major_finish_gray_stack += TV_ELAPSED_MS (btv, atv);
2870 /* sweep the big objects list */
2872 for (bigobj = los_object_list; bigobj;) {
2873 if (object_is_pinned (bigobj->data)) {
2874 unpin_object (bigobj->data);
2877 /* not referenced anywhere, so we can free it */
2879 prevbo->next = bigobj->next;
2881 los_object_list = bigobj->next;
2883 bigobj = bigobj->next;
2884 free_large_object (to_free);
2888 bigobj = bigobj->next;
2892 time_major_free_bigobjs += TV_ELAPSED_MS (atv, btv);
2897 time_major_los_sweep += TV_ELAPSED_MS (btv, atv);
2902 time_major_sweep += TV_ELAPSED_MS (atv, btv);
2904 /* walk the pin_queue, build up the fragment list of free memory, unmark
2905 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2908 build_nursery_fragments (nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries);
2911 time_major_fragment_creation += TV_ELAPSED_MS (btv, atv);
2913 TV_GETTIME (all_btv);
2914 mono_stats.major_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
2917 dump_heap ("major", num_major_gcs - 1, reason);
2919 /* prepare the pin queue for the next collection */
2921 if (fin_ready_list || critical_fin_list) {
2922 DEBUG (4, fprintf (gc_debug_file, "Finalizer-thread wakeup: ready %d\n", num_ready_finalizers));
2923 mono_gc_finalize_notify ();
2927 g_assert (gray_object_queue_is_empty (&gray_queue));
2929 num_major_sections = major.get_num_major_sections ();
2931 num_major_sections_saved = MAX (old_num_major_sections - num_major_sections, 0);
2932 los_memory_saved = MAX (old_los_memory_usage - los_memory_usage, 1);
2934 save_target = ((num_major_sections * major.section_size) + los_memory_saved) / 2;
2936 * We aim to allow the allocation of as many sections as is
2937 * necessary to reclaim save_target sections in the next
2938 * collection. We assume the collection pattern won't change.
2939 * In the last cycle, we had num_major_sections_saved for
2940 * minor_collection_sections_alloced. Assuming things won't
2941 * change, this must be the same ratio as save_target for
2942 * allowance_target, i.e.
2944 * num_major_sections_saved save_target
2945 * --------------------------------- == ----------------
2946 * minor_collection_sections_alloced allowance_target
2950 allowance_target = (mword)((double)save_target * (double)(minor_collection_sections_alloced * major.section_size + los_memory_alloced) / (double)(num_major_sections_saved * major.section_size + los_memory_saved));
2952 minor_collection_allowance = MAX (MIN (allowance_target, num_major_sections * major.section_size + los_memory_usage), MIN_MINOR_COLLECTION_ALLOWANCE);
2954 minor_collection_sections_alloced = 0;
2955 last_los_memory_usage = los_memory_usage;
2957 major.finish_major_collection ();
2959 check_scan_starts ();
2961 binary_protocol_flush_buffers (FALSE);
2963 //consistency_check ();
/*
 * major_collection:
 * Entry point for a full (old-generation) collection.  When the
 * MONO_GC_NO_MAJOR environment variable is set, only a nursery
 * collection is performed instead; otherwise the collection generation
 * is marked GENERATION_OLD for the duration of major_do_collection ()
 * and reset to -1 (no collection in progress) afterwards.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
2967 major_collection (const char *reason)
2969 if (g_getenv ("MONO_GC_NO_MAJOR")) {
2970 collect_nursery (0);
2974 current_collection_generation = GENERATION_OLD;
2975 major_do_collection (reason);
2976 current_collection_generation = -1;
2980 * When deciding if it's better to collect or to expand, keep track
2981 * of how much garbage was reclaimed with the last collection: if it's too
2983 * This is called when we could not allocate a small object.
/*
 * minor_collect_or_expand_inner:
 * @size: the size (in bytes) of the allocation that failed
 *
 * Called when a small-object allocation could not be satisfied from the
 * nursery.  Runs a nursery collection (escalating to a major collection
 * if collect_nursery () reports the need), then retries to find a
 * nursery fragment large enough for @size; on failure it logs the
 * pinned objects believed responsible for the fragmentation.
 * Marked noinline so the slow path stays out of the allocation fast path.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
2985 static void __attribute__((noinline))
2986 minor_collect_or_expand_inner (size_t size)
2988 int do_minor_collection = 1;
2990 g_assert (nursery_section);
2991 if (do_minor_collection) {
2993 if (collect_nursery (size))
2994 major_collection ("minor overflow");
2995 DEBUG (2, fprintf (gc_debug_file, "Heap size: %lu, LOS size: %lu\n", (unsigned long)total_alloc, (unsigned long)los_memory_usage));
2997 /* this also sets the proper pointers for the next allocation */
2998 if (!search_fragment_for_size (size)) {
3000 /* TypeBuilder and MonoMethod are killing mcs with fragmentation */
3001 DEBUG (1, fprintf (gc_debug_file, "nursery collection didn't find enough room for %zd alloc (%d pinned)\n", size, last_num_pinned));
3002 for (i = 0; i < last_num_pinned; ++i) {
3003 DEBUG (3, fprintf (gc_debug_file, "Bastard pinning obj %p (%s), size: %d\n", pin_queue [i], safe_name (pin_queue [i]), safe_object_get_size (pin_queue [i])));
3008 //report_internal_mem_usage ();
3012 * ######################################################################
3013 * ######## Memory allocation from the OS
3014 * ######################################################################
3015 * This section of code deals with getting memory from the OS and
3016 * allocating memory for GC-internal data structures.
3017 * Internal memory can be handled with a freelist for small objects.
/*
 * report_internal_mem_usage:
 * Debug helper: prints GC-internal and pinned memory usage to stdout.
 * Kept compiled but normally unreferenced (G_GNUC_UNUSED); meant to be
 * called from a debugger or temporarily enabled diagnostics.
 */
3023 G_GNUC_UNUSED static void
3024 report_internal_mem_usage (void)
3026 printf ("Internal memory usage:\n");
3027 mono_sgen_report_internal_mem_usage ();
3028 printf ("Pinned memory usage:\n");
3029 major.report_pinned_memory_usage ();
3033 * Allocate a big chunk of memory from the OS (usually 64KB to several megabytes).
3034 * This must not require any lock.
/*
 * mono_sgen_alloc_os_memory:
 * @size: number of bytes requested (rounded up to a page multiple)
 * @activate: if non-zero, map the memory readable/writable, otherwise
 * reserve it with no access (MONO_MMAP_NONE)
 *
 * Allocates a chunk of memory directly from the OS via mono_valloc ()
 * and accounts it in total_alloc.  Must not take any lock.
 * NOTE(review): some original source lines (declarations, return) are
 * elided in this excerpt.
 */
3037 mono_sgen_alloc_os_memory (size_t size, int activate)
3040 unsigned long prot_flags = activate? MONO_MMAP_READ|MONO_MMAP_WRITE: MONO_MMAP_NONE;
3042 prot_flags |= MONO_MMAP_PRIVATE | MONO_MMAP_ANON;
/* round size up to a whole number of pages (pagesize assumed power of two) */
3043 size += pagesize - 1;
3044 size &= ~(pagesize - 1);
3045 ptr = mono_valloc (0, size, prot_flags);
3047 total_alloc += size;
3052 * Free the memory returned by mono_sgen_alloc_os_memory (), returning it to the OS.
/*
 * mono_sgen_free_os_memory:
 * @addr: address returned by mono_sgen_alloc_os_memory ()
 * @size: the size originally requested
 *
 * Returns the memory to the OS and subtracts the page-rounded size
 * from total_alloc, mirroring the rounding done on allocation.
 */
3055 mono_sgen_free_os_memory (void *addr, size_t size)
3057 mono_vfree (addr, size);
/* account with the same page-rounded size that was charged on alloc */
3059 size += pagesize - 1;
3060 size &= ~(pagesize - 1);
3062 total_alloc -= size;
3066 * ######################################################################
3067 * ######## Object allocation
3068 * ######################################################################
3069 * This section of code deals with allocating memory for objects.
3070 * There are several ways:
3071 * *) allocate large objects
3072 * *) allocate normal objects
3073 * *) fast lock-free allocation
3074 * *) allocation of pinned objects
/*
 * setup_fragment:
 * @frag: the nursery fragment chosen to allocate from
 * @prev: the fragment preceding @frag in nursery_fragments (or the
 * list head case, see the elided branch)
 * @size: the allocation request that selected this fragment (used for
 * debug output here)
 *
 * Unlinks @frag from the live fragment list, points the bump-pointer
 * allocation window (nursery_next / nursery_frag_real_end) at it, and
 * recycles the Fragment node onto fragment_freelist.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3078 setup_fragment (Fragment *frag, Fragment *prev, size_t size)
3080 /* remove from the list */
3082 prev->next = frag->next;
3084 nursery_fragments = frag->next;
3085 nursery_next = frag->fragment_start;
3086 nursery_frag_real_end = frag->fragment_end;
3088 DEBUG (4, fprintf (gc_debug_file, "Using nursery fragment %p-%p, size: %td (req: %zd)\n", nursery_next, nursery_frag_real_end, nursery_frag_real_end - nursery_next, size));
/* the Fragment node itself is recycled, not freed */
3089 frag->next = fragment_freelist;
3090 fragment_freelist = frag;
3093 /* check if we have a suitable fragment in nursery_fragments to be able to allocate
3094 * an object of size @size
3095 * Return FALSE if not found (which means we need a collection)
/*
 * search_fragment_for_size:
 * @size: requested allocation size in bytes
 *
 * Scans nursery_fragments for the first fragment large enough to hold
 * @size and switches allocation to it via setup_fragment ().  Before
 * searching, the unused tail of the current fragment is zeroed when the
 * clear policy is CLEAR_AT_TLAB_CREATION, because pinning relies on
 * scanning zeroed memory.  Returns FALSE when no fragment fits, which
 * means a collection is needed.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3098 search_fragment_for_size (size_t size)
3100 Fragment *frag, *prev;
3101 DEBUG (4, fprintf (gc_debug_file, "Searching nursery fragment %p, size: %zd\n", nursery_frag_real_end, size));
3103 if (nursery_frag_real_end > nursery_next && nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
3104 /* Clear the remaining space, pinning depends on this */
3105 memset (nursery_next, 0, nursery_frag_real_end - nursery_next);
3108 for (frag = nursery_fragments; frag; frag = frag->next) {
3109 if (size <= (frag->fragment_end - frag->fragment_start)) {
3110 setup_fragment (frag, prev, size);
3119 * Same as search_fragment_for_size but if search for @desired_size fails, try to satisfy @minimum_size.
3120 * This improves nursery usage.
/*
 * search_fragment_for_size_range:
 * @desired_size: preferred allocation size (e.g. a full TLAB)
 * @minimum_size: smallest acceptable size
 *
 * Like search_fragment_for_size () but with a fallback: if no fragment
 * can hold @desired_size, the search remembers a fragment that can at
 * least hold @minimum_size and uses that instead, which improves
 * nursery utilization.  Returns the size actually made available
 * (desired, the fallback fragment's size, or the elided failure value).
 * min_prev uses GINT_TO_POINTER (-1) as a "no fallback found" sentinel
 * so that NULL can still mean "fallback is the list head".
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3123 search_fragment_for_size_range (size_t desired_size, size_t minimum_size)
3125 Fragment *frag, *prev, *min_prev;
3126 DEBUG (4, fprintf (gc_debug_file, "Searching nursery fragment %p, desired size: %zd minimum size %zd\n", nursery_frag_real_end, desired_size, minimum_size));
3128 if (nursery_frag_real_end > nursery_next && nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
3129 /* Clear the remaining space, pinning depends on this */
3130 memset (nursery_next, 0, nursery_frag_real_end - nursery_next);
3132 min_prev = GINT_TO_POINTER (-1);
3135 for (frag = nursery_fragments; frag; frag = frag->next) {
3136 int frag_size = frag->fragment_end - frag->fragment_start;
3137 if (desired_size <= frag_size) {
3138 setup_fragment (frag, prev, desired_size);
3139 return desired_size;
3141 if (minimum_size <= frag_size)
3147 if (min_prev != GINT_TO_POINTER (-1)) {
3150 frag = min_prev->next;
3152 frag = nursery_fragments;
3154 frag_size = frag->fragment_end - frag->fragment_start;
3155 HEAVY_STAT (++stat_wasted_fragments_used);
3156 HEAVY_STAT (stat_wasted_fragments_bytes += frag_size);
3158 setup_fragment (frag, min_prev, minimum_size);
/*
 * alloc_degraded:
 * @vtable: vtable of the object being allocated
 * @size: aligned object size
 *
 * Degraded-mode allocation: when the nursery cannot be used, allocate
 * directly in the major heap.  May trigger a major collection first if
 * one is needed.  degraded_mode accumulates the bytes allocated this
 * way so the allocator knows when to stop degrading.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3166 alloc_degraded (MonoVTable *vtable, size_t size)
3168 if (need_major_collection ()) {
3170 major_collection ("degraded overflow");
3174 degraded_mode += size;
3175 return major.alloc_degraded (vtable, size);
3179 * Provide a variant that takes just the vtable for small fixed-size objects.
3180 * The aligned size is already computed and stored in vt->gc_descr.
3181 * Note: every SCAN_START_SIZE or so we are given the chance to do some special
3182 * processing. We can keep track of where objects start, for example,
3183 * so when we scan the thread stacks for pinned objects, we can start
3184 * a search for the pinned object in SCAN_START_SIZE chunks.
/*
 * mono_gc_alloc_obj_nolock:
 * @vtable: the vtable (with precomputed gc_descr) of the object to allocate
 * @size: requested object size; aligned up internally
 *
 * Core object allocator, called with the GC lock already held.
 * Fast path: bump-pointer allocation from the current thread's TLAB.
 * Slow paths: large objects (> MAX_SMALL_OBJ_SIZE) go to the LOS via
 * alloc_large_inner (); TLAB exhaustion retires the TLAB and carves a
 * new one out of a nursery fragment, possibly collecting first; and
 * degraded mode allocates straight into the major heap.
 * NOTE(review): some original source lines are elided in this excerpt,
 * so several branches appear without their closing structure.
 */
3187 mono_gc_alloc_obj_nolock (MonoVTable *vtable, size_t size)
3189 /* FIXME: handle OOM */
3194 HEAVY_STAT (++stat_objects_alloced);
3195 if (size <= MAX_SMALL_OBJ_SIZE)
3196 HEAVY_STAT (stat_bytes_alloced += size);
3198 HEAVY_STAT (stat_bytes_alloced_los += size);
3200 size = ALIGN_UP (size);
3202 g_assert (vtable->gc_descr);
/* debug option: force a nursery collection before every allocation */
3204 if (G_UNLIKELY (collect_before_allocs)) {
3205 if (nursery_section) {
3207 collect_nursery (0);
3209 if (!degraded_mode && !search_fragment_for_size (size)) {
3211 g_assert_not_reached ();
3217 * We must already have the lock here instead of after the
3218 * fast path because we might be interrupted in the fast path
3219 * (after confirming that new_next < TLAB_TEMP_END) by the GC,
3220 * and we'll end up allocating an object in a fragment which
3221 * no longer belongs to us.
3223 * The managed allocator does not do this, but it's treated
3224 * specially by the world-stopping code.
3227 if (size > MAX_SMALL_OBJ_SIZE) {
3228 p = alloc_large_inner (vtable, size);
3230 /* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */
3232 p = (void**)TLAB_NEXT;
3233 /* FIXME: handle overflow */
3234 new_next = (char*)p + size;
3235 TLAB_NEXT = new_next;
3237 if (G_LIKELY (new_next < TLAB_TEMP_END)) {
3241 * FIXME: We might need a memory barrier here so the change to tlab_next is
3242 * visible before the vtable store.
3245 DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
3246 binary_protocol_alloc (p , vtable, size);
/* freshly carved TLAB memory must already be zeroed */
3247 g_assert (*p == NULL);
3250 g_assert (TLAB_NEXT == new_next);
3257 /* there are two cases: the object is too big or we run out of space in the TLAB */
3258 /* we also reach here when the thread does its first allocation after a minor
3259 * collection, since the tlab_ variables are initialized to NULL.
3260 * there can be another case (from ORP), if we cooperate with the runtime a bit:
3261 * objects that need finalizers can have the high bit set in their size
3262 * so the above check fails and we can readily add the object to the queue.
3263 * This avoids taking again the GC lock when registering, but this is moot when
3264 * doing thread-local allocation, so it may not be a good idea.
3266 g_assert (TLAB_NEXT == new_next);
3267 if (TLAB_NEXT >= TLAB_REAL_END) {
3269 * Run out of space in the TLAB. When this happens, some amount of space
3270 * remains in the TLAB, but not enough to satisfy the current allocation
3271 * request. Currently, we retire the TLAB in all cases, later we could
3272 * keep it if the remaining space is above a threshold, and satisfy the
3273 * allocation directly from the nursery.
3276 /* when running in degraded mode, we continue allocating that way
3277 * for a while, to decrease the number of useless nursery collections.
3279 if (degraded_mode && degraded_mode < DEFAULT_NURSERY_SIZE) {
3280 p = alloc_degraded (vtable, size);
3281 binary_protocol_alloc_degraded (p, vtable, size);
3285 /*FIXME This codepath is currently dead code since tlab_size > MAX_SMALL_OBJ_SIZE*/
3286 if (size > tlab_size) {
3287 /* Allocate directly from the nursery */
3288 if (nursery_next + size >= nursery_frag_real_end) {
3289 if (!search_fragment_for_size (size)) {
3290 minor_collect_or_expand_inner (size);
3291 if (degraded_mode) {
3292 p = alloc_degraded (vtable, size);
3293 binary_protocol_alloc_degraded (p, vtable, size);
3299 p = (void*)nursery_next;
3300 nursery_next += size;
3301 if (nursery_next > nursery_frag_real_end) {
3306 if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
3307 memset (p, 0, size);
/* retire the current TLAB and set up a new one from the nursery */
3309 int alloc_size = tlab_size;
3310 int available_in_nursery = nursery_frag_real_end - nursery_next;
3312 DEBUG (3, fprintf (gc_debug_file, "Retire TLAB: %p-%p [%ld]\n", TLAB_START, TLAB_REAL_END, (long)(TLAB_REAL_END - TLAB_NEXT - size)));
3314 if (alloc_size >= available_in_nursery) {
3315 if (available_in_nursery > MAX_NURSERY_TLAB_WASTE && available_in_nursery > size) {
3316 alloc_size = available_in_nursery;
3318 alloc_size = search_fragment_for_size_range (tlab_size, size);
3320 alloc_size = tlab_size;
3321 minor_collect_or_expand_inner (tlab_size);
3322 if (degraded_mode) {
3323 p = alloc_degraded (vtable, size);
3324 binary_protocol_alloc_degraded (p, vtable, size);
3331 /* Allocate a new TLAB from the current nursery fragment */
3332 TLAB_START = nursery_next;
3333 nursery_next += alloc_size;
3334 TLAB_NEXT = TLAB_START;
3335 TLAB_REAL_END = TLAB_START + alloc_size;
/* TLAB_TEMP_END bounds the fast path; reaching it triggers scan-start bookkeeping below */
3336 TLAB_TEMP_END = TLAB_START + MIN (SCAN_START_SIZE, alloc_size);
3338 if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
3339 memset (TLAB_START, 0, alloc_size);
3341 /* Allocate from the TLAB */
3342 p = (void*)TLAB_NEXT;
3344 g_assert (TLAB_NEXT <= TLAB_REAL_END);
3346 nursery_section->scan_starts [((char*)p - (char*)nursery_section->data)/SCAN_START_SIZE] = (char*)p;
3349 /* Reached tlab_temp_end */
3351 /* record the scan start so we can find pinned objects more easily */
3352 nursery_section->scan_starts [((char*)p - (char*)nursery_section->data)/SCAN_START_SIZE] = (char*)p;
3353 /* we just bump tlab_temp_end as well */
3354 TLAB_TEMP_END = MIN (TLAB_REAL_END, TLAB_NEXT + SCAN_START_SIZE);
3355 DEBUG (5, fprintf (gc_debug_file, "Expanding local alloc: %p-%p\n", TLAB_NEXT, TLAB_TEMP_END));
3359 DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
3360 binary_protocol_alloc (p, vtable, size);
/*
 * mono_gc_try_alloc_obj_nolock:
 * @vtable: vtable of the object to allocate
 * @size: requested size; aligned up internally
 *
 * Lock-free fast-path-only variant of mono_gc_alloc_obj_nolock ():
 * attempts a bump-pointer allocation from the current TLAB and, on any
 * condition that would require the slow path (large object, TLAB
 * exhausted), gives up so the caller can take the lock and retry.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3367 mono_gc_try_alloc_obj_nolock (MonoVTable *vtable, size_t size)
3373 size = ALIGN_UP (size);
3375 g_assert (vtable->gc_descr);
3376 if (size <= MAX_SMALL_OBJ_SIZE) {
3377 /* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */
3379 p = (void**)TLAB_NEXT;
3380 /* FIXME: handle overflow */
3381 new_next = (char*)p + size;
3382 TLAB_NEXT = new_next;
3384 if (G_LIKELY (new_next < TLAB_TEMP_END)) {
3388 * FIXME: We might need a memory barrier here so the change to tlab_next is
3389 * visible before the vtable store.
3392 HEAVY_STAT (++stat_objects_alloced);
3393 HEAVY_STAT (stat_bytes_alloced += size);
3395 DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
3396 binary_protocol_alloc (p, vtable, size);
/* TLAB memory must be pre-zeroed for this to hold */
3397 g_assert (*p == NULL);
3400 g_assert (TLAB_NEXT == new_next);
/*
 * mono_gc_alloc_obj:
 * Public allocation entry point.  First tries the lock-free fast path
 * inside a critical region (unless DISABLE_CRITICAL_REGION); on failure
 * falls back to the locked slow path mono_gc_alloc_obj_nolock ().
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3409 mono_gc_alloc_obj (MonoVTable *vtable, size_t size)
3412 #ifndef DISABLE_CRITICAL_REGION
3414 ENTER_CRITICAL_REGION;
3415 res = mono_gc_try_alloc_obj_nolock (vtable, size);
3417 EXIT_CRITICAL_REGION;
3420 EXIT_CRITICAL_REGION;
3423 res = mono_gc_alloc_obj_nolock (vtable, size);
/*
 * mono_gc_alloc_vector:
 * @vtable: vtable of the array type
 * @size: total object size including the array payload
 * @max_length: element count to store in the MonoArray header
 *
 * Allocates a single-dimension, zero-based array.  Same fast/slow path
 * structure as mono_gc_alloc_obj; max_length is written after the
 * allocation succeeds.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3429 mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length)
3432 #ifndef DISABLE_CRITICAL_REGION
3434 ENTER_CRITICAL_REGION;
3435 arr = mono_gc_try_alloc_obj_nolock (vtable, size);
3437 arr->max_length = max_length;
3438 EXIT_CRITICAL_REGION;
3441 EXIT_CRITICAL_REGION;
3446 arr = mono_gc_alloc_obj_nolock (vtable, size);
3447 arr->max_length = max_length;
/*
 * mono_gc_alloc_array:
 * @vtable: vtable of the array type
 * @size: total object size, with @bounds_size bytes reserved at the end
 * @max_length: element count for the MonoArray header
 * @bounds_size: size of the MonoArrayBounds data placed at the tail
 *
 * Allocates a multi-dimensional (bounded) array; the bounds structure
 * lives in the tail of the same allocation and arr->bounds points at it.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3455 mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uintptr_t bounds_size)
3458 MonoArrayBounds *bounds;
3462 arr = mono_gc_alloc_obj_nolock (vtable, size);
3463 arr->max_length = max_length;
3465 bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
3466 arr->bounds = bounds;
/*
 * mono_gc_alloc_string:
 * @vtable: the string vtable
 * @size: total object size for a string of @len characters
 * @len: string length (stored in the header in the elided lines)
 *
 * Allocates a MonoString, using the same critical-region fast path /
 * locked slow path structure as mono_gc_alloc_obj.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3474 mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len)
3477 #ifndef DISABLE_CRITICAL_REGION
3479 ENTER_CRITICAL_REGION;
3480 str = mono_gc_try_alloc_obj_nolock (vtable, size);
3483 EXIT_CRITICAL_REGION;
3486 EXIT_CRITICAL_REGION;
3491 str = mono_gc_alloc_obj_nolock (vtable, size);
3500 * To be used for interned strings and possibly MonoThread, reflection handles.
3501 * We may want to explicitly free these objects.
/*
 * mono_gc_alloc_pinned_obj:
 * @vtable: vtable of the object to allocate
 * @size: requested size; aligned up internally
 *
 * Allocates an object that will never be moved by the collector.
 * Large objects go to the LOS (always pinned anyway); small ones use
 * the major collector's pinned allocator.  Intended for interned
 * strings and similar runtime-owned objects.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3504 mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size)
3506 /* FIXME: handle OOM */
3508 size = ALIGN_UP (size);
3510 if (size > MAX_SMALL_OBJ_SIZE) {
3511 /* large objects are always pinned anyway */
3512 p = alloc_large_inner (vtable, size);
3514 DEBUG (9, g_assert (vtable->klass->inited));
3515 p = major.alloc_small_pinned_obj (size, vtable->klass->has_references);
3517 DEBUG (6, fprintf (gc_debug_file, "Allocated pinned object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
3518 binary_protocol_alloc_pinned (p, vtable, size);
3525 * ######################################################################
3526 * ######## Finalization support
3527 * ######################################################################
3531 * this is valid for the nursery: if the object has been forwarded it means it's
3532 * still referenced from a root. If it is pinned it's still alive as well.
3533 * Return TRUE if @obj is ready to be finalized.
/* an object is ready for finalization only if it is neither pinned nor forwarded */
3535 #define object_is_fin_ready(obj) (!object_is_pinned (obj) && !object_is_forwarded (obj))
/*
 * is_critical_finalizer:
 * @entry: a registered finalization entry
 *
 * Returns whether the entry's object derives from
 * CriticalFinalizerObject, in which case its finalizer must be queued
 * on the critical list and run after ordinary finalizers.  Returns
 * early when the runtime has no critical finalizer class configured.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3538 is_critical_finalizer (FinalizeEntry *entry)
3543 if (!mono_defaults.critical_finalizer_object)
3546 obj = entry->object;
3547 class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;
3549 return mono_class_has_parent (class, mono_defaults.critical_finalizer_object);
/*
 * queue_finalization_entry:
 * Prepends @entry to the appropriate ready-to-finalize list:
 * critical_fin_list for CriticalFinalizerObject subclasses,
 * fin_ready_list for everything else.
 */
3553 queue_finalization_entry (FinalizeEntry *entry) {
3554 if (is_critical_finalizer (entry)) {
3555 entry->next = critical_fin_list;
3556 critical_fin_list = entry;
3558 entry->next = fin_ready_list;
3559 fin_ready_list = entry;
3563 /* LOCKING: requires that the GC lock is held */
/* LOCKING: requires that the GC lock is held */
/*
 * rehash_fin_table:
 * Grows @hash_table to a prime size based on its current registration
 * count, re-inserting every FinalizeEntry by mono_object_hash () of its
 * object, then frees the old bucket array.  Entries are prepended into
 * their new buckets, so per-bucket order is reversed.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3565 rehash_fin_table (FinalizeEntryHashTable *hash_table)
3567 FinalizeEntry **finalizable_hash = hash_table->table;
3568 mword finalizable_hash_size = hash_table->size;
3571 FinalizeEntry **new_hash;
3572 FinalizeEntry *entry, *next;
3573 int new_size = g_spaced_primes_closest (hash_table->num_registered);
3575 new_hash = mono_sgen_alloc_internal_dynamic (new_size * sizeof (FinalizeEntry*), INTERNAL_MEM_FIN_TABLE);
3576 for (i = 0; i < finalizable_hash_size; ++i) {
3577 for (entry = finalizable_hash [i]; entry; entry = next) {
3578 hash = mono_object_hash (entry->object) % new_size;
3580 entry->next = new_hash [hash];
3581 new_hash [hash] = entry;
3584 mono_sgen_free_internal_dynamic (finalizable_hash, finalizable_hash_size * sizeof (FinalizeEntry*), INTERNAL_MEM_FIN_TABLE);
3585 hash_table->table = new_hash;
3586 hash_table->size = new_size;
3589 /* LOCKING: requires that the GC lock is held */
/* LOCKING: requires that the GC lock is held */
/* Grow the finalizer hash table once the load factor reaches 2. */
3591 rehash_fin_table_if_necessary (FinalizeEntryHashTable *hash_table)
3593 if (hash_table->num_registered >= hash_table->size * 2)
3594 rehash_fin_table (hash_table);
3597 /* LOCKING: requires that the GC lock is held */
/* LOCKING: requires that the GC lock is held */
/*
 * finalize_in_range:
 * @copy_func: the collector's copy-or-mark function
 * @start/@end: address range being collected
 * @generation: selects the minor or major finalizer hash table
 * @queue: gray queue for objects kept alive here
 *
 * Walks the finalizable hash table for @generation and, for every
 * registered object inside [start, end) that the collector has not
 * already marked live:
 *  - if it is ready to finalize, moves its entry onto the ready list
 *    (queue_finalization_entry) and copies the object so it survives
 *    until its finalizer runs;
 *  - otherwise updates the entry's pointer to the object's new
 *    location, promoting the entry from the minor to the major table
 *    when the object left the nursery.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3599 finalize_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, GrayQueue *queue)
3601 FinalizeEntryHashTable *hash_table = get_finalize_entry_hash_table (generation);
3602 FinalizeEntry *entry, *prev;
3604 FinalizeEntry **finalizable_hash = hash_table->table;
3605 mword finalizable_hash_size = hash_table->size;
3609 for (i = 0; i < finalizable_hash_size; ++i) {
3611 for (entry = finalizable_hash [i]; entry;) {
3612 if ((char*)entry->object >= start && (char*)entry->object < end && !major.is_object_live (entry->object)) {
3613 gboolean is_fin_ready = object_is_fin_ready (entry->object);
3614 char *copy = entry->object;
3615 copy_func ((void**)&copy, queue);
3618 FinalizeEntry *next;
3619 /* remove and put in fin_ready_list */
3621 prev->next = entry->next;
3623 finalizable_hash [i] = entry->next;
3625 num_ready_finalizers++;
3626 hash_table->num_registered--;
3627 queue_finalization_entry (entry);
3628 /* Make it survive */
3629 from = entry->object;
3630 entry->object = copy;
3631 DEBUG (5, fprintf (gc_debug_file, "Queueing object for finalization: %p (%s) (was at %p) (%d/%d)\n", entry->object, safe_name (entry->object), from, num_ready_finalizers, hash_table->num_registered));
3635 char *from = entry->object;
3636 if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
3637 FinalizeEntry *next = entry->next;
3638 unsigned int major_hash;
3639 /* remove from the list */
3641 prev->next = entry->next;
3643 finalizable_hash [i] = entry->next;
3644 hash_table->num_registered--;
3646 entry->object = copy;
3648 /* insert it into the major hash */
3649 rehash_fin_table_if_necessary (&major_finalizable_hash);
3650 major_hash = mono_object_hash ((MonoObject*) copy) %
3651 major_finalizable_hash.size;
3652 entry->next = major_finalizable_hash.table [major_hash];
3653 major_finalizable_hash.table [major_hash] = entry;
3654 major_finalizable_hash.num_registered++;
3656 DEBUG (5, fprintf (gc_debug_file, "Promoting finalization of object %p (%s) (was at %p) to major table\n", copy, safe_name (copy), from));
3661 /* update pointer */
3662 DEBUG (5, fprintf (gc_debug_file, "Updating object for finalization: %p (%s) (was at %p)\n", entry->object, safe_name (entry->object), from));
3663 entry->object = copy;
3668 entry = entry->next;
/*
 * object_is_reachable:
 * Returns whether @object should be considered alive for the current
 * collection of the range [start, end).  Objects outside the range are
 * treated as alive (e.g. old-gen objects during a minor collection).
 */
3674 object_is_reachable (char *object, char *start, char *end)
3676 /*This happens for non nursery objects during minor collections. We just treat all objects as alive.*/
3677 if (object < start || object >= end)
/* alive if pinned/forwarded (not fin-ready) or marked live by the major collector */
3679 return !object_is_fin_ready (object) || major.is_object_live (object);
3682 /* LOCKING: requires that the GC lock is held */
/* LOCKING: requires that the GC lock is held */
/*
 * null_ephemerons_for_domain:
 * During appdomain unload, removes from ephemeron_list every node whose
 * array belongs to the dying domain (detected by a cleared vtable) and
 * frees the node.  @domain is the unloading domain.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3684 null_ephemerons_for_domain (MonoDomain *domain)
3686 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3689 MonoObject *object = (MonoObject*)current->array;
/* a NULL vtable marks an object belonging to an unloaded domain */
3691 if (object && !object->vtable) {
3692 EphemeronLinkNode *tmp = current;
3695 prev->next = current->next;
3697 ephemeron_list = current->next;
3699 current = current->next;
3700 mono_sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
3703 current = current->next;
3708 /* LOCKING: requires that the GC lock is held */
/* LOCKING: requires that the GC lock is held */
/*
 * clear_unreachable_ephemerons:
 * Final ephemeron pass of a collection.  Dead ephemeron arrays are
 * unlinked from ephemeron_list and their nodes freed.  Live arrays are
 * copied/updated; each entry whose key is unreachable has its key
 * replaced by the domain's tombstone object.  If the array was promoted
 * out of the nursery, global remsets are added for any key/value still
 * pointing into the nursery so the remset machinery keeps seeing them.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3710 clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue)
3712 int was_in_nursery, was_promoted;
3713 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3715 Ephemeron *cur, *array_end;
3719 char *object = current->array;
3721 if (!object_is_reachable (object, start, end)) {
3722 EphemeronLinkNode *tmp = current;
3724 DEBUG (5, fprintf (gc_debug_file, "Dead Ephemeron array at %p\n", object));
3727 prev->next = current->next;
3729 ephemeron_list = current->next;
3731 current = current->next;
3732 mono_sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
3737 was_in_nursery = ptr_in_nursery (object);
3738 copy_func ((void**)&object, queue);
3739 current->array = object;
3741 /*The array was promoted, add global remsets for key/values left behind in nursery.*/
3742 was_promoted = was_in_nursery && !ptr_in_nursery (object);
3744 DEBUG (5, fprintf (gc_debug_file, "Clearing unreachable entries for ephemeron array at %p\n", object));
3746 array = (MonoArray*)object;
3747 cur = mono_array_addr (array, Ephemeron, 0);
3748 array_end = cur + mono_array_length_fast (array);
3749 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3751 for (; cur < array_end; ++cur) {
3752 char *key = (char*)cur->key;
/* empty or already-collected slots are skipped */
3754 if (!key || key == tombstone)
3757 DEBUG (5, fprintf (gc_debug_file, "[%td] key %p (%s) value %p (%s)\n", cur - mono_array_addr (array, Ephemeron, 0),
3758 key, object_is_reachable (key, start, end) ? "reachable" : "unreachable",
3759 cur->value, cur->value && object_is_reachable (cur->value, start, end) ? "reachable" : "unreachable"));
3761 if (!object_is_reachable (key, start, end)) {
3762 cur->key = tombstone;
3768 if (ptr_in_nursery (key)) {/*key was not promoted*/
3769 DEBUG (5, fprintf (gc_debug_file, "\tAdded remset to key %p\n", key));
3770 mono_sgen_add_to_global_remset (&cur->key);
3772 if (ptr_in_nursery (cur->value)) {/*value was not promoted*/
3773 DEBUG (5, fprintf (gc_debug_file, "\tAdded remset to value %p\n", cur->value));
3774 mono_sgen_add_to_global_remset (&cur->value);
3779 current = current->next;
3783 /* LOCKING: requires that the GC lock is held */
/* LOCKING: requires that the GC lock is held */
/*
 * mark_ephemerons_in_range:
 * One iteration of the ephemeron fixpoint: for every live ephemeron
 * array in [start, end), marks the key of each entry and, when the key
 * is reachable, also marks its value.  Returns non-zero when the pass
 * marked nothing new, which signals the caller that the fixpoint has
 * been reached and the clearing pass can run.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3785 mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue)
3787 int nothing_marked = 1;
3788 EphemeronLinkNode *current = ephemeron_list;
3790 Ephemeron *cur, *array_end;
3793 for (current = ephemeron_list; current; current = current->next) {
3794 char *object = current->array;
3795 DEBUG (5, fprintf (gc_debug_file, "Ephemeron array at %p\n", object));
3797 /*We ignore arrays in old gen during minor collections since all objects are promoted by the remset machinery.*/
3798 if (object < start || object >= end)
3801 /*It has to be alive*/
3802 if (!object_is_reachable (object, start, end)) {
3803 DEBUG (5, fprintf (gc_debug_file, "\tnot reachable\n"));
3807 copy_func ((void**)&object, queue);
3809 array = (MonoArray*)object;
3810 cur = mono_array_addr (array, Ephemeron, 0);
3811 array_end = cur + mono_array_length_fast (array);
3812 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3814 for (; cur < array_end; ++cur) {
3815 char *key = cur->key;
3817 if (!key || key == tombstone)
3820 DEBUG (5, fprintf (gc_debug_file, "[%td] key %p (%s) value %p (%s)\n", cur - mono_array_addr (array, Ephemeron, 0),
3821 key, object_is_reachable (key, start, end) ? "reachable" : "unreachable",
3822 cur->value, cur->value && object_is_reachable (cur->value, start, end) ? "reachable" : "unreachable"));
3824 if (object_is_reachable (key, start, end)) {
3825 char *value = cur->value;
3827 copy_func ((void**)&cur->key, queue);
/* only mark the value if it was not already reachable: that's new work */
3829 if (!object_is_reachable (value, start, end))
3831 copy_func ((void**)&cur->value, queue);
3837 DEBUG (5, fprintf (gc_debug_file, "Ephemeron run finished. Is it done %d\n", nothing_marked));
3838 return nothing_marked;
3841 /* LOCKING: requires that the GC lock is held */
/* LOCKING: requires that the GC lock is held */
/*
 * null_link_in_range:
 * Processes the disappearing-link (weak reference) hash table for
 * @generation over [start, end).  For each link whose target is not
 * already live:
 *  - a non-tracking link to a finalization-ready object is nulled and
 *    its entry freed;
 *  - otherwise the target is copied/marked to survive, the stored
 *    (hidden) pointer is updated, and the entry is migrated from the
 *    minor to the major hash table when the object leaves the nursery.
 *    Track-resurrection is dropped for fin-ready objects so they can
 *    die in the next cycle.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3843 null_link_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, GrayQueue *queue)
3845 DisappearingLinkHashTable *hash = get_dislink_hash_table (generation);
3846 DisappearingLink **disappearing_link_hash = hash->table;
3847 int disappearing_link_hash_size = hash->size;
3848 DisappearingLink *entry, *prev;
3850 if (!hash->num_links)
3852 for (i = 0; i < disappearing_link_hash_size; ++i) {
3854 for (entry = disappearing_link_hash [i]; entry;) {
3855 char *object = DISLINK_OBJECT (entry);
3856 if (object >= start && object < end && !major.is_object_live (object)) {
3857 gboolean track = DISLINK_TRACK (entry);
3858 if (!track && object_is_fin_ready (object)) {
3859 void **p = entry->link;
3860 DisappearingLink *old;
3862 /* remove from list */
3864 prev->next = entry->next;
3866 disappearing_link_hash [i] = entry->next;
3867 DEBUG (5, fprintf (gc_debug_file, "Dislink nullified at %p to GCed object %p\n", p, object));
3869 mono_sgen_free_internal (entry, INTERNAL_MEM_DISLINK);
3874 char *copy = object;
3875 copy_func ((void**)&copy, queue);
3877 /* Update pointer if it's moved. If the object
3878 * has been moved out of the nursery, we need to
3879 * remove the link from the minor hash table to
3882 * FIXME: what if an object is moved earlier?
3885 if (hash == &minor_disappearing_link_hash && !ptr_in_nursery (copy)) {
3886 void **link = entry->link;
3887 DisappearingLink *old;
3888 /* remove from list */
3890 prev->next = entry->next;
3892 disappearing_link_hash [i] = entry->next;
3894 mono_sgen_free_internal (entry, INTERNAL_MEM_DISLINK);
/* re-register against the major generation at the object's new address */
3898 add_or_remove_disappearing_link ((MonoObject*)copy, link,
3899 track, GENERATION_OLD);
3901 DEBUG (5, fprintf (gc_debug_file, "Upgraded dislink at %p to major because object %p moved to %p\n", link, object, copy));
3905 /* We set the track resurrection bit to
3906 * FALSE if the object is to be finalized
3907 * so that the object can be collected in
3908 * the next cycle (i.e. after it was
3911 *entry->link = HIDE_POINTER (copy,
3912 object_is_fin_ready (object) ? FALSE : track);
3913 DEBUG (5, fprintf (gc_debug_file, "Updated dislink at %p to %p\n", entry->link, DISLINK_OBJECT (entry)));
3918 entry = entry->next;
3923 /* LOCKING: requires that the GC lock is held */
/* LOCKING: requires that the GC lock is held */
/*
 * null_links_for_domain:
 * During appdomain unload, walks the disappearing-link table for
 * @generation and removes every link whose target object belongs to the
 * dying domain (detected by a cleared vtable).  A still-set link is
 * nulled with a warning, since such links should have been freed by
 * the domain's cleanup before this point.
 * NOTE(review): some original source lines are elided in this excerpt.
 */
3925 null_links_for_domain (MonoDomain *domain, int generation)
3927 DisappearingLinkHashTable *hash = get_dislink_hash_table (generation);
3928 DisappearingLink **disappearing_link_hash = hash->table;
3929 int disappearing_link_hash_size = hash->size;
3930 DisappearingLink *entry, *prev;
3932 for (i = 0; i < disappearing_link_hash_size; ++i) {
3934 for (entry = disappearing_link_hash [i]; entry; ) {
3935 char *object = DISLINK_OBJECT (entry);
3936 if (object && !((MonoObject*)object)->vtable) {
3937 DisappearingLink *next = entry->next;
3942 disappearing_link_hash [i] = next;
3944 if (*(entry->link)) {
3945 *(entry->link) = NULL;
3946 g_warning ("Disappearing link %p not freed", entry->link);
3948 mono_sgen_free_internal (entry, INTERNAL_MEM_DISLINK);
3955 entry = entry->next;
/* LOCKING: requires that the GC lock is held */
/*
 * Move up to @out_size finalizable objects belonging to @domain from
 * @hash_table into @out_array, unregistering them.  Returns (in the lost
 * trailing lines) the number of objects stored.
 * NOTE(review): extract is missing interior lines (early `return 0`,
 * `prev`/`count` bookkeeping, loop closers); tokens preserved as found.
 */
finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size,
	FinalizeEntryHashTable *hash_table)
	FinalizeEntry **finalizable_hash = hash_table->table;
	mword finalizable_hash_size = hash_table->size;
	FinalizeEntry *entry, *prev;
	/* nothing to do if finalization is disabled or the output buffer is empty */
	if (no_finalize || !out_size || !out_array)
	for (i = 0; i < finalizable_hash_size; ++i) {
		for (entry = finalizable_hash [i]; entry;) {
			if (mono_object_domain (entry->object) == domain) {
				FinalizeEntry *next;
				/* remove and put in out_array */
				prev->next = entry->next;
				finalizable_hash [i] = entry->next;
				hash_table->num_registered--;
				out_array [count ++] = entry->object;
				DEBUG (5, fprintf (gc_debug_file, "Collecting object for finalization: %p (%s) (%d/%d)\n", entry->object, safe_name (entry->object), num_ready_finalizers, hash_table->num_registered));
				/* stop as soon as the caller's buffer is full */
				if (count == out_size)
			entry = entry->next;
/*
 * mono_gc_finalizers_for_domain:
 * @domain: the unloading appdomain
 * @out_array: output array
 * @out_size: size of output array
 *
 * Store inside @out_array up to @out_size objects that belong to the unloading
 * appdomain @domain. Returns the number of stored items. Can be called repeatedly
 * until it returns 0.
 * The items are removed from the finalizer data structure, so the caller is supposed
 * to finalize them.
 * @out_array should be on the stack to allow the GC to know the objects are still alive.
 */
mono_gc_finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size)
	/* drain the nursery table first, then top up from the major table */
	result = finalizers_for_domain (domain, out_array, out_size, &minor_finalizable_hash);
	if (result < out_size) {
		result += finalizers_for_domain (domain, out_array + result, out_size - result,
			&major_finalizable_hash);
/*
 * Register @obj for finalization in the table of @generation, or — if it is
 * already present — unregister it (toggling semantics used by
 * GC.ReRegisterForFinalize / GC.SuppressFinalize).
 * NOTE(review): extract is missing interior lines (the `unsigned int hash`
 * declaration, `prev` bookkeeping, early returns, closing braces); tokens
 * preserved exactly as found.
 */
register_for_finalization (MonoObject *obj, void *user_data, int generation)
	FinalizeEntryHashTable *hash_table = get_finalize_entry_hash_table (generation);
	FinalizeEntry **finalizable_hash;
	mword finalizable_hash_size;
	FinalizeEntry *entry, *prev;
	/* only the default finalizer callback is supported */
	g_assert (user_data == NULL || user_data == mono_gc_run_finalize);
	hash = mono_object_hash (obj);
	/* table may grow, so re-read pointer and size afterwards */
	rehash_fin_table_if_necessary (hash_table);
	finalizable_hash = hash_table->table;
	finalizable_hash_size = hash_table->size;
	hash %= finalizable_hash_size;
	for (entry = finalizable_hash [hash]; entry; entry = entry->next) {
		if (entry->object == obj) {
			/* remove from the list */
			prev->next = entry->next;
			finalizable_hash [hash] = entry->next;
			hash_table->num_registered--;
			DEBUG (5, fprintf (gc_debug_file, "Removed finalizer %p for object: %p (%s) (%d)\n", entry, obj, obj->vtable->klass->name, hash_table->num_registered));
			mono_sgen_free_internal (entry, INTERNAL_MEM_FINALIZE_ENTRY);
	/* request to deregister, but already out of the list */
	/* not found: add a fresh entry at the head of the bucket */
	entry = mono_sgen_alloc_internal (INTERNAL_MEM_FINALIZE_ENTRY);
	entry->object = obj;
	entry->next = finalizable_hash [hash];
	finalizable_hash [hash] = entry;
	hash_table->num_registered++;
	DEBUG (5, fprintf (gc_debug_file, "Added finalizer %p for object: %p (%s) (%d) to %s table\n", entry, obj, obj->vtable->klass->name, hash_table->num_registered, generation_name (generation)));
4078 mono_gc_register_for_finalization (MonoObject *obj, void *user_data)
4080 if (ptr_in_nursery (obj))
4081 register_for_finalization (obj, user_data, GENERATION_NURSERY);
4083 register_for_finalization (obj, user_data, GENERATION_OLD);
/*
 * Grow @hash_table to a prime size proportional to the number of registered
 * links and redistribute all entries by re-hashing their link address.
 * NOTE(review): extract is missing the `int i; unsigned int hash;`
 * declarations and loop-closing braces; tokens preserved as found.
 */
rehash_dislink (DisappearingLinkHashTable *hash_table)
	DisappearingLink **disappearing_link_hash = hash_table->table;
	int disappearing_link_hash_size = hash_table->size;
	DisappearingLink **new_hash;
	DisappearingLink *entry, *next;
	int new_size = g_spaced_primes_closest (hash_table->num_links);
	new_hash = mono_sgen_alloc_internal_dynamic (new_size * sizeof (DisappearingLink*), INTERNAL_MEM_DISLINK_TABLE);
	for (i = 0; i < disappearing_link_hash_size; ++i) {
		/* `next` must be saved before the entry is pushed onto the new bucket */
		for (entry = disappearing_link_hash [i]; entry; entry = next) {
			hash = mono_aligned_addr_hash (entry->link) % new_size;
			entry->next = new_hash [hash];
			new_hash [hash] = entry;
	mono_sgen_free_internal_dynamic (disappearing_link_hash,
		disappearing_link_hash_size * sizeof (DisappearingLink*), INTERNAL_MEM_DISLINK_TABLE);
	hash_table->table = new_hash;
	hash_table->size = new_size;
/* LOCKING: assumes the GC lock is held */
/*
 * Register, update, or (when @obj is NULL) remove the disappearing link
 * stored at @link in the hash table of @generation.  The pointer stored
 * through @link is hidden (HIDE_POINTER) together with the @track
 * resurrection bit so the GC does not treat it as a strong reference.
 * NOTE(review): extract is missing interior lines (`unsigned int hash`,
 * `prev` updates, returns, closing braces); tokens preserved as found.
 */
add_or_remove_disappearing_link (MonoObject *obj, void **link, gboolean track, int generation)
	DisappearingLinkHashTable *hash_table = get_dislink_hash_table (generation);
	DisappearingLink *entry, *prev;
	DisappearingLink **disappearing_link_hash = hash_table->table;
	int disappearing_link_hash_size = hash_table->size;
	/* grow at a load factor of 2 and re-read table/size afterwards */
	if (hash_table->num_links >= disappearing_link_hash_size * 2) {
		rehash_dislink (hash_table);
		disappearing_link_hash = hash_table->table;
		disappearing_link_hash_size = hash_table->size;
	/* FIXME: add check that link is not in the heap */
	hash = mono_aligned_addr_hash (link) % disappearing_link_hash_size;
	entry = disappearing_link_hash [hash];
	for (; entry; entry = entry->next) {
		/* link already added */
		if (link == entry->link) {
			/* NULL obj means remove */
			prev->next = entry->next;
			disappearing_link_hash [hash] = entry->next;
			hash_table->num_links--;
			DEBUG (5, fprintf (gc_debug_file, "Removed dislink %p (%d) from %s table\n", entry, hash_table->num_links, generation_name (generation)));
			mono_sgen_free_internal (entry, INTERNAL_MEM_DISLINK);
			*link = HIDE_POINTER (obj, track); /* we allow the change of object */
	/* fresh link: allocate an entry and push it onto its bucket */
	entry = mono_sgen_alloc_internal (INTERNAL_MEM_DISLINK);
	*link = HIDE_POINTER (obj, track);
	entry->next = disappearing_link_hash [hash];
	disappearing_link_hash [hash] = entry;
	hash_table->num_links++;
	DEBUG (5, fprintf (gc_debug_file, "Added dislink %p for object: %p (%s) at %p to %s table\n", entry, obj, obj->vtable->klass->name, link, generation_name (generation)));
/* LOCKING: assumes the GC lock is held */
/*
 * (Re)register the weak reference stored at @link for @obj.  The link is
 * first removed from both generation tables, then re-added to the table
 * matching the object's current location — so a link can only ever live in
 * one table at a time.
 * NOTE(review): extract is missing the surrounding `if (obj)`/`else`
 * structure; tokens preserved as found.
 */
mono_gc_register_disappearing_link (MonoObject *obj, void **link, gboolean track)
	add_or_remove_disappearing_link (NULL, link, FALSE, GENERATION_NURSERY);
	add_or_remove_disappearing_link (NULL, link, FALSE, GENERATION_OLD);
	if (ptr_in_nursery (obj))
		add_or_remove_disappearing_link (obj, link, track, GENERATION_NURSERY);
		add_or_remove_disappearing_link (obj, link, track, GENERATION_OLD);
/*
 * Run pending finalizers one at a time, taking each object off the ready or
 * critical queue before invoking mono_gc_run_finalize on it.  The entry for
 * the previous iteration is removed under the GC lock at the top of the loop.
 * NOTE(review): extract is missing interior lines (LOCK/UNLOCK pairs, the
 * `MonoObject *obj` declaration, loop closers); tokens preserved as found.
 */
mono_gc_invoke_finalizers (void)
	FinalizeEntry *entry = NULL;
	gboolean entry_is_critical = FALSE;
	/* FIXME: batch to reduce lock contention */
	while (fin_ready_list || critical_fin_list) {
		FinalizeEntry **list = entry_is_critical ? &critical_fin_list : &fin_ready_list;
		/* We have finalized entry in the last
		   iteration, now we need to remove it from
		   the list. */
		*list = entry->next;
		/* entry is not at the head: walk to its predecessor and splice it out */
		FinalizeEntry *e = *list;
		while (e->next != entry)
		e->next = entry->next;
		mono_sgen_free_internal (entry, INTERNAL_MEM_FINALIZE_ENTRY);
		/* Now look for the first non-null entry. */
		for (entry = fin_ready_list; entry && !entry->object; entry = entry->next)
		entry_is_critical = FALSE;
		/* ready list exhausted: fall back to the critical-finalizer list */
		entry_is_critical = TRUE;
		for (entry = critical_fin_list; entry && !entry->object; entry = entry->next)
		g_assert (entry->object);
		num_ready_finalizers--;
		/* clear entry->object so a nested collection won't re-queue it */
		obj = entry->object;
		entry->object = NULL;
		DEBUG (7, fprintf (gc_debug_file, "Finalizing object %p (%s)\n", obj, safe_name (obj)));
		g_assert (entry->object == NULL);
		/* the object is on the stack so it is pinned */
		/*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
		mono_gc_run_finalize (obj, NULL);
4240 mono_gc_pending_finalizers (void)
4242 return fin_ready_list || critical_fin_list;
/* Negative value to remove */
/*
 * Adjust the externally reported memory pressure by @value bytes.
 * NOTE(review): extract is missing the lines around the increment —
 * presumably the GC lock acquire/release — confirm against the full source.
 */
mono_gc_add_memory_pressure (gint64 value)
	/* FIXME: Use interlocked functions */
	memory_pressure += value;
4256 mono_sgen_register_major_sections_alloced (int num_sections)
4258 minor_collection_sections_alloced += num_sections;
/* Accessor for the current minor-collection allowance (bytes allowed before
 * the next minor collection is forced). */
mono_sgen_get_minor_collection_allowance (void)
	return minor_collection_allowance;
4268 * ######################################################################
4269 * ######## registered roots support
4270 * ######################################################################
/*
 * Grow the root hash table for the @pinned category to a prime size based on
 * the current entry count and redistribute all records by their start address.
 * NOTE(review): extract is missing `int i; unsigned int hash; int new_size`
 * declarations and the loop-closing braces; tokens preserved as found.
 */
rehash_roots (gboolean pinned)
	RootRecord **new_hash;
	RootRecord *entry, *next;
	new_size = g_spaced_primes_closest (num_roots_entries [pinned]);
	new_hash = mono_sgen_alloc_internal_dynamic (new_size * sizeof (RootRecord*), INTERNAL_MEM_ROOTS_TABLE);
	for (i = 0; i < roots_hash_size [pinned]; ++i) {
		/* `next` is captured before the record is re-linked into the new table */
		for (entry = roots_hash [pinned][i]; entry; entry = next) {
			hash = mono_aligned_addr_hash (entry->start_root) % new_size;
			entry->next = new_hash [hash];
			new_hash [hash] = entry;
	mono_sgen_free_internal_dynamic (roots_hash [pinned], roots_hash_size [pinned] * sizeof (RootRecord*), INTERNAL_MEM_ROOTS_TABLE);
	roots_hash [pinned] = new_hash;
	roots_hash_size [pinned] = new_size;
/*
 * Look up the RootRecord for the root starting at @start in the table for
 * @root_type, using the precomputed @addr_hash.  Returns the record or NULL
 * (the return statements are among the lines lost in this extract).
 */
find_root (int root_type, char *start, guint32 addr_hash)
	RootRecord *new_root;
	guint32 hash = addr_hash % roots_hash_size [root_type];
	for (new_root = roots_hash [root_type][hash]; new_root; new_root = new_root->next) {
		/* we allow changing the size and the descriptor (for thread statics etc) */
		if (new_root->start_root == start) {
/*
 * We do not coalesce roots.
 *
 * Register the memory range [@start, @start + @size) as a GC root of
 * @root_type with layout descriptor @descr.  If the same start address is
 * already registered (in any root type), only its size and descriptor are
 * updated.  NOTE(review): extract is missing interior lines (locking, the
 * early-return path after updating an existing root, closing braces);
 * tokens preserved as found.
 */
mono_gc_register_root_inner (char *start, size_t size, void *descr, int root_type)
	RootRecord *new_root;
	unsigned int hash, addr_hash = mono_aligned_addr_hash (start);
	/* grow any table that exceeds a load factor of 2 */
	for (i = 0; i < ROOT_TYPE_NUM; ++i) {
		if (num_roots_entries [i] >= roots_hash_size [i] * 2)
	for (i = 0; i < ROOT_TYPE_NUM; ++i) {
		new_root = find_root (i, start, addr_hash);
		/* we allow changing the size and the descriptor (for thread statics etc) */
			size_t old_size = new_root->end_root - new_root->start_root;
			new_root->end_root = new_root->start_root + size;
			/* a precise root must stay precise and a conservative one conservative */
			g_assert (((new_root->root_desc != 0) && (descr != NULL)) ||
				((new_root->root_desc == 0) && (descr == NULL)));
			new_root->root_desc = (mword)descr;
			roots_size -= old_size;
	/* not found: allocate and link a fresh record */
	new_root = mono_sgen_alloc_internal (INTERNAL_MEM_ROOT_RECORD);
	new_root->start_root = start;
	new_root->end_root = new_root->start_root + size;
	new_root->root_desc = (mword)descr;
	hash = addr_hash % roots_hash_size [root_type];
	num_roots_entries [root_type]++;
	new_root->next = roots_hash [root_type] [hash];
	roots_hash [root_type][hash] = new_root;
	DEBUG (3, fprintf (gc_debug_file, "Added root %p for range: %p-%p, descr: %p (%d/%d bytes)\n", new_root, new_root->start_root, new_root->end_root, descr, (int)size, (int)roots_size));
4362 mono_gc_register_root (char *start, size_t size, void *descr)
4364 return mono_gc_register_root_inner (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED);
/* Register a root whose stores go through the write barrier (ROOT_TYPE_WBARRIER),
 * so it is tracked via remembered sets rather than rescanned wholesale. */
mono_gc_register_root_wbarrier (char *start, size_t size, void *descr)
	return mono_gc_register_root_inner (start, size, descr, ROOT_TYPE_WBARRIER);
/*
 * Remove the root registered at @addr from whichever root-type table holds
 * it, releasing its record and bookkeeping.
 * NOTE(review): extract is missing interior lines (locking, the
 * `prev`/`while (tmp)` walk, `break`/closing braces); tokens preserved as found.
 */
mono_gc_deregister_root (char* addr)
	RootRecord *tmp, *prev;
	unsigned int hash, addr_hash = mono_aligned_addr_hash (addr);
	/* the root type is not recorded by the caller, so search all tables */
	for (root_type = 0; root_type < ROOT_TYPE_NUM; ++root_type) {
		hash = addr_hash % roots_hash_size [root_type];
		tmp = roots_hash [root_type][hash];
			if (tmp->start_root == (char*)addr) {
				prev->next = tmp->next;
				roots_hash [root_type][hash] = tmp->next;
				roots_size -= (tmp->end_root - tmp->start_root);
				num_roots_entries [root_type]--;
				DEBUG (3, fprintf (gc_debug_file, "Removed root %p for range: %p-%p\n", tmp, tmp->start_root, tmp->end_root));
				mono_sgen_free_internal (tmp, INTERNAL_MEM_ROOT_RECORD);
4405 * ######################################################################
4406 * ######## Thread handling (stop/start code)
4407 * ######################################################################
4410 /* FIXME: handle large/small config */
4411 #define HASH_PTHREAD_T(id) (((unsigned int)(id) >> 4) * 2654435761u)
4413 static SgenThreadInfo* thread_table [THREAD_HASH_SIZE];
4415 #if USE_SIGNAL_BASED_START_STOP_WORLD
4417 static MonoSemType suspend_ack_semaphore;
4418 static MonoSemType *suspend_ack_semaphore_ptr;
4419 static unsigned int global_stop_count = 0;
4421 static sigset_t suspend_signal_mask;
4422 static mword cur_thread_regs [ARCH_NUM_REGS] = {0};
/* LOCKING: assumes the GC lock is held */
/* Expose the per-thread info hash table to other sgen modules. */
mono_sgen_get_thread_table (void)
	return thread_table;
/*
 * Find the SgenThreadInfo for thread @id by walking its hash bucket.
 * NOTE(review): the `info = info->next;` advance and the `return info;`
 * are among the lines lost in this extract.
 */
mono_sgen_thread_info_lookup (ARCH_THREAD_TYPE id)
	unsigned int hash = HASH_PTHREAD_T (id) % THREAD_HASH_SIZE;
	SgenThreadInfo *info;
	info = thread_table [hash];
	while (info && !ARCH_THREAD_EQUALS (info->id, id)) {
/*
 * Record the current thread's stack top and register snapshot in its
 * SgenThreadInfo, so the collecting thread can scan this (the initiating)
 * thread without a signal.  Called by the thread that starts a collection.
 */
update_current_thread_stack (void *start)
	void *ptr = cur_thread_regs;
	SgenThreadInfo *info = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());
	/* a local's address approximates the live top of this thread's stack */
	info->stack_start = align_pointer (&ptr);
	g_assert (info->stack_start >= info->stack_start_limit && info->stack_start < info->stack_end);
	ARCH_STORE_REGS (ptr);
	info->stopped_regs = ptr;
	if (gc_callbacks.thread_suspend_func)
		gc_callbacks.thread_suspend_func (info->runtime_data, NULL);
4459 * Define this and use the "xdomain-checks" MONO_GC_DEBUG option to
4460 * have cross-domain checks in the write barrier.
4462 //#define XDOMAIN_CHECKS_IN_WBARRIER
4464 #ifndef SGEN_BINARY_PROTOCOL
4465 #ifndef HEAVY_STATISTICS
4466 #define MANAGED_ALLOCATION
4467 #ifndef XDOMAIN_CHECKS_IN_WBARRIER
4468 #define MANAGED_WBARRIER
4474 is_ip_in_managed_allocator (MonoDomain *domain, gpointer ip);
/*
 * Block until @count threads have posted the suspend-ack semaphore,
 * retrying the wait when it is interrupted by a signal (EINTR).
 */
mono_sgen_wait_for_suspend_ack (int count)
	for (i = 0; i < count; ++i) {
		while ((result = MONO_SEM_WAIT (suspend_ack_semaphore_ptr)) != 0) {
			/* any failure other than an interrupted wait is fatal */
			if (errno != EINTR) {
				g_error ("sem_wait ()");
/*
 * After the initial suspend, some threads may have been stopped in the middle
 * of the managed allocator (or inside a critical region), where the heap is
 * not in a scannable state.  Restart exactly those threads, let them run out
 * of the allocator, and suspend them again — repeating with growing sleeps
 * until no thread is inside the allocator.  Returns the number of threads
 * that died during the dance.
 * NOTE(review): extract is missing interior lines (restart_count/restarted_count
 * increments, `info->skip` handling, loop closers); tokens preserved as found.
 */
restart_threads_until_none_in_managed_allocator (void)
	SgenThreadInfo *info;
	int i, result, num_threads_died = 0;
	int sleep_duration = -1;
		int restart_count = 0, restarted_count = 0;
		/* restart all threads that stopped in the
		   managed allocator */
		for (i = 0; i < THREAD_HASH_SIZE; ++i) {
			for (info = thread_table [i]; info; info = info->next) {
				if (!info->stack_start || info->in_critical_region ||
					is_ip_in_managed_allocator (info->stopped_domain, info->stopped_ip)) {
					binary_protocol_thread_restart ((gpointer)info->id);
#if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
					result = thread_resume (pthread_mach_thread_np (info->id));
					result = pthread_kill (info->id, restart_signal_num);
					/* we set the stopped_ip to
					   NULL for threads which
					   we're not restarting so
					   that we can easily identify
					   the others */
					info->stopped_ip = NULL;
					info->stopped_domain = NULL;
		/* if no threads were restarted, we're done */
		if (restart_count == 0)
#if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
		/* mach thread_resume is synchronous so we dont need to wait for them */
		/* wait for the threads to signal their restart */
		mono_sgen_wait_for_suspend_ack (restart_count);
		/* back off: first pass yields, later passes sleep progressively longer */
		if (sleep_duration < 0) {
			g_usleep (sleep_duration);
			sleep_duration += 10;
		/* stop them again */
		for (i = 0; i < THREAD_HASH_SIZE; ++i) {
			for (info = thread_table [i]; info; info = info->next) {
				if (info->skip || info->stopped_ip == NULL)
#if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
				result = thread_suspend (pthread_mach_thread_np (info->id));
				result = pthread_kill (info->id, suspend_signal_num);
		/* some threads might have died */
		num_threads_died += restart_count - restarted_count;
#if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
		/* mach thread_resume is synchronous so we dont need to wait for them */
		/* wait for the threads to signal their suspension
		   again */
		mono_sgen_wait_for_suspend_ack (restart_count);
	return num_threads_died;
/* LOCKING: assumes the GC lock is held (by the stopping thread) */
/*
 * Signal handler run in each mutator thread when the world is being stopped.
 * Records the thread's stopped IP/domain, stack top and registers in its
 * SgenThreadInfo, acks the suspend via the semaphore, then blocks in
 * sigsuspend until the restart signal arrives, and acks the resume.
 * NOTE(review): extract is missing interior lines (the `int stop_count;
 * ARCH_THREAD_TYPE id;` declarations, errno restore, closing braces);
 * tokens preserved as found.
 */
suspend_handler (int sig, siginfo_t *siginfo, void *context)
	SgenThreadInfo *info;
	int old_errno = errno;
	gpointer regs [ARCH_NUM_REGS];
	gpointer stack_start;
	ucontext_t *ctx = (ucontext_t*)context;
	id = pthread_self ();
	info = mono_sgen_thread_info_lookup (id);
	info->stopped_domain = mono_domain_get ();
	info->stopped_ip = (gpointer) ARCH_SIGCTX_IP (ctx);
	stop_count = global_stop_count;
	/* duplicate signal */
	/* NOTE(review): the `0 &&` deliberately disables the duplicate-signal
	 * early-out — confirm this is still intended before re-enabling. */
	if (0 && info->stop_count == stop_count) {
#ifdef HAVE_KW_THREAD
	/* update the remset info in the thread data structure */
	info->remset = remembered_set;
	stack_start = (char*) ARCH_SIGCTX_SP (ctx) - REDZONE_SIZE;
	/* If stack_start is not within the limits, then don't set it
	   in info and we will be restarted. */
	/* NOTE(review): the second comparand reads `info->stack_start` rather
	 * than the freshly computed `stack_start` — looks like a bug; confirm
	 * against upstream (later versions compare stack_start <= stack_end). */
	if (stack_start >= info->stack_start_limit && info->stack_start <= info->stack_end) {
		info->stack_start = stack_start;
		ARCH_COPY_SIGCTX_REGS (regs, ctx);
		info->stopped_regs = regs;
		g_assert (!info->stack_start);
	/* Notify the JIT */
	if (gc_callbacks.thread_suspend_func)
		gc_callbacks.thread_suspend_func (info->runtime_data, ctx);
	DEBUG (4, fprintf (gc_debug_file, "Posting suspend_ack_semaphore for suspend from %p %p\n", info, (gpointer)ARCH_GET_THREAD ()));
	/* notify the waiting thread */
	MONO_SEM_POST (suspend_ack_semaphore_ptr);
	info->stop_count = stop_count;
	/* wait until we receive the restart signal */
		sigsuspend (&suspend_signal_mask);
	} while (info->signal != restart_signal_num);
	DEBUG (4, fprintf (gc_debug_file, "Posting suspend_ack_semaphore for resume from %p %p\n", info, (gpointer)ARCH_GET_THREAD ()));
	/* notify the waiting thread */
	MONO_SEM_POST (suspend_ack_semaphore_ptr);
/*
 * Signal handler for the restart signal: marks the thread's info so the
 * sigsuspend loop in suspend_handler can exit.  errno is saved on entry
 * (its restore is among the lines lost in this extract).
 */
restart_handler (int sig)
	SgenThreadInfo *info;
	int old_errno = errno;
	info = mono_sgen_thread_info_lookup (pthread_self ());
	info->signal = restart_signal_num;
	DEBUG (4, fprintf (gc_debug_file, "Restart handler in %p %p\n", info, (gpointer)ARCH_GET_THREAD ()));
/* Take the auxiliary locks needed before suspending the world (pairs with
 * release_gc_locks).  NOTE(review): body lost in this extract. */
acquire_gc_locks (void)
/* Release the locks taken by acquire_gc_locks once the world is restarting. */
release_gc_locks (void)
	UNLOCK_INTERRUPTION;
4664 static TV_DECLARE (stop_world_time);
4665 static unsigned long max_pause_usec = 0;
/* LOCKING: assumes the GC lock is held */
/*
 * NOTE(review): the function-name line was lost in this extract — from
 * context this is the body of stop_world ().  It records the collector
 * thread's own stack, signals every other thread to suspend, and keeps
 * restarting/re-suspending threads caught inside the managed allocator.
 */
	acquire_gc_locks ();
	/* the collecting thread scans itself; snapshot its stack and registers */
	update_current_thread_stack (&count);
	global_stop_count++;
	DEBUG (3, fprintf (gc_debug_file, "stopping world n %d from %p %p\n", global_stop_count, mono_sgen_thread_info_lookup (ARCH_GET_THREAD ()), (gpointer)ARCH_GET_THREAD ()));
	TV_GETTIME (stop_world_time);
	count = mono_sgen_thread_handshake (suspend_signal_num);
	count -= restart_threads_until_none_in_managed_allocator ();
	g_assert (count >= 0);
	DEBUG (3, fprintf (gc_debug_file, "world stopped %d thread(s)\n", count));
/* LOCKING: assumes the GC lock is held */
/*
 * Resume all suspended threads after a collection: flush pending
 * object-move events to the profiler, clear each thread's recorded stack
 * snapshot, release auxiliary locks, send the restart signal, and update
 * the max-pause statistic.
 * NOTE(review): extract is missing interior lines (`int count, i;`,
 * `unsigned long usec;`, closing braces); tokens preserved as found.
 */
restart_world (void)
	SgenThreadInfo *info;
	TV_DECLARE (end_sw);
	/* notify the profiler of the leftovers */
	if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_GC_MOVES)) {
		if (moved_objects_idx) {
			mono_profiler_gc_moves (moved_objects, moved_objects_idx);
			moved_objects_idx = 0;
	/* stale stack/register snapshots must not survive into the next STW */
	for (i = 0; i < THREAD_HASH_SIZE; ++i) {
		for (info = thread_table [i]; info; info = info->next) {
			info->stack_start = NULL;
			info->stopped_regs = NULL;
	release_gc_locks ();
	count = mono_sgen_thread_handshake (restart_signal_num);
	TV_GETTIME (end_sw);
	usec = TV_ELAPSED (stop_world_time, end_sw);
	max_pause_usec = MAX (usec, max_pause_usec);
	DEBUG (2, fprintf (gc_debug_file, "restarted %d thread(s) (pause time: %d usec, max: %d)\n", count, (int)usec, (int)max_pause_usec));
4720 #endif /* USE_SIGNAL_BASED_START_STOP_WORLD */
/* Install the runtime's GC callback table (copied by value). */
mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
	gc_callbacks = *callbacks;
/* Return a pointer to the installed GC callback table. */
mono_gc_get_gc_callbacks ()
	return &gc_callbacks;
4734 /* Variables holding start/end nursery so it won't have to be passed at every call */
4735 static void *scan_area_arg_start, *scan_area_arg_end;
/* Precise-stack-mark callback: conservatively pin anything in [start, end)
 * that looks like a pointer into the nursery range stashed in
 * scan_area_arg_start/scan_area_arg_end. */
mono_gc_conservatively_scan_area (void *start, void *end)
	conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
/*
 * Copy/mark a single object reference for the current collection phase.
 * The leading g_assert_not_reached makes the rest of the body unreachable —
 * this entry point is deliberately disabled in this version.
 */
mono_gc_scan_object (void *obj)
	g_assert_not_reached ();
	if (current_collection_generation == GENERATION_NURSERY)
		major.copy_object (&obj, &gray_queue);
		major.copy_or_mark_object (&obj, &gray_queue);
/*
 * Mark from thread stacks and registers.
 *
 * For every registered thread, either let the JIT's precise mark callback
 * walk the stack or pin conservatively; stopped registers are always
 * scanned conservatively.  NOTE(review): extract is missing interior lines
 * (`int i;`, the skip branch, closing braces); tokens preserved as found.
 */
scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise)
	SgenThreadInfo *info;
	/* stash the nursery bounds for mono_gc_conservatively_scan_area */
	scan_area_arg_start = start_nursery;
	scan_area_arg_end = end_nursery;
	for (i = 0; i < THREAD_HASH_SIZE; ++i) {
		for (info = thread_table [i]; info; info = info->next) {
			DEBUG (3, fprintf (gc_debug_file, "Skipping dead thread %p, range: %p-%p, size: %td\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start));
			DEBUG (3, fprintf (gc_debug_file, "Scanning thread %p, range: %p-%p, size: %td, pinned=%d\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, next_pin_slot));
			if (gc_callbacks.thread_mark_func && !conservative_stack_mark)
				gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise);
				conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
			conservatively_pin_objects_from (info->stopped_regs, info->stopped_regs + ARCH_NUM_REGS,
				start_nursery, end_nursery, PIN_TYPE_STACK);
/*
 * Debug helper: report which thread-stack word points into the object at
 * [obj, obj + size), to explain why the object got pinned.
 */
find_pinning_ref_from_thread (char *obj, size_t size)
	SgenThreadInfo *info;
	char *endobj = obj + size;
	for (i = 0; i < THREAD_HASH_SIZE; ++i) {
		for (info = thread_table [i]; info; info = info->next) {
			char **start = (char**)info->stack_start;
			/* scan every word of the stopped stack for an interior pointer */
			while (start < (char**)info->stack_end) {
				if (*start >= obj && *start < endobj) {
					DEBUG (0, fprintf (gc_debug_file, "Object %p referenced in thread %p (id %p) at %p, stack: %p-%p\n", obj, info, (gpointer)info->id, start, info->stack_start, info->stack_end));
	/* FIXME: check info->stopped_regs */
/*
 * TRUE if @ptr lies within the current thread's stack, between the address
 * of this function's own local (approximate stack top) and the recorded
 * stack end.  The TRUE/FALSE returns are among the lines lost in this extract.
 */
ptr_on_stack (void *ptr)
	gpointer stack_start = &stack_start;
	SgenThreadInfo *info = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());
	if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
/*
 * Process one remembered-set entry at @p, copying/scanning the referenced
 * nursery objects into @queue, and return (in lines lost to this extract)
 * the pointer to the next entry.  @global distinguishes the global remset
 * (whose entries are never re-added to it).
 * NOTE(review): extract is missing the REMSET_RANGE/REMSET_OBJECT case
 * labels, the `count`/`desc` loads, returns and break statements;
 * tokens preserved as found.
 */
handle_remset (mword *p, void *start_nursery, void *end_nursery, gboolean global, GrayQueue *queue)
	HEAVY_STAT (++stat_global_remsets_processed);
	HEAVY_STAT (++stat_local_remsets_processed);
	/* FIXME: exclude stack locations */
	switch ((*p) & REMSET_TYPE_MASK) {
	case REMSET_LOCATION:
		//__builtin_prefetch (ptr);
		/* only locations outside the nursery are interesting */
		if (((void*)ptr < start_nursery || (void*)ptr >= end_nursery)) {
			gpointer old = *ptr;
			major.copy_object (ptr, queue);
			DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p\n", ptr, *ptr));
			binary_protocol_ptr_update (ptr, old, *ptr, (gpointer)LOAD_VTABLE (*ptr), safe_object_get_size (*ptr));
			/* target stayed in the nursery (pinned): keep a global remset entry */
			if (!global && *ptr >= start_nursery && *ptr < end_nursery) {
				/*
				 * If the object is pinned, each reference to it from nonpinned objects
				 * becomes part of the global remset, which can grow very large.
				 */
				DEBUG (9, fprintf (gc_debug_file, "Add to global remset because of pinning %p (%p %s)\n", ptr, *ptr, safe_name (*ptr)));
				mono_sgen_add_to_global_remset (ptr);
		DEBUG (9, fprintf (gc_debug_file, "Skipping remset at %p holding %p\n", ptr, *ptr));
		/* range entry: a run of `count` consecutive locations */
		ptr = (void**)(*p & ~REMSET_TYPE_MASK);
		if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
		while (count-- > 0) {
			major.copy_object (ptr, queue);
			DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p (count: %d)\n", ptr, *ptr, (int)count));
			if (!global && *ptr >= start_nursery && *ptr < end_nursery)
				mono_sgen_add_to_global_remset (ptr);
		/* whole-object entry: rescan the object for nursery references */
		ptr = (void**)(*p & ~REMSET_TYPE_MASK);
		if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
		major.minor_scan_object ((char*)ptr, queue);
	case REMSET_VTYPE: {
		/* valuetype array entry: scan via the element descriptor */
		ptr = (void**)(*p & ~REMSET_TYPE_MASK);
		if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
		ptr = (void**) major.minor_scan_vtype ((char*)ptr, desc, start_nursery, end_nursery, queue);
	g_assert_not_reached ();
#ifdef HEAVY_STATISTICS
/*
 * HEAVY_STATISTICS helper: copy the location addresses of @remset into the
 * buffer at @bumper, tallying how many repeat the immediately preceding one
 * or two entries.  Returns (lost line) the advanced bumper.
 * NOTE(review): extract is missing the `last`/`last1`/`last2` locals, the
 * other case labels and the p-advance lines; tokens preserved as found.
 */
collect_store_remsets (RememberedSet *remset, mword *bumper)
	mword *p = remset->data;
	while (p < remset->store_next) {
		switch ((*p) & REMSET_TYPE_MASK) {
		case REMSET_LOCATION:
			/* duplicate of the previous entry */
			++stat_saved_remsets_1;
			/* duplicate of one of the two previous distinct entries */
			if (*p == last1 || *p == last2) {
				++stat_saved_remsets_2;
	g_assert_not_reached ();
/*
 * NOTE(review): the function-name line was lost in this extract — this is a
 * HEAVY_STATISTICS routine that sizes a buffer from all remsets (per-thread,
 * freed-thread and global), gathers every store address with
 * collect_store_remsets, sorts them, and counts total vs. unique addresses.
 */
	RememberedSet *remset;
	SgenThreadInfo *info;
	mword *addresses, *bumper, *p, *r;
	/* first pass: compute the total number of remset words */
	for (i = 0; i < THREAD_HASH_SIZE; ++i) {
		for (info = thread_table [i]; info; info = info->next) {
			for (remset = info->remset; remset; remset = remset->next)
				size += remset->store_next - remset->data;
	for (remset = freed_thread_remsets; remset; remset = remset->next)
		size += remset->store_next - remset->data;
	for (remset = global_remset; remset; remset = remset->next)
		size += remset->store_next - remset->data;
	bumper = addresses = mono_sgen_alloc_internal_dynamic (sizeof (mword) * size, INTERNAL_MEM_STATISTICS);
	/* second pass: collect the addresses themselves */
	for (i = 0; i < THREAD_HASH_SIZE; ++i) {
		for (info = thread_table [i]; info; info = info->next) {
			for (remset = info->remset; remset; remset = remset->next)
				bumper = collect_store_remsets (remset, bumper);
	for (remset = global_remset; remset; remset = remset->next)
		bumper = collect_store_remsets (remset, bumper);
	for (remset = freed_thread_remsets; remset; remset = remset->next)
		bumper = collect_store_remsets (remset, bumper);
	g_assert (bumper <= addresses + size);
	stat_store_remsets += bumper - addresses;
	/* sort so duplicates become adjacent, then count unique entries */
	sort_addresses ((void**)addresses, bumper - addresses);
	while (r < bumper) {
	stat_store_remsets_unique += p - addresses;
	mono_sgen_free_internal_dynamic (addresses, sizeof (mword) * size, INTERNAL_MEM_STATISTICS);
/* Reset a thread's generic-store remset buffer: zero the index and wipe the
 * buffer contents so stale entries cannot be replayed. */
clear_thread_store_remset_buffer (SgenThreadInfo *info)
	*info->store_remset_buffer_index_addr = 0;
	memset (*info->store_remset_buffer_addr, 0, sizeof (gpointer) * STORE_REMSET_BUFFER_SIZE);
/* Total allocation size of @remset: header plus its data area, used when
 * freeing it back to the internal allocator. */
remset_byte_size (RememberedSet *remset)
	return sizeof (RememberedSet) + (remset->end_set - remset->data) * sizeof (gpointer);
/*
 * Drain all remembered sets into @queue for a minor collection: the global
 * remset (compacting away entries that no longer point into the nursery),
 * the generic-store buffers, each live thread's remset chain, and remsets
 * left behind by dead threads.  Processed per-thread remsets are reset and
 * all but the head of each chain are freed.
 * NOTE(review): extract is missing interior lines (`int i, j;`, the
 * store_pos copy on the skip path, closing braces); tokens preserved as found.
 */
scan_from_remsets (void *start_nursery, void *end_nursery, GrayQueue *queue)
	SgenThreadInfo *info;
	RememberedSet *remset;
	GenericStoreRememberedSet *store_remset;
	mword *p, *next_p, *store_pos;
#ifdef HEAVY_STATISTICS
	/* the global one */
	for (remset = global_remset; remset; remset = remset->next) {
		DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
		store_pos = remset->data;
		for (p = remset->data; p < remset->store_next; p = next_p) {
			void **ptr = (void**)p [0];
			/*Ignore previously processed remset.*/
			if (!global_remset_location_was_not_added (ptr)) {
			next_p = handle_remset (p, start_nursery, end_nursery, TRUE, queue);
			/*
			 * Clear global remsets of locations which no longer point to the
			 * nursery. Otherwise, they could grow indefinitely between major
			 * collections.
			 *
			 * Since all global remsets are location remsets, we don't need to unmask the pointer.
			 */
			if (ptr_in_nursery (*ptr)) {
				*store_pos ++ = p [0];
				HEAVY_STAT (++stat_global_remsets_readded);
		/* Truncate the remset */
		remset->store_next = store_pos;
	/* the generic store ones */
	store_remset = generic_store_remsets;
	while (store_remset) {
		GenericStoreRememberedSet *next = store_remset->next;
		for (i = 0; i < STORE_REMSET_BUFFER_SIZE - 1; ++i) {
			gpointer addr = store_remset->data [i];
			handle_remset ((mword*)&addr, start_nursery, end_nursery, FALSE, queue);
		mono_sgen_free_internal (store_remset, INTERNAL_MEM_STORE_REMSET);
		store_remset = next;
	generic_store_remsets = NULL;
	/* the per-thread ones */
	for (i = 0; i < THREAD_HASH_SIZE; ++i) {
		for (info = thread_table [i]; info; info = info->next) {
			RememberedSet *next;
			for (remset = info->remset; remset; remset = next) {
				DEBUG (4, fprintf (gc_debug_file, "Scanning remset for thread %p, range: %p-%p, size: %td\n", info, remset->data, remset->store_next, remset->store_next - remset->data));
				for (p = remset->data; p < remset->store_next;)
					p = handle_remset (p, start_nursery, end_nursery, FALSE, queue);
				remset->store_next = remset->data;
				next = remset->next;
				remset->next = NULL;
				/* keep the head remset (still owned by the thread), free the rest */
				if (remset != info->remset) {
					DEBUG (4, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
					mono_sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
			/* entry 0 of the store buffer is a header, hence the +1 offset */
			for (j = 0; j < *info->store_remset_buffer_index_addr; ++j)
				handle_remset ((mword*)*info->store_remset_buffer_addr + j + 1, start_nursery, end_nursery, FALSE, queue);
			clear_thread_store_remset_buffer (info);
	/* the freed thread ones */
	while (freed_thread_remsets) {
		RememberedSet *next;
		remset = freed_thread_remsets;
		DEBUG (4, fprintf (gc_debug_file, "Scanning remset for freed thread, range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
		for (p = remset->data; p < remset->store_next;)
			p = handle_remset (p, start_nursery, end_nursery, FALSE, queue);
		next = remset->next;
		DEBUG (4, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
		mono_sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
		freed_thread_remsets = next;
/*
 * Clear the info in the remembered sets: we're doing a major collection, so
 * the per-thread ones are not needed and the global ones will be reconstructed
 * during the copy phase.
 * The head remset of each chain is kept (reset to empty); the rest are freed.
 * NOTE(review): extract is missing `int i;` and closing braces; tokens
 * preserved as found.
 */
clear_remsets (void)
	SgenThreadInfo *info;
	RememberedSet *remset, *next;
	/* the global list */
	for (remset = global_remset; remset; remset = next) {
		remset->store_next = remset->data;
		next = remset->next;
		remset->next = NULL;
		if (remset != global_remset) {
			DEBUG (4, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
			mono_sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
	/* the generic store ones */
	while (generic_store_remsets) {
		GenericStoreRememberedSet *gs_next = generic_store_remsets->next;
		mono_sgen_free_internal (generic_store_remsets, INTERNAL_MEM_STORE_REMSET);
		generic_store_remsets = gs_next;
	/* the per-thread ones */
	for (i = 0; i < THREAD_HASH_SIZE; ++i) {
		for (info = thread_table [i]; info; info = info->next) {
			for (remset = info->remset; remset; remset = next) {
				remset->store_next = remset->data;
				next = remset->next;
				remset->next = NULL;
				if (remset != info->remset) {
					DEBUG (3, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
					mono_sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
			clear_thread_store_remset_buffer (info);
	/* the freed thread ones */
	while (freed_thread_remsets) {
		next = freed_thread_remsets->next;
		DEBUG (4, fprintf (gc_debug_file, "Freed remset at %p\n", freed_thread_remsets->data));
		mono_sgen_free_internal_dynamic (freed_thread_remsets, remset_byte_size (freed_thread_remsets), INTERNAL_MEM_REMSET);
		freed_thread_remsets = next;
5149 * Clear the thread local TLAB variables for all threads.
5154 SgenThreadInfo *info;
/* Walk every registered thread and null out its TLAB pointers (via the
 * per-thread indirection addresses), forcing a fresh TLAB on next alloc. */
5157 for (i = 0; i < THREAD_HASH_SIZE; ++i) {
5158 for (info = thread_table [i]; info; info = info->next) {
5159 /* A new TLAB will be allocated when the thread does its first allocation */
5160 *info->tlab_start_addr = NULL;
5161 *info->tlab_next_addr = NULL;
5162 *info->tlab_temp_end_addr = NULL;
5163 *info->tlab_real_end_addr = NULL;
5168 /* LOCKING: assumes the GC lock is held */
5169 static SgenThreadInfo*
/* Register the calling thread with the GC: allocates its SgenThreadInfo,
 * wires up TLAB/remset indirection pointers, determines the stack bounds,
 * and inserts the record into the thread hash table.
 * `addr` is an address on the caller's stack, used as a stack-bottom
 * fallback when no pthread stack API is available. */
5170 gc_register_current_thread (void *addr)
/* NOTE(review): plain malloc here, and the result is used without a NULL
 * check visible in this excerpt. */
5173 SgenThreadInfo* info = malloc (sizeof (SgenThreadInfo));
5174 #ifndef HAVE_KW_THREAD
5175 SgenThreadInfo *__thread_info__ = info;
5181 memset (info, 0, sizeof (SgenThreadInfo));
5182 #ifndef HAVE_KW_THREAD
5183 info->tlab_start = info->tlab_next = info->tlab_temp_end = info->tlab_real_end = NULL;
5185 g_assert (!pthread_getspecific (thread_info_key));
5186 pthread_setspecific (thread_info_key, info);
5191 info->id = ARCH_GET_THREAD ();
5192 info->stop_count = -1;
5195 info->stack_start = NULL;
/* The TLAB_* / STORE_REMSET_* macros resolve to either __thread variables
 * or fields of info, depending on HAVE_KW_THREAD. */
5196 info->tlab_start_addr = &TLAB_START;
5197 info->tlab_next_addr = &TLAB_NEXT;
5198 info->tlab_temp_end_addr = &TLAB_TEMP_END;
5199 info->tlab_real_end_addr = &TLAB_REAL_END;
5200 info->store_remset_buffer_addr = &STORE_REMSET_BUFFER;
5201 info->store_remset_buffer_index_addr = &STORE_REMSET_BUFFER_INDEX;
5202 info->stopped_ip = NULL;
5203 info->stopped_domain = NULL;
5204 info->stopped_regs = NULL;
5206 binary_protocol_thread_register ((gpointer)info->id);
5208 #ifdef HAVE_KW_THREAD
5209 tlab_next_addr = &tlab_next;
5210 store_remset_buffer_index_addr = &store_remset_buffer_index;
/* Stack bounds: prefer the pthread attribute APIs, then the Darwin-style
 * get_stackaddr/get_stacksize pair, then fall back to page-aligning the
 * caller-supplied stack address (assuming a downward-growing stack). */
5213 /* try to get it with attributes first */
5214 #if defined(HAVE_PTHREAD_GETATTR_NP) && defined(HAVE_PTHREAD_ATTR_GETSTACK)
5218 pthread_attr_t attr;
5219 pthread_getattr_np (pthread_self (), &attr);
5220 pthread_attr_getstack (&attr, &sstart, &size);
5221 info->stack_start_limit = sstart;
5222 info->stack_end = (char*)sstart + size;
5223 pthread_attr_destroy (&attr);
5225 #elif defined(HAVE_PTHREAD_GET_STACKSIZE_NP) && defined(HAVE_PTHREAD_GET_STACKADDR_NP)
5226 info->stack_end = (char*)pthread_get_stackaddr_np (pthread_self ());
5227 info->stack_start_limit = (char*)info->stack_end - pthread_get_stacksize_np (pthread_self ());
5230 /* FIXME: we assume the stack grows down */
5231 gsize stack_bottom = (gsize)addr;
5232 stack_bottom += 4095;
5233 stack_bottom &= ~4095;
5234 info->stack_end = (char*)stack_bottom;
5238 #ifdef HAVE_KW_THREAD
5239 stack_end = info->stack_end;
5242 /* hash into the table */
5243 hash = HASH_PTHREAD_T (info->id) % THREAD_HASH_SIZE;
5244 info->next = thread_table [hash];
5245 thread_table [hash] = info;
5247 info->remset = alloc_remset (DEFAULT_REMSET_SIZE, info);
5248 pthread_setspecific (remembered_set_key, info->remset);
5249 #ifdef HAVE_KW_THREAD
5250 remembered_set = info->remset;
5253 STORE_REMSET_BUFFER = mono_sgen_alloc_internal (INTERNAL_MEM_STORE_REMSET);
5254 STORE_REMSET_BUFFER_INDEX = 0;
5256 DEBUG (3, fprintf (gc_debug_file, "registered thread %p (%p) (hash: %d)\n", info, (gpointer)info->id, hash));
/* Give the runtime a chance to attach its own per-thread data. */
5258 if (gc_callbacks.thread_attach_func)
5259 info->runtime_data = gc_callbacks.thread_attach_func ();
/* Flush a per-thread store buffer into a newly allocated generic-store
 * remset node and push it on the global generic_store_remsets list.
 * Slot 0 of the buffer is skipped (buffer + 1), copying the remaining
 * STORE_REMSET_BUFFER_SIZE - 1 entries. */
5265 add_generic_store_remset_from_buffer (gpointer *buffer)
5267 GenericStoreRememberedSet *remset = mono_sgen_alloc_internal (INTERNAL_MEM_STORE_REMSET);
5268 memcpy (remset->data, buffer + 1, sizeof (gpointer) * (STORE_REMSET_BUFFER_SIZE - 1));
5269 remset->next = generic_store_remsets;
5270 generic_store_remsets = remset;
/* Remove the calling thread's SgenThreadInfo from the thread hash table and
 * hand its remembered sets over to the freed_thread_remsets list so a later
 * collection can still scan them. Pending store-buffer entries are flushed
 * into a generic-store remset first. */
5274 unregister_current_thread (void)
5277 SgenThreadInfo *prev = NULL;
5279 RememberedSet *rset;
5280 ARCH_THREAD_TYPE id = ARCH_GET_THREAD ();
5282 binary_protocol_thread_unregister ((gpointer)id);
5284 hash = HASH_PTHREAD_T (id) % THREAD_HASH_SIZE;
5285 p = thread_table [hash];
5287 DEBUG (3, fprintf (gc_debug_file, "unregister thread %p (%p)\n", p, (gpointer)p->id));
/* Walk the hash bucket to find this thread's entry and unlink it. */
5288 while (!ARCH_THREAD_EQUALS (p->id, id)) {
5293 thread_table [hash] = p->next;
5295 prev->next = p->next;
/* Append this thread's remset chain onto freed_thread_remsets (either by
 * splicing at the tail of p->remset or by taking over the list head). */
5298 if (freed_thread_remsets) {
5299 for (rset = p->remset; rset->next; rset = rset->next)
5301 rset->next = freed_thread_remsets;
5302 freed_thread_remsets = p->remset;
5304 freed_thread_remsets = p->remset;
5307 if (*p->store_remset_buffer_index_addr)
5308 add_generic_store_remset_from_buffer (*p->store_remset_buffer_addr);
5309 mono_sgen_free_internal (*p->store_remset_buffer_addr, INTERNAL_MEM_STORE_REMSET);
/* pthread key destructor: runs when a registered thread exits.
 * The assert documents that the thread must already be detached from any
 * MonoDomain by this point. */
5314 unregister_thread (void *k)
5316 g_assert (!mono_domain_get ());
5318 unregister_current_thread ();
/* Public entry point: register the current thread with the GC if it is not
 * already known. Returns non-zero on success (info != NULL). */
5323 mono_gc_register_thread (void *baseptr)
5325 SgenThreadInfo *info;
5329 info = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());
5331 info = gc_register_current_thread (baseptr);
5333 return info != NULL;
/* pthread interception: wrappers around pthread_create/join/detach so that
 * every thread created through them is registered with the GC before its
 * start routine runs. */
5336 #if USE_PTHREAD_INTERCEPT
5339 void *(*start_routine) (void *);
/* Semaphore used to make the creating thread wait until the new thread has
 * registered itself with the GC. */
5342 MonoSemType registered;
5343 } SgenThreadStartInfo;
/* Trampoline run as the actual pthread start routine: registers the thread,
 * signals the creator, then invokes the user's start_routine.
 * Unregistration is left to the pthread key destructor. */
5346 gc_start_thread (void *arg)
5348 SgenThreadStartInfo *start_info = arg;
5349 SgenThreadInfo* info;
5350 void *t_arg = start_info->arg;
5351 void *(*start_func) (void*) = start_info->start_routine;
5356 info = gc_register_current_thread (&result);
5358 post_result = MONO_SEM_POST (&(start_info->registered));
5359 g_assert (!post_result);
5360 result = start_func (t_arg);
5361 g_assert (!mono_domain_get ());
5363 * this is done by the pthread key dtor
5365 unregister_current_thread ();
/* GC-aware replacement for pthread_create: spawns the thread through
 * gc_start_thread and blocks (retrying on EINTR) until it has registered. */
5373 mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
5375 SgenThreadStartInfo *start_info;
5378 start_info = malloc (sizeof (SgenThreadStartInfo));
5381 MONO_SEM_INIT (&(start_info->registered), 0);
5382 start_info->arg = arg;
5383 start_info->start_routine = start_routine;
5385 result = pthread_create (new_thread, attr, gc_start_thread, start_info);
5387 while (MONO_SEM_WAIT (&(start_info->registered)) != 0) {
5388 /*if (EINTR != errno) ABORT("sem_wait failed"); */
5391 MONO_SEM_DESTROY (&(start_info->registered));
/* Thin pass-throughs kept for interface symmetry. */
5397 mono_gc_pthread_join (pthread_t thread, void **retval)
5399 return pthread_join (thread, retval);
5403 mono_gc_pthread_detach (pthread_t thread)
5405 return pthread_detach (thread);
5408 #endif /* USE_PTHREAD_INTERCEPT */
5411 * ######################################################################
5412 * ######## Write barriers
5413 * ######################################################################
5417 * This causes the compile to extend the liveness of 'v' till the call to dummy_use
/* Empty inline asm that the optimizer must treat as reading and writing 'v',
 * so 'v' stays live (visible to a conservative stack scan) up to this point. */
5420 dummy_use (gpointer v) {
5421 __asm__ volatile ("" : "=r"(v) : "r"(v));
/* Allocate a RememberedSet with room for 'size' gpointer-sized entries.
 * store_next starts at data (empty); end_set marks one past the last slot.
 * 'id' is only used for the debug message (thread info pointer or a tag). */
5425 static RememberedSet*
5426 alloc_remset (int size, gpointer id) {
5427 RememberedSet* res = mono_sgen_alloc_internal_dynamic (sizeof (RememberedSet) + (size * sizeof (gpointer)), INTERNAL_MEM_REMSET);
5428 res->store_next = res->data;
5429 res->end_set = res->data + size;
5431 DEBUG (4, fprintf (gc_debug_file, "Allocated remset size %d at %p for %p\n", size, res->data, id));
5436 * Note: the write barriers first do the needed GC work and then do the actual store:
5437 * this way the value is visible to the conservative GC scan after the write barrier
5438 * itself. If a GC interrupts the barrier in the middle, value will be kept alive by
5439 * the conservative scan, otherwise by the remembered set scan.
/* Write barrier for storing an object reference into an object field.
 * Nursery targets need no barrier; otherwise record the slot either in the
 * card table or in the per-thread remembered set (growing it when full). */
5442 mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
5444 HEAVY_STAT (++stat_wbarrier_set_field);
5445 if (ptr_in_nursery (field_ptr)) {
5446 *(void**)field_ptr = value;
5449 DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p\n", field_ptr));
5450 if (use_cardtable) {
5451 *(void**)field_ptr = value;
/* Only old->young references need tracking. */
5452 if (ptr_in_nursery (value))
5453 sgen_card_table_mark_address ((mword)field_ptr);
5460 rs = REMEMBERED_SET;
5461 if (rs->store_next < rs->end_set) {
5462 *(rs->store_next++) = (mword)field_ptr;
5463 *(void**)field_ptr = value;
/* Remset full: chain a new block of the same capacity in front. */
5467 rs = alloc_remset (rs->end_set - rs->data, (void*)1);
5468 rs->next = REMEMBERED_SET;
5469 REMEMBERED_SET = rs;
5470 #ifdef HAVE_KW_THREAD
5471 mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
5473 *(rs->store_next++) = (mword)field_ptr;
5474 *(void**)field_ptr = value;
/* Write barrier for storing an object reference into an array slot.
 * Structurally identical to mono_gc_wbarrier_set_field, applied to
 * slot_ptr instead of a field address. */
5480 mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
5482 HEAVY_STAT (++stat_wbarrier_set_arrayref);
5484 *(void**)slot_ptr = value;
5483 if (ptr_in_nursery (slot_ptr)) {
5487 DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p\n", slot_ptr));
5488 if (use_cardtable) {
5489 *(void**)slot_ptr = value;
5490 if (ptr_in_nursery (value))
5491 sgen_card_table_mark_address ((mword)slot_ptr);
5498 rs = REMEMBERED_SET;
5499 if (rs->store_next < rs->end_set) {
5500 *(rs->store_next++) = (mword)slot_ptr;
5501 *(void**)slot_ptr = value;
/* Remset full: chain a new block of the same capacity in front. */
5505 rs = alloc_remset (rs->end_set - rs->data, (void*)1);
5506 rs->next = REMEMBERED_SET;
5507 REMEMBERED_SET = rs;
5508 #ifdef HAVE_KW_THREAD
5509 mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
5511 *(rs->store_next++) = (mword)slot_ptr;
5512 *(void**)slot_ptr = value;
/* Write barrier for copying 'count' object references into dest_ptr.
 * Nursery destinations (and empty copies) are a plain memmove; with the card
 * table the copy is interleaved with per-slot card marking (backwards when
 * the ranges overlap); otherwise a single REMSET_RANGE entry records the
 * destination span in the remembered set. */
5518 mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
5520 HEAVY_STAT (++stat_wbarrier_arrayref_copy);
5521 /*This check can be done without taking a lock since dest_ptr array is pinned*/
5522 if (ptr_in_nursery (dest_ptr) || count <= 0) {
5523 memmove (dest_ptr, src_ptr, count * sizeof (gpointer));
5527 if (use_cardtable) {
5528 gpointer *dest = dest_ptr;
5529 gpointer *src = src_ptr;
5531 /*overlapping that required backward copying*/
5532 if (src < dest && (src + count) > dest) {
5533 gpointer *start = dest;
5537 for (; dest >= start; --src, --dest) {
5538 gpointer value = *src;
5540 if (ptr_in_nursery (value))
5541 sgen_card_table_mark_address ((mword)dest);
5545 gpointer *end = dest + count;
5546 for (; dest < end; ++src, ++dest) {
5547 gpointer value = *src;
5549 if (ptr_in_nursery (value))
5550 sgen_card_table_mark_address ((mword)dest);
5558 memmove (dest_ptr, src_ptr, count * sizeof (gpointer));
5560 rs = REMEMBERED_SET;
5561 DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p, %d\n", dest_ptr, count));
/* Range entries take two slots: tagged address + count. */
5562 if (rs->store_next + 1 < rs->end_set) {
5563 *(rs->store_next++) = (mword)dest_ptr | REMSET_RANGE;
5564 *(rs->store_next++) = count;
5568 rs = alloc_remset (rs->end_set - rs->data, (void*)1);
5569 rs->next = REMEMBERED_SET;
5570 REMEMBERED_SET = rs;
5571 #ifdef HAVE_KW_THREAD
5572 mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
5574 *(rs->store_next++) = (mword)dest_ptr | REMSET_RANGE;
5575 *(rs->store_next++) = count;
/* Scratch result slot for find_object_for_ptr_callback. */
5581 static char *found_obj;
/* Iteration callback: records the object whose [obj, obj+size) span contains
 * ptr; asserts at most one object matches. */
5584 find_object_for_ptr_callback (char *obj, size_t size, char *ptr)
5586 if (ptr >= obj && ptr < obj + size) {
5587 g_assert (!found_obj);
5592 /* for use in the debugger */
5593 char* find_object_for_ptr (char *ptr);
/* Locate the heap object containing ptr by scanning the nursery, the LOS
 * list, and finally every major-heap object. Debug-only helper; efficiency
 * is explicitly not a concern. */
5595 find_object_for_ptr (char *ptr)
5599 if (ptr >= nursery_section->data && ptr < nursery_section->end_data) {
5601 mono_sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
5602 (IterateObjectCallbackFunc)find_object_for_ptr_callback, ptr);
5607 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
5608 if (ptr >= bigobj->data && ptr < bigobj->data + bigobj->size)
5609 return bigobj->data;
5613 * Very inefficient, but this is debugging code, supposed to
5614 * be called from gdb, so we don't care.
5617 major.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)find_object_for_ptr_callback, ptr);
/* Drain the current thread's store-remset buffer into a generic-store
 * remset node, then zero the buffer and reset its index. */
5622 evacuate_remset_buffer (void)
5627 buffer = STORE_REMSET_BUFFER;
5629 add_generic_store_remset_from_buffer (buffer);
5630 memset (buffer, 0, sizeof (gpointer) * STORE_REMSET_BUFFER_SIZE);
5632 STORE_REMSET_BUFFER_INDEX = 0;
/* Generic write barrier for a store that has ALREADY been performed at ptr.
 * Filters out cases that need no tracking (nursery/stack targets, non-nursery
 * values), then records ptr either in the card table or in the per-thread
 * store buffer (evacuating the buffer when it fills up). */
5636 mono_gc_wbarrier_generic_nostore (gpointer ptr)
5642 HEAVY_STAT (++stat_wbarrier_generic_store);
5644 #ifdef XDOMAIN_CHECKS_IN_WBARRIER
5645 /* FIXME: ptr_in_heap must be called with the GC lock held */
/* Optional debug mode: verify a cross-domain reference is permitted. */
5646 if (xdomain_checks && *(MonoObject**)ptr && ptr_in_heap (ptr)) {
5647 char *start = find_object_for_ptr (ptr);
5648 MonoObject *value = *(MonoObject**)ptr;
5652 MonoObject *obj = (MonoObject*)start;
5653 if (obj->vtable->domain != value->vtable->domain)
5654 g_assert (is_xdomain_ref_allowed (ptr, start, obj->vtable->domain));
5660 if (*(gpointer*)ptr)
5661 binary_protocol_wbarrier (ptr, *(gpointer*)ptr, (gpointer)LOAD_VTABLE (*(gpointer*)ptr));
/* Only old-space slots holding nursery references need to be remembered. */
5663 if (ptr_in_nursery (ptr) || ptr_on_stack (ptr) || !ptr_in_nursery (*(gpointer*)ptr)) {
5664 DEBUG (8, fprintf (gc_debug_file, "Skipping remset at %p\n", ptr));
5668 if (use_cardtable) {
5669 if (ptr_in_nursery(*(gpointer*)ptr))
5670 sgen_card_table_mark_address ((mword)ptr);
5676 buffer = STORE_REMSET_BUFFER;
5677 index = STORE_REMSET_BUFFER_INDEX;
5678 /* This simple optimization eliminates a sizable portion of
5679 entries. Comparing it to the last but one entry as well
5680 doesn't eliminate significantly more entries. */
5681 if (buffer [index] == ptr) {
5686 DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p\n", ptr));
5687 HEAVY_STAT (++stat_wbarrier_generic_store_remset);
5690 if (index >= STORE_REMSET_BUFFER_SIZE) {
5691 evacuate_remset_buffer ();
5692 index = STORE_REMSET_BUFFER_INDEX;
5693 g_assert (index == 0);
5696 buffer [index] = ptr;
5697 STORE_REMSET_BUFFER_INDEX = index;
/* Store value at ptr, then run the generic barrier — but only when the
 * stored value lives in the nursery (only old->young needs remembering). */
5703 mono_gc_wbarrier_generic_store (gpointer ptr, MonoObject* value)
5705 DEBUG (8, fprintf (gc_debug_file, "Wbarrier store at %p to %p (%s)\n", ptr, value, value ? safe_name (value) : "null"));
5706 *(void**)ptr = value;
5707 if (ptr_in_nursery (value))
5708 mono_gc_wbarrier_generic_nostore (ptr);
/* Copy a value whose reference slots are described by 'bitmap': each set bit
 * marks a pointer-sized word that must go through the generic store barrier.
 * NOTE(review): non-reference words are presumably copied raw in the elided
 * branch — confirm against the full source. */
5712 void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
5714 mword *dest = _dest;
5719 mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);
5724 size -= SIZEOF_VOID_P;
/* Write barrier for copying 'count' value-type instances of 'klass' to dest.
 * The data is memmove'd first; tracking is then either a card-table range
 * mark or a three-slot REMSET_VTYPE entry (tagged dest, GC descriptor,
 * count). Nursery/stack destinations and reference-free types need none. */
5731 mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
5734 size_t size = count * mono_class_value_size (klass, NULL);
5736 HEAVY_STAT (++stat_wbarrier_value_copy);
5737 g_assert (klass->valuetype);
5739 memmove (dest, src, size);
5740 if (use_cardtable) {
5741 sgen_card_table_mark_range ((mword)dest, size);
5743 rs = REMEMBERED_SET;
5744 if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !klass->has_references) {
5748 g_assert (klass->gc_descr_inited);
5749 DEBUG (8, fprintf (gc_debug_file, "Adding value remset at %p, count %d, descr %p for class %s (%p)\n", dest, count, klass->gc_descr, klass->name, klass));
/* A vtype entry occupies three remset slots. */
5751 if (rs->store_next + 3 < rs->end_set) {
5752 *(rs->store_next++) = (mword)dest | REMSET_VTYPE;
5753 *(rs->store_next++) = (mword)klass->gc_descr;
5754 *(rs->store_next++) = (mword)count;
5758 rs = alloc_remset (rs->end_set - rs->data, (void*)1);
5759 rs->next = REMEMBERED_SET;
5760 REMEMBERED_SET = rs;
5761 #ifdef HAVE_KW_THREAD
5762 mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
5764 *(rs->store_next++) = (mword)dest | REMSET_VTYPE;
5765 *(rs->store_next++) = (mword)klass->gc_descr;
5766 *(rs->store_next++) = (mword)count;
5772 * mono_gc_wbarrier_object_copy:
5774 * Write barrier to call when obj is the result of a clone or copy of an object.
/* Copies the payload of src into obj (skipping the MonoObject header, so the
 * sync block is preserved) and records obj as a REMSET_OBJECT entry unless
 * it lives in the nursery or on the stack. */
5777 mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
5783 HEAVY_STAT (++stat_wbarrier_object_copy);
5784 rs = REMEMBERED_SET;
5785 DEBUG (6, fprintf (gc_debug_file, "Adding object remset for %p\n", obj));
5786 size = mono_object_class (obj)->instance_size;
5788 /* do not copy the sync state */
5789 memcpy ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
5790 size - sizeof (MonoObject));
5791 if (ptr_in_nursery (obj) || ptr_on_stack (obj)) {
5795 if (rs->store_next < rs->end_set) {
5796 *(rs->store_next++) = (mword)obj | REMSET_OBJECT;
5800 rs = alloc_remset (rs->end_set - rs->data, (void*)1);
5801 rs->next = REMEMBERED_SET;
5802 REMEMBERED_SET = rs;
5803 #ifdef HAVE_KW_THREAD
5804 mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
5806 *(rs->store_next++) = (mword)obj | REMSET_OBJECT;
5811 * ######################################################################
5812 * ######## Collector debugging
5813 * ######################################################################
/* Human-readable names for GC descriptor types, indexed by the low bits of
 * a descriptor; used by describe_ptr below. */
5816 const char*descriptor_types [] = {
/* Debugger helper: print everything the GC knows about ptr — which space it
 * is in, pin/forward state, its vtable, class, and GC descriptor. */
5828 describe_ptr (char *ptr)
5834 if (ptr_in_nursery (ptr)) {
5835 printf ("Pointer inside nursery.\n");
5837 if (major.ptr_is_in_non_pinned_space (ptr)) {
5838 printf ("Pointer inside oldspace.\n");
5839 } else if (major.obj_is_from_pinned_alloc (ptr)) {
5840 printf ("Pointer is inside a pinned chunk.\n");
5842 printf ("Pointer unknown.\n");
5847 if (object_is_pinned (ptr))
5848 printf ("Object is pinned.\n");
5850 if (object_is_forwarded (ptr))
5851 printf ("Object is forwared.\n");
5853 // FIXME: Handle pointers to the inside of objects
5854 vtable = (MonoVTable*)LOAD_VTABLE (ptr);
5856 printf ("VTable: %p\n", vtable);
/* Sanity-check the vtable before dereferencing it. */
5857 if (vtable == NULL) {
5858 printf ("VTable is invalid (empty).\n");
5861 if (ptr_in_nursery (vtable)) {
5862 printf ("VTable is invalid (points inside nursery).\n");
5865 printf ("Class: %s\n", vtable->klass->name);
5867 desc = ((GCVTable*)vtable)->desc;
5868 printf ("Descriptor: %lx\n", (long)desc);
5871 printf ("Descriptor type: %d (%s)\n", type, descriptor_types [type]);
/* Decode one remset entry at p, set *found if it covers 'addr', and return
 * the pointer to the next entry (entries are variable-length depending on
 * their REMSET_TYPE_MASK tag). Consistency-check helper. */
5875 find_in_remset_loc (mword *p, char *addr, gboolean *found)
5881 switch ((*p) & REMSET_TYPE_MASK) {
5882 case REMSET_LOCATION:
/* Single-slot entry: exact address match. */
5883 if (*p == (mword)addr)
5887 ptr = (void**)(*p & ~REMSET_TYPE_MASK);
5889 if ((void**)addr >= ptr && (void**)addr < ptr + count)
/* Object entry: covered span is the whole object, derived from its size. */
5893 ptr = (void**)(*p & ~REMSET_TYPE_MASK);
5894 count = safe_object_get_size ((MonoObject*)ptr);
5895 count = ALIGN_UP (count);
5896 count /= sizeof (mword);
5897 if ((void**)addr >= ptr && (void**)addr < ptr + count)
/* Vtype entry: span length comes from the GC descriptor. */
5901 ptr = (void**)(*p & ~REMSET_TYPE_MASK);
5905 switch (desc & 0x7) {
5906 case DESC_TYPE_RUN_LENGTH:
5907 OBJ_RUN_LEN_SIZE (skip_size, desc, ptr);
5909 case DESC_TYPE_SMALL_BITMAP:
5910 OBJ_BITMAP_SIZE (skip_size, desc, start);
5914 g_assert_not_reached ();
5917 /* The descriptor includes the size of MonoObject */
5918 skip_size -= sizeof (MonoObject);
5920 if ((void**)addr >= ptr && (void**)addr < ptr + (skip_size / sizeof (gpointer)))
5925 g_assert_not_reached ();
5931 * Return whenever ADDR occurs in the remembered sets
/* Searches every remset source in turn: the global list, the generic-store
 * blocks, each thread's remset chain and store buffer, and finally the
 * remsets inherited from freed threads. */
5934 find_in_remsets (char *addr)
5937 SgenThreadInfo *info;
5938 RememberedSet *remset;
5939 GenericStoreRememberedSet *store_remset;
5941 gboolean found = FALSE;
5943 /* the global one */
5944 for (remset = global_remset; remset; remset = remset->next) {
5945 DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
5946 for (p = remset->data; p < remset->store_next;) {
5947 p = find_in_remset_loc (p, addr, &found);
5953 /* the generic store ones */
5954 for (store_remset = generic_store_remsets; store_remset; store_remset = store_remset->next) {
5955 for (i = 0; i < STORE_REMSET_BUFFER_SIZE - 1; ++i) {
5956 if (store_remset->data [i] == addr)
5961 /* the per-thread ones */
5962 for (i = 0; i < THREAD_HASH_SIZE; ++i) {
5963 for (info = thread_table [i]; info; info = info->next) {
5965 for (remset = info->remset; remset; remset = remset->next) {
5966 DEBUG (4, fprintf (gc_debug_file, "Scanning remset for thread %p, range: %p-%p, size: %td\n", info, remset->data, remset->store_next, remset->store_next - remset->data));
5967 for (p = remset->data; p < remset->store_next;) {
5968 p = find_in_remset_loc (p, addr, &found);
/* Store buffer slot 0 is skipped (j + 1), matching the flush in
 * add_generic_store_remset_from_buffer. */
5973 for (j = 0; j < *info->store_remset_buffer_index_addr; ++j) {
5974 if ((*info->store_remset_buffer_addr) [j + 1] == addr)
5980 /* the freed thread ones */
5981 for (remset = freed_thread_remsets; remset; remset = remset->next) {
5982 DEBUG (4, fprintf (gc_debug_file, "Scanning remset for freed thread, range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
5983 for (p = remset->data; p < remset->store_next;) {
5984 p = find_in_remset_loc (p, addr, &found);
/* Set by the HANDLE_PTR check below when an old->young reference has no
 * corresponding remset/card entry; asserted on at the end of the check. */
5993 static gboolean missing_remsets;
5996 * We let a missing remset slide if the target object is pinned,
5997 * because the store might have happened but the remset not yet added,
5998 * but in that case the target must be pinned. We might theoretically
5999 * miss some missing remsets this way, but it's very unlikely.
6002 #define HANDLE_PTR(ptr,obj) do { \
6003 if (*(ptr) && (char*)*(ptr) >= nursery_start && (char*)*(ptr) < nursery_next) { \
6004 if (!find_in_remsets ((char*)(ptr)) && (!use_cardtable || !sgen_card_table_address_is_marked ((mword)ptr))) { \
6005 fprintf (gc_debug_file, "Oldspace->newspace reference %p at offset %td in object %p (%s.%s) not found in remsets.\n", *(ptr), (char*)(ptr) - (char*)(obj), (obj), ((MonoObject*)(obj))->vtable->klass->name_space, ((MonoObject*)(obj))->vtable->klass->name); \
6006 binary_protocol_missing_remset ((obj), (gpointer)LOAD_VTABLE ((obj)), (char*)(ptr) - (char*)(obj), *(ptr), (gpointer)LOAD_VTABLE(*(ptr)), object_is_pinned (*(ptr))); \
6007 if (!object_is_pinned (*(ptr))) \
6008 missing_remsets = TRUE; \
6014 * Check that each object reference which points into the nursery can
6015 * be found in the remembered sets.
/* Per-object callback: scans one object's reference fields with the
 * HANDLE_PTR check above (via sgen-scan-object.h). */
6018 check_consistency_callback (char *start, size_t size, void *dummy)
6020 GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
6021 DEBUG (8, fprintf (gc_debug_file, "Scanning object %p, vtable: %p (%s)\n", start, vt, vt->klass->name));
6023 #define SCAN_OBJECT_ACTION
6024 #include "sgen-scan-object.h"
6028 * Perform consistency check of the heap.
6030 * Assumes the world is stopped.
/* Runs the callback over every major-heap object and every LOS object. */
6033 check_consistency (void)
6037 // Need to add more checks
6039 missing_remsets = FALSE;
6041 DEBUG (1, fprintf (gc_debug_file, "Begin heap consistency check...\n"));
6043 // Check that oldspace->newspace pointers are registered with the collector
6044 major.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)check_consistency_callback, NULL);
6046 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
6047 check_consistency_callback (bigobj->data, bigobj->size, NULL);
6049 DEBUG (1, fprintf (gc_debug_file, "Heap consistency check done.\n"));
/* Only hard-fail when not recording a binary protocol (so the protocol can
 * capture the full picture first). */
6051 #ifdef SGEN_BINARY_PROTOCOL
6052 if (!binary_protocol_file)
6054 g_assert (!missing_remsets);
/* HANDLE_PTR variant for the major-heap reference check: every non-NULL
 * reference must have a loadable vtable. */
6059 #define HANDLE_PTR(ptr,obj) do { \
6060 if (*(ptr) && !LOAD_VTABLE (*(ptr))) \
6061 g_error ("Could not load vtable for obj %p slot %d (size %d)", obj, (char*)ptr - (char*)obj, safe_object_get_size ((MonoObject*)obj)); \
6065 check_major_refs_callback (char *start, size_t size, void *dummy)
6067 #define SCAN_OBJECT_ACTION
6068 #include "sgen-scan-object.h"
/* Validate references in every major-heap and LOS object. */
6072 check_major_refs (void)
6076 major.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)check_major_refs_callback, NULL);
6078 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
6079 check_major_refs_callback (bigobj->data, bigobj->size, NULL);
6082 /* Check that the reference is valid */
6084 #define HANDLE_PTR(ptr,obj) do { \
6086 g_assert (safe_name (*(ptr)) != NULL); \
6093 * Perform consistency check on an object. Currently we only check that the
6094 * reference fields are valid.
6097 check_object (char *start)
6102 #include "sgen-scan-object.h"
6106 * ######################################################################
6107 * ######## Other mono public interface functions.
6108 * ######################################################################
/* Trigger a collection: generation 0 -> nursery only, otherwise major. */
6112 mono_gc_collect (int generation)
6116 if (generation == 0) {
6117 collect_nursery (0);
6119 major_collection ("user request");
6126 mono_gc_max_generation (void)
6132 mono_gc_collection_count (int generation)
6134 if (generation == 0)
6135 return num_minor_gcs;
6136 return num_major_gcs;
/* Used size = LOS + occupied nursery + major space (pinned objects not yet
 * accounted for, per the FIXME). */
6140 mono_gc_get_used_size (void)
6144 tot = los_memory_usage;
6145 tot += nursery_section->next_data - nursery_section->data;
6146 tot += major.get_used_size ();
6147 /* FIXME: account for pinned objects */
6153 mono_gc_get_heap_size (void)
6159 mono_gc_disable (void)
6167 mono_gc_enable (void)
6175 mono_gc_get_los_limit (void)
6177 return MAX_SMALL_OBJ_SIZE;
6181 mono_object_is_alive (MonoObject* o)
6187 mono_gc_get_generation (MonoObject *obj)
6189 if (ptr_in_nursery (obj))
6195 mono_gc_enable_events (void)
/* Weak-reference (disappearing link) management. */
6200 mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track)
6203 mono_gc_register_disappearing_link (obj, link_addr, track);
6208 mono_gc_weak_link_remove (void **link_addr)
6211 mono_gc_register_disappearing_link (NULL, link_addr, FALSE);
6216 mono_gc_weak_link_get (void **link_addr)
6220 return (MonoObject*) REVEAL_POINTER (*link_addr);
/* Register an ephemeron array so the collector treats its entries with
 * ephemeron (key-dependent) semantics. */
6224 mono_gc_ephemeron_array_add (MonoObject *obj)
6226 EphemeronLinkNode *node;
6230 node = mono_sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
6235 node->array = (char*)obj;
6236 node->next = ephemeron_list;
6237 ephemeron_list = node;
6239 DEBUG (5, fprintf (gc_debug_file, "Registered ephemeron array %p\n", obj));
/* Root descriptors: small bitmaps are encoded inline, larger ones go
 * through a complex descriptor allocation. */
6246 mono_gc_make_descr_from_bitmap (gsize *bitmap, int numbits)
6248 if (numbits < ((sizeof (*bitmap) * 8) - ROOT_DESC_TYPE_SHIFT)) {
6249 return (void*)MAKE_ROOT_DESC (ROOT_DESC_BITMAP, bitmap [0]);
6251 mword complex = alloc_complex_descriptor (bitmap, numbits);
6252 return (void*)MAKE_ROOT_DESC (ROOT_DESC_COMPLEX, complex);
6257 mono_gc_make_root_descr_user (MonoGCRootMarkFunc marker)
6261 g_assert (user_descriptors_next < MAX_USER_DESCRIPTORS);
6262 descr = (void*)MAKE_ROOT_DESC (ROOT_DESC_USER, (mword)user_descriptors_next);
6263 user_descriptors [user_descriptors_next ++] = marker;
/* Non-moving allocation registered as a GC root. */
6269 mono_gc_alloc_fixed (size_t size, void *descr)
6271 /* FIXME: do a single allocation */
6272 void *res = calloc (1, size);
6275 if (!mono_gc_register_root (res, size, descr)) {
6283 mono_gc_free_fixed (void* addr)
6285 mono_gc_deregister_root (addr);
6290 mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
6294 result = func (data);
6295 UNLOCK_INTERRUPTION;
6300 mono_gc_is_gc_thread (void)
6304 result = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ()) != NULL;
6309 /* Tries to extract a number from the passed string, taking in to account m, k
/* Parse str into *out, honoring a size-suffix character (k/m/... per the
 * switch) that selects a shift. Uses strtol with full errno/endptr error
 * checking; returns failure on range errors or trailing garbage. */
6312 mono_sgen_parse_environment_string_extract_number (const char *str, glong *out)
6315 int len = strlen (str), shift = 0;
6317 gboolean is_suffix = FALSE;
6320 switch (str [len - 1]) {
6331 suffix = str [len - 1];
6336 val = strtol (str, &endptr, 10);
6338 if ((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN))
6339 || (errno != 0 && val == 0) || (endptr == str))
/* With a suffix present, exactly one character may follow the digits. */
6343 if (*(endptr + 1)) /* Invalid string. */
/* One-time initialization of the SGen collector: parses MONO_GC_PARAMS and
 * MONO_GC_DEBUG, selects and initializes the major collector, sets up
 * internal allocators, suspend/restart signal handlers, the global remset,
 * and registers the calling thread. Idempotent via gc_initialized. */
6353 mono_gc_base_init (void)
6357 char *major_collector = NULL;
6358 struct sigaction sinfo;
6360 LOCK_INIT (gc_mutex);
6362 if (gc_initialized) {
6366 pagesize = mono_pagesize ();
6367 gc_debug_file = stderr;
6369 LOCK_INIT (interruption_mutex);
6370 LOCK_INIT (global_remset_mutex);
/* First pass over MONO_GC_PARAMS: only the major= option, since the chosen
 * collector must exist before the remaining options are interpreted. */
6372 if ((env = getenv ("MONO_GC_PARAMS"))) {
6373 opts = g_strsplit (env, ",", -1);
6374 for (ptr = opts; *ptr; ++ptr) {
6376 if (g_str_has_prefix (opt, "major=")) {
6377 opt = strchr (opt, '=') + 1;
6378 major_collector = g_strdup (opt);
6386 mono_sgen_init_internal_allocator ();
6388 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FRAGMENT, sizeof (Fragment));
6389 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION, SGEN_SIZEOF_GC_MEM_SECTION);
6390 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FINALIZE_ENTRY, sizeof (FinalizeEntry));
6391 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_DISLINK, sizeof (DisappearingLink));
6392 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord));
6393 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE, sizeof (GrayQueueSection));
6394 g_assert (sizeof (GenericStoreRememberedSet) == sizeof (gpointer) * STORE_REMSET_BUFFER_SIZE);
6395 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_STORE_REMSET, sizeof (GenericStoreRememberedSet));
6396 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));
/* Select the major collector; mark-and-sweep is the default. The parallel
 * variants also spin up worker threads. */
6398 if (!major_collector || !strcmp (major_collector, "marksweep")) {
6399 mono_sgen_marksweep_init (&major);
6400 } else if (!major_collector || !strcmp (major_collector, "marksweep-fixed")) {
6401 mono_sgen_marksweep_fixed_init (&major);
6402 } else if (!major_collector || !strcmp (major_collector, "marksweep-par")) {
6403 mono_sgen_marksweep_par_init (&major);
6404 workers_init (mono_cpu_count ());
6405 } else if (!major_collector || !strcmp (major_collector, "marksweep-fixed-par")) {
6406 mono_sgen_marksweep_fixed_par_init (&major);
6407 workers_init (mono_cpu_count ());
6408 } else if (!strcmp (major_collector, "copying")) {
6409 mono_sgen_copying_init (&major);
6411 fprintf (stderr, "Unknown major collector `%s'.\n", major_collector);
6415 #ifdef SGEN_HAVE_CARDTABLE
6416 use_cardtable = major.supports_cardtable;
6418 use_cardtable = FALSE;
/* Second pass: remaining MONO_GC_PARAMS options (wbarrier=, nursery-size=,
 * and collector-specific parameters). */
6422 for (ptr = opts; *ptr; ++ptr) {
6424 if (g_str_has_prefix (opt, "major="))
6426 if (g_str_has_prefix (opt, "wbarrier=")) {
6427 opt = strchr (opt, '=') + 1;
6428 if (strcmp (opt, "remset") == 0) {
6429 use_cardtable = FALSE;
6430 } else if (strcmp (opt, "cardtable") == 0) {
/* NOTE(review): the two error messages here look swapped — when
 * !use_cardtable and the major collector DOES support the card table,
 * the platform message is printed, and vice versa. Confirm against
 * the elided surrounding lines. */
6431 if (!use_cardtable) {
6432 if (major.supports_cardtable)
6433 fprintf (stderr, "The cardtable write barrier is not supported on this platform.\n");
6435 fprintf (stderr, "The major collector does not support the cardtable write barrier.\n");
6442 if (g_str_has_prefix (opt, "nursery-size=")) {
6444 opt = strchr (opt, '=') + 1;
6445 if (*opt && mono_sgen_parse_environment_string_extract_number (opt, &val)) {
6446 default_nursery_size = val;
6447 #ifdef SGEN_ALIGN_NURSERY
6448 if ((val & (val - 1))) {
6449 fprintf (stderr, "The nursery size must be a power of two.\n");
/* Derive log2 of the nursery size for aligned-nursery pointer checks. */
6453 default_nursery_bits = 0;
6454 while (1 << (++ default_nursery_bits) != default_nursery_size)
6458 fprintf (stderr, "nursery-size must be an integer.\n");
6464 if (!(major.handle_gc_param && major.handle_gc_param (opt))) {
6465 fprintf (stderr, "MONO_GC_PARAMS must be a comma-delimited list of one or more of the following:\n");
6466 fprintf (stderr, " nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
6467 fprintf (stderr, " major=COLLECTOR (where COLLECTOR is `marksweep', `marksweep-par' or `copying')\n");
6468 fprintf (stderr, " wbarrier=WBARRIER (where WBARRIER is `remset' or `cardtable')\n");
6469 if (major.print_gc_param_usage)
6470 major.print_gc_param_usage ();
6477 if (major_collector)
6478 g_free (major_collector);
6480 nursery_size = DEFAULT_NURSERY_SIZE;
6481 minor_collection_allowance = MIN_MINOR_COLLECTION_ALLOWANCE;
/* MONO_GC_DEBUG: leading digit sets the debug level, then an optional
 * log-file name and a series of named debug switches. */
6485 if ((env = getenv ("MONO_GC_DEBUG"))) {
6486 opts = g_strsplit (env, ",", -1);
6487 for (ptr = opts; ptr && *ptr; ptr ++) {
6489 if (opt [0] >= '0' && opt [0] <= '9') {
6490 gc_debug_level = atoi (opt);
6495 char *rf = g_strdup_printf ("%s.%d", opt, getpid ());
6496 gc_debug_file = fopen (rf, "wb");
6498 gc_debug_file = stderr;
6501 } else if (!strcmp (opt, "collect-before-allocs")) {
6502 collect_before_allocs = TRUE;
6503 } else if (!strcmp (opt, "check-at-minor-collections")) {
6504 consistency_check_at_minor_collection = TRUE;
6505 nursery_clear_policy = CLEAR_AT_GC;
6506 } else if (!strcmp (opt, "xdomain-checks")) {
6507 xdomain_checks = TRUE;
6508 } else if (!strcmp (opt, "clear-at-gc")) {
6509 nursery_clear_policy = CLEAR_AT_GC;
6510 } else if (!strcmp (opt, "conservative-stack-mark")) {
6511 conservative_stack_mark = TRUE;
6512 } else if (!strcmp (opt, "check-scan-starts")) {
6513 do_scan_starts_check = TRUE;
6514 } else if (g_str_has_prefix (opt, "heap-dump=")) {
6515 char *filename = strchr (opt, '=') + 1;
6516 nursery_clear_policy = CLEAR_AT_GC;
6517 heap_dump_file = fopen (filename, "w");
6519 fprintf (heap_dump_file, "<sgen-dump>\n");
6520 #ifdef SGEN_BINARY_PROTOCOL
6521 } else if (g_str_has_prefix (opt, "binary-protocol=")) {
6522 char *filename = strchr (opt, '=') + 1;
6523 binary_protocol_file = fopen (filename, "w");
6526 fprintf (stderr, "Invalid format for the MONO_GC_DEBUG env variable: '%s'\n", env);
6527 fprintf (stderr, "The format is: MONO_GC_DEBUG=[l[:filename]|<option>]+ where l is a debug level 0-9.\n");
6528 fprintf (stderr, "Valid options are: collect-before-allocs, check-at-minor-collections, xdomain-checks, clear-at-gc.\n");
/* Suspend/restart machinery: an ack semaphore plus two signal handlers.
 * The suspend mask blocks everything except the restart signal. */
6535 suspend_ack_semaphore_ptr = &suspend_ack_semaphore;
6536 MONO_SEM_INIT (&suspend_ack_semaphore, 0);
6538 sigfillset (&sinfo.sa_mask);
6539 sinfo.sa_flags = SA_RESTART | SA_SIGINFO;
6540 sinfo.sa_sigaction = suspend_handler;
6541 if (sigaction (suspend_signal_num, &sinfo, NULL) != 0) {
6542 g_error ("failed sigaction");
6545 sinfo.sa_handler = restart_handler;
6546 if (sigaction (restart_signal_num, &sinfo, NULL) != 0) {
6547 g_error ("failed sigaction");
6550 sigfillset (&suspend_signal_mask);
6551 sigdelset (&suspend_signal_mask, restart_signal_num);
6553 global_remset = alloc_remset (1024, NULL);
6554 global_remset->next = NULL;
/* unregister_thread runs as the key destructor on thread exit. */
6556 pthread_key_create (&remembered_set_key, unregister_thread);
6558 #ifndef HAVE_KW_THREAD
6559 pthread_key_create (&thread_info_key, NULL);
6565 gc_initialized = TRUE;
/* Register the initializing thread itself, using a local as stack anchor. */
6567 mono_gc_register_thread (&sinfo);
/* NOTE(review): this chunk appears to be an excerpt with original line numbers
 * embedded and several lines missing (numbering gaps); comments below are
 * grounded only in the visible code. */
/* Return the signal number sgen uses to suspend threads (installed via
 * sigaction in mono_gc_base_init above). */
6571 mono_gc_get_suspend_signal (void)
6573 	return suspend_signal_num;
6583 #ifdef HAVE_KW_THREAD
/* EMIT_TLS_ACCESS, fast variant: with __thread support the JIT can read the
 * TLS variable directly at a compile-time-known 'offset'; the member argument
 * is unused. Emits CEE_MONO_TLS with the raw offset. */
6584 #define EMIT_TLS_ACCESS(mb,dummy,offset) do { \
6585 	mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
6586 	mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
6587 	mono_mb_emit_i4 ((mb), (offset)); \
/* NOTE(review): the closing "} while (0)" and the "#else" line of this macro
 * pair are not visible in this excerpt (lines 6588-6589 missing). */
/* EMIT_TLS_ACCESS, slow variant: no __thread support, so load the
 * SgenThreadInfo* from thread_info_key, add the struct offset of 'member',
 * and dereference; the offset argument is unused. */
6590 #define EMIT_TLS_ACCESS(mb,member,dummy) do { \
6591 	mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
6592 	mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
6593 	mono_mb_emit_i4 ((mb), thread_info_key); \
6594 	mono_mb_emit_icon ((mb), G_STRUCT_OFFSET (SgenThreadInfo, member)); \
6595 	mono_mb_emit_byte ((mb), CEE_ADD); \
6596 	mono_mb_emit_byte ((mb), CEE_LDIND_I); \
6600 #ifdef MANAGED_ALLOCATION
6601 /* FIXME: Do this in the JIT, where specialized allocation sequences can be created
6602 * for each class. This is currently not easy to do, as it is hard to generate basic
6603 * blocks + branches, but it is easy with the linear IL codebase.
6605 * For this to work we'd need to solve the TLAB race, first. Now we
6606 * require the allocator to be in a few known methods to make sure
6607 * that they are executed atomically via the restart mechanism.
/*
 * create_allocator:
 * Build a managed (IL) wrapper implementing the fast path of object/array
 * allocation for the given allocator type (ATYPE_SMALL, ATYPE_NORMAL or
 * ATYPE_VECTOR): compute the aligned size, bump-allocate from the TLAB via
 * TLS, and fall back to the mono_gc_alloc_obj / mono_gc_alloc_vector icalls
 * on the slow path (size too big or TLAB exhausted).
 */
6610 create_allocator (int atype)
6612 	int p_var, size_var;
6613 	guint32 slowpath_branch, max_size_branch;
6614 	MonoMethodBuilder *mb;
6616 	MonoMethodSignature *csig;
6617 	static gboolean registered = FALSE;
6618 	int tlab_next_addr_var, new_next_var;
6620 	const char *name = NULL;
6621 	AllocatorWrapperInfo *info;
6623 #ifdef HAVE_KW_THREAD
6624 	int tlab_next_addr_offset = -1;
6625 	int tlab_temp_end_offset = -1;
6627 	MONO_THREAD_VAR_OFFSET (tlab_next_addr, tlab_next_addr_offset);
6628 	MONO_THREAD_VAR_OFFSET (tlab_temp_end, tlab_temp_end_offset);
6630 	g_assert (tlab_next_addr_offset != -1);
6631 	g_assert (tlab_temp_end_offset != -1);
/* Register the slow-path icalls once per process (guarded by 'registered'
 * per the declaration above; the guard check itself is not visible in this
 * excerpt). */
6635 		mono_register_jit_icall (mono_gc_alloc_obj, "mono_gc_alloc_obj", mono_create_icall_signature ("object ptr int"), FALSE);
6636 		mono_register_jit_icall (mono_gc_alloc_vector, "mono_gc_alloc_vector", mono_create_icall_signature ("object ptr int int"), FALSE);
6640 	if (atype == ATYPE_SMALL) {
6642 		name = "AllocSmall";
6643 	} else if (atype == ATYPE_NORMAL) {
6646 	} else if (atype == ATYPE_VECTOR) {
6648 		name = "AllocVector";
6650 		g_assert_not_reached ();
/* Signature: object (vtable, [max_length]) — all params are native-int sized. */
6653 	csig = mono_metadata_signature_alloc (mono_defaults.corlib, num_params);
6654 	csig->ret = &mono_defaults.object_class->byval_arg;
6655 	for (i = 0; i < num_params; ++i)
6656 		csig->params [i] = &mono_defaults.int_class->byval_arg;
6658 	mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_ALLOC);
6659 	size_var = mono_mb_add_local (mb, &mono_defaults.int32_class->byval_arg);
6660 	if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
6661 		/* size = vtable->klass->instance_size; */
6662 		mono_mb_emit_ldarg (mb, 0);
6663 		mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoVTable, klass));
6664 		mono_mb_emit_byte (mb, CEE_ADD);
6665 		mono_mb_emit_byte (mb, CEE_LDIND_I);
6666 		mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoClass, instance_size));
6667 		mono_mb_emit_byte (mb, CEE_ADD);
6668 		/* FIXME: assert instance_size stays a 4 byte integer */
6669 		mono_mb_emit_byte (mb, CEE_LDIND_U4);
6670 		mono_mb_emit_stloc (mb, size_var);
6671 	} else if (atype == ATYPE_VECTOR) {
6672 		MonoExceptionClause *clause;
6674 		MonoClass *oom_exc_class;
6677 		/* n > MONO_ARRAY_MAX_INDEX -> OverflowException */
6678 		mono_mb_emit_ldarg (mb, 1);
6679 		mono_mb_emit_icon (mb, MONO_ARRAY_MAX_INDEX);
6680 		pos = mono_mb_emit_short_branch (mb, CEE_BLE_UN_S);
6681 		mono_mb_emit_exception (mb, "OverflowException", NULL);
6682 		mono_mb_patch_short_branch (mb, pos);
/* The size computation below uses overflow-checked IL (MUL_OVF_UN/ADD_OVF_UN)
 * inside a try/catch that converts OverflowException to OutOfMemoryException. */
6684 		clause = mono_image_alloc0 (mono_defaults.corlib, sizeof (MonoExceptionClause));
6685 		clause->try_offset = mono_mb_get_label (mb);
6687 		/* vtable->klass->sizes.element_size */
6688 		mono_mb_emit_ldarg (mb, 0);
6689 		mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoVTable, klass));
6690 		mono_mb_emit_byte (mb, CEE_ADD);
6691 		mono_mb_emit_byte (mb, CEE_LDIND_I);
6692 		mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoClass, sizes.element_size));
6693 		mono_mb_emit_byte (mb, CEE_ADD);
6694 		mono_mb_emit_byte (mb, CEE_LDIND_U4);
/* size = element_size * max_length (unsigned, overflow-checked) */
6697 		mono_mb_emit_ldarg (mb, 1);
6698 		mono_mb_emit_byte (mb, CEE_MUL_OVF_UN);
6699 		/* + sizeof (MonoArray) */
6700 		mono_mb_emit_icon (mb, sizeof (MonoArray));
6701 		mono_mb_emit_byte (mb, CEE_ADD_OVF_UN);
6702 		mono_mb_emit_stloc (mb, size_var);
6704 		pos_leave = mono_mb_emit_branch (mb, CEE_LEAVE);
/* Catch clause: OverflowException -> discard it and throw OutOfMemoryException. */
6707 		clause->flags = MONO_EXCEPTION_CLAUSE_NONE;
6708 		clause->try_len = mono_mb_get_pos (mb) - clause->try_offset;
6709 		clause->data.catch_class = mono_class_from_name (mono_defaults.corlib,
6710 				"System", "OverflowException");
6711 		g_assert (clause->data.catch_class);
6712 		clause->handler_offset = mono_mb_get_label (mb);
6714 		oom_exc_class = mono_class_from_name (mono_defaults.corlib,
6715 				"System", "OutOfMemoryException");
6716 		g_assert (oom_exc_class);
6717 		ctor = mono_class_get_method_from_name (oom_exc_class, ".ctor", 0);
6720 		mono_mb_emit_byte (mb, CEE_POP);
6721 		mono_mb_emit_op (mb, CEE_NEWOBJ, ctor);
6722 		mono_mb_emit_byte (mb, CEE_THROW);
6724 		clause->handler_len = mono_mb_get_pos (mb) - clause->handler_offset;
6725 		mono_mb_set_clauses (mb, 1, clause);
6726 		mono_mb_patch_branch (mb, pos_leave);
6729 		g_assert_not_reached ();
/* Round size up to the allocation alignment. */
6732 	/* size += ALLOC_ALIGN - 1; */
6733 	mono_mb_emit_ldloc (mb, size_var);
6734 	mono_mb_emit_icon (mb, ALLOC_ALIGN - 1);
6735 	mono_mb_emit_byte (mb, CEE_ADD);
6736 	/* size &= ~(ALLOC_ALIGN - 1); */
6737 	mono_mb_emit_icon (mb, ~(ALLOC_ALIGN - 1));
6738 	mono_mb_emit_byte (mb, CEE_AND);
6739 	mono_mb_emit_stloc (mb, size_var);
6741 	/* if (size > MAX_SMALL_OBJ_SIZE) goto slowpath */
6742 	if (atype != ATYPE_SMALL) {
6743 		mono_mb_emit_ldloc (mb, size_var);
6744 		mono_mb_emit_icon (mb, MAX_SMALL_OBJ_SIZE);
6745 		max_size_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BGT_S);
6749 	 * We need to modify tlab_next, but the JIT only supports reading, so we read
6750 	 * another tls var holding its address instead.
6753 	/* tlab_next_addr (local) = tlab_next_addr (TLS var) */
6754 	tlab_next_addr_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
6755 	EMIT_TLS_ACCESS (mb, tlab_next_addr, tlab_next_addr_offset);
6756 	mono_mb_emit_stloc (mb, tlab_next_addr_var);
6758 	/* p = (void**)tlab_next; */
6759 	p_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
6760 	mono_mb_emit_ldloc (mb, tlab_next_addr_var);
6761 	mono_mb_emit_byte (mb, CEE_LDIND_I);
6762 	mono_mb_emit_stloc (mb, p_var);
6764 	/* new_next = (char*)p + size; */
6765 	new_next_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
6766 	mono_mb_emit_ldloc (mb, p_var);
6767 	mono_mb_emit_ldloc (mb, size_var);
6768 	mono_mb_emit_byte (mb, CEE_CONV_I);
6769 	mono_mb_emit_byte (mb, CEE_ADD);
6770 	mono_mb_emit_stloc (mb, new_next_var);
6772 	/* tlab_next = new_next */
6773 	mono_mb_emit_ldloc (mb, tlab_next_addr_var);
6774 	mono_mb_emit_ldloc (mb, new_next_var);
6775 	mono_mb_emit_byte (mb, CEE_STIND_I);
6777 	/* if (G_LIKELY (new_next < tlab_temp_end)) */
6778 	mono_mb_emit_ldloc (mb, new_next_var);
6779 	EMIT_TLS_ACCESS (mb, tlab_temp_end, tlab_temp_end_offset);
6780 	slowpath_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BLT_UN_S);
/* Slow path: size too large or TLAB exhausted — call into the runtime. */
6783 	if (atype != ATYPE_SMALL)
6784 		mono_mb_patch_short_branch (mb, max_size_branch);
6786 	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
6787 	mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
6789 	/* FIXME: mono_gc_alloc_obj takes a 'size_t' as an argument, not an int32 */
6790 	mono_mb_emit_ldarg (mb, 0);
6791 	mono_mb_emit_ldloc (mb, size_var);
6792 	if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
6793 		mono_mb_emit_icall (mb, mono_gc_alloc_obj);
6794 	} else if (atype == ATYPE_VECTOR) {
6795 		mono_mb_emit_ldarg (mb, 1);
6796 		mono_mb_emit_icall (mb, mono_gc_alloc_vector);
6798 		g_assert_not_reached ();
6800 	mono_mb_emit_byte (mb, CEE_RET);
/* Fast path continues here: store the vtable, then (for vectors) the length. */
6803 	mono_mb_patch_short_branch (mb, slowpath_branch);
6805 	/* FIXME: Memory barrier */
6808 	mono_mb_emit_ldloc (mb, p_var);
6809 	mono_mb_emit_ldarg (mb, 0);
6810 	mono_mb_emit_byte (mb, CEE_STIND_I);
6812 	if (atype == ATYPE_VECTOR) {
6813 		/* arr->max_length = max_length; */
6814 		mono_mb_emit_ldloc (mb, p_var);
6815 		mono_mb_emit_ldflda (mb, G_STRUCT_OFFSET (MonoArray, max_length));
6816 		mono_mb_emit_ldarg (mb, 1);
6817 		mono_mb_emit_byte (mb, CEE_STIND_I);
6821 	mono_mb_emit_ldloc (mb, p_var);
6822 	mono_mb_emit_byte (mb, CEE_RET);
6824 	res = mono_mb_create_method (mb, csig, 8);
/* The nursery is zeroed by the GC, so skipping init_locals is safe here. */
6826 	mono_method_get_header (res)->init_locals = FALSE;
/* Tag the wrapper so tools (and is_ip_in_managed_allocator) can identify it. */
6828 	info = mono_image_alloc0 (mono_defaults.corlib, sizeof (AllocatorWrapperInfo));
6829 	info->gc_name = "sgen";
6830 	info->alloc_type = atype;
6831 	mono_marshal_set_wrapper_info (res, info);
/* Returns the GC name string ("sgen"; body not visible in this excerpt). */
6838 mono_gc_get_gc_name (void)
/* Per-atype cache of generated allocator wrappers and the cached write
 * barrier wrapper; both are created lazily under the loader lock (see
 * mono_gc_get_managed_allocator_by_type and mono_gc_get_write_barrier). */
6843 static MonoMethod* alloc_method_cache [ATYPE_NUM];
6844 static MonoMethod *write_barrier_method;
/*
 * is_ip_in_managed_allocator:
 * Check whether 'ip' lies inside one of the generated GC wrapper methods
 * (the write barrier or any cached managed allocator) in 'domain'.  Per the
 * comment above create_allocator, those wrappers must execute atomically via
 * the suspend/restart mechanism, which needs this test.
 */
6847 is_ip_in_managed_allocator (MonoDomain *domain, gpointer ip)
6855 	ji = mono_jit_info_table_find (domain, ip);
6858 	method = ji->method;
6860 	if (method == write_barrier_method)
6862 	for (i = 0; i < ATYPE_NUM; ++i)
6863 		if (method == alloc_method_cache [i])
6869 * Generate an allocator method implementing the fast path of mono_gc_alloc_obj ().
6870 * The signature of the called method is:
6871 * 	object allocate (MonoVTable *vtable)
/*
 * mono_gc_get_managed_allocator:
 * Return a managed fast-path allocator for 'vtable', or (implicitly, via the
 * fall-through cases whose return lines are not all visible in this excerpt)
 * decline when the fast path cannot be used: no TLS fast access, object larger
 * than the TLAB, finalizable/remoted classes, allocation profiling enabled,
 * strings, or debug collect-before-allocs mode.
 */
6874 mono_gc_get_managed_allocator (MonoVTable *vtable, gboolean for_box)
6876 #ifdef MANAGED_ALLOCATION
6877 	MonoClass *klass = vtable->klass;
6879 #ifdef HAVE_KW_THREAD
6880 	int tlab_next_offset = -1;
6881 	int tlab_temp_end_offset = -1;
6882 	MONO_THREAD_VAR_OFFSET (tlab_next, tlab_next_offset);
6883 	MONO_THREAD_VAR_OFFSET (tlab_temp_end, tlab_temp_end_offset);
6885 	if (tlab_next_offset == -1 || tlab_temp_end_offset == -1)
6889 	if (!mono_runtime_has_tls_get ())
6891 	if (klass->instance_size > tlab_size)
6893 	if (klass->has_finalize || klass->marshalbyref || (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS))
6897 	if (klass->byval_arg.type == MONO_TYPE_STRING)
6899 	if (collect_before_allocs)
/* Small objects get the branch-free ATYPE_SMALL wrapper (no size check). */
6902 	if (ALIGN_TO (klass->instance_size, ALLOC_ALIGN) < MAX_SMALL_OBJ_SIZE)
6903 		return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL);
6905 		return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL);
/*
 * mono_gc_get_managed_array_allocator:
 * Return the managed fast-path vector allocator for 'vtable', or decline
 * when the fast path is unusable (no TLS fast access, allocation profiling,
 * collect-before-allocs debug mode); return lines for the declining cases
 * are not all visible in this excerpt.
 */
6912 mono_gc_get_managed_array_allocator (MonoVTable *vtable, int rank)
6914 #ifdef MANAGED_ALLOCATION
6915 	MonoClass *klass = vtable->klass;
6917 #ifdef HAVE_KW_THREAD
6918 	int tlab_next_offset = -1;
6919 	int tlab_temp_end_offset = -1;
6920 	MONO_THREAD_VAR_OFFSET (tlab_next, tlab_next_offset);
6921 	MONO_THREAD_VAR_OFFSET (tlab_temp_end, tlab_temp_end_offset);
6923 	if (tlab_next_offset == -1 || tlab_temp_end_offset == -1)
6929 	if (!mono_runtime_has_tls_get ())
6931 	if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
6933 	if (collect_before_allocs)
/* Arrays are never finalizable or remoted, so no per-class checks needed. */
6935 	g_assert (!klass->has_finalize && !klass->marshalbyref);
6937 	return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR);
/*
 * mono_gc_get_managed_allocator_by_type:
 * Lazily create (under the loader lock) and cache the allocator wrapper for
 * 'atype' in alloc_method_cache; returns NULL when TLS fast access is
 * unavailable.
 */
6944 mono_gc_get_managed_allocator_by_type (int atype)
6946 #ifdef MANAGED_ALLOCATION
6949 	if (!mono_runtime_has_tls_get ())
6952 	mono_loader_lock ();
6953 	res = alloc_method_cache [atype];
6955 		res = alloc_method_cache [atype] = create_allocator (atype);
6956 	mono_loader_unlock ();
/* Returns the number of allocator types (ATYPE_NUM; body not visible here). */
6964 mono_gc_get_managed_allocator_types (void)
/*
 * mono_gc_get_write_barrier:
 * Build (once, cached in write_barrier_method) the managed IL version of
 * mono_gc_barrier_generic_store (): a fast-path write barrier that filters
 * out stores which need no remembered-set entry and records the rest in the
 * per-thread store remset buffer, falling back to the
 * mono_gc_wbarrier_generic_nostore icall when the buffer is full or TLS fast
 * access is unavailable.
 */
6971 mono_gc_get_write_barrier (void)
6974 	MonoMethodBuilder *mb;
6975 	MonoMethodSignature *sig;
6976 #ifdef MANAGED_WBARRIER
6977 	int label_no_wb_1, label_no_wb_2, label_no_wb_3, label_no_wb_4, label_need_wb, label_slow_path;
6978 #ifndef SGEN_ALIGN_NURSERY
6979 	int label_continue_1, label_continue_2, label_no_wb_5;
6980 	int dereferenced_var;
6982 	int buffer_var, buffer_index_var, dummy_var;
6984 #ifdef HAVE_KW_THREAD
6985 	int stack_end_offset = -1, store_remset_buffer_offset = -1;
6986 	int store_remset_buffer_index_offset = -1, store_remset_buffer_index_addr_offset = -1;
6988 	MONO_THREAD_VAR_OFFSET (stack_end, stack_end_offset);
6989 	g_assert (stack_end_offset != -1);
6990 	MONO_THREAD_VAR_OFFSET (store_remset_buffer, store_remset_buffer_offset);
6991 	g_assert (store_remset_buffer_offset != -1);
6992 	MONO_THREAD_VAR_OFFSET (store_remset_buffer_index, store_remset_buffer_index_offset);
6993 	g_assert (store_remset_buffer_index_offset != -1);
6994 	MONO_THREAD_VAR_OFFSET (store_remset_buffer_index_addr, store_remset_buffer_index_addr_offset);
6995 	g_assert (store_remset_buffer_index_addr_offset != -1);
/* This managed barrier path is incompatible with the card table barrier. */
6999 	g_assert (!use_cardtable);
7001 	// FIXME: Maybe create a separate version for ctors (the branch would be
7002 	// correctly predicted more times)
7003 	if (write_barrier_method)
7004 		return write_barrier_method;
7006 	/* Create the IL version of mono_gc_barrier_generic_store () */
7007 	sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
7008 	sig->ret = &mono_defaults.void_class->byval_arg;
7009 	sig->params [0] = &mono_defaults.int_class->byval_arg;
7011 	mb = mono_mb_new (mono_defaults.object_class, "wbarrier", MONO_WRAPPER_WRITE_BARRIER);
7013 #ifdef MANAGED_WBARRIER
7014 	if (mono_runtime_has_tls_get ()) {
7015 #ifdef SGEN_ALIGN_NURSERY
7016 		// if (ptr_in_nursery (ptr)) return;
7018 		 * Masking out the bits might be faster, but we would have to use 64 bit
7019 		 * immediates, which might be slower.
/* Aligned nursery: membership test is a shift + compare against the
 * nursery_start high bits. */
7021 		mono_mb_emit_ldarg (mb, 0);
7022 		mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
7023 		mono_mb_emit_byte (mb, CEE_SHR_UN);
7024 		mono_mb_emit_icon (mb, (mword)nursery_start >> DEFAULT_NURSERY_BITS);
7025 		label_no_wb_1 = mono_mb_emit_branch (mb, CEE_BEQ);
7027 		// if (!ptr_in_nursery (*ptr)) return;
7028 		mono_mb_emit_ldarg (mb, 0);
7029 		mono_mb_emit_byte (mb, CEE_LDIND_I);
7030 		mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
7031 		mono_mb_emit_byte (mb, CEE_SHR_UN);
7032 		mono_mb_emit_icon (mb, (mword)nursery_start >> DEFAULT_NURSERY_BITS);
7033 		label_no_wb_2 = mono_mb_emit_branch (mb, CEE_BNE_UN);
/* Unaligned nursery: membership test is a pair of range compares against
 * nursery_start / nursery_real_end. */
7036 		// if (ptr < (nursery_start)) goto continue;
7037 		mono_mb_emit_ldarg (mb, 0);
7038 		mono_mb_emit_ptr (mb, (gpointer) nursery_start);
7039 		label_continue_1 = mono_mb_emit_branch (mb, CEE_BLT);
7041 		// if (ptr >= nursery_real_end)) goto continue;
7042 		mono_mb_emit_ldarg (mb, 0);
7043 		mono_mb_emit_ptr (mb, (gpointer) nursery_real_end);
7044 		label_continue_2 = mono_mb_emit_branch (mb, CEE_BGE);
7047 		label_no_wb_1 = mono_mb_emit_branch (mb, CEE_BR);
7050 		mono_mb_patch_branch (mb, label_continue_1);
7051 		mono_mb_patch_branch (mb, label_continue_2);
7053 		// Dereference and store in local var
7054 		dereferenced_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
7055 		mono_mb_emit_ldarg (mb, 0);
7056 		mono_mb_emit_byte (mb, CEE_LDIND_I);
7057 		mono_mb_emit_stloc (mb, dereferenced_var);
7059 		// if (*ptr < nursery_start) return;
7060 		mono_mb_emit_ldloc (mb, dereferenced_var);
7061 		mono_mb_emit_ptr (mb, (gpointer) nursery_start);
7062 		label_no_wb_2 = mono_mb_emit_branch (mb, CEE_BLT);
7064 		// if (*ptr >= nursery_end) return;
7065 		mono_mb_emit_ldloc (mb, dereferenced_var);
7066 		mono_mb_emit_ptr (mb, (gpointer) nursery_real_end);
7067 		label_no_wb_5 = mono_mb_emit_branch (mb, CEE_BGE);
/* Stores into the current thread's stack need no remembered-set entry:
 * compare ptr against stack_end (TLS) and the current stack position. */
7070 		// if (ptr >= stack_end) goto need_wb;
7071 		mono_mb_emit_ldarg (mb, 0);
7072 		EMIT_TLS_ACCESS (mb, stack_end, stack_end_offset);
7073 		label_need_wb = mono_mb_emit_branch (mb, CEE_BGE_UN);
7075 		// if (ptr >= stack_start) return;
7076 		dummy_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
7077 		mono_mb_emit_ldarg (mb, 0);
7078 		mono_mb_emit_ldloc_addr (mb, dummy_var);
7079 		label_no_wb_3 = mono_mb_emit_branch (mb, CEE_BGE_UN);
7082 		mono_mb_patch_branch (mb, label_need_wb);
7084 		// buffer = STORE_REMSET_BUFFER;
7085 		buffer_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
7086 		EMIT_TLS_ACCESS (mb, store_remset_buffer, store_remset_buffer_offset);
7087 		mono_mb_emit_stloc (mb, buffer_var);
7089 		// buffer_index = STORE_REMSET_BUFFER_INDEX;
7090 		buffer_index_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
7091 		EMIT_TLS_ACCESS (mb, store_remset_buffer_index, store_remset_buffer_index_offset);
7092 		mono_mb_emit_stloc (mb, buffer_index_var);
7094 		// if (buffer [buffer_index] == ptr) return;
7095 		mono_mb_emit_ldloc (mb, buffer_var);
7096 		mono_mb_emit_ldloc (mb, buffer_index_var);
7097 		g_assert (sizeof (gpointer) == 4 || sizeof (gpointer) == 8);
/* Index scaling: shift by 2 (32-bit) or 3 (64-bit) instead of multiply. */
7098 		mono_mb_emit_icon (mb, sizeof (gpointer) == 4 ? 2 : 3);
7099 		mono_mb_emit_byte (mb, CEE_SHL);
7100 		mono_mb_emit_byte (mb, CEE_ADD);
7101 		mono_mb_emit_byte (mb, CEE_LDIND_I);
7102 		mono_mb_emit_ldarg (mb, 0);
7103 		label_no_wb_4 = mono_mb_emit_branch (mb, CEE_BEQ);
7106 		mono_mb_emit_ldloc (mb, buffer_index_var);
7107 		mono_mb_emit_icon (mb, 1);
7108 		mono_mb_emit_byte (mb, CEE_ADD);
7109 		mono_mb_emit_stloc (mb, buffer_index_var);
7111 		// if (buffer_index >= STORE_REMSET_BUFFER_SIZE) goto slow_path;
7112 		mono_mb_emit_ldloc (mb, buffer_index_var);
7113 		mono_mb_emit_icon (mb, STORE_REMSET_BUFFER_SIZE);
7114 		label_slow_path = mono_mb_emit_branch (mb, CEE_BGE);
7116 		// buffer [buffer_index] = ptr;
7117 		mono_mb_emit_ldloc (mb, buffer_var);
7118 		mono_mb_emit_ldloc (mb, buffer_index_var);
7119 		g_assert (sizeof (gpointer) == 4 || sizeof (gpointer) == 8);
7120 		mono_mb_emit_icon (mb, sizeof (gpointer) == 4 ? 2 : 3);
7121 		mono_mb_emit_byte (mb, CEE_SHL);
7122 		mono_mb_emit_byte (mb, CEE_ADD);
7123 		mono_mb_emit_ldarg (mb, 0);
7124 		mono_mb_emit_byte (mb, CEE_STIND_I);
7126 		// STORE_REMSET_BUFFER_INDEX = buffer_index;
7127 		EMIT_TLS_ACCESS (mb, store_remset_buffer_index_addr, store_remset_buffer_index_addr_offset);
7128 		mono_mb_emit_ldloc (mb, buffer_index_var);
7129 		mono_mb_emit_byte (mb, CEE_STIND_I);
/* All "no write barrier needed" branches converge on a plain return. */
7132 		mono_mb_patch_branch (mb, label_no_wb_1);
7133 		mono_mb_patch_branch (mb, label_no_wb_2);
7134 		mono_mb_patch_branch (mb, label_no_wb_3);
7135 		mono_mb_patch_branch (mb, label_no_wb_4);
7136 #ifndef SGEN_ALIGN_NURSERY
7137 		mono_mb_patch_branch (mb, label_no_wb_5);
7139 		mono_mb_emit_byte (mb, CEE_RET);
7142 		mono_mb_patch_branch (mb, label_slow_path);
/* Slow path: hand the store address to the runtime barrier. */
7146 	mono_mb_emit_ldarg (mb, 0);
7147 	mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
7148 	mono_mb_emit_byte (mb, CEE_RET);
7150 	res = mono_mb_create_method (mb, sig, 16);
7153 	mono_loader_lock ();
7154 	if (write_barrier_method) {
7155 		/* Already created */
7156 		mono_free_method (res);
7158 		/* double-checked locking */
7159 		mono_memory_barrier ();
7160 		write_barrier_method = res;
7162 	mono_loader_unlock ();
7164 	return write_barrier_method;
/* Returns a newly-allocated description string; caller frees. */
7168 mono_gc_get_description (void)
7170 	return g_strdup ("sgen");
/* No-op for sgen (body not visible in this excerpt). */
7174 mono_gc_set_desktop_mode (void)
/* Queries about collector behavior; bodies not visible in this excerpt. */
7179 mono_gc_is_moving (void)
7185 mono_gc_is_disabled (void)
/*
 * mono_sgen_debug_printf:
 * printf-style debug logging, filtered by gc_debug_level and routed to
 * gc_debug_file (stderr or the file named by MONO_GC_DEBUG).
 * NOTE(review): the matching va_end is not visible in this excerpt — confirm
 * it exists in the full source.
 */
7191 mono_sgen_debug_printf (int level, const char *format, ...)
7195 	if (level > gc_debug_level)
7198 	va_start (ap, format);
7199 	vfprintf (gc_debug_file, format, ap);
7203 #endif /* HAVE_SGEN_GC */