2 * Copyright 2001-2003 Ximian, Inc
3 * Copyright 2003-2010 Novell, Inc.
4 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice shall be
15 * included in all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 #ifndef __MONO_SGENGC_H__
26 #define __MONO_SGENGC_H__
33 typedef struct _SgenThreadInfo SgenThreadInfo;
34 #define THREAD_INFO_TYPE SgenThreadInfo
39 #include <mono/utils/mono-compiler.h>
40 #include <mono/utils/mono-threads.h>
41 #include <mono/metadata/class-internals.h>
42 #include <mono/metadata/object-internals.h>
43 #include <mono/metadata/sgen-archdep.h>
45 #include <mach/mach_port.h>
49 * Turning on heavy statistics will turn off the managed allocator and
50 * the managed write barrier.
52 //#define HEAVY_STATISTICS
55 * If this is set, the nursery is aligned to an address aligned to its size, ie.
56 * a 1MB nursery will be aligned to an address divisible by 1MB. This allows us to
57 * speed up ptr_in_nursery () checks which are very frequent. This requires the
58 * nursery size to be a compile time constant.
60 #define SGEN_ALIGN_NURSERY 1
62 //#define SGEN_BINARY_PROTOCOL
64 #define SGEN_MAX_DEBUG_LEVEL 2
66 #define GC_BITS_PER_WORD (sizeof (mword) * 8)
68 /* The method used to clear the nursery */
69 /* Clearing at nursery collections is the safest, but has bad interactions with caches.
70 * Clearing at TLAB creation is much faster, but more complex and it might expose hard
75 CLEAR_AT_TLAB_CREATION
78 NurseryClearPolicy mono_sgen_get_nursery_clear_policy (void) MONO_INTERNAL;
81 #if SIZEOF_VOID_P == 4
82 typedef guint32 mword;
84 typedef guint64 mword;
87 #define SGEN_TV_DECLARE(name) gint64 name /* timestamp storage, in 100ns ticks */
88 #define SGEN_TV_GETTIME(tv) tv = mono_100ns_ticks () /* sample the current time */
89 #define SGEN_TV_ELAPSED(start,end) (int)((end-start) / 10) /* elapsed time in microseconds (10 ticks = 1us) */
90 #define SGEN_TV_ELAPSED_MS(start,end) ((SGEN_TV_ELAPSED((start),(end)) + 500) / 1000) /* elapsed time in milliseconds, rounded to nearest */
92 /* for use with write barriers */
93 typedef struct _RememberedSet RememberedSet;
94 struct _RememberedSet {
98 mword data [MONO_ZERO_LEN_ARRAY];
101 /* eventually share with MonoThread? */
102 struct _SgenThreadInfo {
104 #if defined(__MACH__)
105 thread_port_t mach_port;
108 unsigned int stop_count; /* to catch duplicate signals */
111 volatile int in_critical_region;
112 gboolean doing_handshake;
113 gboolean thread_is_dying;
116 void *stack_start_limit;
117 char **tlab_next_addr;
118 char **tlab_start_addr;
119 char **tlab_temp_end_addr;
120 char **tlab_real_end_addr;
121 gpointer **store_remset_buffer_addr;
122 long *store_remset_buffer_index_addr;
123 RememberedSet *remset;
124 gpointer runtime_data;
125 gpointer stopped_ip; /* only valid if the thread is stopped */
126 MonoDomain *stopped_domain; /* ditto */
128 #if defined(__MACH__)
130 MonoContext ctx; /* ditto */
132 gpointer regs[ARCH_NUM_REGS]; /* ditto */
137 MonoContext *monoctx; /* ditto */
139 gpointer *stopped_regs; /* ditto */
141 #ifndef HAVE_KW_THREAD
146 gpointer *store_remset_buffer;
147 long store_remset_buffer_index;
157 typedef struct _SgenBlock SgenBlock;
164 * The nursery section and the major copying collector's sections use
167 typedef struct _GCMemSection GCMemSection;
168 struct _GCMemSection {
172 /* pointer where more data could be allocated if it fits */
176 * scan starts is an array of pointers to objects equally spaced in the allocation area
177 * They let us quickly find pinned objects from pinning pointers.
180 /* in major collections indexes in the pin_queue for objects that pin this section */
181 void **pin_queue_start;
182 int pin_queue_num_entries;
183 unsigned short num_scan_start;
184 gboolean is_to_space;
187 #define SGEN_SIZEOF_GC_MEM_SECTION ((sizeof (GCMemSection) + 7) & ~7)
190 * to quickly find the head of an object pinned by a conservative
191 * address we keep track of the objects allocated for each
192 * SGEN_SCAN_START_SIZE memory chunk in the nursery or other memory
193 * sections. Larger values have less memory overhead and bigger
194 * runtime cost. 4-8 KB are reasonable values.
196 #define SGEN_SCAN_START_SIZE (4096*2)
199 * Objects bigger than this go into the large object space. This size
200 * has a few constraints. It must fit into the major heap, which in
201 * the case of the copying collector means that it must fit into a
202 * pinned chunk. It must also play well with the GC descriptors, some
203 * of which (DESC_TYPE_RUN_LENGTH, DESC_TYPE_SMALL_BITMAP) encode the
206 #define SGEN_MAX_SMALL_OBJ_SIZE 8000
209 * This is the maximum amount of memory we're willing to waste in order to speed up allocation.
210 * Wastage comes in three forms:
212 * -when building the nursery fragment list, small regions are discarded;
213 * -when allocating memory from a fragment if it ends up below the threshold, we remove it from the fragment list; and
214 * -when allocating a new tlab, we discard the remaining space of the old one
216 * Increasing this value speeds up allocation but will cause more frequent nursery collections as less space will be used.
217 * Decreasing this value will cause allocation to be slower since we'll have to cycle through more fragments.
218 * 512 anecdotally keeps wastage under control and doesn't impact allocation performance too much.
220 #define SGEN_MAX_NURSERY_WASTE 512
223 /* This is also the MAJOR_SECTION_SIZE for the copying major
225 #define SGEN_PINNED_CHUNK_SIZE (128 * 1024)
227 #define SGEN_PINNED_CHUNK_FOR_PTR(o) ((SgenBlock*)(((mword)(o)) & ~(SGEN_PINNED_CHUNK_SIZE - 1)))
229 typedef struct _SgenPinnedChunk SgenPinnedChunk;
232 * Recursion is not allowed for the thread lock.
234 #define LOCK_DECLARE(name) pthread_mutex_t name = PTHREAD_MUTEX_INITIALIZER
235 /* if changing LOCK_INIT to something that isn't idempotent, look at
236 its use in mono_gc_base_init in sgen-gc.c */
237 #define LOCK_INIT(name)
238 #define LOCK_GC pthread_mutex_lock (&gc_mutex)
239 #define TRYLOCK_GC (pthread_mutex_trylock (&gc_mutex) == 0)
240 #define UNLOCK_GC pthread_mutex_unlock (&gc_mutex)
241 #define LOCK_INTERRUPTION pthread_mutex_lock (&interruption_mutex)
242 #define UNLOCK_INTERRUPTION pthread_mutex_unlock (&interruption_mutex)
244 #define SGEN_CAS_PTR InterlockedCompareExchangePointer
245 #define SGEN_ATOMIC_ADD(x,i) do { \
249 } while (InterlockedCompareExchange (&(x), __old_x + (i), __old_x) != __old_x); \
252 /* we intercept pthread_create calls to know which threads exist */
253 #define USE_PTHREAD_INTERCEPT 1
255 #ifdef HEAVY_STATISTICS
256 #define HEAVY_STAT(x) x
258 extern long long stat_objects_alloced_degraded;
259 extern long long stat_bytes_alloced_degraded;
260 extern long long stat_copy_object_called_major;
261 extern long long stat_objects_copied_major;
263 #define HEAVY_STAT(x)
266 #define DEBUG(level,a) do {if (G_UNLIKELY ((level) <= SGEN_MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) a;} while (0)
268 extern int gc_debug_level;
269 extern FILE* gc_debug_file;
271 extern int current_collection_generation;
273 extern unsigned int mono_sgen_global_stop_count;
275 #define SGEN_ALLOC_ALIGN 8
276 #define SGEN_ALLOC_ALIGN_BITS 3
278 #define SGEN_ALIGN_UP(s) (((s)+(SGEN_ALLOC_ALIGN-1)) & ~(SGEN_ALLOC_ALIGN-1))
280 #ifdef SGEN_ALIGN_NURSERY
281 #define SGEN_PTR_IN_NURSERY(p,bits,start,end) (((mword)(p) & ~((1 << (bits)) - 1)) == (mword)(start))
283 #define SGEN_PTR_IN_NURSERY(p,bits,start,end) ((char*)(p) >= (start) && (char*)(p) < (end))
286 /* Structure that corresponds to a MonoVTable: desc is a mword so requires
287 * no cast from a pointer to an integer
294 /* these bits are set in the object vtable: we could merge them since an object can be
295 * either pinned or forwarded but not both.
296 * We store them in the vtable slot because the bits are used in the sync block for
297 * other purposes: if we merge them and alloc the sync blocks aligned to 8 bytes, we can change
298 * this and use bit 3 in the syncblock (with the lower two bits both set for forwarded, that
299 * would be an invalid combination for the monitor and hash code).
300 * The values are already shifted.
301 * The forwarding address is stored in the sync block.
303 #define SGEN_FORWARDED_BIT 1 /* set in the first (vtable) word of a forwarded object */
304 #define SGEN_PINNED_BIT 2 /* set in the first (vtable) word of a pinned object */
305 #define SGEN_VTABLE_BITS_MASK 0x3 /* mask covering both tag bits; clear before using the word as a vtable pointer */
307 /* returns NULL if not forwarded, or the forwarded address */
308 #define SGEN_OBJECT_IS_FORWARDED(obj) (((mword*)(obj))[0] & SGEN_FORWARDED_BIT ? (void*)(((mword*)(obj))[0] & ~SGEN_VTABLE_BITS_MASK) : NULL)
309 #define SGEN_OBJECT_IS_PINNED(obj) (((mword*)(obj))[0] & SGEN_PINNED_BIT)
311 /* set the forwarded address fw_addr for object obj */
312 #define SGEN_FORWARD_OBJECT(obj,fw_addr) do { \
313 ((mword*)(obj))[0] = (mword)(fw_addr) | SGEN_FORWARDED_BIT; \
315 #define SGEN_PIN_OBJECT(obj) do { \
316 ((mword*)(obj))[0] |= SGEN_PINNED_BIT; \
318 #define SGEN_UNPIN_OBJECT(obj) do { \
319 ((mword*)(obj))[0] &= ~SGEN_PINNED_BIT; \
323 * Since we set bits in the vtable, use the macro to load it from the pointer to
324 * an object that is potentially pinned.
326 #define SGEN_LOAD_VTABLE(addr) ((*(mword*)(addr)) & ~SGEN_VTABLE_BITS_MASK)
329 * ######################################################################
330 * ######## GC descriptors
331 * ######################################################################
332 * Used to quickly get the info the GC needs about an object: size and
333 * where the references are held.
335 #define OBJECT_HEADER_WORDS (sizeof(MonoObject)/sizeof(gpointer))
336 #define LOW_TYPE_BITS 3
337 #define SMALL_BITMAP_SHIFT 16
338 #define SMALL_BITMAP_SIZE (GC_BITS_PER_WORD - SMALL_BITMAP_SHIFT)
339 #define VECTOR_INFO_SHIFT 14
340 #define VECTOR_ELSIZE_SHIFT 3
341 #define LARGE_BITMAP_SIZE (GC_BITS_PER_WORD - LOW_TYPE_BITS)
342 #define MAX_ELEMENT_SIZE 0x3ff
343 #define VECTOR_SUBTYPE_PTRFREE (DESC_TYPE_V_PTRFREE << VECTOR_INFO_SHIFT)
344 #define VECTOR_SUBTYPE_REFS (DESC_TYPE_V_REFS << VECTOR_INFO_SHIFT)
345 #define VECTOR_SUBTYPE_RUN_LEN (DESC_TYPE_V_RUN_LEN << VECTOR_INFO_SHIFT)
346 #define VECTOR_SUBTYPE_BITMAP (DESC_TYPE_V_BITMAP << VECTOR_INFO_SHIFT)
348 /* objects are aligned to 8 bytes boundaries
349 * A descriptor is a pointer in MonoVTable, so 32 or 64 bits of size.
350 * The low 3 bits define the type of the descriptor. The other bits
351 * depend on the type.
352 * As a general rule the 13 remaining low bits define the size, either
353 * of the whole object or of the elements in the arrays. While for objects
354 * the size is already in bytes, for arrays we need to shift, because
355 * array elements might be smaller than 8 bytes. In case of arrays, we
356 * use two bits to describe what the additional high bits represents,
357 * so the default behaviour can handle element sizes less than 2048 bytes.
358 * The high 16 bits, if 0 it means the object is pointer-free.
359 * This design should make it easy and fast to skip over ptr-free data.
360 * The first 4 types should cover >95% of the objects.
361 * Note that since the size of objects is limited to 64K, larger objects
362 * will be allocated in the large object heap.
363 * If we want 4-bytes alignment, we need to put vector and small bitmap
368 * We don't use 0 so that 0 isn't a valid GC descriptor. No
369 * deep reason for this other than to be able to identify a
370 * non-inited descriptor for debugging.
372 * If an object contains no references, its GC descriptor is
373 * always DESC_TYPE_RUN_LENGTH, without a size, no exceptions.
374 * This is so that we can quickly check for that in
375 * copy_object_no_checks(), without having to fetch the
378 DESC_TYPE_RUN_LENGTH = 1, /* 15 bits aligned byte size | 1-3 (offset, numptr) bytes tuples */
379 DESC_TYPE_COMPLEX, /* index for bitmap into complex_descriptors */
380 DESC_TYPE_VECTOR, /* 10 bits element size | 1 bit array | 2 bits desc | element desc */
381 DESC_TYPE_ARRAY, /* 10 bits element size | 1 bit array | 2 bits desc | element desc */
382 DESC_TYPE_LARGE_BITMAP, /* | 29-61 bitmap bits */
383 DESC_TYPE_COMPLEX_ARR, /* index for bitmap into complex_descriptors */
384 /* subtypes for arrays and vectors */
385 DESC_TYPE_V_PTRFREE = 0,/* there are no refs: keep first so it has a zero value */
386 DESC_TYPE_V_REFS, /* all the array elements are refs */
387 DESC_TYPE_V_RUN_LEN, /* elements are run-length encoded as DESC_TYPE_RUN_LENGTH */
388 DESC_TYPE_V_BITMAP /* elements are as the bitmap in DESC_TYPE_SMALL_BITMAP */
391 #define SGEN_VTABLE_HAS_REFERENCES(vt) (((MonoVTable*)(vt))->gc_descr != (void*)DESC_TYPE_RUN_LENGTH)
392 #define SGEN_CLASS_HAS_REFERENCES(c) ((c)->gc_descr != (void*)DESC_TYPE_RUN_LENGTH)
394 /* helper macros to scan and traverse objects, macros because we reuse them in many functions */
395 #define OBJ_RUN_LEN_SIZE(size,desc,obj) do { \
396 (size) = ((desc) & 0xfff8) >> 1; \
399 #define OBJ_BITMAP_SIZE(size,desc,obj) do { \
400 (size) = ((desc) & 0xfff8) >> 1; \
404 #define PREFETCH(addr) __builtin_prefetch ((addr))
406 #define PREFETCH(addr)
409 /* code using these macros must define a HANDLE_PTR(ptr) macro that does the work */
410 #define OBJ_RUN_LEN_FOREACH_PTR(desc,obj) do { \
411 if ((desc) & 0xffff0000) { \
412 /* there are pointers */ \
413 void **_objptr_end; \
414 void **_objptr = (void**)(obj); \
415 _objptr += ((desc) >> 16) & 0xff; \
416 _objptr_end = _objptr + (((desc) >> 24) & 0xff); \
417 while (_objptr < _objptr_end) { \
418 HANDLE_PTR (_objptr, (obj)); \
424 /* a bitmap desc means that there are pointer references or we'd have
425 * chosen run-length instead: add an assert to check.
427 #define OBJ_LARGE_BITMAP_FOREACH_PTR(desc,obj) do { \
428 /* there are pointers */ \
429 void **_objptr = (void**)(obj); \
430 gsize _bmap = (desc) >> LOW_TYPE_BITS; \
431 _objptr += OBJECT_HEADER_WORDS; \
434 HANDLE_PTR (_objptr, (obj)); \
441 gsize* mono_sgen_get_complex_descriptor (mword desc) MONO_INTERNAL;
443 #define OBJ_COMPLEX_FOREACH_PTR(vt,obj) do { \
444 /* there are pointers */ \
445 void **_objptr = (void**)(obj); \
446 gsize *bitmap_data = mono_sgen_get_complex_descriptor ((desc)); \
447 int bwords = (*bitmap_data) - 1; \
448 void **start_run = _objptr; \
451 MonoObject *myobj = (MonoObject*)obj; \
452 g_print ("found %d at %p (0x%zx): %s.%s\n", bwords, (obj), (desc), myobj->vtable->klass->name_space, myobj->vtable->klass->name); \
454 while (bwords-- > 0) { \
455 gsize _bmap = *bitmap_data++; \
456 _objptr = start_run; \
457 /*g_print ("bitmap: 0x%x/%d at %p\n", _bmap, bwords, _objptr);*/ \
460 HANDLE_PTR (_objptr, (obj)); \
465 start_run += GC_BITS_PER_WORD; \
469 /* this one is untested */
470 #define OBJ_COMPLEX_ARR_FOREACH_PTR(vt,obj) do { \
471 /* there are pointers */ \
472 gsize *mbitmap_data = mono_sgen_get_complex_descriptor ((vt)->desc); \
473 int mbwords = (*mbitmap_data++) - 1; \
474 int el_size = mono_array_element_size (vt->klass); \
475 char *e_start = (char*)(obj) + G_STRUCT_OFFSET (MonoArray, vector); \
476 char *e_end = e_start + el_size * mono_array_length_fast ((MonoArray*)(obj)); \
478 g_print ("found %d at %p (0x%zx): %s.%s\n", mbwords, (obj), (vt)->desc, vt->klass->name_space, vt->klass->name); \
479 while (e_start < e_end) { \
480 void **_objptr = (void**)e_start; \
481 gsize *bitmap_data = mbitmap_data; \
482 unsigned int bwords = mbwords; \
483 while (bwords-- > 0) { \
484 gsize _bmap = *bitmap_data++; \
485 void **start_run = _objptr; \
486 /*g_print ("bitmap: 0x%x\n", _bmap);*/ \
489 HANDLE_PTR (_objptr, (obj)); \
494 _objptr = start_run + GC_BITS_PER_WORD; \
496 e_start += el_size; \
500 #define OBJ_VECTOR_FOREACH_PTR(desc,obj) do { \
501 /* note: 0xffffc000 excludes DESC_TYPE_V_PTRFREE */ \
502 if ((desc) & 0xffffc000) { \
503 int el_size = ((desc) >> 3) & MAX_ELEMENT_SIZE; \
504 /* there are pointers */ \
505 int etype = (desc) & 0xc000; \
506 if (etype == (DESC_TYPE_V_REFS << 14)) { \
507 void **p = (void**)((char*)(obj) + G_STRUCT_OFFSET (MonoArray, vector)); \
508 void **end_refs = (void**)((char*)p + el_size * mono_array_length_fast ((MonoArray*)(obj))); \
509 /* Note: this code can handle also arrays of struct with only references in them */ \
510 while (p < end_refs) { \
511 HANDLE_PTR (p, (obj)); \
514 } else if (etype == DESC_TYPE_V_RUN_LEN << 14) { \
515 int offset = ((desc) >> 16) & 0xff; \
516 int num_refs = ((desc) >> 24) & 0xff; \
517 char *e_start = (char*)(obj) + G_STRUCT_OFFSET (MonoArray, vector); \
518 char *e_end = e_start + el_size * mono_array_length_fast ((MonoArray*)(obj)); \
519 while (e_start < e_end) { \
520 void **p = (void**)e_start; \
523 for (i = 0; i < num_refs; ++i) { \
524 HANDLE_PTR (p + i, (obj)); \
526 e_start += el_size; \
528 } else if (etype == DESC_TYPE_V_BITMAP << 14) { \
529 char *e_start = (char*)(obj) + G_STRUCT_OFFSET (MonoArray, vector); \
530 char *e_end = e_start + el_size * mono_array_length_fast ((MonoArray*)(obj)); \
531 while (e_start < e_end) { \
532 void **p = (void**)e_start; \
533 gsize _bmap = (desc) >> 16; \
534 /* Note: there is no object header here to skip */ \
537 HANDLE_PTR (p, (obj)); \
542 e_start += el_size; \
548 #define SGEN_GRAY_QUEUE_SECTION_SIZE (128 - 3)
551 * This is a stack now instead of a queue, so the most recently added items are removed
552 * first, improving cache locality, and keeping the stack size manageable.
554 typedef struct _GrayQueueSection GrayQueueSection;
555 struct _GrayQueueSection {
557 GrayQueueSection *next;
558 char *objects [SGEN_GRAY_QUEUE_SECTION_SIZE];
561 typedef struct _SgenGrayQueue SgenGrayQueue;
563 typedef void (*GrayQueueAllocPrepareFunc) (SgenGrayQueue*);
565 struct _SgenGrayQueue {
566 GrayQueueSection *first;
567 GrayQueueSection *free_list;
569 GrayQueueAllocPrepareFunc alloc_prepare_func;
570 void *alloc_prepare_data;
573 typedef void (*CopyOrMarkObjectFunc) (void**, SgenGrayQueue*);
574 typedef void (*ScanObjectFunc) (char*, SgenGrayQueue*);
575 typedef void (*ScanVTypeFunc) (char*, mword desc, SgenGrayQueue*);
577 #if SGEN_MAX_DEBUG_LEVEL >= 9
578 #define GRAY_OBJECT_ENQUEUE gray_object_enqueue
579 #define GRAY_OBJECT_DEQUEUE(queue,o) ((o) = gray_object_dequeue ((queue)))
581 #define GRAY_OBJECT_ENQUEUE(queue,o) do { \
582 if (G_UNLIKELY (!(queue)->first || (queue)->first->end == SGEN_GRAY_QUEUE_SECTION_SIZE)) \
583 mono_sgen_gray_object_enqueue ((queue), (o)); \
585 (queue)->first->objects [(queue)->first->end++] = (o); \
588 #define GRAY_OBJECT_DEQUEUE(queue,o) do { \
589 if (!(queue)->first) \
591 else if (G_UNLIKELY ((queue)->first->end == 1)) \
592 (o) = mono_sgen_gray_object_dequeue ((queue)); \
594 (o) = (queue)->first->objects [--(queue)->first->end]; \
598 void mono_sgen_gray_object_enqueue (SgenGrayQueue *queue, char *obj) MONO_INTERNAL;
599 char* mono_sgen_gray_object_dequeue (SgenGrayQueue *queue) MONO_INTERNAL;
601 typedef void (*IterateObjectCallbackFunc) (char*, size_t, void*);
603 void* mono_sgen_alloc_os_memory (size_t size, int activate) MONO_INTERNAL;
604 void* mono_sgen_alloc_os_memory_aligned (mword size, mword alignment, gboolean activate) MONO_INTERNAL;
605 void mono_sgen_free_os_memory (void *addr, size_t size) MONO_INTERNAL;
607 int mono_sgen_thread_handshake (BOOL suspend) MONO_INTERNAL;
608 gboolean mono_sgen_suspend_thread (SgenThreadInfo *info) MONO_INTERNAL;
609 gboolean mono_sgen_resume_thread (SgenThreadInfo *info) MONO_INTERNAL;
610 void mono_sgen_wait_for_suspend_ack (int count) MONO_INTERNAL;
611 gboolean mono_sgen_park_current_thread_if_doing_handshake (SgenThreadInfo *p) MONO_INTERNAL;
612 void mono_sgen_os_init (void) MONO_INTERNAL;
614 void mono_sgen_fill_thread_info_for_suspend (SgenThreadInfo *info) MONO_INTERNAL;
616 gboolean mono_sgen_is_worker_thread (pthread_t thread) MONO_INTERNAL;
618 void mono_sgen_update_heap_boundaries (mword low, mword high) MONO_INTERNAL;
620 void mono_sgen_register_major_sections_alloced (int num_sections) MONO_INTERNAL;
621 mword mono_sgen_get_minor_collection_allowance (void) MONO_INTERNAL;
623 void mono_sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags) MONO_INTERNAL;
624 void mono_sgen_check_section_scan_starts (GCMemSection *section) MONO_INTERNAL;
626 /* Keep in sync with mono_sgen_dump_internal_mem_usage() in dump_heap()! */
628 INTERNAL_MEM_PIN_QUEUE,
629 INTERNAL_MEM_FRAGMENT,
630 INTERNAL_MEM_SECTION,
631 INTERNAL_MEM_SCAN_STARTS,
632 INTERNAL_MEM_FIN_TABLE,
633 INTERNAL_MEM_FINALIZE_ENTRY,
634 INTERNAL_MEM_FINALIZE_READY_ENTRY,
635 INTERNAL_MEM_DISLINK_TABLE,
636 INTERNAL_MEM_DISLINK,
637 INTERNAL_MEM_ROOTS_TABLE,
638 INTERNAL_MEM_ROOT_RECORD,
639 INTERNAL_MEM_STATISTICS,
640 INTERNAL_MEM_STAT_PINNED_CLASS,
641 INTERNAL_MEM_STAT_REMSET_CLASS,
643 INTERNAL_MEM_GRAY_QUEUE,
644 INTERNAL_MEM_STORE_REMSET,
645 INTERNAL_MEM_MS_TABLES,
646 INTERNAL_MEM_MS_BLOCK_INFO,
647 INTERNAL_MEM_EPHEMERON_LINK,
648 INTERNAL_MEM_WORKER_DATA,
649 INTERNAL_MEM_BRIDGE_DATA,
650 INTERNAL_MEM_JOB_QUEUE_ENTRY,
654 #define SGEN_PINNED_FREELIST_NUM_SLOTS 30
657 SgenPinnedChunk *chunk_list;
658 SgenPinnedChunk *free_lists [SGEN_PINNED_FREELIST_NUM_SLOTS];
659 void *delayed_free_lists [SGEN_PINNED_FREELIST_NUM_SLOTS];
660 } SgenPinnedAllocator;
668 void mono_sgen_init_internal_allocator (void) MONO_INTERNAL;
669 void mono_sgen_init_pinned_allocator (void) MONO_INTERNAL;
671 void mono_sgen_report_internal_mem_usage (void) MONO_INTERNAL;
672 void mono_sgen_report_pinned_mem_usage (SgenPinnedAllocator *alc) MONO_INTERNAL;
673 void mono_sgen_dump_internal_mem_usage (FILE *heap_dump_file) MONO_INTERNAL;
674 void mono_sgen_dump_section (GCMemSection *section, const char *type) MONO_INTERNAL;
675 void mono_sgen_dump_occupied (char *start, char *end, char *section_start) MONO_INTERNAL;
677 void mono_sgen_register_moved_object (void *obj, void *destination) MONO_INTERNAL;
679 void mono_sgen_register_fixed_internal_mem_type (int type, size_t size) MONO_INTERNAL;
681 void* mono_sgen_alloc_internal (int type) MONO_INTERNAL;
682 void mono_sgen_free_internal (void *addr, int type) MONO_INTERNAL;
684 void* mono_sgen_alloc_internal_dynamic (size_t size, int type) MONO_INTERNAL;
685 void mono_sgen_free_internal_dynamic (void *addr, size_t size, int type) MONO_INTERNAL;
687 void* mono_sgen_alloc_pinned (SgenPinnedAllocator *allocator, size_t size) MONO_INTERNAL;
688 void mono_sgen_free_pinned (SgenPinnedAllocator *allocator, void *addr, size_t size) MONO_INTERNAL;
691 void mono_sgen_debug_printf (int level, const char *format, ...) MONO_INTERNAL;
693 gboolean mono_sgen_parse_environment_string_extract_number (const char *str, glong *out) MONO_INTERNAL;
695 void mono_sgen_pinned_scan_objects (SgenPinnedAllocator *alc, IterateObjectCallbackFunc callback, void *callback_data) MONO_INTERNAL;
696 void mono_sgen_pinned_scan_pinned_objects (SgenPinnedAllocator *alc, IterateObjectCallbackFunc callback, void *callback_data) MONO_INTERNAL;
698 void mono_sgen_pinned_update_heap_boundaries (SgenPinnedAllocator *alc) MONO_INTERNAL;
700 void** mono_sgen_find_optimized_pin_queue_area (void *start, void *end, int *num) MONO_INTERNAL;
701 void mono_sgen_find_section_pin_queue_start_end (GCMemSection *section) MONO_INTERNAL;
702 void mono_sgen_pin_objects_in_section (GCMemSection *section, SgenGrayQueue *queue) MONO_INTERNAL;
704 void mono_sgen_pin_stats_register_object (char *obj, size_t size);
705 void mono_sgen_pin_stats_register_global_remset (char *obj);
706 void mono_sgen_pin_stats_print_class_stats (void);
708 void mono_sgen_add_to_global_remset (gpointer ptr) MONO_INTERNAL;
710 int mono_sgen_get_current_collection_generation (void) MONO_INTERNAL;
711 gboolean mono_sgen_nursery_collection_is_parallel (void) MONO_INTERNAL;
712 CopyOrMarkObjectFunc mono_sgen_get_copy_object (void) MONO_INTERNAL;
713 ScanObjectFunc mono_sgen_get_minor_scan_object (void) MONO_INTERNAL;
714 ScanVTypeFunc mono_sgen_get_minor_scan_vtype (void) MONO_INTERNAL;
716 typedef void (*sgen_cardtable_block_callback) (mword start, mword size);
718 typedef struct _SgenMajorCollector SgenMajorCollector;
719 struct _SgenMajorCollector {
721 gboolean is_parallel;
722 gboolean supports_cardtable;
725 * This is set to TRUE if the sweep for the last major
726 * collection has been completed.
728 gboolean *have_swept;
730 void* (*alloc_heap) (mword nursery_size, mword nursery_align, int nursery_bits);
731 gboolean (*is_object_live) (char *obj);
732 void* (*alloc_small_pinned_obj) (size_t size, gboolean has_references);
733 void* (*alloc_degraded) (MonoVTable *vtable, size_t size);
734 void (*copy_or_mark_object) (void **obj_slot, SgenGrayQueue *queue);
735 void (*minor_scan_object) (char *start, SgenGrayQueue *queue);
736 void (*nopar_minor_scan_object) (char *start, SgenGrayQueue *queue);
737 void (*minor_scan_vtype) (char *start, mword desc, SgenGrayQueue *queue);
738 void (*nopar_minor_scan_vtype) (char *start, mword desc, SgenGrayQueue *queue);
739 void (*major_scan_object) (char *start, SgenGrayQueue *queue);
740 void (*copy_object) (void **obj_slot, SgenGrayQueue *queue);
741 void (*nopar_copy_object) (void **obj_slot, SgenGrayQueue *queue);
742 void* (*alloc_object) (int size, gboolean has_references);
743 void (*free_pinned_object) (char *obj, size_t size);
744 void (*iterate_objects) (gboolean non_pinned, gboolean pinned, IterateObjectCallbackFunc callback, void *data);
745 void (*free_non_pinned_object) (char *obj, size_t size);
746 void (*find_pin_queue_start_ends) (SgenGrayQueue *queue);
747 void (*pin_objects) (SgenGrayQueue *queue);
748 void (*scan_card_table) (SgenGrayQueue *queue);
749 void (*iterate_live_block_ranges) (sgen_cardtable_block_callback callback);
750 void (*init_to_space) (void);
751 void (*sweep) (void);
752 void (*check_scan_starts) (void);
753 void (*dump_heap) (FILE *heap_dump_file);
754 gint64 (*get_used_size) (void);
755 void (*start_nursery_collection) (void);
756 void (*finish_nursery_collection) (void);
757 void (*start_major_collection) (void);
758 void (*finish_major_collection) (void);
759 void (*have_computed_minor_collection_allowance) (void);
760 gboolean (*ptr_is_in_non_pinned_space) (char *ptr);
761 gboolean (*obj_is_from_pinned_alloc) (char *obj);
762 void (*report_pinned_memory_usage) (void);
763 int (*get_num_major_sections) (void);
764 gboolean (*handle_gc_param) (const char *opt);
765 void (*print_gc_param_usage) (void);
766 gboolean (*is_worker_thread) (pthread_t thread);
767 void (*post_param_init) (void);
768 void* (*alloc_worker_data) (void);
769 void (*init_worker_thread) (void *data);
770 void (*reset_worker_data) (void *data);
773 void mono_sgen_marksweep_init (SgenMajorCollector *collector) MONO_INTERNAL;
774 void mono_sgen_marksweep_fixed_init (SgenMajorCollector *collector) MONO_INTERNAL;
775 void mono_sgen_marksweep_par_init (SgenMajorCollector *collector) MONO_INTERNAL;
776 void mono_sgen_marksweep_fixed_par_init (SgenMajorCollector *collector) MONO_INTERNAL;
777 void mono_sgen_copying_init (SgenMajorCollector *collector) MONO_INTERNAL;
780 * This function can be called on an object whose first word, the
781 * vtable field, is not intact. This is necessary for the parallel
785 mono_sgen_par_object_get_size (MonoVTable *vtable, MonoObject* o)
787 MonoClass *klass = vtable->klass;
789 * We depend on mono_string_length_fast and
790 * mono_array_length_fast not using the object's vtable.
792 if (klass == mono_defaults.string_class) {
793 return sizeof (MonoString) + 2 * mono_string_length_fast ((MonoString*) o) + 2;
794 } else if (klass->rank) {
795 MonoArray *array = (MonoArray*)o;
796 size_t size = sizeof (MonoArray) + klass->sizes.element_size * mono_array_length_fast (array);
797 if (G_UNLIKELY (array->bounds)) {
798 size += sizeof (mono_array_size_t) - 1;
799 size &= ~(sizeof (mono_array_size_t) - 1);
800 size += sizeof (MonoArrayBounds) * klass->rank;
804 /* from a created object: the class must be inited already */
805 return klass->instance_size;
810 mono_sgen_safe_object_get_size (MonoObject *obj)
814 if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj)))
815 obj = (MonoObject*)forwarded;
817 return mono_sgen_par_object_get_size ((MonoVTable*)SGEN_LOAD_VTABLE (obj), obj);
820 const char* mono_sgen_safe_name (void* obj) MONO_INTERNAL;
822 gboolean mono_sgen_object_is_live (void *obj) MONO_INTERNAL;
824 gboolean mono_sgen_need_bridge_processing (void) MONO_INTERNAL;
825 void mono_sgen_bridge_processing_start (int num_objs, MonoObject **objs) MONO_INTERNAL;
826 void mono_sgen_bridge_processing_finish (int num_objs, MonoObject **objs) MONO_INTERNAL;
827 void mono_sgen_register_test_bridge_callbacks (void) MONO_INTERNAL;
828 gboolean mono_sgen_is_bridge_object (MonoObject *obj) MONO_INTERNAL;
829 void mono_sgen_mark_bridge_object (MonoObject *obj) MONO_INTERNAL;
836 gboolean mono_sgen_try_alloc_space (mword size, int space) MONO_INTERNAL;
837 void mono_sgen_release_space (mword size, int space) MONO_INTERNAL;
838 void mono_sgen_pin_object (void *object, SgenGrayQueue *queue) MONO_INTERNAL;
839 void sgen_collect_major_no_lock (const char *reason) MONO_INTERNAL;
840 gboolean mono_sgen_need_major_collection (mword space_needed) MONO_INTERNAL;
841 void mono_sgen_set_pinned_from_failed_allocation (mword objsize) MONO_INTERNAL;
845 typedef struct _LOSObject LOSObject;
848 mword size; /* this is the object size */
850 int dummy; /* to have a sizeof (LOSObject) a multiple of ALLOC_ALIGN and data starting at same alignment */
851 char data [MONO_ZERO_LEN_ARRAY];
854 #define ARRAY_OBJ_INDEX(ptr,array,elem_size) (((char*)(ptr) - ((char*)(array) + G_STRUCT_OFFSET (MonoArray, vector))) / (elem_size))
856 extern LOSObject *los_object_list;
857 extern mword los_memory_usage;
859 void mono_sgen_los_free_object (LOSObject *obj) MONO_INTERNAL;
860 void* mono_sgen_los_alloc_large_inner (MonoVTable *vtable, size_t size) MONO_INTERNAL;
861 void mono_sgen_los_sweep (void) MONO_INTERNAL;
862 gboolean mono_sgen_ptr_is_in_los (char *ptr, char **start) MONO_INTERNAL;
863 void mono_sgen_los_iterate_objects (IterateObjectCallbackFunc cb, void *user_data) MONO_INTERNAL;
864 void mono_sgen_los_iterate_live_block_ranges (sgen_cardtable_block_callback callback) MONO_INTERNAL;
865 void mono_sgen_los_scan_card_table (SgenGrayQueue *queue) MONO_INTERNAL;
866 FILE *mono_sgen_get_logfile (void) MONO_INTERNAL;
868 /* nursery allocator */
870 void mono_sgen_clear_nursery_fragments (void) MONO_INTERNAL;
871 void mono_sgen_nursery_allocator_prepare_for_pinning (void) MONO_INTERNAL;
872 void mono_sgen_clear_current_nursery_fragment (void) MONO_INTERNAL;
873 void mono_sgen_nursery_allocator_set_nursery_bounds (char *nursery_start, char *nursery_end) MONO_INTERNAL;
874 mword mono_sgen_build_nursery_fragments (GCMemSection *nursery_section, void **start, int num_entries) MONO_INTERNAL;
875 void mono_sgen_init_nursery_allocator (void) MONO_INTERNAL;
876 void mono_sgen_nursery_allocator_init_heavy_stats (void) MONO_INTERNAL;
877 char* mono_sgen_nursery_alloc_get_upper_alloc_bound (void) MONO_INTERNAL;
878 void* mono_sgen_nursery_alloc (size_t size) MONO_INTERNAL;
879 void* mono_sgen_nursery_alloc_range (size_t size, size_t min_size, int *out_alloc_size) MONO_INTERNAL;
880 MonoVTable* mono_sgen_get_array_fill_vtable (void) MONO_INTERNAL;
881 gboolean mono_sgen_can_alloc_size (size_t size) MONO_INTERNAL;
882 void mono_sgen_nursery_retire_region (void *address, ptrdiff_t size) MONO_INTERNAL;
886 typedef struct _SgenHashTableEntry SgenHashTableEntry;
887 struct _SgenHashTableEntry {
888 SgenHashTableEntry *next;
890 char data [MONO_ZERO_LEN_ARRAY]; /* data is pointer-aligned */
898 GEqualFunc equal_func;
899 SgenHashTableEntry **table;
904 #define SGEN_HASH_TABLE_INIT(table_type,entry_type,data_size,hash_func,equal_func) { (table_type), (entry_type), (data_size), (hash_func), (equal_func), NULL, 0, 0 }
905 #define SGEN_HASH_TABLE_ENTRY_SIZE(data_size) ((data_size) + sizeof (SgenHashTableEntry*) + sizeof (gpointer))
907 gpointer mono_sgen_hash_table_lookup (SgenHashTable *table, gpointer key) MONO_INTERNAL;
908 gboolean mono_sgen_hash_table_replace (SgenHashTable *table, gpointer key, gpointer data) MONO_INTERNAL;
909 gboolean mono_sgen_hash_table_set_value (SgenHashTable *table, gpointer key, gpointer data) MONO_INTERNAL;
910 gboolean mono_sgen_hash_table_set_key (SgenHashTable *hash_table, gpointer old_key, gpointer new_key) MONO_INTERNAL;
911 gboolean mono_sgen_hash_table_remove (SgenHashTable *table, gpointer key, gpointer data_return) MONO_INTERNAL;
913 void mono_sgen_hash_table_clean (SgenHashTable *table) MONO_INTERNAL;
915 #define mono_sgen_hash_table_num_entries(h) ((h)->num_entries)
917 #define SGEN_HASH_TABLE_FOREACH(h,k,v) do { \
918 SgenHashTable *__hash_table = (h); \
919 SgenHashTableEntry **__table = __hash_table->table; \
920 SgenHashTableEntry *__entry, *__prev; \
922 for (__i = 0; __i < (h)->size; ++__i) { \
924 for (__entry = __table [__i]; __entry; ) { \
925 (k) = __entry->key; \
926 (v) = (gpointer)__entry->data;
928 /* The loop must be continue'd after using this! */
929 #define SGEN_HASH_TABLE_FOREACH_REMOVE(free) do { \
930 SgenHashTableEntry *__next = __entry->next; \
932 __prev->next = __next; \
934 __table [__i] = __next; \
936 mono_sgen_free_internal (__entry, __hash_table->entry_mem_type); \
938 --__hash_table->num_entries; \
941 #define SGEN_HASH_TABLE_FOREACH_SET_KEY(k) ((__entry)->key = (k))
943 #define SGEN_HASH_TABLE_FOREACH_END \
945 __entry = __entry->next; \
950 #endif /* HAVE_SGEN_GC */
952 #endif /* __MONO_SGENGC_H__ */