assigned TLABs and if that's more than some percentage of the
nursery size, reduce the TLAB size.
+ *) Explore placing unreachable objects on unused nursery memory.
+  Instead of memset'ing a region to zero, place an int[] covering it.
+ A good place to start is add_nursery_frag. The tricky thing here is
+ placing those objects atomically outside of a collection.
+
+
*/
#include "config.h"
#ifdef HAVE_SGEN_GC
#include <signal.h>
#include <errno.h>
#include <assert.h>
+#ifdef __MACH__
+#undef _XOPEN_SOURCE
+#endif
#include <pthread.h>
+#ifdef __MACH__
+#define _XOPEN_SOURCE
+#endif
#include "metadata/metadata-internals.h"
#include "metadata/class-internals.h"
#include "metadata/gc-internal.h"
#include <mono/utils/memcheck.h>
+#if defined(__MACH__)
+#include "utils/mach-support.h"
+#endif
+
#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
a = i,
* ######## Types and constants used by the GC.
* ######################################################################
*/
-#if SIZEOF_VOID_P == 4
-typedef guint32 mword;
-#else
-typedef guint64 mword;
-#endif
static int gc_initialized = 0;
static int gc_debug_level = 0;
#endif
#ifdef HEAVY_STATISTICS
-static long stat_objects_alloced = 0;
-static long stat_bytes_alloced = 0;
-static long stat_objects_alloced_degraded = 0;
-static long stat_bytes_alloced_degraded = 0;
-static long stat_bytes_alloced_los = 0;
-
-static long stat_copy_object_called_nursery = 0;
-static long stat_objects_copied_nursery = 0;
-static long stat_copy_object_called_major = 0;
-static long stat_objects_copied_major = 0;
-
-static long stat_scan_object_called_nursery = 0;
-static long stat_scan_object_called_major = 0;
-
-static long stat_nursery_copy_object_failed_from_space = 0;
-static long stat_nursery_copy_object_failed_forwarded = 0;
-static long stat_nursery_copy_object_failed_pinned = 0;
-
-static long stat_store_remsets = 0;
-static long stat_store_remsets_unique = 0;
-static long stat_saved_remsets_1 = 0;
-static long stat_saved_remsets_2 = 0;
-static long stat_global_remsets_added = 0;
-static long stat_global_remsets_readded = 0;
-static long stat_global_remsets_processed = 0;
-static long stat_global_remsets_discarded = 0;
+static long long stat_objects_alloced = 0;
+static long long stat_bytes_alloced = 0;
+static long long stat_objects_alloced_degraded = 0;
+static long long stat_bytes_alloced_degraded = 0;
+static long long stat_bytes_alloced_los = 0;
+
+static long long stat_copy_object_called_nursery = 0;
+static long long stat_objects_copied_nursery = 0;
+static long long stat_copy_object_called_major = 0;
+static long long stat_objects_copied_major = 0;
+
+static long long stat_scan_object_called_nursery = 0;
+static long long stat_scan_object_called_major = 0;
+
+static long long stat_nursery_copy_object_failed_from_space = 0;
+static long long stat_nursery_copy_object_failed_forwarded = 0;
+static long long stat_nursery_copy_object_failed_pinned = 0;
+
+static long long stat_store_remsets = 0;
+static long long stat_store_remsets_unique = 0;
+static long long stat_saved_remsets_1 = 0;
+static long long stat_saved_remsets_2 = 0;
+static long long stat_global_remsets_added = 0;
+static long long stat_global_remsets_readded = 0;
+static long long stat_global_remsets_processed = 0;
+static long long stat_global_remsets_discarded = 0;
+
+static long long stat_wasted_fragments_used = 0;
+static long long stat_wasted_fragments_bytes = 0;
static int stat_wbarrier_set_field = 0;
static int stat_wbarrier_set_arrayref = 0;
static int stat_wbarrier_object_copy = 0;
#endif
-static long time_minor_pre_collection_fragment_clear = 0;
-static long time_minor_pinning = 0;
-static long time_minor_scan_remsets = 0;
-static long time_minor_scan_pinned = 0;
-static long time_minor_scan_registered_roots = 0;
-static long time_minor_scan_thread_data = 0;
-static long time_minor_finish_gray_stack = 0;
-static long time_minor_fragment_creation = 0;
-
-static long time_major_pre_collection_fragment_clear = 0;
-static long time_major_pinning = 0;
-static long time_major_scan_pinned = 0;
-static long time_major_scan_registered_roots = 0;
-static long time_major_scan_thread_data = 0;
-static long time_major_scan_alloc_pinned = 0;
-static long time_major_scan_finalized = 0;
-static long time_major_scan_big_objects = 0;
-static long time_major_finish_gray_stack = 0;
-static long time_major_sweep = 0;
-static long time_major_fragment_creation = 0;
-
-static long pinned_chunk_bytes_alloced = 0;
-static long large_internal_bytes_alloced = 0;
+static long long time_minor_pre_collection_fragment_clear = 0;
+static long long time_minor_pinning = 0;
+static long long time_minor_scan_remsets = 0;
+static long long time_minor_scan_pinned = 0;
+static long long time_minor_scan_registered_roots = 0;
+static long long time_minor_scan_thread_data = 0;
+static long long time_minor_finish_gray_stack = 0;
+static long long time_minor_fragment_creation = 0;
+
+static long long time_major_pre_collection_fragment_clear = 0;
+static long long time_major_pinning = 0;
+static long long time_major_scan_pinned = 0;
+static long long time_major_scan_registered_roots = 0;
+static long long time_major_scan_thread_data = 0;
+static long long time_major_scan_alloc_pinned = 0;
+static long long time_major_scan_finalized = 0;
+static long long time_major_scan_big_objects = 0;
+static long long time_major_finish_gray_stack = 0;
+static long long time_major_free_bigobjs = 0;
+static long long time_major_los_sweep = 0;
+static long long time_major_sweep = 0;
+static long long time_major_fragment_creation = 0;
+
+static long long pinned_chunk_bytes_alloced = 0;
+static long long large_internal_bytes_alloced = 0;
/* Keep in sync with internal_mem_names in dump_heap()! */
enum {
}
*/
-#define MAX_DEBUG_LEVEL 2
-#define DEBUG(level,a) do {if (G_UNLIKELY ((level) <= MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) a;} while (0)
-
/* Define this to allow the user to change some of the constants by specifying
* their values in the MONO_GC_PARAMS environmental variable. See
* mono_gc_base_init for details. */
#define SIZEOF_GC_MEM_SECTION ((sizeof (GCMemSection) + 7) & ~7)
-/* large object space struct: 64+ KB */
-/* we could make this limit much smaller to avoid memcpy copy
- * and potentially have more room in the GC descriptor: need to measure
- * This also means that such small OS objects will need to be
- * allocated in a different way (using pinned chunks).
- * We may want to put large but smaller than 64k objects in the fixed space
- * when we move the object from one generation to another (to limit the
- * pig in the snake effect).
- * Note: it may be worth to have an optimized copy function, since we can
- * assume that objects are aligned and have a multiple of 8 size.
- * FIXME: This structure needs to be a multiple of 8 bytes in size: this is not
- * true if MONO_ZERO_LEN_ARRAY is nonzero.
- */
-typedef struct _LOSObject LOSObject;
-struct _LOSObject {
- LOSObject *next;
- mword size; /* this is the object size */
- guint16 role;
- int dummy; /* to have a sizeof (LOSObject) a multiple of ALLOC_ALIGN and data starting at same alignment */
- char data [MONO_ZERO_LEN_ARRAY];
-};
-
/* Pinned objects are allocated in the LOS space if bigger than half a page
* or from freelists otherwise. We assume that pinned objects are relatively few
* and they have a slow dying speed (like interned strings, thread objects).
* reference-free objects.
*/
#define PINNED_FIRST_SLOT_SIZE (sizeof (gpointer) * 4)
-#define MAX_FREELIST_SIZE 2048
-#define PINNED_PAGE_SIZE (4096)
-#define PINNED_CHUNK_MIN_SIZE (4096*8)
+#define MAX_FREELIST_SIZE 8192
typedef struct _PinnedChunk PinnedChunk;
struct _PinnedChunk {
Block block;
mword root_desc;
};
-/* for use with write barriers */
-typedef struct _RememberedSet RememberedSet;
-struct _RememberedSet {
- mword *store_next;
- mword *end_set;
- RememberedSet *next;
- mword data [MONO_ZERO_LEN_ARRAY];
-};
-
/*
* We're never actually using the first element. It's always set to
* NULL to simplify the elimination of consecutive duplicate
{
MonoClass *klass = ((MonoVTable*)LOAD_VTABLE (o))->klass;
if (klass == mono_defaults.string_class) {
- return sizeof (MonoString) + 2 * mono_string_length ((MonoString*) o) + 2;
+ return sizeof (MonoString) + 2 * mono_string_length_fast ((MonoString*) o) + 2;
} else if (klass->rank) {
MonoArray *array = (MonoArray*)o;
- size_t size = sizeof (MonoArray) + klass->sizes.element_size * mono_array_length (array);
+ size_t size = sizeof (MonoArray) + klass->sizes.element_size * mono_array_length_fast (array);
if (G_UNLIKELY (array->bounds)) {
size += sizeof (mono_array_size_t) - 1;
size &= ~(sizeof (mono_array_size_t) - 1);
/* good sizes are 512KB-1MB: larger ones increase a lot memzeroing time */
#define DEFAULT_NURSERY_SIZE (default_nursery_size)
-static int default_nursery_size = (1 << 20);
+static int default_nursery_size = (1 << 22);
#ifdef ALIGN_NURSERY
/* The number of trailing 0 bits in DEFAULT_NURSERY_SIZE */
#define DEFAULT_NURSERY_BITS (default_nursery_bits)
-static int default_nursery_bits = 20;
+static int default_nursery_bits = 22;
#endif
#else
-#define DEFAULT_NURSERY_SIZE (1024*512*2)
+#define DEFAULT_NURSERY_SIZE (4*1024*1024)
#ifdef ALIGN_NURSERY
-#define DEFAULT_NURSERY_BITS 20
+#define DEFAULT_NURSERY_BITS 22
#endif
#endif
-#define MIN_LOS_ALLOWANCE (DEFAULT_NURSERY_SIZE * 2)
+#define MIN_MINOR_COLLECTION_ALLOWANCE (DEFAULT_NURSERY_SIZE * 4)
/* to quickly find the head of an object pinned by a conservative address
* we keep track of the objects allocated for each SCAN_START_SIZE memory
* chunk in the nursery or other memory sections. Larger values have less
/* the minimum size of a fragment that we consider useful for allocation */
#define FRAGMENT_MIN_SIZE (512)
/* This is a fixed value used for pinned chunks, not the system pagesize */
-#define FREELIST_PAGESIZE 4096
+#define FREELIST_PAGESIZE (16*1024)
static mword pagesize = 4096;
static mword nursery_size;
static int degraded_mode = 0;
-static LOSObject *los_object_list = NULL;
-static mword los_memory_usage = 0;
-static mword los_num_objects = 0;
-static mword next_los_collection = 2*1024*1024; /* 2 MB, need to tune */
static mword total_alloc = 0;
/* use this to tune when to do a major/minor collection */
static mword memory_pressure = 0;
+static int minor_collection_allowance;
+static int minor_collection_sections_alloced = 0;
static GCMemSection *nursery_section = NULL;
static mword lowest_heap_address = ~(mword)0;
static const int freelist_sizes [] = {
8, 16, 24, 32, 40, 48, 64, 80,
96, 128, 160, 192, 224, 256, 320, 384,
- 448, 512, 584, 680, 816, 1024, 1360, 2048};
+ 448, 512, 584, 680, 816, 1024, 1360, 2048,
+ 2336, 2728, 3272, 4096, 5456, 8192 };
#define FREELIST_NUM_SLOTS (sizeof (freelist_sizes) / sizeof (freelist_sizes [0]))
/* This is also the MAJOR_SECTION_SIZE for the copying major
*/
static char *nursery_start = NULL;
-/* eventually share with MonoThread? */
-typedef struct _SgenThreadInfo SgenThreadInfo;
-
-struct _SgenThreadInfo {
- SgenThreadInfo *next;
- ARCH_THREAD_TYPE id;
- unsigned int stop_count; /* to catch duplicate signals */
- int signal;
- int skip;
- volatile int in_critical_region;
- void *stack_end;
- void *stack_start;
- void *stack_start_limit;
- char **tlab_next_addr;
- char **tlab_start_addr;
- char **tlab_temp_end_addr;
- char **tlab_real_end_addr;
- gpointer **store_remset_buffer_addr;
- long *store_remset_buffer_index_addr;
- RememberedSet *remset;
- gpointer runtime_data;
- gpointer stopped_ip; /* only valid if the thread is stopped */
- MonoDomain *stopped_domain; /* ditto */
- gpointer *stopped_regs; /* ditto */
-#ifndef HAVE_KW_THREAD
- char *tlab_start;
- char *tlab_next;
- char *tlab_temp_end;
- char *tlab_real_end;
- gpointer *store_remset_buffer;
- long store_remset_buffer_index;
-#endif
-};
-
#ifdef HAVE_KW_THREAD
#define TLAB_ACCESS_INIT
#define TLAB_START tlab_start
*/
static guint32 tlab_size = (1024 * 4);
+/* How much space we tolerate wasting from the current fragment when allocating a new TLAB */
+#define MAX_NURSERY_TLAB_WASTE 512
+
/* fragments that are free and ready to be used for allocation */
static Fragment *nursery_fragments = NULL;
/* freeelist of fragment structures */
static Fragment *fragment_freelist = NULL;
-/* objects bigger then this go into the large object space */
-#define MAX_SMALL_OBJ_SIZE 2040
+/*
+ * Objects bigger than this go into the large object space.  This size
+ * has a few constraints. It must fit into the major heap, which in
+ * the case of the copying collector means that it must fit into a
+ * pinned chunk. It must also play well with the GC descriptors, some
+ * of which (DESC_TYPE_RUN_LENGTH, DESC_TYPE_SMALL_BITMAP) encode the
+ * object size.
+ */
+#define MAX_SMALL_OBJ_SIZE 8000
/* Functions supplied by the runtime to be called by the GC */
static MonoGCCallbacks gc_callbacks;
static void null_link_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation);
static void null_links_for_domain (MonoDomain *domain, int generation);
static gboolean search_fragment_for_size (size_t size);
+static int search_fragment_for_size_range (size_t desired_size, size_t minimum_size);
static void build_nursery_fragments (int start_pin, int end_pin);
static void clear_nursery_fragments (char *next);
static void pin_from_roots (void *start_nursery, void *end_nursery);
static void clear_tlabs (void);
typedef void (*IterateObjectCallbackFunc) (char*, size_t, void*);
static void scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data);
-static char* scan_object (char *start);
-static char* major_scan_object (char *start);
+static void scan_object (char *start);
+static void major_scan_object (char *start);
static void* copy_object_no_checks (void *obj);
static void copy_object (void **obj_slot);
static void* get_chunk_freelist (PinnedChunk *chunk, int slot);
static PinnedChunk* alloc_pinned_chunk (void);
-static void free_large_object (LOSObject *obj);
static void sort_addresses (void **array, int size);
static void drain_gray_stack (void);
static void finish_gray_stack (char *start_addr, char *end_addr, int generation);
+static gboolean need_major_collection (void);
+static void major_collection (const char *reason);
static void mono_gc_register_disappearing_link (MonoObject *obj, void **link, gboolean track);
void describe_ptr (char *ptr);
+void check_object (char *start);
+
static void check_consistency (void);
+static void check_major_refs (void);
static void check_section_scan_starts (GCMemSection *section);
static void check_scan_starts (void);
static void check_for_xdomain_refs (void);
static void dump_occupied (char *start, char *end, char *section_start);
static void dump_section (GCMemSection *section, const char *type);
static void dump_heap (const char *type, int num, const char *reason);
-static void commit_stats (int generation);
static void report_pinned_chunk (PinnedChunk *chunk, int seq);
void mono_gc_scan_for_specific_ref (MonoObject *key);
#include "sgen-pinning.c"
#include "sgen-pinning-stats.c"
#include "sgen-gray.c"
+#include "sgen-los.c"
/*
* ######################################################################
* inside complex.
*/
enum {
- DESC_TYPE_RUN_LENGTH, /* 16 bits aligned byte size | 1-3 (offset, numptr) bytes tuples */
- DESC_TYPE_SMALL_BITMAP, /* 16 bits aligned byte size | 16-48 bit bitmap */
- DESC_TYPE_STRING, /* nothing */
+ /*
+ * We don't use 0 so that 0 isn't a valid GC descriptor. No
+ * deep reason for this other than to be able to identify a
+ * non-inited descriptor for debugging.
+ *
+ * If an object contains no references, its GC descriptor is
+ * always DESC_TYPE_RUN_LENGTH, without a size, no exceptions.
+ * This is so that we can quickly check for that in
+ * copy_object_no_checks(), without having to fetch the
+ * object's class.
+ */
+ DESC_TYPE_RUN_LENGTH = 1, /* 15 bits aligned byte size | 1-3 (offset, numptr) bytes tuples */
+ DESC_TYPE_SMALL_BITMAP, /* 15 bits aligned byte size | 16-48 bit bitmap */
DESC_TYPE_COMPLEX, /* index for bitmap into complex_descriptors */
DESC_TYPE_VECTOR, /* 10 bits element size | 1 bit array | 2 bits desc | element desc */
DESC_TYPE_ARRAY, /* 10 bits element size | 1 bit array | 2 bits desc | element desc */
void*
mono_gc_make_descr_for_string (gsize *bitmap, int numbits)
{
- return (void*) DESC_TYPE_STRING;
+ return (void*) DESC_TYPE_RUN_LENGTH;
}
void*
int first_set = -1, num_set = 0, last_set = -1, i;
mword desc = 0;
size_t stored_size = obj_size;
- stored_size += ALLOC_ALIGN - 1;
- stored_size &= ~(ALLOC_ALIGN - 1);
for (i = 0; i < numbits; ++i) {
if (bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) {
if (first_set < 0)
num_set++;
}
}
+ /*
+ * We don't encode the size of types that don't contain
+ * references because they might not be aligned, i.e. the
+ * bottom two bits might be set, which would clash with the
+ * bits we need to encode the descriptor type. Since we don't
+ * use the encoded size to skip objects, other than for
+ * processing remsets, in which case only the positions of
+ * references are relevant, this is not a problem.
+ */
+ if (first_set < 0)
+ return (void*)DESC_TYPE_RUN_LENGTH;
+ g_assert (!(stored_size & 0x3));
if (stored_size <= MAX_SMALL_OBJ_SIZE) {
/* check run-length encoding first: one byte offset, one byte number of pointers
* on 64 bit archs, we can have 3 runs, just one on 32.
* It may be better to use nibbles.
*/
if (first_set < 0) {
- desc = DESC_TYPE_RUN_LENGTH | stored_size;
+ desc = DESC_TYPE_RUN_LENGTH | (stored_size << 1);
DEBUG (6, fprintf (gc_debug_file, "Ptrfree descriptor %p, size: %zd\n", (void*)desc, stored_size));
return (void*) desc;
} else if (first_set < 256 && num_set < 256 && (first_set + num_set == last_set + 1)) {
- desc = DESC_TYPE_RUN_LENGTH | stored_size | (first_set << 16) | (num_set << 24);
+ desc = DESC_TYPE_RUN_LENGTH | (stored_size << 1) | (first_set << 16) | (num_set << 24);
DEBUG (6, fprintf (gc_debug_file, "Runlen descriptor %p, size: %zd, first set: %d, num set: %d\n", (void*)desc, stored_size, first_set, num_set));
return (void*) desc;
}
/* we know the 2-word header is ptr-free */
if (last_set < SMALL_BITMAP_SIZE + OBJECT_HEADER_WORDS) {
- desc = DESC_TYPE_SMALL_BITMAP | stored_size | ((*bitmap >> OBJECT_HEADER_WORDS) << SMALL_BITMAP_SHIFT);
+ desc = DESC_TYPE_SMALL_BITMAP | (stored_size << 1) | ((*bitmap >> OBJECT_HEADER_WORDS) << SMALL_BITMAP_SHIFT);
DEBUG (6, fprintf (gc_debug_file, "Smallbitmap descriptor %p, size: %zd, last set: %d\n", (void*)desc, stored_size, last_set));
return (void*) desc;
}
num_set++;
}
}
+ /* See comment at the definition of DESC_TYPE_RUN_LENGTH. */
+ if (first_set < 0)
+ return (void*)DESC_TYPE_RUN_LENGTH;
if (elem_size <= MAX_ELEMENT_SIZE) {
desc |= elem_size << VECTOR_ELSIZE_SHIFT;
if (!num_set) {
/* helper macros to scan and traverse objects, macros because we resue them in many functions */
#define STRING_SIZE(size,str) do { \
- (size) = sizeof (MonoString) + 2 * mono_string_length ((MonoString*)(str)) + 2; \
+ (size) = sizeof (MonoString) + 2 * mono_string_length_fast ((MonoString*)(str)) + 2; \
(size) += (ALLOC_ALIGN - 1); \
(size) &= ~(ALLOC_ALIGN - 1); \
} while (0)
#define OBJ_RUN_LEN_SIZE(size,desc,obj) do { \
- (size) = (desc) & 0xfff8; \
+ (size) = ((desc) & 0xfff8) >> 1; \
} while (0)
#define OBJ_BITMAP_SIZE(size,desc,obj) do { \
- (size) = (desc) & 0xfff8; \
+ (size) = ((desc) & 0xfff8) >> 1; \
} while (0)
//#define PREFETCH(addr) __asm__ __volatile__ (" prefetchnta %0": : "m"(*(char *)(addr)))
/* there are pointers */ \
gsize *mbitmap_data = complex_descriptors + ((vt)->desc >> LOW_TYPE_BITS); \
int mbwords = (*mbitmap_data++) - 1; \
- int el_size = mono_array_element_size (((MonoObject*)(obj))->vtable->klass); \
+ int el_size = mono_array_element_size (vt->klass); \
char *e_start = (char*)(obj) + G_STRUCT_OFFSET (MonoArray, vector); \
- char *e_end = e_start + el_size * mono_array_length ((MonoArray*)(obj)); \
- if (0) { \
- MonoObject *myobj = (MonoObject*)start; \
- g_print ("found %d at %p (0x%zx): %s.%s\n", mbwords, (obj), (vt)->desc, myobj->vtable->klass->name_space, myobj->vtable->klass->name); \
- } \
+ char *e_end = e_start + el_size * mono_array_length_fast ((MonoArray*)(obj)); \
+ if (0) \
+ g_print ("found %d at %p (0x%zx): %s.%s\n", mbwords, (obj), (vt)->desc, vt->klass->name_space, vt->klass->name); \
while (e_start < e_end) { \
void **_objptr = (void**)e_start; \
gsize *bitmap_data = mbitmap_data; \
int etype = (vt)->desc & 0xc000; \
if (etype == (DESC_TYPE_V_REFS << 14)) { \
void **p = (void**)((char*)(obj) + G_STRUCT_OFFSET (MonoArray, vector)); \
- void **end_refs = (void**)((char*)p + el_size * mono_array_length ((MonoArray*)(obj))); \
+ void **end_refs = (void**)((char*)p + el_size * mono_array_length_fast ((MonoArray*)(obj))); \
/* Note: this code can handle also arrays of struct with only references in them */ \
while (p < end_refs) { \
HANDLE_PTR (p, (obj)); \
int offset = ((vt)->desc >> 16) & 0xff; \
int num_refs = ((vt)->desc >> 24) & 0xff; \
char *e_start = (char*)(obj) + G_STRUCT_OFFSET (MonoArray, vector); \
- char *e_end = e_start + el_size * mono_array_length ((MonoArray*)(obj)); \
+ char *e_end = e_start + el_size * mono_array_length_fast ((MonoArray*)(obj)); \
while (e_start < e_end) { \
void **p = (void**)e_start; \
int i; \
} \
} else if (etype == DESC_TYPE_V_BITMAP << 14) { \
char *e_start = (char*)(obj) + G_STRUCT_OFFSET (MonoArray, vector); \
- char *e_end = e_start + el_size * mono_array_length ((MonoArray*)(obj)); \
+ char *e_end = e_start + el_size * mono_array_length_fast ((MonoArray*)(obj)); \
while (e_start < e_end) { \
void **p = (void**)e_start; \
gsize _bmap = (vt)->desc >> 16; \
} \
} while (0)
-#include "sgen-major-copying.c"
-//#include "sgen-marksweep.c"
+//#include "sgen-major-copying.c"
+#include "sgen-marksweep.c"
static gboolean
is_xdomain_ref_allowed (gpointer *ptr, char *obj, MonoDomain *domain)
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) check_reference_for_xdomain ((ptr), (obj), domain)
-static char*
-scan_object_for_xdomain_refs (char *start)
+static void
+scan_object_for_xdomain_refs (char *start, mword size, void *data)
{
MonoDomain *domain = ((MonoObject*)start)->vtable->domain;
#include "sgen-scan-object.h"
-
- return start;
-}
-
-static void
-scan_area_for_xdomain_refs (char *start, char *end)
-{
- while (start < end) {
- if (!*(void**)start) {
- start += sizeof (void*); /* should be ALLOC_ALIGN, really */
- continue;
- }
-
- start = scan_object_for_xdomain_refs (start);
- }
}
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) do { \
if ((MonoObject*)*(ptr) == key) { \
- g_print ("found ref to %p in object %p (%s) at offset %zd\n", \
+ g_print ("found ref to %p in object %p (%s) at offset %td\n", \
key, (obj), safe_name ((obj)), ((char*)(ptr) - (char*)(obj))); \
} \
} while (0)
-static char*
+static void
scan_object_for_specific_ref (char *start, MonoObject *key)
{
#include "sgen-scan-object.h"
-
- return start;
}
static void
check_domain = NULL;
}
-static void
-scan_pinned_object_for_xdomain_refs_callback (char *obj, size_t size, gpointer dummy)
-{
- scan_object_for_xdomain_refs (obj);
-}
-
static void
check_for_xdomain_refs (void)
{
LOSObject *bigobj;
- scan_area_for_xdomain_refs (nursery_section->data, nursery_section->end_data);
+ scan_area_with_callback (nursery_section->data, nursery_section->end_data, (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);
- major_iterate_objects (TRUE, TRUE, scan_pinned_object_for_xdomain_refs_callback, NULL);
+ major_iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);
for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
- scan_object_for_xdomain_refs (bigobj->data);
+ scan_object_for_xdomain_refs (bigobj->data, bigobj->size, NULL);
}
static gboolean
mword objsize;
char *destination;
MonoVTable *vt = ((MonoObject*)obj)->vtable;
- gboolean has_references = vt->klass->has_references;
+ gboolean has_references = vt->gc_descr != (void*)DESC_TYPE_RUN_LENGTH;
objsize = safe_object_get_size ((MonoObject*)obj);
objsize += ALLOC_ALIGN - 1;
DEBUG (9, g_assert (vt->klass->inited));
MAJOR_GET_COPY_OBJECT_SPACE (destination, objsize, has_references);
- DEBUG (9, fprintf (gc_debug_file, " (to %p, %s size: %zd)\n", destination, ((MonoObject*)obj)->vtable->klass->name, objsize));
+ DEBUG (9, fprintf (gc_debug_file, " (to %p, %s size: %lu)\n", destination, ((MonoObject*)obj)->vtable->klass->name, (unsigned long)objsize));
binary_protocol_copy (obj, destination, ((MonoObject*)obj)->vtable, objsize);
if (objsize <= sizeof (gpointer) * 8) {
if (G_UNLIKELY (vt->rank && ((MonoArray*)obj)->bounds)) {
MonoArray *array = (MonoArray*)destination;
array->bounds = (MonoArrayBounds*)((char*)destination + ((char*)((MonoArray*)obj)->bounds - (char*)obj));
- DEBUG (9, fprintf (gc_debug_file, "Array instance %p: size: %zd, rank: %d, length: %d\n", array, objsize, vt->rank, mono_array_length (array)));
+ DEBUG (9, fprintf (gc_debug_file, "Array instance %p: size: %lu, rank: %d, length: %lu\n", array, (unsigned long)objsize, vt->rank, (unsigned long)mono_array_length (array)));
}
/* set the forwarding pointer */
forward_object (obj, destination);
* Scan the object pointed to by @start for references to
* other objects between @from_start and @from_end and copy
* them to the gray_objects area.
- * Returns a pointer to the end of the object.
*/
-static char*
+static void
scan_object (char *start)
{
#include "sgen-scan-object.h"
HEAVY_STAT (++stat_scan_object_called_nursery);
-
- return start;
}
/*
} \
} while (0)
-static char*
+static void
major_scan_object (char *start)
{
#include "sgen-scan-object.h"
HEAVY_STAT (++stat_scan_object_called_major);
-
- return start;
}
/*
}
}
-static int
-new_gap (int gap)
-{
- gap = (gap * 10) / 13;
- if (gap == 9 || gap == 10)
- return 11;
- if (gap < 1)
- return 1;
- return gap;
-}
-
-#if 0
-static int
-compare_addr (const void *a, const void *b)
-{
- return *(const void **)a - *(const void **)b;
-}
-#endif
-
-/* sort the addresses in array in increasing order */
+/* Sort the addresses in array in increasing order.
+ * Done using a by-the-book heap sort, which has decent and stable performance and is fairly cache-efficient.
+ */
static void
sort_addresses (void **array, int size)
{
- /*
- * qsort is slower as predicted.
- * qsort (array, size, sizeof (gpointer), compare_addr);
- * return;
- */
- int gap = size;
- int swapped, end;
- while (TRUE) {
- int i;
- gap = new_gap (gap);
- swapped = FALSE;
- end = size - gap;
- for (i = 0; i < end; i++) {
- int j = i + gap;
- if (array [i] > array [j]) {
- void* val = array [i];
- array [i] = array [j];
- array [j] = val;
- swapped = TRUE;
- }
+ int i;
+ void *tmp;
+
+ for (i = 1; i < size; ++i) {
+ int child = i;
+ while (child > 0) {
+ int parent = (child - 1) / 2;
+
+ if (array [parent] >= array [child])
+ break;
+
+ tmp = array [parent];
+ array [parent] = array [child];
+ array [child] = tmp;
+
+ child = parent;
+ }
+ }
+
+ for (i = size - 1; i > 0; --i) {
+ int end, root;
+ tmp = array [i];
+ array [i] = array [0];
+ array [0] = tmp;
+
+ end = i - 1;
+ root = 0;
+
+ while (root * 2 + 1 <= end) {
+ int child = root * 2 + 1;
+
+ if (child < end && array [child] < array [child + 1])
+ ++child;
+ if (array [root] >= array [child])
+ break;
+
+ tmp = array [root];
+ array [root] = array [child];
+ array [child] = tmp;
+
+ root = child;
}
- if (gap == 1 && !swapped)
- break;
}
}
gpointer next;
for (i = 0; i < next_pin_slot; ++i) {
next = pin_queue [i];
- fprintf (gc_debug_file, "Nursery range: %p-%p, size: %zd\n", first, next, (char*)next-(char*)first);
+ fprintf (gc_debug_file, "Nursery range: %p-%p, size: %td\n", first, next, (char*)next-(char*)first);
first = next;
}
next = end_nursery;
- fprintf (gc_debug_file, "Nursery range: %p-%p, size: %zd\n", first, next, (char*)next-(char*)first);
+ fprintf (gc_debug_file, "Nursery range: %p-%p, size: %td\n", first, next, (char*)next-(char*)first);
}
/* reduce the info in the pin queue, removing duplicate pointers and sorting them */
if (nursery_section)
return;
- DEBUG (2, fprintf (gc_debug_file, "Allocating nursery size: %zd\n", nursery_size));
+ DEBUG (2, fprintf (gc_debug_file, "Allocating nursery size: %lu\n", (unsigned long)nursery_size));
/* later we will alloc a larger area for the nursery but only activate
* what we need. The rest will be used as expansion if we have too many pinned
* objects in the existing nursery.
UPDATE_HEAP_BOUNDARIES (nursery_start, nursery_real_end);
nursery_next = nursery_start;
total_alloc += alloc_size;
- DEBUG (4, fprintf (gc_debug_file, "Expanding nursery size (%p-%p): %zd, total: %zd\n", data, data + alloc_size, nursery_size, total_alloc));
+ DEBUG (4, fprintf (gc_debug_file, "Expanding nursery size (%p-%p): %lu, total: %lu\n", data, data + alloc_size, (unsigned long)nursery_size, (unsigned long)total_alloc));
section->data = section->next_data = data;
section->size = alloc_size;
section->end_data = nursery_real_end;
fragment_total += frag_size;
} else {
/* Clear unused fragments, pinning depends on this */
+		/*TODO place an int[] here instead of the memset if the size justifies it*/
memset (frag_start, 0, frag_size);
}
}
static void
dump_occupied (char *start, char *end, char *section_start)
{
- fprintf (heap_dump_file, "<occupied offset=\"%zd\" size=\"%zd\"/>\n", start - section_start, end - start);
+ fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
}
static void
GCVTable *vt;
char *old_start = NULL; /* just for debugging */
- fprintf (heap_dump_file, "<section type=\"%s\" size=\"%zu\">\n", type, section->size);
+ fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
while (start < end) {
guint size;
if (reason)
fprintf (heap_dump_file, " reason=\"%s\"", reason);
fprintf (heap_dump_file, ">\n");
- fprintf (heap_dump_file, "<other-mem-usage type=\"pinned-chunks\" size=\"%ld\"/>\n", pinned_chunk_bytes_alloced);
- fprintf (heap_dump_file, "<other-mem-usage type=\"large-internal\" size=\"%ld\"/>\n", large_internal_bytes_alloced);
+ fprintf (heap_dump_file, "<other-mem-usage type=\"pinned-chunks\" size=\"%lld\"/>\n", pinned_chunk_bytes_alloced);
+ fprintf (heap_dump_file, "<other-mem-usage type=\"large-internal\" size=\"%lld\"/>\n", large_internal_bytes_alloced);
fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
for (i = 0; i < INTERNAL_MEM_MAX; ++i)
fprintf (heap_dump_file, "<other-mem-usage type=\"%s\" size=\"%ld\"/>\n", internal_mem_names [i], small_internal_mem_bytes [i]);
mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_finalized);
mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_big_objects);
mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_finish_gray_stack);
+ mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_free_bigobjs);
+ mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_los_sweep);
mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_sweep);
mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_fragment_creation);
mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_forwarded);
mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_pinned);
+ mono_counters_register ("# wasted fragments used", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_wasted_fragments_used);
+ mono_counters_register ("bytes in wasted fragments", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_wasted_fragments_bytes);
+
mono_counters_register ("Store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets);
mono_counters_register ("Unique store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets_unique);
mono_counters_register ("Saved remsets 1", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_saved_remsets_1);
inited = TRUE;
}
+/*
+ * Decide whether the next collection should be a major one: compare the
+ * memory effectively allocated since the last major collection (promoted
+ * major sections plus LOS growth) against minor_collection_allowance.
+ */
+static gboolean
+need_major_collection (void)
+{
+ /* The MIN () guards against a freed domain having shrunk the LOS,
+  * which would make the unsigned subtraction underflow. */
+ mword los_alloced = los_memory_usage - MIN (last_los_memory_usage, los_memory_usage);
+ return minor_collection_sections_alloced * MAJOR_SECTION_SIZE + los_alloced > minor_collection_allowance;
+}
+
/*
* Collect objects in the nursery. Returns whether to trigger a major
* collection.
build_nursery_fragments (0, next_pin_slot);
TV_GETTIME (btv);
time_minor_fragment_creation += TV_ELAPSED_MS (atv, btv);
- DEBUG (2, fprintf (gc_debug_file, "Fragment creation: %d usecs, %zd bytes available\n", TV_ELAPSED (atv, btv), fragment_total));
+ DEBUG (2, fprintf (gc_debug_file, "Fragment creation: %d usecs, %lu bytes available\n", TV_ELAPSED (atv, btv), (unsigned long)fragment_total));
+
+ if (consistency_check_at_minor_collection)
+ check_major_refs ();
major_finish_nursery_collection ();
current_collection_generation = -1;
- return major_need_major_collection ();
+ return need_major_collection ();
}
static void
char *heap_end = (char*)-1;
int old_num_major_sections = num_major_sections;
int num_major_sections_saved, save_target, allowance_target;
+ mword los_memory_saved, los_memory_alloced, old_los_memory_usage;
+
+ /*
+ * A domain could have been freed, resulting in
+ * los_memory_usage being less than last_los_memory_usage.
+ */
+ los_memory_alloced = los_memory_usage - MIN (last_los_memory_usage, los_memory_usage);
+ old_los_memory_usage = los_memory_usage;
//count_ref_nonref_objs ();
//consistency_check ();
GRAY_OBJECT_ENQUEUE (bigobj->data);
if (heap_dump_file)
pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
- DEBUG (6, fprintf (gc_debug_file, "Marked large object %p (%s) size: %zd from roots\n", bigobj->data, safe_name (bigobj->data), bigobj->size));
+ DEBUG (6, fprintf (gc_debug_file, "Marked large object %p (%s) size: %lu from roots\n", bigobj->data, safe_name (bigobj->data), (unsigned long)bigobj->size));
}
}
/* second pass for the sections */
bigobj = bigobj->next;
}
+ TV_GETTIME (btv);
+ time_major_free_bigobjs += TV_ELAPSED_MS (atv, btv);
+
+ los_sweep ();
+
+ TV_GETTIME (atv);
+ time_major_los_sweep += TV_ELAPSED_MS (btv, atv);
+
major_sweep ();
TV_GETTIME (btv);
g_assert (gray_object_queue_is_empty ());
- num_major_sections_saved = MAX (old_num_major_sections - num_major_sections, 1);
+ num_major_sections_saved = MAX (old_num_major_sections - num_major_sections, 0);
+ los_memory_saved = MAX (old_los_memory_usage - los_memory_usage, 1);
- save_target = num_major_sections / 2;
+ save_target = ((num_major_sections * MAJOR_SECTION_SIZE) + los_memory_saved) / 2;
/*
* We aim to allow the allocation of as many sections as is
* necessary to reclaim save_target sections in the next
*
* hence:
*/
- allowance_target = save_target * minor_collection_sections_alloced / num_major_sections_saved;
+ allowance_target = (mword)((double)save_target * (double)(minor_collection_sections_alloced * MAJOR_SECTION_SIZE + los_memory_alloced) / (double)(num_major_sections_saved * MAJOR_SECTION_SIZE + los_memory_saved));
- minor_collection_section_allowance = MAX (MIN (allowance_target, num_major_sections), MIN_MINOR_COLLECTION_SECTION_ALLOWANCE);
+ minor_collection_allowance = MAX (MIN (allowance_target, num_major_sections * MAJOR_SECTION_SIZE + los_memory_usage), MIN_MINOR_COLLECTION_ALLOWANCE);
minor_collection_sections_alloced = 0;
+ last_los_memory_usage = los_memory_usage;
check_scan_starts ();
stop_world ();
if (collect_nursery (size))
major_collection ("minor overflow");
- DEBUG (2, fprintf (gc_debug_file, "Heap size: %zd, LOS size: %zd\n", total_alloc, los_memory_usage));
+ DEBUG (2, fprintf (gc_debug_file, "Heap size: %lu, LOS size: %lu\n", (unsigned long)total_alloc, (unsigned long)los_memory_usage));
restart_world ();
/* this also sets the proper pointers for the next allocation */
if (!search_fragment_for_size (size)) {
*/
+/*
+ * Unlink @frag from the nursery fragment list (@prev is its predecessor,
+ * or NULL when @frag is the list head), make its memory the current
+ * nursery allocation window (nursery_next .. nursery_frag_real_end) and
+ * return the Fragment node to fragment_freelist for reuse.  @size is
+ * only used in the debug message.
+ */
static void
-free_large_object (LOSObject *obj)
+setup_fragment (Fragment *frag, Fragment *prev, size_t size)
{
- size_t size = obj->size;
- DEBUG (4, fprintf (gc_debug_file, "Freed large object %p, size %zd\n", obj->data, obj->size));
- binary_protocol_empty (obj->data, obj->size);
-
- los_memory_usage -= size;
- size += sizeof (LOSObject);
- size += pagesize - 1;
- size &= ~(pagesize - 1);
- total_alloc -= size;
- los_num_objects--;
- free_os_memory (obj, size);
-}
-
-/*
- * Objects with size >= 64KB are allocated in the large object space.
- * They are currently kept track of with a linked list.
- * They don't move, so there is no need to pin them during collection
- * and we avoid the memcpy overhead.
- */
-static void* __attribute__((noinline))
-alloc_large_inner (MonoVTable *vtable, size_t size)
-{
- LOSObject *obj;
- void **vtslot;
- size_t alloc_size;
-
- g_assert (size > MAX_SMALL_OBJ_SIZE);
-
- if (los_memory_usage > next_los_collection) {
- static mword last_los_memory_usage = 0;
-
- mword los_memory_alloced;
- mword old_los_memory_usage;
- mword los_memory_saved;
- mword save_target;
- mword allowance_target;
- mword allowance;
-
- DEBUG (4, fprintf (gc_debug_file, "Should trigger major collection: req size %zd (los already: %zu, limit: %zu)\n", size, los_memory_usage, next_los_collection));
- stop_world ();
-
- g_assert (los_memory_usage >= last_los_memory_usage);
- los_memory_alloced = los_memory_usage - last_los_memory_usage;
- old_los_memory_usage = los_memory_usage;
-
- major_collection ("LOS overflow");
-
- los_memory_saved = MAX (old_los_memory_usage - los_memory_usage, 1);
- save_target = los_memory_usage / 2;
- /*
- * see the comment at the end of major_collection()
- * for the explanation for this calculation.
- */
- allowance_target = (mword)((double)save_target * (double)los_memory_alloced / (double)los_memory_saved);
- allowance = MAX (MIN (allowance_target, los_memory_usage), MIN_LOS_ALLOWANCE);
- next_los_collection = los_memory_usage + allowance;
-
- last_los_memory_usage = los_memory_usage;
+ /* remove from the list */
+ if (prev)
+ prev->next = frag->next;
+ else
+ nursery_fragments = frag->next;
+ /* this fragment's memory becomes the active allocation area */
+ nursery_next = frag->fragment_start;
+ nursery_frag_real_end = frag->fragment_end;
- restart_world ();
- }
- alloc_size = size;
- alloc_size += sizeof (LOSObject);
- alloc_size += pagesize - 1;
- alloc_size &= ~(pagesize - 1);
- /* FIXME: handle OOM */
- obj = get_os_memory (alloc_size, TRUE);
- g_assert (!((mword)obj->data & (ALLOC_ALIGN - 1)));
- obj->size = size;
- vtslot = (void**)obj->data;
- *vtslot = vtable;
- total_alloc += alloc_size;
- UPDATE_HEAP_BOUNDARIES (obj->data, (char*)obj->data + size);
- obj->next = los_object_list;
- los_object_list = obj;
- los_memory_usage += size;
- los_num_objects++;
- DEBUG (4, fprintf (gc_debug_file, "Allocated large object %p, vtable: %p (%s), size: %zd\n", obj->data, vtable, vtable->klass->name, size));
- binary_protocol_alloc (obj->data, vtable, size);
- return obj->data;
+ DEBUG (4, fprintf (gc_debug_file, "Using nursery fragment %p-%p, size: %td (req: %zd)\n", nursery_next, nursery_frag_real_end, nursery_frag_real_end - nursery_next, size));
+ /* recycle the list node itself */
+ frag->next = fragment_freelist;
+ fragment_freelist = frag;
}
/* check if we have a suitable fragment in nursery_fragments to be able to allocate
prev = NULL;
for (frag = nursery_fragments; frag; frag = frag->next) {
if (size <= (frag->fragment_end - frag->fragment_start)) {
- /* remove from the list */
- if (prev)
- prev->next = frag->next;
- else
- nursery_fragments = frag->next;
- nursery_next = frag->fragment_start;
- nursery_frag_real_end = frag->fragment_end;
-
- DEBUG (4, fprintf (gc_debug_file, "Using nursery fragment %p-%p, size: %zd (req: %zd)\n", nursery_next, nursery_frag_real_end, nursery_frag_real_end - nursery_next, size));
- frag->next = fragment_freelist;
- fragment_freelist = frag;
+ setup_fragment (frag, prev, size);
return TRUE;
}
prev = frag;
return FALSE;
}
+/*
+ * Same as search_fragment_for_size, but if the search for @desired_size
+ * fails, fall back to any fragment that can satisfy @minimum_size.
+ * This improves nursery utilization.  Returns the usable size of the
+ * fragment that was set up, or 0 when no fragment is large enough.
+ */
+static int
+search_fragment_for_size_range (size_t desired_size, size_t minimum_size)
+{
+ Fragment *frag, *prev, *min_prev;
+ DEBUG (4, fprintf (gc_debug_file, "Searching nursery fragment %p, desired size: %zd minimum size %zd\n", nursery_frag_real_end, desired_size, minimum_size));
+
+ if (nursery_frag_real_end > nursery_next && nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
+ /* Clear the remaining space, pinning depends on this */
+ memset (nursery_next, 0, nursery_frag_real_end - nursery_next);
+
+ /* min_prev records the predecessor of the first fragment able to
+  * satisfy @minimum_size.  NULL is a valid value here (fragment at the
+  * list head), so GINT_TO_POINTER (-1) is the "none found" sentinel. */
+ min_prev = GINT_TO_POINTER (-1);
+ prev = NULL;
+
+ for (frag = nursery_fragments; frag; frag = frag->next) {
+ int frag_size = frag->fragment_end - frag->fragment_start;
+ if (desired_size <= frag_size) {
+ setup_fragment (frag, prev, desired_size);
+ return desired_size;
+ }
+ if (minimum_size <= frag_size)
+ min_prev = prev;
+
+ prev = frag;
+ }
+
+ if (min_prev != GINT_TO_POINTER (-1)) {
+ int frag_size;
+ if (min_prev)
+ frag = min_prev->next;
+ else
+ frag = nursery_fragments;
+
+ frag_size = frag->fragment_end - frag->fragment_start;
+ /* Account for the space we accept beyond/below the desired size. */
+ HEAVY_STAT (++stat_wasted_fragments_used);
+ HEAVY_STAT (stat_wasted_fragments_bytes += frag_size);
+
+ setup_fragment (frag, min_prev, minimum_size);
+ return frag_size;
+ }
+
+ return 0;
+}
+
+/*
+ * Degraded-mode allocation of @size bytes for @vtable, outside the
+ * nursery.  First performs a stop-the-world major collection when
+ * need_major_collection () says the allowance has been exceeded, then
+ * delegates to major_alloc_degraded ().
+ */
+static void*
+alloc_degraded (MonoVTable *vtable, size_t size)
+{
+ if (need_major_collection ()) {
+ stop_world ();
+ major_collection ("degraded overflow");
+ restart_world ();
+ }
+
+ return major_alloc_degraded (vtable, size);
+}
+
/*
* Provide a variant that takes just the vtable for small fixed-size objects.
* The aligned size is already computed and stored in vt->gc_descr.
/* FIXME: handle OOM */
void **p;
char *new_next;
- gboolean res;
TLAB_ACCESS_INIT;
HEAVY_STAT (++stat_objects_alloced);
*/
if (degraded_mode && degraded_mode < DEFAULT_NURSERY_SIZE) {
p = alloc_degraded (vtable, size);
+ binary_protocol_alloc_degraded (p, vtable, size);
return p;
}
+ /*FIXME: This codepath is currently dead code, since tlab_size > MAX_SMALL_OBJ_SIZE*/
if (size > tlab_size) {
/* Allocate directly from the nursery */
if (nursery_next + size >= nursery_frag_real_end) {
minor_collect_or_expand_inner (size);
if (degraded_mode) {
p = alloc_degraded (vtable, size);
+ binary_protocol_alloc_degraded (p, vtable, size);
return p;
}
}
if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
memset (p, 0, size);
} else {
+ int alloc_size = tlab_size;
+ int available_in_nursery = nursery_frag_real_end - nursery_next;
if (TLAB_START)
DEBUG (3, fprintf (gc_debug_file, "Retire TLAB: %p-%p [%ld]\n", TLAB_START, TLAB_REAL_END, (long)(TLAB_REAL_END - TLAB_NEXT - size)));
- if (nursery_next + tlab_size >= nursery_frag_real_end) {
- res = search_fragment_for_size (tlab_size);
- if (!res) {
- minor_collect_or_expand_inner (tlab_size);
- if (degraded_mode) {
- p = alloc_degraded (vtable, size);
- return p;
+ if (alloc_size >= available_in_nursery) {
+ if (available_in_nursery > MAX_NURSERY_TLAB_WASTE && available_in_nursery > size) {
+ alloc_size = available_in_nursery;
+ } else {
+ alloc_size = search_fragment_for_size_range (tlab_size, size);
+ if (!alloc_size) {
+ alloc_size = tlab_size;
+ minor_collect_or_expand_inner (tlab_size);
+ if (degraded_mode) {
+ p = alloc_degraded (vtable, size);
+ binary_protocol_alloc_degraded (p, vtable, size);
+ return p;
+ }
}
}
}
/* Allocate a new TLAB from the current nursery fragment */
TLAB_START = nursery_next;
- nursery_next += tlab_size;
+ nursery_next += alloc_size;
TLAB_NEXT = TLAB_START;
- TLAB_REAL_END = TLAB_START + tlab_size;
- TLAB_TEMP_END = TLAB_START + MIN (SCAN_START_SIZE, tlab_size);
+ TLAB_REAL_END = TLAB_START + alloc_size;
+ TLAB_TEMP_END = TLAB_START + MIN (SCAN_START_SIZE, alloc_size);
if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
- memset (TLAB_START, 0, tlab_size);
+ memset (TLAB_START, 0, alloc_size);
/* Allocate from the TLAB */
p = (void*)TLAB_NEXT;
p = major_alloc_small_pinned_obj (size, vtable->klass->has_references);
}
DEBUG (6, fprintf (gc_debug_file, "Allocated pinned object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
- binary_protocol_alloc (p, vtable, size);
+ binary_protocol_alloc_pinned (p, vtable, size);
*p = vtable;
UNLOCK_GC;
return p;
array = (MonoArray*)object;
cur = mono_array_addr (array, Ephemeron, 0);
- array_end = cur + mono_array_length (array);
+ array_end = cur + mono_array_length_fast (array);
tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
for (; cur < array_end; ++cur) {
if (!key || key == tombstone)
continue;
- DEBUG (5, fprintf (gc_debug_file, "[%d] key %p (%s) value %p (%s)\n", cur - mono_array_addr (array, Ephemeron, 0),
+ DEBUG (5, fprintf (gc_debug_file, "[%td] key %p (%s) value %p (%s)\n", cur - mono_array_addr (array, Ephemeron, 0),
key, object_is_reachable (key, start, end) ? "reachable" : "unreachable",
cur->value, cur->value && object_is_reachable (cur->value, start, end) ? "reachable" : "unreachable"));
array = (MonoArray*)object;
cur = mono_array_addr (array, Ephemeron, 0);
- array_end = cur + mono_array_length (array);
+ array_end = cur + mono_array_length_fast (array);
tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
for (; cur < array_end; ++cur) {
if (!key || key == tombstone)
continue;
- DEBUG (5, fprintf (gc_debug_file, "[%d] key %p (%s) value %p (%s)\n", cur - mono_array_addr (array, Ephemeron, 0),
+ DEBUG (5, fprintf (gc_debug_file, "[%td] key %p (%s) value %p (%s)\n", cur - mono_array_addr (array, Ephemeron, 0),
key, object_is_reachable (key, start, end) ? "reachable" : "unreachable",
cur->value, cur->value && object_is_reachable (cur->value, start, end) ? "reachable" : "unreachable"));
*/
/* FIXME: handle large/small config */
-#define THREAD_HASH_SIZE 11
#define HASH_PTHREAD_T(id) (((unsigned int)(id) >> 4) * 2654435761u)
static SgenThreadInfo* thread_table [THREAD_HASH_SIZE];
static MonoSemType suspend_ack_semaphore;
static MonoSemType *suspend_ack_semaphore_ptr;
static unsigned int global_stop_count = 0;
-#ifdef __APPLE__
-static int suspend_signal_num = SIGXFSZ;
-#else
-static int suspend_signal_num = SIGPWR;
-#endif
-static int restart_signal_num = SIGXCPU;
+
static sigset_t suspend_signal_mask;
static mword cur_thread_regs [ARCH_NUM_REGS] = {0};
/* LOCKING: assumes the GC lock is held */
-static SgenThreadInfo*
-thread_info_lookup (ARCH_THREAD_TYPE id)
+SgenThreadInfo**
+mono_sgen_get_thread_table (void)
+{
+ return thread_table;
+}
+
+SgenThreadInfo*
+mono_sgen_thread_info_lookup (ARCH_THREAD_TYPE id)
{
unsigned int hash = HASH_PTHREAD_T (id) % THREAD_HASH_SIZE;
SgenThreadInfo *info;
update_current_thread_stack (void *start)
{
void *ptr = cur_thread_regs;
- SgenThreadInfo *info = thread_info_lookup (ARCH_GET_THREAD ());
+ SgenThreadInfo *info = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());
info->stack_start = align_pointer (&ptr);
g_assert (info->stack_start >= info->stack_start_limit && info->stack_start < info->stack_end);
gc_callbacks.thread_suspend_func (info->runtime_data, NULL);
}
-static const char*
-signal_desc (int signum)
-{
- if (signum == suspend_signal_num)
- return "suspend";
- if (signum == restart_signal_num)
- return "restart";
- return "unknown";
-}
-
/*
* Define this and use the "xdomain-checks" MONO_GC_DEBUG option to
* have cross-domain checks in the write barrier.
static gboolean
is_ip_in_managed_allocator (MonoDomain *domain, gpointer ip);
-static void
-wait_for_suspend_ack (int count)
+void
+mono_sgen_wait_for_suspend_ack (int count)
{
int i, result;
}
}
-/* LOCKING: assumes the GC lock is held */
-static int
-thread_handshake (int signum)
-{
- int count, i, result;
- SgenThreadInfo *info;
- pthread_t me = pthread_self ();
-
- count = 0;
- for (i = 0; i < THREAD_HASH_SIZE; ++i) {
- for (info = thread_table [i]; info; info = info->next) {
- DEBUG (4, fprintf (gc_debug_file, "considering thread %p for signal %d (%s)\n", info, signum, signal_desc (signum)));
- if (ARCH_THREAD_EQUALS (info->id, me)) {
- DEBUG (4, fprintf (gc_debug_file, "Skip (equal): %p, %p\n", (void*)me, (void*)info->id));
- continue;
- }
- /*if (signum == suspend_signal_num && info->stop_count == global_stop_count)
- continue;*/
- result = pthread_kill (info->id, signum);
- if (result == 0) {
- DEBUG (4, fprintf (gc_debug_file, "thread %p signal sent\n", info));
- count++;
- } else {
- DEBUG (4, fprintf (gc_debug_file, "thread %p signal failed: %d (%s)\n", (void*)info->id, result, strerror (result)));
- info->skip = 1;
- }
- }
- }
-
- wait_for_suspend_ack (count);
-
- return count;
-}
-
static int
restart_threads_until_none_in_managed_allocator (void)
{
if (!info->stack_start || info->in_critical_region ||
is_ip_in_managed_allocator (info->stopped_domain, info->stopped_ip)) {
binary_protocol_thread_restart ((gpointer)info->id);
+#if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
+ result = thread_resume (pthread_mach_thread_np (info->id));
+#else
result = pthread_kill (info->id, restart_signal_num);
+#endif
if (result == 0) {
++restart_count;
} else {
if (restart_count == 0)
break;
+#if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
+ /* Mach thread_resume is synchronous, so we don't need to wait for the threads */
+#else
/* wait for the threads to signal their restart */
- wait_for_suspend_ack (restart_count);
+ mono_sgen_wait_for_suspend_ack (restart_count);
+#endif
if (sleep_duration < 0) {
sched_yield ();
for (info = thread_table [i]; info; info = info->next) {
if (info->skip || info->stopped_ip == NULL)
continue;
+#if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
+ result = thread_suspend (pthread_mach_thread_np (info->id));
+#else
result = pthread_kill (info->id, suspend_signal_num);
+#endif
if (result == 0) {
++restarted_count;
} else {
}
/* some threads might have died */
num_threads_died += restart_count - restarted_count;
+#if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
+ /* Mach thread_suspend is synchronous, so we don't need to wait for the threads */
+#else
/* wait for the threads to signal their suspension
again */
- wait_for_suspend_ack (restart_count);
+ mono_sgen_wait_for_suspend_ack (restart_count);
+#endif
}
return num_threads_died;
gpointer stack_start;
id = pthread_self ();
- info = thread_info_lookup (id);
+ info = mono_sgen_thread_info_lookup (id);
info->stopped_domain = mono_domain_get ();
info->stopped_ip = (gpointer) ARCH_SIGCTX_IP (context);
stop_count = global_stop_count;
SgenThreadInfo *info;
int old_errno = errno;
- info = thread_info_lookup (pthread_self ());
+ info = mono_sgen_thread_info_lookup (pthread_self ());
info->signal = restart_signal_num;
DEBUG (4, fprintf (gc_debug_file, "Restart handler in %p %p\n", info, (gpointer)ARCH_GET_THREAD ()));
update_current_thread_stack (&count);
global_stop_count++;
- DEBUG (3, fprintf (gc_debug_file, "stopping world n %d from %p %p\n", global_stop_count, thread_info_lookup (ARCH_GET_THREAD ()), (gpointer)ARCH_GET_THREAD ()));
+ DEBUG (3, fprintf (gc_debug_file, "stopping world n %d from %p %p\n", global_stop_count, mono_sgen_thread_info_lookup (ARCH_GET_THREAD ()), (gpointer)ARCH_GET_THREAD ()));
TV_GETTIME (stop_world_time);
- count = thread_handshake (suspend_signal_num);
+ count = mono_sgen_thread_handshake (suspend_signal_num);
count -= restart_threads_until_none_in_managed_allocator ();
g_assert (count >= 0);
DEBUG (3, fprintf (gc_debug_file, "world stopped %d thread(s)\n", count));
release_gc_locks ();
- count = thread_handshake (restart_signal_num);
+ count = mono_sgen_thread_handshake (restart_signal_num);
TV_GETTIME (end_sw);
usec = TV_ELAPSED (stop_world_time, end_sw);
max_pause_usec = MAX (usec, max_pause_usec);
gc_callbacks = *callbacks;
}
+/* Return a pointer to the callbacks registered via
+ * mono_gc_set_gc_callbacks, for use by other sgen files. */
+MonoGCCallbacks *
+mono_gc_get_gc_callbacks (void)
+{
+ return &gc_callbacks;
+}
+
/* Variables holding start/end nursery so it won't have to be passed at every call */
static void *scan_area_arg_start, *scan_area_arg_end;
for (i = 0; i < THREAD_HASH_SIZE; ++i) {
for (info = thread_table [i]; info; info = info->next) {
if (info->skip) {
- DEBUG (3, fprintf (gc_debug_file, "Skipping dead thread %p, range: %p-%p, size: %zd\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start));
+ DEBUG (3, fprintf (gc_debug_file, "Skipping dead thread %p, range: %p-%p, size: %td\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start));
continue;
}
- DEBUG (3, fprintf (gc_debug_file, "Scanning thread %p, range: %p-%p, size: %zd, pinned=%d\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, next_pin_slot));
+ DEBUG (3, fprintf (gc_debug_file, "Scanning thread %p, range: %p-%p, size: %td, pinned=%d\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, next_pin_slot));
if (gc_callbacks.thread_mark_func && !conservative_stack_mark)
gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise);
else if (!precise)
ptr_on_stack (void *ptr)
{
gpointer stack_start = &stack_start;
- SgenThreadInfo *info = thread_info_lookup (ARCH_GET_THREAD ());
+ SgenThreadInfo *info = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());
if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
return TRUE;
/* the global one */
for (remset = global_remset; remset; remset = remset->next) {
- DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %zd\n", remset->data, remset->store_next, remset->store_next - remset->data));
+ DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
store_pos = remset->data;
for (p = remset->data; p < remset->store_next; p = next_p) {
- void **ptr = p [0];
+ void **ptr = (void**)p [0];
/*Ignore previously processed remset.*/
if (!global_remset_location_was_not_added (ptr)) {
RememberedSet *next;
int j;
for (remset = info->remset; remset; remset = next) {
- DEBUG (4, fprintf (gc_debug_file, "Scanning remset for thread %p, range: %p-%p, size: %zd\n", info, remset->data, remset->store_next, remset->store_next - remset->data));
+ DEBUG (4, fprintf (gc_debug_file, "Scanning remset for thread %p, range: %p-%p, size: %td\n", info, remset->data, remset->store_next, remset->store_next - remset->data));
for (p = remset->data; p < remset->store_next;) {
p = handle_remset (p, start_nursery, end_nursery, FALSE);
}
while (freed_thread_remsets) {
RememberedSet *next;
remset = freed_thread_remsets;
- DEBUG (4, fprintf (gc_debug_file, "Scanning remset for freed thread, range: %p-%p, size: %zd\n", remset->data, remset->store_next, remset->store_next - remset->data));
+ DEBUG (4, fprintf (gc_debug_file, "Scanning remset for freed thread, range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
for (p = remset->data; p < remset->store_next;) {
p = handle_remset (p, start_nursery, end_nursery, FALSE);
}
LOCK_GC;
init_stats ();
- info = thread_info_lookup (ARCH_GET_THREAD ());
+ info = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());
if (info == NULL)
info = gc_register_current_thread (baseptr);
UNLOCK_GC;
rs->next = REMEMBERED_SET;
REMEMBERED_SET = rs;
#ifdef HAVE_KW_THREAD
- thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
+ mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
#endif
*(rs->store_next++) = (mword)field_ptr;
*(void**)field_ptr = value;
rs->next = REMEMBERED_SET;
REMEMBERED_SET = rs;
#ifdef HAVE_KW_THREAD
- thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
+ mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
#endif
*(rs->store_next++) = (mword)slot_ptr;
*(void**)slot_ptr = value;
rs->next = REMEMBERED_SET;
REMEMBERED_SET = rs;
#ifdef HAVE_KW_THREAD
- thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
+ mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
#endif
*(rs->store_next++) = (mword)dest_ptr | REMSET_RANGE;
*(rs->store_next++) = count;
UNLOCK_GC;
}
-static char*
-find_object_for_ptr_in_area (char *ptr, char *start, char *end)
-{
- while (start < end) {
- char *old_start;
-
- if (!*(void**)start) {
- start += sizeof (void*); /* should be ALLOC_ALIGN, really */
- continue;
- }
-
- old_start = start;
-
- #define SCAN_OBJECT_NOSCAN
- #include "sgen-scan-object.h"
-
- if (ptr >= old_start && ptr < start)
- return old_start;
- }
-
- return NULL;
-}
-
static char *found_obj;
static void
{
LOSObject *bigobj;
- if (ptr >= nursery_section->data && ptr < nursery_section->end_data)
- return find_object_for_ptr_in_area (ptr, nursery_section->data, nursery_section->end_data);
+ if (ptr >= nursery_section->data && ptr < nursery_section->end_data) {
+ found_obj = NULL;
+ scan_area_with_callback (nursery_section->data, nursery_section->end_data,
+ (IterateObjectCallbackFunc)find_object_for_ptr_callback, ptr);
+ if (found_obj)
+ return found_obj;
+ }
for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
if (ptr >= bigobj->data && ptr < bigobj->data + bigobj->size)
mono_gc_wbarrier_generic_nostore (ptr);
}
+/*
+ * Copy @size bytes from @_src to @_dest one pointer-word at a time,
+ * issuing a write barrier (mono_gc_wbarrier_generic_store) for each word
+ * whose corresponding bit in @bitmap is set; clear bits are plain copies.
+ * Bit 0 of @bitmap maps to the first word.
+ * NOTE(review): assumes @size is a multiple of SIZEOF_VOID_P (otherwise
+ * the while (size) loop never terminates) and that both pointers are
+ * word-aligned -- confirm with callers.  @bitmap can describe at most
+ * 8 * sizeof (unsigned) words; words beyond that are copied unbarriered.
+ */
+void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
+{
+ mword *dest = _dest;
+ mword *src = _src;
+
+ while (size) {
+ if (bitmap & 0x1)
+ mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);
+ else
+ *dest = *src;
+ ++src;
+ ++dest;
+ size -= SIZEOF_VOID_P;
+ bitmap >>= 1;
+ }
+}
+
+
+
+
void
mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
{
LOCK_GC;
memmove (dest, src, count * mono_class_value_size (klass, NULL));
rs = REMEMBERED_SET;
- if (ptr_in_nursery (dest) || ptr_on_stack (dest)) {
+ if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !klass->has_references) {
UNLOCK_GC;
return;
}
rs->next = REMEMBERED_SET;
REMEMBERED_SET = rs;
#ifdef HAVE_KW_THREAD
- thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
+ mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
#endif
*(rs->store_next++) = (mword)dest | REMSET_VTYPE;
*(rs->store_next++) = (mword)klass->gc_descr;
rs->next = REMEMBERED_SET;
REMEMBERED_SET = rs;
#ifdef HAVE_KW_THREAD
- thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
+ mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
#endif
*(rs->store_next++) = (mword)obj | REMSET_OBJECT;
UNLOCK_GC;
/* the global one */
for (remset = global_remset; remset; remset = remset->next) {
- DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %zd\n", remset->data, remset->store_next, remset->store_next - remset->data));
+ DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
for (p = remset->data; p < remset->store_next;) {
p = find_in_remset_loc (p, addr, &found);
if (found)
for (info = thread_table [i]; info; info = info->next) {
int j;
for (remset = info->remset; remset; remset = remset->next) {
- DEBUG (4, fprintf (gc_debug_file, "Scanning remset for thread %p, range: %p-%p, size: %zd\n", info, remset->data, remset->store_next, remset->store_next - remset->data));
+ DEBUG (4, fprintf (gc_debug_file, "Scanning remset for thread %p, range: %p-%p, size: %td\n", info, remset->data, remset->store_next, remset->store_next - remset->data));
for (p = remset->data; p < remset->store_next;) {
p = find_in_remset_loc (p, addr, &found);
if (found)
/* the freed thread ones */
for (remset = freed_thread_remsets; remset; remset = remset->next) {
- DEBUG (4, fprintf (gc_debug_file, "Scanning remset for freed thread, range: %p-%p, size: %zd\n", remset->data, remset->store_next, remset->store_next - remset->data));
+ DEBUG (4, fprintf (gc_debug_file, "Scanning remset for freed thread, range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
for (p = remset->data; p < remset->store_next;) {
p = find_in_remset_loc (p, addr, &found);
if (found)
#define HANDLE_PTR(ptr,obj) do { \
if (*(ptr) && (char*)*(ptr) >= nursery_start && (char*)*(ptr) < nursery_next) { \
if (!find_in_remsets ((char*)(ptr))) { \
- fprintf (gc_debug_file, "Oldspace->newspace reference %p at offset %zd in object %p (%s.%s) not found in remsets.\n", *(ptr), (char*)(ptr) - (char*)(obj), (obj), ((MonoObject*)(obj))->vtable->klass->name_space, ((MonoObject*)(obj))->vtable->klass->name); \
+ fprintf (gc_debug_file, "Oldspace->newspace reference %p at offset %td in object %p (%s.%s) not found in remsets.\n", *(ptr), (char*)(ptr) - (char*)(obj), (obj), ((MonoObject*)(obj))->vtable->klass->name_space, ((MonoObject*)(obj))->vtable->klass->name); \
binary_protocol_missing_remset ((obj), (gpointer)LOAD_VTABLE ((obj)), (char*)(ptr) - (char*)(obj), *(ptr), (gpointer)LOAD_VTABLE(*(ptr)), object_is_pinned (*(ptr))); \
if (!object_is_pinned (*(ptr))) \
missing_remsets = TRUE; \
static void
check_consistency (void)
{
+ LOSObject *bigobj;
+
// Need to add more checks
missing_remsets = FALSE;
// Check that oldspace->newspace pointers are registered with the collector
major_iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)check_consistency_callback, NULL);
+ for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
+ check_consistency_callback (bigobj->data, bigobj->size, NULL);
+
DEBUG (1, fprintf (gc_debug_file, "Heap consistency check done.\n"));
#ifdef BINARY_PROTOCOL
g_assert (!missing_remsets);
}
+
+/* Redefine HANDLE_PTR so that sgen-scan-object.h asserts every reference
+ * held by a scanned object points at something with a valid vtable. */
+#undef HANDLE_PTR
+#define HANDLE_PTR(ptr,obj) do {	\
+	if (*(ptr))	\
+		g_assert (LOAD_VTABLE (*(ptr)));	\
+	} while (0)
+
+/* Scan one major-heap or LOS object, asserting its references are valid
+ * (via the HANDLE_PTR defined above). */
+static void
+check_major_refs_callback (char *start, size_t size, void *dummy)
+{
+#define SCAN_OBJECT_ACTION
+#include "sgen-scan-object.h"
+}
+
+/* Debug check: walk every major-heap and LOS object and assert that all
+ * object references they contain have a vtable. */
+static void
+check_major_refs (void)
+{
+	LOSObject *bigobj;
+
+	major_iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)check_major_refs_callback, NULL);
+
+	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
+		check_major_refs_callback (bigobj->data, bigobj->size, NULL);
+}
+
/* Check that the reference is valid */
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) do { \
* Perform consistency check on an object. Currently we only check that the
* reference fields are valid.
*/
-char*
+void
check_object (char *start)
{
if (!start)
- return NULL;
+ return;
+ /* sgen-scan-object.h expands HANDLE_PTR (redefined above to assert a
+  * valid vtable) over every reference field of the object at @start */
#include "sgen-scan-object.h"
-
- return start;
}
/*
{
gboolean result;
LOCK_GC;
- result = thread_info_lookup (ARCH_GET_THREAD ()) != NULL;
+ result = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ()) != NULL;
UNLOCK_GC;
return result;
}
#endif
nursery_size = DEFAULT_NURSERY_SIZE;
+ minor_collection_allowance = MIN_MINOR_COLLECTION_ALLOWANCE;
major_init ();
collect_before_allocs = TRUE;
} else if (!strcmp (opt, "check-at-minor-collections")) {
consistency_check_at_minor_collection = TRUE;
+ nursery_clear_policy = CLEAR_AT_GC;
} else if (!strcmp (opt, "xdomain-checks")) {
xdomain_checks = TRUE;
} else if (!strcmp (opt, "clear-at-gc")) {
return write_barrier_method;
}
-#endif /* HAVE_SGEN_GC */
+/* Return a newly g_strdup'd human-readable name of this collector. */
+char*
+mono_gc_get_description (void)
+{
+ return g_strdup ("sgen");
+}
+/* No-op under sgen. */
+void
+mono_gc_set_desktop_mode (void)
+{
+}
+
+/* sgen is a moving (copying) collector. */
+gboolean
+mono_gc_is_moving (void)
+{
+ return TRUE;
+}
+
+/* sgen cannot be disabled at runtime. */
+gboolean
+mono_gc_is_disabled (void)
+{
+ return FALSE;
+}
+
+#endif /* HAVE_SGEN_GC */