*) test/fix endianness issues
- *) add more timing info
-
*) Implement a card table as the write barrier instead of remembered
sets? Card tables are not easy to implement with our current
memory layout. We have several different kinds of major heap
*) experiment with max small object size (very small right now - 2kb,
because it's tied to the max freelist size)
- *) avoid the memory store from copy_object when not needed, i.e. when the object
- is not copied.
-
*) add an option to mmap the whole heap in one chunk: it makes for many
simplifications in the checks (put the nursery at the top and just use a single
check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
*) specialized dynamically generated markers/copiers
+ *) Dynamically adjust TLAB size to the number of threads. If we have
+ too many threads that do allocation, we might need smaller TLABs,
+ and we might get better performance with larger TLABs if we only
+ have a handful of threads. We could sum up the space left in all
+ assigned TLABs and if that's more than some percentage of the
+ nursery size, reduce the TLAB size.
+
+ *) Explore placing unreachable objects on unused nursery memory.
+ Instead of memset'ing a region to zero, place an int[] covering it.
+ A good place to start is add_nursery_frag. The tricky thing here is
+ placing those objects atomically outside of a collection.
+
+
*/
#include "config.h"
#ifdef HAVE_SGEN_GC
/* If not null, dump the heap after each collection into this file */
static FILE *heap_dump_file = NULL;
/* If set, mark stacks conservatively, even if precise marking is possible */
-static gboolean conservative_stack_mark = FALSE;
+static gboolean conservative_stack_mark = TRUE;
/* If set, do a plausibility check on the scan_starts before and after
each collection */
static gboolean do_scan_starts_check = FALSE;
#endif
#ifdef HEAVY_STATISTICS
-static long stat_objects_alloced = 0;
-static long stat_objects_alloced_degraded = 0;
-static long stat_copy_object_called_nursery = 0;
-static long stat_objects_copied_nursery = 0;
-static long stat_copy_object_called_major = 0;
-static long stat_objects_copied_major = 0;
-
-static long stat_copy_object_failed_from_space = 0;
-static long stat_copy_object_failed_forwarded = 0;
-static long stat_copy_object_failed_pinned = 0;
-static long stat_copy_object_failed_large_pinned = 0;
-static long stat_copy_object_failed_to_space = 0;
-
-static long stat_store_remsets = 0;
-static long stat_store_remsets_unique = 0;
-static long stat_saved_remsets_1 = 0;
-static long stat_saved_remsets_2 = 0;
-static long stat_global_remsets_added = 0;
-static long stat_global_remsets_processed = 0;
-
-static long num_copy_object_called = 0;
-static long num_objects_copied = 0;
+static long long stat_objects_alloced = 0;
+static long long stat_bytes_alloced = 0;
+static long long stat_objects_alloced_degraded = 0;
+static long long stat_bytes_alloced_degraded = 0;
+static long long stat_bytes_alloced_los = 0;
+
+static long long stat_copy_object_called_nursery = 0;
+static long long stat_objects_copied_nursery = 0;
+static long long stat_copy_object_called_major = 0;
+static long long stat_objects_copied_major = 0;
+
+static long long stat_scan_object_called_nursery = 0;
+static long long stat_scan_object_called_major = 0;
+
+static long long stat_nursery_copy_object_failed_from_space = 0;
+static long long stat_nursery_copy_object_failed_forwarded = 0;
+static long long stat_nursery_copy_object_failed_pinned = 0;
+
+static long long stat_store_remsets = 0;
+static long long stat_store_remsets_unique = 0;
+static long long stat_saved_remsets_1 = 0;
+static long long stat_saved_remsets_2 = 0;
+static long long stat_global_remsets_added = 0;
+static long long stat_global_remsets_readded = 0;
+static long long stat_global_remsets_processed = 0;
+static long long stat_global_remsets_discarded = 0;
+
+static long long stat_wasted_fragments_used = 0;
+static long long stat_wasted_fragments_bytes = 0;
static int stat_wbarrier_set_field = 0;
static int stat_wbarrier_set_arrayref = 0;
static int stat_wbarrier_object_copy = 0;
#endif
-static long pinned_chunk_bytes_alloced = 0;
-static long large_internal_bytes_alloced = 0;
-
+static long long time_minor_pre_collection_fragment_clear = 0;
+static long long time_minor_pinning = 0;
+static long long time_minor_scan_remsets = 0;
+static long long time_minor_scan_pinned = 0;
+static long long time_minor_scan_registered_roots = 0;
+static long long time_minor_scan_thread_data = 0;
+static long long time_minor_finish_gray_stack = 0;
+static long long time_minor_fragment_creation = 0;
+
+static long long time_major_pre_collection_fragment_clear = 0;
+static long long time_major_pinning = 0;
+static long long time_major_scan_pinned = 0;
+static long long time_major_scan_registered_roots = 0;
+static long long time_major_scan_thread_data = 0;
+static long long time_major_scan_alloc_pinned = 0;
+static long long time_major_scan_finalized = 0;
+static long long time_major_scan_big_objects = 0;
+static long long time_major_finish_gray_stack = 0;
+static long long time_major_sweep = 0;
+static long long time_major_fragment_creation = 0;
+
+static long long pinned_chunk_bytes_alloced = 0;
+static long long large_internal_bytes_alloced = 0;
+
+/* Keep in sync with internal_mem_names in dump_heap()! */
enum {
INTERNAL_MEM_PIN_QUEUE,
INTERNAL_MEM_FRAGMENT,
INTERNAL_MEM_REMSET,
INTERNAL_MEM_GRAY_QUEUE,
INTERNAL_MEM_STORE_REMSET,
+ INTERNAL_MEM_MS_TABLES,
+ INTERNAL_MEM_MS_BLOCK_INFO,
+ INTERNAL_MEM_EPHEMERON_LINK,
INTERNAL_MEM_MAX
};
}
*/
-#define MAX_DEBUG_LEVEL 8
+#define MAX_DEBUG_LEVEL 2
#define DEBUG(level,a) do {if (G_UNLIKELY ((level) <= MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) a;} while (0)
+/* Define this to allow the user to change some of the constants by specifying
+ * their values in the MONO_GC_PARAMS environment variable. See
+ * mono_gc_base_init for details. */
+#define USER_CONFIG 1
+
#define TV_DECLARE(name) gint64 name
#define TV_GETTIME(tv) tv = mono_100ns_ticks ()
#define TV_ELAPSED(start,end) (int)((end-start) / 10)
+#define TV_ELAPSED_MS(start,end) ((TV_ELAPSED((start),(end)) + 500) / 1000)
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
struct _LOSObject {
LOSObject *next;
mword size; /* this is the object size */
- int dummy; /* to have a sizeof (LOSObject) a multiple of ALLOC_ALIGN and data starting at same alignment */
guint16 role;
- guint16 scanned;
+ int dummy; /* to have a sizeof (LOSObject) a multiple of ALLOC_ALIGN and data starting at same alignment */
char data [MONO_ZERO_LEN_ARRAY];
};
REMSET_LOCATION, /* just a pointer to the exact location */
REMSET_RANGE, /* range of pointer fields */
REMSET_OBJECT, /* mark all the object for scanning */
- REMSET_OTHER, /* all others */
+ REMSET_VTYPE, /* a valuetype array described by a gc descriptor and a count */
REMSET_TYPE_MASK = 0x3
};
-/* Subtypes of REMSET_OTHER */
-enum {
- REMSET_VTYPE, /* a valuetype array described by a gc descriptor and a count */
- REMSET_ROOT_LOCATION, /* a location inside a root */
-};
-
#ifdef HAVE_KW_THREAD
static __thread RememberedSet *remembered_set MONO_TLS_FAST;
#endif
static pthread_key_t remembered_set_key;
static RememberedSet *global_remset;
static RememberedSet *freed_thread_remsets;
-//static int store_to_global_remset = 0;
static GenericStoreRememberedSet *generic_store_remsets = NULL;
+/* A two-slot cache for recently inserted remsets */
+static gpointer global_remset_cache [2];
+
/* FIXME: later choose a size that takes into account the RememberedSet struct
 * and doesn't waste any alloc padding space.
*/
{
MonoClass *klass = ((MonoVTable*)LOAD_VTABLE (o))->klass;
if (klass == mono_defaults.string_class) {
- return sizeof (MonoString) + 2 * mono_string_length ((MonoString*) o) + 2;
+ return sizeof (MonoString) + 2 * mono_string_length_fast ((MonoString*) o) + 2;
} else if (klass->rank) {
MonoArray *array = (MonoArray*)o;
- size_t size = sizeof (MonoArray) + klass->sizes.element_size * mono_array_length (array);
+ size_t size = sizeof (MonoArray) + klass->sizes.element_size * mono_array_length_fast (array);
if (G_UNLIKELY (array->bounds)) {
size += sizeof (mono_array_size_t) - 1;
size &= ~(sizeof (mono_array_size_t) - 1);
static int num_minor_gcs = 0;
static int num_major_gcs = 0;
+#ifdef USER_CONFIG
+
/* good sizes are 512KB-1MB: larger ones increase a lot memzeroing time */
-//#define DEFAULT_NURSERY_SIZE (1024*512*125+4096*118)
-#define DEFAULT_NURSERY_SIZE (1024*512*2)
+#define DEFAULT_NURSERY_SIZE (default_nursery_size)
+static int default_nursery_size = (1 << 20);
+#ifdef ALIGN_NURSERY
/* The number of trailing 0 bits in DEFAULT_NURSERY_SIZE */
+#define DEFAULT_NURSERY_BITS (default_nursery_bits)
+static int default_nursery_bits = 20;
+#endif
+
+#else
+
+#define DEFAULT_NURSERY_SIZE (1024*512*2)
+#ifdef ALIGN_NURSERY
#define DEFAULT_NURSERY_BITS 20
-#define MAJOR_SECTION_SIZE (128*1024)
-#define BLOCK_FOR_OBJECT(o) ((Block*)(((mword)(o)) & ~(MAJOR_SECTION_SIZE - 1)))
-#define MAJOR_SECTION_FOR_OBJECT(o) ((GCMemSection*)BLOCK_FOR_OBJECT ((o)))
-#define MIN_MINOR_COLLECTION_SECTION_ALLOWANCE (DEFAULT_NURSERY_SIZE * 3 / MAJOR_SECTION_SIZE)
-#define DEFAULT_LOS_COLLECTION_TARGET (DEFAULT_NURSERY_SIZE * 2)
+#endif
+
+#endif
+
+#define MIN_LOS_ALLOWANCE (DEFAULT_NURSERY_SIZE * 2)
/* to quickly find the head of an object pinned by a conservative address
* we keep track of the objects allocated for each SCAN_START_SIZE memory
* chunk in the nursery or other memory sections. Larger values have less
#define FREELIST_PAGESIZE 4096
static mword pagesize = 4096;
-static mword nursery_size = DEFAULT_NURSERY_SIZE;
+static mword nursery_size;
static int degraded_mode = 0;
-static int minor_collection_section_allowance = MIN_MINOR_COLLECTION_SECTION_ALLOWANCE;
-static int minor_collection_sections_alloced = 0;
-static int num_major_sections = 0;
-
static LOSObject *los_object_list = NULL;
static mword los_memory_usage = 0;
static mword los_num_objects = 0;
/* use this to tune when to do a major/minor collection */
static mword memory_pressure = 0;
-static GCMemSection *section_list = NULL;
static GCMemSection *nursery_section = NULL;
static mword lowest_heap_address = ~(mword)0;
static mword highest_heap_address = 0;
int num_links;
};
+typedef struct _EphemeronLinkNode EphemeronLinkNode;
+
+struct _EphemeronLinkNode {
+ EphemeronLinkNode *next;
+ char *array;
+};
+
+typedef struct {
+ void *key;
+ void *value;
+} Ephemeron;
+
#define LARGE_INTERNAL_MEM_HEADER_MAGIC 0x7d289f3a
typedef struct _LargeInternalMemHeader LargeInternalMemHeader;
GENERATION_MAX
};
+int current_collection_generation = -1;
+
/*
* The link pointer is hidden by negating each bit. We use the lowest
* bit of the link (before negation) to store whether it needs
static DisappearingLinkHashTable minor_disappearing_link_hash;
static DisappearingLinkHashTable major_disappearing_link_hash;
+static EphemeronLinkNode *ephemeron_list;
+
static int num_ready_finalizers = 0;
static int no_finalize = 0;
448, 512, 584, 680, 816, 1024, 1360, 2048};
#define FREELIST_NUM_SLOTS (sizeof (freelist_sizes) / sizeof (freelist_sizes [0]))
-static char* max_pinned_chunk_addr = NULL;
-static char* min_pinned_chunk_addr = (char*)-1;
-/* pinned_chunk_list is used for allocations of objects that are never moved */
-static PinnedChunk *pinned_chunk_list = NULL;
+/* This is also the MAJOR_SECTION_SIZE for the copying major
+ collector */
+#define PINNED_CHUNK_SIZE (128 * 1024)
+
/* internal_chunk_list is used for allocating structures needed by the GC */
static PinnedChunk *internal_chunk_list = NULL;
-static gboolean
-obj_is_from_pinned_alloc (char *p)
-{
- return BLOCK_FOR_OBJECT (p)->role == MEMORY_ROLE_PINNED;
-}
-
static int slot_for_size (size_t size);
-static void
-free_pinned_object (PinnedChunk *chunk, char *obj, size_t size)
-{
- void **p = (void**)obj;
- int slot = slot_for_size (size);
-
- g_assert (obj >= (char*)chunk->start_data && obj < ((char*)chunk + chunk->num_pages * FREELIST_PAGESIZE));
- *p = chunk->free_list [slot];
- chunk->free_list [slot] = p;
-}
-
enum {
ROOT_TYPE_NORMAL = 0, /* "normal" roots */
ROOT_TYPE_PINNED = 1, /* roots without a GC descriptor */
static char *nursery_next = NULL;
static char *nursery_frag_real_end = NULL;
static char *nursery_real_end = NULL;
-//static char *nursery_first_pinned_start = NULL;
static char *nursery_last_pinned_end = NULL;
/* The size of a TLAB */
*/
static guint32 tlab_size = (1024 * 4);
+/* How much space we tolerate wasting from the current fragment when allocating a new TLAB */
+#define MAX_NURSERY_TLAB_WASTE 512
+
/* fragments that are free and ready to be used for allocation */
static Fragment *nursery_fragments = NULL;
/* free list of fragment structures */
static Fragment *fragment_freelist = NULL;
-/*
- * used when moving the objects
- */
-static char *to_space_bumper = NULL;
-static char *to_space_top = NULL;
-static GCMemSection *to_space_section = NULL;
-
/* objects bigger than this go into the large object space */
-#define MAX_SMALL_OBJ_SIZE MAX_FREELIST_SIZE
+#define MAX_SMALL_OBJ_SIZE 2040
/* Functions supplied by the runtime to be called by the GC */
static MonoGCCallbacks gc_callbacks;
+#define ALLOC_ALIGN 8
+#define ALLOC_ALIGN_BITS 3
+
+#define MOVED_OBJECTS_NUM 64
+static void *moved_objects [MOVED_OBJECTS_NUM];
+static int moved_objects_idx = 0;
+
/*
* ######################################################################
* ######## Macros and function declarations.
return (void*)p;
}
+typedef void (*CopyOrMarkObjectFunc) (void**);
+typedef char* (*ScanObjectFunc) (char*);
+
/* forward declarations */
static void* get_internal_mem (size_t size, int type);
static void free_internal_mem (void *addr, int type);
static void* get_os_memory (size_t size, int activate);
+static void* get_os_memory_aligned (mword size, mword alignment, gboolean activate);
static void free_os_memory (void *addr, size_t size);
static G_GNUC_UNUSED void report_internal_mem_usage (void);
static int stop_world (void);
static int restart_world (void);
+static void add_to_global_remset (gpointer ptr);
static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise);
static void scan_from_remsets (void *start_nursery, void *end_nursery);
+static void scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, char *addr_start, char *addr_end, int root_type);
+static void scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeEntry *list);
static void find_pinning_ref_from_thread (char *obj, size_t size);
static void update_current_thread_stack (void *start);
-static GCMemSection* alloc_major_section (void);
-static void finalize_in_range (char *start, char *end, int generation);
+static void finalize_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation);
static void add_or_remove_disappearing_link (MonoObject *obj, void **link, gboolean track, int generation);
-static void null_link_in_range (char *start, char *end, int generation);
+static void null_link_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation);
static void null_links_for_domain (MonoDomain *domain, int generation);
static gboolean search_fragment_for_size (size_t size);
-static void mark_pinned_from_addresses (PinnedChunk *chunk, void **start, void **end);
+static int search_fragment_for_size_range (size_t desired_size, size_t minimum_size);
+static void build_nursery_fragments (int start_pin, int end_pin);
+static void clear_nursery_fragments (char *next);
+static void pin_from_roots (void *start_nursery, void *end_nursery);
+static int pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery);
+static void pin_objects_in_section (GCMemSection *section);
+static void optimize_pin_queue (int start_slot);
static void clear_remsets (void);
static void clear_tlabs (void);
-typedef void (*ScanPinnedObjectCallbackFunc) (PinnedChunk*, char*, size_t, void*);
-static void scan_pinned_objects (ScanPinnedObjectCallbackFunc callback, void *callback_data);
-static void sweep_pinned_objects (void);
-static void scan_from_pinned_objects (char *addr_start, char *addr_end);
+typedef void (*IterateObjectCallbackFunc) (char*, size_t, void*);
+static void scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data);
+static char* scan_object (char *start);
+static char* major_scan_object (char *start);
+static void* copy_object_no_checks (void *obj);
+static void copy_object (void **obj_slot);
+static void* get_chunk_freelist (PinnedChunk *chunk, int slot);
+static PinnedChunk* alloc_pinned_chunk (void);
static void free_large_object (LOSObject *obj);
-static void free_major_section (GCMemSection *section);
-static void to_space_expand (void);
+static void sort_addresses (void **array, int size);
+static void drain_gray_stack (void);
+static void finish_gray_stack (char *start_addr, char *end_addr, int generation);
static void mono_gc_register_disappearing_link (MonoObject *obj, void **link, gboolean track);
void describe_ptr (char *ptr);
-void check_consistency (void);
-char* check_object (char *start);
+static void check_consistency (void);
+static void check_section_scan_starts (GCMemSection *section);
+static void check_scan_starts (void);
+static void check_for_xdomain_refs (void);
+static void dump_occupied (char *start, char *end, char *section_start);
+static void dump_section (GCMemSection *section, const char *type);
+static void dump_heap (const char *type, int num, const char *reason);
+static void commit_stats (int generation);
+static void report_pinned_chunk (PinnedChunk *chunk, int seq);
void mono_gc_scan_for_specific_ref (MonoObject *key);
+static void init_stats (void);
+
+static int mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end);
+static void clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end);
+static void null_ephemerons_for_domain (MonoDomain *domain);
+
+//#define BINARY_PROTOCOL
+#include "sgen-protocol.c"
+#include "sgen-pinning.c"
+#include "sgen-pinning-stats.c"
+#include "sgen-gray.c"
+
/*
* ######################################################################
* ######## GC descriptors
#define VECTOR_INFO_SHIFT 14
#define VECTOR_ELSIZE_SHIFT 3
#define LARGE_BITMAP_SIZE (GC_BITS_PER_WORD - LOW_TYPE_BITS)
-#define MAX_SMALL_SIZE ((1 << SMALL_BITMAP_SHIFT) - 1)
-#define SMALL_SIZE_MASK 0xfff8
#define MAX_ELEMENT_SIZE 0x3ff
-#define ELEMENT_SIZE_MASK (0x3ff << LOW_TYPE_BITS)
#define VECTOR_SUBTYPE_PTRFREE (DESC_TYPE_V_PTRFREE << VECTOR_INFO_SHIFT)
#define VECTOR_SUBTYPE_REFS (DESC_TYPE_V_REFS << VECTOR_INFO_SHIFT)
#define VECTOR_SUBTYPE_RUN_LEN (DESC_TYPE_V_RUN_LEN << VECTOR_INFO_SHIFT)
#define VECTOR_SUBTYPE_BITMAP (DESC_TYPE_V_BITMAP << VECTOR_INFO_SHIFT)
-#define ALLOC_ALIGN 8
-
/* Root bitmap descriptors are simpler: the lower three bits describe the type
* and we either have 30/62 bitmap bits or nibble-based run-length,
static gsize* complex_descriptors = NULL;
static int complex_descriptors_size = 0;
static int complex_descriptors_next = 0;
-static MonoGCMarkFunc user_descriptors [MAX_USER_DESCRIPTORS];
+static MonoGCRootMarkFunc user_descriptors [MAX_USER_DESCRIPTORS];
static int user_descriptors_next = 0;
static int
/* helper macros to scan and traverse objects, macros because we reuse them in many functions */
#define STRING_SIZE(size,str) do { \
- (size) = sizeof (MonoString) + 2 * mono_string_length ((MonoString*)(str)) + 2; \
+ (size) = sizeof (MonoString) + 2 * mono_string_length_fast ((MonoString*)(str)) + 2; \
(size) += (ALLOC_ALIGN - 1); \
(size) &= ~(ALLOC_ALIGN - 1); \
} while (0)
int mbwords = (*mbitmap_data++) - 1; \
int el_size = mono_array_element_size (((MonoObject*)(obj))->vtable->klass); \
char *e_start = (char*)(obj) + G_STRUCT_OFFSET (MonoArray, vector); \
- char *e_end = e_start + el_size * mono_array_length ((MonoArray*)(obj)); \
+ char *e_end = e_start + el_size * mono_array_length_fast ((MonoArray*)(obj)); \
if (0) { \
MonoObject *myobj = (MonoObject*)start; \
g_print ("found %d at %p (0x%zx): %s.%s\n", mbwords, (obj), (vt)->desc, myobj->vtable->klass->name_space, myobj->vtable->klass->name); \
int etype = (vt)->desc & 0xc000; \
if (etype == (DESC_TYPE_V_REFS << 14)) { \
void **p = (void**)((char*)(obj) + G_STRUCT_OFFSET (MonoArray, vector)); \
- void **end_refs = (void**)((char*)p + el_size * mono_array_length ((MonoArray*)(obj))); \
+ void **end_refs = (void**)((char*)p + el_size * mono_array_length_fast ((MonoArray*)(obj))); \
/* Note: this code can handle also arrays of struct with only references in them */ \
while (p < end_refs) { \
HANDLE_PTR (p, (obj)); \
int offset = ((vt)->desc >> 16) & 0xff; \
int num_refs = ((vt)->desc >> 24) & 0xff; \
char *e_start = (char*)(obj) + G_STRUCT_OFFSET (MonoArray, vector); \
- char *e_end = e_start + el_size * mono_array_length ((MonoArray*)(obj)); \
+ char *e_end = e_start + el_size * mono_array_length_fast ((MonoArray*)(obj)); \
while (e_start < e_end) { \
void **p = (void**)e_start; \
int i; \
} \
} else if (etype == DESC_TYPE_V_BITMAP << 14) { \
char *e_start = (char*)(obj) + G_STRUCT_OFFSET (MonoArray, vector); \
- char *e_end = e_start + el_size * mono_array_length ((MonoArray*)(obj)); \
+ char *e_end = e_start + el_size * mono_array_length_fast ((MonoArray*)(obj)); \
while (e_start < e_end) { \
void **p = (void**)e_start; \
gsize _bmap = (vt)->desc >> 16; \
} \
} while (0)
-#define COUNT_OBJECT_TYPES do { \
- switch (desc & 0x7) { \
- case DESC_TYPE_STRING: type_str++; break; \
- case DESC_TYPE_RUN_LENGTH: type_rlen++; break; \
- case DESC_TYPE_ARRAY: case DESC_TYPE_VECTOR: type_vector++; break; \
- case DESC_TYPE_SMALL_BITMAP: type_bitmap++; break; \
- case DESC_TYPE_LARGE_BITMAP: type_lbit++; break; \
- case DESC_TYPE_COMPLEX: type_complex++; break; \
- case DESC_TYPE_COMPLEX_ARR: type_complex++; break; \
- default: g_assert_not_reached (); \
- } \
- } while (0)
-
-
-/*
- * ######################################################################
- * ######## Detecting and removing garbage.
- * ######################################################################
- * This section of code deals with detecting the objects no longer in use
- * and reclaiming the memory.
- */
-
-#if 0
-static mword new_obj_references = 0;
-static mword obj_references_checked = 0;
-
-#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj) do { \
- if (*(ptr) && (char*)*(ptr) >= nursery_start && (char*)*(ptr) < nursery_next) { \
- new_obj_references++; \
- /*printf ("bogus ptr %p found at %p in object %p (%s.%s)\n", *(ptr), (ptr), o, o->vtable->klass->name_space, o->vtable->klass->name);*/ \
- } else { \
- obj_references_checked++; \
- } \
- } while (0)
-
-static void __attribute__((noinline))
-scan_area (char *start, char *end)
-{
- GCVTable *vt;
- int type_str = 0, type_rlen = 0, type_bitmap = 0, type_vector = 0, type_lbit = 0, type_complex = 0;
- new_obj_references = 0;
- obj_references_checked = 0;
- while (start < end) {
- if (!*(void**)start) {
- start += sizeof (void*); /* should be ALLOC_ALIGN, really */
- continue;
- }
- vt = (GCVTable*)LOAD_VTABLE (start);
- DEBUG (8, fprintf (gc_debug_file, "Scanning object %p, vtable: %p (%s)\n", start, vt, vt->klass->name));
- if (0) {
- MonoObject *obj = (MonoObject*)start;
- g_print ("found at %p (0x%zx): %s.%s\n", start, vt->desc, obj->vtable->klass->name_space, obj->vtable->klass->name);
- }
-
-#define SCAN_OBJECT_ACTION COUNT_OBJECT_TYPES
-#include "sgen-scan-object.h"
- }
- /*printf ("references to new nursery %p-%p (size: %dk): %d, checked: %d\n", old_start, end, (end-old_start)/1024, new_obj_references, obj_references_checked);
- printf ("\tstrings: %d, runl: %d, vector: %d, bitmaps: %d, lbitmaps: %d, complex: %d\n",
- type_str, type_rlen, type_vector, type_bitmap, type_lbit, type_complex);*/
-}
-#endif
+#include "sgen-major-copying.c"
+//#include "sgen-marksweep.c"
static gboolean
is_xdomain_ref_allowed (gpointer *ptr, char *obj, MonoDomain *domain)
}
static void
-scan_area_for_specific_ref (char *start, char *end, MonoObject *key)
+scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data)
{
while (start < end) {
+ size_t size;
if (!*(void**)start) {
start += sizeof (void*); /* should be ALLOC_ALIGN, really */
continue;
}
- start = scan_object_for_specific_ref (start, key);
+ size = safe_object_get_size ((MonoObject*) start);
+ size += ALLOC_ALIGN - 1;
+ size &= ~(ALLOC_ALIGN - 1);
+
+ callback (start, size, data);
+
+ start += size;
}
}
static void
-scan_pinned_object_for_specific_ref_callback (PinnedChunk *chunk, char *obj, size_t size, MonoObject *key)
+scan_object_for_specific_ref_callback (char *obj, size_t size, MonoObject *key)
{
scan_object_for_specific_ref (obj, key);
}
static MonoObject *check_key = NULL;
static RootRecord *check_root = NULL;
-static void*
-check_root_obj_specific_ref_from_marker (void *obj)
+static void
+check_root_obj_specific_ref_from_marker (void **obj)
{
- check_root_obj_specific_ref (check_root, check_key, obj);
- return obj;
+ check_root_obj_specific_ref (check_root, check_key, *obj);
}
static void
break;
}
case ROOT_DESC_USER: {
- MonoGCMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
+ MonoGCRootMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
marker (start_root, check_root_obj_specific_ref_from_marker);
break;
}
void
mono_gc_scan_for_specific_ref (MonoObject *key)
{
- GCMemSection *section;
LOSObject *bigobj;
RootRecord *root;
int i;
- for (section = section_list; section; section = section->block.next)
- scan_area_for_specific_ref (section->data, section->end_data, key);
+ scan_area_with_callback (nursery_section->data, nursery_section->end_data,
+ (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
+
+ major_iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
scan_object_for_specific_ref (bigobj->data, key);
- scan_pinned_objects ((ScanPinnedObjectCallbackFunc)scan_pinned_object_for_specific_ref_callback, key);
-
scan_roots_for_specific_ref (key, ROOT_TYPE_NORMAL);
scan_roots_for_specific_ref (key, ROOT_TYPE_WBARRIER);
}
}
-//#define BINARY_PROTOCOL
-#include "sgen-protocol.c"
+/* Clear all remaining nursery fragments */
+static void
+clear_nursery_fragments (char *next)
+{
+ Fragment *frag;
+ if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
+ g_assert (next <= nursery_frag_real_end);
+ memset (next, 0, nursery_frag_real_end - next);
+ for (frag = nursery_fragments; frag; frag = frag->next) {
+ memset (frag->fragment_start, 0, frag->fragment_end - frag->fragment_start);
+ }
+ }
+}
static gboolean
need_remove_object_for_domain (char *start, MonoDomain *domain)
{
if (mono_object_domain (start) == domain) {
- DEBUG (1, fprintf (gc_debug_file, "Need to cleanup object %p\n", start));
+ DEBUG (4, fprintf (gc_debug_file, "Need to cleanup object %p\n", start));
binary_protocol_cleanup (start, (gpointer)LOAD_VTABLE (start), safe_object_get_size ((MonoObject*)start));
return TRUE;
}
/* The server could already have been zeroed out, so
we need to check for that, too. */
if (server && (!LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
- DEBUG (1, fprintf (gc_debug_file, "Cleaning up remote pointer in %p to object %p\n",
+ DEBUG (4, fprintf (gc_debug_file, "Cleaning up remote pointer in %p to object %p\n",
start, server));
((MonoRealProxy*)start)->unwrapped_server = NULL;
}
}
}
-static void __attribute__((noinline))
-scan_area_for_domain (MonoDomain *domain, char *start, char *end)
-{
- GCVTable *vt;
- gboolean remove;
-
- while (start < end) {
- if (!*(void**)start) {
- start += sizeof (void*); /* should be ALLOC_ALIGN, really */
- continue;
- }
- vt = (GCVTable*)LOAD_VTABLE (start);
- process_object_for_domain_clearing (start, domain);
- remove = need_remove_object_for_domain (start, domain);
- if (remove && ((MonoObject*)start)->synchronisation) {
- void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)start);
- if (dislink)
- mono_gc_register_disappearing_link (NULL, dislink, FALSE);
- }
-
-#define SCAN_OBJECT_NOSCAN
-#define SCAN_OBJECT_ACTION do { \
- if (remove) memset (start, 0, skip_size); \
- } while (0)
-#include "sgen-scan-object.h"
- }
-}
-
static MonoDomain *check_domain = NULL;
-static void*
-check_obj_not_in_domain (void *o)
+static void
+check_obj_not_in_domain (void **o)
{
- g_assert (((MonoObject*)o)->vtable->domain != check_domain);
- return o;
+ g_assert (((MonoObject*)(*o))->vtable->domain != check_domain);
}
static void
break;
}
case ROOT_DESC_USER: {
- MonoGCMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
+ MonoGCRootMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
marker (start_root, check_obj_not_in_domain);
break;
}
}
static void
-clear_domain_process_pinned_object_callback (PinnedChunk *chunk, char *obj, size_t size, MonoDomain *domain)
+scan_pinned_object_for_xdomain_refs_callback (char *obj, size_t size, gpointer dummy)
{
- process_object_for_domain_clearing (obj, domain);
+ scan_object_for_xdomain_refs (obj);
}
static void
-clear_domain_free_pinned_object_callback (PinnedChunk *chunk, char *obj, size_t size, MonoDomain *domain)
+check_for_xdomain_refs (void)
{
- if (need_remove_object_for_domain (obj, domain))
- free_pinned_object (chunk, obj, size);
+ LOSObject *bigobj;
+
+ scan_area_for_xdomain_refs (nursery_section->data, nursery_section->end_data);
+
+ major_iterate_objects (TRUE, TRUE, scan_pinned_object_for_xdomain_refs_callback, NULL);
+
+ for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
+ scan_object_for_xdomain_refs (bigobj->data);
}
-static void
-scan_pinned_object_for_xdomain_refs_callback (PinnedChunk *chunk, char *obj, size_t size, gpointer dummy)
+static gboolean
+clear_domain_process_object (char *obj, MonoDomain *domain)
{
- scan_object_for_xdomain_refs (obj);
+ gboolean remove;
+
+ process_object_for_domain_clearing (obj, domain);
+ remove = need_remove_object_for_domain (obj, domain);
+
+ if (remove && ((MonoObject*)obj)->synchronisation) {
+ void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
+ if (dislink)
+ mono_gc_register_disappearing_link (NULL, dislink, FALSE);
+ }
+
+ return remove;
}
static void
-check_for_xdomain_refs (void)
+clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
{
- GCMemSection *section;
- LOSObject *bigobj;
+ if (clear_domain_process_object (obj, domain))
+ memset (obj, 0, size);
+}
- for (section = section_list; section; section = section->block.next)
- scan_area_for_xdomain_refs (section->data, section->end_data);
+static void
+clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
+{
+ clear_domain_process_object (obj, domain);
+}
- for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
- scan_object_for_xdomain_refs (bigobj->data);
+static void
+clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
+{
+ if (need_remove_object_for_domain (obj, domain))
+ major_free_non_pinned_object (obj, size);
+}
- scan_pinned_objects (scan_pinned_object_for_xdomain_refs_callback, NULL);
+static void
+clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
+{
+ if (need_remove_object_for_domain (obj, domain))
+ free_pinned_object (obj, size);
}
/*
void
mono_gc_clear_domain (MonoDomain * domain)
{
- GCMemSection *section;
LOSObject *bigobj, *prev;
- Fragment *frag;
int i;
LOCK_GC;
- /* Clear all remaining nursery fragments */
- if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
- g_assert (nursery_next <= nursery_frag_real_end);
- memset (nursery_next, 0, nursery_frag_real_end - nursery_next);
- for (frag = nursery_fragments; frag; frag = frag->next) {
- memset (frag->fragment_start, 0, frag->fragment_end - frag->fragment_start);
- }
- }
+
+ clear_nursery_fragments (nursery_next);
if (xdomain_checks && domain != mono_get_root_domain ()) {
scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
check_for_xdomain_refs ();
}
- for (section = section_list; section; section = section->block.next) {
- scan_area_for_domain (domain, section->data, section->end_data);
- }
+ scan_area_with_callback (nursery_section->data, nursery_section->end_data,
+ (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain);
+
+ /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
+ to memory returned to the OS.*/
+ null_ephemerons_for_domain (domain);
+
+ for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
+ null_links_for_domain (domain, i);
- /* We need two passes over pinned and large objects because
- freeing such an object gives its memory back to the OS (in
- the case of large objects) or obliterates its vtable
- (pinned objects), but we might need to dereference a
- pointer from an object to another object if the first
- object is a proxy. */
- scan_pinned_objects ((ScanPinnedObjectCallbackFunc)clear_domain_process_pinned_object_callback, domain);
+ /* We need two passes over major and large objects because
+ freeing such objects might give their memory back to the OS
+ (in the case of large objects) or obliterate its vtable
+ (pinned objects with major-copying or pinned and non-pinned
+ objects with major-mark&sweep), but we might need to
+ dereference a pointer from an object to another object if
+ the first object is a proxy. */
+ major_iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
- process_object_for_domain_clearing (bigobj->data, domain);
+ clear_domain_process_object (bigobj->data, domain);
prev = NULL;
for (bigobj = los_object_list; bigobj;) {
else
los_object_list = bigobj->next;
bigobj = bigobj->next;
- DEBUG (1, fprintf (gc_debug_file, "Freeing large object %p\n",
+ DEBUG (4, fprintf (gc_debug_file, "Freeing large object %p\n",
bigobj->data));
free_large_object (to_free);
continue;
prev = bigobj;
bigobj = bigobj->next;
}
- scan_pinned_objects ((ScanPinnedObjectCallbackFunc)clear_domain_free_pinned_object_callback, domain);
-
- for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
- null_links_for_domain (domain, i);
+ major_iterate_objects (TRUE, FALSE, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
+ major_iterate_objects (FALSE, TRUE, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
UNLOCK_GC;
}
+static void
+global_remset_cache_clear (void)
+{
+ memset (global_remset_cache, 0, sizeof (global_remset_cache));
+}
+
+/*
+ * Tries to check if a given remset location was already added to the global remset.
+ *
+ * A 2 entry, LRU cache of recently seen remset locations.
+ *
+ * It's hand-coded instead of done using loops to reduce the number of memory references on cache hit.
+ *
+ * Returns TRUE if the element was added.
+ */
+static gboolean
+global_remset_location_was_not_added (gpointer ptr)
+{
+
+ gpointer first = global_remset_cache [0], second;
+ if (first == ptr) {
+ HEAVY_STAT (++stat_global_remsets_discarded);
+ return FALSE;
+ }
+
+ second = global_remset_cache [1];
+
+ if (second == ptr) {
+ /*Move the second to the front*/
+ global_remset_cache [0] = second;
+ global_remset_cache [1] = first;
+
+ HEAVY_STAT (++stat_global_remsets_discarded);
+ return FALSE;
+ }
+
+ global_remset_cache [0] = second;
+ global_remset_cache [1] = ptr;
+ return TRUE;
+}
+
/*
* add_to_global_remset:
*
* a minor collection. This can happen if the objects they point to are pinned.
*/
static void
-add_to_global_remset (gpointer ptr, gboolean root)
+add_to_global_remset (gpointer ptr)
{
RememberedSet *rs;
+ g_assert (!ptr_in_nursery (ptr) && ptr_in_nursery (*(gpointer*)ptr));
+
+ if (!global_remset_location_was_not_added (ptr))
+ return;
+
DEBUG (8, fprintf (gc_debug_file, "Adding global remset for %p\n", ptr));
binary_protocol_global_remset (ptr, *(gpointer*)ptr, (gpointer)LOAD_VTABLE (*(gpointer*)ptr));
- g_assert (!root);
- g_assert (!ptr_in_nursery (ptr) && ptr_in_nursery (*(gpointer*)ptr));
-
HEAVY_STAT (++stat_global_remsets_added);
/*
* To avoid uncontrolled growth of the global remset, only add each pointer once.
*/
if (global_remset->store_next + 3 < global_remset->end_set) {
- if (root) {
- *(global_remset->store_next++) = (mword)ptr | REMSET_OTHER;
- *(global_remset->store_next++) = (mword)REMSET_ROOT_LOCATION;
- } else {
- *(global_remset->store_next++) = (mword)ptr;
- }
+ *(global_remset->store_next++) = (mword)ptr;
return;
}
rs = alloc_remset (global_remset->end_set - global_remset->data, NULL);
rs->next = global_remset;
global_remset = rs;
- if (root) {
- *(global_remset->store_next++) = (mword)ptr | REMSET_OTHER;
- *(global_remset->store_next++) = (mword)REMSET_ROOT_LOCATION;
- } else {
- *(global_remset->store_next++) = (mword)ptr;
- }
+ *(global_remset->store_next++) = (mword)ptr;
{
int global_rs_size = 0;
}
}
-#define MOVED_OBJECTS_NUM 64
-static void *moved_objects [MOVED_OBJECTS_NUM];
-static int moved_objects_idx = 0;
-
-#include "sgen-gray.c"
-
/*
- * This is how the copying happens from the nursery to the old generation.
- * We assume that at this time all the pinned objects have been identified and
- * marked as such.
- * We run scan_object() for each pinned object so that each referenced
- * objects if possible are copied. The new gray objects created can have
- * scan_object() run on them right away, too.
- * Then we run copy_object() for the precisely tracked roots. At this point
- * all the roots are either gray or black. We run scan_object() on the gray
- * objects until no more gray objects are created.
- * At the end of the process we walk again the pinned list and we unmark
- * the pinned flag. As we go we also create the list of free space for use
- * in the next allocation runs.
- *
- * We need to remember objects from the old generation that point to the new one
- * (or just addresses?).
- *
- * copy_object could be made into a macro once debugged (use inline for now).
+ * FIXME: allocate before calling this function and pass the
+ * destination address.
*/
-
-static char* __attribute__((noinline))
-copy_object (char *obj, char *from_space_start, char *from_space_end)
+static void*
+copy_object_no_checks (void *obj)
{
- static void *copy_labels [] = { &&LAB_0, &&LAB_1, &&LAB_2, &&LAB_3, &&LAB_4, &&LAB_5, &&LAB_6, &&LAB_7, &&LAB_8 };
+ static const void *copy_labels [] = { &&LAB_0, &&LAB_1, &&LAB_2, &&LAB_3, &&LAB_4, &&LAB_5, &&LAB_6, &&LAB_7, &&LAB_8 };
- char *forwarded;
mword objsize;
- MonoVTable *vt;
+ char *destination;
+ MonoVTable *vt = ((MonoObject*)obj)->vtable;
+ gboolean has_references = vt->klass->has_references;
- HEAVY_STAT (++num_copy_object_called);
+ objsize = safe_object_get_size ((MonoObject*)obj);
+ objsize += ALLOC_ALIGN - 1;
+ objsize &= ~(ALLOC_ALIGN - 1);
- if (!(obj >= from_space_start && obj < from_space_end)) {
- DEBUG (9, fprintf (gc_debug_file, "Not copying %p because it's not in from space (%p-%p)\n",
- obj, from_space_start, from_space_end));
- HEAVY_STAT (++stat_copy_object_failed_from_space);
- return obj;
- }
-
- DEBUG (9, fprintf (gc_debug_file, "Precise copy of %p", obj));
-
- /*
- * obj must belong to one of:
- *
- * 1. the nursery
- * 2. the LOS
- * 3. a pinned chunk
- * 4. a non-to-space section of the major heap
- * 5. a to-space section of the major heap
- *
- * In addition, objects in 1, 2 and 4 might also be pinned.
- * Objects in 1 and 4 might be forwarded.
- *
- * Before we can copy the object we must make sure that we are
- * allowed to, i.e. that the object not pinned, not already
- * forwarded and doesn't belong to the LOS, a pinned chunk, or
- * a to-space section.
- *
- * We are usually called for to-space objects (5) when we have
- * two remset entries for the same reference. The first entry
- * copies the object and updates the reference and the second
- * calls us with the updated reference that points into
- * to-space. There might also be other circumstances where we
- * get to-space objects.
- */
-
- if ((forwarded = object_is_forwarded (obj))) {
- g_assert (((MonoVTable*)LOAD_VTABLE(obj))->gc_descr);
- DEBUG (9, fprintf (gc_debug_file, " (already forwarded to %p)\n", forwarded));
- HEAVY_STAT (++stat_copy_object_failed_forwarded);
- return forwarded;
- }
- if (object_is_pinned (obj)) {
- g_assert (((MonoVTable*)LOAD_VTABLE(obj))->gc_descr);
- DEBUG (9, fprintf (gc_debug_file, " (pinned, no change)\n"));
- HEAVY_STAT (++stat_copy_object_failed_pinned);
- return obj;
- }
-
- objsize = safe_object_get_size ((MonoObject*)obj);
- objsize += ALLOC_ALIGN - 1;
- objsize &= ~(ALLOC_ALIGN - 1);
+ DEBUG (9, g_assert (vt->klass->inited));
+ MAJOR_GET_COPY_OBJECT_SPACE (destination, objsize, has_references);
- if (ptr_in_nursery (obj))
- goto copy;
-
- /*
- * At this point we know obj is not pinned, not forwarded and
- * belongs to 2, 3, 4, or 5.
- *
- * LOS object (2) are simple, at least until we always follow
- * the rule: if objsize > MAX_SMALL_OBJ_SIZE, pin the object
- * and return it. At the end of major collections, we walk
- * the los list and if the object is pinned, it is marked,
- * otherwise it can be freed.
- *
- * Pinned chunks (3) and major heap sections (4, 5) both
- * reside in blocks, which are always aligned, so once we've
- * eliminated LOS objects, we can just access the block and
- * see whether it's a pinned chunk or a major heap section.
- */
- if (G_UNLIKELY (objsize > MAX_SMALL_OBJ_SIZE || obj_is_from_pinned_alloc (obj))) {
- DEBUG (9, fprintf (gc_debug_file, " (marked LOS/Pinned %p (%s), size: %zd)\n", obj, safe_name (obj), objsize));
- binary_protocol_pin (obj, (gpointer)LOAD_VTABLE (obj), safe_object_get_size ((MonoObject*)obj));
- pin_object (obj);
- HEAVY_STAT (++stat_copy_object_failed_large_pinned);
- return obj;
- }
-
- /*
- * Now we know the object is in a major heap section. All we
- * need to do is check whether it's already in to-space (5) or
- * not (4).
- */
- if (MAJOR_SECTION_FOR_OBJECT (obj)->is_to_space) {
- g_assert (objsize <= MAX_SMALL_OBJ_SIZE);
- DEBUG (9, fprintf (gc_debug_file, " (already copied)\n"));
- HEAVY_STAT (++stat_copy_object_failed_to_space);
- return obj;
- }
-
- copy:
- DEBUG (9, fprintf (gc_debug_file, " (to %p, %s size: %zd)\n", to_space_bumper, ((MonoObject*)obj)->vtable->klass->name, objsize));
- binary_protocol_copy (obj, to_space_bumper, ((MonoObject*)obj)->vtable, objsize);
-
- HEAVY_STAT (++num_objects_copied);
-
- /* Make sure we have enough space available */
- if (to_space_bumper + objsize > to_space_top) {
- to_space_expand ();
- g_assert (to_space_bumper + objsize <= to_space_top);
- }
+ DEBUG (9, fprintf (gc_debug_file, " (to %p, %s size: %zd)\n", destination, ((MonoObject*)obj)->vtable->klass->name, objsize));
+ binary_protocol_copy (obj, destination, ((MonoObject*)obj)->vtable, objsize);
if (objsize <= sizeof (gpointer) * 8) {
- mword *dest = (mword*)to_space_bumper;
+ mword *dest = (mword*)destination;
goto *copy_labels [objsize / sizeof (gpointer)];
LAB_8:
(dest) [7] = ((mword*)obj) [7];
{
int ecx;
char* esi = obj;
- char* edi = to_space_bumper;
+ char* edi = destination;
__asm__ __volatile__(
"rep; movsl"
: "=&c" (ecx), "=&D" (edi), "=&S" (esi)
);
}
#else
- memcpy (to_space_bumper, obj, objsize);
+ memcpy (destination, obj, objsize);
#endif
}
/* adjust array->bounds */
- vt = ((MonoObject*)obj)->vtable;
- g_assert (vt->gc_descr);
+ DEBUG (9, g_assert (vt->gc_descr));
if (G_UNLIKELY (vt->rank && ((MonoArray*)obj)->bounds)) {
- MonoArray *array = (MonoArray*)to_space_bumper;
- array->bounds = (MonoArrayBounds*)((char*)to_space_bumper + ((char*)((MonoArray*)obj)->bounds - (char*)obj));
+ MonoArray *array = (MonoArray*)destination;
+ array->bounds = (MonoArrayBounds*)((char*)destination + ((char*)((MonoArray*)obj)->bounds - (char*)obj));
DEBUG (9, fprintf (gc_debug_file, "Array instance %p: size: %zd, rank: %d, length: %d\n", array, objsize, vt->rank, mono_array_length (array)));
}
/* set the forwarding pointer */
- forward_object (obj, to_space_bumper);
+ forward_object (obj, destination);
if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_GC_MOVES)) {
if (moved_objects_idx == MOVED_OBJECTS_NUM) {
mono_profiler_gc_moves (moved_objects, moved_objects_idx);
moved_objects_idx = 0;
}
moved_objects [moved_objects_idx++] = obj;
- moved_objects [moved_objects_idx++] = to_space_bumper;
- }
- obj = to_space_bumper;
- to_space_section->scan_starts [((char*)obj - (char*)to_space_section->data)/SCAN_START_SIZE] = obj;
- to_space_bumper += objsize;
- DEBUG (9, fprintf (gc_debug_file, "Enqueuing gray object %p (%s)\n", obj, safe_name (obj)));
- gray_object_enqueue (obj);
- DEBUG (8, g_assert (to_space_bumper <= to_space_top));
+ moved_objects [moved_objects_idx++] = destination;
+ }
+ obj = destination;
+ if (has_references) {
+ DEBUG (9, fprintf (gc_debug_file, "Enqueuing gray object %p (%s)\n", obj, safe_name (obj)));
+ GRAY_OBJECT_ENQUEUE (obj);
+ }
return obj;
}
+/*
+ * This is how the copying happens from the nursery to the old generation.
+ * We assume that at this time all the pinned objects have been identified and
+ * marked as such.
+ * We run scan_object() for each pinned object so that each referenced
+ * objects if possible are copied. The new gray objects created can have
+ * scan_object() run on them right away, too.
+ * Then we run copy_object() for the precisely tracked roots. At this point
+ * all the roots are either gray or black. We run scan_object() on the gray
+ * objects until no more gray objects are created.
+ * At the end of the process we walk again the pinned list and we unmark
+ * the pinned flag. As we go we also create the list of free space for use
+ * in the next allocation runs.
+ *
+ * We need to remember objects from the old generation that point to the new one
+ * (or just addresses?).
+ *
+ * copy_object could be made into a macro once debugged (use inline for now).
+ */
+
+static void __attribute__((noinline))
+copy_object (void **obj_slot)
+{
+ char *forwarded;
+ char *obj = *obj_slot;
+
+ DEBUG (9, g_assert (current_collection_generation == GENERATION_NURSERY));
+
+ HEAVY_STAT (++stat_copy_object_called_nursery);
+
+ if (!ptr_in_nursery (obj)) {
+ HEAVY_STAT (++stat_nursery_copy_object_failed_from_space);
+ return;
+ }
+
+ DEBUG (9, fprintf (gc_debug_file, "Precise copy of %p from %p", obj, obj_slot));
+
+ /*
+ * Before we can copy the object we must make sure that we are
+ * allowed to, i.e. that the object not pinned or not already
+ * forwarded.
+ */
+
+ if ((forwarded = object_is_forwarded (obj))) {
+ DEBUG (9, g_assert (((MonoVTable*)LOAD_VTABLE(obj))->gc_descr));
+ DEBUG (9, fprintf (gc_debug_file, " (already forwarded to %p)\n", forwarded));
+ HEAVY_STAT (++stat_nursery_copy_object_failed_forwarded);
+ *obj_slot = forwarded;
+ return;
+ }
+ if (object_is_pinned (obj)) {
+ DEBUG (9, g_assert (((MonoVTable*)LOAD_VTABLE(obj))->gc_descr));
+ DEBUG (9, fprintf (gc_debug_file, " (pinned, no change)\n"));
+ HEAVY_STAT (++stat_nursery_copy_object_failed_pinned);
+ return;
+ }
+
+ HEAVY_STAT (++stat_objects_copied_nursery);
+
+ *obj_slot = copy_object_no_checks (obj);
+}
+
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) do { \
void *__old = *(ptr); \
void *__copy; \
if (__old) { \
- *(ptr) = __copy = copy_object (__old, from_start, from_end); \
+ copy_object ((ptr)); \
+ __copy = *(ptr); \
DEBUG (9, if (__old != __copy) fprintf (gc_debug_file, "Overwrote field at %p with %p (was: %p)\n", (ptr), *(ptr), __old)); \
if (G_UNLIKELY (ptr_in_nursery (__copy) && !ptr_in_nursery ((ptr)))) \
- add_to_global_remset ((ptr), FALSE); \
+ add_to_global_remset ((ptr)); \
} \
} while (0)
* Returns a pointer to the end of the object.
*/
static char*
-scan_object (char *start, char* from_start, char* from_end)
+scan_object (char *start)
{
#include "sgen-scan-object.h"
- return start;
-}
-
-/*
- * drain_gray_stack:
- *
- * Scan objects in the gray stack until the stack is empty. This should be called
- * frequently after each object is copied, to achieve better locality and cache
- * usage.
- */
-static void inline
-drain_gray_stack (char *start_addr, char *end_addr)
-{
- char *obj;
+ HEAVY_STAT (++stat_scan_object_called_nursery);
- while ((obj = gray_object_dequeue ())) {
- DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", obj, safe_name (obj)));
- scan_object (obj, start_addr, end_addr);
- }
+ return start;
}
/*
return NULL;
}
-#include "sgen-pinning-stats.c"
+#undef HANDLE_PTR
+#define HANDLE_PTR(ptr,obj) do { \
+ void *__old = *(ptr); \
+ void *__copy; \
+ if (__old) { \
+ major_copy_or_mark_object ((ptr)); \
+ __copy = *(ptr); \
+ DEBUG (9, if (__old != __copy) fprintf (gc_debug_file, "Overwrote field at %p with %p (was: %p)\n", (ptr), *(ptr), __old)); \
+ if (G_UNLIKELY (ptr_in_nursery (__copy) && !ptr_in_nursery ((ptr)))) \
+ add_to_global_remset ((ptr)); \
+ } \
+ } while (0)
+
+static char*
+major_scan_object (char *start)
+{
+#include "sgen-scan-object.h"
+
+ HEAVY_STAT (++stat_scan_object_called_major);
+
+ return start;
+}
+
+/*
+ * drain_gray_stack:
+ *
+ * Scan objects in the gray stack until the stack is empty. This should be called
+ * frequently after each object is copied, to achieve better locality and cache
+ * usage.
+ */
+static void inline
+drain_gray_stack (void)
+{
+ char *obj;
+
+ if (current_collection_generation == GENERATION_NURSERY) {
+ for (;;) {
+ GRAY_OBJECT_DEQUEUE (obj);
+ if (!obj)
+ break;
+ DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", obj, safe_name (obj)));
+ scan_object (obj);
+ }
+ } else {
+ for (;;) {
+ GRAY_OBJECT_DEQUEUE (obj);
+ if (!obj)
+ break;
+ DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", obj, safe_name (obj)));
+ major_scan_object (obj);
+ }
+ }
+}
/*
* Addresses from start to end are already sorted. This function finds
DEBUG (4, fprintf (gc_debug_file, "Pinned object %p, vtable %p (%s), count %d\n", search_start, *(void**)search_start, safe_name (search_start), count));
binary_protocol_pin (search_start, (gpointer)LOAD_VTABLE (search_start), safe_object_get_size (search_start));
pin_object (search_start);
+ GRAY_OBJECT_ENQUEUE (search_start);
if (heap_dump_file)
pin_stats_register_object (search_start, last_obj_size);
definitely_pinned [count] = search_start;
return count;
}
-static void** pin_queue;
-static int pin_queue_size = 0;
-static int next_pin_slot = 0;
+static void
+pin_objects_in_section (GCMemSection *section)
+{
+ int start = section->pin_queue_start;
+ int end = section->pin_queue_end;
+ if (start != end) {
+ int reduced_to;
+ reduced_to = pin_objects_from_addresses (section, pin_queue + start, pin_queue + end,
+ section->data, section->next_data);
+ section->pin_queue_start = start;
+ section->pin_queue_end = start + reduced_to;
+ }
+}
static int
new_gap (int gap)
return gap;
}
-#if 0
-static int
-compare_addr (const void *a, const void *b)
-{
- return *(const void **)a - *(const void **)b;
-}
-#endif
-
-/* sort the addresses in array in increasing order */
+/* Sort the addresses in array in increasing order.
+ * Done using a by-the book heap sort. Which has decent and stable performance, is pretty cache efficient.
+ */
static void
sort_addresses (void **array, int size)
{
- /*
- * qsort is slower as predicted.
- * qsort (array, size, sizeof (gpointer), compare_addr);
- * return;
- */
- int gap = size;
- int swapped, end;
- while (TRUE) {
- int i;
- gap = new_gap (gap);
- swapped = FALSE;
- end = size - gap;
- for (i = 0; i < end; i++) {
- int j = i + gap;
- if (array [i] > array [j]) {
- void* val = array [i];
- array [i] = array [j];
- array [j] = val;
- swapped = TRUE;
- }
+ int i;
+ void *tmp;
+
+ for (i = 1; i < size; ++i) {
+ int child = i;
+ while (child > 0) {
+ int parent = (child - 1) / 2;
+
+ if (array [parent] >= array [child])
+ break;
+
+ tmp = array [parent];
+ array [parent] = array [child];
+ array [child] = tmp;
+
+ child = parent;
+ }
+ }
+
+ for (i = size - 1; i > 0; --i) {
+ int end, root;
+ tmp = array [i];
+ array [i] = array [0];
+ array [0] = tmp;
+
+ end = i - 1;
+ root = 0;
+
+ while (root * 2 + 1 <= end) {
+ int child = root * 2 + 1;
+
+ if (child < end && array [child] < array [child + 1])
+ ++child;
+ if (array [root] >= array [child])
+ break;
+
+ tmp = array [root];
+ array [root] = array [child];
+ array [child] = tmp;
+
+ root = child;
}
- if (gap == 1 && !swapped)
- break;
}
}
}
-static int
-optimized_pin_queue_search (void *addr)
-{
- int first = 0, last = next_pin_slot;
- while (first < last) {
- int middle = first + ((last - first) >> 1);
- if (addr <= pin_queue [middle])
- last = middle;
- else
- first = middle + 1;
- }
- g_assert (first == last);
- return first;
-}
-
-static void
-find_optimized_pin_queue_area (void *start, void *end, int *first, int *last)
-{
- *first = optimized_pin_queue_search (start);
- *last = optimized_pin_queue_search (end);
-}
-
-static void
-realloc_pin_queue (void)
-{
- int new_size = pin_queue_size? pin_queue_size + pin_queue_size/2: 1024;
- void **new_pin = get_internal_mem (sizeof (void*) * new_size, INTERNAL_MEM_PIN_QUEUE);
- memcpy (new_pin, pin_queue, sizeof (void*) * next_pin_slot);
- free_internal_mem (pin_queue, INTERNAL_MEM_PIN_QUEUE);
- pin_queue = new_pin;
- pin_queue_size = new_size;
- DEBUG (4, fprintf (gc_debug_file, "Reallocated pin queue to size: %d\n", new_size));
-}
-
-#include "sgen-pinning.c"
-
/*
* Scan the memory between start and end and queue values which could be pointers
* to the area between start_nursery and end_nursery for later consideration.
DEBUG (7, if (count) fprintf (gc_debug_file, "found %d potential pinned heap pointers\n", count));
}
-/*
- * If generation is 0, just mark objects in the nursery, the others we don't care,
- * since they are not going to move anyway.
- * There are different areas that are scanned for pinned pointers:
- * *) the thread stacks (when jit support is ready only the unmanaged frames)
- * *) the pinned handle table
- * *) the pinned roots
- *
- * Note: when we'll use a write barrier for old to new gen references, we need to
- * keep track of old gen objects that point to pinned new gen objects because in that
- * case the referenced object will be moved maybe at the next collection, but there
- * is no write in the old generation area where the pinned object is referenced
- * and we may not consider it as reachable.
- */
-static G_GNUC_UNUSED void
-mark_pinned_objects (int generation)
-{
-}
-
/*
* Debugging function: find in the conservative roots where @obj is being pinned.
*/
evacuate_pin_staging_area ();
}
-/* Copy function called from user defined mark functions */
-static char *user_copy_n_start;
-static char *user_copy_n_end;
-
-static void*
-user_copy (void *addr)
-{
- if (addr)
- return copy_object (addr, user_copy_n_start, user_copy_n_end);
- else
- return NULL;
-}
-
/*
* The memory area from start_root to end_root contains pointers to objects.
* Their position is precisely described by @desc (this means that the pointer
* This functions copies them to to_space updates them.
*/
static void
-precisely_scan_objects_from (void** start_root, void** end_root, char* n_start, char *n_end, mword desc)
+precisely_scan_objects_from (CopyOrMarkObjectFunc copy_func, void** start_root, void** end_root, char* n_start, char *n_end, mword desc)
{
switch (desc & ROOT_DESC_TYPE_MASK) {
case ROOT_DESC_BITMAP:
desc >>= ROOT_DESC_TYPE_SHIFT;
while (desc) {
if ((desc & 1) && *start_root) {
- *start_root = copy_object (*start_root, n_start, n_end);
+ copy_func (start_root);
DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", start_root, *start_root));
- drain_gray_stack (n_start, n_end);
+ drain_gray_stack ();
}
desc >>= 1;
start_root++;
void **objptr = start_run;
while (bmap) {
if ((bmap & 1) && *objptr) {
- *objptr = copy_object (*objptr, n_start, n_end);
+ copy_func (objptr);
DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", objptr, *objptr));
- drain_gray_stack (n_start, n_end);
+ drain_gray_stack ();
}
bmap >>= 1;
++objptr;
break;
}
case ROOT_DESC_USER: {
- MonoGCMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
-
- user_copy_n_start = n_start;
- user_copy_n_end = n_end;
- marker (start_root, user_copy);
+ MonoGCRootMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
+ marker (start_root, copy_func);
break;
}
case ROOT_DESC_RUN_LEN:
/* size must be a power of 2 */
static void*
-get_os_memory_aligned (mword size, gboolean activate)
+get_os_memory_aligned (mword size, mword alignment, gboolean activate)
{
/* Allocate twice the memory to be able to put the block on an aligned address */
- char *mem = get_os_memory (size * 2, activate);
+ char *mem = get_os_memory (size + alignment, activate);
char *aligned;
g_assert (mem);
- aligned = (char*)((mword)(mem + (size - 1)) & ~(size - 1));
- g_assert (aligned >= mem && aligned + size <= mem + size * 2 && !((mword)aligned & (size - 1)));
+ aligned = (char*)((mword)(mem + (alignment - 1)) & ~(alignment - 1));
+ g_assert (aligned >= mem && aligned + size <= mem + size + alignment && !((mword)aligned & (alignment - 1)));
if (aligned > mem)
free_os_memory (mem, aligned - mem);
- if (aligned + size < mem + size * 2)
- free_os_memory (aligned + size, (mem + size * 2) - (aligned + size));
+ if (aligned + size < mem + size + alignment)
+ free_os_memory (aligned + size, (mem + size + alignment) - (aligned + size));
return aligned;
}
g_assert (nursery_size == DEFAULT_NURSERY_SIZE);
alloc_size = nursery_size;
#ifdef ALIGN_NURSERY
- data = get_os_memory_aligned (alloc_size, TRUE);
+ data = get_os_memory_aligned (alloc_size, alloc_size, TRUE);
#else
data = get_os_memory (alloc_size, TRUE);
#endif
section->scan_starts = get_internal_mem (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS);
section->num_scan_start = scan_starts;
section->block.role = MEMORY_ROLE_GEN0;
-
- /* add to the section list */
- section->block.next = section_list;
- section_list = section;
+ section->block.next = NULL;
nursery_section = section;
}
static void
-scan_finalizer_entries (FinalizeEntry *list, char *start, char *end) {
+scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeEntry *list) {
FinalizeEntry *fin;
for (fin = list; fin; fin = fin->next) {
if (!fin->object)
continue;
DEBUG (5, fprintf (gc_debug_file, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object)));
- fin->object = copy_object (fin->object, start, end);
- }
-}
-
-/*
- * Update roots in the old generation. Since we currently don't have the
- * info from the write barriers, we just scan all the objects.
- */
-static G_GNUC_UNUSED void
-scan_old_generation (char *start, char* end)
-{
- GCMemSection *section;
- LOSObject *big_object;
- char *p;
-
- for (section = section_list; section; section = section->block.next) {
- if (section == nursery_section)
- continue;
- DEBUG (2, fprintf (gc_debug_file, "Scan of old section: %p-%p, size: %d\n", section->data, section->next_data, (int)(section->next_data - section->data)));
- /* we have to deal with zeroed holes in old generation (truncated strings ...) */
- p = section->data;
- while (p < section->next_data) {
- if (!*(void**)p) {
- p += ALLOC_ALIGN;
- continue;
- }
- DEBUG (8, fprintf (gc_debug_file, "Precise old object scan of %p (%s)\n", p, safe_name (p)));
- p = scan_object (p, start, end);
- }
+ copy_func (&fin->object);
}
- /* scan the old object space, too */
- for (big_object = los_object_list; big_object; big_object = big_object->next) {
- DEBUG (5, fprintf (gc_debug_file, "Scan of big object: %p (%s), size: %zd\n", big_object->data, safe_name (big_object->data), big_object->size));
- scan_object (big_object->data, start, end);
- }
- /* scan the list of objects ready for finalization */
- scan_finalizer_entries (fin_ready_list, start, end);
- scan_finalizer_entries (critical_fin_list, start, end);
}
static mword fragment_total = 0;
fragment_total += frag_size;
} else {
/* Clear unused fragments, pinning depends on this */
+		/*TODO place an int[] here instead of the memset if size justifies it*/
memset (frag_start, 0, frag_size);
}
}
-static int
-scan_needed_big_objects (char *start_addr, char *end_addr)
-{
- LOSObject *big_object;
- int count = 0;
- for (big_object = los_object_list; big_object; big_object = big_object->next) {
- if (!big_object->scanned && object_is_pinned (big_object->data)) {
- DEBUG (5, fprintf (gc_debug_file, "Scan of big object: %p (%s), size: %zd\n", big_object->data, safe_name (big_object->data), big_object->size));
- scan_object (big_object->data, start_addr, end_addr);
- drain_gray_stack (start_addr, end_addr);
- big_object->scanned = TRUE;
- count++;
- }
- }
- return count;
-}
-
static const char*
generation_name (int generation)
{
}
}
-static void
-new_to_space_section (void)
-{
- /* FIXME: if the current to_space_section is empty, we don't
- have to allocate a new one */
-
- to_space_section = alloc_major_section ();
- to_space_bumper = to_space_section->next_data;
- to_space_top = to_space_section->end_data;
-}
-
-static void
-to_space_set_next_data (void)
-{
- g_assert (to_space_bumper >= to_space_section->next_data && to_space_bumper <= to_space_section->end_data);
- to_space_section->next_data = to_space_bumper;
-}
-
-static void
-to_space_expand (void)
-{
- if (to_space_section) {
- g_assert (to_space_top == to_space_section->end_data);
- to_space_set_next_data ();
- }
-
- new_to_space_section ();
-}
-
-static void
-unset_to_space (void)
-{
- /* between collections the to_space_bumper is invalidated
- because degraded allocations might occur, so we set it to
- NULL, just to make it explicit */
- to_space_bumper = NULL;
-
- /* don't unset to_space_section if we implement the FIXME in
- new_to_space_section */
- to_space_section = NULL;
-}
-
-static gboolean
-object_is_in_to_space (char *obj)
-{
- mword objsize;
-
- /* nursery */
- if (ptr_in_nursery (obj))
- return FALSE;
-
- objsize = safe_object_get_size ((MonoObject*)obj);
- objsize += ALLOC_ALIGN - 1;
- objsize &= ~(ALLOC_ALIGN - 1);
-
- /* LOS */
- if (objsize > MAX_SMALL_OBJ_SIZE)
- return FALSE;
-
- /* pinned chunk */
- if (obj_is_from_pinned_alloc (obj))
- return FALSE;
-
- /* now we know it's in a major heap section */
- return MAJOR_SECTION_FOR_OBJECT (obj)->is_to_space;
-}
-
static void
finish_gray_stack (char *start_addr, char *end_addr, int generation)
{
TV_DECLARE (atv);
TV_DECLARE (btv);
- int fin_ready, bigo_scanned_num;
+ int fin_ready;
+ int ephemeron_rounds = 0;
+ CopyOrMarkObjectFunc copy_func = current_collection_generation == GENERATION_NURSERY ? copy_object : major_copy_or_mark_object;
/*
* We copied all the reachable objects. Now it's the time to copy
* To achieve better cache locality and cache usage, we drain the gray stack
* frequently, after each object is copied, and just finish the work here.
*/
- drain_gray_stack (start_addr, end_addr);
+ drain_gray_stack ();
TV_GETTIME (atv);
- //scan_old_generation (start_addr, end_addr);
DEBUG (2, fprintf (gc_debug_file, "%s generation done\n", generation_name (generation)));
/* walk the finalization queue and move also the objects that need to be
* finalized: use the finalized objects as new roots so the objects they depend
* that are fin-ready. Speedup with a flag?
*/
do {
+ /*
+ * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
+ * before processing finalizable objects to avoid finalizing reachable values.
+ *
+	 * It must be done inside the finalizers loop since objects must not be removed from CWT tables
+	 * while they are being finalized.
+ */
+ int done_with_ephemerons = 0;
+ do {
+ done_with_ephemerons = mark_ephemerons_in_range (copy_func, start_addr, end_addr);
+ drain_gray_stack ();
+ ++ephemeron_rounds;
+ } while (!done_with_ephemerons);
+
fin_ready = num_ready_finalizers;
- finalize_in_range (start_addr, end_addr, generation);
+ finalize_in_range (copy_func, start_addr, end_addr, generation);
if (generation == GENERATION_OLD)
- finalize_in_range (nursery_start, nursery_real_end, GENERATION_NURSERY);
- bigo_scanned_num = scan_needed_big_objects (start_addr, end_addr);
+ finalize_in_range (copy_func, nursery_start, nursery_real_end, GENERATION_NURSERY);
/* drain the new stack that might have been created */
DEBUG (6, fprintf (gc_debug_file, "Precise scan of gray area post fin\n"));
- drain_gray_stack (start_addr, end_addr);
- } while (fin_ready != num_ready_finalizers || bigo_scanned_num);
+ drain_gray_stack ();
+ } while (fin_ready != num_ready_finalizers);
+
+ /*
+ * Clear ephemeron pairs with unreachable keys.
+ * We pass the copy func so we can figure out if an array was promoted or not.
+ */
+ clear_unreachable_ephemerons (copy_func, start_addr, end_addr);
+
TV_GETTIME (btv);
- DEBUG (2, fprintf (gc_debug_file, "Finalize queue handling scan for %s generation: %d usecs\n", generation_name (generation), TV_ELAPSED (atv, btv)));
+	DEBUG (2, fprintf (gc_debug_file, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron rounds\n", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds));
/*
* handle disappearing links
*/
g_assert (gray_object_queue_is_empty ());
for (;;) {
- null_link_in_range (start_addr, end_addr, generation);
+ null_link_in_range (copy_func, start_addr, end_addr, generation);
if (generation == GENERATION_OLD)
- null_link_in_range (start_addr, end_addr, GENERATION_NURSERY);
+ null_link_in_range (copy_func, start_addr, end_addr, GENERATION_NURSERY);
if (gray_object_queue_is_empty ())
break;
- drain_gray_stack (start_addr, end_addr);
+ drain_gray_stack ();
}
g_assert (gray_object_queue_is_empty ());
- /* DEBUG (2, fprintf (gc_debug_file, "Copied from %s to old space: %d bytes (%p-%p)\n", generation_name (generation), (int)(to_space_bumper - to_space), to_space, to_space_bumper)); */
- to_space_set_next_data ();
}
static void
-check_scan_starts (void)
+check_section_scan_starts (GCMemSection *section)
{
- GCMemSection *section;
int i;
- if (!do_scan_starts_check)
- return;
- for (section = section_list; section; section = section->block.next) {
- for (i = 0; i < section->num_scan_start; ++i) {
- if (section->scan_starts [i]) {
- guint size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
- g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
- }
+ for (i = 0; i < section->num_scan_start; ++i) {
+ if (section->scan_starts [i]) {
+ guint size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
+ g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
}
}
}
+static void
+check_scan_starts (void)
+{
+ if (!do_scan_starts_check)
+ return;
+ check_section_scan_starts (nursery_section);
+ major_check_scan_starts ();
+}
+
static int last_num_pinned = 0;
static void
for (i = start_pin; i < end_pin; ++i) {
DEBUG (3, fprintf (gc_debug_file, "Bastard pinning obj %p (%s), size: %d\n", pin_queue [i], safe_name (pin_queue [i]), safe_object_get_size (pin_queue [i])));
}
- degraded_mode = 1;
- }
-
- nursery_next = nursery_frag_real_end = NULL;
-
- /* Clear TLABs for all threads */
- clear_tlabs ();
-}
-
-/* FIXME: later reduce code duplication here with the above
- * We don't keep track of section fragments for non-nursery sections yet, so
- * just memset to 0.
- */
-static void
-build_section_fragments (GCMemSection *section)
-{
- int i;
- char *frag_start, *frag_end;
- size_t frag_size;
-
- /* clear scan starts */
- memset (section->scan_starts, 0, section->num_scan_start * sizeof (gpointer));
- frag_start = section->data;
- section->next_data = section->data;
- for (i = section->pin_queue_start; i < section->pin_queue_end; ++i) {
- frag_end = pin_queue [i];
- /* remove the pin bit from pinned objects */
- unpin_object (frag_end);
- if (frag_end >= section->data + section->size) {
- frag_end = section->data + section->size;
- } else {
- section->scan_starts [((char*)frag_end - (char*)section->data)/SCAN_START_SIZE] = frag_end;
- }
- frag_size = frag_end - frag_start;
- if (frag_size) {
- binary_protocol_empty (frag_start, frag_size);
- memset (frag_start, 0, frag_size);
- }
- frag_size = safe_object_get_size ((MonoObject*)pin_queue [i]);
- frag_size += ALLOC_ALIGN - 1;
- frag_size &= ~(ALLOC_ALIGN - 1);
- frag_start = (char*)pin_queue [i] + frag_size;
- section->next_data = MAX (section->next_data, frag_start);
- }
- frag_end = section->end_data;
- frag_size = frag_end - frag_start;
- if (frag_size) {
- binary_protocol_empty (frag_start, frag_size);
- memset (frag_start, 0, frag_size);
+ degraded_mode = 1;
}
+
+ nursery_next = nursery_frag_real_end = NULL;
+
+ /* Clear TLABs for all threads */
+ clear_tlabs ();
}
static void
-scan_from_registered_roots (char *addr_start, char *addr_end, int root_type)
+scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, char *addr_start, char *addr_end, int root_type)
{
int i;
RootRecord *root;
for (i = 0; i < roots_hash_size [root_type]; ++i) {
for (root = roots_hash [root_type][i]; root; root = root->next) {
DEBUG (6, fprintf (gc_debug_file, "Precise root scan %p-%p (desc: %p)\n", root->start_root, root->end_root, (void*)root->root_desc));
- precisely_scan_objects_from ((void**)root->start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc);
+ precisely_scan_objects_from (copy_func, (void**)root->start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc);
}
}
}
fprintf (heap_dump_file, "</section>\n");
}
+static void
+dump_object (MonoObject *obj, gboolean dump_location)
+{
+ static char class_name [1024];
+
+ MonoClass *class = mono_object_class (obj);
+ int i, j;
+
+ /*
+ * Python's XML parser is too stupid to parse angle brackets
+	 * in strings, so we just ignore them.
+ */
+ i = j = 0;
+ while (class->name [i] && j < sizeof (class_name) - 1) {
+ if (!strchr ("<>\"", class->name [i]))
+ class_name [j++] = class->name [i];
+ ++i;
+ }
+ g_assert (j < sizeof (class_name));
+ class_name [j] = 0;
+
+ fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%d\"",
+ class->name_space, class_name,
+ safe_object_get_size (obj));
+ if (dump_location) {
+ const char *location;
+ if (ptr_in_nursery (obj))
+ location = "nursery";
+ else if (safe_object_get_size (obj) <= MAX_SMALL_OBJ_SIZE)
+ location = "major";
+ else
+ location = "LOS";
+ fprintf (heap_dump_file, " location=\"%s\"", location);
+ }
+ fprintf (heap_dump_file, "/>\n");
+}
+
static void
dump_heap (const char *type, int num, const char *reason)
{
static char const *internal_mem_names [] = { "pin-queue", "fragment", "section", "scan-starts",
"fin-table", "finalize-entry", "dislink-table",
"dislink", "roots-table", "root-record", "statistics",
- "remset", "gray-queue", "store-remset" };
+ "remset", "gray-queue", "store-remset", "marksweep-tables",
+ "marksweep-block-info", "ephemeron-link" };
- GCMemSection *section;
+ ObjectList *list;
LOSObject *bigobj;
int i;
if (reason)
fprintf (heap_dump_file, " reason=\"%s\"", reason);
fprintf (heap_dump_file, ">\n");
- fprintf (heap_dump_file, "<other-mem-usage type=\"pinned-chunks\" size=\"%ld\"/>\n", pinned_chunk_bytes_alloced);
- fprintf (heap_dump_file, "<other-mem-usage type=\"large-internal\" size=\"%ld\"/>\n", large_internal_bytes_alloced);
+ fprintf (heap_dump_file, "<other-mem-usage type=\"pinned-chunks\" size=\"%lld\"/>\n", pinned_chunk_bytes_alloced);
+ fprintf (heap_dump_file, "<other-mem-usage type=\"large-internal\" size=\"%lld\"/>\n", large_internal_bytes_alloced);
fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
for (i = 0; i < INTERNAL_MEM_MAX; ++i)
fprintf (heap_dump_file, "<other-mem-usage type=\"%s\" size=\"%ld\"/>\n", internal_mem_names [i], small_internal_mem_bytes [i]);
/* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", pinned_byte_counts [PIN_TYPE_OTHER]);
+ fprintf (heap_dump_file, "<pinned-objects>\n");
+ for (list = pinned_objects; list; list = list->next)
+ dump_object (list->obj, TRUE);
+ fprintf (heap_dump_file, "</pinned-objects>\n");
+
dump_section (nursery_section, "nursery");
- for (section = section_list; section; section = section->block.next) {
- if (section != nursery_section)
- dump_section (section, "old");
- }
+ major_dump_heap ();
fprintf (heap_dump_file, "<los>\n");
- for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
- MonoObject *obj = (MonoObject*) bigobj->data;
- MonoClass *class = mono_object_class (obj);
-
- fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%d\"/>\n",
- class->name_space, class->name,
- safe_object_get_size (obj));
- }
+ for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
+ dump_object ((MonoObject*)bigobj->data, FALSE);
fprintf (heap_dump_file, "</los>\n");
fprintf (heap_dump_file, "</collection>\n");
{
static gboolean inited = FALSE;
-#ifdef HEAVY_STATISTICS
- num_copy_object_called = 0;
- num_objects_copied = 0;
-#endif
-
if (inited)
return;
+ mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_pre_collection_fragment_clear);
+ mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_pinning);
+ mono_counters_register ("Minor scan remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_remsets);
+ mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_pinned);
+ mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_registered_roots);
+ mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_thread_data);
+ mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_finish_gray_stack);
+ mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_fragment_creation);
+
+ mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_pre_collection_fragment_clear);
+ mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_pinning);
+ mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_pinned);
+ mono_counters_register ("Major scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_registered_roots);
+ mono_counters_register ("Major scan thread data", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_thread_data);
+ mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_alloc_pinned);
+ mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_finalized);
+ mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_big_objects);
+ mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_finish_gray_stack);
+ mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_sweep);
+ mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_fragment_creation);
+
#ifdef HEAVY_STATISTICS
mono_counters_register ("WBarrier set field", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_field);
mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_arrayref);
mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_object_copy);
mono_counters_register ("# objects allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced);
+ mono_counters_register ("bytes allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced);
mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced_degraded);
+ mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_degraded);
+ mono_counters_register ("bytes allocated in LOS", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_los);
+
mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_nursery);
mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_nursery);
mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_major);
mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_major);
- mono_counters_register ("# copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_failed_from_space);
- mono_counters_register ("# copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_failed_forwarded);
- mono_counters_register ("# copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_failed_pinned);
- mono_counters_register ("# copy_object() failed large or pinned chunk", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_failed_large_pinned);
- mono_counters_register ("# copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_failed_to_space);
+ mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_nursery);
+ mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_major);
+
+ mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_from_space);
+ mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_forwarded);
+ mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_pinned);
+
+ mono_counters_register ("# wasted fragments used", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_wasted_fragments_used);
+ mono_counters_register ("bytes in wasted fragments", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_wasted_fragments_bytes);
mono_counters_register ("Store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets);
mono_counters_register ("Unique store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets_unique);
mono_counters_register ("Saved remsets 1", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_saved_remsets_1);
mono_counters_register ("Saved remsets 2", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_saved_remsets_2);
mono_counters_register ("Global remsets added", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_added);
+ mono_counters_register ("Global remsets re-added", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_readded);
mono_counters_register ("Global remsets processed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_processed);
+ mono_counters_register ("Global remsets discarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_discarded);
+
#endif
inited = TRUE;
}
-static void
-commit_stats (int generation)
-{
-#ifdef HEAVY_STATISTICS
- if (generation == GENERATION_NURSERY) {
- stat_copy_object_called_nursery += num_copy_object_called;
- stat_objects_copied_nursery += num_objects_copied;
- } else {
- g_assert (generation == GENERATION_OLD);
- stat_copy_object_called_major += num_copy_object_called;
- stat_objects_copied_major += num_objects_copied;
- }
-#endif
-}
-
/*
* Collect objects in the nursery. Returns whether to trigger a major
* collection.
collect_nursery (size_t requested_size)
{
size_t max_garbage_amount;
- int i;
char *orig_nursery_next;
- Fragment *frag;
- GCMemSection *section;
- int old_num_major_sections = num_major_sections;
- int sections_alloced;
TV_DECLARE (all_atv);
TV_DECLARE (all_btv);
TV_DECLARE (atv);
TV_DECLARE (btv);
+ current_collection_generation = GENERATION_NURSERY;
+
init_stats ();
binary_protocol_collection (GENERATION_NURSERY);
check_scan_starts ();
max_garbage_amount = nursery_next - nursery_start;
g_assert (nursery_section->size >= max_garbage_amount);
- /* Clear all remaining nursery fragments, pinning depends on this */
- if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
- g_assert (orig_nursery_next <= nursery_frag_real_end);
- memset (orig_nursery_next, 0, nursery_frag_real_end - orig_nursery_next);
- for (frag = nursery_fragments; frag; frag = frag->next) {
- memset (frag->fragment_start, 0, frag->fragment_end - frag->fragment_start);
- }
- }
+ /* world must be stopped already */
+ TV_GETTIME (all_atv);
+ TV_GETTIME (atv);
+
+ /* Pinning depends on this */
+ clear_nursery_fragments (orig_nursery_next);
+
+ TV_GETTIME (btv);
+ time_minor_pre_collection_fragment_clear += TV_ELAPSED_MS (atv, btv);
if (xdomain_checks)
check_for_xdomain_refs ();
nursery_section->next_data = nursery_next;
- if (!to_space_section) {
- new_to_space_section ();
- } else {
- /* we might have done degraded allocation since the
- last collection */
- g_assert (to_space_bumper <= to_space_section->next_data);
- to_space_bumper = to_space_section->next_data;
+ major_start_nursery_collection ();
- to_space_section->is_to_space = TRUE;
- }
gray_object_queue_init ();
num_minor_gcs++;
mono_stats.minor_gc_count ++;
- /* world must be stopped already */
- TV_GETTIME (all_atv);
- TV_GETTIME (atv);
+
+ global_remset_cache_clear ();
+
/* pin from pinned handles */
init_pinning ();
pin_from_roots (nursery_start, nursery_next);
/* identify pinned objects */
optimize_pin_queue (0);
next_pin_slot = pin_objects_from_addresses (nursery_section, pin_queue, pin_queue + next_pin_slot, nursery_start, nursery_next);
- TV_GETTIME (btv);
- DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", next_pin_slot, TV_ELAPSED (atv, btv)));
+ nursery_section->pin_queue_start = 0;
+ nursery_section->pin_queue_end = next_pin_slot;
+ TV_GETTIME (atv);
+ time_minor_pinning += TV_ELAPSED_MS (btv, atv);
+ DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", next_pin_slot, TV_ELAPSED (btv, atv)));
DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", next_pin_slot));
if (consistency_check_at_minor_collection)
scan_from_remsets (nursery_start, nursery_next);
/* we don't have complete write barrier yet, so we scan all the old generation sections */
- TV_GETTIME (atv);
- DEBUG (2, fprintf (gc_debug_file, "Old generation scan: %d usecs\n", TV_ELAPSED (btv, atv)));
+ TV_GETTIME (btv);
+ time_minor_scan_remsets += TV_ELAPSED_MS (atv, btv);
+ DEBUG (2, fprintf (gc_debug_file, "Old generation scan: %d usecs\n", TV_ELAPSED (atv, btv)));
- /* the pinned objects are roots */
- for (i = 0; i < next_pin_slot; ++i) {
- DEBUG (6, fprintf (gc_debug_file, "Precise object scan %d of pinned %p (%s)\n", i, pin_queue [i], safe_name (pin_queue [i])));
- scan_object (pin_queue [i], nursery_start, nursery_next);
- }
+ drain_gray_stack ();
+
+ TV_GETTIME (atv);
+ time_minor_scan_pinned += TV_ELAPSED_MS (btv, atv);
/* registered roots, this includes static fields */
- scan_from_registered_roots (nursery_start, nursery_next, ROOT_TYPE_NORMAL);
- scan_from_registered_roots (nursery_start, nursery_next, ROOT_TYPE_WBARRIER);
- scan_thread_data (nursery_start, nursery_next, TRUE);
- /* alloc_pinned objects */
- scan_from_pinned_objects (nursery_start, nursery_next);
+ scan_from_registered_roots (copy_object, nursery_start, nursery_next, ROOT_TYPE_NORMAL);
+ scan_from_registered_roots (copy_object, nursery_start, nursery_next, ROOT_TYPE_WBARRIER);
TV_GETTIME (btv);
- DEBUG (2, fprintf (gc_debug_file, "Root scan: %d usecs\n", TV_ELAPSED (atv, btv)));
+ time_minor_scan_registered_roots += TV_ELAPSED_MS (atv, btv);
+ /* thread data */
+ scan_thread_data (nursery_start, nursery_next, TRUE);
+ TV_GETTIME (atv);
+ time_minor_scan_thread_data += TV_ELAPSED_MS (btv, atv);
+ btv = atv;
finish_gray_stack (nursery_start, nursery_next, GENERATION_NURSERY);
+ TV_GETTIME (atv);
+ time_minor_finish_gray_stack += TV_ELAPSED_MS (btv, atv);
/* walk the pin_queue, build up the fragment list of free memory, unmark
* pinned objects as we go, memzero() the empty fragments so they are ready for the
* next allocations.
*/
build_nursery_fragments (0, next_pin_slot);
- TV_GETTIME (atv);
- DEBUG (2, fprintf (gc_debug_file, "Fragment creation: %d usecs, %zd bytes available\n", TV_ELAPSED (btv, atv), fragment_total));
+ TV_GETTIME (btv);
+ time_minor_fragment_creation += TV_ELAPSED_MS (atv, btv);
+ DEBUG (2, fprintf (gc_debug_file, "Fragment creation: %d usecs, %zd bytes available\n", TV_ELAPSED (atv, btv), fragment_total));
- for (section = section_list; section; section = section->block.next) {
- if (section->is_to_space)
- section->is_to_space = FALSE;
- }
+ major_finish_nursery_collection ();
TV_GETTIME (all_btv);
mono_stats.minor_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
g_assert (gray_object_queue_is_empty ());
- commit_stats (GENERATION_NURSERY);
-
check_scan_starts ();
- sections_alloced = num_major_sections - old_num_major_sections;
- minor_collection_sections_alloced += sections_alloced;
-
- return minor_collection_sections_alloced > minor_collection_section_allowance;
-}
+ current_collection_generation = -1;
-static void
-scan_from_pinned_chunk_if_marked (PinnedChunk *chunk, char *obj, size_t size, void *dummy)
-{
- if (object_is_pinned (obj))
- scan_object (obj, NULL, (char*)-1);
+ return major_need_major_collection ();
}
static void
-major_collection (const char *reason)
+major_do_collection (const char *reason)
{
- GCMemSection *section, *prev_section;
LOSObject *bigobj, *prevbo;
- int i;
- PinnedChunk *chunk;
- Fragment *frag;
TV_DECLARE (all_atv);
TV_DECLARE (all_btv);
TV_DECLARE (atv);
*/
char *heap_start = NULL;
char *heap_end = (char*)-1;
- size_t copy_space_required = 0;
int old_num_major_sections = num_major_sections;
int num_major_sections_saved, save_target, allowance_target;
+ //count_ref_nonref_objs ();
+ //consistency_check ();
+
init_stats ();
binary_protocol_collection (GENERATION_OLD);
check_scan_starts ();
+ gray_object_queue_init ();
degraded_mode = 0;
DEBUG (1, fprintf (gc_debug_file, "Start major collection %d\n", num_major_gcs));
num_major_gcs++;
mono_stats.major_gc_count ++;
- /* Clear all remaining nursery fragments, pinning depends on this */
- if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
- g_assert (nursery_next <= nursery_frag_real_end);
- memset (nursery_next, 0, nursery_frag_real_end - nursery_next);
- for (frag = nursery_fragments; frag; frag = frag->next) {
- memset (frag->fragment_start, 0, frag->fragment_end - frag->fragment_start);
- }
- }
+ /* world must be stopped already */
+ TV_GETTIME (all_atv);
+ TV_GETTIME (atv);
+
+ /* Pinning depends on this */
+ clear_nursery_fragments (nursery_next);
+
+ TV_GETTIME (btv);
+ time_major_pre_collection_fragment_clear += TV_ELAPSED_MS (atv, btv);
if (xdomain_checks)
check_for_xdomain_refs ();
- if (g_getenv ("MONO_GC_NO_MAJOR")) {
- collect_nursery (0);
- return;
- }
- TV_GETTIME (all_atv);
nursery_section->next_data = nursery_real_end;
/* we should also coalesce scanning from sections close to each other
* and deal with pointers outside of the sections later.
*/
/* The remsets are not useful for a major collection */
clear_remsets ();
- /* world must be stopped already */
+ global_remset_cache_clear ();
+
TV_GETTIME (atv);
init_pinning ();
DEBUG (6, fprintf (gc_debug_file, "Collecting pinned addresses\n"));
*/
DEBUG (6, fprintf (gc_debug_file, "Pinning from sections\n"));
/* first pass for the sections */
- for (section = section_list; section; section = section->block.next) {
- int start, end;
- DEBUG (6, fprintf (gc_debug_file, "Pinning from section %p (%p-%p)\n", section, section->data, section->end_data));
- find_optimized_pin_queue_area (section->data, section->end_data, &start, &end);
- DEBUG (6, fprintf (gc_debug_file, "Found %d pinning addresses in section %p (%d-%d)\n",
- end - start, section, start, end));
- section->pin_queue_start = start;
- section->pin_queue_end = end;
- }
+ find_section_pin_queue_start_end (nursery_section);
+ major_find_pin_queue_start_ends ();
/* identify possible pointers to the insize of large objects */
DEBUG (6, fprintf (gc_debug_file, "Pinning from large objects\n"));
for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + bigobj->size, &start, &end);
if (start != end) {
pin_object (bigobj->data);
+ /* FIXME: only enqueue if object has references */
+ GRAY_OBJECT_ENQUEUE (bigobj->data);
if (heap_dump_file)
pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
DEBUG (6, fprintf (gc_debug_file, "Marked large object %p (%s) size: %zd from roots\n", bigobj->data, safe_name (bigobj->data), bigobj->size));
}
}
- /* look for pinned addresses for pinned-alloc objects */
- DEBUG (6, fprintf (gc_debug_file, "Pinning from pinned-alloc objects\n"));
- for (chunk = pinned_chunk_list; chunk; chunk = chunk->block.next) {
- int start, end;
- find_optimized_pin_queue_area (chunk->start_data, (char*)chunk + chunk->num_pages * FREELIST_PAGESIZE, &start, &end);
- if (start != end)
- mark_pinned_from_addresses (chunk, pin_queue + start, pin_queue + end);
- }
/* second pass for the sections */
- for (section = section_list; section; section = section->block.next) {
- int start = section->pin_queue_start;
- int end = section->pin_queue_end;
- if (start != end) {
- int reduced_to;
- reduced_to = pin_objects_from_addresses (section, pin_queue + start, pin_queue + end,
- section->data, section->next_data);
- section->pin_queue_start = start;
- section->pin_queue_end = start + reduced_to;
- }
- copy_space_required += (char*)section->next_data - (char*)section->data;
- }
+ pin_objects_in_section (nursery_section);
+ major_pin_objects ();
TV_GETTIME (btv);
+ time_major_pinning += TV_ELAPSED_MS (atv, btv);
DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", next_pin_slot, TV_ELAPSED (atv, btv)));
DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", next_pin_slot));
- new_to_space_section ();
- gray_object_queue_init ();
+ major_init_to_space ();
+
+ drain_gray_stack ();
+
+ TV_GETTIME (atv);
+ time_major_scan_pinned += TV_ELAPSED_MS (btv, atv);
- /* the old generation doesn't need to be scanned (no remembered sets or card
- * table needed either): the only objects that must survive are those pinned and
- * those referenced by the precise roots.
- * mark any section without pinned objects, so we can free it since we will be able to
- * move all the objects.
- */
- /* the pinned objects are roots (big objects are included in this list, too) */
- for (section = section_list; section; section = section->block.next) {
- for (i = section->pin_queue_start; i < section->pin_queue_end; ++i) {
- DEBUG (6, fprintf (gc_debug_file, "Precise object scan %d of pinned %p (%s)\n",
- i, pin_queue [i], safe_name (pin_queue [i])));
- scan_object (pin_queue [i], heap_start, heap_end);
- }
- }
- for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
- if (object_is_pinned (bigobj->data)) {
- DEBUG (6, fprintf (gc_debug_file, "Precise object scan pinned LOS object %p (%s)\n",
- bigobj->data, safe_name (bigobj->data)));
- scan_object (bigobj->data, heap_start, heap_end);
- }
- }
- scan_pinned_objects (scan_from_pinned_chunk_if_marked, NULL);
/* registered roots, this includes static fields */
- scan_from_registered_roots (heap_start, heap_end, ROOT_TYPE_NORMAL);
- scan_from_registered_roots (heap_start, heap_end, ROOT_TYPE_WBARRIER);
+ scan_from_registered_roots (major_copy_or_mark_object, heap_start, heap_end, ROOT_TYPE_NORMAL);
+ scan_from_registered_roots (major_copy_or_mark_object, heap_start, heap_end, ROOT_TYPE_WBARRIER);
+ TV_GETTIME (btv);
+ time_major_scan_registered_roots += TV_ELAPSED_MS (atv, btv);
+
/* Threads */
+ /* FIXME: This is the wrong place for this, because it does
+ pinning */
scan_thread_data (heap_start, heap_end, TRUE);
- /* alloc_pinned objects */
- scan_from_pinned_objects (heap_start, heap_end);
+ TV_GETTIME (atv);
+ time_major_scan_thread_data += TV_ELAPSED_MS (btv, atv);
+
+ TV_GETTIME (btv);
+ time_major_scan_alloc_pinned += TV_ELAPSED_MS (atv, btv);
+
/* scan the list of objects ready for finalization */
- scan_finalizer_entries (fin_ready_list, heap_start, heap_end);
- scan_finalizer_entries (critical_fin_list, heap_start, heap_end);
+ scan_finalizer_entries (major_copy_or_mark_object, fin_ready_list);
+ scan_finalizer_entries (major_copy_or_mark_object, critical_fin_list);
TV_GETTIME (atv);
+ time_major_scan_finalized += TV_ELAPSED_MS (btv, atv);
DEBUG (2, fprintf (gc_debug_file, "Root scan: %d usecs\n", TV_ELAPSED (btv, atv)));
- /* we need to go over the big object list to see if any was marked and scan it
- * And we need to make this in a loop, considering that objects referenced by finalizable
- * objects could reference big objects (this happens in finish_gray_stack ())
- */
- scan_needed_big_objects (heap_start, heap_end);
+ TV_GETTIME (btv);
+ time_major_scan_big_objects += TV_ELAPSED_MS (atv, btv);
+
/* all the objects in the heap */
finish_gray_stack (heap_start, heap_end, GENERATION_OLD);
-
- unset_to_space ();
+ TV_GETTIME (atv);
+ time_major_finish_gray_stack += TV_ELAPSED_MS (btv, atv);
/* sweep the big objects list */
prevbo = NULL;
for (bigobj = los_object_list; bigobj;) {
if (object_is_pinned (bigobj->data)) {
unpin_object (bigobj->data);
- bigobj->scanned = FALSE;
} else {
LOSObject *to_free;
/* not referenced anywhere, so we can free it */
prevbo = bigobj;
bigobj = bigobj->next;
}
- /* unpin objects from the pinned chunks and free the unmarked ones */
- sweep_pinned_objects ();
-
- /* free the unused sections */
- prev_section = NULL;
- for (section = section_list; section;) {
- /* to_space doesn't need handling here and the nursery is special */
- if (section->is_to_space || section == nursery_section) {
- if (section->is_to_space)
- section->is_to_space = FALSE;
- prev_section = section;
- section = section->block.next;
- continue;
- }
- /* no pinning object, so the section is free */
- if (section->pin_queue_start == section->pin_queue_end) {
- GCMemSection *to_free;
- if (prev_section)
- prev_section->block.next = section->block.next;
- else
- section_list = section->block.next;
- to_free = section;
- section = section->block.next;
- free_major_section (to_free);
- continue;
- } else {
- DEBUG (6, fprintf (gc_debug_file, "Section %p has still pinned objects (%d)\n", section, section->pin_queue_end - section->pin_queue_start));
- build_section_fragments (section);
- }
- prev_section = section;
- section = section->block.next;
- }
+
+ major_sweep ();
+
+ TV_GETTIME (btv);
+ time_major_sweep += TV_ELAPSED_MS (atv, btv);
/* walk the pin_queue, build up the fragment list of free memory, unmark
* pinned objects as we go, memzero() the empty fragments so they are ready for the
*/
build_nursery_fragments (nursery_section->pin_queue_start, nursery_section->pin_queue_end);
+ TV_GETTIME (atv);
+ time_major_fragment_creation += TV_ELAPSED_MS (btv, atv);
+
TV_GETTIME (all_btv);
mono_stats.major_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
g_assert (gray_object_queue_is_empty ());
- commit_stats (GENERATION_OLD);
-
num_major_sections_saved = MAX (old_num_major_sections - num_major_sections, 1);
save_target = num_major_sections / 2;
+ /*
+ * We aim to allow the allocation of as many sections as is
+ * necessary to reclaim save_target sections in the next
+ * collection. We assume the collection pattern won't change.
+ * In the last cycle, we had num_major_sections_saved for
+ * minor_collection_sections_alloced. Assuming things won't
+ * change, this must be the same ratio as save_target for
+ * allowance_target, i.e.
+ *
+ * num_major_sections_saved save_target
+ * --------------------------------- == ----------------
+ * minor_collection_sections_alloced allowance_target
+ *
+ * hence:
+ */
allowance_target = save_target * minor_collection_sections_alloced / num_major_sections_saved;
minor_collection_section_allowance = MAX (MIN (allowance_target, num_major_sections), MIN_MINOR_COLLECTION_SECTION_ALLOWANCE);
- /*
- printf ("alloced %d saved %d target %d allowance %d\n",
- minor_collection_sections_alloced, num_major_sections_saved, allowance_target,
- minor_collection_section_allowance);
- */
-
minor_collection_sections_alloced = 0;
check_scan_starts ();
-}
-
-/*
- * Allocate a new section of memory to be used as old generation.
- */
-static GCMemSection*
-alloc_major_section (void)
-{
- GCMemSection *section;
- int scan_starts;
-
- section = get_os_memory_aligned (MAJOR_SECTION_SIZE, TRUE);
- section->next_data = section->data = (char*)section + SIZEOF_GC_MEM_SECTION;
- g_assert (!((mword)section->data & 7));
- section->size = MAJOR_SECTION_SIZE - SIZEOF_GC_MEM_SECTION;
- section->end_data = section->data + section->size;
- UPDATE_HEAP_BOUNDARIES (section->data, section->end_data);
- total_alloc += section->size;
- DEBUG (3, fprintf (gc_debug_file, "New major heap section: (%p-%p), total: %zd\n", section->data, section->end_data, total_alloc));
- scan_starts = (section->size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
- section->scan_starts = get_internal_mem (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS);
- section->num_scan_start = scan_starts;
- section->block.role = MEMORY_ROLE_GEN1;
- section->is_to_space = TRUE;
- /* add to the section list */
- section->block.next = section_list;
- section_list = section;
-
- ++num_major_sections;
-
- return section;
+ //consistency_check ();
}
static void
-free_major_section (GCMemSection *section)
+major_collection (const char *reason)
{
- DEBUG (3, fprintf (gc_debug_file, "Freed major section %p (%p-%p)\n", section, section->data, section->end_data));
- free_internal_mem (section->scan_starts, INTERNAL_MEM_SCAN_STARTS);
- free_os_memory (section, MAJOR_SECTION_SIZE);
- total_alloc -= MAJOR_SECTION_SIZE - SIZEOF_GC_MEM_SECTION;
+ if (g_getenv ("MONO_GC_NO_MAJOR")) {
+ collect_nursery (0);
+ return;
+ }
- --num_major_sections;
+ current_collection_generation = GENERATION_OLD;
+ major_do_collection (reason);
+ current_collection_generation = -1;
}
/*
report_pinned_chunk (chunk, i++);
}
printf ("Pinned memory usage:\n");
- i = 0;
- for (chunk = pinned_chunk_list; chunk; chunk = chunk->block.next) {
- report_pinned_chunk (chunk, i++);
- }
-}
-
-/*
- * the array of pointers from @start to @end contains conservative
- * pointers to objects inside @chunk: mark each referenced object
- * with the PIN bit.
- */
-static void
-mark_pinned_from_addresses (PinnedChunk *chunk, void **start, void **end)
-{
- for (; start < end; start++) {
- char *addr = *start;
- int offset = (char*)addr - (char*)chunk;
- int page = offset / FREELIST_PAGESIZE;
- int obj_offset = page == 0? offset - ((char*)chunk->start_data - (char*)chunk): offset % FREELIST_PAGESIZE;
- int slot_size = chunk->page_sizes [page];
- void **ptr;
- /* the page is not allocated */
- if (!slot_size)
- continue;
- /* would be faster if we restrict the sizes to power of two,
- * but that's a waste of memory: need to measure. it could reduce
- * fragmentation since there are less pages needed, if for example
- * someone interns strings of each size we end up with one page per
- * interned string (still this is just ~40 KB): with more fine-grained sizes
- * this increases the number of used pages.
- */
- if (page == 0) {
- obj_offset /= slot_size;
- obj_offset *= slot_size;
- addr = (char*)chunk->start_data + obj_offset;
- } else {
- obj_offset /= slot_size;
- obj_offset *= slot_size;
- addr = (char*)chunk + page * FREELIST_PAGESIZE + obj_offset;
- }
- ptr = (void**)addr;
- /* if the vtable is inside the chunk it's on the freelist, so skip */
- if (*ptr && (*ptr < (void*)chunk->start_data || *ptr > (void*)((char*)chunk + chunk->num_pages * FREELIST_PAGESIZE))) {
- binary_protocol_pin (addr, (gpointer)LOAD_VTABLE (addr), safe_object_get_size ((MonoObject*)addr));
- pin_object (addr);
- if (heap_dump_file)
- pin_stats_register_object ((char*) addr, safe_object_get_size ((MonoObject*) addr));
- DEBUG (6, fprintf (gc_debug_file, "Marked pinned object %p (%s) from roots\n", addr, safe_name (addr)));
- }
- }
-}
-
-static void
-scan_pinned_objects (ScanPinnedObjectCallbackFunc callback, void *callback_data)
-{
- PinnedChunk *chunk;
- int i, obj_size;
- char *p, *endp;
- void **ptr;
- void *end_chunk;
- for (chunk = pinned_chunk_list; chunk; chunk = chunk->block.next) {
- end_chunk = (char*)chunk + chunk->num_pages * FREELIST_PAGESIZE;
- DEBUG (6, fprintf (gc_debug_file, "Scanning pinned chunk %p (range: %p-%p)\n", chunk, chunk->start_data, end_chunk));
- for (i = 0; i < chunk->num_pages; ++i) {
- obj_size = chunk->page_sizes [i];
- if (!obj_size)
- continue;
- p = i? (char*)chunk + i * FREELIST_PAGESIZE: chunk->start_data;
- endp = i? p + FREELIST_PAGESIZE: (char*)chunk + FREELIST_PAGESIZE;
- DEBUG (6, fprintf (gc_debug_file, "Page %d (size: %d, range: %p-%p)\n", i, obj_size, p, endp));
- while (p + obj_size <= endp) {
- ptr = (void**)p;
- DEBUG (9, fprintf (gc_debug_file, "Considering %p (vtable: %p)\n", ptr, *ptr));
- /* if the first word (the vtable) is outside the chunk we have an object */
- if (*ptr && (*ptr < (void*)chunk || *ptr >= end_chunk))
- callback (chunk, (char*)ptr, obj_size, callback_data);
- p += obj_size;
- }
- }
- }
-}
-
-static void
-sweep_pinned_objects_callback (PinnedChunk *chunk, char *ptr, size_t size, void *data)
-{
- if (object_is_pinned (ptr)) {
- unpin_object (ptr);
- DEBUG (6, fprintf (gc_debug_file, "Unmarked pinned object %p (%s)\n", ptr, safe_name (ptr)));
- } else {
- DEBUG (6, fprintf (gc_debug_file, "Freeing unmarked pinned object %p (%s)\n", ptr, safe_name (ptr)));
- free_pinned_object (chunk, ptr, size);
- }
-}
-
-static void
-sweep_pinned_objects (void)
-{
- scan_pinned_objects (sweep_pinned_objects_callback, NULL);
-}
-
-static void
-scan_object_callback (PinnedChunk *chunk, char *ptr, size_t size, char **data)
-{
- DEBUG (6, fprintf (gc_debug_file, "Precise object scan of alloc_pinned %p (%s)\n", ptr, safe_name (ptr)));
- /* FIXME: Put objects without references into separate chunks
- which do not need to be scanned */
- scan_object (ptr, data [0], data [1]);
-}
-
-static void
-scan_from_pinned_objects (char *addr_start, char *addr_end)
-{
- char *data [2] = { addr_start, addr_end };
- scan_pinned_objects ((ScanPinnedObjectCallbackFunc)scan_object_callback, data);
+ major_report_pinned_memory_usage ();
}
/*
{
PinnedChunk *chunk;
int offset;
- int size = MAJOR_SECTION_SIZE;
+ int size = PINNED_CHUNK_SIZE;
- chunk = get_os_memory_aligned (size, TRUE);
+ chunk = get_os_memory_aligned (size, size, TRUE);
chunk->block.role = MEMORY_ROLE_PINNED;
UPDATE_HEAP_BOUNDARIES (chunk, ((char*)chunk + size));
chunk->page_sizes [0] = PINNED_FIRST_SLOT_SIZE;
build_freelist (chunk, slot_for_size (PINNED_FIRST_SLOT_SIZE), PINNED_FIRST_SLOT_SIZE, chunk->start_data, ((char*)chunk + FREELIST_PAGESIZE));
DEBUG (4, fprintf (gc_debug_file, "Allocated pinned chunk %p, size: %d\n", chunk, size));
- min_pinned_chunk_addr = MIN (min_pinned_chunk_addr, (char*)chunk->start_data);
- max_pinned_chunk_addr = MAX (max_pinned_chunk_addr, ((char*)chunk + size));
return chunk;
}
return NULL;
}
-static void*
-alloc_from_freelist (size_t size)
-{
- int slot;
- void *res = NULL;
- PinnedChunk *pchunk;
- slot = slot_for_size (size);
- /*g_print ("using slot %d for size %d (slot size: %d)\n", slot, size, freelist_sizes [slot]);*/
- g_assert (size <= freelist_sizes [slot]);
- for (pchunk = pinned_chunk_list; pchunk; pchunk = pchunk->block.next) {
- void **p = pchunk->free_list [slot];
- if (p) {
- /*g_print ("found freelist for slot %d in chunk %p, returning %p, next %p\n", slot, pchunk, p, *p);*/
- pchunk->free_list [slot] = *p;
- return p;
- }
- }
- for (pchunk = pinned_chunk_list; pchunk; pchunk = pchunk->block.next) {
- res = get_chunk_freelist (pchunk, slot);
- if (res)
- return res;
- }
- pchunk = alloc_pinned_chunk ();
- /* FIXME: handle OOM */
- pchunk->block.next = pinned_chunk_list;
- pinned_chunk_list = pchunk;
- res = get_chunk_freelist (pchunk, slot);
- return res;
-}
-
/* used for the GC-internal data structures */
static void*
get_internal_mem (size_t size, int type)
LOSObject *obj;
void **vtslot;
size_t alloc_size;
- int just_did_major_gc = FALSE;
g_assert (size > MAX_SMALL_OBJ_SIZE);
if (los_memory_usage > next_los_collection) {
+ static mword last_los_memory_usage = 0;
+
+ mword los_memory_alloced;
+ mword old_los_memory_usage;
+ mword los_memory_saved;
+ mword save_target;
+ mword allowance_target;
+ mword allowance;
+
DEBUG (4, fprintf (gc_debug_file, "Should trigger major collection: req size %zd (los already: %zu, limit: %zu)\n", size, los_memory_usage, next_los_collection));
- just_did_major_gc = TRUE;
stop_world ();
+
+ g_assert (los_memory_usage >= last_los_memory_usage);
+ los_memory_alloced = los_memory_usage - last_los_memory_usage;
+ old_los_memory_usage = los_memory_usage;
+
major_collection ("LOS overflow");
+
+ los_memory_saved = MAX (old_los_memory_usage - los_memory_usage, 1);
+ save_target = los_memory_usage / 2;
+ /*
+ * see the comment at the end of major_collection()
+ * for the explanation for this calculation.
+ */
+ allowance_target = (mword)((double)save_target * (double)los_memory_alloced / (double)los_memory_saved);
+ allowance = MAX (MIN (allowance_target, los_memory_usage), MIN_LOS_ALLOWANCE);
+ next_los_collection = los_memory_usage + allowance;
+
+ last_los_memory_usage = los_memory_usage;
+
restart_world ();
- /* later increase based on a percent of the heap size */
- next_los_collection = los_memory_usage + 5*1024*1024;
}
alloc_size = size;
alloc_size += sizeof (LOSObject);
alloc_size &= ~(pagesize - 1);
/* FIXME: handle OOM */
obj = get_os_memory (alloc_size, TRUE);
+ g_assert (!((mword)obj->data & (ALLOC_ALIGN - 1)));
obj->size = size;
vtslot = (void**)obj->data;
*vtslot = vtable;
return obj->data;
}
+static void
+setup_fragment (Fragment *frag, Fragment *prev, size_t size)
+{
+ /* remove from the list */
+ if (prev)
+ prev->next = frag->next;
+ else
+ nursery_fragments = frag->next;
+ nursery_next = frag->fragment_start;
+ nursery_frag_real_end = frag->fragment_end;
+
+ DEBUG (4, fprintf (gc_debug_file, "Using nursery fragment %p-%p, size: %zd (req: %zd)\n", nursery_next, nursery_frag_real_end, nursery_frag_real_end - nursery_next, size));
+ frag->next = fragment_freelist;
+ fragment_freelist = frag;
+}
+
/* check if we have a suitable fragment in nursery_fragments to be able to allocate
* an object of size @size
* Return FALSE if not found (which means we need a collection)
prev = NULL;
for (frag = nursery_fragments; frag; frag = frag->next) {
if (size <= (frag->fragment_end - frag->fragment_start)) {
- /* remove from the list */
- if (prev)
- prev->next = frag->next;
- else
- nursery_fragments = frag->next;
- nursery_next = frag->fragment_start;
- nursery_frag_real_end = frag->fragment_end;
-
- DEBUG (4, fprintf (gc_debug_file, "Using nursery fragment %p-%p, size: %zd (req: %zd)\n", nursery_next, nursery_frag_real_end, nursery_frag_real_end - nursery_next, size));
- frag->next = fragment_freelist;
- fragment_freelist = frag;
+ setup_fragment (frag, prev, size);
return TRUE;
}
prev = frag;
}
/*
- * size is already rounded up and we hold the GC lock.
+ * Same as search_fragment_for_size but if search for @desired_size fails, try to satisfy @minimum_size.
+ * This improves nursery usage.
*/
-static void*
-alloc_degraded (MonoVTable *vtable, size_t size)
+static int
+search_fragment_for_size_range (size_t desired_size, size_t minimum_size)
{
- GCMemSection *section;
- void **p = NULL;
- g_assert (size <= MAX_SMALL_OBJ_SIZE);
- HEAVY_STAT (++stat_objects_alloced_degraded);
- for (section = section_list; section; section = section->block.next) {
- if (section != nursery_section && (section->end_data - section->next_data) >= size) {
- p = (void**)section->next_data;
- break;
+ Fragment *frag, *prev, *min_prev;
+ DEBUG (4, fprintf (gc_debug_file, "Searching nursery fragment %p, desired size: %zd minimum size %zd\n", nursery_frag_real_end, desired_size, minimum_size));
+
+ if (nursery_frag_real_end > nursery_next && nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
+ /* Clear the remaining space, pinning depends on this */
+ memset (nursery_next, 0, nursery_frag_real_end - nursery_next);
+
+ min_prev = GINT_TO_POINTER (-1);
+ prev = NULL;
+
+ for (frag = nursery_fragments; frag; frag = frag->next) {
+ int frag_size = frag->fragment_end - frag->fragment_start;
+ if (desired_size <= frag_size) {
+ setup_fragment (frag, prev, desired_size);
+ return desired_size;
}
+ if (minimum_size <= frag_size)
+ min_prev = prev;
+
+ prev = frag;
}
- if (!p) {
- section = alloc_major_section ();
- section->is_to_space = FALSE;
- /* FIXME: handle OOM */
- p = (void**)section->next_data;
+
+ if (min_prev != GINT_TO_POINTER (-1)) {
+ int frag_size;
+ if (min_prev)
+ frag = min_prev->next;
+ else
+ frag = nursery_fragments;
+
+ frag_size = frag->fragment_end - frag->fragment_start;
+ HEAVY_STAT (++stat_wasted_fragments_used);
+ HEAVY_STAT (stat_wasted_fragments_bytes += frag_size);
+
+ setup_fragment (frag, min_prev, minimum_size);
+ return frag_size;
}
- section->next_data += size;
- degraded_mode += size;
- DEBUG (3, fprintf (gc_debug_file, "Allocated (degraded) object %p, vtable: %p (%s), size: %zd in section %p\n", p, vtable, vtable->klass->name, size, section));
- *p = vtable;
- return p;
+
+ return 0;
}
/*
/* FIXME: handle OOM */
void **p;
char *new_next;
- gboolean res;
TLAB_ACCESS_INIT;
HEAVY_STAT (++stat_objects_alloced);
+ if (size <= MAX_SMALL_OBJ_SIZE)
+ HEAVY_STAT (stat_bytes_alloced += size);
+ else
+ HEAVY_STAT (stat_bytes_alloced_los += size);
size += ALLOC_ALIGN - 1;
size &= ~(ALLOC_ALIGN - 1);
return p;
}
+ /*FIXME This codepath is currently dead code since tlab_size > MAX_SMALL_OBJ_SIZE*/
if (size > tlab_size) {
/* Allocate directly from the nursery */
if (nursery_next + size >= nursery_frag_real_end) {
if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
memset (p, 0, size);
} else {
+ int alloc_size = tlab_size;
+ int available_in_nursery = nursery_frag_real_end - nursery_next;
if (TLAB_START)
DEBUG (3, fprintf (gc_debug_file, "Retire TLAB: %p-%p [%ld]\n", TLAB_START, TLAB_REAL_END, (long)(TLAB_REAL_END - TLAB_NEXT - size)));
- if (nursery_next + tlab_size >= nursery_frag_real_end) {
- res = search_fragment_for_size (tlab_size);
- if (!res) {
- minor_collect_or_expand_inner (tlab_size);
- if (degraded_mode) {
- p = alloc_degraded (vtable, size);
- return p;
+ if (alloc_size >= available_in_nursery) {
+ if (available_in_nursery > MAX_NURSERY_TLAB_WASTE && available_in_nursery > size) {
+ alloc_size = available_in_nursery;
+ } else {
+ alloc_size = search_fragment_for_size_range (tlab_size, size);
+ if (!alloc_size) {
+ alloc_size = tlab_size;
+ minor_collect_or_expand_inner (tlab_size);
+ if (degraded_mode) {
+ p = alloc_degraded (vtable, size);
+ return p;
+ }
}
}
}
/* Allocate a new TLAB from the current nursery fragment */
TLAB_START = nursery_next;
- nursery_next += tlab_size;
+ nursery_next += alloc_size;
TLAB_NEXT = TLAB_START;
- TLAB_REAL_END = TLAB_START + tlab_size;
- TLAB_TEMP_END = TLAB_START + MIN (SCAN_START_SIZE, tlab_size);
+ TLAB_REAL_END = TLAB_START + alloc_size;
+ TLAB_TEMP_END = TLAB_START + MIN (SCAN_START_SIZE, alloc_size);
if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
- memset (TLAB_START, 0, tlab_size);
+ memset (TLAB_START, 0, alloc_size);
/* Allocate from the TLAB */
p = (void*)TLAB_NEXT;
*/
HEAVY_STAT (++stat_objects_alloced);
+ HEAVY_STAT (stat_bytes_alloced += size);
DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
binary_protocol_alloc (p, vtable, size);
size += ALLOC_ALIGN - 1;
size &= ~(ALLOC_ALIGN - 1);
LOCK_GC;
- if (size > MAX_FREELIST_SIZE) {
+ if (size > MAX_SMALL_OBJ_SIZE) {
/* large objects are always pinned anyway */
p = alloc_large_inner (vtable, size);
} else {
- p = alloc_from_freelist (size);
- memset (p, 0, size);
+ DEBUG (9, g_assert (vtable->klass->inited));
+ p = major_alloc_small_pinned_obj (size, vtable->klass->has_references);
}
DEBUG (6, fprintf (gc_debug_file, "Allocated pinned object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
binary_protocol_alloc (p, vtable, size);
/* LOCKING: requires that the GC lock is held */
static void
-finalize_in_range (char *start, char *end, int generation)
+finalize_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation)
{
FinalizeEntryHashTable *hash_table = get_finalize_entry_hash_table (generation);
FinalizeEntry *entry, *prev;
for (i = 0; i < finalizable_hash_size; ++i) {
prev = NULL;
for (entry = finalizable_hash [i]; entry;) {
- if ((char*)entry->object >= start && (char*)entry->object < end && !object_is_in_to_space (entry->object)) {
+ if ((char*)entry->object >= start && (char*)entry->object < end && !major_is_object_live (entry->object)) {
gboolean is_fin_ready = object_is_fin_ready (entry->object);
- char *copy = copy_object (entry->object, start, end);
+ char *copy = entry->object;
+ copy_func ((void**)©);
if (is_fin_ready) {
char *from;
FinalizeEntry *next;
}
}
+static int
+object_is_reachable (char *object, char *start, char *end)
+{
+ /*This happens for non-nursery objects during minor collections. We just treat all objects as alive.*/
+ if (object < start || object >= end)
+ return TRUE;
+ return !object_is_fin_ready (object) || major_is_object_live (object);
+}
+
+/* LOCKING: requires that the GC lock is held */
+static void
+null_ephemerons_for_domain (MonoDomain *domain)
+{
+ EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
+
+ while (current) {
+ MonoObject *object = (MonoObject*)current->array;
+
+ if (object && !object->vtable) {
+ EphemeronLinkNode *tmp = current;
+
+ if (prev)
+ prev->next = current->next;
+ else
+ ephemeron_list = current->next;
+
+ current = current->next;
+ free_internal_mem (tmp, INTERNAL_MEM_EPHEMERON_LINK);
+ } else {
+ prev = current;
+ current = current->next;
+ }
+ }
+}
+
+/* LOCKING: requires that the GC lock is held */
+static void
+clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end)
+{
+ int was_in_nursery, was_promoted;
+ EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
+ MonoArray *array;
+ Ephemeron *cur, *array_end;
+ char *tombstone;
+
+ while (current) {
+ char *object = current->array;
+
+ if (!object_is_reachable (object, start, end)) {
+ EphemeronLinkNode *tmp = current;
+
+ DEBUG (5, fprintf (gc_debug_file, "Dead Ephemeron array at %p\n", object));
+
+ if (prev)
+ prev->next = current->next;
+ else
+ ephemeron_list = current->next;
+
+ current = current->next;
+ free_internal_mem (tmp, INTERNAL_MEM_EPHEMERON_LINK);
+
+ continue;
+ }
+
+ was_in_nursery = ptr_in_nursery (object);
+ copy_func ((void**)&object);
+ current->array = object;
+
+ /*The array was promoted, add global remsets for key/values left behind in nursery.*/
+ was_promoted = was_in_nursery && !ptr_in_nursery (object);
+
+ DEBUG (5, fprintf (gc_debug_file, "Clearing unreachable entries for ephemeron array at %p\n", object));
+
+ array = (MonoArray*)object;
+ cur = mono_array_addr (array, Ephemeron, 0);
+ array_end = cur + mono_array_length_fast (array);
+ tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
+
+ for (; cur < array_end; ++cur) {
+ char *key = (char*)cur->key;
+
+ if (!key || key == tombstone)
+ continue;
+
+ DEBUG (5, fprintf (gc_debug_file, "[%d] key %p (%s) value %p (%s)\n", cur - mono_array_addr (array, Ephemeron, 0),
+ key, object_is_reachable (key, start, end) ? "reachable" : "unreachable",
+ cur->value, cur->value && object_is_reachable (cur->value, start, end) ? "reachable" : "unreachable"));
+
+ if (!object_is_reachable (key, start, end)) {
+ cur->key = tombstone;
+ cur->value = NULL;
+ continue;
+ }
+
+ if (was_promoted) {
+ if (ptr_in_nursery (key)) {/*key was not promoted*/
+ DEBUG (5, fprintf (gc_debug_file, "\tAdded remset to key %p\n", key));
+ add_to_global_remset (&cur->key);
+ }
+ if (ptr_in_nursery (cur->value)) {/*value was not promoted*/
+ DEBUG (5, fprintf (gc_debug_file, "\tAdded remset to value %p\n", cur->value));
+ add_to_global_remset (&cur->value);
+ }
+ }
+ }
+ prev = current;
+ current = current->next;
+ }
+}
+
+/* LOCKING: requires that the GC lock is held */
+static int
+mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end)
+{
+ int nothing_marked = 1;
+ EphemeronLinkNode *current = ephemeron_list;
+ MonoArray *array;
+ Ephemeron *cur, *array_end;
+ char *tombstone;
+
+ for (current = ephemeron_list; current; current = current->next) {
+ char *object = current->array;
+ DEBUG (5, fprintf (gc_debug_file, "Ephemeron array at %p\n", object));
+
+ /*We ignore arrays in old gen during minor collections since all objects are promoted by the remset machinery.*/
+ if (object < start || object >= end)
+ continue;
+
+ /*It has to be alive*/
+ if (!object_is_reachable (object, start, end)) {
+ DEBUG (5, fprintf (gc_debug_file, "\tnot reachable\n"));
+ continue;
+ }
+
+ copy_func ((void**)&object);
+
+ array = (MonoArray*)object;
+ cur = mono_array_addr (array, Ephemeron, 0);
+ array_end = cur + mono_array_length_fast (array);
+ tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
+
+ for (; cur < array_end; ++cur) {
+ char *key = cur->key;
+
+ if (!key || key == tombstone)
+ continue;
+
+ DEBUG (5, fprintf (gc_debug_file, "[%d] key %p (%s) value %p (%s)\n", cur - mono_array_addr (array, Ephemeron, 0),
+ key, object_is_reachable (key, start, end) ? "reachable" : "unreachable",
+ cur->value, cur->value && object_is_reachable (cur->value, start, end) ? "reachable" : "unreachable"));
+
+ if (object_is_reachable (key, start, end)) {
+ char *value = cur->value;
+
+ copy_func ((void**)&cur->key);
+ if (value) {
+ if (!object_is_reachable (value, start, end))
+ nothing_marked = 0;
+ copy_func ((void**)&cur->value);
+ }
+ }
+ }
+ }
+
+ DEBUG (5, fprintf (gc_debug_file, "Ephemeron run finished. Is it done %d\n", nothing_marked));
+ return nothing_marked;
+}
+
/* LOCKING: requires that the GC lock is held */
static void
-null_link_in_range (char *start, char *end, int generation)
+null_link_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation)
{
DisappearingLinkHashTable *hash = get_dislink_hash_table (generation);
DisappearingLink **disappearing_link_hash = hash->table;
prev = NULL;
for (entry = disappearing_link_hash [i]; entry;) {
char *object = DISLINK_OBJECT (entry);
- if (object >= start && object < end && !object_is_in_to_space (object)) {
+ if (object >= start && object < end && !major_is_object_live (object)) {
gboolean track = DISLINK_TRACK (entry);
if (!track && object_is_fin_ready (object)) {
void **p = entry->link;
hash->num_links--;
continue;
} else {
- char *copy = copy_object (object, start, end);
+ char *copy = object;
+ copy_func ((void**)©);
/* Update pointer if it's moved. If the object
* has been moved out of the nursery, we need to
prev = NULL;
for (entry = disappearing_link_hash [i]; entry; ) {
char *object = DISLINK_OBJECT (entry);
- /* FIXME: actually there should be no object
- left in the domain with a non-null vtable
- (provided we remove the Thread special
- case) */
- if (object && (!((MonoObject*)object)->vtable || mono_object_domain (object) == domain)) {
+ if (object && !((MonoObject*)object)->vtable) {
DisappearingLink *next = entry->next;
if (prev)
*/
//#define XDOMAIN_CHECKS_IN_WBARRIER
+#ifndef BINARY_PROTOCOL
#ifndef HEAVY_STATISTICS
#define MANAGED_ALLOCATION
#ifndef XDOMAIN_CHECKS_IN_WBARRIER
#define MANAGED_WBARRIER
#endif
#endif
+#endif
static gboolean
is_ip_in_managed_allocator (MonoDomain *domain, gpointer ip);
void*
mono_gc_scan_object (void *obj)
{
- return copy_object (obj, scan_area_arg_start, scan_area_arg_end);
+ if (current_collection_generation == GENERATION_NURSERY)
+ copy_object (&obj);
+ else
+ major_copy_or_mark_object (&obj);
+ return obj;
}
-
+
/*
* Mark from thread stacks and registers.
*/
for (i = 0; i < THREAD_HASH_SIZE; ++i) {
for (info = thread_table [i]; info; info = info->next) {
if (info->skip) {
- DEBUG (2, fprintf (gc_debug_file, "Skipping dead thread %p, range: %p-%p, size: %zd\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start));
+ DEBUG (3, fprintf (gc_debug_file, "Skipping dead thread %p, range: %p-%p, size: %zd\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start));
continue;
}
DEBUG (3, fprintf (gc_debug_file, "Scanning thread %p, range: %p-%p, size: %zd, pinned=%d\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, next_pin_slot));
//__builtin_prefetch (ptr);
if (((void*)ptr < start_nursery || (void*)ptr >= end_nursery)) {
gpointer old = *ptr;
- *ptr = copy_object (*ptr, start_nursery, end_nursery);
+ copy_object (ptr);
DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p\n", ptr, *ptr));
if (old)
binary_protocol_ptr_update (ptr, old, *ptr, (gpointer)LOAD_VTABLE (*ptr), safe_object_get_size (*ptr));
* becomes part of the global remset, which can grow very large.
*/
DEBUG (9, fprintf (gc_debug_file, "Add to global remset because of pinning %p (%p %s)\n", ptr, *ptr, safe_name (*ptr)));
- add_to_global_remset (ptr, FALSE);
+ add_to_global_remset (ptr);
}
} else {
DEBUG (9, fprintf (gc_debug_file, "Skipping remset at %p holding %p\n", ptr, *ptr));
return p + 2;
count = p [1];
while (count-- > 0) {
- *ptr = copy_object (*ptr, start_nursery, end_nursery);
+ copy_object (ptr);
DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p (count: %d)\n", ptr, *ptr, (int)count));
if (!global && *ptr >= start_nursery && *ptr < end_nursery)
- add_to_global_remset (ptr, FALSE);
+ add_to_global_remset (ptr);
++ptr;
}
return p + 2;
ptr = (void**)(*p & ~REMSET_TYPE_MASK);
if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
return p + 1;
- scan_object ((char*)ptr, start_nursery, end_nursery);
+ scan_object ((char*)ptr);
return p + 1;
- case REMSET_OTHER: {
+ case REMSET_VTYPE: {
ptr = (void**)(*p & ~REMSET_TYPE_MASK);
-
- switch (p [1]) {
- case REMSET_VTYPE:
- if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
- return p + 4;
- desc = p [2];
- count = p [3];
- while (count-- > 0)
- ptr = (void**) scan_vtype ((char*)ptr, desc, start_nursery, end_nursery);
- return p + 4;
- case REMSET_ROOT_LOCATION:
- /* Same as REMSET_LOCATION, but the address is not required to be in the heap */
- *ptr = copy_object (*ptr, start_nursery, end_nursery);
- DEBUG (9, fprintf (gc_debug_file, "Overwrote root location remset at %p with %p\n", ptr, *ptr));
- if (!global && *ptr >= start_nursery && *ptr < end_nursery) {
- /*
- * If the object is pinned, each reference to it from nonpinned objects
- * becomes part of the global remset, which can grow very large.
- */
- DEBUG (9, fprintf (gc_debug_file, "Add to global remset because of pinning %p (%p %s)\n", ptr, *ptr, safe_name (*ptr)));
- add_to_global_remset (ptr, TRUE);
- }
- return p + 2;
- default:
- g_assert_not_reached ();
- }
- break;
+ if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
+ return p + 3;
+ desc = p [1];
+ count = p [2];
+ while (count-- > 0)
+ ptr = (void**) scan_vtype ((char*)ptr, desc, start_nursery, end_nursery);
+ return p + 3;
}
default:
g_assert_not_reached ();
case REMSET_OBJECT:
p += 1;
break;
- case REMSET_OTHER:
- switch (p [1]) {
- case REMSET_VTYPE:
- p += 4;
- break;
- case REMSET_ROOT_LOCATION:
- p += 2;
- break;
- default:
- g_assert_not_reached ();
- }
+ case REMSET_VTYPE:
+ p += 3;
break;
default:
g_assert_not_reached ();
DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %zd\n", remset->data, remset->store_next, remset->store_next - remset->data));
store_pos = remset->data;
for (p = remset->data; p < remset->store_next; p = next_p) {
- mword ptr;
+ void **ptr = p [0];
+
+ /*Ignore previously processed remset.*/
+ if (!global_remset_location_was_not_added (ptr)) {
+ next_p = p + 1;
+ continue;
+ }
next_p = handle_remset (p, start_nursery, end_nursery, TRUE);
* Clear global remsets of locations which no longer point to the
* nursery. Otherwise, they could grow indefinitely between major
* collections.
+ *
+ * Since all global remsets are location remsets, we don't need to unmask the pointer.
*/
- ptr = (p [0] & ~REMSET_TYPE_MASK);
- if ((p [0] & REMSET_TYPE_MASK) == REMSET_LOCATION) {
- if (ptr_in_nursery (*(void**)ptr))
- *store_pos ++ = p [0];
- } else {
- g_assert ((p [0] & REMSET_TYPE_MASK) == REMSET_OTHER);
- g_assert (p [1] == REMSET_ROOT_LOCATION);
- if (ptr_in_nursery (*(void**)ptr)) {
- *store_pos ++ = p [0];
- *store_pos ++ = p [1];
- }
+ if (ptr_in_nursery (*ptr)) {
+ *store_pos ++ = p [0];
+ HEAVY_STAT (++stat_global_remsets_readded);
}
}
next = remset->next;
remset->next = NULL;
if (remset != info->remset) {
- DEBUG (1, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
+ DEBUG (3, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
free_internal_mem (remset, INTERNAL_MEM_REMSET);
}
}
static char *found_obj;
static void
-find_object_for_ptr_in_pinned_chunk_callback (PinnedChunk *chunk, char *obj, size_t size, char *ptr)
+find_object_for_ptr_callback (char *obj, size_t size, char *ptr)
{
if (ptr >= obj && ptr < obj + size) {
g_assert (!found_obj);
char*
find_object_for_ptr (char *ptr)
{
- GCMemSection *section;
LOSObject *bigobj;
- for (section = section_list; section; section = section->block.next) {
- if (ptr >= section->data && ptr < section->end_data)
- return find_object_for_ptr_in_area (ptr, section->data, section->end_data);
- }
+ if (ptr >= nursery_section->data && ptr < nursery_section->end_data)
+ return find_object_for_ptr_in_area (ptr, nursery_section->data, nursery_section->end_data);
for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
if (ptr >= bigobj->data && ptr < bigobj->data + bigobj->size)
return bigobj->data;
}
+ /*
+ * Very inefficient, but this is debugging code, supposed to
+ * be called from gdb, so we don't care.
+ */
found_obj = NULL;
- scan_pinned_objects ((ScanPinnedObjectCallbackFunc)find_object_for_ptr_in_pinned_chunk_callback, ptr);
+ major_iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)find_object_for_ptr_callback, ptr);
return found_obj;
}
mono_gc_wbarrier_generic_nostore (ptr);
}
-void
-mono_gc_wbarrier_set_root (gpointer ptr, MonoObject *value)
-{
- RememberedSet *rs;
- TLAB_ACCESS_INIT;
- HEAVY_STAT (++stat_wbarrier_set_root);
- if (ptr_in_nursery (ptr))
- return;
- DEBUG (8, fprintf (gc_debug_file, "Adding root remset at %p (%s)\n", ptr, value ? safe_name (value) : "null"));
-
- rs = REMEMBERED_SET;
- if (rs->store_next + 2 < rs->end_set) {
- *(rs->store_next++) = (mword)ptr | REMSET_OTHER;
- *(rs->store_next++) = (mword)REMSET_ROOT_LOCATION;
- *(void**)ptr = value;
- return;
- }
- rs = alloc_remset (rs->end_set - rs->data, (void*)1);
- rs->next = REMEMBERED_SET;
- REMEMBERED_SET = rs;
-#ifdef HAVE_KW_THREAD
- thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
-#endif
- *(rs->store_next++) = (mword)ptr | REMSET_OTHER;
- *(rs->store_next++) = (mword)REMSET_ROOT_LOCATION;
-
- *(void**)ptr = value;
-}
-
void
mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
{
DEBUG (8, fprintf (gc_debug_file, "Adding value remset at %p, count %d, descr %p for class %s (%p)\n", dest, count, klass->gc_descr, klass->name, klass));
if (rs->store_next + 3 < rs->end_set) {
- *(rs->store_next++) = (mword)dest | REMSET_OTHER;
- *(rs->store_next++) = (mword)REMSET_VTYPE;
+ *(rs->store_next++) = (mword)dest | REMSET_VTYPE;
*(rs->store_next++) = (mword)klass->gc_descr;
*(rs->store_next++) = (mword)count;
UNLOCK_GC;
#ifdef HAVE_KW_THREAD
thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
#endif
- *(rs->store_next++) = (mword)dest | REMSET_OTHER;
- *(rs->store_next++) = (mword)REMSET_VTYPE;
+ *(rs->store_next++) = (mword)dest | REMSET_VTYPE;
*(rs->store_next++) = (mword)klass->gc_descr;
*(rs->store_next++) = (mword)count;
UNLOCK_GC;
TLAB_ACCESS_INIT;
HEAVY_STAT (++stat_wbarrier_object_copy);
rs = REMEMBERED_SET;
- DEBUG (1, fprintf (gc_debug_file, "Adding object remset for %p\n", obj));
+ DEBUG (6, fprintf (gc_debug_file, "Adding object remset for %p\n", obj));
size = mono_object_class (obj)->instance_size;
LOCK_GC;
/* do not copy the sync state */
void
describe_ptr (char *ptr)
{
- GCMemSection *section;
MonoVTable *vtable;
mword desc;
int type;
if (ptr_in_nursery (ptr)) {
printf ("Pointer inside nursery.\n");
} else {
- for (section = section_list; section;) {
- if (ptr >= section->data && ptr < section->data + section->size)
- break;
- section = section->block.next;
- }
-
- if (section) {
+ if (major_ptr_is_in_non_pinned_space (ptr)) {
printf ("Pointer inside oldspace.\n");
} else if (obj_is_from_pinned_alloc (ptr)) {
printf ("Pointer is inside a pinned chunk.\n");
if ((void**)addr >= ptr && (void**)addr < ptr + count)
*found = TRUE;
return p + 1;
- case REMSET_OTHER: {
- switch (p [1]) {
- case REMSET_VTYPE:
- ptr = (void**)(*p & ~REMSET_TYPE_MASK);
- desc = p [2];
- count = p [3];
-
- switch (desc & 0x7) {
- case DESC_TYPE_RUN_LENGTH:
- OBJ_RUN_LEN_SIZE (skip_size, desc, ptr);
- break;
- case DESC_TYPE_SMALL_BITMAP:
- OBJ_BITMAP_SIZE (skip_size, desc, start);
- break;
- default:
- // FIXME:
- g_assert_not_reached ();
- }
-
- /* The descriptor includes the size of MonoObject */
- skip_size -= sizeof (MonoObject);
- skip_size *= count;
- if ((void**)addr >= ptr && (void**)addr < ptr + (skip_size / sizeof (gpointer)))
- *found = TRUE;
+ case REMSET_VTYPE:
+ ptr = (void**)(*p & ~REMSET_TYPE_MASK);
+ desc = p [1];
+ count = p [2];
- return p + 4;
- case REMSET_ROOT_LOCATION:
- return p + 2;
+ switch (desc & 0x7) {
+ case DESC_TYPE_RUN_LENGTH:
+ OBJ_RUN_LEN_SIZE (skip_size, desc, ptr);
+ break;
+ case DESC_TYPE_SMALL_BITMAP:
+ OBJ_BITMAP_SIZE (skip_size, desc, start);
+ break;
default:
+ // FIXME:
g_assert_not_reached ();
}
- break;
- }
+
+ /* The descriptor includes the size of MonoObject */
+ skip_size -= sizeof (MonoObject);
+ skip_size *= count;
+ if ((void**)addr >= ptr && (void**)addr < ptr + (skip_size / sizeof (gpointer)))
+ *found = TRUE;
+
+ return p + 3;
default:
g_assert_not_reached ();
}
} while (0)
/*
- * Check that each object reference inside the area which points into the nursery
- * can be found in the remembered sets.
+ * Check that each object reference which points into the nursery can
+ * be found in the remembered sets.
*/
/*
 * Per-object callback for check_consistency(), invoked via
 * major_iterate_objects() for every live major-heap object.
 * The actual reference scan is produced by including
 * sgen-scan-object.h with an empty SCAN_OBJECT_ACTION; presumably the
 * generated scan code flags nursery references that are missing from
 * the remembered sets (see missing_remsets in check_consistency) --
 * the checking macro itself is defined outside this view.
 */
static void
check_consistency_callback (char *start, size_t size, void *dummy)
{
	GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
	DEBUG (8, fprintf (gc_debug_file, "Scanning object %p, vtable: %p (%s)\n", start, vt, vt->klass->name));

#define SCAN_OBJECT_ACTION
#include "sgen-scan-object.h"
}
/*
*
* Assumes the world is stopped.
*/
-void
+static void
check_consistency (void)
{
- GCMemSection *section;
-
// Need to add more checks
- // FIXME: Create a general heap enumeration function and use that
missing_remsets = FALSE;
DEBUG (1, fprintf (gc_debug_file, "Begin heap consistency check...\n"));
// Check that oldspace->newspace pointers are registered with the collector
- for (section = section_list; section; section = section->block.next) {
- if (section->block.role == MEMORY_ROLE_GEN0)
- continue;
- DEBUG (2, fprintf (gc_debug_file, "Scan of old section: %p-%p, size: %d\n", section->data, section->next_data, (int)(section->next_data - section->data)));
- check_remsets_for_area (section->data, section->next_data);
- }
+ major_iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)check_consistency_callback, NULL);
DEBUG (1, fprintf (gc_debug_file, "Heap consistency check done.\n"));
return num_major_gcs;
}
/*
 * Returns an approximation of the number of bytes currently in use by
 * managed allocations: LOS usage, the consumed part of the nursery,
 * and the major heap's used size.  Pinned objects are not yet
 * accounted for (see FIXME).  Takes the GC lock, so this can block
 * while a collection is in progress.
 */
int64_t
mono_gc_get_used_size (void)
{
	gint64 tot = 0;
	LOCK_GC;
	tot = los_memory_usage;
	/* nursery usage is approximate: next_data only moves forward between collections */
	tot += nursery_section->next_data - nursery_section->data;
	tot += major_get_used_size ();
	/* FIXME: account for pinned objects */
	UNLOCK_GC;
	return tot;
}
/*
 * Returns the collector's total allocated heap size in bytes, as
 * tracked by total_alloc -- i.e. memory reserved by the GC, not just
 * the bytes occupied by live objects (compare mono_gc_get_used_size).
 */
int64_t
mono_gc_get_heap_size (void)
{
	return total_alloc;
return (MonoObject*) REVEAL_POINTER (*link_addr);
}
/*
 * Registers an ephemeron array with the collector by prepending a
 * link node to the global ephemeron_list, under the GC lock.
 *
 * @obj: the ephemeron array object (stored as char* in the node).
 *
 * Returns TRUE on success, FALSE if the internal link node could not
 * be allocated.  Note: no duplicate check is performed, so adding the
 * same array twice creates two list entries.
 */
gboolean
mono_gc_ephemeron_array_add (MonoObject *obj)
{
	EphemeronLinkNode *node;

	LOCK_GC;

	/* Allocation failure is reported to the caller instead of aborting. */
	node = get_internal_mem (sizeof (EphemeronLinkNode), INTERNAL_MEM_EPHEMERON_LINK);
	if (!node) {
		UNLOCK_GC;
		return FALSE;
	}
	node->array = (char*)obj;
	node->next = ephemeron_list;
	ephemeron_list = node;

	DEBUG (5, fprintf (gc_debug_file, "Registered ephemeron array %p\n", obj));

	UNLOCK_GC;
	return TRUE;
}
+
void*
mono_gc_make_descr_from_bitmap (gsize *bitmap, int numbits)
{
}
void*
-mono_gc_make_root_descr_user (MonoGCMarkFunc marker)
+mono_gc_make_root_descr_user (MonoGCRootMarkFunc marker)
{
void *descr;
return result;
}
+#ifdef USER_CONFIG
+
+/* Tries to extract a number from the passed string, taking in to account m, k
+ * and g suffixes */
+static gboolean
+parse_environment_string_extract_number (gchar *str, glong *out)
+{
+ char *endptr;
+ int len = strlen (str), shift = 0;
+ glong val;
+ gboolean is_suffix = FALSE;
+ char suffix;
+
+ switch (str [len - 1]) {
+ case 'g':
+ case 'G':
+ shift += 10;
+ case 'm':
+ case 'M':
+ shift += 10;
+ case 'k':
+ case 'K':
+ shift += 10;
+ is_suffix = TRUE;
+ suffix = str [len - 1];
+ break;
+ }
+
+ errno = 0;
+ val = strtol (str, &endptr, 10);
+
+ if ((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN))
+ || (errno != 0 && val == 0) || (endptr == str))
+ return FALSE;
+
+ if (is_suffix) {
+ if (*(endptr + 1)) /* Invalid string. */
+ return FALSE;
+ val <<= shift;
+ }
+
+ *out = val;
+ return TRUE;
+}
+
+#endif
+
void
mono_gc_base_init (void)
{
}
pagesize = mono_pagesize ();
gc_debug_file = stderr;
+
+#ifdef USER_CONFIG
+
+ if ((env = getenv ("MONO_GC_PARAMS"))) {
+ if (g_str_has_prefix (env, "nursery-size")) {
+ int index = 0;
+ long val;
+ while (env [index] && env [index++] != '=')
+ ;
+ if (env [index] && parse_environment_string_extract_number (env
+ + index, &val)) {
+ default_nursery_size = val;
+#ifdef ALIGN_NURSERY
+ if ((val & (val - 1))) {
+ fprintf (stderr, "The nursery size must be a power of two.\n");
+ exit (1);
+ }
+
+ default_nursery_bits = 0;
+ while (1 << (++ default_nursery_bits) != default_nursery_size)
+ ;
+#endif
+ } else {
+ fprintf (stderr, "nursery-size must be an integer.\n");
+ exit (1);
+ }
+ } else {
+ fprintf (stderr, "MONO_GC_PARAMS must be of the form 'nursery-size=N' (where N is an integer, possibly with a k, m or a g suffix).\n");
+ exit (1);
+ }
+ }
+
+#endif
+
+ nursery_size = DEFAULT_NURSERY_SIZE;
+
+ major_init ();
+
if ((env = getenv ("MONO_GC_DEBUG"))) {
opts = g_strsplit (env, ",", -1);
for (ptr = opts; ptr && *ptr; ptr ++) {