assigned TLABs and if that's more than some percentage of the
nursery size, reduce the TLAB size.
+ *) Explore placing unreachable objects on unused nursery memory.
+ Instead of memset'ing a region to zero, place an int[] covering it.
+ A good place to start is add_nursery_frag. The tricky thing here is
+ placing those objects atomically outside of a collection.
+
+
*/
#include "config.h"
#ifdef HAVE_SGEN_GC
#include <signal.h>
#include <errno.h>
#include <assert.h>
+#ifdef __MACH__
+#undef _XOPEN_SOURCE
+#endif
#include <pthread.h>
+#ifdef __MACH__
+#define _XOPEN_SOURCE
+#endif
#include "metadata/metadata-internals.h"
#include "metadata/class-internals.h"
#include "metadata/gc-internal.h"
#include <mono/utils/memcheck.h>
+#if defined(__MACH__)
+#include "utils/mach-support.h"
+#endif
+
#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
a = i,
* ######## Types and constants used by the GC.
* ######################################################################
*/
-#if SIZEOF_VOID_P == 4
-typedef guint32 mword;
-#else
-typedef guint64 mword;
-#endif
static int gc_initialized = 0;
static int gc_debug_level = 0;
/* If not null, dump the heap after each collection into this file */
static FILE *heap_dump_file = NULL;
/* If set, mark stacks conservatively, even if precise marking is possible */
-static gboolean conservative_stack_mark = FALSE;
+static gboolean conservative_stack_mark = TRUE;
/* If set, do a plausibility check on the scan_starts before and after
each collection */
static gboolean do_scan_starts_check = FALSE;
#endif
#ifdef HEAVY_STATISTICS
-static long stat_objects_alloced = 0;
-static long stat_bytes_alloced = 0;
-static long stat_objects_alloced_degraded = 0;
-static long stat_bytes_alloced_degraded = 0;
-static long stat_bytes_alloced_los = 0;
-static long stat_copy_object_called_nursery = 0;
-static long stat_objects_copied_nursery = 0;
-static long stat_copy_object_called_major = 0;
-static long stat_objects_copied_major = 0;
-
-static long stat_copy_object_failed_from_space = 0;
-static long stat_copy_object_failed_forwarded = 0;
-static long stat_copy_object_failed_pinned = 0;
-static long stat_copy_object_failed_large_pinned = 0;
-static long stat_copy_object_failed_to_space = 0;
-
-static long stat_store_remsets = 0;
-static long stat_store_remsets_unique = 0;
-static long stat_saved_remsets_1 = 0;
-static long stat_saved_remsets_2 = 0;
-static long stat_global_remsets_added = 0;
-static long stat_global_remsets_processed = 0;
-
-static long num_copy_object_called = 0;
-static long num_objects_copied = 0;
+static long long stat_objects_alloced = 0;
+static long long stat_bytes_alloced = 0;
+static long long stat_objects_alloced_degraded = 0;
+static long long stat_bytes_alloced_degraded = 0;
+static long long stat_bytes_alloced_los = 0;
+
+static long long stat_copy_object_called_nursery = 0;
+static long long stat_objects_copied_nursery = 0;
+static long long stat_copy_object_called_major = 0;
+static long long stat_objects_copied_major = 0;
+
+static long long stat_scan_object_called_nursery = 0;
+static long long stat_scan_object_called_major = 0;
+
+static long long stat_nursery_copy_object_failed_from_space = 0;
+static long long stat_nursery_copy_object_failed_forwarded = 0;
+static long long stat_nursery_copy_object_failed_pinned = 0;
+
+static long long stat_store_remsets = 0;
+static long long stat_store_remsets_unique = 0;
+static long long stat_saved_remsets_1 = 0;
+static long long stat_saved_remsets_2 = 0;
+static long long stat_global_remsets_added = 0;
+static long long stat_global_remsets_readded = 0;
+static long long stat_global_remsets_processed = 0;
+static long long stat_global_remsets_discarded = 0;
+
+static long long stat_wasted_fragments_used = 0;
+static long long stat_wasted_fragments_bytes = 0;
static int stat_wbarrier_set_field = 0;
static int stat_wbarrier_set_arrayref = 0;
static int stat_wbarrier_object_copy = 0;
#endif
-static long time_minor_pre_collection_fragment_clear = 0;
-static long time_minor_pinning = 0;
-static long time_minor_scan_remsets = 0;
-static long time_minor_scan_pinned = 0;
-static long time_minor_scan_registered_roots = 0;
-static long time_minor_scan_thread_data = 0;
-static long time_minor_scan_alloc_pinned = 0;
-static long time_minor_finish_gray_stack = 0;
-static long time_minor_fragment_creation = 0;
-
-static long time_major_pre_collection_fragment_clear = 0;
-static long time_major_pinning = 0;
-static long time_major_scan_pinned = 0;
-static long time_major_scan_registered_roots = 0;
-static long time_major_scan_thread_data = 0;
-static long time_major_scan_alloc_pinned = 0;
-static long time_major_scan_finalized = 0;
-static long time_major_scan_big_objects = 0;
-static long time_major_finish_gray_stack = 0;
-static long time_major_sweep = 0;
-static long time_major_fragment_creation = 0;
-
-static long pinned_chunk_bytes_alloced = 0;
-static long large_internal_bytes_alloced = 0;
-
+static long long time_minor_pre_collection_fragment_clear = 0;
+static long long time_minor_pinning = 0;
+static long long time_minor_scan_remsets = 0;
+static long long time_minor_scan_pinned = 0;
+static long long time_minor_scan_registered_roots = 0;
+static long long time_minor_scan_thread_data = 0;
+static long long time_minor_finish_gray_stack = 0;
+static long long time_minor_fragment_creation = 0;
+
+static long long time_major_pre_collection_fragment_clear = 0;
+static long long time_major_pinning = 0;
+static long long time_major_scan_pinned = 0;
+static long long time_major_scan_registered_roots = 0;
+static long long time_major_scan_thread_data = 0;
+static long long time_major_scan_alloc_pinned = 0;
+static long long time_major_scan_finalized = 0;
+static long long time_major_scan_big_objects = 0;
+static long long time_major_finish_gray_stack = 0;
+static long long time_major_free_bigobjs = 0;
+static long long time_major_los_sweep = 0;
+static long long time_major_sweep = 0;
+static long long time_major_fragment_creation = 0;
+
+static long long pinned_chunk_bytes_alloced = 0;
+static long long large_internal_bytes_alloced = 0;
+
+/* Keep in sync with internal_mem_names in dump_heap()! */
enum {
INTERNAL_MEM_PIN_QUEUE,
INTERNAL_MEM_FRAGMENT,
INTERNAL_MEM_REMSET,
INTERNAL_MEM_GRAY_QUEUE,
INTERNAL_MEM_STORE_REMSET,
+ INTERNAL_MEM_MS_TABLES,
+ INTERNAL_MEM_MS_BLOCK_INFO,
+ INTERNAL_MEM_EPHEMERON_LINK,
INTERNAL_MEM_MAX
};
}
*/
-#define MAX_DEBUG_LEVEL 2
-#define DEBUG(level,a) do {if (G_UNLIKELY ((level) <= MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) a;} while (0)
-
/* Define this to allow the user to change some of the constants by specifying
* their values in the MONO_GC_PARAMS environmental variable. See
* mono_gc_base_init for details. */
#define SIZEOF_GC_MEM_SECTION ((sizeof (GCMemSection) + 7) & ~7)
-/* large object space struct: 64+ KB */
-/* we could make this limit much smaller to avoid memcpy copy
- * and potentially have more room in the GC descriptor: need to measure
- * This also means that such small OS objects will need to be
- * allocated in a different way (using pinned chunks).
- * We may want to put large but smaller than 64k objects in the fixed space
- * when we move the object from one generation to another (to limit the
- * pig in the snake effect).
- * Note: it may be worth to have an optimized copy function, since we can
- * assume that objects are aligned and have a multiple of 8 size.
- * FIXME: This structure needs to be a multiple of 8 bytes in size: this is not
- * true if MONO_ZERO_LEN_ARRAY is nonzero.
- */
-typedef struct _LOSObject LOSObject;
-struct _LOSObject {
- LOSObject *next;
- mword size; /* this is the object size */
- int dummy; /* to have a sizeof (LOSObject) a multiple of ALLOC_ALIGN and data starting at same alignment */
- guint16 role;
- guint16 scanned;
- char data [MONO_ZERO_LEN_ARRAY];
-};
-
/* Pinned objects are allocated in the LOS space if bigger than half a page
* or from freelists otherwise. We assume that pinned objects are relatively few
* and they have a slow dying speed (like interned strings, thread objects).
* reference-free objects.
*/
#define PINNED_FIRST_SLOT_SIZE (sizeof (gpointer) * 4)
-#define MAX_FREELIST_SIZE 2048
-#define PINNED_PAGE_SIZE (4096)
-#define PINNED_CHUNK_MIN_SIZE (4096*8)
+#define MAX_FREELIST_SIZE 8192
typedef struct _PinnedChunk PinnedChunk;
struct _PinnedChunk {
Block block;
mword root_desc;
};
-/* for use with write barriers */
-typedef struct _RememberedSet RememberedSet;
-struct _RememberedSet {
- mword *store_next;
- mword *end_set;
- RememberedSet *next;
- mword data [MONO_ZERO_LEN_ARRAY];
-};
-
/*
* We're never actually using the first element. It's always set to
* NULL to simplify the elimination of consecutive duplicate
REMSET_LOCATION, /* just a pointer to the exact location */
REMSET_RANGE, /* range of pointer fields */
REMSET_OBJECT, /* mark all the object for scanning */
- REMSET_OTHER, /* all others */
+ REMSET_VTYPE, /* a valuetype array described by a gc descriptor and a count */
REMSET_TYPE_MASK = 0x3
};
-/* Subtypes of REMSET_OTHER */
-enum {
- REMSET_VTYPE, /* a valuetype array described by a gc descriptor and a count */
- REMSET_ROOT_LOCATION, /* a location inside a root */
-};
-
#ifdef HAVE_KW_THREAD
static __thread RememberedSet *remembered_set MONO_TLS_FAST;
#endif
static pthread_key_t remembered_set_key;
static RememberedSet *global_remset;
static RememberedSet *freed_thread_remsets;
-//static int store_to_global_remset = 0;
static GenericStoreRememberedSet *generic_store_remsets = NULL;
+/* A two-slot cache for recently inserted remsets */
+static gpointer global_remset_cache [2];
+
/* FIXME: later choose a size that takes into account the RememberedSet struct
* and doesn't waste any alloc paddin space.
*/
{
MonoClass *klass = ((MonoVTable*)LOAD_VTABLE (o))->klass;
if (klass == mono_defaults.string_class) {
- return sizeof (MonoString) + 2 * mono_string_length ((MonoString*) o) + 2;
+ return sizeof (MonoString) + 2 * mono_string_length_fast ((MonoString*) o) + 2;
} else if (klass->rank) {
MonoArray *array = (MonoArray*)o;
- size_t size = sizeof (MonoArray) + klass->sizes.element_size * mono_array_length (array);
+ size_t size = sizeof (MonoArray) + klass->sizes.element_size * mono_array_length_fast (array);
if (G_UNLIKELY (array->bounds)) {
size += sizeof (mono_array_size_t) - 1;
size &= ~(sizeof (mono_array_size_t) - 1);
#ifdef USER_CONFIG
/* good sizes are 512KB-1MB: larger ones increase a lot memzeroing time */
-//#define DEFAULT_NURSERY_SIZE (1024*512*125+4096*118)
#define DEFAULT_NURSERY_SIZE (default_nursery_size)
-static int default_nursery_size = (1 << 20);
+static int default_nursery_size = (1 << 22);
#ifdef ALIGN_NURSERY
/* The number of trailing 0 bits in DEFAULT_NURSERY_SIZE */
#define DEFAULT_NURSERY_BITS (default_nursery_bits)
-static int default_nursery_bits = 20;
+static int default_nursery_bits = 22;
#endif
#else
-#define DEFAULT_NURSERY_SIZE (1024*512*2)
+#define DEFAULT_NURSERY_SIZE (4*1024*1024)
#ifdef ALIGN_NURSERY
-#define DEFAULT_NURSERY_BITS 20
+#define DEFAULT_NURSERY_BITS 22
#endif
#endif
-#define MAJOR_SECTION_SIZE (128*1024)
-#define BLOCK_FOR_OBJECT(o) ((Block*)(((mword)(o)) & ~(MAJOR_SECTION_SIZE - 1)))
-#define MAJOR_SECTION_FOR_OBJECT(o) ((GCMemSection*)BLOCK_FOR_OBJECT ((o)))
-#define MIN_MINOR_COLLECTION_SECTION_ALLOWANCE (DEFAULT_NURSERY_SIZE * 3 / MAJOR_SECTION_SIZE)
-#define MIN_LOS_ALLOWANCE (DEFAULT_NURSERY_SIZE * 2)
+#define MIN_MINOR_COLLECTION_ALLOWANCE (DEFAULT_NURSERY_SIZE * 4)
/* to quickly find the head of an object pinned by a conservative address
* we keep track of the objects allocated for each SCAN_START_SIZE memory
* chunk in the nursery or other memory sections. Larger values have less
/* the minimum size of a fragment that we consider useful for allocation */
#define FRAGMENT_MIN_SIZE (512)
/* This is a fixed value used for pinned chunks, not the system pagesize */
-#define FREELIST_PAGESIZE 4096
+#define FREELIST_PAGESIZE (16*1024)
static mword pagesize = 4096;
static mword nursery_size;
static int degraded_mode = 0;
-static int minor_collection_section_allowance;
-static int minor_collection_sections_alloced = 0;
-static int num_major_sections = 0;
-
-static LOSObject *los_object_list = NULL;
-static mword los_memory_usage = 0;
-static mword los_num_objects = 0;
-static mword next_los_collection = 2*1024*1024; /* 2 MB, need to tune */
static mword total_alloc = 0;
/* use this to tune when to do a major/minor collection */
static mword memory_pressure = 0;
+static int minor_collection_allowance;
+static int minor_collection_sections_alloced = 0;
-static GCMemSection *section_list = NULL;
static GCMemSection *nursery_section = NULL;
static mword lowest_heap_address = ~(mword)0;
static mword highest_heap_address = 0;
int num_links;
};
+typedef struct _EphemeronLinkNode EphemeronLinkNode;
+
+struct _EphemeronLinkNode {
+ EphemeronLinkNode *next;
+ char *array;
+};
+
+typedef struct {
+ void *key;
+ void *value;
+} Ephemeron;
+
#define LARGE_INTERNAL_MEM_HEADER_MAGIC 0x7d289f3a
typedef struct _LargeInternalMemHeader LargeInternalMemHeader;
GENERATION_MAX
};
+int current_collection_generation = -1;
+
/*
* The link pointer is hidden by negating each bit. We use the lowest
* bit of the link (before negation) to store whether it needs
static DisappearingLinkHashTable minor_disappearing_link_hash;
static DisappearingLinkHashTable major_disappearing_link_hash;
+static EphemeronLinkNode *ephemeron_list;
+
static int num_ready_finalizers = 0;
static int no_finalize = 0;
static const int freelist_sizes [] = {
8, 16, 24, 32, 40, 48, 64, 80,
96, 128, 160, 192, 224, 256, 320, 384,
- 448, 512, 584, 680, 816, 1024, 1360, 2048};
+ 448, 512, 584, 680, 816, 1024, 1360, 2048,
+ 2336, 2728, 3272, 4096, 5456, 8192 };
#define FREELIST_NUM_SLOTS (sizeof (freelist_sizes) / sizeof (freelist_sizes [0]))
-static char* max_pinned_chunk_addr = NULL;
-static char* min_pinned_chunk_addr = (char*)-1;
-/* pinned_chunk_list is used for allocations of objects that are never moved */
-static PinnedChunk *pinned_chunk_list = NULL;
+/* This is also the MAJOR_SECTION_SIZE for the copying major
+ collector */
+#define PINNED_CHUNK_SIZE (128 * 1024)
+
/* internal_chunk_list is used for allocating structures needed by the GC */
static PinnedChunk *internal_chunk_list = NULL;
-static gboolean
-obj_is_from_pinned_alloc (char *p)
-{
- return BLOCK_FOR_OBJECT (p)->role == MEMORY_ROLE_PINNED;
-}
-
static int slot_for_size (size_t size);
-static void
-free_pinned_object (PinnedChunk *chunk, char *obj, size_t size)
-{
- void **p = (void**)obj;
- int slot = slot_for_size (size);
-
- g_assert (obj >= (char*)chunk->start_data && obj < ((char*)chunk + chunk->num_pages * FREELIST_PAGESIZE));
- *p = chunk->free_list [slot];
- chunk->free_list [slot] = p;
-}
-
enum {
ROOT_TYPE_NORMAL = 0, /* "normal" roots */
ROOT_TYPE_PINNED = 1, /* roots without a GC descriptor */
*/
static char *nursery_start = NULL;
-/* eventually share with MonoThread? */
-typedef struct _SgenThreadInfo SgenThreadInfo;
-
-struct _SgenThreadInfo {
- SgenThreadInfo *next;
- ARCH_THREAD_TYPE id;
- unsigned int stop_count; /* to catch duplicate signals */
- int signal;
- int skip;
- volatile int in_critical_region;
- void *stack_end;
- void *stack_start;
- void *stack_start_limit;
- char **tlab_next_addr;
- char **tlab_start_addr;
- char **tlab_temp_end_addr;
- char **tlab_real_end_addr;
- gpointer **store_remset_buffer_addr;
- long *store_remset_buffer_index_addr;
- RememberedSet *remset;
- gpointer runtime_data;
- gpointer stopped_ip; /* only valid if the thread is stopped */
- MonoDomain *stopped_domain; /* ditto */
- gpointer *stopped_regs; /* ditto */
-#ifndef HAVE_KW_THREAD
- char *tlab_start;
- char *tlab_next;
- char *tlab_temp_end;
- char *tlab_real_end;
- gpointer *store_remset_buffer;
- long store_remset_buffer_index;
-#endif
-};
-
#ifdef HAVE_KW_THREAD
#define TLAB_ACCESS_INIT
#define TLAB_START tlab_start
static char *nursery_next = NULL;
static char *nursery_frag_real_end = NULL;
static char *nursery_real_end = NULL;
-//static char *nursery_first_pinned_start = NULL;
static char *nursery_last_pinned_end = NULL;
/* The size of a TLAB */
*/
static guint32 tlab_size = (1024 * 4);
+/* How much space may be wasted from the current fragment when allocating a new TLAB */
+#define MAX_NURSERY_TLAB_WASTE 512
+
/* fragments that are free and ready to be used for allocation */
static Fragment *nursery_fragments = NULL;
/* freeelist of fragment structures */
static Fragment *fragment_freelist = NULL;
-/*
- * used when moving the objects
+/*
+ * Objects bigger than this go into the large object space. This size
+ * has a few constraints. It must fit into the major heap, which in
+ * the case of the copying collector means that it must fit into a
+ * pinned chunk. It must also play well with the GC descriptors, some
+ * of which (DESC_TYPE_RUN_LENGTH, DESC_TYPE_SMALL_BITMAP) encode the
+ * object size.
*/
-static char *to_space_bumper = NULL;
-static char *to_space_top = NULL;
-static GCMemSection *to_space_section = NULL;
-
-/* objects bigger then this go into the large object space */
-#define MAX_SMALL_OBJ_SIZE MAX_FREELIST_SIZE
+#define MAX_SMALL_OBJ_SIZE 8000
/* Functions supplied by the runtime to be called by the GC */
static MonoGCCallbacks gc_callbacks;
+#define ALLOC_ALIGN 8
+#define ALLOC_ALIGN_BITS 3
+
+#define MOVED_OBJECTS_NUM 64
+static void *moved_objects [MOVED_OBJECTS_NUM];
+static int moved_objects_idx = 0;
+
/*
* ######################################################################
* ######## Macros and function declarations.
return (void*)p;
}
+typedef void (*CopyOrMarkObjectFunc) (void**);
+typedef char* (*ScanObjectFunc) (char*);
+
/* forward declarations */
static void* get_internal_mem (size_t size, int type);
static void free_internal_mem (void *addr, int type);
static void* get_os_memory (size_t size, int activate);
+static void* get_os_memory_aligned (mword size, mword alignment, gboolean activate);
static void free_os_memory (void *addr, size_t size);
static G_GNUC_UNUSED void report_internal_mem_usage (void);
static int stop_world (void);
static int restart_world (void);
+static void add_to_global_remset (gpointer ptr);
static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise);
static void scan_from_remsets (void *start_nursery, void *end_nursery);
+static void scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, char *addr_start, char *addr_end, int root_type);
+static void scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeEntry *list);
static void find_pinning_ref_from_thread (char *obj, size_t size);
static void update_current_thread_stack (void *start);
-static GCMemSection* alloc_major_section (void);
-static void finalize_in_range (char *start, char *end, int generation);
+static void finalize_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation);
static void add_or_remove_disappearing_link (MonoObject *obj, void **link, gboolean track, int generation);
-static void null_link_in_range (char *start, char *end, int generation);
+static void null_link_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation);
static void null_links_for_domain (MonoDomain *domain, int generation);
static gboolean search_fragment_for_size (size_t size);
-static void mark_pinned_from_addresses (PinnedChunk *chunk, void **start, void **end);
+static int search_fragment_for_size_range (size_t desired_size, size_t minimum_size);
+static void build_nursery_fragments (int start_pin, int end_pin);
+static void clear_nursery_fragments (char *next);
+static void pin_from_roots (void *start_nursery, void *end_nursery);
+static int pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery);
+static void pin_objects_in_section (GCMemSection *section);
+static void optimize_pin_queue (int start_slot);
static void clear_remsets (void);
static void clear_tlabs (void);
-typedef void (*ScanPinnedObjectCallbackFunc) (PinnedChunk*, char*, size_t, void*);
-static void scan_pinned_objects (ScanPinnedObjectCallbackFunc callback, void *callback_data);
-static void sweep_pinned_objects (void);
-static void scan_from_pinned_objects (char *addr_start, char *addr_end);
-static void free_large_object (LOSObject *obj);
-static void free_major_section (GCMemSection *section);
-static void to_space_expand (void);
+typedef void (*IterateObjectCallbackFunc) (char*, size_t, void*);
+static void scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data);
+static void scan_object (char *start);
+static void major_scan_object (char *start);
+static void* copy_object_no_checks (void *obj);
+static void copy_object (void **obj_slot);
+static void* get_chunk_freelist (PinnedChunk *chunk, int slot);
+static PinnedChunk* alloc_pinned_chunk (void);
+static void sort_addresses (void **array, int size);
+static void drain_gray_stack (void);
+static void finish_gray_stack (char *start_addr, char *end_addr, int generation);
+static gboolean need_major_collection (void);
+static void major_collection (const char *reason);
static void mono_gc_register_disappearing_link (MonoObject *obj, void **link, gboolean track);
void describe_ptr (char *ptr);
-void check_consistency (void);
-char* check_object (char *start);
+void check_object (char *start);
+
+static void check_consistency (void);
+static void check_major_refs (void);
+static void check_section_scan_starts (GCMemSection *section);
+static void check_scan_starts (void);
+static void check_for_xdomain_refs (void);
+static void dump_occupied (char *start, char *end, char *section_start);
+static void dump_section (GCMemSection *section, const char *type);
+static void dump_heap (const char *type, int num, const char *reason);
+static void report_pinned_chunk (PinnedChunk *chunk, int seq);
void mono_gc_scan_for_specific_ref (MonoObject *key);
+static void init_stats (void);
+
+static int mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end);
+static void clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end);
+static void null_ephemerons_for_domain (MonoDomain *domain);
+
+//#define BINARY_PROTOCOL
+#include "sgen-protocol.c"
+#include "sgen-pinning.c"
+#include "sgen-pinning-stats.c"
+#include "sgen-gray.c"
+#include "sgen-los.c"
+
/*
* ######################################################################
* ######## GC descriptors
* inside complex.
*/
enum {
- DESC_TYPE_RUN_LENGTH, /* 16 bits aligned byte size | 1-3 (offset, numptr) bytes tuples */
- DESC_TYPE_SMALL_BITMAP, /* 16 bits aligned byte size | 16-48 bit bitmap */
- DESC_TYPE_STRING, /* nothing */
+ /*
+ * We don't use 0 so that 0 isn't a valid GC descriptor. No
+ * deep reason for this other than to be able to identify a
+ * non-inited descriptor for debugging.
+ *
+ * If an object contains no references, its GC descriptor is
+ * always DESC_TYPE_RUN_LENGTH, without a size, no exceptions.
+ * This is so that we can quickly check for that in
+ * copy_object_no_checks(), without having to fetch the
+ * object's class.
+ */
+ DESC_TYPE_RUN_LENGTH = 1, /* 15 bits aligned byte size | 1-3 (offset, numptr) bytes tuples */
+ DESC_TYPE_SMALL_BITMAP, /* 15 bits aligned byte size | 16-48 bit bitmap */
DESC_TYPE_COMPLEX, /* index for bitmap into complex_descriptors */
DESC_TYPE_VECTOR, /* 10 bits element size | 1 bit array | 2 bits desc | element desc */
DESC_TYPE_ARRAY, /* 10 bits element size | 1 bit array | 2 bits desc | element desc */
#define VECTOR_INFO_SHIFT 14
#define VECTOR_ELSIZE_SHIFT 3
#define LARGE_BITMAP_SIZE (GC_BITS_PER_WORD - LOW_TYPE_BITS)
-#define MAX_SMALL_SIZE ((1 << SMALL_BITMAP_SHIFT) - 1)
-#define SMALL_SIZE_MASK 0xfff8
#define MAX_ELEMENT_SIZE 0x3ff
-#define ELEMENT_SIZE_MASK (0x3ff << LOW_TYPE_BITS)
#define VECTOR_SUBTYPE_PTRFREE (DESC_TYPE_V_PTRFREE << VECTOR_INFO_SHIFT)
#define VECTOR_SUBTYPE_REFS (DESC_TYPE_V_REFS << VECTOR_INFO_SHIFT)
#define VECTOR_SUBTYPE_RUN_LEN (DESC_TYPE_V_RUN_LEN << VECTOR_INFO_SHIFT)
#define VECTOR_SUBTYPE_BITMAP (DESC_TYPE_V_BITMAP << VECTOR_INFO_SHIFT)
-#define ALLOC_ALIGN 8
-
/* Root bitmap descriptors are simpler: the lower three bits describe the type
* and we either have 30/62 bitmap bits or nibble-based run-length,
void*
mono_gc_make_descr_for_string (gsize *bitmap, int numbits)
{
- return (void*) DESC_TYPE_STRING;
+ return (void*) DESC_TYPE_RUN_LENGTH;
}
void*
int first_set = -1, num_set = 0, last_set = -1, i;
mword desc = 0;
size_t stored_size = obj_size;
- stored_size += ALLOC_ALIGN - 1;
- stored_size &= ~(ALLOC_ALIGN - 1);
for (i = 0; i < numbits; ++i) {
if (bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) {
if (first_set < 0)
num_set++;
}
}
+ /*
+ * We don't encode the size of types that don't contain
+ * references because they might not be aligned, i.e. the
+ * bottom two bits might be set, which would clash with the
+ * bits we need to encode the descriptor type. Since we don't
+ * use the encoded size to skip objects, other than for
+ * processing remsets, in which case only the positions of
+ * references are relevant, this is not a problem.
+ */
+ if (first_set < 0)
+ return (void*)DESC_TYPE_RUN_LENGTH;
+ g_assert (!(stored_size & 0x3));
if (stored_size <= MAX_SMALL_OBJ_SIZE) {
/* check run-length encoding first: one byte offset, one byte number of pointers
* on 64 bit archs, we can have 3 runs, just one on 32.
* It may be better to use nibbles.
*/
if (first_set < 0) {
- desc = DESC_TYPE_RUN_LENGTH | stored_size;
+ desc = DESC_TYPE_RUN_LENGTH | (stored_size << 1);
DEBUG (6, fprintf (gc_debug_file, "Ptrfree descriptor %p, size: %zd\n", (void*)desc, stored_size));
return (void*) desc;
} else if (first_set < 256 && num_set < 256 && (first_set + num_set == last_set + 1)) {
- desc = DESC_TYPE_RUN_LENGTH | stored_size | (first_set << 16) | (num_set << 24);
+ desc = DESC_TYPE_RUN_LENGTH | (stored_size << 1) | (first_set << 16) | (num_set << 24);
DEBUG (6, fprintf (gc_debug_file, "Runlen descriptor %p, size: %zd, first set: %d, num set: %d\n", (void*)desc, stored_size, first_set, num_set));
return (void*) desc;
}
/* we know the 2-word header is ptr-free */
if (last_set < SMALL_BITMAP_SIZE + OBJECT_HEADER_WORDS) {
- desc = DESC_TYPE_SMALL_BITMAP | stored_size | ((*bitmap >> OBJECT_HEADER_WORDS) << SMALL_BITMAP_SHIFT);
+ desc = DESC_TYPE_SMALL_BITMAP | (stored_size << 1) | ((*bitmap >> OBJECT_HEADER_WORDS) << SMALL_BITMAP_SHIFT);
DEBUG (6, fprintf (gc_debug_file, "Smallbitmap descriptor %p, size: %zd, last set: %d\n", (void*)desc, stored_size, last_set));
return (void*) desc;
}
num_set++;
}
}
+ /* See comment at the definition of DESC_TYPE_RUN_LENGTH. */
+ if (first_set < 0)
+ return (void*)DESC_TYPE_RUN_LENGTH;
if (elem_size <= MAX_ELEMENT_SIZE) {
desc |= elem_size << VECTOR_ELSIZE_SHIFT;
if (!num_set) {
/* helper macros to scan and traverse objects, macros because we resue them in many functions */
#define STRING_SIZE(size,str) do { \
- (size) = sizeof (MonoString) + 2 * mono_string_length ((MonoString*)(str)) + 2; \
+ (size) = sizeof (MonoString) + 2 * mono_string_length_fast ((MonoString*)(str)) + 2; \
(size) += (ALLOC_ALIGN - 1); \
(size) &= ~(ALLOC_ALIGN - 1); \
} while (0)
#define OBJ_RUN_LEN_SIZE(size,desc,obj) do { \
- (size) = (desc) & 0xfff8; \
+ (size) = ((desc) & 0xfff8) >> 1; \
} while (0)
#define OBJ_BITMAP_SIZE(size,desc,obj) do { \
- (size) = (desc) & 0xfff8; \
+ (size) = ((desc) & 0xfff8) >> 1; \
} while (0)
//#define PREFETCH(addr) __asm__ __volatile__ (" prefetchnta %0": : "m"(*(char *)(addr)))
/* there are pointers */ \
gsize *mbitmap_data = complex_descriptors + ((vt)->desc >> LOW_TYPE_BITS); \
int mbwords = (*mbitmap_data++) - 1; \
- int el_size = mono_array_element_size (((MonoObject*)(obj))->vtable->klass); \
+ int el_size = mono_array_element_size (vt->klass); \
char *e_start = (char*)(obj) + G_STRUCT_OFFSET (MonoArray, vector); \
- char *e_end = e_start + el_size * mono_array_length ((MonoArray*)(obj)); \
- if (0) { \
- MonoObject *myobj = (MonoObject*)start; \
- g_print ("found %d at %p (0x%zx): %s.%s\n", mbwords, (obj), (vt)->desc, myobj->vtable->klass->name_space, myobj->vtable->klass->name); \
- } \
+ char *e_end = e_start + el_size * mono_array_length_fast ((MonoArray*)(obj)); \
+ if (0) \
+ g_print ("found %d at %p (0x%zx): %s.%s\n", mbwords, (obj), (vt)->desc, vt->klass->name_space, vt->klass->name); \
while (e_start < e_end) { \
void **_objptr = (void**)e_start; \
gsize *bitmap_data = mbitmap_data; \
int etype = (vt)->desc & 0xc000; \
if (etype == (DESC_TYPE_V_REFS << 14)) { \
void **p = (void**)((char*)(obj) + G_STRUCT_OFFSET (MonoArray, vector)); \
- void **end_refs = (void**)((char*)p + el_size * mono_array_length ((MonoArray*)(obj))); \
+ void **end_refs = (void**)((char*)p + el_size * mono_array_length_fast ((MonoArray*)(obj))); \
/* Note: this code can handle also arrays of struct with only references in them */ \
while (p < end_refs) { \
HANDLE_PTR (p, (obj)); \
int offset = ((vt)->desc >> 16) & 0xff; \
int num_refs = ((vt)->desc >> 24) & 0xff; \
char *e_start = (char*)(obj) + G_STRUCT_OFFSET (MonoArray, vector); \
- char *e_end = e_start + el_size * mono_array_length ((MonoArray*)(obj)); \
+ char *e_end = e_start + el_size * mono_array_length_fast ((MonoArray*)(obj)); \
while (e_start < e_end) { \
void **p = (void**)e_start; \
int i; \
} \
} else if (etype == DESC_TYPE_V_BITMAP << 14) { \
char *e_start = (char*)(obj) + G_STRUCT_OFFSET (MonoArray, vector); \
- char *e_end = e_start + el_size * mono_array_length ((MonoArray*)(obj)); \
+ char *e_end = e_start + el_size * mono_array_length_fast ((MonoArray*)(obj)); \
while (e_start < e_end) { \
void **p = (void**)e_start; \
gsize _bmap = (vt)->desc >> 16; \
} \
} while (0)
-#define COUNT_OBJECT_TYPES do { \
- switch (desc & 0x7) { \
- case DESC_TYPE_STRING: type_str++; break; \
- case DESC_TYPE_RUN_LENGTH: type_rlen++; break; \
- case DESC_TYPE_ARRAY: case DESC_TYPE_VECTOR: type_vector++; break; \
- case DESC_TYPE_SMALL_BITMAP: type_bitmap++; break; \
- case DESC_TYPE_LARGE_BITMAP: type_lbit++; break; \
- case DESC_TYPE_COMPLEX: type_complex++; break; \
- case DESC_TYPE_COMPLEX_ARR: type_complex++; break; \
- default: g_assert_not_reached (); \
- } \
- } while (0)
-
-
-/*
- * ######################################################################
- * ######## Detecting and removing garbage.
- * ######################################################################
- * This section of code deals with detecting the objects no longer in use
- * and reclaiming the memory.
- */
-
-#if 0
-static mword new_obj_references = 0;
-static mword obj_references_checked = 0;
-
-#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj) do { \
- if (*(ptr) && (char*)*(ptr) >= nursery_start && (char*)*(ptr) < nursery_next) { \
- new_obj_references++; \
- /*printf ("bogus ptr %p found at %p in object %p (%s.%s)\n", *(ptr), (ptr), o, o->vtable->klass->name_space, o->vtable->klass->name);*/ \
- } else { \
- obj_references_checked++; \
- } \
- } while (0)
-
-static void __attribute__((noinline))
-scan_area (char *start, char *end)
-{
- GCVTable *vt;
- int type_str = 0, type_rlen = 0, type_bitmap = 0, type_vector = 0, type_lbit = 0, type_complex = 0;
- new_obj_references = 0;
- obj_references_checked = 0;
- while (start < end) {
- if (!*(void**)start) {
- start += sizeof (void*); /* should be ALLOC_ALIGN, really */
- continue;
- }
- vt = (GCVTable*)LOAD_VTABLE (start);
- DEBUG (8, fprintf (gc_debug_file, "Scanning object %p, vtable: %p (%s)\n", start, vt, vt->klass->name));
- if (0) {
- MonoObject *obj = (MonoObject*)start;
- g_print ("found at %p (0x%zx): %s.%s\n", start, vt->desc, obj->vtable->klass->name_space, obj->vtable->klass->name);
- }
-
-#define SCAN_OBJECT_ACTION COUNT_OBJECT_TYPES
-#include "sgen-scan-object.h"
- }
- /*printf ("references to new nursery %p-%p (size: %dk): %d, checked: %d\n", old_start, end, (end-old_start)/1024, new_obj_references, obj_references_checked);
- printf ("\tstrings: %d, runl: %d, vector: %d, bitmaps: %d, lbitmaps: %d, complex: %d\n",
- type_str, type_rlen, type_vector, type_bitmap, type_lbit, type_complex);*/
-}
-#endif
+//#include "sgen-major-copying.c"
+#include "sgen-marksweep.c"
static gboolean
is_xdomain_ref_allowed (gpointer *ptr, char *obj, MonoDomain *domain)
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) check_reference_for_xdomain ((ptr), (obj), domain)
-static char*
-scan_object_for_xdomain_refs (char *start)
+static void
+scan_object_for_xdomain_refs (char *start, mword size, void *data)
{
MonoDomain *domain = ((MonoObject*)start)->vtable->domain;
#include "sgen-scan-object.h"
-
- return start;
-}
-
-static void
-scan_area_for_xdomain_refs (char *start, char *end)
-{
- while (start < end) {
- if (!*(void**)start) {
- start += sizeof (void*); /* should be ALLOC_ALIGN, really */
- continue;
- }
-
- start = scan_object_for_xdomain_refs (start);
- }
}
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) do { \
if ((MonoObject*)*(ptr) == key) { \
- g_print ("found ref to %p in object %p (%s) at offset %zd\n", \
+ g_print ("found ref to %p in object %p (%s) at offset %td\n", \
key, (obj), safe_name ((obj)), ((char*)(ptr) - (char*)(obj))); \
} \
} while (0)
-static char*
+static void
scan_object_for_specific_ref (char *start, MonoObject *key)
{
#include "sgen-scan-object.h"
-
- return start;
}
static void
-scan_area_for_specific_ref (char *start, char *end, MonoObject *key)
+scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data)
{
while (start < end) {
+ size_t size;
if (!*(void**)start) {
start += sizeof (void*); /* should be ALLOC_ALIGN, really */
continue;
}
- start = scan_object_for_specific_ref (start, key);
+ size = safe_object_get_size ((MonoObject*) start);
+ size += ALLOC_ALIGN - 1;
+ size &= ~(ALLOC_ALIGN - 1);
+
+ callback (start, size, data);
+
+ start += size;
}
}
static void
-scan_pinned_object_for_specific_ref_callback (PinnedChunk *chunk, char *obj, size_t size, MonoObject *key)
+scan_object_for_specific_ref_callback (char *obj, size_t size, MonoObject *key)
{
scan_object_for_specific_ref (obj, key);
}
void
mono_gc_scan_for_specific_ref (MonoObject *key)
{
- GCMemSection *section;
LOSObject *bigobj;
RootRecord *root;
int i;
- for (section = section_list; section; section = section->block.next)
- scan_area_for_specific_ref (section->data, section->end_data, key);
+ scan_area_with_callback (nursery_section->data, nursery_section->end_data,
+ (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
+
+ major_iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
scan_object_for_specific_ref (bigobj->data, key);
- scan_pinned_objects ((ScanPinnedObjectCallbackFunc)scan_pinned_object_for_specific_ref_callback, key);
-
scan_roots_for_specific_ref (key, ROOT_TYPE_NORMAL);
scan_roots_for_specific_ref (key, ROOT_TYPE_WBARRIER);
}
}
-//#define BINARY_PROTOCOL
-#include "sgen-protocol.c"
+/* Clear all remaining nursery fragments */
+static void
+clear_nursery_fragments (char *next)
+{
+ Fragment *frag;
+ if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
+ g_assert (next <= nursery_frag_real_end);
+ memset (next, 0, nursery_frag_real_end - next);
+ for (frag = nursery_fragments; frag; frag = frag->next) {
+ memset (frag->fragment_start, 0, frag->fragment_end - frag->fragment_start);
+ }
+ }
+}
static gboolean
need_remove_object_for_domain (char *start, MonoDomain *domain)
}
}
-static void __attribute__((noinline))
-scan_area_for_domain (MonoDomain *domain, char *start, char *end)
-{
- GCVTable *vt;
- gboolean remove;
-
- while (start < end) {
- if (!*(void**)start) {
- start += sizeof (void*); /* should be ALLOC_ALIGN, really */
- continue;
- }
- vt = (GCVTable*)LOAD_VTABLE (start);
- process_object_for_domain_clearing (start, domain);
- remove = need_remove_object_for_domain (start, domain);
- if (remove && ((MonoObject*)start)->synchronisation) {
- void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)start);
- if (dislink)
- mono_gc_register_disappearing_link (NULL, dislink, FALSE);
- }
-
-#define SCAN_OBJECT_NOSCAN
-#define SCAN_OBJECT_ACTION do { \
- if (remove) memset (start, 0, skip_size); \
- } while (0)
-#include "sgen-scan-object.h"
- }
-}
-
static MonoDomain *check_domain = NULL;
static void
}
static void
-clear_domain_process_pinned_object_callback (PinnedChunk *chunk, char *obj, size_t size, MonoDomain *domain)
+check_for_xdomain_refs (void)
{
+ LOSObject *bigobj;
+
+ scan_area_with_callback (nursery_section->data, nursery_section->end_data, (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);
+
+ major_iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);
+
+ for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
+ scan_object_for_xdomain_refs (bigobj->data, bigobj->size, NULL);
+}
+
+static gboolean
+clear_domain_process_object (char *obj, MonoDomain *domain)
+{
+ gboolean remove;
+
process_object_for_domain_clearing (obj, domain);
+ remove = need_remove_object_for_domain (obj, domain);
+
+ if (remove && ((MonoObject*)obj)->synchronisation) {
+ void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
+ if (dislink)
+ mono_gc_register_disappearing_link (NULL, dislink, FALSE);
+ }
+
+ return remove;
}
static void
-clear_domain_free_pinned_object_callback (PinnedChunk *chunk, char *obj, size_t size, MonoDomain *domain)
+clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
{
- if (need_remove_object_for_domain (obj, domain))
- free_pinned_object (chunk, obj, size);
+ if (clear_domain_process_object (obj, domain))
+ memset (obj, 0, size);
}
static void
-scan_pinned_object_for_xdomain_refs_callback (PinnedChunk *chunk, char *obj, size_t size, gpointer dummy)
+clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
{
- scan_object_for_xdomain_refs (obj);
+ clear_domain_process_object (obj, domain);
}
static void
-check_for_xdomain_refs (void)
+clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
{
- GCMemSection *section;
- LOSObject *bigobj;
-
- for (section = section_list; section; section = section->block.next)
- scan_area_for_xdomain_refs (section->data, section->end_data);
-
- for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
- scan_object_for_xdomain_refs (bigobj->data);
+ if (need_remove_object_for_domain (obj, domain))
+ major_free_non_pinned_object (obj, size);
+}
- scan_pinned_objects (scan_pinned_object_for_xdomain_refs_callback, NULL);
+static void
+clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
+{
+ if (need_remove_object_for_domain (obj, domain))
+ free_pinned_object (obj, size);
}
/*
void
mono_gc_clear_domain (MonoDomain * domain)
{
- GCMemSection *section;
LOSObject *bigobj, *prev;
- Fragment *frag;
int i;
LOCK_GC;
- /* Clear all remaining nursery fragments */
- if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
- g_assert (nursery_next <= nursery_frag_real_end);
- memset (nursery_next, 0, nursery_frag_real_end - nursery_next);
- for (frag = nursery_fragments; frag; frag = frag->next) {
- memset (frag->fragment_start, 0, frag->fragment_end - frag->fragment_start);
- }
- }
+
+ clear_nursery_fragments (nursery_next);
if (xdomain_checks && domain != mono_get_root_domain ()) {
scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
check_for_xdomain_refs ();
}
- for (section = section_list; section; section = section->block.next) {
- scan_area_for_domain (domain, section->data, section->end_data);
- }
+ scan_area_with_callback (nursery_section->data, nursery_section->end_data,
+ (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain);
+
+ /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
+ to memory returned to the OS.*/
+ null_ephemerons_for_domain (domain);
+
+ for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
+ null_links_for_domain (domain, i);
- /* We need two passes over pinned and large objects because
- freeing such an object gives its memory back to the OS (in
- the case of large objects) or obliterates its vtable
- (pinned objects), but we might need to dereference a
- pointer from an object to another object if the first
- object is a proxy. */
- scan_pinned_objects ((ScanPinnedObjectCallbackFunc)clear_domain_process_pinned_object_callback, domain);
+ /* We need two passes over major and large objects because
+ freeing such objects might give their memory back to the OS
+ (in the case of large objects) or obliterate its vtable
+ (pinned objects with major-copying or pinned and non-pinned
+ objects with major-mark&sweep), but we might need to
+ dereference a pointer from an object to another object if
+ the first object is a proxy. */
+ major_iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
- process_object_for_domain_clearing (bigobj->data, domain);
+ clear_domain_process_object (bigobj->data, domain);
prev = NULL;
for (bigobj = los_object_list; bigobj;) {
prev = bigobj;
bigobj = bigobj->next;
}
- scan_pinned_objects ((ScanPinnedObjectCallbackFunc)clear_domain_free_pinned_object_callback, domain);
-
- for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
- null_links_for_domain (domain, i);
+ major_iterate_objects (TRUE, FALSE, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
+ major_iterate_objects (FALSE, TRUE, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
UNLOCK_GC;
}
+static void
+global_remset_cache_clear (void)
+{
+ memset (global_remset_cache, 0, sizeof (global_remset_cache));
+}
+
+/*
+ * Tries to check whether a given remset location was already added to the
+ * global remset.
+ *
+ * Uses a 2-entry LRU cache of recently seen remset locations.
+ *
+ * It's hand-coded instead of done using loops to reduce the number of memory references on cache hit.
+ *
+ * Returns TRUE if the element was added (i.e. it was not already cached).
+ */
+static gboolean
+global_remset_location_was_not_added (gpointer ptr)
+{
+
+ gpointer first = global_remset_cache [0], second;
+ if (first == ptr) {
+ HEAVY_STAT (++stat_global_remsets_discarded);
+ return FALSE;
+ }
+
+ second = global_remset_cache [1];
+
+ if (second == ptr) {
+ /*Move the second to the front*/
+ global_remset_cache [0] = second;
+ global_remset_cache [1] = first;
+
+ HEAVY_STAT (++stat_global_remsets_discarded);
+ return FALSE;
+ }
+
+ global_remset_cache [0] = second;
+ global_remset_cache [1] = ptr;
+ return TRUE;
+}
+
/*
* add_to_global_remset:
*
* a minor collection. This can happen if the objects they point to are pinned.
*/
static void
-add_to_global_remset (gpointer ptr, gboolean root)
+add_to_global_remset (gpointer ptr)
{
RememberedSet *rs;
+ g_assert (!ptr_in_nursery (ptr) && ptr_in_nursery (*(gpointer*)ptr));
+
+ if (!global_remset_location_was_not_added (ptr))
+ return;
+
DEBUG (8, fprintf (gc_debug_file, "Adding global remset for %p\n", ptr));
binary_protocol_global_remset (ptr, *(gpointer*)ptr, (gpointer)LOAD_VTABLE (*(gpointer*)ptr));
- g_assert (!root);
- g_assert (!ptr_in_nursery (ptr) && ptr_in_nursery (*(gpointer*)ptr));
-
HEAVY_STAT (++stat_global_remsets_added);
/*
* To avoid uncontrolled growth of the global remset, only add each pointer once.
*/
if (global_remset->store_next + 3 < global_remset->end_set) {
- if (root) {
- *(global_remset->store_next++) = (mword)ptr | REMSET_OTHER;
- *(global_remset->store_next++) = (mword)REMSET_ROOT_LOCATION;
- } else {
- *(global_remset->store_next++) = (mword)ptr;
- }
+ *(global_remset->store_next++) = (mword)ptr;
return;
}
rs = alloc_remset (global_remset->end_set - global_remset->data, NULL);
rs->next = global_remset;
global_remset = rs;
- if (root) {
- *(global_remset->store_next++) = (mword)ptr | REMSET_OTHER;
- *(global_remset->store_next++) = (mword)REMSET_ROOT_LOCATION;
- } else {
- *(global_remset->store_next++) = (mword)ptr;
- }
+ *(global_remset->store_next++) = (mword)ptr;
{
int global_rs_size = 0;
}
}
-#define MOVED_OBJECTS_NUM 64
-static void *moved_objects [MOVED_OBJECTS_NUM];
-static int moved_objects_idx = 0;
+/*
+ * FIXME: allocate before calling this function and pass the
+ * destination address.
+ */
+static void*
+copy_object_no_checks (void *obj)
+{
+ static const void *copy_labels [] = { &&LAB_0, &&LAB_1, &&LAB_2, &&LAB_3, &&LAB_4, &&LAB_5, &&LAB_6, &&LAB_7, &&LAB_8 };
-#include "sgen-gray.c"
-
-/*
- * This is how the copying happens from the nursery to the old generation.
- * We assume that at this time all the pinned objects have been identified and
- * marked as such.
- * We run scan_object() for each pinned object so that each referenced
- * objects if possible are copied. The new gray objects created can have
- * scan_object() run on them right away, too.
- * Then we run copy_object() for the precisely tracked roots. At this point
- * all the roots are either gray or black. We run scan_object() on the gray
- * objects until no more gray objects are created.
- * At the end of the process we walk again the pinned list and we unmark
- * the pinned flag. As we go we also create the list of free space for use
- * in the next allocation runs.
- *
- * We need to remember objects from the old generation that point to the new one
- * (or just addresses?).
- *
- * copy_object could be made into a macro once debugged (use inline for now).
- */
-
-static void __attribute__((noinline))
-copy_object (void **obj_slot, char *from_space_start, char *from_space_end)
-{
- static const void *copy_labels [] = { &&LAB_0, &&LAB_1, &&LAB_2, &&LAB_3, &&LAB_4, &&LAB_5, &&LAB_6, &&LAB_7, &&LAB_8 };
-
- char *forwarded;
- char *obj = *obj_slot;
- mword objsize;
- MonoVTable *vt;
-
- HEAVY_STAT (++num_copy_object_called);
-
- if (!(obj >= from_space_start && obj < from_space_end)) {
- DEBUG (9, fprintf (gc_debug_file, "Not copying %p because it's not in from space (%p-%p)\n",
- obj, from_space_start, from_space_end));
- HEAVY_STAT (++stat_copy_object_failed_from_space);
- return;
- }
-
- DEBUG (9, fprintf (gc_debug_file, "Precise copy of %p from %p", obj, obj_slot));
-
- /*
- * obj must belong to one of:
- *
- * 1. the nursery
- * 2. the LOS
- * 3. a pinned chunk
- * 4. a non-to-space section of the major heap
- * 5. a to-space section of the major heap
- *
- * In addition, objects in 1, 2 and 4 might also be pinned.
- * Objects in 1 and 4 might be forwarded.
- *
- * Before we can copy the object we must make sure that we are
- * allowed to, i.e. that the object not pinned, not already
- * forwarded and doesn't belong to the LOS, a pinned chunk, or
- * a to-space section.
- *
- * We are usually called for to-space objects (5) when we have
- * two remset entries for the same reference. The first entry
- * copies the object and updates the reference and the second
- * calls us with the updated reference that points into
- * to-space. There might also be other circumstances where we
- * get to-space objects.
- */
-
- if ((forwarded = object_is_forwarded (obj))) {
- DEBUG (9, g_assert (((MonoVTable*)LOAD_VTABLE(obj))->gc_descr));
- DEBUG (9, fprintf (gc_debug_file, " (already forwarded to %p)\n", forwarded));
- HEAVY_STAT (++stat_copy_object_failed_forwarded);
- *obj_slot = forwarded;
- return;
- }
- if (object_is_pinned (obj)) {
- DEBUG (9, g_assert (((MonoVTable*)LOAD_VTABLE(obj))->gc_descr));
- DEBUG (9, fprintf (gc_debug_file, " (pinned, no change)\n"));
- HEAVY_STAT (++stat_copy_object_failed_pinned);
- return;
- }
+ mword objsize;
+ char *destination;
+ MonoVTable *vt = ((MonoObject*)obj)->vtable;
+ gboolean has_references = vt->gc_descr != (void*)DESC_TYPE_RUN_LENGTH;
objsize = safe_object_get_size ((MonoObject*)obj);
objsize += ALLOC_ALIGN - 1;
objsize &= ~(ALLOC_ALIGN - 1);
- if (ptr_in_nursery (obj))
- goto copy;
+ DEBUG (9, g_assert (vt->klass->inited));
+ MAJOR_GET_COPY_OBJECT_SPACE (destination, objsize, has_references);
- /*
- * At this point we know obj is not pinned, not forwarded and
- * belongs to 2, 3, 4, or 5.
- *
- * LOS object (2) are simple, at least until we always follow
- * the rule: if objsize > MAX_SMALL_OBJ_SIZE, pin the object
- * and return it. At the end of major collections, we walk
- * the los list and if the object is pinned, it is marked,
- * otherwise it can be freed.
- *
- * Pinned chunks (3) and major heap sections (4, 5) both
- * reside in blocks, which are always aligned, so once we've
- * eliminated LOS objects, we can just access the block and
- * see whether it's a pinned chunk or a major heap section.
- */
- if (G_UNLIKELY (objsize > MAX_SMALL_OBJ_SIZE || obj_is_from_pinned_alloc (obj))) {
- DEBUG (9, fprintf (gc_debug_file, " (marked LOS/Pinned %p (%s), size: %zd)\n", obj, safe_name (obj), objsize));
- binary_protocol_pin (obj, (gpointer)LOAD_VTABLE (obj), safe_object_get_size ((MonoObject*)obj));
- pin_object (obj);
- HEAVY_STAT (++stat_copy_object_failed_large_pinned);
- return;
- }
-
- /*
- * Now we know the object is in a major heap section. All we
- * need to do is check whether it's already in to-space (5) or
- * not (4).
- */
- if (MAJOR_SECTION_FOR_OBJECT (obj)->is_to_space) {
- DEBUG (9, g_assert (objsize <= MAX_SMALL_OBJ_SIZE));
- DEBUG (9, fprintf (gc_debug_file, " (already copied)\n"));
- HEAVY_STAT (++stat_copy_object_failed_to_space);
- return;
- }
-
- copy:
- DEBUG (9, fprintf (gc_debug_file, " (to %p, %s size: %zd)\n", to_space_bumper, ((MonoObject*)obj)->vtable->klass->name, objsize));
- binary_protocol_copy (obj, to_space_bumper, ((MonoObject*)obj)->vtable, objsize);
-
- HEAVY_STAT (++num_objects_copied);
-
- /* Make sure we have enough space available */
- if (to_space_bumper + objsize > to_space_top) {
- to_space_expand ();
- DEBUG (8, g_assert (to_space_bumper + objsize <= to_space_top));
- }
+ DEBUG (9, fprintf (gc_debug_file, " (to %p, %s size: %lu)\n", destination, ((MonoObject*)obj)->vtable->klass->name, (unsigned long)objsize));
+ binary_protocol_copy (obj, destination, ((MonoObject*)obj)->vtable, objsize);
if (objsize <= sizeof (gpointer) * 8) {
- mword *dest = (mword*)to_space_bumper;
+ mword *dest = (mword*)destination;
goto *copy_labels [objsize / sizeof (gpointer)];
LAB_8:
(dest) [7] = ((mword*)obj) [7];
{
int ecx;
char* esi = obj;
- char* edi = to_space_bumper;
+ char* edi = destination;
__asm__ __volatile__(
"rep; movsl"
: "=&c" (ecx), "=&D" (edi), "=&S" (esi)
);
}
#else
- memcpy (to_space_bumper, obj, objsize);
+ memcpy (destination, obj, objsize);
#endif
}
/* adjust array->bounds */
- vt = ((MonoObject*)obj)->vtable;
DEBUG (9, g_assert (vt->gc_descr));
if (G_UNLIKELY (vt->rank && ((MonoArray*)obj)->bounds)) {
- MonoArray *array = (MonoArray*)to_space_bumper;
- array->bounds = (MonoArrayBounds*)((char*)to_space_bumper + ((char*)((MonoArray*)obj)->bounds - (char*)obj));
- DEBUG (9, fprintf (gc_debug_file, "Array instance %p: size: %zd, rank: %d, length: %d\n", array, objsize, vt->rank, mono_array_length (array)));
+ MonoArray *array = (MonoArray*)destination;
+ array->bounds = (MonoArrayBounds*)((char*)destination + ((char*)((MonoArray*)obj)->bounds - (char*)obj));
+ DEBUG (9, fprintf (gc_debug_file, "Array instance %p: size: %lu, rank: %d, length: %lu\n", array, (unsigned long)objsize, vt->rank, (unsigned long)mono_array_length (array)));
}
/* set the forwarding pointer */
- forward_object (obj, to_space_bumper);
+ forward_object (obj, destination);
if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_GC_MOVES)) {
if (moved_objects_idx == MOVED_OBJECTS_NUM) {
mono_profiler_gc_moves (moved_objects, moved_objects_idx);
moved_objects_idx = 0;
}
moved_objects [moved_objects_idx++] = obj;
- moved_objects [moved_objects_idx++] = to_space_bumper;
+ moved_objects [moved_objects_idx++] = destination;
+ }
+ obj = destination;
+ if (has_references) {
+ DEBUG (9, fprintf (gc_debug_file, "Enqueuing gray object %p (%s)\n", obj, safe_name (obj)));
+ GRAY_OBJECT_ENQUEUE (obj);
}
- obj = to_space_bumper;
- to_space_section->scan_starts [((char*)obj - (char*)to_space_section->data)/SCAN_START_SIZE] = obj;
- to_space_bumper += objsize;
- DEBUG (9, fprintf (gc_debug_file, "Enqueuing gray object %p (%s)\n", obj, safe_name (obj)));
- gray_object_enqueue (obj);
- DEBUG (8, g_assert (to_space_bumper <= to_space_top));
- *obj_slot = obj;
+ return obj;
}
/*
- * This is a variant of copy_object() that can be used for object locations
- * not in the heap. Ideally all the callers should later be changed to call
- * the real copy_object() that takes the location of the pointer.
+ * This is how the copying happens from the nursery to the old generation.
+ * We assume that at this time all the pinned objects have been identified and
+ * marked as such.
+ * We run scan_object() for each pinned object so that each referenced
+ * objects if possible are copied. The new gray objects created can have
+ * scan_object() run on them right away, too.
+ * Then we run copy_object() for the precisely tracked roots. At this point
+ * all the roots are either gray or black. We run scan_object() on the gray
+ * objects until no more gray objects are created.
+ * At the end of the process we walk again the pinned list and we unmark
+ * the pinned flag. As we go we also create the list of free space for use
+ * in the next allocation runs.
+ *
+ * We need to remember objects from the old generation that point to the new one
+ * (or just addresses?).
+ *
+ * copy_object could be made into a macro once debugged (use inline for now).
*/
-static char*
-copy_object_no_heap (char *obj, char *from_space_start, char *from_space_end)
+
+static void __attribute__((noinline))
+copy_object (void **obj_slot)
{
- void *obj_ptr = (void*)obj;
- copy_object (&obj_ptr, from_space_start, from_space_end);
- return obj_ptr;
+ char *forwarded;
+ char *obj = *obj_slot;
+
+ DEBUG (9, g_assert (current_collection_generation == GENERATION_NURSERY));
+
+ HEAVY_STAT (++stat_copy_object_called_nursery);
+
+ if (!ptr_in_nursery (obj)) {
+ HEAVY_STAT (++stat_nursery_copy_object_failed_from_space);
+ return;
+ }
+
+ DEBUG (9, fprintf (gc_debug_file, "Precise copy of %p from %p", obj, obj_slot));
+
+ /*
+ * Before we can copy the object we must make sure that we are
+ * allowed to, i.e. that the object not pinned or not already
+ * forwarded.
+ */
+
+ if ((forwarded = object_is_forwarded (obj))) {
+ DEBUG (9, g_assert (((MonoVTable*)LOAD_VTABLE(obj))->gc_descr));
+ DEBUG (9, fprintf (gc_debug_file, " (already forwarded to %p)\n", forwarded));
+ HEAVY_STAT (++stat_nursery_copy_object_failed_forwarded);
+ *obj_slot = forwarded;
+ return;
+ }
+ if (object_is_pinned (obj)) {
+ DEBUG (9, g_assert (((MonoVTable*)LOAD_VTABLE(obj))->gc_descr));
+ DEBUG (9, fprintf (gc_debug_file, " (pinned, no change)\n"));
+ HEAVY_STAT (++stat_nursery_copy_object_failed_pinned);
+ return;
+ }
+
+ HEAVY_STAT (++stat_objects_copied_nursery);
+
+ *obj_slot = copy_object_no_checks (obj);
}
#undef HANDLE_PTR
void *__old = *(ptr); \
void *__copy; \
if (__old) { \
- copy_object ((ptr), from_start, from_end); \
+ copy_object ((ptr)); \
__copy = *(ptr); \
DEBUG (9, if (__old != __copy) fprintf (gc_debug_file, "Overwrote field at %p with %p (was: %p)\n", (ptr), *(ptr), __old)); \
if (G_UNLIKELY (ptr_in_nursery (__copy) && !ptr_in_nursery ((ptr)))) \
- add_to_global_remset ((ptr), FALSE); \
+ add_to_global_remset ((ptr)); \
} \
} while (0)
* Scan the object pointed to by @start for references to
* other objects between @from_start and @from_end and copy
* them to the gray_objects area.
- * Returns a pointer to the end of the object.
*/
-static char*
-scan_object (char *start, char* from_start, char* from_end)
+static void
+scan_object (char *start)
{
#include "sgen-scan-object.h"
- return start;
-}
-
-/*
- * drain_gray_stack:
- *
- * Scan objects in the gray stack until the stack is empty. This should be called
- * frequently after each object is copied, to achieve better locality and cache
- * usage.
- */
-static void inline
-drain_gray_stack (char *start_addr, char *end_addr)
-{
- char *obj;
-
- while ((obj = gray_object_dequeue ())) {
- DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", obj, safe_name (obj)));
- scan_object (obj, start_addr, end_addr);
- }
+ HEAVY_STAT (++stat_scan_object_called_nursery);
}
/*
return NULL;
}
-#include "sgen-pinning-stats.c"
+#undef HANDLE_PTR
+#define HANDLE_PTR(ptr,obj) do { \
+ void *__old = *(ptr); \
+ void *__copy; \
+ if (__old) { \
+ major_copy_or_mark_object ((ptr)); \
+ __copy = *(ptr); \
+ DEBUG (9, if (__old != __copy) fprintf (gc_debug_file, "Overwrote field at %p with %p (was: %p)\n", (ptr), *(ptr), __old)); \
+ if (G_UNLIKELY (ptr_in_nursery (__copy) && !ptr_in_nursery ((ptr)))) \
+ add_to_global_remset ((ptr)); \
+ } \
+ } while (0)
+
+static void
+major_scan_object (char *start)
+{
+#include "sgen-scan-object.h"
+
+ HEAVY_STAT (++stat_scan_object_called_major);
+}
+
+/*
+ * drain_gray_stack:
+ *
+ * Scan objects in the gray stack until the stack is empty. This should be called
+ * frequently after each object is copied, to achieve better locality and cache
+ * usage.
+ */
+static void inline
+drain_gray_stack (void)
+{
+ char *obj;
+
+ if (current_collection_generation == GENERATION_NURSERY) {
+ for (;;) {
+ GRAY_OBJECT_DEQUEUE (obj);
+ if (!obj)
+ break;
+ DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", obj, safe_name (obj)));
+ scan_object (obj);
+ }
+ } else {
+ for (;;) {
+ GRAY_OBJECT_DEQUEUE (obj);
+ if (!obj)
+ break;
+ DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", obj, safe_name (obj)));
+ major_scan_object (obj);
+ }
+ }
+}
/*
* Addresses from start to end are already sorted. This function finds
DEBUG (4, fprintf (gc_debug_file, "Pinned object %p, vtable %p (%s), count %d\n", search_start, *(void**)search_start, safe_name (search_start), count));
binary_protocol_pin (search_start, (gpointer)LOAD_VTABLE (search_start), safe_object_get_size (search_start));
pin_object (search_start);
+ GRAY_OBJECT_ENQUEUE (search_start);
if (heap_dump_file)
pin_stats_register_object (search_start, last_obj_size);
definitely_pinned [count] = search_start;
return count;
}
-static void** pin_queue;
-static int pin_queue_size = 0;
-static int next_pin_slot = 0;
-
-static int
-new_gap (int gap)
-{
- gap = (gap * 10) / 13;
- if (gap == 9 || gap == 10)
- return 11;
- if (gap < 1)
- return 1;
- return gap;
-}
-
-#if 0
-static int
-compare_addr (const void *a, const void *b)
-{
- return *(const void **)a - *(const void **)b;
+static void
+pin_objects_in_section (GCMemSection *section)
+{
+ int start = section->pin_queue_start;
+ int end = section->pin_queue_end;
+ if (start != end) {
+ int reduced_to;
+ reduced_to = pin_objects_from_addresses (section, pin_queue + start, pin_queue + end,
+ section->data, section->next_data);
+ section->pin_queue_start = start;
+ section->pin_queue_end = start + reduced_to;
+ }
}
-#endif
-/* sort the addresses in array in increasing order */
+/* Sort the addresses in array in increasing order.
+ * Done using a by-the-book heap sort, which has decent and consistent performance and is pretty cache efficient.
+ */
static void
sort_addresses (void **array, int size)
{
- /*
- * qsort is slower as predicted.
- * qsort (array, size, sizeof (gpointer), compare_addr);
- * return;
- */
- int gap = size;
- int swapped, end;
- while (TRUE) {
- int i;
- gap = new_gap (gap);
- swapped = FALSE;
- end = size - gap;
- for (i = 0; i < end; i++) {
- int j = i + gap;
- if (array [i] > array [j]) {
- void* val = array [i];
- array [i] = array [j];
- array [j] = val;
- swapped = TRUE;
- }
+ int i;
+ void *tmp;
+
+ for (i = 1; i < size; ++i) {
+ int child = i;
+ while (child > 0) {
+ int parent = (child - 1) / 2;
+
+ if (array [parent] >= array [child])
+ break;
+
+ tmp = array [parent];
+ array [parent] = array [child];
+ array [child] = tmp;
+
+ child = parent;
+ }
+ }
+
+ for (i = size - 1; i > 0; --i) {
+ int end, root;
+ tmp = array [i];
+ array [i] = array [0];
+ array [0] = tmp;
+
+ end = i - 1;
+ root = 0;
+
+ while (root * 2 + 1 <= end) {
+ int child = root * 2 + 1;
+
+ if (child < end && array [child] < array [child + 1])
+ ++child;
+ if (array [root] >= array [child])
+ break;
+
+ tmp = array [root];
+ array [root] = array [child];
+ array [child] = tmp;
+
+ root = child;
}
- if (gap == 1 && !swapped)
- break;
}
}
gpointer next;
for (i = 0; i < next_pin_slot; ++i) {
next = pin_queue [i];
- fprintf (gc_debug_file, "Nursery range: %p-%p, size: %zd\n", first, next, (char*)next-(char*)first);
+ fprintf (gc_debug_file, "Nursery range: %p-%p, size: %td\n", first, next, (char*)next-(char*)first);
first = next;
}
next = end_nursery;
- fprintf (gc_debug_file, "Nursery range: %p-%p, size: %zd\n", first, next, (char*)next-(char*)first);
+ fprintf (gc_debug_file, "Nursery range: %p-%p, size: %td\n", first, next, (char*)next-(char*)first);
}
/* reduce the info in the pin queue, removing duplicate pointers and sorting them */
}
-static int
-optimized_pin_queue_search (void *addr)
-{
- int first = 0, last = next_pin_slot;
- while (first < last) {
- int middle = first + ((last - first) >> 1);
- if (addr <= pin_queue [middle])
- last = middle;
- else
- first = middle + 1;
- }
- g_assert (first == last);
- return first;
-}
-
-static void
-find_optimized_pin_queue_area (void *start, void *end, int *first, int *last)
-{
- *first = optimized_pin_queue_search (start);
- *last = optimized_pin_queue_search (end);
-}
-
-static void
-realloc_pin_queue (void)
-{
- int new_size = pin_queue_size? pin_queue_size + pin_queue_size/2: 1024;
- void **new_pin = get_internal_mem (sizeof (void*) * new_size, INTERNAL_MEM_PIN_QUEUE);
- memcpy (new_pin, pin_queue, sizeof (void*) * next_pin_slot);
- free_internal_mem (pin_queue, INTERNAL_MEM_PIN_QUEUE);
- pin_queue = new_pin;
- pin_queue_size = new_size;
- DEBUG (4, fprintf (gc_debug_file, "Reallocated pin queue to size: %d\n", new_size));
-}
-
-#include "sgen-pinning.c"
-
/*
* Scan the memory between start and end and queue values which could be pointers
* to the area between start_nursery and end_nursery for later consideration.
DEBUG (7, if (count) fprintf (gc_debug_file, "found %d potential pinned heap pointers\n", count));
}
-/*
- * If generation is 0, just mark objects in the nursery, the others we don't care,
- * since they are not going to move anyway.
- * There are different areas that are scanned for pinned pointers:
- * *) the thread stacks (when jit support is ready only the unmanaged frames)
- * *) the pinned handle table
- * *) the pinned roots
- *
- * Note: when we'll use a write barrier for old to new gen references, we need to
- * keep track of old gen objects that point to pinned new gen objects because in that
- * case the referenced object will be moved maybe at the next collection, but there
- * is no write in the old generation area where the pinned object is referenced
- * and we may not consider it as reachable.
- */
-static G_GNUC_UNUSED void
-mark_pinned_objects (int generation)
-{
-}
-
/*
* Debugging function: find in the conservative roots where @obj is being pinned.
*/
evacuate_pin_staging_area ();
}
-/* Copy function called from user defined mark functions */
-static char *user_copy_n_start;
-static char *user_copy_n_end;
-
-static void
-user_copy (void **addr)
-{
- copy_object (addr, user_copy_n_start, user_copy_n_end);
-}
-
/*
* The memory area from start_root to end_root contains pointers to objects.
* Their position is precisely described by @desc (this means that the pointer
* This functions copies them to to_space updates them.
*/
static void
-precisely_scan_objects_from (void** start_root, void** end_root, char* n_start, char *n_end, mword desc)
+precisely_scan_objects_from (CopyOrMarkObjectFunc copy_func, void** start_root, void** end_root, char* n_start, char *n_end, mword desc)
{
switch (desc & ROOT_DESC_TYPE_MASK) {
case ROOT_DESC_BITMAP:
desc >>= ROOT_DESC_TYPE_SHIFT;
while (desc) {
if ((desc & 1) && *start_root) {
- copy_object (start_root, n_start, n_end);
+ copy_func (start_root);
DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", start_root, *start_root));
- drain_gray_stack (n_start, n_end);
+ drain_gray_stack ();
}
desc >>= 1;
start_root++;
void **objptr = start_run;
while (bmap) {
if ((bmap & 1) && *objptr) {
- copy_object (objptr, n_start, n_end);
+ copy_func (objptr);
DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", objptr, *objptr));
- drain_gray_stack (n_start, n_end);
+ drain_gray_stack ();
}
bmap >>= 1;
++objptr;
}
case ROOT_DESC_USER: {
MonoGCRootMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
-
- user_copy_n_start = n_start;
- user_copy_n_end = n_end;
- marker (start_root, user_copy);
+ marker (start_root, copy_func);
break;
}
case ROOT_DESC_RUN_LEN:
/* size must be a power of 2 */
static void*
-get_os_memory_aligned (mword size, gboolean activate)
+get_os_memory_aligned (mword size, mword alignment, gboolean activate)
{
/* Allocate twice the memory to be able to put the block on an aligned address */
- char *mem = get_os_memory (size * 2, activate);
+ char *mem = get_os_memory (size + alignment, activate);
char *aligned;
g_assert (mem);
- aligned = (char*)((mword)(mem + (size - 1)) & ~(size - 1));
- g_assert (aligned >= mem && aligned + size <= mem + size * 2 && !((mword)aligned & (size - 1)));
+ aligned = (char*)((mword)(mem + (alignment - 1)) & ~(alignment - 1));
+ g_assert (aligned >= mem && aligned + size <= mem + size + alignment && !((mword)aligned & (alignment - 1)));
if (aligned > mem)
free_os_memory (mem, aligned - mem);
- if (aligned + size < mem + size * 2)
- free_os_memory (aligned + size, (mem + size * 2) - (aligned + size));
+ if (aligned + size < mem + size + alignment)
+ free_os_memory (aligned + size, (mem + size + alignment) - (aligned + size));
return aligned;
}
if (nursery_section)
return;
- DEBUG (2, fprintf (gc_debug_file, "Allocating nursery size: %zd\n", nursery_size));
+ DEBUG (2, fprintf (gc_debug_file, "Allocating nursery size: %lu\n", (unsigned long)nursery_size));
/* later we will alloc a larger area for the nursery but only activate
* what we need. The rest will be used as expansion if we have too many pinned
* objects in the existing nursery.
g_assert (nursery_size == DEFAULT_NURSERY_SIZE);
alloc_size = nursery_size;
#ifdef ALIGN_NURSERY
- data = get_os_memory_aligned (alloc_size, TRUE);
+ data = get_os_memory_aligned (alloc_size, alloc_size, TRUE);
#else
data = get_os_memory (alloc_size, TRUE);
#endif
UPDATE_HEAP_BOUNDARIES (nursery_start, nursery_real_end);
nursery_next = nursery_start;
total_alloc += alloc_size;
- DEBUG (4, fprintf (gc_debug_file, "Expanding nursery size (%p-%p): %zd, total: %zd\n", data, data + alloc_size, nursery_size, total_alloc));
+ DEBUG (4, fprintf (gc_debug_file, "Expanding nursery size (%p-%p): %lu, total: %lu\n", data, data + alloc_size, (unsigned long)nursery_size, (unsigned long)total_alloc));
section->data = section->next_data = data;
section->size = alloc_size;
section->end_data = nursery_real_end;
section->scan_starts = get_internal_mem (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS);
section->num_scan_start = scan_starts;
section->block.role = MEMORY_ROLE_GEN0;
-
- /* add to the section list */
- section->block.next = section_list;
- section_list = section;
+ section->block.next = NULL;
nursery_section = section;
}
static void
-scan_finalizer_entries (FinalizeEntry *list, char *start, char *end) {
+scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeEntry *list) {
FinalizeEntry *fin;
for (fin = list; fin; fin = fin->next) {
if (!fin->object)
continue;
DEBUG (5, fprintf (gc_debug_file, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object)));
- copy_object (&fin->object, start, end);
+ copy_func (&fin->object);
}
}
-/*
- * Update roots in the old generation. Since we currently don't have the
- * info from the write barriers, we just scan all the objects.
- */
-static G_GNUC_UNUSED void
-scan_old_generation (char *start, char* end)
-{
- GCMemSection *section;
- LOSObject *big_object;
- char *p;
-
- for (section = section_list; section; section = section->block.next) {
- if (section == nursery_section)
- continue;
- DEBUG (2, fprintf (gc_debug_file, "Scan of old section: %p-%p, size: %d\n", section->data, section->next_data, (int)(section->next_data - section->data)));
- /* we have to deal with zeroed holes in old generation (truncated strings ...) */
- p = section->data;
- while (p < section->next_data) {
- if (!*(void**)p) {
- p += ALLOC_ALIGN;
- continue;
- }
- DEBUG (8, fprintf (gc_debug_file, "Precise old object scan of %p (%s)\n", p, safe_name (p)));
- p = scan_object (p, start, end);
- }
- }
- /* scan the old object space, too */
- for (big_object = los_object_list; big_object; big_object = big_object->next) {
- DEBUG (5, fprintf (gc_debug_file, "Scan of big object: %p (%s), size: %zd\n", big_object->data, safe_name (big_object->data), big_object->size));
- scan_object (big_object->data, start, end);
- }
- /* scan the list of objects ready for finalization */
- scan_finalizer_entries (fin_ready_list, start, end);
- scan_finalizer_entries (critical_fin_list, start, end);
-}
-
static mword fragment_total = 0;
/*
* We found a fragment of free memory in the nursery: memzero it and if
fragment_total += frag_size;
} else {
/* Clear unused fragments, pinning depends on this */
+		/* TODO: place an int[] here instead of the memset if size justifies it */
memset (frag_start, 0, frag_size);
}
}
-static int
-scan_needed_big_objects (char *start_addr, char *end_addr)
-{
- LOSObject *big_object;
- int count = 0;
- for (big_object = los_object_list; big_object; big_object = big_object->next) {
- if (!big_object->scanned && object_is_pinned (big_object->data)) {
- DEBUG (5, fprintf (gc_debug_file, "Scan of big object: %p (%s), size: %zd\n", big_object->data, safe_name (big_object->data), big_object->size));
- scan_object (big_object->data, start_addr, end_addr);
- drain_gray_stack (start_addr, end_addr);
- big_object->scanned = TRUE;
- count++;
- }
- }
- return count;
-}
-
static const char*
generation_name (int generation)
{
}
}
-static void
-new_to_space_section (void)
-{
- /* FIXME: if the current to_space_section is empty, we don't
- have to allocate a new one */
-
- to_space_section = alloc_major_section ();
- to_space_bumper = to_space_section->next_data;
- to_space_top = to_space_section->end_data;
-}
-
-static void
-to_space_set_next_data (void)
-{
- g_assert (to_space_bumper >= to_space_section->next_data && to_space_bumper <= to_space_section->end_data);
- to_space_section->next_data = to_space_bumper;
-}
-
-static void
-to_space_expand (void)
-{
- if (to_space_section) {
- g_assert (to_space_top == to_space_section->end_data);
- to_space_set_next_data ();
- }
-
- new_to_space_section ();
-}
-
-static void
-unset_to_space (void)
-{
- /* between collections the to_space_bumper is invalidated
- because degraded allocations might occur, so we set it to
- NULL, just to make it explicit */
- to_space_bumper = NULL;
-
- /* don't unset to_space_section if we implement the FIXME in
- new_to_space_section */
- to_space_section = NULL;
-}
-
-static gboolean
-object_is_in_to_space (char *obj)
-{
- mword objsize;
-
- /* nursery */
- if (ptr_in_nursery (obj))
- return FALSE;
-
- objsize = safe_object_get_size ((MonoObject*)obj);
- objsize += ALLOC_ALIGN - 1;
- objsize &= ~(ALLOC_ALIGN - 1);
-
- /* LOS */
- if (objsize > MAX_SMALL_OBJ_SIZE)
- return FALSE;
-
- /* pinned chunk */
- if (obj_is_from_pinned_alloc (obj))
- return FALSE;
-
- /* now we know it's in a major heap section */
- return MAJOR_SECTION_FOR_OBJECT (obj)->is_to_space;
-}
-
static void
finish_gray_stack (char *start_addr, char *end_addr, int generation)
{
TV_DECLARE (atv);
TV_DECLARE (btv);
- int fin_ready, bigo_scanned_num;
+ int fin_ready;
+ int ephemeron_rounds = 0;
+ CopyOrMarkObjectFunc copy_func = current_collection_generation == GENERATION_NURSERY ? copy_object : major_copy_or_mark_object;
/*
* We copied all the reachable objects. Now it's the time to copy
* To achieve better cache locality and cache usage, we drain the gray stack
* frequently, after each object is copied, and just finish the work here.
*/
- drain_gray_stack (start_addr, end_addr);
+ drain_gray_stack ();
TV_GETTIME (atv);
- //scan_old_generation (start_addr, end_addr);
DEBUG (2, fprintf (gc_debug_file, "%s generation done\n", generation_name (generation)));
/* walk the finalization queue and move also the objects that need to be
* finalized: use the finalized objects as new roots so the objects they depend
* that are fin-ready. Speedup with a flag?
*/
do {
+ /*
+ * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
+ * before processing finalizable objects to avoid finalizing reachable values.
+ *
+	 * It must be done inside the finalizers loop since objects must not be removed from CWT tables
+	 * while they are being finalized.
+ */
+ int done_with_ephemerons = 0;
+ do {
+ done_with_ephemerons = mark_ephemerons_in_range (copy_func, start_addr, end_addr);
+ drain_gray_stack ();
+ ++ephemeron_rounds;
+ } while (!done_with_ephemerons);
+
fin_ready = num_ready_finalizers;
- finalize_in_range (start_addr, end_addr, generation);
+ finalize_in_range (copy_func, start_addr, end_addr, generation);
if (generation == GENERATION_OLD)
- finalize_in_range (nursery_start, nursery_real_end, GENERATION_NURSERY);
- bigo_scanned_num = scan_needed_big_objects (start_addr, end_addr);
+ finalize_in_range (copy_func, nursery_start, nursery_real_end, GENERATION_NURSERY);
/* drain the new stack that might have been created */
DEBUG (6, fprintf (gc_debug_file, "Precise scan of gray area post fin\n"));
- drain_gray_stack (start_addr, end_addr);
- } while (fin_ready != num_ready_finalizers || bigo_scanned_num);
+ drain_gray_stack ();
+ } while (fin_ready != num_ready_finalizers);
+
+ /*
+ * Clear ephemeron pairs with unreachable keys.
+ * We pass the copy func so we can figure out if an array was promoted or not.
+ */
+ clear_unreachable_ephemerons (copy_func, start_addr, end_addr);
+
TV_GETTIME (btv);
- DEBUG (2, fprintf (gc_debug_file, "Finalize queue handling scan for %s generation: %d usecs\n", generation_name (generation), TV_ELAPSED (atv, btv)));
+	DEBUG (2, fprintf (gc_debug_file, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron rounds\n", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds));
/*
* handle disappearing links
*/
g_assert (gray_object_queue_is_empty ());
for (;;) {
- null_link_in_range (start_addr, end_addr, generation);
+ null_link_in_range (copy_func, start_addr, end_addr, generation);
if (generation == GENERATION_OLD)
- null_link_in_range (start_addr, end_addr, GENERATION_NURSERY);
+ null_link_in_range (copy_func, start_addr, end_addr, GENERATION_NURSERY);
if (gray_object_queue_is_empty ())
break;
- drain_gray_stack (start_addr, end_addr);
+ drain_gray_stack ();
}
g_assert (gray_object_queue_is_empty ());
- /* DEBUG (2, fprintf (gc_debug_file, "Copied from %s to old space: %d bytes (%p-%p)\n", generation_name (generation), (int)(to_space_bumper - to_space), to_space, to_space_bumper)); */
- to_space_set_next_data ();
}
static void
-check_scan_starts (void)
+check_section_scan_starts (GCMemSection *section)
{
- GCMemSection *section;
int i;
- if (!do_scan_starts_check)
- return;
- for (section = section_list; section; section = section->block.next) {
- for (i = 0; i < section->num_scan_start; ++i) {
- if (section->scan_starts [i]) {
- guint size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
- g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
- }
+ for (i = 0; i < section->num_scan_start; ++i) {
+ if (section->scan_starts [i]) {
+ guint size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
+ g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
}
}
}
+static void
+check_scan_starts (void)
+{
+ if (!do_scan_starts_check)
+ return;
+ check_section_scan_starts (nursery_section);
+ major_check_scan_starts ();
+}
+
static int last_num_pinned = 0;
static void
clear_tlabs ();
}
-/* FIXME: later reduce code duplication here with the above
- * We don't keep track of section fragments for non-nursery sections yet, so
- * just memset to 0.
- */
static void
-build_section_fragments (GCMemSection *section)
-{
- int i;
- char *frag_start, *frag_end;
- size_t frag_size;
-
- /* clear scan starts */
- memset (section->scan_starts, 0, section->num_scan_start * sizeof (gpointer));
- frag_start = section->data;
- section->next_data = section->data;
- for (i = section->pin_queue_start; i < section->pin_queue_end; ++i) {
- frag_end = pin_queue [i];
- /* remove the pin bit from pinned objects */
- unpin_object (frag_end);
- if (frag_end >= section->data + section->size) {
- frag_end = section->data + section->size;
- } else {
- section->scan_starts [((char*)frag_end - (char*)section->data)/SCAN_START_SIZE] = frag_end;
- }
- frag_size = frag_end - frag_start;
- if (frag_size) {
- binary_protocol_empty (frag_start, frag_size);
- memset (frag_start, 0, frag_size);
- }
- frag_size = safe_object_get_size ((MonoObject*)pin_queue [i]);
- frag_size += ALLOC_ALIGN - 1;
- frag_size &= ~(ALLOC_ALIGN - 1);
- frag_start = (char*)pin_queue [i] + frag_size;
- section->next_data = MAX (section->next_data, frag_start);
- }
- frag_end = section->end_data;
- frag_size = frag_end - frag_start;
- if (frag_size) {
- binary_protocol_empty (frag_start, frag_size);
- memset (frag_start, 0, frag_size);
- }
-}
-
-static void
-scan_from_registered_roots (char *addr_start, char *addr_end, int root_type)
+scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, char *addr_start, char *addr_end, int root_type)
{
int i;
RootRecord *root;
for (i = 0; i < roots_hash_size [root_type]; ++i) {
for (root = roots_hash [root_type][i]; root; root = root->next) {
DEBUG (6, fprintf (gc_debug_file, "Precise root scan %p-%p (desc: %p)\n", root->start_root, root->end_root, (void*)root->root_desc));
- precisely_scan_objects_from ((void**)root->start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc);
+ precisely_scan_objects_from (copy_func, (void**)root->start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc);
}
}
}
static void
dump_occupied (char *start, char *end, char *section_start)
{
- fprintf (heap_dump_file, "<occupied offset=\"%zd\" size=\"%zd\"/>\n", start - section_start, end - start);
+ fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
}
static void
GCVTable *vt;
char *old_start = NULL; /* just for debugging */
- fprintf (heap_dump_file, "<section type=\"%s\" size=\"%zu\">\n", type, section->size);
+ fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
while (start < end) {
guint size;
static char const *internal_mem_names [] = { "pin-queue", "fragment", "section", "scan-starts",
"fin-table", "finalize-entry", "dislink-table",
"dislink", "roots-table", "root-record", "statistics",
- "remset", "gray-queue", "store-remset" };
+ "remset", "gray-queue", "store-remset", "marksweep-tables",
+ "marksweep-block-info", "ephemeron-link" };
- GCMemSection *section;
ObjectList *list;
LOSObject *bigobj;
int i;
if (reason)
fprintf (heap_dump_file, " reason=\"%s\"", reason);
fprintf (heap_dump_file, ">\n");
- fprintf (heap_dump_file, "<other-mem-usage type=\"pinned-chunks\" size=\"%ld\"/>\n", pinned_chunk_bytes_alloced);
- fprintf (heap_dump_file, "<other-mem-usage type=\"large-internal\" size=\"%ld\"/>\n", large_internal_bytes_alloced);
+ fprintf (heap_dump_file, "<other-mem-usage type=\"pinned-chunks\" size=\"%lld\"/>\n", pinned_chunk_bytes_alloced);
+ fprintf (heap_dump_file, "<other-mem-usage type=\"large-internal\" size=\"%lld\"/>\n", large_internal_bytes_alloced);
fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
for (i = 0; i < INTERNAL_MEM_MAX; ++i)
fprintf (heap_dump_file, "<other-mem-usage type=\"%s\" size=\"%ld\"/>\n", internal_mem_names [i], small_internal_mem_bytes [i]);
dump_section (nursery_section, "nursery");
- for (section = section_list; section; section = section->block.next) {
- if (section != nursery_section)
- dump_section (section, "old");
- }
+ major_dump_heap ();
fprintf (heap_dump_file, "<los>\n");
for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
{
static gboolean inited = FALSE;
-#ifdef HEAVY_STATISTICS
- num_copy_object_called = 0;
- num_objects_copied = 0;
-#endif
-
if (inited)
return;
mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_pinned);
mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_registered_roots);
mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_thread_data);
- mono_counters_register ("Minor scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_alloc_pinned);
mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_finish_gray_stack);
mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_fragment_creation);
mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_alloc_pinned);
mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_finalized);
mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_big_objects);
- mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_finish_gray_stack);
+ mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_finish_gray_stack);
+ mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_free_bigobjs);
+ mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_los_sweep);
mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_sweep);
mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_fragment_creation);
mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced_degraded);
mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_degraded);
mono_counters_register ("bytes allocated in LOS", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_los);
+
mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_nursery);
mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_nursery);
mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_major);
mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_major);
- mono_counters_register ("# copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_failed_from_space);
- mono_counters_register ("# copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_failed_forwarded);
- mono_counters_register ("# copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_failed_pinned);
- mono_counters_register ("# copy_object() failed large or pinned chunk", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_failed_large_pinned);
- mono_counters_register ("# copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_failed_to_space);
+ mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_nursery);
+ mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_major);
+
+ mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_from_space);
+ mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_forwarded);
+ mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_pinned);
+
+ mono_counters_register ("# wasted fragments used", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_wasted_fragments_used);
+ mono_counters_register ("bytes in wasted fragments", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_wasted_fragments_bytes);
mono_counters_register ("Store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets);
mono_counters_register ("Unique store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets_unique);
mono_counters_register ("Saved remsets 1", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_saved_remsets_1);
mono_counters_register ("Saved remsets 2", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_saved_remsets_2);
mono_counters_register ("Global remsets added", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_added);
+ mono_counters_register ("Global remsets re-added", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_readded);
mono_counters_register ("Global remsets processed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_processed);
+ mono_counters_register ("Global remsets discarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_discarded);
+
#endif
inited = TRUE;
}
-static void
-commit_stats (int generation)
+static gboolean
+need_major_collection (void)
{
-#ifdef HEAVY_STATISTICS
- if (generation == GENERATION_NURSERY) {
- stat_copy_object_called_nursery += num_copy_object_called;
- stat_objects_copied_nursery += num_objects_copied;
- } else {
- g_assert (generation == GENERATION_OLD);
- stat_copy_object_called_major += num_copy_object_called;
- stat_objects_copied_major += num_objects_copied;
- }
-#endif
+ mword los_alloced = los_memory_usage - MIN (last_los_memory_usage, los_memory_usage);
+ return minor_collection_sections_alloced * MAJOR_SECTION_SIZE + los_alloced > minor_collection_allowance;
}
/*
collect_nursery (size_t requested_size)
{
size_t max_garbage_amount;
- int i;
char *orig_nursery_next;
- Fragment *frag;
- GCMemSection *section;
- int old_num_major_sections = num_major_sections;
- int sections_alloced;
TV_DECLARE (all_atv);
TV_DECLARE (all_btv);
TV_DECLARE (atv);
TV_DECLARE (btv);
+ current_collection_generation = GENERATION_NURSERY;
+
init_stats ();
binary_protocol_collection (GENERATION_NURSERY);
check_scan_starts ();
TV_GETTIME (all_atv);
TV_GETTIME (atv);
- /* Clear all remaining nursery fragments, pinning depends on this */
- if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
- g_assert (orig_nursery_next <= nursery_frag_real_end);
- memset (orig_nursery_next, 0, nursery_frag_real_end - orig_nursery_next);
- for (frag = nursery_fragments; frag; frag = frag->next) {
- memset (frag->fragment_start, 0, frag->fragment_end - frag->fragment_start);
- }
- }
+ /* Pinning depends on this */
+ clear_nursery_fragments (orig_nursery_next);
TV_GETTIME (btv);
time_minor_pre_collection_fragment_clear += TV_ELAPSED_MS (atv, btv);
nursery_section->next_data = nursery_next;
- if (!to_space_section) {
- new_to_space_section ();
- } else {
- /* we might have done degraded allocation since the
- last collection */
- g_assert (to_space_bumper <= to_space_section->next_data);
- to_space_bumper = to_space_section->next_data;
+ major_start_nursery_collection ();
- to_space_section->is_to_space = TRUE;
- }
gray_object_queue_init ();
num_minor_gcs++;
mono_stats.minor_gc_count ++;
+
+ global_remset_cache_clear ();
+
/* pin from pinned handles */
init_pinning ();
pin_from_roots (nursery_start, nursery_next);
/* identify pinned objects */
optimize_pin_queue (0);
next_pin_slot = pin_objects_from_addresses (nursery_section, pin_queue, pin_queue + next_pin_slot, nursery_start, nursery_next);
+ nursery_section->pin_queue_start = 0;
+ nursery_section->pin_queue_end = next_pin_slot;
TV_GETTIME (atv);
time_minor_pinning += TV_ELAPSED_MS (btv, atv);
DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", next_pin_slot, TV_ELAPSED (btv, atv)));
time_minor_scan_remsets += TV_ELAPSED_MS (atv, btv);
DEBUG (2, fprintf (gc_debug_file, "Old generation scan: %d usecs\n", TV_ELAPSED (atv, btv)));
- /* the pinned objects are roots */
- for (i = 0; i < next_pin_slot; ++i) {
- DEBUG (6, fprintf (gc_debug_file, "Precise object scan %d of pinned %p (%s)\n", i, pin_queue [i], safe_name (pin_queue [i])));
- scan_object (pin_queue [i], nursery_start, nursery_next);
- }
+ drain_gray_stack ();
+
TV_GETTIME (atv);
time_minor_scan_pinned += TV_ELAPSED_MS (btv, atv);
/* registered roots, this includes static fields */
- scan_from_registered_roots (nursery_start, nursery_next, ROOT_TYPE_NORMAL);
- scan_from_registered_roots (nursery_start, nursery_next, ROOT_TYPE_WBARRIER);
+ scan_from_registered_roots (copy_object, nursery_start, nursery_next, ROOT_TYPE_NORMAL);
+ scan_from_registered_roots (copy_object, nursery_start, nursery_next, ROOT_TYPE_WBARRIER);
TV_GETTIME (btv);
time_minor_scan_registered_roots += TV_ELAPSED_MS (atv, btv);
/* thread data */
scan_thread_data (nursery_start, nursery_next, TRUE);
TV_GETTIME (atv);
time_minor_scan_thread_data += TV_ELAPSED_MS (btv, atv);
- /* alloc_pinned objects */
- scan_from_pinned_objects (nursery_start, nursery_next);
- TV_GETTIME (btv);
- time_minor_scan_alloc_pinned += TV_ELAPSED_MS (atv, btv);
- DEBUG (2, fprintf (gc_debug_file, "Root scan: %d usecs\n", TV_ELAPSED (atv, btv)));
+ btv = atv;
finish_gray_stack (nursery_start, nursery_next, GENERATION_NURSERY);
TV_GETTIME (atv);
build_nursery_fragments (0, next_pin_slot);
TV_GETTIME (btv);
time_minor_fragment_creation += TV_ELAPSED_MS (atv, btv);
- DEBUG (2, fprintf (gc_debug_file, "Fragment creation: %d usecs, %zd bytes available\n", TV_ELAPSED (atv, btv), fragment_total));
+ DEBUG (2, fprintf (gc_debug_file, "Fragment creation: %d usecs, %lu bytes available\n", TV_ELAPSED (atv, btv), (unsigned long)fragment_total));
- for (section = section_list; section; section = section->block.next) {
- if (section->is_to_space)
- section->is_to_space = FALSE;
- }
+ if (consistency_check_at_minor_collection)
+ check_major_refs ();
+
+ major_finish_nursery_collection ();
TV_GETTIME (all_btv);
mono_stats.minor_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
g_assert (gray_object_queue_is_empty ());
- commit_stats (GENERATION_NURSERY);
-
check_scan_starts ();
- sections_alloced = num_major_sections - old_num_major_sections;
- minor_collection_sections_alloced += sections_alloced;
+ current_collection_generation = -1;
- return minor_collection_sections_alloced > minor_collection_section_allowance;
+ return need_major_collection ();
}
static void
-scan_from_pinned_chunk_if_marked (PinnedChunk *chunk, char *obj, size_t size, void *dummy)
+major_do_collection (const char *reason)
{
- if (object_is_pinned (obj))
- scan_object (obj, NULL, (char*)-1);
-}
-
-static void
-major_collection (const char *reason)
-{
- GCMemSection *section, *prev_section;
LOSObject *bigobj, *prevbo;
- int i;
- PinnedChunk *chunk;
- Fragment *frag;
TV_DECLARE (all_atv);
TV_DECLARE (all_btv);
TV_DECLARE (atv);
*/
char *heap_start = NULL;
char *heap_end = (char*)-1;
- size_t copy_space_required = 0;
int old_num_major_sections = num_major_sections;
int num_major_sections_saved, save_target, allowance_target;
+ mword los_memory_saved, los_memory_alloced, old_los_memory_usage;
+
+ /*
+ * A domain could have been freed, resulting in
+ * los_memory_usage being less than last_los_memory_usage.
+ */
+ los_memory_alloced = los_memory_usage - MIN (last_los_memory_usage, los_memory_usage);
+ old_los_memory_usage = los_memory_usage;
+
+ //count_ref_nonref_objs ();
+ //consistency_check ();
init_stats ();
binary_protocol_collection (GENERATION_OLD);
check_scan_starts ();
+ gray_object_queue_init ();
degraded_mode = 0;
DEBUG (1, fprintf (gc_debug_file, "Start major collection %d\n", num_major_gcs));
TV_GETTIME (all_atv);
TV_GETTIME (atv);
- /* Clear all remaining nursery fragments, pinning depends on this */
- if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
- g_assert (nursery_next <= nursery_frag_real_end);
- memset (nursery_next, 0, nursery_frag_real_end - nursery_next);
- for (frag = nursery_fragments; frag; frag = frag->next) {
- memset (frag->fragment_start, 0, frag->fragment_end - frag->fragment_start);
- }
- }
+ /* Pinning depends on this */
+ clear_nursery_fragments (nursery_next);
TV_GETTIME (btv);
time_major_pre_collection_fragment_clear += TV_ELAPSED_MS (atv, btv);
if (xdomain_checks)
check_for_xdomain_refs ();
- if (g_getenv ("MONO_GC_NO_MAJOR")) {
- collect_nursery (0);
- return;
- }
nursery_section->next_data = nursery_real_end;
/* we should also coalesce scanning from sections close to each other
* and deal with pointers outside of the sections later.
*/
/* The remsets are not useful for a major collection */
clear_remsets ();
+ global_remset_cache_clear ();
TV_GETTIME (atv);
init_pinning ();
*/
DEBUG (6, fprintf (gc_debug_file, "Pinning from sections\n"));
/* first pass for the sections */
- for (section = section_list; section; section = section->block.next) {
- int start, end;
- DEBUG (6, fprintf (gc_debug_file, "Pinning from section %p (%p-%p)\n", section, section->data, section->end_data));
- find_optimized_pin_queue_area (section->data, section->end_data, &start, &end);
- DEBUG (6, fprintf (gc_debug_file, "Found %d pinning addresses in section %p (%d-%d)\n",
- end - start, section, start, end));
- section->pin_queue_start = start;
- section->pin_queue_end = end;
- }
+ find_section_pin_queue_start_end (nursery_section);
+ major_find_pin_queue_start_ends ();
/* identify possible pointers to the insize of large objects */
DEBUG (6, fprintf (gc_debug_file, "Pinning from large objects\n"));
for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + bigobj->size, &start, &end);
if (start != end) {
pin_object (bigobj->data);
+ /* FIXME: only enqueue if object has references */
+ GRAY_OBJECT_ENQUEUE (bigobj->data);
if (heap_dump_file)
pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
- DEBUG (6, fprintf (gc_debug_file, "Marked large object %p (%s) size: %zd from roots\n", bigobj->data, safe_name (bigobj->data), bigobj->size));
+ DEBUG (6, fprintf (gc_debug_file, "Marked large object %p (%s) size: %lu from roots\n", bigobj->data, safe_name (bigobj->data), (unsigned long)bigobj->size));
}
}
- /* look for pinned addresses for pinned-alloc objects */
- DEBUG (6, fprintf (gc_debug_file, "Pinning from pinned-alloc objects\n"));
- for (chunk = pinned_chunk_list; chunk; chunk = chunk->block.next) {
- int start, end;
- find_optimized_pin_queue_area (chunk->start_data, (char*)chunk + chunk->num_pages * FREELIST_PAGESIZE, &start, &end);
- if (start != end)
- mark_pinned_from_addresses (chunk, pin_queue + start, pin_queue + end);
- }
/* second pass for the sections */
- for (section = section_list; section; section = section->block.next) {
- int start = section->pin_queue_start;
- int end = section->pin_queue_end;
- if (start != end) {
- int reduced_to;
- reduced_to = pin_objects_from_addresses (section, pin_queue + start, pin_queue + end,
- section->data, section->next_data);
- section->pin_queue_start = start;
- section->pin_queue_end = start + reduced_to;
- }
- copy_space_required += (char*)section->next_data - (char*)section->data;
- }
+ pin_objects_in_section (nursery_section);
+ major_pin_objects ();
TV_GETTIME (btv);
time_major_pinning += TV_ELAPSED_MS (atv, btv);
DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", next_pin_slot, TV_ELAPSED (atv, btv)));
DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", next_pin_slot));
- new_to_space_section ();
- gray_object_queue_init ();
+ major_init_to_space ();
+
+ drain_gray_stack ();
- /* the old generation doesn't need to be scanned (no remembered sets or card
- * table needed either): the only objects that must survive are those pinned and
- * those referenced by the precise roots.
- * mark any section without pinned objects, so we can free it since we will be able to
- * move all the objects.
- */
- /* the pinned objects are roots (big objects are included in this list, too) */
- for (section = section_list; section; section = section->block.next) {
- for (i = section->pin_queue_start; i < section->pin_queue_end; ++i) {
- DEBUG (6, fprintf (gc_debug_file, "Precise object scan %d of pinned %p (%s)\n",
- i, pin_queue [i], safe_name (pin_queue [i])));
- scan_object (pin_queue [i], heap_start, heap_end);
- }
- }
- for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
- if (object_is_pinned (bigobj->data)) {
- DEBUG (6, fprintf (gc_debug_file, "Precise object scan pinned LOS object %p (%s)\n",
- bigobj->data, safe_name (bigobj->data)));
- scan_object (bigobj->data, heap_start, heap_end);
- }
- }
- scan_pinned_objects (scan_from_pinned_chunk_if_marked, NULL);
TV_GETTIME (atv);
time_major_scan_pinned += TV_ELAPSED_MS (btv, atv);
/* registered roots, this includes static fields */
- scan_from_registered_roots (heap_start, heap_end, ROOT_TYPE_NORMAL);
- scan_from_registered_roots (heap_start, heap_end, ROOT_TYPE_WBARRIER);
+ scan_from_registered_roots (major_copy_or_mark_object, heap_start, heap_end, ROOT_TYPE_NORMAL);
+ scan_from_registered_roots (major_copy_or_mark_object, heap_start, heap_end, ROOT_TYPE_WBARRIER);
TV_GETTIME (btv);
time_major_scan_registered_roots += TV_ELAPSED_MS (atv, btv);
/* Threads */
+ /* FIXME: This is the wrong place for this, because it does
+ pinning */
scan_thread_data (heap_start, heap_end, TRUE);
TV_GETTIME (atv);
time_major_scan_thread_data += TV_ELAPSED_MS (btv, atv);
- /* alloc_pinned objects */
- scan_from_pinned_objects (heap_start, heap_end);
TV_GETTIME (btv);
time_major_scan_alloc_pinned += TV_ELAPSED_MS (atv, btv);
/* scan the list of objects ready for finalization */
- scan_finalizer_entries (fin_ready_list, heap_start, heap_end);
- scan_finalizer_entries (critical_fin_list, heap_start, heap_end);
+ scan_finalizer_entries (major_copy_or_mark_object, fin_ready_list);
+ scan_finalizer_entries (major_copy_or_mark_object, critical_fin_list);
TV_GETTIME (atv);
time_major_scan_finalized += TV_ELAPSED_MS (btv, atv);
DEBUG (2, fprintf (gc_debug_file, "Root scan: %d usecs\n", TV_ELAPSED (btv, atv)));
- /* we need to go over the big object list to see if any was marked and scan it
- * And we need to make this in a loop, considering that objects referenced by finalizable
- * objects could reference big objects (this happens in finish_gray_stack ())
- */
- scan_needed_big_objects (heap_start, heap_end);
TV_GETTIME (btv);
time_major_scan_big_objects += TV_ELAPSED_MS (atv, btv);
TV_GETTIME (atv);
time_major_finish_gray_stack += TV_ELAPSED_MS (btv, atv);
- unset_to_space ();
-
/* sweep the big objects list */
prevbo = NULL;
for (bigobj = los_object_list; bigobj;) {
if (object_is_pinned (bigobj->data)) {
unpin_object (bigobj->data);
- bigobj->scanned = FALSE;
} else {
LOSObject *to_free;
/* not referenced anywhere, so we can free it */
prevbo = bigobj;
bigobj = bigobj->next;
}
- /* unpin objects from the pinned chunks and free the unmarked ones */
- sweep_pinned_objects ();
TV_GETTIME (btv);
- time_major_sweep += TV_ELAPSED_MS (atv, btv);
+ time_major_free_bigobjs += TV_ELAPSED_MS (atv, btv);
- /* free the unused sections */
- prev_section = NULL;
- for (section = section_list; section;) {
- /* to_space doesn't need handling here and the nursery is special */
- if (section->is_to_space || section == nursery_section) {
- if (section->is_to_space)
- section->is_to_space = FALSE;
- prev_section = section;
- section = section->block.next;
- continue;
- }
- /* no pinning object, so the section is free */
- if (section->pin_queue_start == section->pin_queue_end) {
- GCMemSection *to_free;
- if (prev_section)
- prev_section->block.next = section->block.next;
- else
- section_list = section->block.next;
- to_free = section;
- section = section->block.next;
- free_major_section (to_free);
- continue;
- } else {
- DEBUG (6, fprintf (gc_debug_file, "Section %p has still pinned objects (%d)\n", section, section->pin_queue_end - section->pin_queue_start));
- build_section_fragments (section);
- }
- prev_section = section;
- section = section->block.next;
- }
+ los_sweep ();
+
+ TV_GETTIME (atv);
+ time_major_los_sweep += TV_ELAPSED_MS (btv, atv);
+
+ major_sweep ();
+
+ TV_GETTIME (btv);
+ time_major_sweep += TV_ELAPSED_MS (atv, btv);
/* walk the pin_queue, build up the fragment list of free memory, unmark
* pinned objects as we go, memzero() the empty fragments so they are ready for the
g_assert (gray_object_queue_is_empty ());
- commit_stats (GENERATION_OLD);
-
- num_major_sections_saved = MAX (old_num_major_sections - num_major_sections, 1);
+ num_major_sections_saved = MAX (old_num_major_sections - num_major_sections, 0);
+ los_memory_saved = MAX (old_los_memory_usage - los_memory_usage, 1);
- save_target = num_major_sections / 2;
+ save_target = ((num_major_sections * MAJOR_SECTION_SIZE) + los_memory_saved) / 2;
/*
* We aim to allow the allocation of as many sections as is
* necessary to reclaim save_target sections in the next
*
* hence:
*/
- allowance_target = save_target * minor_collection_sections_alloced / num_major_sections_saved;
+ allowance_target = (mword)((double)save_target * (double)(minor_collection_sections_alloced * MAJOR_SECTION_SIZE + los_memory_alloced) / (double)(num_major_sections_saved * MAJOR_SECTION_SIZE + los_memory_saved));
- minor_collection_section_allowance = MAX (MIN (allowance_target, num_major_sections), MIN_MINOR_COLLECTION_SECTION_ALLOWANCE);
+ minor_collection_allowance = MAX (MIN (allowance_target, num_major_sections * MAJOR_SECTION_SIZE + los_memory_usage), MIN_MINOR_COLLECTION_ALLOWANCE);
minor_collection_sections_alloced = 0;
+ last_los_memory_usage = los_memory_usage;
check_scan_starts ();
-}
-
-/*
- * Allocate a new section of memory to be used as old generation.
- */
-static GCMemSection*
-alloc_major_section (void)
-{
- GCMemSection *section;
- int scan_starts;
-
- section = get_os_memory_aligned (MAJOR_SECTION_SIZE, TRUE);
- section->next_data = section->data = (char*)section + SIZEOF_GC_MEM_SECTION;
- g_assert (!((mword)section->data & 7));
- section->size = MAJOR_SECTION_SIZE - SIZEOF_GC_MEM_SECTION;
- section->end_data = section->data + section->size;
- UPDATE_HEAP_BOUNDARIES (section->data, section->end_data);
- total_alloc += section->size;
- DEBUG (3, fprintf (gc_debug_file, "New major heap section: (%p-%p), total: %zd\n", section->data, section->end_data, total_alloc));
- scan_starts = (section->size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
- section->scan_starts = get_internal_mem (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS);
- section->num_scan_start = scan_starts;
- section->block.role = MEMORY_ROLE_GEN1;
- section->is_to_space = TRUE;
- /* add to the section list */
- section->block.next = section_list;
- section_list = section;
-
- ++num_major_sections;
-
- return section;
+ //consistency_check ();
}
static void
-free_major_section (GCMemSection *section)
+major_collection (const char *reason)
{
- DEBUG (3, fprintf (gc_debug_file, "Freed major section %p (%p-%p)\n", section, section->data, section->end_data));
- free_internal_mem (section->scan_starts, INTERNAL_MEM_SCAN_STARTS);
- free_os_memory (section, MAJOR_SECTION_SIZE);
- total_alloc -= MAJOR_SECTION_SIZE - SIZEOF_GC_MEM_SECTION;
+ if (g_getenv ("MONO_GC_NO_MAJOR")) {
+ collect_nursery (0);
+ return;
+ }
- --num_major_sections;
+ current_collection_generation = GENERATION_OLD;
+ major_do_collection (reason);
+ current_collection_generation = -1;
}
/*
stop_world ();
if (collect_nursery (size))
major_collection ("minor overflow");
- DEBUG (2, fprintf (gc_debug_file, "Heap size: %zd, LOS size: %zd\n", total_alloc, los_memory_usage));
+ DEBUG (2, fprintf (gc_debug_file, "Heap size: %lu, LOS size: %lu\n", (unsigned long)total_alloc, (unsigned long)los_memory_usage));
restart_world ();
/* this also sets the proper pointers for the next allocation */
if (!search_fragment_for_size (size)) {
report_pinned_chunk (chunk, i++);
}
printf ("Pinned memory usage:\n");
- i = 0;
- for (chunk = pinned_chunk_list; chunk; chunk = chunk->block.next) {
- report_pinned_chunk (chunk, i++);
- }
-}
-
-/*
- * the array of pointers from @start to @end contains conservative
- * pointers to objects inside @chunk: mark each referenced object
- * with the PIN bit.
- */
-static void
-mark_pinned_from_addresses (PinnedChunk *chunk, void **start, void **end)
-{
- for (; start < end; start++) {
- char *addr = *start;
- int offset = (char*)addr - (char*)chunk;
- int page = offset / FREELIST_PAGESIZE;
- int obj_offset = page == 0? offset - ((char*)chunk->start_data - (char*)chunk): offset % FREELIST_PAGESIZE;
- int slot_size = chunk->page_sizes [page];
- void **ptr;
- /* the page is not allocated */
- if (!slot_size)
- continue;
- /* would be faster if we restrict the sizes to power of two,
- * but that's a waste of memory: need to measure. it could reduce
- * fragmentation since there are less pages needed, if for example
- * someone interns strings of each size we end up with one page per
- * interned string (still this is just ~40 KB): with more fine-grained sizes
- * this increases the number of used pages.
- */
- if (page == 0) {
- obj_offset /= slot_size;
- obj_offset *= slot_size;
- addr = (char*)chunk->start_data + obj_offset;
- } else {
- obj_offset /= slot_size;
- obj_offset *= slot_size;
- addr = (char*)chunk + page * FREELIST_PAGESIZE + obj_offset;
- }
- ptr = (void**)addr;
- /* if the vtable is inside the chunk it's on the freelist, so skip */
- if (*ptr && (*ptr < (void*)chunk->start_data || *ptr > (void*)((char*)chunk + chunk->num_pages * FREELIST_PAGESIZE))) {
- binary_protocol_pin (addr, (gpointer)LOAD_VTABLE (addr), safe_object_get_size ((MonoObject*)addr));
- if (heap_dump_file && !object_is_pinned (addr))
- pin_stats_register_object ((char*) addr, safe_object_get_size ((MonoObject*) addr));
- pin_object (addr);
- DEBUG (6, fprintf (gc_debug_file, "Marked pinned object %p (%s) from roots\n", addr, safe_name (addr)));
- }
- }
-}
-
-static void
-scan_pinned_objects (ScanPinnedObjectCallbackFunc callback, void *callback_data)
-{
- PinnedChunk *chunk;
- int i, obj_size;
- char *p, *endp;
- void **ptr;
- void *end_chunk;
- for (chunk = pinned_chunk_list; chunk; chunk = chunk->block.next) {
- end_chunk = (char*)chunk + chunk->num_pages * FREELIST_PAGESIZE;
- DEBUG (6, fprintf (gc_debug_file, "Scanning pinned chunk %p (range: %p-%p)\n", chunk, chunk->start_data, end_chunk));
- for (i = 0; i < chunk->num_pages; ++i) {
- obj_size = chunk->page_sizes [i];
- if (!obj_size)
- continue;
- p = i? (char*)chunk + i * FREELIST_PAGESIZE: chunk->start_data;
- endp = i? p + FREELIST_PAGESIZE: (char*)chunk + FREELIST_PAGESIZE;
- DEBUG (6, fprintf (gc_debug_file, "Page %d (size: %d, range: %p-%p)\n", i, obj_size, p, endp));
- while (p + obj_size <= endp) {
- ptr = (void**)p;
- DEBUG (9, fprintf (gc_debug_file, "Considering %p (vtable: %p)\n", ptr, *ptr));
- /* if the first word (the vtable) is outside the chunk we have an object */
- if (*ptr && (*ptr < (void*)chunk || *ptr >= end_chunk))
- callback (chunk, (char*)ptr, obj_size, callback_data);
- p += obj_size;
- }
- }
- }
-}
-
-static void
-sweep_pinned_objects_callback (PinnedChunk *chunk, char *ptr, size_t size, void *data)
-{
- if (object_is_pinned (ptr)) {
- unpin_object (ptr);
- DEBUG (6, fprintf (gc_debug_file, "Unmarked pinned object %p (%s)\n", ptr, safe_name (ptr)));
- } else {
- DEBUG (6, fprintf (gc_debug_file, "Freeing unmarked pinned object %p (%s)\n", ptr, safe_name (ptr)));
- free_pinned_object (chunk, ptr, size);
- }
-}
-
-static void
-sweep_pinned_objects (void)
-{
- scan_pinned_objects (sweep_pinned_objects_callback, NULL);
-}
-
-static void
-scan_object_callback (PinnedChunk *chunk, char *ptr, size_t size, char **data)
-{
- DEBUG (6, fprintf (gc_debug_file, "Precise object scan of alloc_pinned %p (%s)\n", ptr, safe_name (ptr)));
- /* FIXME: Put objects without references into separate chunks
- which do not need to be scanned */
- scan_object (ptr, data [0], data [1]);
-}
-
-static void
-scan_from_pinned_objects (char *addr_start, char *addr_end)
-{
- char *data [2] = { addr_start, addr_end };
- scan_pinned_objects ((ScanPinnedObjectCallbackFunc)scan_object_callback, data);
+ major_report_pinned_memory_usage ();
}
/*
{
PinnedChunk *chunk;
int offset;
- int size = MAJOR_SECTION_SIZE;
+ int size = PINNED_CHUNK_SIZE;
- chunk = get_os_memory_aligned (size, TRUE);
+ chunk = get_os_memory_aligned (size, size, TRUE);
chunk->block.role = MEMORY_ROLE_PINNED;
UPDATE_HEAP_BOUNDARIES (chunk, ((char*)chunk + size));
chunk->page_sizes [0] = PINNED_FIRST_SLOT_SIZE;
build_freelist (chunk, slot_for_size (PINNED_FIRST_SLOT_SIZE), PINNED_FIRST_SLOT_SIZE, chunk->start_data, ((char*)chunk + FREELIST_PAGESIZE));
DEBUG (4, fprintf (gc_debug_file, "Allocated pinned chunk %p, size: %d\n", chunk, size));
- min_pinned_chunk_addr = MIN (min_pinned_chunk_addr, (char*)chunk->start_data);
- max_pinned_chunk_addr = MAX (max_pinned_chunk_addr, ((char*)chunk + size));
return chunk;
}
return NULL;
}
-static void*
-alloc_from_freelist (size_t size)
-{
- int slot;
- void *res = NULL;
- PinnedChunk *pchunk;
- slot = slot_for_size (size);
- /*g_print ("using slot %d for size %d (slot size: %d)\n", slot, size, freelist_sizes [slot]);*/
- g_assert (size <= freelist_sizes [slot]);
- for (pchunk = pinned_chunk_list; pchunk; pchunk = pchunk->block.next) {
- void **p = pchunk->free_list [slot];
- if (p) {
- /*g_print ("found freelist for slot %d in chunk %p, returning %p, next %p\n", slot, pchunk, p, *p);*/
- pchunk->free_list [slot] = *p;
- return p;
- }
- }
- for (pchunk = pinned_chunk_list; pchunk; pchunk = pchunk->block.next) {
- res = get_chunk_freelist (pchunk, slot);
- if (res)
- return res;
- }
- pchunk = alloc_pinned_chunk ();
- /* FIXME: handle OOM */
- pchunk->block.next = pinned_chunk_list;
- pinned_chunk_list = pchunk;
- res = get_chunk_freelist (pchunk, slot);
- return res;
-}
-
/* used for the GC-internal data structures */
static void*
get_internal_mem (size_t size, int type)
*/
static void
-free_large_object (LOSObject *obj)
-{
- size_t size = obj->size;
- DEBUG (4, fprintf (gc_debug_file, "Freed large object %p, size %zd\n", obj->data, obj->size));
- binary_protocol_empty (obj->data, obj->size);
-
- los_memory_usage -= size;
- size += sizeof (LOSObject);
- size += pagesize - 1;
- size &= ~(pagesize - 1);
- total_alloc -= size;
- los_num_objects--;
- free_os_memory (obj, size);
-}
-
-/*
- * Objects with size >= 64KB are allocated in the large object space.
- * They are currently kept track of with a linked list.
- * They don't move, so there is no need to pin them during collection
- * and we avoid the memcpy overhead.
- */
-static void* __attribute__((noinline))
-alloc_large_inner (MonoVTable *vtable, size_t size)
+setup_fragment (Fragment *frag, Fragment *prev, size_t size)
{
- LOSObject *obj;
- void **vtslot;
- size_t alloc_size;
-
- g_assert (size > MAX_SMALL_OBJ_SIZE);
-
- if (los_memory_usage > next_los_collection) {
- static mword last_los_memory_usage = 0;
-
- mword los_memory_alloced;
- mword old_los_memory_usage;
- mword los_memory_saved;
- mword save_target;
- mword allowance_target;
- mword allowance;
-
- DEBUG (4, fprintf (gc_debug_file, "Should trigger major collection: req size %zd (los already: %zu, limit: %zu)\n", size, los_memory_usage, next_los_collection));
- stop_world ();
-
- g_assert (los_memory_usage >= last_los_memory_usage);
- los_memory_alloced = los_memory_usage - last_los_memory_usage;
- old_los_memory_usage = los_memory_usage;
-
- major_collection ("LOS overflow");
-
- los_memory_saved = MAX (old_los_memory_usage - los_memory_usage, 1);
- save_target = los_memory_usage / 2;
- /*
- * see the comment at the end of major_collection()
- * for the explanation for this calculation.
- */
- allowance_target = (mword)((double)save_target * (double)los_memory_alloced / (double)los_memory_saved);
- allowance = MAX (MIN (allowance_target, los_memory_usage), MIN_LOS_ALLOWANCE);
- next_los_collection = los_memory_usage + allowance;
-
- last_los_memory_usage = los_memory_usage;
+ /* remove from the list */
+ if (prev)
+ prev->next = frag->next;
+ else
+ nursery_fragments = frag->next;
+ nursery_next = frag->fragment_start;
+ nursery_frag_real_end = frag->fragment_end;
- restart_world ();
- }
- alloc_size = size;
- alloc_size += sizeof (LOSObject);
- alloc_size += pagesize - 1;
- alloc_size &= ~(pagesize - 1);
- /* FIXME: handle OOM */
- obj = get_os_memory (alloc_size, TRUE);
- obj->size = size;
- vtslot = (void**)obj->data;
- *vtslot = vtable;
- total_alloc += alloc_size;
- UPDATE_HEAP_BOUNDARIES (obj->data, (char*)obj->data + size);
- obj->next = los_object_list;
- los_object_list = obj;
- los_memory_usage += size;
- los_num_objects++;
- DEBUG (4, fprintf (gc_debug_file, "Allocated large object %p, vtable: %p (%s), size: %zd\n", obj->data, vtable, vtable->klass->name, size));
- binary_protocol_alloc (obj->data, vtable, size);
- return obj->data;
+ DEBUG (4, fprintf (gc_debug_file, "Using nursery fragment %p-%p, size: %td (req: %zd)\n", nursery_next, nursery_frag_real_end, nursery_frag_real_end - nursery_next, size));
+ frag->next = fragment_freelist;
+ fragment_freelist = frag;
}
/* check if we have a suitable fragment in nursery_fragments to be able to allocate
prev = NULL;
for (frag = nursery_fragments; frag; frag = frag->next) {
if (size <= (frag->fragment_end - frag->fragment_start)) {
- /* remove from the list */
- if (prev)
- prev->next = frag->next;
- else
- nursery_fragments = frag->next;
- nursery_next = frag->fragment_start;
- nursery_frag_real_end = frag->fragment_end;
-
- DEBUG (4, fprintf (gc_debug_file, "Using nursery fragment %p-%p, size: %zd (req: %zd)\n", nursery_next, nursery_frag_real_end, nursery_frag_real_end - nursery_next, size));
- frag->next = fragment_freelist;
- fragment_freelist = frag;
+ setup_fragment (frag, prev, size);
return TRUE;
}
prev = frag;
}
/*
- * size is already rounded up and we hold the GC lock.
+ * Same as search_fragment_for_size but if search for @desired_size fails, try to satisfy @minimum_size.
+ * This improves nursery usage.
*/
-static void*
-alloc_degraded (MonoVTable *vtable, size_t size)
+static int
+search_fragment_for_size_range (size_t desired_size, size_t minimum_size)
{
- GCMemSection *section;
- void **p = NULL;
- g_assert (size <= MAX_SMALL_OBJ_SIZE);
- HEAVY_STAT (++stat_objects_alloced_degraded);
- HEAVY_STAT (stat_bytes_alloced_degraded += size);
- for (section = section_list; section; section = section->block.next) {
- if (section != nursery_section && (section->end_data - section->next_data) >= size) {
- p = (void**)section->next_data;
- break;
+ Fragment *frag, *prev, *min_prev;
+ DEBUG (4, fprintf (gc_debug_file, "Searching nursery fragment %p, desired size: %zd minimum size %zd\n", nursery_frag_real_end, desired_size, minimum_size));
+
+ if (nursery_frag_real_end > nursery_next && nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
+ /* Clear the remaining space, pinning depends on this */
+ memset (nursery_next, 0, nursery_frag_real_end - nursery_next);
+
+ min_prev = GINT_TO_POINTER (-1);
+ prev = NULL;
+
+ for (frag = nursery_fragments; frag; frag = frag->next) {
+ int frag_size = frag->fragment_end - frag->fragment_start;
+ if (desired_size <= frag_size) {
+ setup_fragment (frag, prev, desired_size);
+ return desired_size;
}
+ if (minimum_size <= frag_size)
+ min_prev = prev;
+
+ prev = frag;
}
- if (!p) {
- section = alloc_major_section ();
- section->is_to_space = FALSE;
- /* FIXME: handle OOM */
- p = (void**)section->next_data;
+
+ if (min_prev != GINT_TO_POINTER (-1)) {
+ int frag_size;
+ if (min_prev)
+ frag = min_prev->next;
+ else
+ frag = nursery_fragments;
+
+ frag_size = frag->fragment_end - frag->fragment_start;
+ HEAVY_STAT (++stat_wasted_fragments_used);
+ HEAVY_STAT (stat_wasted_fragments_bytes += frag_size);
+
+ setup_fragment (frag, min_prev, minimum_size);
+ return frag_size;
}
- section->next_data += size;
- degraded_mode += size;
- DEBUG (3, fprintf (gc_debug_file, "Allocated (degraded) object %p, vtable: %p (%s), size: %zd in section %p\n", p, vtable, vtable->klass->name, size, section));
- *p = vtable;
- return p;
+
+ return 0;
+}
+
+static void*
+alloc_degraded (MonoVTable *vtable, size_t size)
+{
+ if (need_major_collection ()) {
+ stop_world ();
+ major_collection ("degraded overflow");
+ restart_world ();
+ }
+
+ return major_alloc_degraded (vtable, size);
}
/*
/* FIXME: handle OOM */
void **p;
char *new_next;
- gboolean res;
TLAB_ACCESS_INIT;
HEAVY_STAT (++stat_objects_alloced);
*/
if (degraded_mode && degraded_mode < DEFAULT_NURSERY_SIZE) {
p = alloc_degraded (vtable, size);
+ binary_protocol_alloc_degraded (p, vtable, size);
return p;
}
+	/*FIXME This codepath is currently dead code since tlab_size > MAX_SMALL_OBJ_SIZE*/
if (size > tlab_size) {
/* Allocate directly from the nursery */
if (nursery_next + size >= nursery_frag_real_end) {
minor_collect_or_expand_inner (size);
if (degraded_mode) {
p = alloc_degraded (vtable, size);
+ binary_protocol_alloc_degraded (p, vtable, size);
return p;
}
}
if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
memset (p, 0, size);
} else {
+ int alloc_size = tlab_size;
+ int available_in_nursery = nursery_frag_real_end - nursery_next;
if (TLAB_START)
DEBUG (3, fprintf (gc_debug_file, "Retire TLAB: %p-%p [%ld]\n", TLAB_START, TLAB_REAL_END, (long)(TLAB_REAL_END - TLAB_NEXT - size)));
- if (nursery_next + tlab_size >= nursery_frag_real_end) {
- res = search_fragment_for_size (tlab_size);
- if (!res) {
- minor_collect_or_expand_inner (tlab_size);
- if (degraded_mode) {
- p = alloc_degraded (vtable, size);
- return p;
+ if (alloc_size >= available_in_nursery) {
+ if (available_in_nursery > MAX_NURSERY_TLAB_WASTE && available_in_nursery > size) {
+ alloc_size = available_in_nursery;
+ } else {
+ alloc_size = search_fragment_for_size_range (tlab_size, size);
+ if (!alloc_size) {
+ alloc_size = tlab_size;
+ minor_collect_or_expand_inner (tlab_size);
+ if (degraded_mode) {
+ p = alloc_degraded (vtable, size);
+ binary_protocol_alloc_degraded (p, vtable, size);
+ return p;
+ }
}
}
}
/* Allocate a new TLAB from the current nursery fragment */
TLAB_START = nursery_next;
- nursery_next += tlab_size;
+ nursery_next += alloc_size;
TLAB_NEXT = TLAB_START;
- TLAB_REAL_END = TLAB_START + tlab_size;
- TLAB_TEMP_END = TLAB_START + MIN (SCAN_START_SIZE, tlab_size);
+ TLAB_REAL_END = TLAB_START + alloc_size;
+ TLAB_TEMP_END = TLAB_START + MIN (SCAN_START_SIZE, alloc_size);
if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
- memset (TLAB_START, 0, tlab_size);
+ memset (TLAB_START, 0, alloc_size);
/* Allocate from the TLAB */
p = (void*)TLAB_NEXT;
size += ALLOC_ALIGN - 1;
size &= ~(ALLOC_ALIGN - 1);
LOCK_GC;
- if (size > MAX_FREELIST_SIZE) {
+ if (size > MAX_SMALL_OBJ_SIZE) {
/* large objects are always pinned anyway */
p = alloc_large_inner (vtable, size);
} else {
- p = alloc_from_freelist (size);
- memset (p, 0, size);
+ DEBUG (9, g_assert (vtable->klass->inited));
+ p = major_alloc_small_pinned_obj (size, vtable->klass->has_references);
}
DEBUG (6, fprintf (gc_debug_file, "Allocated pinned object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
- binary_protocol_alloc (p, vtable, size);
+ binary_protocol_alloc_pinned (p, vtable, size);
*p = vtable;
UNLOCK_GC;
return p;
/* LOCKING: requires that the GC lock is held */
static void
-finalize_in_range (char *start, char *end, int generation)
+finalize_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation)
{
FinalizeEntryHashTable *hash_table = get_finalize_entry_hash_table (generation);
FinalizeEntry *entry, *prev;
for (i = 0; i < finalizable_hash_size; ++i) {
prev = NULL;
for (entry = finalizable_hash [i]; entry;) {
- if ((char*)entry->object >= start && (char*)entry->object < end && !object_is_in_to_space (entry->object)) {
+ if ((char*)entry->object >= start && (char*)entry->object < end && !major_is_object_live (entry->object)) {
gboolean is_fin_ready = object_is_fin_ready (entry->object);
- char *copy = copy_object_no_heap (entry->object, start, end);
+ char *copy = entry->object;
+ copy_func ((void**)©);
if (is_fin_ready) {
char *from;
FinalizeEntry *next;
finalizable_hash [i] = entry->next;
hash_table->num_registered--;
- entry->object = copy;
+ entry->object = copy;
+
+ /* insert it into the major hash */
+ rehash_fin_table_if_necessary (&major_finalizable_hash);
+ major_hash = mono_object_hash ((MonoObject*) copy) %
+ major_finalizable_hash.size;
+ entry->next = major_finalizable_hash.table [major_hash];
+ major_finalizable_hash.table [major_hash] = entry;
+ major_finalizable_hash.num_registered++;
+
+ DEBUG (5, fprintf (gc_debug_file, "Promoting finalization of object %p (%s) (was at %p) to major table\n", copy, safe_name (copy), from));
+
+ entry = next;
+ continue;
+ } else {
+ /* update pointer */
+ DEBUG (5, fprintf (gc_debug_file, "Updating object for finalization: %p (%s) (was at %p)\n", entry->object, safe_name (entry->object), from));
+ entry->object = copy;
+ }
+ }
+ }
+ prev = entry;
+ entry = entry->next;
+ }
+ }
+}
+
+/*
+ * Returns TRUE unless OBJECT lies inside [start, end) and is ready for
+ * finalization without being live in the major heap.
+ */
+static int
+object_is_reachable (char *object, char *start, char *end)
+{
+	/* Objects outside [start, end) — i.e. non-nursery objects during minor
+	 * collections — are conservatively treated as alive. */
+	if (object < start || object >= end)
+		return TRUE;
+	return !object_is_fin_ready (object) || major_is_object_live (object);
+}
+
+/* LOCKING: requires that the GC lock is held */
+/*
+ * Unlink and free every ephemeron list node whose array belongs to an
+ * unloaded domain.  NOTE(review): the check relies on dead-domain objects
+ * having had their vtable nulled; the `domain` argument itself is not
+ * consulted here — confirm that is intentional.
+ */
+static void
+null_ephemerons_for_domain (MonoDomain *domain)
+{
+	EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
+
+	while (current) {
+		MonoObject *object = (MonoObject*)current->array;
+
+		/* a cleared vtable marks an object from an unloaded domain */
+		if (object && !object->vtable) {
+			EphemeronLinkNode *tmp = current;
+
+			/* unlink the node from the singly-linked list */
+			if (prev)
+				prev->next = current->next;
+			else
+				ephemeron_list = current->next;
+
+			current = current->next;
+			free_internal_mem (tmp, INTERNAL_MEM_EPHEMERON_LINK);
+		} else {
+			prev = current;
+			current = current->next;
+		}
+	}
+}
+
+/* LOCKING: requires that the GC lock is held */
+static void
+clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end)
+{
+ int was_in_nursery, was_promoted;
+ EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
+ MonoArray *array;
+ Ephemeron *cur, *array_end;
+ char *tombstone;
+
+ while (current) {
+ char *object = current->array;
+
+ if (!object_is_reachable (object, start, end)) {
+ EphemeronLinkNode *tmp = current;
+
+ DEBUG (5, fprintf (gc_debug_file, "Dead Ephemeron array at %p\n", object));
+
+ if (prev)
+ prev->next = current->next;
+ else
+ ephemeron_list = current->next;
+
+ current = current->next;
+ free_internal_mem (tmp, INTERNAL_MEM_EPHEMERON_LINK);
+
+ continue;
+ }
+
+ was_in_nursery = ptr_in_nursery (object);
+ copy_func ((void**)&object);
+ current->array = object;
+
+ /*The array was promoted, add global remsets for key/values left behind in nursery.*/
+ was_promoted = was_in_nursery && !ptr_in_nursery (object);
+
+ DEBUG (5, fprintf (gc_debug_file, "Clearing unreachable entries for ephemeron array at %p\n", object));
+
+ array = (MonoArray*)object;
+ cur = mono_array_addr (array, Ephemeron, 0);
+ array_end = cur + mono_array_length_fast (array);
+ tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
+
+ for (; cur < array_end; ++cur) {
+ char *key = (char*)cur->key;
+
+ if (!key || key == tombstone)
+ continue;
- /* insert it into the major hash */
- rehash_fin_table_if_necessary (&major_finalizable_hash);
- major_hash = mono_object_hash ((MonoObject*) copy) %
- major_finalizable_hash.size;
- entry->next = major_finalizable_hash.table [major_hash];
- major_finalizable_hash.table [major_hash] = entry;
- major_finalizable_hash.num_registered++;
+ DEBUG (5, fprintf (gc_debug_file, "[%td] key %p (%s) value %p (%s)\n", cur - mono_array_addr (array, Ephemeron, 0),
+ key, object_is_reachable (key, start, end) ? "reachable" : "unreachable",
+ cur->value, cur->value && object_is_reachable (cur->value, start, end) ? "reachable" : "unreachable"));
- DEBUG (5, fprintf (gc_debug_file, "Promoting finalization of object %p (%s) (was at %p) to major table\n", copy, safe_name (copy), from));
+ if (!object_is_reachable (key, start, end)) {
+ cur->key = tombstone;
+ cur->value = NULL;
+ continue;
+ }
- entry = next;
- continue;
- } else {
- /* update pointer */
- DEBUG (5, fprintf (gc_debug_file, "Updating object for finalization: %p (%s) (was at %p)\n", entry->object, safe_name (entry->object), from));
- entry->object = copy;
- }
+ if (was_promoted) {
+ if (ptr_in_nursery (key)) {/*key was not promoted*/
+ DEBUG (5, fprintf (gc_debug_file, "\tAdded remset to key %p\n", key));
+ add_to_global_remset (&cur->key);
+ }
+ if (ptr_in_nursery (cur->value)) {/*value was not promoted*/
+ DEBUG (5, fprintf (gc_debug_file, "\tAdded remset to value %p\n", cur->value));
+ add_to_global_remset (&cur->value);
+ }
+ }
+ }
+ prev = current;
+ current = current->next;
+ }
+}
+
+/* LOCKING: requires that the GC lock is held */
+static int
+mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end)
+{
+ int nothing_marked = 1;
+ EphemeronLinkNode *current = ephemeron_list;
+ MonoArray *array;
+ Ephemeron *cur, *array_end;
+ char *tombstone;
+
+ for (current = ephemeron_list; current; current = current->next) {
+ char *object = current->array;
+ DEBUG (5, fprintf (gc_debug_file, "Ephemeron array at %p\n", object));
+
+ /*We ignore arrays in old gen during minor collections since all objects are promoted by the remset machinery.*/
+ if (object < start || object >= end)
+ continue;
+
+ /*It has to be alive*/
+ if (!object_is_reachable (object, start, end)) {
+ DEBUG (5, fprintf (gc_debug_file, "\tnot reachable\n"));
+ continue;
+ }
+
+ copy_func ((void**)&object);
+
+ array = (MonoArray*)object;
+ cur = mono_array_addr (array, Ephemeron, 0);
+ array_end = cur + mono_array_length_fast (array);
+ tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
+
+ for (; cur < array_end; ++cur) {
+ char *key = cur->key;
+
+ if (!key || key == tombstone)
+ continue;
+
+ DEBUG (5, fprintf (gc_debug_file, "[%td] key %p (%s) value %p (%s)\n", cur - mono_array_addr (array, Ephemeron, 0),
+ key, object_is_reachable (key, start, end) ? "reachable" : "unreachable",
+ cur->value, cur->value && object_is_reachable (cur->value, start, end) ? "reachable" : "unreachable"));
+
+ if (object_is_reachable (key, start, end)) {
+ char *value = cur->value;
+
+ copy_func ((void**)&cur->key);
+ if (value) {
+ if (!object_is_reachable (value, start, end))
+ nothing_marked = 0;
+ copy_func ((void**)&cur->value);
}
}
- prev = entry;
- entry = entry->next;
}
}
+
+ DEBUG (5, fprintf (gc_debug_file, "Ephemeron run finished. Is it done %d\n", nothing_marked));
+ return nothing_marked;
}
/* LOCKING: requires that the GC lock is held */
static void
-null_link_in_range (char *start, char *end, int generation)
+null_link_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation)
{
DisappearingLinkHashTable *hash = get_dislink_hash_table (generation);
DisappearingLink **disappearing_link_hash = hash->table;
prev = NULL;
for (entry = disappearing_link_hash [i]; entry;) {
char *object = DISLINK_OBJECT (entry);
- if (object >= start && object < end && !object_is_in_to_space (object)) {
+ if (object >= start && object < end && !major_is_object_live (object)) {
gboolean track = DISLINK_TRACK (entry);
if (!track && object_is_fin_ready (object)) {
void **p = entry->link;
hash->num_links--;
continue;
} else {
- char *copy = copy_object_no_heap (object, start, end);
+ char *copy = object;
+ copy_func ((void**)©);
/* Update pointer if it's moved. If the object
* has been moved out of the nursery, we need to
prev = NULL;
for (entry = disappearing_link_hash [i]; entry; ) {
char *object = DISLINK_OBJECT (entry);
- /* FIXME: actually there should be no object
- left in the domain with a non-null vtable
- (provided we remove the Thread special
- case) */
- if (object && (!((MonoObject*)object)->vtable || mono_object_domain (object) == domain)) {
+ if (object && !((MonoObject*)object)->vtable) {
DisappearingLink *next = entry->next;
if (prev)
*/
/* FIXME: handle large/small config */
-#define THREAD_HASH_SIZE 11
#define HASH_PTHREAD_T(id) (((unsigned int)(id) >> 4) * 2654435761u)
static SgenThreadInfo* thread_table [THREAD_HASH_SIZE];
static MonoSemType suspend_ack_semaphore;
static MonoSemType *suspend_ack_semaphore_ptr;
static unsigned int global_stop_count = 0;
-#ifdef __APPLE__
-static int suspend_signal_num = SIGXFSZ;
-#else
-static int suspend_signal_num = SIGPWR;
-#endif
-static int restart_signal_num = SIGXCPU;
+
static sigset_t suspend_signal_mask;
static mword cur_thread_regs [ARCH_NUM_REGS] = {0};
/* LOCKING: assumes the GC lock is held */
-static SgenThreadInfo*
-thread_info_lookup (ARCH_THREAD_TYPE id)
+SgenThreadInfo**
+mono_sgen_get_thread_table (void)
+{
+ return thread_table;
+}
+
+SgenThreadInfo*
+mono_sgen_thread_info_lookup (ARCH_THREAD_TYPE id)
{
unsigned int hash = HASH_PTHREAD_T (id) % THREAD_HASH_SIZE;
SgenThreadInfo *info;
update_current_thread_stack (void *start)
{
void *ptr = cur_thread_regs;
- SgenThreadInfo *info = thread_info_lookup (ARCH_GET_THREAD ());
+ SgenThreadInfo *info = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());
info->stack_start = align_pointer (&ptr);
g_assert (info->stack_start >= info->stack_start_limit && info->stack_start < info->stack_end);
gc_callbacks.thread_suspend_func (info->runtime_data, NULL);
}
-static const char*
-signal_desc (int signum)
-{
- if (signum == suspend_signal_num)
- return "suspend";
- if (signum == restart_signal_num)
- return "restart";
- return "unknown";
-}
-
/*
* Define this and use the "xdomain-checks" MONO_GC_DEBUG option to
* have cross-domain checks in the write barrier.
*/
//#define XDOMAIN_CHECKS_IN_WBARRIER
+#ifndef BINARY_PROTOCOL
#ifndef HEAVY_STATISTICS
#define MANAGED_ALLOCATION
#ifndef XDOMAIN_CHECKS_IN_WBARRIER
#define MANAGED_WBARRIER
#endif
#endif
+#endif
static gboolean
is_ip_in_managed_allocator (MonoDomain *domain, gpointer ip);
-static void
-wait_for_suspend_ack (int count)
+void
+mono_sgen_wait_for_suspend_ack (int count)
{
int i, result;
}
}
-/* LOCKING: assumes the GC lock is held */
-static int
-thread_handshake (int signum)
-{
- int count, i, result;
- SgenThreadInfo *info;
- pthread_t me = pthread_self ();
-
- count = 0;
- for (i = 0; i < THREAD_HASH_SIZE; ++i) {
- for (info = thread_table [i]; info; info = info->next) {
- DEBUG (4, fprintf (gc_debug_file, "considering thread %p for signal %d (%s)\n", info, signum, signal_desc (signum)));
- if (ARCH_THREAD_EQUALS (info->id, me)) {
- DEBUG (4, fprintf (gc_debug_file, "Skip (equal): %p, %p\n", (void*)me, (void*)info->id));
- continue;
- }
- /*if (signum == suspend_signal_num && info->stop_count == global_stop_count)
- continue;*/
- result = pthread_kill (info->id, signum);
- if (result == 0) {
- DEBUG (4, fprintf (gc_debug_file, "thread %p signal sent\n", info));
- count++;
- } else {
- DEBUG (4, fprintf (gc_debug_file, "thread %p signal failed: %d (%s)\n", (void*)info->id, result, strerror (result)));
- info->skip = 1;
- }
- }
- }
-
- wait_for_suspend_ack (count);
-
- return count;
-}
-
static int
restart_threads_until_none_in_managed_allocator (void)
{
if (!info->stack_start || info->in_critical_region ||
is_ip_in_managed_allocator (info->stopped_domain, info->stopped_ip)) {
binary_protocol_thread_restart ((gpointer)info->id);
+#if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
+ result = thread_resume (pthread_mach_thread_np (info->id));
+#else
result = pthread_kill (info->id, restart_signal_num);
+#endif
if (result == 0) {
++restart_count;
} else {
if (restart_count == 0)
break;
+#if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
+	/* mach thread_resume is synchronous, so we don't need to wait for the threads */
+#else
/* wait for the threads to signal their restart */
- wait_for_suspend_ack (restart_count);
+ mono_sgen_wait_for_suspend_ack (restart_count);
+#endif
if (sleep_duration < 0) {
sched_yield ();
for (info = thread_table [i]; info; info = info->next) {
if (info->skip || info->stopped_ip == NULL)
continue;
+#if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
+ result = thread_suspend (pthread_mach_thread_np (info->id));
+#else
result = pthread_kill (info->id, suspend_signal_num);
+#endif
if (result == 0) {
++restarted_count;
} else {
}
/* some threads might have died */
num_threads_died += restart_count - restarted_count;
+#if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
+	/* mach thread_suspend is synchronous, so we don't need to wait for the threads */
+#else
/* wait for the threads to signal their suspension
again */
- wait_for_suspend_ack (restart_count);
+ mono_sgen_wait_for_suspend_ack (restart_count);
+#endif
}
return num_threads_died;
gpointer stack_start;
id = pthread_self ();
- info = thread_info_lookup (id);
+ info = mono_sgen_thread_info_lookup (id);
info->stopped_domain = mono_domain_get ();
info->stopped_ip = (gpointer) ARCH_SIGCTX_IP (context);
stop_count = global_stop_count;
SgenThreadInfo *info;
int old_errno = errno;
- info = thread_info_lookup (pthread_self ());
+ info = mono_sgen_thread_info_lookup (pthread_self ());
info->signal = restart_signal_num;
DEBUG (4, fprintf (gc_debug_file, "Restart handler in %p %p\n", info, (gpointer)ARCH_GET_THREAD ()));
update_current_thread_stack (&count);
global_stop_count++;
- DEBUG (3, fprintf (gc_debug_file, "stopping world n %d from %p %p\n", global_stop_count, thread_info_lookup (ARCH_GET_THREAD ()), (gpointer)ARCH_GET_THREAD ()));
+ DEBUG (3, fprintf (gc_debug_file, "stopping world n %d from %p %p\n", global_stop_count, mono_sgen_thread_info_lookup (ARCH_GET_THREAD ()), (gpointer)ARCH_GET_THREAD ()));
TV_GETTIME (stop_world_time);
- count = thread_handshake (suspend_signal_num);
+ count = mono_sgen_thread_handshake (suspend_signal_num);
count -= restart_threads_until_none_in_managed_allocator ();
g_assert (count >= 0);
DEBUG (3, fprintf (gc_debug_file, "world stopped %d thread(s)\n", count));
release_gc_locks ();
- count = thread_handshake (restart_signal_num);
+ count = mono_sgen_thread_handshake (restart_signal_num);
TV_GETTIME (end_sw);
usec = TV_ELAPSED (stop_world_time, end_sw);
max_pause_usec = MAX (usec, max_pause_usec);
gc_callbacks = *callbacks;
}
+/*
+ * mono_gc_get_gc_callbacks:
+ *
+ * Return a pointer to the callbacks registered via mono_gc_set_gc_callbacks
+ * (thread suspend / precise stack mark hooks).
+ */
+MonoGCCallbacks *
+mono_gc_get_gc_callbacks (void)
+{
+	return &gc_callbacks;
+}
+
/* Variables holding start/end nursery so it won't have to be passed at every call */
static void *scan_area_arg_start, *scan_area_arg_end;
void*
mono_gc_scan_object (void *obj)
{
- return copy_object_no_heap (obj, scan_area_arg_start, scan_area_arg_end);
+ if (current_collection_generation == GENERATION_NURSERY)
+ copy_object (&obj);
+ else
+ major_copy_or_mark_object (&obj);
+ return obj;
}
-
+
/*
* Mark from thread stacks and registers.
*/
for (i = 0; i < THREAD_HASH_SIZE; ++i) {
for (info = thread_table [i]; info; info = info->next) {
if (info->skip) {
- DEBUG (3, fprintf (gc_debug_file, "Skipping dead thread %p, range: %p-%p, size: %zd\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start));
+ DEBUG (3, fprintf (gc_debug_file, "Skipping dead thread %p, range: %p-%p, size: %td\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start));
continue;
}
- DEBUG (3, fprintf (gc_debug_file, "Scanning thread %p, range: %p-%p, size: %zd, pinned=%d\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, next_pin_slot));
+ DEBUG (3, fprintf (gc_debug_file, "Scanning thread %p, range: %p-%p, size: %td, pinned=%d\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, next_pin_slot));
if (gc_callbacks.thread_mark_func && !conservative_stack_mark)
gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise);
else if (!precise)
ptr_on_stack (void *ptr)
{
gpointer stack_start = &stack_start;
- SgenThreadInfo *info = thread_info_lookup (ARCH_GET_THREAD ());
+ SgenThreadInfo *info = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());
if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
return TRUE;
//__builtin_prefetch (ptr);
if (((void*)ptr < start_nursery || (void*)ptr >= end_nursery)) {
gpointer old = *ptr;
- copy_object (ptr, start_nursery, end_nursery);
+ copy_object (ptr);
DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p\n", ptr, *ptr));
if (old)
binary_protocol_ptr_update (ptr, old, *ptr, (gpointer)LOAD_VTABLE (*ptr), safe_object_get_size (*ptr));
* becomes part of the global remset, which can grow very large.
*/
DEBUG (9, fprintf (gc_debug_file, "Add to global remset because of pinning %p (%p %s)\n", ptr, *ptr, safe_name (*ptr)));
- add_to_global_remset (ptr, FALSE);
+ add_to_global_remset (ptr);
}
} else {
DEBUG (9, fprintf (gc_debug_file, "Skipping remset at %p holding %p\n", ptr, *ptr));
return p + 2;
count = p [1];
while (count-- > 0) {
- copy_object (ptr, start_nursery, end_nursery);
+ copy_object (ptr);
DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p (count: %d)\n", ptr, *ptr, (int)count));
if (!global && *ptr >= start_nursery && *ptr < end_nursery)
- add_to_global_remset (ptr, FALSE);
+ add_to_global_remset (ptr);
++ptr;
}
return p + 2;
ptr = (void**)(*p & ~REMSET_TYPE_MASK);
if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
return p + 1;
- scan_object ((char*)ptr, start_nursery, end_nursery);
+ scan_object ((char*)ptr);
return p + 1;
- case REMSET_OTHER: {
+ case REMSET_VTYPE: {
ptr = (void**)(*p & ~REMSET_TYPE_MASK);
-
- switch (p [1]) {
- case REMSET_VTYPE:
- if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
- return p + 4;
- desc = p [2];
- count = p [3];
- while (count-- > 0)
- ptr = (void**) scan_vtype ((char*)ptr, desc, start_nursery, end_nursery);
- return p + 4;
- case REMSET_ROOT_LOCATION:
- /* Same as REMSET_LOCATION, but the address is not required to be in the heap */
- copy_object (ptr, start_nursery, end_nursery);
- DEBUG (9, fprintf (gc_debug_file, "Overwrote root location remset at %p with %p\n", ptr, *ptr));
- if (!global && *ptr >= start_nursery && *ptr < end_nursery) {
- /*
- * If the object is pinned, each reference to it from nonpinned objects
- * becomes part of the global remset, which can grow very large.
- */
- DEBUG (9, fprintf (gc_debug_file, "Add to global remset because of pinning %p (%p %s)\n", ptr, *ptr, safe_name (*ptr)));
- add_to_global_remset (ptr, TRUE);
- }
- return p + 2;
- default:
- g_assert_not_reached ();
- }
- break;
+ if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
+ return p + 3;
+ desc = p [1];
+ count = p [2];
+ while (count-- > 0)
+ ptr = (void**) scan_vtype ((char*)ptr, desc, start_nursery, end_nursery);
+ return p + 3;
}
default:
g_assert_not_reached ();
case REMSET_OBJECT:
p += 1;
break;
- case REMSET_OTHER:
- switch (p [1]) {
- case REMSET_VTYPE:
- p += 4;
- break;
- case REMSET_ROOT_LOCATION:
- p += 2;
- break;
- default:
- g_assert_not_reached ();
- }
+ case REMSET_VTYPE:
+ p += 3;
break;
default:
g_assert_not_reached ();
/* the global one */
for (remset = global_remset; remset; remset = remset->next) {
- DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %zd\n", remset->data, remset->store_next, remset->store_next - remset->data));
+ DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
store_pos = remset->data;
for (p = remset->data; p < remset->store_next; p = next_p) {
- mword ptr;
+ void **ptr = (void**)p [0];
+
+ /*Ignore previously processed remset.*/
+ if (!global_remset_location_was_not_added (ptr)) {
+ next_p = p + 1;
+ continue;
+ }
next_p = handle_remset (p, start_nursery, end_nursery, TRUE);
* Clear global remsets of locations which no longer point to the
* nursery. Otherwise, they could grow indefinitely between major
* collections.
+ *
+ * Since all global remsets are location remsets, we don't need to unmask the pointer.
*/
- ptr = (p [0] & ~REMSET_TYPE_MASK);
- if ((p [0] & REMSET_TYPE_MASK) == REMSET_LOCATION) {
- if (ptr_in_nursery (*(void**)ptr))
- *store_pos ++ = p [0];
- } else {
- g_assert ((p [0] & REMSET_TYPE_MASK) == REMSET_OTHER);
- g_assert (p [1] == REMSET_ROOT_LOCATION);
- if (ptr_in_nursery (*(void**)ptr)) {
- *store_pos ++ = p [0];
- *store_pos ++ = p [1];
- }
+ if (ptr_in_nursery (*ptr)) {
+ *store_pos ++ = p [0];
+ HEAVY_STAT (++stat_global_remsets_readded);
}
}
RememberedSet *next;
int j;
for (remset = info->remset; remset; remset = next) {
- DEBUG (4, fprintf (gc_debug_file, "Scanning remset for thread %p, range: %p-%p, size: %zd\n", info, remset->data, remset->store_next, remset->store_next - remset->data));
+ DEBUG (4, fprintf (gc_debug_file, "Scanning remset for thread %p, range: %p-%p, size: %td\n", info, remset->data, remset->store_next, remset->store_next - remset->data));
for (p = remset->data; p < remset->store_next;) {
p = handle_remset (p, start_nursery, end_nursery, FALSE);
}
while (freed_thread_remsets) {
RememberedSet *next;
remset = freed_thread_remsets;
- DEBUG (4, fprintf (gc_debug_file, "Scanning remset for freed thread, range: %p-%p, size: %zd\n", remset->data, remset->store_next, remset->store_next - remset->data));
+ DEBUG (4, fprintf (gc_debug_file, "Scanning remset for freed thread, range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
for (p = remset->data; p < remset->store_next;) {
p = handle_remset (p, start_nursery, end_nursery, FALSE);
}
LOCK_GC;
init_stats ();
- info = thread_info_lookup (ARCH_GET_THREAD ());
+ info = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());
if (info == NULL)
info = gc_register_current_thread (baseptr);
UNLOCK_GC;
rs->next = REMEMBERED_SET;
REMEMBERED_SET = rs;
#ifdef HAVE_KW_THREAD
- thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
+ mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
#endif
*(rs->store_next++) = (mword)field_ptr;
*(void**)field_ptr = value;
rs->next = REMEMBERED_SET;
REMEMBERED_SET = rs;
#ifdef HAVE_KW_THREAD
- thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
+ mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
#endif
*(rs->store_next++) = (mword)slot_ptr;
*(void**)slot_ptr = value;
rs->next = REMEMBERED_SET;
REMEMBERED_SET = rs;
#ifdef HAVE_KW_THREAD
- thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
+ mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
#endif
*(rs->store_next++) = (mword)dest_ptr | REMSET_RANGE;
*(rs->store_next++) = count;
UNLOCK_GC;
}
-static char*
-find_object_for_ptr_in_area (char *ptr, char *start, char *end)
-{
- while (start < end) {
- char *old_start;
-
- if (!*(void**)start) {
- start += sizeof (void*); /* should be ALLOC_ALIGN, really */
- continue;
- }
-
- old_start = start;
-
- #define SCAN_OBJECT_NOSCAN
- #include "sgen-scan-object.h"
-
- if (ptr >= old_start && ptr < start)
- return old_start;
- }
-
- return NULL;
-}
-
static char *found_obj;
static void
-find_object_for_ptr_in_pinned_chunk_callback (PinnedChunk *chunk, char *obj, size_t size, char *ptr)
+find_object_for_ptr_callback (char *obj, size_t size, char *ptr)
{
if (ptr >= obj && ptr < obj + size) {
g_assert (!found_obj);
char*
find_object_for_ptr (char *ptr)
{
- GCMemSection *section;
LOSObject *bigobj;
- for (section = section_list; section; section = section->block.next) {
- if (ptr >= section->data && ptr < section->end_data)
- return find_object_for_ptr_in_area (ptr, section->data, section->end_data);
+ if (ptr >= nursery_section->data && ptr < nursery_section->end_data) {
+ found_obj = NULL;
+ scan_area_with_callback (nursery_section->data, nursery_section->end_data,
+ (IterateObjectCallbackFunc)find_object_for_ptr_callback, ptr);
+ if (found_obj)
+ return found_obj;
}
for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
return bigobj->data;
}
+ /*
+ * Very inefficient, but this is debugging code, supposed to
+ * be called from gdb, so we don't care.
+ */
found_obj = NULL;
- scan_pinned_objects ((ScanPinnedObjectCallbackFunc)find_object_for_ptr_in_pinned_chunk_callback, ptr);
+ major_iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)find_object_for_ptr_callback, ptr);
return found_obj;
}
mono_gc_wbarrier_generic_nostore (ptr);
}
-void
-mono_gc_wbarrier_set_root (gpointer ptr, MonoObject *value)
+void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
{
- RememberedSet *rs;
- TLAB_ACCESS_INIT;
- HEAVY_STAT (++stat_wbarrier_set_root);
- if (ptr_in_nursery (ptr))
- return;
- DEBUG (8, fprintf (gc_debug_file, "Adding root remset at %p (%s)\n", ptr, value ? safe_name (value) : "null"));
+ mword *dest = _dest;
+ mword *src = _src;
- rs = REMEMBERED_SET;
- if (rs->store_next + 2 < rs->end_set) {
- *(rs->store_next++) = (mword)ptr | REMSET_OTHER;
- *(rs->store_next++) = (mword)REMSET_ROOT_LOCATION;
- *(void**)ptr = value;
- return;
+ while (size) {
+ if (bitmap & 0x1)
+ mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);
+ else
+ *dest = *src;
+ ++src;
+ ++dest;
+ size -= SIZEOF_VOID_P;
+ bitmap >>= 1;
}
- rs = alloc_remset (rs->end_set - rs->data, (void*)1);
- rs->next = REMEMBERED_SET;
- REMEMBERED_SET = rs;
-#ifdef HAVE_KW_THREAD
- thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
-#endif
- *(rs->store_next++) = (mword)ptr | REMSET_OTHER;
- *(rs->store_next++) = (mword)REMSET_ROOT_LOCATION;
-
- *(void**)ptr = value;
}
+
void
mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
{
LOCK_GC;
memmove (dest, src, count * mono_class_value_size (klass, NULL));
rs = REMEMBERED_SET;
- if (ptr_in_nursery (dest) || ptr_on_stack (dest)) {
+ if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !klass->has_references) {
UNLOCK_GC;
return;
}
DEBUG (8, fprintf (gc_debug_file, "Adding value remset at %p, count %d, descr %p for class %s (%p)\n", dest, count, klass->gc_descr, klass->name, klass));
if (rs->store_next + 3 < rs->end_set) {
- *(rs->store_next++) = (mword)dest | REMSET_OTHER;
- *(rs->store_next++) = (mword)REMSET_VTYPE;
+ *(rs->store_next++) = (mword)dest | REMSET_VTYPE;
*(rs->store_next++) = (mword)klass->gc_descr;
*(rs->store_next++) = (mword)count;
UNLOCK_GC;
rs->next = REMEMBERED_SET;
REMEMBERED_SET = rs;
#ifdef HAVE_KW_THREAD
- thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
+ mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
#endif
- *(rs->store_next++) = (mword)dest | REMSET_OTHER;
- *(rs->store_next++) = (mword)REMSET_VTYPE;
+ *(rs->store_next++) = (mword)dest | REMSET_VTYPE;
*(rs->store_next++) = (mword)klass->gc_descr;
*(rs->store_next++) = (mword)count;
UNLOCK_GC;
rs->next = REMEMBERED_SET;
REMEMBERED_SET = rs;
#ifdef HAVE_KW_THREAD
- thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
+ mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
#endif
*(rs->store_next++) = (mword)obj | REMSET_OBJECT;
UNLOCK_GC;
void
describe_ptr (char *ptr)
{
- GCMemSection *section;
MonoVTable *vtable;
mword desc;
int type;
if (ptr_in_nursery (ptr)) {
printf ("Pointer inside nursery.\n");
} else {
- for (section = section_list; section;) {
- if (ptr >= section->data && ptr < section->data + section->size)
- break;
- section = section->block.next;
- }
-
- if (section) {
+ if (major_ptr_is_in_non_pinned_space (ptr)) {
printf ("Pointer inside oldspace.\n");
} else if (obj_is_from_pinned_alloc (ptr)) {
printf ("Pointer is inside a pinned chunk.\n");
if ((void**)addr >= ptr && (void**)addr < ptr + count)
*found = TRUE;
return p + 1;
- case REMSET_OTHER: {
- switch (p [1]) {
- case REMSET_VTYPE:
- ptr = (void**)(*p & ~REMSET_TYPE_MASK);
- desc = p [2];
- count = p [3];
-
- switch (desc & 0x7) {
- case DESC_TYPE_RUN_LENGTH:
- OBJ_RUN_LEN_SIZE (skip_size, desc, ptr);
- break;
- case DESC_TYPE_SMALL_BITMAP:
- OBJ_BITMAP_SIZE (skip_size, desc, start);
- break;
- default:
- // FIXME:
- g_assert_not_reached ();
- }
-
- /* The descriptor includes the size of MonoObject */
- skip_size -= sizeof (MonoObject);
- skip_size *= count;
- if ((void**)addr >= ptr && (void**)addr < ptr + (skip_size / sizeof (gpointer)))
- *found = TRUE;
+ case REMSET_VTYPE:
+ ptr = (void**)(*p & ~REMSET_TYPE_MASK);
+ desc = p [1];
+ count = p [2];
- return p + 4;
- case REMSET_ROOT_LOCATION:
- return p + 2;
+ switch (desc & 0x7) {
+ case DESC_TYPE_RUN_LENGTH:
+ OBJ_RUN_LEN_SIZE (skip_size, desc, ptr);
+ break;
+ case DESC_TYPE_SMALL_BITMAP:
+ OBJ_BITMAP_SIZE (skip_size, desc, start);
+ break;
default:
+ // FIXME:
g_assert_not_reached ();
}
- break;
- }
+
+ /* The descriptor includes the size of MonoObject */
+ skip_size -= sizeof (MonoObject);
+ skip_size *= count;
+ if ((void**)addr >= ptr && (void**)addr < ptr + (skip_size / sizeof (gpointer)))
+ *found = TRUE;
+
+ return p + 3;
default:
g_assert_not_reached ();
}
/* the global one */
for (remset = global_remset; remset; remset = remset->next) {
- DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %zd\n", remset->data, remset->store_next, remset->store_next - remset->data));
+ DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
for (p = remset->data; p < remset->store_next;) {
p = find_in_remset_loc (p, addr, &found);
if (found)
for (info = thread_table [i]; info; info = info->next) {
int j;
for (remset = info->remset; remset; remset = remset->next) {
- DEBUG (4, fprintf (gc_debug_file, "Scanning remset for thread %p, range: %p-%p, size: %zd\n", info, remset->data, remset->store_next, remset->store_next - remset->data));
+ DEBUG (4, fprintf (gc_debug_file, "Scanning remset for thread %p, range: %p-%p, size: %td\n", info, remset->data, remset->store_next, remset->store_next - remset->data));
for (p = remset->data; p < remset->store_next;) {
p = find_in_remset_loc (p, addr, &found);
if (found)
/* the freed thread ones */
for (remset = freed_thread_remsets; remset; remset = remset->next) {
- DEBUG (4, fprintf (gc_debug_file, "Scanning remset for freed thread, range: %p-%p, size: %zd\n", remset->data, remset->store_next, remset->store_next - remset->data));
+ DEBUG (4, fprintf (gc_debug_file, "Scanning remset for freed thread, range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
for (p = remset->data; p < remset->store_next;) {
p = find_in_remset_loc (p, addr, &found);
if (found)
#define HANDLE_PTR(ptr,obj) do { \
if (*(ptr) && (char*)*(ptr) >= nursery_start && (char*)*(ptr) < nursery_next) { \
if (!find_in_remsets ((char*)(ptr))) { \
- fprintf (gc_debug_file, "Oldspace->newspace reference %p at offset %zd in object %p (%s.%s) not found in remsets.\n", *(ptr), (char*)(ptr) - (char*)(obj), (obj), ((MonoObject*)(obj))->vtable->klass->name_space, ((MonoObject*)(obj))->vtable->klass->name); \
+ fprintf (gc_debug_file, "Oldspace->newspace reference %p at offset %td in object %p (%s.%s) not found in remsets.\n", *(ptr), (char*)(ptr) - (char*)(obj), (obj), ((MonoObject*)(obj))->vtable->klass->name_space, ((MonoObject*)(obj))->vtable->klass->name); \
binary_protocol_missing_remset ((obj), (gpointer)LOAD_VTABLE ((obj)), (char*)(ptr) - (char*)(obj), *(ptr), (gpointer)LOAD_VTABLE(*(ptr)), object_is_pinned (*(ptr))); \
if (!object_is_pinned (*(ptr))) \
missing_remsets = TRUE; \
} while (0)
/*
- * Check that each object reference inside the area which points into the nursery
- * can be found in the remembered sets.
+ * Check that each object reference which points into the nursery can
+ * be found in the remembered sets.
*/
-static void __attribute__((noinline))
-check_remsets_for_area (char *start, char *end)
+static void
+check_consistency_callback (char *start, size_t size, void *dummy)
{
- GCVTable *vt;
- int type_str = 0, type_rlen = 0, type_bitmap = 0, type_vector = 0, type_lbit = 0, type_complex = 0;
- while (start < end) {
- if (!*(void**)start) {
- start += sizeof (void*); /* should be ALLOC_ALIGN, really */
- continue;
- }
- vt = (GCVTable*)LOAD_VTABLE (start);
- DEBUG (8, fprintf (gc_debug_file, "Scanning object %p, vtable: %p (%s)\n", start, vt, vt->klass->name));
- if (0) {
- MonoObject *obj = (MonoObject*)start;
- g_print ("found at %p (0x%lx): %s.%s\n", start, (long)vt->desc, obj->vtable->klass->name_space, obj->vtable->klass->name);
- }
+ GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
+ DEBUG (8, fprintf (gc_debug_file, "Scanning object %p, vtable: %p (%s)\n", start, vt, vt->klass->name));
-#define SCAN_OBJECT_ACTION COUNT_OBJECT_TYPES
+#define SCAN_OBJECT_ACTION
#include "sgen-scan-object.h"
- }
}
/*
*
* Assumes the world is stopped.
*/
-void
+static void
check_consistency (void)
{
- GCMemSection *section;
+ LOSObject *bigobj;
// Need to add more checks
- // FIXME: Create a general heap enumeration function and use that
missing_remsets = FALSE;
DEBUG (1, fprintf (gc_debug_file, "Begin heap consistency check...\n"));
// Check that oldspace->newspace pointers are registered with the collector
- for (section = section_list; section; section = section->block.next) {
- if (section->block.role == MEMORY_ROLE_GEN0)
- continue;
- DEBUG (2, fprintf (gc_debug_file, "Scan of old section: %p-%p, size: %d\n", section->data, section->next_data, (int)(section->next_data - section->data)));
- check_remsets_for_area (section->data, section->next_data);
- }
+ major_iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)check_consistency_callback, NULL);
+
+ for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
+ check_consistency_callback (bigobj->data, bigobj->size, NULL);
DEBUG (1, fprintf (gc_debug_file, "Heap consistency check done.\n"));
g_assert (!missing_remsets);
}
+
+#undef HANDLE_PTR
+#define HANDLE_PTR(ptr,obj) do { \
+ if (*(ptr)) \
+ g_assert (LOAD_VTABLE (*(ptr))); \
+ } while (0)
+
+static void
+check_major_refs_callback (char *start, size_t size, void *dummy)
+{
+#define SCAN_OBJECT_ACTION
+#include "sgen-scan-object.h"
+}
+
+static void
+check_major_refs (void)
+{
+ LOSObject *bigobj;
+
+ major_iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)check_major_refs_callback, NULL);
+
+ for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
+ check_major_refs_callback (bigobj->data, bigobj->size, NULL);
+}
+
/* Check that the reference is valid */
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) do { \
* Perform consistency check on an object. Currently we only check that the
* reference fields are valid.
*/
-char*
+void
check_object (char *start)
{
if (!start)
- return NULL;
+ return;
#include "sgen-scan-object.h"
-
- return start;
}
/*
mono_gc_get_used_size (void)
{
gint64 tot = 0;
- GCMemSection *section;
LOCK_GC;
tot = los_memory_usage;
- for (section = section_list; section; section = section->block.next) {
- /* this is approximate... */
- tot += section->next_data - section->data;
- }
+ tot += nursery_section->next_data - nursery_section->data;
+ tot += major_get_used_size ();
/* FIXME: account for pinned objects */
UNLOCK_GC;
return tot;
return (MonoObject*) REVEAL_POINTER (*link_addr);
}
+gboolean
+mono_gc_ephemeron_array_add (MonoObject *obj)
+{
+ EphemeronLinkNode *node;
+
+ LOCK_GC;
+
+ node = get_internal_mem (sizeof (EphemeronLinkNode), INTERNAL_MEM_EPHEMERON_LINK);
+ if (!node) {
+ UNLOCK_GC;
+ return FALSE;
+ }
+ node->array = (char*)obj;
+ node->next = ephemeron_list;
+ ephemeron_list = node;
+
+ DEBUG (5, fprintf (gc_debug_file, "Registered ephemeron array %p\n", obj));
+
+ UNLOCK_GC;
+ return TRUE;
+}
+
void*
mono_gc_make_descr_from_bitmap (gsize *bitmap, int numbits)
{
{
gboolean result;
LOCK_GC;
- result = thread_info_lookup (ARCH_GET_THREAD ()) != NULL;
+ result = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ()) != NULL;
UNLOCK_GC;
return result;
}
/* Tries to extract a number from the passed string, taking in to account m, k
* and g suffixes */
-gboolean parse_environment_string_extract_number (gchar *str, glong *out)
+static gboolean
+parse_environment_string_extract_number (gchar *str, glong *out)
{
char *endptr;
int len = strlen (str), shift = 0;
#endif
nursery_size = DEFAULT_NURSERY_SIZE;
+ minor_collection_allowance = MIN_MINOR_COLLECTION_ALLOWANCE;
- minor_collection_section_allowance = MIN_MINOR_COLLECTION_SECTION_ALLOWANCE;
+ major_init ();
if ((env = getenv ("MONO_GC_DEBUG"))) {
opts = g_strsplit (env, ",", -1);
collect_before_allocs = TRUE;
} else if (!strcmp (opt, "check-at-minor-collections")) {
consistency_check_at_minor_collection = TRUE;
+ nursery_clear_policy = CLEAR_AT_GC;
} else if (!strcmp (opt, "xdomain-checks")) {
xdomain_checks = TRUE;
} else if (!strcmp (opt, "clear-at-gc")) {
return write_barrier_method;
}
-#endif /* HAVE_SGEN_GC */
+char*
+mono_gc_get_description (void)
+{
+ return g_strdup ("sgen");
+}
+
+void
+mono_gc_set_desktop_mode (void)
+{
+}
+
+gboolean
+mono_gc_is_moving (void)
+{
+ return TRUE;
+}
+
+gboolean
+mono_gc_is_disabled (void)
+{
+ return FALSE;
+}
+#endif /* HAVE_SGEN_GC */