#include "metadata/gc-internal.h"
#include "metadata/sgen-pointer-queue.h"
#include "metadata/sgen-pinning.h"
+#include "metadata/sgen-workers.h"
#define SGEN_HAVE_CONCURRENT_MARK
#define MS_BLOCK_SIZE ARCH_MIN_MS_BLOCK_SIZE
#define MS_BLOCK_SIZE_SHIFT ARCH_MIN_MS_BLOCK_SIZE_SHIFT
#else
-#define MS_BLOCK_SIZE (16*1024)
-#define MS_BLOCK_SIZE_SHIFT 14
+#define MS_BLOCK_SIZE_SHIFT 14 /* INT FASTENABLE */
+#define MS_BLOCK_SIZE (1 << MS_BLOCK_SIZE_SHIFT)
#endif
#define MAJOR_SECTION_SIZE MS_BLOCK_SIZE
#define CARDS_PER_BLOCK (MS_BLOCK_SIZE / CARD_SIZE_IN_BYTES)
 * of a block is the MSBlockHeader, then optional padding, then come
* the objects, so this must be >= sizeof (MSBlockHeader).
*/
-#define MS_BLOCK_SKIP 16
+#define MS_BLOCK_SKIP ((sizeof (MSBlockHeader) + 15) & ~15)
#define MS_BLOCK_FREE (MS_BLOCK_SIZE - MS_BLOCK_SKIP)
#define MS_NUM_MARK_WORDS ((MS_BLOCK_SIZE / SGEN_ALLOC_ALIGN + sizeof (mword) * 8 - 1) / (sizeof (mword) * 8))
-#if SGEN_MAX_SMALL_OBJ_SIZE > MS_BLOCK_FREE / 2
-#error MAX_SMALL_OBJ_SIZE must be at most MS_BLOCK_FREE / 2
-#endif
-
typedef struct _MSBlockInfo MSBlockInfo;
struct _MSBlockInfo {
int obj_size;
unsigned int has_pinned : 1; /* means cannot evacuate */
unsigned int is_to_space : 1;
unsigned int swept : 1;
- char *block;
void **free_list;
MSBlockInfo *next_free;
size_t pin_queue_first_entry;
mword mark_words [MS_NUM_MARK_WORDS];
};
-#define MS_BLOCK_FOR_BLOCK_INFO(b) ((b)->block)
+#define MS_BLOCK_FOR_BLOCK_INFO(b) ((char*)(b))
#define MS_BLOCK_OBJ(b,i) (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (b)->obj_size * (i))
#define MS_BLOCK_OBJ_FOR_SIZE(b,i,obj_size) (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (obj_size) * (i))
#define MS_BLOCK_DATA_FOR_OBJ(o) ((char*)((mword)(o) & ~(mword)(MS_BLOCK_SIZE - 1)))
typedef struct {
- MSBlockInfo *info;
+ MSBlockInfo info;
} MSBlockHeader;
-#define MS_BLOCK_FOR_OBJ(o) (((MSBlockHeader*)MS_BLOCK_DATA_FOR_OBJ ((o)))->info)
+#define MS_BLOCK_FOR_OBJ(o) (&((MSBlockHeader*)MS_BLOCK_DATA_FOR_OBJ ((o)))->info)
/* object index will always be small */
#define MS_BLOCK_OBJ_INDEX(o,b) ((int)(((char*)(o) - (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP)) / (b)->obj_size))
#define MS_MARK_BIT(bl,w,b) ((bl)->mark_words [(w)] & (ONE_P << (b)))
#define MS_SET_MARK_BIT(bl,w,b) ((bl)->mark_words [(w)] |= (ONE_P << (b)))
-#define MS_PAR_SET_MARK_BIT(was_marked,bl,w,b) do { \
- mword __old = (bl)->mark_words [(w)]; \
- mword __bitmask = ONE_P << (b); \
- if (__old & __bitmask) { \
- was_marked = TRUE; \
- break; \
- } \
- if (SGEN_CAS_PTR ((gpointer*)&(bl)->mark_words [(w)], \
- (gpointer)(__old | __bitmask), \
- (gpointer)__old) == \
- (gpointer)__old) { \
- was_marked = FALSE; \
- break; \
- } \
- } while (1)
#define MS_OBJ_ALLOCED(o,b) (*(void**)(o) && (*(char**)(o) < MS_BLOCK_FOR_BLOCK_INFO (b) || *(char**)(o) >= MS_BLOCK_FOR_BLOCK_INFO (b) + MS_BLOCK_SIZE))
-#define MS_BLOCK_OBJ_SIZE_FACTOR (sqrt (2.0))
+#define MS_BLOCK_OBJ_SIZE_FACTOR (pow (2.0, 1.0 / 3))
/*
* This way we can lookup block object size indexes for sizes up to
/* one free block list for each block object size */
static MSBlockInfo **free_block_lists [MS_BLOCK_TYPE_MAX];
-static long long stat_major_blocks_alloced = 0;
-static long long stat_major_blocks_freed = 0;
-static long long stat_major_blocks_lazy_swept = 0;
-static long long stat_major_objects_evacuated = 0;
+static guint64 stat_major_blocks_alloced = 0;
+static guint64 stat_major_blocks_freed = 0;
+static guint64 stat_major_blocks_lazy_swept = 0;
+static guint64 stat_major_objects_evacuated = 0;
#if SIZEOF_VOID_P != 8
-static long long stat_major_blocks_freed_ideal = 0;
-static long long stat_major_blocks_freed_less_ideal = 0;
-static long long stat_major_blocks_freed_individual = 0;
-static long long stat_major_blocks_alloced_less_ideal = 0;
+static guint64 stat_major_blocks_freed_ideal = 0;
+static guint64 stat_major_blocks_freed_less_ideal = 0;
+static guint64 stat_major_blocks_freed_individual = 0;
+static guint64 stat_major_blocks_alloced_less_ideal = 0;
#endif
#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
-static long long num_major_objects_marked = 0;
+static guint64 num_major_objects_marked = 0;
#define INC_NUM_MAJOR_OBJECTS_MARKED() (++num_major_objects_marked)
#else
#define INC_NUM_MAJOR_OBJECTS_MARKED()
#endif
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+static mono_mutex_t scanned_objects_list_lock;
+static SgenPointerQueue scanned_objects_list;
+
+/*
+ * Append PTR to the global scanned-objects list so that
+ * major_finish_major_collection () can later report total and unique
+ * scanned-object counts.  Only compiled under SGEN_HEAVY_BINARY_PROTOCOL
+ * and a no-op unless the binary protocol is enabled at runtime.
+ */
+static void
+add_scanned_object (void *ptr)
+{
+	/* Fast path: avoid taking the lock entirely when the protocol is off. */
+	if (!binary_protocol_is_enabled ())
+		return;
+
+	/* The list is shared between GC threads, hence the mutex. */
+	mono_mutex_lock (&scanned_objects_list_lock);
+	sgen_pointer_queue_add (&scanned_objects_list, ptr);
+	mono_mutex_unlock (&scanned_objects_list_lock);
+}
+#endif
+
static void
sweep_block (MSBlockInfo *block, gboolean during_major_collection);
int size = block_obj_sizes [size_index];
int count = MS_BLOCK_FREE / size;
MSBlockInfo *info;
- MSBlockHeader *header;
MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
char *obj_start;
int i;
if (!sgen_memgov_try_alloc_space (MS_BLOCK_SIZE, SPACE_MAJOR))
return FALSE;
- info = sgen_alloc_internal (INTERNAL_MEM_MS_BLOCK_INFO);
+ info = (MSBlockInfo*)ms_get_empty_block ();
SGEN_ASSERT (9, count >= 2, "block with %d objects, it must hold at least 2", count);
*/
info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD);
info->swept = 1;
- info->block = ms_get_empty_block ();
-
- header = (MSBlockHeader*) info->block;
- header->info = info;
#ifdef SGEN_HAVE_CONCURRENT_MARK
info->cardtable_mod_union = NULL;
#endif
return obj;
}
-#define MAJOR_OBJ_IS_IN_TO_SPACE(obj) FALSE
-
/*
* obj is some object. If it's not in the major heap (i.e. if it's in
* the nursery or LOS), return FALSE. Otherwise return whether it's
#define MS_MARK_OBJECT_AND_ENQUEUE_CHECKED(obj,desc,block,queue) do { \
int __word, __bit; \
MS_CALC_MARK_BIT (__word, __bit, (obj)); \
- if (!MS_MARK_BIT ((block), __word, __bit) && MS_OBJ_ALLOCED ((obj), (block))) { \
+ if (!MS_MARK_BIT ((block), __word, __bit) && MS_OBJ_ALLOCED ((obj), (block))) { \
MS_SET_MARK_BIT ((block), __word, __bit); \
- if ((block)->has_references) \
+ if (sgen_gc_descr_has_references (desc)) \
GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
INC_NUM_MAJOR_OBJECTS_MARKED (); \
#define MS_MARK_OBJECT_AND_ENQUEUE(obj,desc,block,queue) do { \
int __word, __bit; \
MS_CALC_MARK_BIT (__word, __bit, (obj)); \
- SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj); \
+ SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj); \
if (!MS_MARK_BIT ((block), __word, __bit)) { \
MS_SET_MARK_BIT ((block), __word, __bit); \
- if ((block)->has_references) \
- GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
- binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
- INC_NUM_MAJOR_OBJECTS_MARKED (); \
- } \
- } while (0)
-#define MS_PAR_MARK_OBJECT_AND_ENQUEUE(obj,desc,block,queue) do { \
- int __word, __bit; \
- gboolean __was_marked; \
- SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj); \
- MS_CALC_MARK_BIT (__word, __bit, (obj)); \
- MS_PAR_SET_MARK_BIT (__was_marked, (block), __word, __bit); \
- if (!__was_marked) { \
- if ((block)->has_references) \
+ if (sgen_gc_descr_has_references (desc)) \
GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
INC_NUM_MAJOR_OBJECTS_MARKED (); \
#ifdef SGEN_HAVE_CONCURRENT_MARK
static void
-major_copy_or_mark_object_concurrent (void **ptr, void *obj, SgenGrayQueue *queue)
+major_copy_or_mark_object_with_evacuation_concurrent (void **ptr, void *obj, SgenGrayQueue *queue)
{
+ SGEN_ASSERT (9, sgen_concurrent_collection_in_progress (), "Why are we scanning concurrently when there's no concurrent collection on?");
+ SGEN_ASSERT (9, !sgen_workers_are_working () || sgen_is_worker_thread (mono_native_thread_id_get ()), "We must not scan from two threads at the same time!");
+
g_assert (!SGEN_OBJECT_IS_FORWARDED (obj));
if (!sgen_ptr_in_nursery (obj)) {
}
#endif
-static void
-major_copy_or_mark_object (void **ptr, void *obj, SgenGrayQueue *queue)
+/*
+ * Return the number of major objects marked since the last call and
+ * reset the counter.  Always returns 0 when
+ * SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED is not defined.
+ */
+static long long
+major_get_and_reset_num_major_objects_marked (void)
{
-	MSBlockInfo *block;
-
-	HEAVY_STAT (++stat_copy_object_called_major);
-
-	SGEN_ASSERT (9, obj, "null object from pointer %p", ptr);
-	SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);
+#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
+	long long num = num_major_objects_marked;
+	num_major_objects_marked = 0;
+	return num;
+#else
+	return 0;
+#endif
+}
- if (sgen_ptr_in_nursery (obj)) {
- int word, bit;
- char *forwarded, *old_obj;
+#ifdef HEAVY_STATISTICS
+static guint64 stat_optimized_copy;
+static guint64 stat_optimized_copy_nursery;
+static guint64 stat_optimized_copy_nursery_forwarded;
+static guint64 stat_optimized_copy_nursery_pinned;
+static guint64 stat_optimized_copy_major;
+static guint64 stat_optimized_copy_major_small_fast;
+static guint64 stat_optimized_copy_major_small_slow;
+static guint64 stat_optimized_copy_major_large;
+static guint64 stat_optimized_copy_major_forwarded;
+static guint64 stat_optimized_copy_major_small_evacuate;
+static guint64 stat_optimized_major_scan;
+static guint64 stat_optimized_major_scan_no_refs;
+
+static guint64 stat_drain_prefetch_fills;
+static guint64 stat_drain_prefetch_fill_failures;
+static guint64 stat_drain_loops;
+#endif
- if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
- *ptr = forwarded;
- return;
- }
- if (SGEN_OBJECT_IS_PINNED (obj))
- return;
+static void major_scan_object_with_evacuation (char *start, mword desc, SgenGrayQueue *queue);
- /* An object in the nursery To Space has already been copied and grayed. Nothing to do. */
- if (sgen_nursery_is_to_space (obj))
- return;
+#define COPY_OR_MARK_FUNCTION_NAME major_copy_or_mark_object_no_evacuation
+#define SCAN_OBJECT_FUNCTION_NAME major_scan_object_no_evacuation
+#define DRAIN_GRAY_STACK_FUNCTION_NAME drain_gray_stack_no_evacuation
+#include "sgen-marksweep-drain-gray-stack.h"
- HEAVY_STAT (++stat_objects_copied_major);
-
- do_copy_object:
- old_obj = obj;
- obj = copy_object_no_checks (obj, queue);
- if (G_UNLIKELY (old_obj == obj)) {
- /*If we fail to evacuate an object we just stop doing it for a given block size as all other will surely fail too.*/
- if (!sgen_ptr_in_nursery (obj)) {
- int size_index;
- block = MS_BLOCK_FOR_OBJ (obj);
- size_index = block->obj_size_index;
- evacuate_block_obj_sizes [size_index] = FALSE;
- MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
- }
- return;
- }
- *ptr = obj;
-
- /*
- * FIXME: See comment for copy_object_no_checks(). If
- * we have that, we can let the allocation function
- * give us the block info, too, and we won't have to
- * re-fetch it.
- *
- * FIXME (2): We should rework this to avoid all those nursery checks.
- */
- /*
- * For the split nursery allocator the object might
- * still be in the nursery despite having being
- * promoted, in which case we can't mark it.
- */
- if (!sgen_ptr_in_nursery (obj)) {
- block = MS_BLOCK_FOR_OBJ (obj);
- MS_CALC_MARK_BIT (word, bit, obj);
- SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p already marked", obj);
- MS_SET_MARK_BIT (block, word, bit);
- binary_protocol_mark (obj, (gpointer)LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));
- }
- } else {
- char *forwarded;
- mword objsize;
+#define COPY_OR_MARK_WITH_EVACUATION
+#define COPY_OR_MARK_FUNCTION_NAME major_copy_or_mark_object_with_evacuation
+#define SCAN_OBJECT_FUNCTION_NAME major_scan_object_with_evacuation
+#define DRAIN_GRAY_STACK_FUNCTION_NAME drain_gray_stack_with_evacuation
+#include "sgen-marksweep-drain-gray-stack.h"
- /*
- * If we have don't have a fixed heap we cannot know
- * whether an object is in the LOS or in the small
- * object major heap without checking its size. To do
- * that, however, we need to know that we actually
- * have a valid object, not a forwarding pointer, so
- * we have to do this check first.
- */
- if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
- *ptr = forwarded;
- return;
+/*
+ * Drain the gray stack, dispatching to one of the two specialized drain
+ * loops generated from sgen-marksweep-drain-gray-stack.h: the evacuating
+ * variant if any block object size is currently flagged for evacuation,
+ * the plain marking variant otherwise.
+ */
+static gboolean
+drain_gray_stack (ScanCopyContext ctx)
+{
+	gboolean evacuation = FALSE;
+	int i;
+	/* A single flagged size class is enough to require the evacuating drain. */
+	for (i = 0; i < num_block_obj_sizes; ++i) {
+		if (evacuate_block_obj_sizes [i]) {
+			evacuation = TRUE;
+			break;
		}
+	}
-	objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));
-
-	if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE) {
-		int size_index;
-		gboolean evacuate;
-
-		block = MS_BLOCK_FOR_OBJ (obj);
-		size_index = block->obj_size_index;
-		evacuate = evacuate_block_obj_sizes [size_index];
-
-		if (evacuate && !block->has_pinned) {
-			g_assert (!SGEN_OBJECT_IS_PINNED (obj));
-			if (block->is_to_space)
-				return;
-			HEAVY_STAT (++stat_major_objects_evacuated);
-			goto do_copy_object;
-		} else {
-			MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
-		}
-	} else {
-		if (sgen_los_object_is_pinned (obj))
-			return;
-		binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));
+	if (evacuation)
+		return drain_gray_stack_with_evacuation (ctx);
+	else
+		return drain_gray_stack_no_evacuation (ctx);
+}
-#ifdef ENABLE_DTRACE
- if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
- MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
- MONO_GC_OBJ_PINNED ((mword)obj, sgen_safe_object_get_size (obj), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
- }
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+#include "sgen-marksweep-scan-object-concurrent.h"
#endif
- sgen_los_pin_object (obj);
- if (SGEN_OBJECT_HAS_REFERENCES (obj))
- GRAY_OBJECT_ENQUEUE (queue, obj, sgen_obj_get_descriptor (obj));
- }
- }
-}
-
static void
major_copy_or_mark_object_canonical (void **ptr, SgenGrayQueue *queue)
{
-	major_copy_or_mark_object (ptr, *ptr, queue);
+	/* Canonical (ptr-only) wrapper: load the object once and forward
+	 * to the evacuating copy/mark routine. */
+	major_copy_or_mark_object_with_evacuation (ptr, *ptr, queue);
}
#ifdef SGEN_HAVE_CONCURRENT_MARK
static void
major_copy_or_mark_object_concurrent_canonical (void **ptr, SgenGrayQueue *queue)
{
-	major_copy_or_mark_object_concurrent (ptr, *ptr, queue);
-}
-#endif
-
-static long long
-major_get_and_reset_num_major_objects_marked (void)
-{
-#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
-	long long num = num_major_objects_marked;
-	num_major_objects_marked = 0;
-	return num;
-#else
-	return 0;
-#endif
+	/* Canonical (ptr-only) wrapper for the concurrent-mark copy/mark routine. */
+	major_copy_or_mark_object_with_evacuation_concurrent (ptr, *ptr, queue);
}
-
-#include "sgen-major-scan-object.h"
-
-#ifdef SGEN_HAVE_CONCURRENT_MARK
-#define SCAN_FOR_CONCURRENT_MARK
-#include "sgen-major-scan-object.h"
-#undef SCAN_FOR_CONCURRENT_MARK
#endif
static void
DELETE_BLOCK_IN_FOREACH ();
binary_protocol_empty (MS_BLOCK_OBJ (block, 0), (char*)MS_BLOCK_OBJ (block, count) - (char*)MS_BLOCK_OBJ (block, 0));
- ms_free_block (block->block);
- sgen_free_internal (block, INTERNAL_MEM_MS_BLOCK_INFO);
+ ms_free_block (block);
--num_major_sections;
}
}
static void
-major_finish_major_collection (void)
+major_finish_major_collection (ScannedObjectCounts *counts)
{
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+	/*
+	 * Report the total and unique scanned-object counts gathered via
+	 * add_scanned_object () during this collection, then reset the list
+	 * for the next cycle.
+	 */
+	if (binary_protocol_is_enabled ()) {
+		counts->num_scanned_objects = scanned_objects_list.next_slot;
+
+		/* sort_uniq collapses duplicates, so next_slot now counts unique objects. */
+		sgen_pointer_queue_sort_uniq (&scanned_objects_list);
+		counts->num_unique_scanned_objects = scanned_objects_list.next_slot;
+
+		sgen_pointer_queue_clear (&scanned_objects_list);
+	}
+#endif
}
#if SIZEOF_VOID_P != 8
}
#ifdef HEAVY_STATISTICS
-extern long long marked_cards;
-extern long long scanned_cards;
-extern long long scanned_objects;
-extern long long remarked_cards;
+extern guint64 marked_cards;
+extern guint64 scanned_cards;
+extern guint64 scanned_objects;
+extern guint64 remarked_cards;
#endif
#define CARD_WORDS_PER_BLOCK (CARDS_PER_BLOCK / SIZEOF_VOID_P)
end = block_start + MS_BLOCK_SIZE;
base = sgen_card_table_align_pointer (obj);
+ cards += MS_BLOCK_SKIP >> CARD_BITS;
+
while (obj < end) {
size_t card_offset;
}
card_data_end = card_data + CARDS_PER_BLOCK;
+ card_data += MS_BLOCK_SKIP >> CARD_BITS;
+
for (card_data = initial_skip_card (card_data); card_data < card_data_end; ++card_data) { //card_data = skip_card (card_data + 1, card_data_end)) {
size_t index;
size_t idx = card_data - card_base;
for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES * 8; ++i)
g_assert (MS_BLOCK_OBJ_SIZE_INDEX (i) == ms_find_block_obj_size_index (i));
- mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_alloced);
- mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed);
- mono_counters_register ("# major blocks lazy swept", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_lazy_swept);
- mono_counters_register ("# major objects evacuated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_objects_evacuated);
+ mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced);
+ mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed);
+ mono_counters_register ("# major blocks lazy swept", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_lazy_swept);
+ mono_counters_register ("# major objects evacuated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_objects_evacuated);
#if SIZEOF_VOID_P != 8
- mono_counters_register ("# major blocks freed ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_ideal);
- mono_counters_register ("# major blocks freed less ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_less_ideal);
- mono_counters_register ("# major blocks freed individually", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_individual);
- mono_counters_register ("# major blocks allocated less ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_alloced_less_ideal);
+ mono_counters_register ("# major blocks freed ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_ideal);
+ mono_counters_register ("# major blocks freed less ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_less_ideal);
+ mono_counters_register ("# major blocks freed individually", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_individual);
+ mono_counters_register ("# major blocks allocated less ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced_less_ideal);
#endif
collector->section_size = MAJOR_SECTION_SIZE;
collector->count_cards = major_count_cards;
collector->major_ops.copy_or_mark_object = major_copy_or_mark_object_canonical;
- collector->major_ops.scan_object = major_scan_object;
+ collector->major_ops.scan_object = major_scan_object_with_evacuation;
#ifdef SGEN_HAVE_CONCURRENT_MARK
if (is_concurrent) {
collector->major_concurrent_ops.copy_or_mark_object = major_copy_or_mark_object_concurrent_canonical;
- collector->major_concurrent_ops.scan_object = major_scan_object_concurrent;
+ collector->major_concurrent_ops.scan_object = major_scan_object_no_mark_concurrent;
collector->major_concurrent_ops.scan_vtype = major_scan_vtype_concurrent;
}
#endif
+#if !defined (FIXED_HEAP) && !defined (SGEN_PARALLEL_MARK)
+ /* FIXME: this will not work with evacuation or the split nursery. */
+ if (!is_concurrent)
+ collector->drain_gray_stack = drain_gray_stack;
+
+#ifdef HEAVY_STATISTICS
+ mono_counters_register ("Optimized copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy);
+ mono_counters_register ("Optimized copy nursery", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery);
+ mono_counters_register ("Optimized copy nursery forwarded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery_forwarded);
+ mono_counters_register ("Optimized copy nursery pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery_pinned);
+ mono_counters_register ("Optimized copy major", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major);
+ mono_counters_register ("Optimized copy major small fast", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_fast);
+ mono_counters_register ("Optimized copy major small slow", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_slow);
+ mono_counters_register ("Optimized copy major large", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_large);
+ mono_counters_register ("Optimized major scan", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_major_scan);
+ mono_counters_register ("Optimized major scan no refs", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_major_scan_no_refs);
+
+ mono_counters_register ("Gray stack drain loops", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_loops);
+ mono_counters_register ("Gray stack prefetch fills", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_prefetch_fills);
+ mono_counters_register ("Gray stack prefetch failures", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_prefetch_fill_failures);
+#endif
+#endif
+
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+ mono_mutex_init (&scanned_objects_list_lock);
+#endif
+
+ SGEN_ASSERT (0, SGEN_MAX_SMALL_OBJ_SIZE <= MS_BLOCK_FREE / 2, "MAX_SMALL_OBJ_SIZE must be at most MS_BLOCK_FREE / 2");
+
/*cardtable requires major pages to be 8 cards aligned*/
g_assert ((MS_BLOCK_SIZE % (8 * CARD_SIZE_IN_BYTES)) == 0);
}