#include "metadata/sgen-protocol.h"
#include "metadata/sgen-cardtable.h"
#include "metadata/sgen-memory-governor.h"
+#include "metadata/sgen-layout-stats.h"
#include "metadata/gc-internal.h"
#if !defined(SGEN_PARALLEL_MARK) && !defined(FIXED_HEAP)
/*
* Don't allocate single blocks, but alloc a contingent of this many
- * blocks in one swoop.
+ * blocks in one swoop. This must be a power of two.
*/
#define MS_BLOCK_ALLOC_NUM 32
static long long stat_major_blocks_lazy_swept = 0;
static long long stat_major_objects_evacuated = 0;
+#if SIZEOF_VOID_P != 8
+static long long stat_major_blocks_freed_ideal = 0;
+static long long stat_major_blocks_freed_less_ideal = 0;
+static long long stat_major_blocks_freed_individual = 0;
+static long long stat_major_blocks_alloced_less_ideal = 0;
+#endif
#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
static long long num_major_objects_marked = 0;
retry:
if (!empty_blocks) {
- p = sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * MS_BLOCK_ALLOC_NUM, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "major heap section");
+ /*
+ * We try allocating MS_BLOCK_ALLOC_NUM blocks first. If that's
+ * unsuccessful, we halve the number of blocks and try again, until we're at
+ * 1. If that doesn't work, either, we assert.
+ */
+ int alloc_num = MS_BLOCK_ALLOC_NUM;
+ for (;;) {
+ p = sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * alloc_num, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE,
+ alloc_num == 1 ? "major heap section" : NULL);
+ if (p)
+ break;
+ alloc_num >>= 1;
+ }
- for (i = 0; i < MS_BLOCK_ALLOC_NUM; ++i) {
+ for (i = 0; i < alloc_num; ++i) {
block = p;
/*
* We do the free list update one after the
p += MS_BLOCK_SIZE;
}
- SGEN_ATOMIC_ADD (num_empty_blocks, MS_BLOCK_ALLOC_NUM);
+ SGEN_ATOMIC_ADD (num_empty_blocks, alloc_num);
- stat_major_blocks_alloced += MS_BLOCK_ALLOC_NUM;
+ stat_major_blocks_alloced += alloc_num;
+#if SIZEOF_VOID_P != 8
+ if (alloc_num != MS_BLOCK_ALLOC_NUM)
+ stat_major_blocks_alloced_less_ideal += alloc_num;
+#endif
}
do {
info->pinned = pinned;
info->has_references = has_references;
info->has_pinned = pinned;
- info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD); /*FIXME WHY??? */
+ /*
+ * Blocks that are to-space are not evacuated from. During a major collection
+ * blocks are allocated for two reasons: evacuating objects from the nursery and
+ * evacuating them from major blocks marked for evacuation. In both cases we don't
+ * want further evacuation.
+ */
+ info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD);
info->swept = 1;
#ifndef FIXED_HEAP
info->block = ms_get_empty_block ();
}
-static gboolean
+static MonoVTable*
major_describe_pointer (char *ptr)
{
MSBlockInfo *block;
SGEN_LOG (0, " marked %d)\n", marked ? 1 : 0);
- return TRUE;
+ return vtable;
} END_FOREACH_BLOCK;
- return FALSE;
+ return NULL;
}
static void
MS_CALC_MARK_BIT (word, bit, obj);
SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p already marked", obj);
MS_PAR_SET_MARK_BIT (was_marked, block, word, bit);
+ binary_protocol_mark (obj, vt, sgen_safe_object_get_size ((MonoObject*)obj));
}
} else {
/*
#endif
sgen_los_pin_object (obj);
- /* FIXME: only enqueue if object has references */
- GRAY_OBJECT_ENQUEUE (queue, obj);
+ if (SGEN_OBJECT_HAS_REFERENCES (obj))
+ GRAY_OBJECT_ENQUEUE (queue, obj);
INC_NUM_MAJOR_OBJECTS_MARKED ();
}
}
#endif
sgen_los_pin_object (obj);
- /* FIXME: only enqueue if object has references */
- GRAY_OBJECT_ENQUEUE (queue, obj);
+ if (SGEN_OBJECT_HAS_REFERENCES (obj))
+ GRAY_OBJECT_ENQUEUE (queue, obj);
}
}
}
sweep_block (MSBlockInfo *block, gboolean during_major_collection)
{
int count;
+ void *reversed = NULL;
if (!during_major_collection)
g_assert (!sgen_concurrent_collection_in_progress ());
/* reset mark bits */
memset (block->mark_words, 0, sizeof (mword) * MS_NUM_MARK_WORDS);
- /*
- * FIXME: reverse free list so that it's in address
- * order
- */
+ /* Reverse free list so that it's in address order */
+ reversed = NULL;
+ while (block->free_list) {
+ void *next = *(void**)block->free_list;
+ *(void**)block->free_list = reversed;
+ reversed = block->free_list;
+ block->free_list = next;
+ }
+ block->free_list = reversed;
block->swept = 1;
}
static inline int
bitcount (mword d)
{
-#if SIZEOF_VOID_P == 8
- /* http://www.jjj.de/bitwizardry/bitwizardrypage.html */
- d -= (d>>1) & 0x5555555555555555;
- d = ((d>>2) & 0x3333333333333333) + (d & 0x3333333333333333);
- d = ((d>>4) + d) & 0x0f0f0f0f0f0f0f0f;
- d *= 0x0101010101010101;
- return d >> 56;
+ int count = 0;
+
+#ifdef __GNUC__
+ if (sizeof (mword) == sizeof (unsigned long))
+ count += __builtin_popcountl (d);
+ else
+ count += __builtin_popcount (d);
#else
- /* http://aggregate.org/MAGIC/ */
- d -= ((d >> 1) & 0x55555555);
- d = (((d >> 2) & 0x33333333) + (d & 0x33333333));
- d = (((d >> 4) + d) & 0x0f0f0f0f);
- d += (d >> 8);
- d += (d >> 16);
- return (d & 0x0000003f);
+ while (d) {
+ count ++;
+ d &= (d - 1);
+ }
#endif
+ return count;
}
static void
{
}
+#if !defined(FIXED_HEAP) && SIZEOF_VOID_P != 8
+static int
+compare_pointers (const void *va, const void *vb) {
+ char *a = *(char**)va, *b = *(char**)vb;
+ if (a < b)
+ return -1;
+ if (a > b)
+ return 1;
+ return 0;
+}
+#endif
+
static void
major_have_computer_minor_collection_allowance (void)
{
g_assert (have_swept);
+#if SIZEOF_VOID_P != 8
+ {
+ int i, num_empty_blocks_orig, num_blocks, arr_length;
+ void *block;
+ void **empty_block_arr;
+ void **rebuild_next;
+
+#ifdef TARGET_WIN32
+ /*
+ * sgen_free_os_memory () asserts in mono_vfree () because Windows doesn't like freeing the middle of
+ * a VirtualAlloc ()-ed block.
+ */
+ return;
+#endif
+
+ if (num_empty_blocks <= section_reserve)
+ return;
+ SGEN_ASSERT (0, num_empty_blocks > 0, "section reserve can't be negative");
+
+ num_empty_blocks_orig = num_empty_blocks;
+ empty_block_arr = (void**)sgen_alloc_internal_dynamic (sizeof (void*) * num_empty_blocks_orig,
+ INTERNAL_MEM_MS_BLOCK_INFO_SORT, FALSE);
+ if (!empty_block_arr)
+ goto fallback;
+
+ i = 0;
+ for (block = empty_blocks; block; block = *(void**)block)
+ empty_block_arr [i++] = block;
+ SGEN_ASSERT (0, i == num_empty_blocks, "empty block count wrong");
+
+ sgen_qsort (empty_block_arr, num_empty_blocks, sizeof (void*), compare_pointers);
+
+ /*
+ * We iterate over the free blocks, trying to find MS_BLOCK_ALLOC_NUM
+ * contiguous ones. If we do, we free them. If that's not enough to get to
+ * section_reserve, we halve the number of contiguous blocks we're looking
+ * for and have another go, until we're done with looking for pairs of
+ * blocks, at which point we give up and go to the fallback.
+ */
+ arr_length = num_empty_blocks_orig;
+ num_blocks = MS_BLOCK_ALLOC_NUM;
+ while (num_empty_blocks > section_reserve && num_blocks > 1) {
+ int first = -1;
+ int dest = 0;
+
+ dest = 0;
+ for (i = 0; i < arr_length; ++i) {
+ int d = dest;
+ void *block = empty_block_arr [i];
+ SGEN_ASSERT (0, block, "we're not shifting correctly");
+ if (i != dest) {
+ empty_block_arr [dest] = block;
+ /*
+ * This is not strictly necessary, but we're
+ * cautious.
+ */
+ empty_block_arr [i] = NULL;
+ }
+ ++dest;
+
+ if (first < 0) {
+ first = d;
+ continue;
+ }
+
+ SGEN_ASSERT (0, first >= 0 && d > first, "algorithm is wrong");
+
+ if ((char*)block != ((char*)empty_block_arr [d-1]) + MS_BLOCK_SIZE) {
+ first = d;
+ continue;
+ }
+
+ if (d + 1 - first == num_blocks) {
+ /*
+ * We found num_blocks contiguous blocks. Free them
+ * and null their array entries. As an optimization
+ * we could, instead of nulling the entries, shift
+ * the following entries over to the left, while
+ * we're iterating.
+ */
+ int j;
+ sgen_free_os_memory (empty_block_arr [first], MS_BLOCK_SIZE * num_blocks, SGEN_ALLOC_HEAP);
+ for (j = first; j <= d; ++j)
+ empty_block_arr [j] = NULL;
+ dest = first;
+ first = -1;
+
+ num_empty_blocks -= num_blocks;
+
+ stat_major_blocks_freed += num_blocks;
+ if (num_blocks == MS_BLOCK_ALLOC_NUM)
+ stat_major_blocks_freed_ideal += num_blocks;
+ else
+ stat_major_blocks_freed_less_ideal += num_blocks;
+
+ }
+ }
+
+ SGEN_ASSERT (0, dest <= i && dest <= arr_length, "array length is off");
+ arr_length = dest;
+ SGEN_ASSERT (0, arr_length == num_empty_blocks, "array length is off");
+
+ num_blocks >>= 1;
+ }
+
+ /* rebuild empty_blocks free list */
+ rebuild_next = (void**)&empty_blocks;
+ for (i = 0; i < arr_length; ++i) {
+ void *block = empty_block_arr [i];
+ SGEN_ASSERT (0, block, "we're missing blocks");
+ *rebuild_next = block;
+ rebuild_next = (void**)block;
+ }
+ *rebuild_next = NULL;
+
+ /* free array */
+ sgen_free_internal_dynamic (empty_block_arr, sizeof (void*) * num_empty_blocks_orig, INTERNAL_MEM_MS_BLOCK_INFO_SORT);
+ }
+
+ SGEN_ASSERT (0, num_empty_blocks >= 0, "we freed more blocks than we had in the first place?");
+
+ fallback:
/*
- * FIXME: We don't free blocks on 32 bit platforms because it
- * can lead to address space fragmentation, since we're
- * allocating blocks in larger contingents.
+ * This is our threshold. If there's not more empty than used blocks, we won't
+ * release uncontiguous blocks, in fear of fragmenting the address space.
*/
- if (sizeof (mword) < 8)
+ if (num_empty_blocks <= num_major_sections)
return;
+#endif
while (num_empty_blocks > section_reserve) {
void *next = *(void**)empty_blocks;
--num_empty_blocks;
++stat_major_blocks_freed;
+#if SIZEOF_VOID_P != 8
+ ++stat_major_blocks_freed_individual;
+#endif
}
#endif
}
*/
if (!card_data)
continue;
+#else
+ g_assert_not_reached ();
+ card_data = NULL;
#endif
} else {
card_data = card_base = sgen_card_table_get_card_scan_address ((mword)block_start);
MSBlockInfo *block;
FOREACH_BLOCK (block) {
- guint8 *cards;
- gboolean init = FALSE;
+ size_t num_cards;
- if (!block->cardtable_mod_union) {
- block->cardtable_mod_union = sgen_alloc_internal_dynamic (CARDS_PER_BLOCK,
- INTERNAL_MEM_CARDTABLE_MOD_UNION, TRUE);
- init = TRUE;
- }
+ block->cardtable_mod_union = sgen_card_table_update_mod_union (block->cardtable_mod_union,
+ block->block, MS_BLOCK_SIZE, &num_cards);
- cards = sgen_card_table_get_card_scan_address ((mword)block->block);
- if (init) {
- memcpy (block->cardtable_mod_union, cards, CARDS_PER_BLOCK);
- } else {
- int i;
- for (i = 0; i < CARDS_PER_BLOCK; ++i)
- block->cardtable_mod_union [i] |= cards [i];
- }
+ SGEN_ASSERT (0, num_cards == CARDS_PER_BLOCK, "Number of cards calculation is wrong");
} END_FOREACH_BLOCK;
}
+
+static guint8*
+major_get_cardtable_mod_union_for_object (char *obj)
+{
+ MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
+ return &block->cardtable_mod_union [(obj - (char*)sgen_card_table_align_pointer (block->block)) >> CARD_BITS];
+}
#endif
static void
mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed);
mono_counters_register ("# major blocks lazy swept", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_lazy_swept);
mono_counters_register ("# major objects evacuated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_objects_evacuated);
+#if SIZEOF_VOID_P != 8
+ mono_counters_register ("# major blocks freed ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_ideal);
+ mono_counters_register ("# major blocks freed less ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_less_ideal);
+ mono_counters_register ("# major blocks freed individually", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_individual);
+ mono_counters_register ("# major blocks allocated less ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_alloced_less_ideal);
+#endif
+
#ifdef SGEN_PARALLEL_MARK
#ifndef HAVE_KW_THREAD
mono_native_tls_alloc (&workers_free_block_lists_key, NULL);
collector->scan_card_table = major_scan_card_table;
collector->iterate_live_block_ranges = (void*)(void*) major_iterate_live_block_ranges;
#ifdef SGEN_HAVE_CONCURRENT_MARK
- if (is_concurrent)
+ if (is_concurrent) {
collector->update_cardtable_mod_union = update_cardtable_mod_union;
+ collector->get_cardtable_mod_union_for_object = major_get_cardtable_mod_union_for_object;
+ }
#endif
collector->init_to_space = major_init_to_space;
collector->sweep = major_sweep;