#define BLOCK_TAG(bl) ((bl)->has_references ? BLOCK_TAG_HAS_REFERENCES ((bl)) : (bl))
/* all allocated blocks in the system */
-static SgenArrayList allocated_blocks = SGEN_ARRAY_LIST_INIT (NULL, NULL, NULL, INTERNAL_MEM_PIN_QUEUE);
+static SgenArrayList allocated_blocks = SGEN_ARRAY_LIST_INIT (NULL, sgen_array_list_default_is_slot_set, sgen_array_list_default_cas_setter, INTERNAL_MEM_PIN_QUEUE);
/* non-allocated block free-list */
static void *empty_blocks = NULL;
static guint64 stat_major_blocks_freed = 0;
static guint64 stat_major_blocks_lazy_swept = 0;
-#if SIZEOF_VOID_P != 8
static guint64 stat_major_blocks_freed_ideal = 0;
static guint64 stat_major_blocks_freed_less_ideal = 0;
static guint64 stat_major_blocks_freed_individual = 0;
static guint64 stat_major_blocks_alloced_less_ideal = 0;
-#endif
#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
static guint64 num_major_objects_marked = 0;
{
char *start;
if (nursery_align)
- start = (char *)sgen_alloc_os_memory_aligned (nursery_size, nursery_align, (SgenAllocFlags)(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE), "nursery");
+ start = (char *)sgen_alloc_os_memory_aligned (nursery_size, nursery_align, (SgenAllocFlags)(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE), "nursery", MONO_MEM_ACCOUNT_SGEN_NURSERY);
else
- start = (char *)sgen_alloc_os_memory (nursery_size, (SgenAllocFlags)(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE), "nursery");
+ start = (char *)sgen_alloc_os_memory (nursery_size, (SgenAllocFlags)(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE), "nursery", MONO_MEM_ACCOUNT_SGEN_NURSERY);
return start;
}
for (;;) {
p = (char *)sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * alloc_num, MS_BLOCK_SIZE,
(SgenAllocFlags)(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE),
- alloc_num == 1 ? "major heap section" : NULL);
+ alloc_num == 1 ? "major heap section" : NULL, MONO_MEM_ACCOUNT_SGEN_MARKSWEEP);
if (p)
break;
alloc_num >>= 1;
}
static gboolean
-ptr_is_from_pinned_alloc (char *ptr)
+ptr_is_in_major_block (char *ptr, char **start, gboolean *pinned)
{
MSBlockInfo *block;
FOREACH_BLOCK_NO_LOCK (block) {
- if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE)
- return block->pinned;
+ if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) {
+ int count = MS_BLOCK_FREE / block->obj_size;
+ int i;
+
+ if (start)
+ *start = NULL;
+ for (i = 0; i <= count; ++i) {
+ if (ptr >= (char*)MS_BLOCK_OBJ (block, i) && ptr < (char*)MS_BLOCK_OBJ (block, i + 1)) {
+ if (start)
+ *start = (char *)MS_BLOCK_OBJ (block, i);
+ break;
+ }
+ }
+ if (pinned)
+ *pinned = block->pinned;
+ return TRUE;
+ }
} END_FOREACH_BLOCK_NO_LOCK;
return FALSE;
}
+/*
+ * Return TRUE iff PTR lies inside a pinned major-heap block.
+ *
+ * Thin wrapper over ptr_is_in_major_block (): we only need the block's
+ * pinned flag, so the object-start out-parameter is passed as NULL.
+ * Returns FALSE when PTR is not inside any major block at all.
+ */
+static gboolean
+ptr_is_from_pinned_alloc (char *ptr)
+{
+ gboolean pinned;
+ if (ptr_is_in_major_block (ptr, NULL, &pinned))
+ return pinned;
+ return FALSE;
+}
+
static void
ensure_can_access_block_free_list (MSBlockInfo *block)
{
static gboolean
major_ptr_is_in_non_pinned_space (char *ptr, char **start)
{
- MSBlockInfo *block;
-
- FOREACH_BLOCK_NO_LOCK (block) {
- if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) {
- int count = MS_BLOCK_FREE / block->obj_size;
- int i;
-
- *start = NULL;
- for (i = 0; i <= count; ++i) {
- if (ptr >= (char*)MS_BLOCK_OBJ (block, i) && ptr < (char*)MS_BLOCK_OBJ (block, i + 1)) {
- *start = (char *)MS_BLOCK_OBJ (block, i);
- break;
- }
- }
- return !block->pinned;
- }
- } END_FOREACH_BLOCK_NO_LOCK;
+ gboolean pinned;
+ if (ptr_is_in_major_block (ptr, start, &pinned))
+ return !pinned;
return FALSE;
}
gboolean pinned = flags & ITERATE_OBJECTS_PINNED;
MSBlockInfo *block;
+ /* No actual sweeping will take place if we are in the middle of a major collection. */
major_finish_sweep_checking ();
FOREACH_BLOCK_NO_LOCK (block) {
int count = MS_BLOCK_FREE / block->obj_size;
continue;
if (!block->pinned && !non_pinned)
continue;
- if (sweep && lazy_sweep) {
+ if (sweep && lazy_sweep && !block_is_swept_or_marking (block)) {
sweep_block (block);
SGEN_ASSERT (6, block->state == BLOCK_STATE_SWEPT, "Block must be swept after sweeping");
}
for (i = 0; i < count; ++i) {
void **obj = (void**) MS_BLOCK_OBJ (block, i);
- /*
- * We've finished sweep checking, but if we're sweeping lazily and
- * the flags don't require us to sweep, the block might still need
- * sweeping. In that case, we need to consult the mark bits to tell
- * us whether an object slot is live.
- */
- if (!block_is_swept_or_marking (block)) {
- int word, bit;
- SGEN_ASSERT (6, !sweep && block->state == BLOCK_STATE_NEED_SWEEPING, "Has sweeping not finished?");
- MS_CALC_MARK_BIT (word, bit, obj);
- if (!MS_MARK_BIT (block, word, bit))
- continue;
- }
if (MS_OBJ_ALLOCED (obj, block))
callback ((GCObject*)obj, block->obj_size, data);
}
/* gcc 4.2.1 from xcode4 crashes on sgen_card_table_get_card_address () when this is enabled */
#if defined(PLATFORM_MACOSX)
-#define GCC_VERSION (__GNUC__ * 10000 \
- + __GNUC_MINOR__ * 100 \
- + __GNUC_PATCHLEVEL__)
-#if GCC_VERSION <= 40300
+#if MONO_GNUC_VERSION <= 40300
#undef PREFETCH_CARDS
#endif
#endif
for (j = 0; j < num_block_obj_sizes; ++j)
free_blocks [j] = NULL;
}
-
- sgen_array_list_remove_nulls (&allocated_blocks);
}
static void sweep_finish (void);
ms_free_block (block);
SGEN_ATOMIC_ADD_P (num_major_sections, -1);
+ SGEN_ATOMIC_ADD_P (num_major_sections_freed_in_sweep, 1);
tagged_block = NULL;
}
* cooperate with the sweep thread to finish sweeping, and they will traverse from
* low to high, to avoid constantly colliding on the same blocks.
*/
- for (block_index = num_blocks; block_index-- > 0;) {
- /*
- * The block might have been freed by another thread doing some checking
- * work.
- */
- if (!ensure_block_is_checked_for_sweeping (block_index, TRUE, NULL))
- ++num_major_sections_freed_in_sweep;
+ for (block_index = allocated_blocks.next_slot; block_index-- > 0;) {
+ ensure_block_is_checked_for_sweeping (block_index, TRUE, NULL);
}
while (!try_set_sweep_state (SWEEP_STATE_COMPACTING, SWEEP_STATE_SWEEPING)) {
sweep_start ();
- SGEN_ASSERT (0, num_major_sections == allocated_blocks.next_slot, "We don't know how many blocks we have?");
-
num_major_sections_before_sweep = num_major_sections;
num_major_sections_freed_in_sweep = 0;
#endif
}
-#if SIZEOF_VOID_P != 8
static int
compare_pointers (const void *va, const void *vb) {
char *a = *(char**)va, *b = *(char**)vb;
return 1;
return 0;
}
-#endif
/*
* This is called with sweep completed and the world stopped.
*/
static void
-major_free_swept_blocks (size_t allowance)
+major_free_swept_blocks (size_t section_reserve)
{
- /* FIXME: This is probably too much. It's assuming all objects are small. */
- size_t section_reserve = allowance / MS_BLOCK_SIZE;
-
SGEN_ASSERT (0, sweep_state == SWEEP_STATE_SWEPT, "Sweeping must have finished before freeing blocks");
#ifdef TARGET_WIN32
return;
#endif
-#if SIZEOF_VOID_P != 8
{
int i, num_empty_blocks_orig, num_blocks, arr_length;
void *block;
* we're iterating.
*/
int j;
- sgen_free_os_memory (empty_block_arr [first], MS_BLOCK_SIZE * num_blocks, SGEN_ALLOC_HEAP);
+ sgen_free_os_memory (empty_block_arr [first], MS_BLOCK_SIZE * num_blocks, SGEN_ALLOC_HEAP, MONO_MEM_ACCOUNT_SGEN_MARKSWEEP);
for (j = first; j <= d; ++j)
empty_block_arr [j] = NULL;
dest = first;
*/
if (num_empty_blocks <= num_major_sections)
return;
-#endif
while (num_empty_blocks > section_reserve) {
void *next = *(void**)empty_blocks;
- sgen_free_os_memory (empty_blocks, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP);
+ sgen_free_os_memory (empty_blocks, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP, MONO_MEM_ACCOUNT_SGEN_MARKSWEEP);
empty_blocks = next;
/*
* Needs not be atomic because this is running
--num_empty_blocks;
++stat_major_blocks_freed;
-#if SIZEOF_VOID_P != 8
++stat_major_blocks_freed_individual;
-#endif
}
}
}
static void
-sgen_marksweep_init_internal (SgenMajorCollector *collector, gboolean is_concurrent)
+sgen_marksweep_init_internal (SgenMajorCollector *collector, gboolean is_concurrent, gboolean is_parallel)
{
int i;
mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced);
mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed);
mono_counters_register ("# major blocks lazy swept", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_lazy_swept);
-#if SIZEOF_VOID_P != 8
mono_counters_register ("# major blocks freed ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_ideal);
mono_counters_register ("# major blocks freed less ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_less_ideal);
mono_counters_register ("# major blocks freed individually", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_individual);
mono_counters_register ("# major blocks allocated less ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced_less_ideal);
-#endif
collector->section_size = MAJOR_SECTION_SIZE;
concurrent_mark = is_concurrent;
collector->is_concurrent = is_concurrent;
+ collector->is_parallel = is_parallel;
collector->needs_thread_pool = is_concurrent || concurrent_sweep;
collector->get_and_reset_num_major_objects_marked = major_get_and_reset_num_major_objects_marked;
collector->supports_cardtable = TRUE;
collector->major_ops_concurrent_finish.scan_vtype = major_scan_vtype_with_evacuation;
collector->major_ops_concurrent_finish.scan_ptr_field = major_scan_ptr_field_with_evacuation;
collector->major_ops_concurrent_finish.drain_gray_stack = drain_gray_stack;
+
+ if (is_parallel) {
+ /* FIXME use parallel obj ops */
+ collector->major_ops_conc_par_start = collector->major_ops_concurrent_start;
+ collector->major_ops_conc_par_finish = collector->major_ops_concurrent_finish;
+ }
}
#ifdef HEAVY_STATISTICS
void
sgen_marksweep_init (SgenMajorCollector *collector)
{
- sgen_marksweep_init_internal (collector, FALSE);
+ sgen_marksweep_init_internal (collector, FALSE, FALSE);
}
void
sgen_marksweep_conc_init (SgenMajorCollector *collector)
{
- sgen_marksweep_init_internal (collector, TRUE);
+ sgen_marksweep_init_internal (collector, TRUE, FALSE);
+}
+
+/*
+ * Initialize COLLECTOR as the concurrent *and* parallel mark&sweep major
+ * collector (is_concurrent = TRUE, is_parallel = TRUE); everything else is
+ * delegated to sgen_marksweep_init_internal ().
+ */
+void
+sgen_marksweep_conc_par_init (SgenMajorCollector *collector)
+{
+ sgen_marksweep_init_internal (collector, TRUE, TRUE);
+}
#endif