diff --git a/mono/sgen/sgen-marksweep.c b/mono/sgen/sgen-marksweep.c
index 5c4ec06a9eb..04feb680f2d 100644
--- a/mono/sgen/sgen-marksweep.c
+++ b/mono/sgen/sgen-marksweep.c
@@ -1,5 +1,6 @@
-/*
- * sgen-marksweep.c: The Mark & Sweep major collector.
+/**
+ * \file
+ * The Mark & Sweep major collector.
 *
 * Author:
 * Mark Probst
@@ -31,16 +32,17 @@
 #include "mono/sgen/sgen-thread-pool.h"
 #include "mono/sgen/sgen-client.h"
 #include "mono/utils/mono-memory-model.h"
+#include "mono/utils/mono-proclib.h"
 
-#if defined(ARCH_MIN_MS_BLOCK_SIZE) && defined(ARCH_MIN_MS_BLOCK_SIZE_SHIFT)
-#define MS_BLOCK_SIZE ARCH_MIN_MS_BLOCK_SIZE
-#define MS_BLOCK_SIZE_SHIFT ARCH_MIN_MS_BLOCK_SIZE_SHIFT
-#else
-#define MS_BLOCK_SIZE_SHIFT 14 /* INT FASTENABLE */
-#define MS_BLOCK_SIZE (1 << MS_BLOCK_SIZE_SHIFT)
-#endif
-#define MAJOR_SECTION_SIZE MS_BLOCK_SIZE
-#define CARDS_PER_BLOCK (MS_BLOCK_SIZE / CARD_SIZE_IN_BYTES)
+static int ms_block_size;
+
+/*
+ * Blocks must be at least this size, meaning that if we detect a
+ * page size lower than this, we'll use this instead.
+ */
+#define MS_BLOCK_SIZE_MIN (1024 * 16)
+
+#define CARDS_PER_BLOCK (ms_block_size / CARD_SIZE_IN_BYTES)
 
 /*
 * Don't allocate single blocks, but alloc a contingent of this many
@@ -48,16 +50,22 @@
 */
 #define MS_BLOCK_ALLOC_NUM 32
 
+#define MS_NUM_MARK_WORDS ((ms_block_size / SGEN_ALLOC_ALIGN + sizeof (guint32) * 8 - 1) / (sizeof (guint32) * 8))
+
+/*
+ * Use this instead of sizeof (MSBlockInfo) since the mark_words
+ * array size depends on page size at runtime.
+ */
+#define SIZEOF_MS_BLOCK_INFO (sizeof (MSBlockInfo) + sizeof (guint32) * (MS_NUM_MARK_WORDS - MONO_ZERO_LEN_ARRAY))
+
 /*
 * Number of bytes before the first object in a block. At the start
 * of a block is the MSBlockHeader, then optional padding, then come
- * the objects, so this must be >= sizeof (MSBlockHeader).
+ * the objects, so this must be >= SIZEOF_MS_BLOCK_INFO.
*/ -#define MS_BLOCK_SKIP ((sizeof (MSBlockHeader) + 15) & ~15) - -#define MS_BLOCK_FREE (MS_BLOCK_SIZE - MS_BLOCK_SKIP) +#define MS_BLOCK_SKIP ((SIZEOF_MS_BLOCK_INFO + 15) & ~15) -#define MS_NUM_MARK_WORDS (MS_BLOCK_SIZE / SGEN_ALLOC_ALIGN + sizeof (guint32) * 8 - 1) / (sizeof (guint32) * 8) +#define MS_BLOCK_FREE (ms_block_size - MS_BLOCK_SKIP) /* * Blocks progress from one state to the next: @@ -107,14 +115,14 @@ struct _MSBlockInfo { void ** volatile free_list; MSBlockInfo * volatile next_free; guint8 * volatile cardtable_mod_union; - guint32 mark_words [MS_NUM_MARK_WORDS]; + guint32 mark_words [MONO_ZERO_LEN_ARRAY]; }; #define MS_BLOCK_FOR_BLOCK_INFO(b) ((char*)(b)) #define MS_BLOCK_OBJ(b,i) ((GCObject *)(MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (b)->obj_size * (i))) #define MS_BLOCK_OBJ_FOR_SIZE(b,i,obj_size) (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (obj_size) * (i)) -#define MS_BLOCK_DATA_FOR_OBJ(o) ((char*)((mword)(o) & ~(mword)(MS_BLOCK_SIZE - 1))) +#define MS_BLOCK_DATA_FOR_OBJ(o) ((char*)((mword)(o) & ~(mword)(ms_block_size - 1))) typedef struct { MSBlockInfo info; @@ -149,7 +157,7 @@ typedef struct { } while (0) -#define MS_OBJ_ALLOCED(o,b) (*(void**)(o) && (*(char**)(o) < MS_BLOCK_FOR_BLOCK_INFO (b) || *(char**)(o) >= MS_BLOCK_FOR_BLOCK_INFO (b) + MS_BLOCK_SIZE)) +#define MS_OBJ_ALLOCED(o,b) (*(void**)(o) && (*(char**)(o) < MS_BLOCK_FOR_BLOCK_INFO (b) || *(char**)(o) >= MS_BLOCK_FOR_BLOCK_INFO (b) + ms_block_size)) #define MS_BLOCK_OBJ_SIZE_FACTOR (pow (2.0, 1.0 / 3)) @@ -186,6 +194,8 @@ static volatile int sweep_state = SWEEP_STATE_SWEPT; static gboolean concurrent_mark; static gboolean concurrent_sweep = TRUE; +int sweep_pool_context = -1; + #define BLOCK_IS_TAGGED_HAS_REFERENCES(bl) SGEN_POINTER_IS_TAGGED_1 ((bl)) #define BLOCK_TAG_HAS_REFERENCES(bl) SGEN_POINTER_TAG_1 ((bl)) @@ -302,7 +312,7 @@ ms_find_block_obj_size_index (size_t size) ms_find_block_obj_size_index ((s))) static void* -major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits) +major_alloc_heap (mword nursery_size, mword nursery_align) { char *start; if (nursery_align) @@ -316,7 +326,7 @@ major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits) static void update_heap_boundaries_for_block (MSBlockInfo *block) { - sgen_update_heap_boundaries ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), (mword)MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE); + sgen_update_heap_boundaries ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), (mword)MS_BLOCK_FOR_BLOCK_INFO (block) + ms_block_size); } /* @@ -338,7 +348,7 @@ ms_get_empty_block (void) */ int alloc_num = MS_BLOCK_ALLOC_NUM; for (;;) { - p = (char *)sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * alloc_num, MS_BLOCK_SIZE, + p = (char *)sgen_alloc_os_memory_aligned (ms_block_size * alloc_num, ms_block_size, (SgenAllocFlags)(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE), alloc_num == 1 ? 
"major heap section" : NULL, MONO_MEM_ACCOUNT_SGEN_MARKSWEEP); if (p) @@ -357,7 +367,7 @@ ms_get_empty_block (void) empty = empty_blocks; *(void**)block = empty; } while (SGEN_CAS_PTR ((gpointer*)&empty_blocks, block, empty) != empty); - p += MS_BLOCK_SIZE; + p += ms_block_size; } SGEN_ATOMIC_ADD_P (num_empty_blocks, alloc_num); @@ -381,7 +391,7 @@ ms_get_empty_block (void) *(void**)block = NULL; - g_assert (!((mword)block & (MS_BLOCK_SIZE - 1))); + g_assert (!((mword)block & (ms_block_size - 1))); return block; } @@ -396,10 +406,10 @@ ms_free_block (MSBlockInfo *info) void *empty; char *block = MS_BLOCK_FOR_BLOCK_INFO (info); - sgen_memgov_release_space (MS_BLOCK_SIZE, SPACE_MAJOR); + sgen_memgov_release_space (ms_block_size, SPACE_MAJOR); if (info->cardtable_mod_union) - sgen_card_table_free_mod_union (info->cardtable_mod_union, block, MS_BLOCK_SIZE); - memset (block, 0, MS_BLOCK_SIZE); + sgen_card_table_free_mod_union (info->cardtable_mod_union, block, ms_block_size); + memset (block, 0, ms_block_size); do { empty = empty_blocks; @@ -408,7 +418,7 @@ ms_free_block (MSBlockInfo *info) SGEN_ATOMIC_ADD_P (num_empty_blocks, 1); - binary_protocol_block_free (block, MS_BLOCK_SIZE); + binary_protocol_block_free (block, ms_block_size); } static gboolean @@ -523,7 +533,7 @@ ms_alloc_block (int size_index, gboolean pinned, gboolean has_references) char *obj_start; int i; - if (!sgen_memgov_try_alloc_space (MS_BLOCK_SIZE, SPACE_MAJOR)) + if (!sgen_memgov_try_alloc_space (ms_block_size, SPACE_MAJOR)) return FALSE; info = (MSBlockInfo*)ms_get_empty_block (); @@ -549,7 +559,7 @@ ms_alloc_block (int size_index, gboolean pinned, gboolean has_references) update_heap_boundaries_for_block (info); - binary_protocol_block_alloc (info, MS_BLOCK_SIZE); + binary_protocol_block_alloc (info, ms_block_size); /* build free list */ obj_start = MS_BLOCK_FOR_BLOCK_INFO (info) + MS_BLOCK_SKIP; @@ -577,7 +587,7 @@ ptr_is_in_major_block (char *ptr, char **start, gboolean *pinned) MSBlockInfo *block; FOREACH_BLOCK_NO_LOCK (block) { - if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) { + if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + ms_block_size) { int count = MS_BLOCK_FREE / block->obj_size; int i; @@ -726,7 +736,6 @@ get_block: */ if (SGEN_CAS_PTR ((volatile gpointer *)&free_blocks [size_index], next_free, block) != block) goto get_block; - g_assert (block->free_list); block->next_free = free_blocks_local [size_index]; free_blocks_local [size_index] = block; @@ -918,7 +927,7 @@ major_finish_sweep_checking (void) wait: job = sweep_job; if (job) - sgen_thread_pool_job_wait (job); + sgen_thread_pool_job_wait (sweep_pool_context, job); SGEN_ASSERT (0, !sweep_job, "Why did the sweep job not null itself?"); SGEN_ASSERT (0, sweep_state == SWEEP_STATE_SWEPT, "How is the sweep job done but we're not swept?"); } @@ -963,7 +972,7 @@ major_is_valid_object (char *object) int idx; char *obj; - if ((MS_BLOCK_FOR_BLOCK_INFO (block) > object) || ((MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) <= object)) + if ((MS_BLOCK_FOR_BLOCK_INFO (block) > object) || ((MS_BLOCK_FOR_BLOCK_INFO (block) + ms_block_size) <= object)) continue; idx = MS_BLOCK_OBJ_INDEX (object, block); @@ -990,7 +999,7 @@ major_describe_pointer (char *ptr) int w, b; gboolean marked; - if ((MS_BLOCK_FOR_BLOCK_INFO (block) > ptr) || ((MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) <= ptr)) + if ((MS_BLOCK_FOR_BLOCK_INFO (block) > ptr) || ((MS_BLOCK_FOR_BLOCK_INFO (block) + 
ms_block_size) <= ptr)) continue; SGEN_LOG (0, "major-ptr (block %p sz %d pin %d ref %d)\n", @@ -1091,13 +1100,13 @@ get_cardtable_mod_union_for_block (MSBlockInfo *block, gboolean allocate) return mod_union; else if (!allocate) return NULL; - mod_union = sgen_card_table_alloc_mod_union (MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE); + mod_union = sgen_card_table_alloc_mod_union (MS_BLOCK_FOR_BLOCK_INFO (block), ms_block_size); other = (guint8 *)SGEN_CAS_PTR ((gpointer*)&block->cardtable_mod_union, mod_union, NULL); if (!other) { SGEN_ASSERT (0, block->cardtable_mod_union == mod_union, "Why did CAS not replace?"); return mod_union; } - sgen_card_table_free_mod_union (mod_union, MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE); + sgen_card_table_free_mod_union (mod_union, MS_BLOCK_FOR_BLOCK_INFO (block), ms_block_size); return other; } @@ -1400,10 +1409,10 @@ static inline void sweep_block_for_size (MSBlockInfo *block, int count, int obj_size) { int obj_index; + void *obj = MS_BLOCK_OBJ_FOR_SIZE (block, 0, obj_size); - for (obj_index = 0; obj_index < count; ++obj_index) { + for (obj_index = 0; obj_index < count; ++obj_index, obj = (void*)((mword)obj + obj_size)) { int word, bit; - void *obj = MS_BLOCK_OBJ_FOR_SIZE (block, obj_index, obj_size); MS_CALC_MARK_BIT (word, bit, obj); if (MS_MARK_BIT (block, word, bit)) { @@ -1432,7 +1441,7 @@ try_set_block_state (MSBlockInfo *block, gint32 new_state, gint32 expected_state gint32 old_state = SGEN_CAS (&block->state, new_state, expected_state); gboolean success = old_state == expected_state; if (success) - binary_protocol_block_set_state (block, MS_BLOCK_SIZE, old_state, new_state); + binary_protocol_block_set_state (block, ms_block_size, old_state, new_state); return success; } @@ -1441,7 +1450,7 @@ set_block_state (MSBlockInfo *block, gint32 new_state, gint32 expected_state) { SGEN_ASSERT (6, block->state == expected_state, "Block state incorrect before set"); block->state = new_state; - binary_protocol_block_set_state (block, MS_BLOCK_SIZE, expected_state, new_state); + binary_protocol_block_set_state (block, ms_block_size, expected_state, new_state); } /* @@ -1555,6 +1564,25 @@ sgen_worker_clear_free_block_lists (WorkerData *worker) } } +static void +sgen_worker_clear_free_block_lists_evac (WorkerData *worker) +{ + int i, j; + + if (!worker->free_block_lists) + return; + + for (i = 0; i < MS_BLOCK_TYPE_MAX; i++) { + for (j = 0; j < num_block_obj_sizes; j++) { + if (((MSBlockInfo***) worker->free_block_lists) [i][j]) + SGEN_ASSERT (0, !((MSBlockInfo***) worker->free_block_lists) [i][j]->next_free, "Why do we have linked free blocks on the workers"); + + if (evacuate_block_obj_sizes [j]) + ((MSBlockInfo***) worker->free_block_lists) [i][j] = NULL; + } + } +} + static void sweep_start (void) { @@ -1571,7 +1599,8 @@ sweep_start (void) free_blocks [j] = NULL; } - sgen_workers_foreach (sgen_worker_clear_free_block_lists); + sgen_workers_foreach (GENERATION_NURSERY, sgen_worker_clear_free_block_lists); + sgen_workers_foreach (GENERATION_OLD, sgen_worker_clear_free_block_lists); } static void sweep_finish (void); @@ -1787,7 +1816,7 @@ sweep_job_func (void *thread_data_untyped, SgenThreadPoolJob *job) */ if (concurrent_sweep && lazy_sweep) { sweep_blocks_job = sgen_thread_pool_job_alloc ("sweep_blocks", sweep_blocks_job_func, sizeof (SgenThreadPoolJob)); - sgen_thread_pool_job_enqueue (sweep_blocks_job); + sgen_thread_pool_job_enqueue (sweep_pool_context, sweep_blocks_job); } sweep_finish (); @@ -1836,7 +1865,7 @@ major_sweep (void) SGEN_ASSERT (0, 
!sweep_job, "We haven't finished the last sweep?"); if (concurrent_sweep) { sweep_job = sgen_thread_pool_job_alloc ("sweep", sweep_job_func, sizeof (SgenThreadPoolJob)); - sgen_thread_pool_job_enqueue (sweep_job); + sgen_thread_pool_job_enqueue (sweep_pool_context, sweep_job); } else { sweep_job_func (NULL, NULL); } @@ -2038,6 +2067,10 @@ major_start_major_collection (void) sgen_evacuation_freelist_blocks (&free_block_lists [MS_BLOCK_FLAG_REFS][i], i); } + /* We expect workers to have very few blocks on the freelist, just evacuate them */ + sgen_workers_foreach (GENERATION_NURSERY, sgen_worker_clear_free_block_lists_evac); + sgen_workers_foreach (GENERATION_OLD, sgen_worker_clear_free_block_lists_evac); + if (lazy_sweep && concurrent_sweep) { /* * sweep_blocks_job is created before sweep_finish, which we wait for above @@ -2046,7 +2079,7 @@ major_start_major_collection (void) */ SgenThreadPoolJob *job = sweep_blocks_job; if (job) - sgen_thread_pool_job_wait (job); + sgen_thread_pool_job_wait (sweep_pool_context, job); } if (lazy_sweep && !concurrent_sweep) @@ -2104,7 +2137,7 @@ major_free_swept_blocks (size_t section_reserve) { SGEN_ASSERT (0, sweep_state == SWEEP_STATE_SWEPT, "Sweeping must have finished before freeing blocks"); -#ifdef TARGET_WIN32 +#if defined(HOST_WIN32) || defined(HOST_ORBIS) /* * sgen_free_os_memory () asserts in mono_vfree () because windows doesn't like freeing the middle of * a VirtualAlloc ()-ed block. @@ -2170,7 +2203,7 @@ major_free_swept_blocks (size_t section_reserve) SGEN_ASSERT (6, first >= 0 && d > first, "algorithm is wrong"); - if ((char*)block != ((char*)empty_block_arr [d-1]) + MS_BLOCK_SIZE) { + if ((char*)block != ((char*)empty_block_arr [d-1]) + ms_block_size) { first = d; continue; } @@ -2184,7 +2217,7 @@ major_free_swept_blocks (size_t section_reserve) * we're iterating. 
*/
 int j;
- sgen_free_os_memory (empty_block_arr [first], MS_BLOCK_SIZE * num_blocks, SGEN_ALLOC_HEAP, MONO_MEM_ACCOUNT_SGEN_MARKSWEEP);
+ sgen_free_os_memory (empty_block_arr [first], ms_block_size * num_blocks, SGEN_ALLOC_HEAP, MONO_MEM_ACCOUNT_SGEN_MARKSWEEP);
 for (j = first; j <= d; ++j)
 empty_block_arr [j] = NULL;
 dest = first;
@@ -2234,7 +2267,7 @@ major_free_swept_blocks (size_t section_reserve)
 
 while (num_empty_blocks > section_reserve) {
 void *next = *(void**)empty_blocks;
- sgen_free_os_memory (empty_blocks, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP, MONO_MEM_ACCOUNT_SGEN_MARKSWEEP);
+ sgen_free_os_memory (empty_blocks, ms_block_size, SGEN_ALLOC_HEAP, MONO_MEM_ACCOUNT_SGEN_MARKSWEEP);
 empty_blocks = next;
 /*
 * Need not be atomic because this is running
@@ -2255,7 +2288,7 @@ major_pin_objects (SgenGrayQueue *queue)
 FOREACH_BLOCK_NO_LOCK (block) {
 size_t first_entry, last_entry;
 SGEN_ASSERT (6, block_is_swept_or_marking (block), "All blocks must be swept when we're pinning.");
- sgen_find_optimized_pin_queue_area (MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SKIP, MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE,
+ sgen_find_optimized_pin_queue_area (MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SKIP, MS_BLOCK_FOR_BLOCK_INFO (block) + ms_block_size,
 &first_entry, &last_entry);
 mark_pinned_objects_in_block (block, first_entry, last_entry, queue);
 } END_FOREACH_BLOCK_NO_LOCK;
@@ -2311,7 +2344,7 @@ static size_t
 get_bytes_survived_last_sweep (void)
 {
 SGEN_ASSERT (0, sweep_state == SWEEP_STATE_SWEPT, "Can only query unswept sections after sweep");
- return (num_major_sections_before_sweep - num_major_sections_freed_in_sweep) * MS_BLOCK_SIZE;
+ return (num_major_sections_before_sweep - num_major_sections_freed_in_sweep) * ms_block_size;
 }
 
 static gboolean
@@ -2365,7 +2398,7 @@ major_iterate_block_ranges (sgen_cardtable_block_callback callback)
 
 FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK (block, has_references) {
 if (has_references)
- callback ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
+ callback ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), ms_block_size);
 } END_FOREACH_BLOCK_NO_LOCK;
 }
 
@@ -2378,7 +2411,7 @@ major_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
 major_finish_sweep_checking ();
 FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK (block, has_references) {
 if (has_references)
- callback ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
+ callback ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), ms_block_size);
 } END_FOREACH_BLOCK_NO_LOCK;
 }
 
@@ -2399,7 +2432,7 @@ static guint8*
 initial_skip_card (guint8 *card_data)
 {
 mword *cards = (mword*)card_data;
- mword card;
+ mword card = 0;
 int i;
 for (i = 0; i < CARD_WORDS_PER_BLOCK; ++i) {
 card = cards [i];
@@ -2427,17 +2460,22 @@ initial_skip_card (guint8 *card_data)
 
 #define MS_BLOCK_OBJ_INDEX_FAST(o,b,os) (((char*)(o) - ((b) + MS_BLOCK_SKIP)) / (os))
 #define MS_BLOCK_OBJ_FAST(b,os,i) ((b) + MS_BLOCK_SKIP + (os) * (i))
-#define MS_OBJ_ALLOCED_FAST(o,b) (*(void**)(o) && (*(char**)(o) < (b) || *(char**)(o) >= (b) + MS_BLOCK_SIZE))
+#define MS_OBJ_ALLOCED_FAST(o,b) (*(void**)(o) && (*(char**)(o) < (b) || *(char**)(o) >= (b) + ms_block_size))
 
 static void
 scan_card_table_for_block (MSBlockInfo *block, CardTableScanType scan_type, ScanCopyContext ctx)
 {
 SgenGrayQueue *queue = ctx.queue;
 ScanObjectFunc scan_func = ctx.ops->scan_object;
+ /*
+ * FIXME: On systems with very large pages, we allocate fairly large
+ * arrays on the stack here. This shouldn't be a problem once block
+ * size is no longer required to be a multiple of the system page size.
+ */ #ifndef SGEN_HAVE_OVERLAPPING_CARDS - guint8 cards_copy [CARDS_PER_BLOCK]; + guint8 *cards_copy = alloca (sizeof (guint8) * CARDS_PER_BLOCK); #endif - guint8 cards_preclean [CARDS_PER_BLOCK]; + guint8 *cards_preclean = alloca (sizeof (guint8) * CARDS_PER_BLOCK); gboolean small_objects; int block_obj_size; char *block_start; @@ -2680,7 +2718,7 @@ update_cardtable_mod_union (void) if (has_dirty_cards) { size_t num_cards; guint8 *mod_union = get_cardtable_mod_union_for_block (block, TRUE); - sgen_card_table_update_mod_union (mod_union, MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE, &num_cards); + sgen_card_table_update_mod_union (mod_union, MS_BLOCK_FOR_BLOCK_INFO (block), ms_block_size, &num_cards); SGEN_ASSERT (6, num_cards == CARDS_PER_BLOCK, "Number of cards calculation is wrong"); } } END_FOREACH_BLOCK_NO_LOCK; @@ -2692,21 +2730,36 @@ static void post_param_init (SgenMajorCollector *collector) { collector->sweeps_lazily = lazy_sweep; - collector->needs_thread_pool = concurrent_mark || concurrent_sweep; } -/* We are guaranteed to be called by the worker in question */ +/* + * We are guaranteed to be called by the worker in question. + * This provides initialization for threads that plan to do + * parallel object allocation. We need to store these lists + * in additional data structures so we can traverse them + * at major/sweep start. + */ static void -sgen_worker_init_callback (gpointer worker_untyped) +sgen_init_block_free_lists (gpointer *list_p) { int i; - WorkerData *worker = (WorkerData*) worker_untyped; - MSBlockInfo ***worker_free_blocks = (MSBlockInfo ***) sgen_alloc_internal_dynamic (sizeof (MSBlockInfo**) * MS_BLOCK_TYPE_MAX, INTERNAL_MEM_MS_TABLES, TRUE); + MSBlockInfo ***worker_free_blocks = (MSBlockInfo ***) mono_native_tls_get_value (worker_block_free_list_key); + + /* + * For simplification, a worker thread uses the same free block lists, + * regardless of the context it is part of (major/minor). 
+ */ + if (worker_free_blocks) { + *list_p = (gpointer)worker_free_blocks; + return; + } + + worker_free_blocks = (MSBlockInfo ***) sgen_alloc_internal_dynamic (sizeof (MSBlockInfo**) * MS_BLOCK_TYPE_MAX, INTERNAL_MEM_MS_TABLES, TRUE); for (i = 0; i < MS_BLOCK_TYPE_MAX; i++) worker_free_blocks [i] = (MSBlockInfo **) sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE); - worker->free_block_lists = worker_free_blocks; + *list_p = (gpointer)worker_free_blocks; mono_native_tls_set_value (worker_block_free_list_key, worker_free_blocks); } @@ -2716,7 +2769,15 @@ sgen_marksweep_init_internal (SgenMajorCollector *collector, gboolean is_concurr { int i; - sgen_register_fixed_internal_mem_type (INTERNAL_MEM_MS_BLOCK_INFO, sizeof (MSBlockInfo)); + ms_block_size = mono_pagesize (); + + if (ms_block_size < MS_BLOCK_SIZE_MIN) + ms_block_size = MS_BLOCK_SIZE_MIN; + + sgen_register_fixed_internal_mem_type (INTERNAL_MEM_MS_BLOCK_INFO, SIZEOF_MS_BLOCK_INFO); + + if (mono_cpu_count () <= 1) + is_parallel = FALSE; num_block_obj_sizes = ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, NULL); block_obj_sizes = (int *)sgen_alloc_internal_dynamic (sizeof (int) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE); @@ -2747,6 +2808,10 @@ sgen_marksweep_init_internal (SgenMajorCollector *collector, gboolean is_concurr for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES * 8; ++i) g_assert (MS_BLOCK_OBJ_SIZE_INDEX (i) == ms_find_block_obj_size_index (i)); + /* We can do this because we always init the minor before the major */ + if (is_parallel || sgen_get_minor_collector ()->is_parallel) + mono_native_tls_alloc (&worker_block_free_list_key, NULL); + mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced); mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed); mono_counters_register ("# major blocks lazy swept", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_lazy_swept); @@ -2755,12 +2820,11 @@ sgen_marksweep_init_internal (SgenMajorCollector *collector, gboolean is_concurr mono_counters_register ("# major blocks freed individually", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_individual); mono_counters_register ("# major blocks allocated less ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced_less_ideal); - collector->section_size = MAJOR_SECTION_SIZE; + collector->section_size = ms_block_size; concurrent_mark = is_concurrent; collector->is_concurrent = is_concurrent; collector->is_parallel = is_parallel; - collector->needs_thread_pool = is_concurrent || concurrent_sweep; collector->get_and_reset_num_major_objects_marked = major_get_and_reset_num_major_objects_marked; collector->supports_cardtable = TRUE; @@ -2806,6 +2870,7 @@ sgen_marksweep_init_internal (SgenMajorCollector *collector, gboolean is_concurr collector->is_valid_object = major_is_valid_object; collector->describe_pointer = major_describe_pointer; collector->count_cards = major_count_cards; + collector->init_block_free_lists = sgen_init_block_free_lists; collector->major_ops_serial.copy_or_mark_object = major_copy_or_mark_object_canonical; collector->major_ops_serial.scan_object = major_scan_object_with_evacuation; @@ -2836,10 +2901,6 @@ sgen_marksweep_init_internal (SgenMajorCollector *collector, gboolean is_concurr collector->major_ops_conc_par_finish.scan_vtype = major_scan_vtype_par_with_evacuation; 
collector->major_ops_conc_par_finish.scan_ptr_field = major_scan_ptr_field_par_with_evacuation; collector->major_ops_conc_par_finish.drain_gray_stack = drain_gray_stack_par; - - collector->worker_init_cb = sgen_worker_init_callback; - - mono_native_tls_alloc (&worker_block_free_list_key, NULL); } } @@ -2868,7 +2929,15 @@ sgen_marksweep_init_internal (SgenMajorCollector *collector, gboolean is_concurr SGEN_ASSERT (0, SGEN_MAX_SMALL_OBJ_SIZE <= MS_BLOCK_FREE / 2, "MAX_SMALL_OBJ_SIZE must be at most MS_BLOCK_FREE / 2"); /*cardtable requires major pages to be 8 cards aligned*/ - g_assert ((MS_BLOCK_SIZE % (8 * CARD_SIZE_IN_BYTES)) == 0); + g_assert ((ms_block_size % (8 * CARD_SIZE_IN_BYTES)) == 0); + + if (is_concurrent && is_parallel) + sgen_workers_create_context (GENERATION_OLD, mono_cpu_count ()); + else if (is_concurrent) + sgen_workers_create_context (GENERATION_OLD, 1); + + if (concurrent_sweep) + sweep_pool_context = sgen_thread_pool_create_context (1, NULL, NULL, NULL, NULL, NULL); } void
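
Reviewer notes (supplementary commentary, not part of the patch):

The core of this change is that the block size stops being the compile-time
constant MS_BLOCK_SIZE and becomes ms_block_size, read from mono_pagesize ()
during init and clamped to at least MS_BLOCK_SIZE_MIN (16K). The structural
consequence is that MSBlockInfo can no longer embed a fixed-size
mark_words [MS_NUM_MARK_WORDS] array: the struct now ends in a
MONO_ZERO_LEN_ARRAY trailing array, and allocation sites use
SIZEOF_MS_BLOCK_INFO instead of sizeof (MSBlockInfo). The stand-alone C
sketch below illustrates the technique; BlockHeader, ALLOC_ALIGN,
NUM_MARK_WORDS and block_for_obj are hypothetical stand-ins for this note,
not mono APIs:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALLOC_ALIGN 8        /* object alignment, in the spirit of SGEN_ALLOC_ALIGN */
static size_t block_size;    /* like ms_block_size: set once at startup */

/* One mark bit per ALLOC_ALIGN-sized slot, packed into 32-bit words.
 * Like MS_NUM_MARK_WORDS, this is now a runtime value. */
#define NUM_MARK_WORDS ((block_size / ALLOC_ALIGN + 31) / 32)

typedef struct {
	size_t obj_size;
	/* Trailing array: the real length is NUM_MARK_WORDS, known only at
	 * runtime, so declare length 1 (the MONO_ZERO_LEN_ARRAY idiom). */
	uint32_t mark_words [1];
} BlockHeader;

/* sizeof (BlockHeader) already counts one mark word; add the rest.
 * This mirrors SIZEOF_MS_BLOCK_INFO replacing sizeof (MSBlockInfo). */
#define SIZEOF_BLOCK_HEADER \
	(sizeof (BlockHeader) + sizeof (uint32_t) * (NUM_MARK_WORDS - 1))

/* Blocks are allocated block_size-aligned, so the block that owns an
 * interior pointer falls out of a mask, as in MS_BLOCK_DATA_FOR_OBJ.
 * This only works while block_size stays a power of two. */
static BlockHeader *
block_for_obj (void *obj)
{
	return (BlockHeader *)((uintptr_t)obj & ~(uintptr_t)(block_size - 1));
}

int
main (void)
{
	block_size = 16 * 1024;              /* as if mono_pagesize () reported 16K */
	assert ((block_size & (block_size - 1)) == 0);

	BlockHeader *block = aligned_alloc (block_size, block_size);  /* C11 */
	if (!block)
		return 1;
	printf ("runtime header size: %zu bytes (sizeof alone: %zu)\n",
	        SIZEOF_BLOCK_HEADER, sizeof (BlockHeader));

	char *obj = (char *)block + 512;     /* some object inside the block */
	assert (block_for_obj (obj) == block);

	free (block);
	return 0;
}

Two smaller groups of hunks follow from the same fact. cards_copy and
cards_preclean in scan_card_table_for_block become alloca allocations
because CARDS_PER_BLOCK is no longer usable as a compile-time array bound,
and the masks in MS_BLOCK_DATA_FOR_OBJ and ms_get_empty_block keep working
because ms_block_size remains a power of two (page sizes are powers of two,
as is the 16K minimum) and blocks stay ms_block_size-aligned.

The block-recycling loops the patch touches in ms_get_empty_block and
ms_free_block treat empty_blocks as a global lock-free stack: a free block
is linked through its own first word and pushed with a CAS, and the pop
side is analogous. A minimal sketch of that push, using C11 atomics in
place of SGEN_CAS_PTR (again a model, not mono's code):

#include <stdatomic.h>

static _Atomic(void *) empty_stack;   /* like empty_blocks */

static void
push_block (void *block)
{
	void *head;
	do {
		head = atomic_load (&empty_stack);
		*(void **)block = head;   /* thread the node through its first word */
	} while (!atomic_compare_exchange_weak (&empty_stack, &head, block));
}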