diff --git a/mono/sgen/sgen-workers.c b/mono/sgen/sgen-workers.c
index 49f5ccf3548..ccb9f9c5f27 100644
--- a/mono/sgen/sgen-workers.c
+++ b/mono/sgen/sgen-workers.c
@@ -1,5 +1,6 @@
-/*
- * sgen-workers.c: Worker threads for parallel and concurrent GC.
+/**
+ * \file
+ * Worker threads for parallel and concurrent GC.
  *
  * Copyright 2001-2003 Ximian, Inc
  * Copyright 2003-2010 Novell, Inc.
@@ -19,20 +20,7 @@
 #include "mono/utils/mono-membar.h"
 #include "mono/sgen/sgen-client.h"
 
-static int workers_num;
-static volatile gboolean forced_stop;
-static WorkerData *workers_data;
-
-/*
- * When using multiple workers, we need to have the last worker
- * enqueue the preclean jobs (if there are any). This lock ensures
- * that when the last worker takes it, all the other workers have
- * gracefully finished, so it can restart them.
- */
-static mono_mutex_t finished_lock;
-
-static SgenSectionGrayQueue workers_distribute_gray_queue;
-static gboolean workers_distribute_gray_queue_inited;
+static WorkerContext worker_contexts [GENERATION_MAX];
 
 /*
  * Allowed transitions:
@@ -56,14 +44,7 @@ enum {
 	STATE_WORK_ENQUEUED
 };
 
-typedef gint32 State;
-
-static SgenObjectOperations * volatile idle_func_object_ops;
-/*
- * finished_callback is called only when the workers finish work normally (when they
- * are not forced to finish). The callback is used to enqueue preclean jobs.
- */
-static volatile SgenWorkersFinishCallback finish_callback;
+#define SGEN_WORKER_MIN_SECTIONS_SIGNAL 4
 
 static guint64 stat_workers_num_finished;
 
@@ -75,8 +56,6 @@ set_state (WorkerData *data, State old_state, State new_state)
 		SGEN_ASSERT (0, old_state == STATE_WORKING, "We can only transition to NOT WORKING from WORKING");
 	else if (new_state == STATE_WORKING)
 		SGEN_ASSERT (0, old_state == STATE_WORK_ENQUEUED, "We can only transition to WORKING from WORK ENQUEUED");
-	if (new_state == STATE_NOT_WORKING || new_state == STATE_WORKING)
-		SGEN_ASSERT (6, sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ()), "Only the worker thread is allowed to transition to NOT_WORKING or WORKING");
 
 	return InterlockedCompareExchange (&data->state, new_state, old_state) == old_state;
 }
@@ -88,22 +67,34 @@ state_is_working_or_enqueued (State state)
 }
 
 static void
-sgen_workers_ensure_awake (void)
+sgen_workers_ensure_awake (WorkerContext *context)
 {
 	int i;
 	gboolean need_signal = FALSE;
 
-	for (i = 0; i < workers_num; i++) {
+	/*
+	 * All workers are awakened; make sure we reset the parallel context.
+	 * We call this function only when starting the workers, so nobody is running,
+	 * or when the last worker is enqueuing preclean work. In both cases we can't
+	 * have a worker working using a nopar context, which means it is safe.
+	 */
+	context->idle_func_object_ops = (context->active_workers_num > 1) ? context->idle_func_object_ops_par : context->idle_func_object_ops_nopar;
+	context->workers_finished = FALSE;
+
+	for (i = 0; i < context->active_workers_num; i++) {
 		State old_state;
 		gboolean did_set_state;
 
 		do {
-			old_state = workers_data [i].state;
+			old_state = context->workers_data [i].state;
 
 			if (old_state == STATE_WORK_ENQUEUED)
 				break;
 
-			did_set_state = set_state (&workers_data [i], old_state, STATE_WORK_ENQUEUED);
+			did_set_state = set_state (&context->workers_data [i], old_state, STATE_WORK_ENQUEUED);
+
+			if (did_set_state && old_state == STATE_NOT_WORKING)
+				context->workers_data [i].last_start = sgen_timestamp ();
 		} while (!did_set_state);
 
 		if (!state_is_working_or_enqueued (old_state))
@@ -111,7 +102,7 @@ sgen_workers_ensure_awake (void)
 	}
 
 	if (need_signal)
-		sgen_thread_pool_idle_signal ();
+		sgen_thread_pool_idle_signal (context->thread_pool_context);
 }
 
 static void
@@ -119,26 +110,36 @@ worker_try_finish (WorkerData *data)
 {
 	State old_state;
 	int i, working = 0;
+	WorkerContext *context = data->context;
+	gint64 last_start = data->last_start;
 
 	++stat_workers_num_finished;
 
-	mono_os_mutex_lock (&finished_lock);
+	mono_os_mutex_lock (&context->finished_lock);
 
-	for (i = 0; i < workers_num; i++) {
-		if (state_is_working_or_enqueued (workers_data [i].state))
+	for (i = 0; i < context->active_workers_num; i++) {
+		if (state_is_working_or_enqueued (context->workers_data [i].state))
 			working++;
 	}
 
 	if (working == 1) {
-		SgenWorkersFinishCallback callback = finish_callback;
+		SgenWorkersFinishCallback callback = context->finish_callback;
+		SGEN_ASSERT (0, context->idle_func_object_ops == context->idle_func_object_ops_nopar, "Why are we finishing with parallel context");
 		/* We are the last one left. Enqueue the preclean job if we have one and wake everybody */
 		SGEN_ASSERT (0, data->state != STATE_NOT_WORKING, "How did we get from doing idle work to NOT WORKING without setting it ourselves?");
 		if (callback) {
-			finish_callback = NULL;
+			context->finish_callback = NULL;
 			callback ();
+			context->worker_awakenings = 0;
 			/* Make sure each worker has a chance of seeing the enqueued jobs */
-			sgen_workers_ensure_awake ();
+			sgen_workers_ensure_awake (context);
 			SGEN_ASSERT (0, data->state == STATE_WORK_ENQUEUED, "Why did we fail to set our own state to ENQUEUED");
+
+			/*
+			 * Log so we can get the duration of the normal concurrent M&S phase.
+			 * Worker indexes are 1-based, since 0 is logically considered the GC thread.
+			 */
+			binary_protocol_worker_finish_stats (data - &context->workers_data [0] + 1, context->generation, context->forced_stop, data->major_scan_time, data->los_scan_time, data->total_time + sgen_timestamp () - last_start);
 			goto work_available;
 		}
 	}
@@ -152,19 +153,29 @@ worker_try_finish (WorkerData *data)
 		SGEN_ASSERT (0, old_state == STATE_WORKING, "What other possibility is there?");
 	} while (!set_state (data, old_state, STATE_NOT_WORKING));
 
-	mono_os_mutex_unlock (&finished_lock);
+	/*
+	 * If we are second to last to finish, we set the scan context to the non-parallel
+	 * version so we can speed up the last worker. This helps us maintain the same level
+	 * of performance as non-parallel mode even if we fail to distribute work properly.
+	 */
+	if (working == 2)
+		context->idle_func_object_ops = context->idle_func_object_ops_nopar;
+
+	context->workers_finished = TRUE;
+	mono_os_mutex_unlock (&context->finished_lock);
 
-	binary_protocol_worker_finish (sgen_timestamp (), forced_stop);
+	data->total_time += (sgen_timestamp () - last_start);
+	binary_protocol_worker_finish_stats (data - &context->workers_data [0] + 1, context->generation, context->forced_stop, data->major_scan_time, data->los_scan_time, data->total_time);
 
 	sgen_gray_object_queue_trim_free_list (&data->private_gray_queue);
 	return;
 
 work_available:
-	mono_os_mutex_unlock (&finished_lock);
+	mono_os_mutex_unlock (&context->finished_lock);
 }
 
 void
-sgen_workers_enqueue_job (SgenThreadPoolJob *job, gboolean enqueue)
+sgen_workers_enqueue_job (int generation, SgenThreadPoolJob *job, gboolean enqueue)
 {
 	if (!enqueue) {
 		job->func (NULL, job);
@@ -172,38 +183,58 @@ sgen_workers_enqueue_job (SgenThreadPoolJob *job, gboolean enqueue)
 		return;
 	}
 
-	sgen_thread_pool_job_enqueue (job);
+	sgen_thread_pool_job_enqueue (worker_contexts [generation].thread_pool_context, job);
 }
 
-void
-sgen_workers_wait_for_jobs_finished (void)
+static gboolean
+workers_get_work (WorkerData *data)
 {
-	sgen_thread_pool_wait_for_all_jobs ();
-	/*
-	 * If the idle task was never triggered or it finished before the last job did and
-	 * then didn't get triggered again, we might end up in the situation of having
-	 * something in the gray queue yet the idle task not working. The easiest way to
-	 * make sure this doesn't stay that way is to just trigger it again after all jobs
-	 * have finished.
-	 */
-	sgen_workers_ensure_awake ();
+	SgenMajorCollector *major = sgen_get_major_collector ();
+	SgenMinorCollector *minor = sgen_get_minor_collector ();
+	GrayQueueSection *section;
+
+	g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
+	g_assert (major->is_concurrent || minor->is_parallel);
+
+	section = sgen_section_gray_queue_dequeue (&data->context->workers_distribute_gray_queue);
+	if (section) {
+		sgen_gray_object_enqueue_section (&data->private_gray_queue, section, major->is_parallel);
+		return TRUE;
+	}
+
+	/* Nobody to steal from */
+	g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
+	return FALSE;
 }
 
 static gboolean
-workers_get_work (WorkerData *data)
+workers_steal_work (WorkerData *data)
 {
-	SgenMajorCollector *major;
+	SgenMajorCollector *major = sgen_get_major_collector ();
+	SgenMinorCollector *minor = sgen_get_minor_collector ();
+	int generation = sgen_get_current_collection_generation ();
+	GrayQueueSection *section = NULL;
+	WorkerContext *context = data->context;
+	int i, current_worker;
+
+	if ((generation == GENERATION_OLD && !major->is_parallel) ||
+	    (generation == GENERATION_NURSERY && !minor->is_parallel))
+		return FALSE;
 
+	/* If we're parallel, steal from other workers' private gray queues */
 	g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
 
-	/* If we're concurrent, steal from the workers distribute gray queue. */
-	major = sgen_get_major_collector ();
-	if (major->is_concurrent) {
-		GrayQueueSection *section = sgen_section_gray_queue_dequeue (&workers_distribute_gray_queue);
-		if (section) {
-			sgen_gray_object_enqueue_section (&data->private_gray_queue, section);
-			return TRUE;
-		}
+	current_worker = (int) (data - context->workers_data);
+
+	for (i = 1; i < context->active_workers_num && !section; i++) {
+		int steal_worker = (current_worker + i) % context->active_workers_num;
+		if (state_is_working_or_enqueued (context->workers_data [steal_worker].state))
+			section = sgen_gray_object_steal_section (&context->workers_data [steal_worker].private_gray_queue);
+	}
+
+	if (section) {
+		sgen_gray_object_enqueue_section (&data->private_gray_queue, section, TRUE);
+		return TRUE;
 	}
 
 	/* Nobody to steal from */
@@ -232,157 +263,271 @@ thread_pool_init_func (void *data_untyped)
 {
 	WorkerData *data = (WorkerData *)data_untyped;
 	SgenMajorCollector *major = sgen_get_major_collector ();
+	SgenMinorCollector *minor = sgen_get_minor_collector ();
 
-	sgen_client_thread_register_worker ();
-
-	if (!major->is_concurrent)
+	if (!major->is_concurrent && !minor->is_parallel)
 		return;
 
 	init_private_gray_queue (data);
+
+	/* Separate WorkerData instances for the same thread share free_block_lists */
+	if (major->is_parallel || minor->is_parallel)
+		major->init_block_free_lists (&data->free_block_lists);
 }
 
 static gboolean
-continue_idle_func (void *data_untyped)
+sgen_workers_are_working (WorkerContext *context)
 {
-	if (data_untyped) {
-		WorkerData *data = (WorkerData *)data_untyped;
-		return state_is_working_or_enqueued (data->state);
-	} else {
-		/* Return if any of the threads is working */
-		return !sgen_workers_all_done ();
+	int i;
+
+	for (i = 0; i < context->active_workers_num; i++) {
+		if (state_is_working_or_enqueued (context->workers_data [i].state))
+			return TRUE;
 	}
+	return FALSE;
+}
+
+static gboolean
+continue_idle_func (void *data_untyped, int thread_pool_context)
+{
+	if (data_untyped)
+		return state_is_working_or_enqueued (((WorkerData*)data_untyped)->state);
+
+	/* Return whether any of the threads is working in the context */
+	if (worker_contexts [GENERATION_NURSERY].workers_num && worker_contexts [GENERATION_NURSERY].thread_pool_context == thread_pool_context)
+		return sgen_workers_are_working (&worker_contexts [GENERATION_NURSERY]);
+	if (worker_contexts [GENERATION_OLD].workers_num && worker_contexts [GENERATION_OLD].thread_pool_context == thread_pool_context)
+		return sgen_workers_are_working (&worker_contexts [GENERATION_OLD]);
+
+	g_assert_not_reached ();
+	return FALSE;
+}
+
+static gboolean
+should_work_func (void *data_untyped)
+{
+	WorkerData *data = (WorkerData*)data_untyped;
+	WorkerContext *context = data->context;
+	int current_worker = (int) (data - context->workers_data);
+
+	return context->started && current_worker < context->active_workers_num && state_is_working_or_enqueued (data->state);
 }
 
 static void
 marker_idle_func (void *data_untyped)
 {
 	WorkerData *data = (WorkerData *)data_untyped;
+	WorkerContext *context = data->context;
 
-	SGEN_ASSERT (0, continue_idle_func (data_untyped), "Why are we called when we're not supposed to work?");
-	SGEN_ASSERT (0, sgen_concurrent_collection_in_progress (), "The worker should only mark in concurrent collections.");
+	SGEN_ASSERT (0, continue_idle_func (data_untyped, context->thread_pool_context), "Why are we called when we're not supposed to work?");
 
 	if (data->state == STATE_WORK_ENQUEUED) {
 		set_state (data, STATE_WORK_ENQUEUED, STATE_WORKING);
 		SGEN_ASSERT (0, data->state != STATE_NOT_WORKING, "How did we get from WORK ENQUEUED to NOT WORKING?");
 	}
 
-	if (!forced_stop && (!sgen_gray_object_queue_is_empty (&data->private_gray_queue) || workers_get_work (data))) {
-		ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (idle_func_object_ops, &data->private_gray_queue);
+	if (!context->forced_stop && (!sgen_gray_object_queue_is_empty (&data->private_gray_queue) || workers_get_work (data) || workers_steal_work (data))) {
+		ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (context->idle_func_object_ops, &data->private_gray_queue);
 
 		SGEN_ASSERT (0, !sgen_gray_object_queue_is_empty (&data->private_gray_queue), "How is our gray queue empty if we just got work?");
 
 		sgen_drain_gray_stack (ctx);
+
+		if (data->private_gray_queue.num_sections >= SGEN_WORKER_MIN_SECTIONS_SIGNAL
+				&& context->workers_finished && context->worker_awakenings < context->active_workers_num) {
+			/* We bound the number of worker awakenings just to be sure */
+			context->worker_awakenings++;
+			mono_os_mutex_lock (&context->finished_lock);
+			sgen_workers_ensure_awake (context);
+			mono_os_mutex_unlock (&context->finished_lock);
+		}
 	} else {
 		worker_try_finish (data);
 	}
 }
 
 static void
-init_distribute_gray_queue (void)
+init_distribute_gray_queue (WorkerContext *context)
 {
-	if (workers_distribute_gray_queue_inited) {
-		g_assert (sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue));
-		g_assert (workers_distribute_gray_queue.locked);
-		return;
-	}
-
-	sgen_section_gray_queue_init (&workers_distribute_gray_queue, TRUE,
+	sgen_section_gray_queue_init (&context->workers_distribute_gray_queue, TRUE,
 			sgen_get_major_collector ()->is_concurrent ? concurrent_enqueue_check : NULL);
-	workers_distribute_gray_queue_inited = TRUE;
 }
 
 void
-sgen_workers_init_distribute_gray_queue (void)
+sgen_workers_create_context (int generation, int num_workers)
 {
-	SGEN_ASSERT (0, sgen_get_major_collector ()->is_concurrent,
-			"Why should we init the distribute gray queue if we don't need it?");
-	init_distribute_gray_queue ();
+	static gboolean stat_inited = FALSE;
+	int i;
+	WorkerData **workers_data_ptrs = (WorkerData**)sgen_alloc_internal_dynamic (num_workers * sizeof(WorkerData*), INTERNAL_MEM_WORKER_DATA, TRUE);
+	WorkerContext *context = &worker_contexts [generation];
+
+	SGEN_ASSERT (0, !context->workers_num, "We can't init the worker context for a generation twice");
+
+	mono_os_mutex_init (&context->finished_lock);
+
+	context->generation = generation;
+	context->workers_num = num_workers;
+	context->active_workers_num = num_workers;
+
+	context->workers_data = (WorkerData *)sgen_alloc_internal_dynamic (sizeof (WorkerData) * num_workers, INTERNAL_MEM_WORKER_DATA, TRUE);
+	memset (context->workers_data, 0, sizeof (WorkerData) * num_workers);
+
+	init_distribute_gray_queue (context);
+
+	for (i = 0; i < num_workers; ++i) {
+		workers_data_ptrs [i] = &context->workers_data [i];
+		context->workers_data [i].context = context;
+	}
+
+	context->thread_pool_context = sgen_thread_pool_create_context (num_workers, thread_pool_init_func, marker_idle_func, continue_idle_func, should_work_func, (void**)workers_data_ptrs);
+
+	if (!stat_inited) {
+		mono_counters_register ("# workers finished", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_workers_num_finished);
+		stat_inited = TRUE;
+	}
 }
 
-void
-sgen_workers_init (int num_workers)
+/* This is called with the thread pool lock held, so no context switch can happen */
+static gboolean
+continue_idle_wait (int calling_context, int *threads_context)
 {
+	WorkerContext *context;
 	int i;
-	void **workers_data_ptrs = (void **)alloca(num_workers * sizeof(void *));
 
-	if (!sgen_get_major_collector ()->is_concurrent) {
-		sgen_thread_pool_init (num_workers, thread_pool_init_func, NULL, NULL, NULL);
-		return;
+	if (worker_contexts [GENERATION_OLD].workers_num && calling_context == worker_contexts [GENERATION_OLD].thread_pool_context)
+		context = &worker_contexts [GENERATION_OLD];
+	else if (worker_contexts [GENERATION_NURSERY].workers_num && calling_context == worker_contexts [GENERATION_NURSERY].thread_pool_context)
+		context = &worker_contexts [GENERATION_NURSERY];
+	else
+		g_assert_not_reached ();
+
+	/*
+	 * We assume there are no pending jobs, since this is called only after
+	 * we waited for all the jobs.
+	 */
+	for (i = 0; i < context->active_workers_num; i++) {
+		if (threads_context [i] == calling_context)
+			return TRUE;
+	}
+
+	if (sgen_workers_have_idle_work (context->generation) && !context->forced_stop)
+		return TRUE;
+
+	/*
+	 * At this point there are no jobs to be done, and no objects to be scanned
+	 * in the gray queues. We can simply asynchronously finish all the workers
+	 * from the context that were not finished already (due to being stuck working
+	 * in another context).
+	 */
+
+	for (i = 0; i < context->active_workers_num; i++) {
+		if (context->workers_data [i].state == STATE_WORK_ENQUEUED)
+			set_state (&context->workers_data [i], STATE_WORK_ENQUEUED, STATE_WORKING);
+		if (context->workers_data [i].state == STATE_WORKING)
+			worker_try_finish (&context->workers_data [i]);
 	}
 
-	mono_os_mutex_init (&finished_lock);
-	//g_print ("initing %d workers\n", num_workers);
+	return FALSE;
+}
 
-	workers_num = num_workers;
-	workers_data = (WorkerData *)sgen_alloc_internal_dynamic (sizeof (WorkerData) * num_workers, INTERNAL_MEM_WORKER_DATA, TRUE);
-	memset (workers_data, 0, sizeof (WorkerData) * num_workers);
+void
+sgen_workers_stop_all_workers (int generation)
+{
+	WorkerContext *context = &worker_contexts [generation];
 
-	init_distribute_gray_queue ();
+	mono_os_mutex_lock (&context->finished_lock);
+	context->finish_callback = NULL;
+	mono_os_mutex_unlock (&context->finished_lock);
 
-	for (i = 0; i < num_workers; ++i)
-		workers_data_ptrs [i] = (void *) &workers_data [i];
+	context->forced_stop = TRUE;
 
-	sgen_thread_pool_init (num_workers, thread_pool_init_func, marker_idle_func, continue_idle_func, workers_data_ptrs);
+	sgen_thread_pool_wait_for_all_jobs (context->thread_pool_context);
+	sgen_thread_pool_idle_wait (context->thread_pool_context, continue_idle_wait);
+	SGEN_ASSERT (0, !sgen_workers_are_working (context), "Can only signal enqueue work when in no work state");
 
-	mono_counters_register ("# workers finished", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_workers_num_finished);
+	context->started = FALSE;
 }
 
 void
-sgen_workers_stop_all_workers (void)
+sgen_workers_set_num_active_workers (int generation, int num_workers)
 {
-	finish_callback = NULL;
-	mono_memory_write_barrier ();
-	forced_stop = TRUE;
-
-	sgen_thread_pool_wait_for_all_jobs ();
-	sgen_thread_pool_idle_wait ();
-	SGEN_ASSERT (0, sgen_workers_all_done (), "Can only signal enqueue work when in no work state");
+	WorkerContext *context = &worker_contexts [generation];
+	if (num_workers) {
+		SGEN_ASSERT (0, num_workers <= context->workers_num, "We can't start more workers than we initialized");
+		context->active_workers_num = num_workers;
+	} else {
+		context->active_workers_num = context->workers_num;
+	}
 }
 
 void
-sgen_workers_start_all_workers (SgenObjectOperations *object_ops, SgenWorkersFinishCallback callback)
+sgen_workers_start_all_workers (int generation, SgenObjectOperations *object_ops_nopar, SgenObjectOperations *object_ops_par, SgenWorkersFinishCallback callback)
 {
-	forced_stop = FALSE;
-	idle_func_object_ops = object_ops;
-	finish_callback = callback;
+	WorkerContext *context = &worker_contexts [generation];
+	int i;
+	SGEN_ASSERT (0, !context->started, "Why are we starting to work without finishing previous cycle");
+
+	context->idle_func_object_ops_par = object_ops_par;
+	context->idle_func_object_ops_nopar = object_ops_nopar;
+	context->forced_stop = FALSE;
+	context->finish_callback = callback;
+	context->worker_awakenings = 0;
+	context->started = TRUE;
+
+	for (i = 0; i < context->active_workers_num; i++) {
+		context->workers_data [i].major_scan_time = 0;
+		context->workers_data [i].los_scan_time = 0;
+		context->workers_data [i].total_time = 0;
+		context->workers_data [i].last_start = 0;
+	}
 	mono_memory_write_barrier ();
 
-	sgen_workers_ensure_awake ();
+	/*
+	 * We expect workers to start finishing only after all of them were awakened.
+	 * Otherwise we might think that we have fewer workers and use the wrong context.
+	 */
+	mono_os_mutex_lock (&context->finished_lock);
+	sgen_workers_ensure_awake (context);
+	mono_os_mutex_unlock (&context->finished_lock);
 }
 
 void
-sgen_workers_join (void)
+sgen_workers_join (int generation)
 {
+	WorkerContext *context = &worker_contexts [generation];
 	int i;
 
-	sgen_thread_pool_wait_for_all_jobs ();
-	sgen_thread_pool_idle_wait ();
-	SGEN_ASSERT (0, sgen_workers_all_done (), "Can only signal enqueue work when in no work state");
+	SGEN_ASSERT (0, !context->finish_callback, "Why are we joining concurrent mark early");
+
+	sgen_thread_pool_wait_for_all_jobs (context->thread_pool_context);
+	sgen_thread_pool_idle_wait (context->thread_pool_context, continue_idle_wait);
+	SGEN_ASSERT (0, !sgen_workers_are_working (context), "Can only signal enqueue work when in no work state");
 
 	/* At this point all the workers have stopped. */
 
-	SGEN_ASSERT (0, sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue), "Why is there still work left to do?");
-	for (i = 0; i < workers_num; ++i)
-		SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (&workers_data [i].private_gray_queue), "Why is there still work left to do?");
+	SGEN_ASSERT (0, sgen_section_gray_queue_is_empty (&context->workers_distribute_gray_queue), "Why is there still work left to do?");
+	for (i = 0; i < context->active_workers_num; ++i)
+		SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (&context->workers_data [i].private_gray_queue), "Why is there still work left to do?");
+
+	context->started = FALSE;
 }
 
 /*
- * Can only be called if the workers are stopped.
- * If we're stopped, there are also no pending jobs.
+ * Can only be called if the workers are not working in the
+ * context and there are no pending jobs.
 */
 gboolean
-sgen_workers_have_idle_work (void)
+sgen_workers_have_idle_work (int generation)
 {
+	WorkerContext *context = &worker_contexts [generation];
 	int i;
 
-	SGEN_ASSERT (0, forced_stop && sgen_workers_all_done (), "Checking for idle work should only happen if the workers are stopped.");
-
-	if (!sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue))
+	if (!sgen_section_gray_queue_is_empty (&context->workers_distribute_gray_queue))
 		return TRUE;
 
-	for (i = 0; i < workers_num; ++i) {
-		if (!sgen_gray_object_queue_is_empty (&workers_data [i].private_gray_queue))
+	for (i = 0; i < context->active_workers_num; ++i) {
+		if (!sgen_gray_object_queue_is_empty (&context->workers_data [i].private_gray_queue))
 			return TRUE;
 	}
 
@@ -392,51 +537,72 @@ sgen_workers_have_idle_work (void)
 gboolean
 sgen_workers_all_done (void)
 {
-	int i;
+	if (worker_contexts [GENERATION_NURSERY].workers_num && sgen_workers_are_working (&worker_contexts [GENERATION_NURSERY]))
+		return FALSE;
+	if (worker_contexts [GENERATION_OLD].workers_num && sgen_workers_are_working (&worker_contexts [GENERATION_OLD]))
+		return FALSE;
 
-	for (i = 0; i < workers_num; i++) {
-		if (state_is_working_or_enqueued (workers_data [i].state))
-			return FALSE;
-	}
 	return TRUE;
 }
 
-/* Must only be used for debugging */
-gboolean
-sgen_workers_are_working (void)
-{
-	return !sgen_workers_all_done ();
-}
-
 void
-sgen_workers_assert_gray_queue_is_empty (void)
+sgen_workers_assert_gray_queue_is_empty (int generation)
 {
-	SGEN_ASSERT (0, sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue), "Why is the workers gray queue not empty?");
+	SGEN_ASSERT (0, sgen_section_gray_queue_is_empty (&worker_contexts [generation].workers_distribute_gray_queue), "Why is the workers gray queue not empty?");
 }
 
 void
-sgen_workers_take_from_queue_and_awake (SgenGrayQueue *queue)
+sgen_workers_take_from_queue (int generation, SgenGrayQueue *queue)
 {
-	gboolean wake = FALSE;
+	WorkerContext *context = &worker_contexts [generation];
+
+	sgen_gray_object_spread (queue, sgen_workers_get_job_split_count (generation));
 
 	for (;;) {
 		GrayQueueSection *section = sgen_gray_object_dequeue_section (queue);
 		if (!section)
 			break;
-		sgen_section_gray_queue_enqueue (&workers_distribute_gray_queue, section);
-		wake = TRUE;
+		sgen_section_gray_queue_enqueue (&context->workers_distribute_gray_queue, section);
 	}
 
-	if (wake) {
-		SGEN_ASSERT (0, sgen_concurrent_collection_in_progress (), "Why is there work to take when there's no concurrent collection in progress?");
-		sgen_workers_ensure_awake ();
-	}
+	SGEN_ASSERT (0, !sgen_workers_are_working (context), "We should fully populate the distribute gray queue before we start the workers");
 }
 
 SgenObjectOperations*
-sgen_workers_get_idle_func_object_ops (void)
+sgen_workers_get_idle_func_object_ops (WorkerData *worker)
+{
+	g_assert (worker->context->idle_func_object_ops);
+	return worker->context->idle_func_object_ops;
+}
+
+/*
+ * If we have a single worker, splitting into multiple jobs makes no sense. With
+ * more than one worker, we split into a larger number of jobs so that, in case
+ * the workload is uneven, a worker that finished quickly can take up more jobs
+ * than another one.
+ *
+ * We also return 1 if there is no worker context for that generation.
+ */
+int
+sgen_workers_get_job_split_count (int generation)
+{
+	return (worker_contexts [generation].active_workers_num > 1) ? worker_contexts [generation].active_workers_num * 4 : 1;
+}
+
+void
+sgen_workers_foreach (int generation, SgenWorkerCallback callback)
+{
+	WorkerContext *context = &worker_contexts [generation];
+	int i;
+
+	for (i = 0; i < context->workers_num; i++)
+		callback (&context->workers_data [i]);
+}
+
+gboolean
+sgen_workers_is_worker_thread (MonoNativeThreadId id)
 {
-	return idle_func_object_ops;
+	return sgen_thread_pool_is_thread_pool_thread (id);
 }
 
 #endif
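
The worker state machine in the patch (set_state, sgen_workers_ensure_awake, worker_try_finish) hinges on a compare-and-swap: a transition succeeds only if nobody changed the state underneath, which is what lets wake-ups race safely against the workers themselves. A minimal standalone sketch of that discipline, using C11 atomics in place of mono's InterlockedCompareExchange (ToyWorker and toy_set_state are illustrative names, not sgen API):

#include <stdatomic.h>
#include <stdio.h>

enum { STATE_NOT_WORKING, STATE_WORKING, STATE_WORK_ENQUEUED };

typedef struct { atomic_int state; } ToyWorker;

/* Mirrors set_state: the transition succeeds only if the state still holds
 * old_state, so concurrent wake-ups and workers cannot clobber each other. */
static int
toy_set_state (ToyWorker *w, int old_state, int new_state)
{
	return atomic_compare_exchange_strong (&w->state, &old_state, new_state);
}

int
main (void)
{
	ToyWorker w;
	atomic_init (&w.state, STATE_NOT_WORKING);

	/* ensure_awake path: NOT_WORKING -> WORK_ENQUEUED */
	printf ("enqueue: %d\n", toy_set_state (&w, STATE_NOT_WORKING, STATE_WORK_ENQUEUED));
	/* marker_idle_func path: WORK_ENQUEUED -> WORKING */
	printf ("start:   %d\n", toy_set_state (&w, STATE_WORK_ENQUEUED, STATE_WORKING));
	/* a stale transition fails: the state is WORKING by now */
	printf ("stale:   %d\n", toy_set_state (&w, STATE_NOT_WORKING, STATE_WORK_ENQUEUED));
	return 0;
}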
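Likewise, the stealing loop in workers_steal_work visits victims in round-robin order starting at the stealer's right-hand neighbor, so concurrent stealers spread across different victim queues, and sgen_workers_get_job_split_count oversplits work into four jobs per worker so a fast worker can pick up extra jobs. A minimal sketch of both calculations, assuming four active workers (job_split_count is a stand-in here, not the sgen function):

#include <stdio.h>

/* Stand-in for sgen_workers_get_job_split_count: with more than one worker,
 * split into 4 jobs per worker; with one worker (or no context), don't split. */
static int
job_split_count (int active_workers_num)
{
	return (active_workers_num > 1) ? active_workers_num * 4 : 1;
}

int
main (void)
{
	int active_workers_num = 4;
	int current_worker = 1;	/* the worker whose private gray queue ran dry */
	int i;

	/* Same victim order as the loop in workers_steal_work: start at the
	 * next worker and wrap around modulo the active worker count. */
	printf ("worker %d tries to steal from:", current_worker);
	for (i = 1; i < active_workers_num; i++)
		printf (" %d", (current_worker + i) % active_workers_num);
	printf ("\n");	/* worker 1 tries to steal from: 2 3 0 */

	printf ("job split count: %d\n", job_split_count (active_workers_num));	/* 16 */
	return 0;
}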