/*
 * native threadpool worker
 *
 * Author:
 *	Ludovic Henry (ludovic.henry@xamarin.com)
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
12 #define _USE_MATH_DEFINES // needed by MSVC to define math constants
17 #include <mono/metadata/class-internals.h>
18 #include <mono/metadata/exception.h>
19 #include <mono/metadata/gc-internals.h>
20 #include <mono/metadata/object.h>
21 #include <mono/metadata/object-internals.h>
22 #include <mono/metadata/threadpool.h>
23 #include <mono/metadata/threadpool-worker.h>
24 #include <mono/metadata/threadpool-io.h>
25 #include <mono/metadata/w32event.h>
26 #include <mono/utils/atomic.h>
27 #include <mono/utils/mono-compiler.h>
28 #include <mono/utils/mono-complex.h>
29 #include <mono/utils/mono-logger.h>
30 #include <mono/utils/mono-logger-internals.h>
31 #include <mono/utils/mono-proclib.h>
32 #include <mono/utils/mono-threads.h>
33 #include <mono/utils/mono-time.h>
34 #include <mono/utils/mono-rand.h>
35 #include <mono/utils/refcount.h>
36 #include <mono/utils/w32api.h>
/* CPU usage thresholds (percent) used by the monitor thread and the hill
 * climbing heuristic to decide whether adding threads can still help. */
#define CPU_USAGE_LOW 80
#define CPU_USAGE_HIGH 95

#define MONITOR_INTERVAL 500 // ms
/* Parenthesized so the macro composes safely inside larger expressions. */
#define MONITOR_MINIMAL_LIFETIME (60 * 1000) // ms

/* Rate limit on thread creation, enforced in worker_try_create. */
#define WORKER_CREATION_MAX_PER_SEC 10

/* The exponent to apply to the gain. 1.0 means to use linear gain,
 * higher values will enhance large moves and damp small ones.
 */
#define HILL_CLIMBING_GAIN_EXPONENT 2.0

/* The 'cost' of a thread. 0 means drive for increased throughput regardless
 * of thread count, higher values bias more against higher thread counts.
 */
#define HILL_CLIMBING_BIAS 0.15

#define HILL_CLIMBING_WAVE_PERIOD 4
#define HILL_CLIMBING_MAX_WAVE_MAGNITUDE 20
#define HILL_CLIMBING_WAVE_MAGNITUDE_MULTIPLIER 1.0
#define HILL_CLIMBING_WAVE_HISTORY_SIZE 8
#define HILL_CLIMBING_TARGET_SIGNAL_TO_NOISE_RATIO 3.0
#define HILL_CLIMBING_MAX_CHANGE_PER_SECOND 4
#define HILL_CLIMBING_MAX_CHANGE_PER_SAMPLE 20
#define HILL_CLIMBING_SAMPLE_INTERVAL_LOW 10
#define HILL_CLIMBING_SAMPLE_INTERVAL_HIGH 200
#define HILL_CLIMBING_ERROR_SMOOTHING_FACTOR 0.01
#define HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT 0.15
/* Reasons the hill climbing heuristic changed (or kept) the thread count;
 * used when tracing/reporting thread count adjustments. */
typedef enum {
	/* Restored: referenced as the default transition in hill_climbing_update. */
	TRANSITION_WARMUP,
	TRANSITION_INITIALIZING,
	TRANSITION_RANDOM_MOVE,
	TRANSITION_CLIMBING_MOVE,
	TRANSITION_CHANGE_POINT,
	TRANSITION_STABILIZING,
	TRANSITION_STARVATION,
	TRANSITION_THREAD_TIMED_OUT,
	/* NOTE(review): upstream implementations also carry an UNDO state here;
	 * it is not referenced in this chunk, so it is intentionally omitted. */
} ThreadPoolHeuristicStateTransition;
82 gint32 samples_to_measure;
83 gdouble target_throughput_ratio;
84 gdouble target_signal_to_noise_ratio;
85 gdouble max_change_per_second;
86 gdouble max_change_per_sample;
87 gint32 max_thread_wave_magnitude;
88 gint32 sample_interval_low;
89 gdouble thread_magnitude_multiplier;
90 gint32 sample_interval_high;
91 gdouble throughput_error_smoothing_factor;
92 gdouble gain_exponent;
93 gdouble max_sample_error;
95 gdouble current_control_setting;
97 gint16 last_thread_count;
98 gdouble elapsed_since_last_change;
99 gdouble completions_since_last_change;
101 gdouble average_throughput_noise;
104 gdouble *thread_counts;
106 guint32 current_sample_interval;
107 gpointer random_interval_generator;
109 gint32 accumulated_completion_count;
110 gdouble accumulated_sample_duration;
111 } ThreadPoolHillClimbing;
115 gint16 max_working; /* determined by heuristic */
116 gint16 starting; /* starting, but not yet in worker_thread */
117 gint16 working; /* executing worker_thread */
118 gint16 parked; /* parked */
121 } ThreadPoolWorkerCounter
123 __attribute__((aligned(64)))
130 MonoThreadPoolWorkerCallback callback;
132 ThreadPoolWorkerCounter counters;
134 MonoCoopMutex parked_threads_lock;
135 gint32 parked_threads_count;
136 MonoCoopCond parked_threads_cond;
138 volatile gint32 work_items_count;
140 guint32 worker_creation_current_second;
141 guint32 worker_creation_current_count;
142 MonoCoopMutex worker_creation_lock;
144 gint32 heuristic_completions;
145 gint64 heuristic_sample_start;
146 gint64 heuristic_last_dequeue; // ms
147 gint64 heuristic_last_adjustment; // ms
148 gint64 heuristic_adjustment_interval; // ms
149 ThreadPoolHillClimbing heuristic_hill_climbing;
150 MonoCoopMutex heuristic_lock;
152 gint32 limit_worker_min;
153 gint32 limit_worker_max;
155 MonoCpuUsageState *cpu_usage_state;
158 /* suspended by the debugger */
161 gint32 monitor_status;
165 MONITOR_STATUS_REQUESTED,
166 MONITOR_STATUS_WAITING_FOR_REQUEST,
167 MONITOR_STATUS_NOT_RUNNING,
170 static ThreadPoolWorker worker;
172 #define COUNTER_CHECK(counter) \
174 g_assert (counter._.max_working > 0); \
175 g_assert (counter._.starting >= 0); \
176 g_assert (counter._.working >= 0); \
179 #define COUNTER_ATOMIC(var,block) \
181 ThreadPoolWorkerCounter __old; \
183 __old = COUNTER_READ (); \
186 COUNTER_CHECK (var); \
187 } while (InterlockedCompareExchange64 (&worker.counters.as_gint64, (var).as_gint64, __old.as_gint64) != __old.as_gint64); \
190 static inline ThreadPoolWorkerCounter
193 ThreadPoolWorkerCounter counter;
194 counter.as_gint64 = InterlockedRead64 (&worker.counters.as_gint64);
/* NOTE(review): the signatures/braces of rand_create and parts of rand_next
 * were lost in this chunk; the surviving lines are annotated in place. */
/* rand_create: allocate a runtime PRNG handle (no explicit seed). */
	return mono_rand_init (NULL, 0);

/* rand_next: uniform random guint32 drawn from *handle in [min, max]. */
rand_next (gpointer *handle, guint32 min, guint32 max)
	mono_rand_try_get_uint32 (handle, &val, min, max, &error);
	// FIXME handle error
	mono_error_assert_ok (&error);
/* Refcount destructor for the global `worker`: runs once the last reference
 * is dropped; tears down synchronization objects and frees the CPU usage
 * snapshot allocated in mono_threadpool_worker_init. */
destroy (gpointer data)
	mono_coop_mutex_destroy (&worker.parked_threads_lock);
	mono_coop_cond_destroy (&worker.parked_threads_cond);

	mono_coop_mutex_destroy (&worker.worker_creation_lock);

	mono_coop_mutex_destroy (&worker.heuristic_lock);

	g_free (worker.cpu_usage_state);
/* One-time initialization of the native threadpool worker: sets up locks,
 * seeds the hill climbing heuristic from the HILL_CLIMBING_* constants, and
 * derives the worker-thread limits from the CPU count (optionally scaled by
 * the MONO_THREADS_PER_CPU environment variable, clamped to [1, 50]). */
mono_threadpool_worker_init (MonoThreadPoolWorkerCallback callback)
	ThreadPoolHillClimbing *hc;
	const char *threads_per_cpu_env;
	gint threads_per_cpu;

	/* `destroy` runs when the refcount drops to zero (see cleanup). */
	mono_refcount_init (&worker, destroy);

	worker.callback = callback;

	mono_coop_mutex_init (&worker.parked_threads_lock);
	worker.parked_threads_count = 0;
	mono_coop_cond_init (&worker.parked_threads_cond);

	/* -1 forces the first worker_try_create to reset the per-second count. */
	worker.worker_creation_current_second = -1;
	mono_coop_mutex_init (&worker.worker_creation_lock);

	worker.heuristic_adjustment_interval = 10;
	mono_coop_mutex_init (&worker.heuristic_lock);

	hc = &worker.heuristic_hill_climbing;

	hc->wave_period = HILL_CLIMBING_WAVE_PERIOD;
	hc->max_thread_wave_magnitude = HILL_CLIMBING_MAX_WAVE_MAGNITUDE;
	hc->thread_magnitude_multiplier = (gdouble) HILL_CLIMBING_WAVE_MAGNITUDE_MULTIPLIER;
	hc->samples_to_measure = hc->wave_period * HILL_CLIMBING_WAVE_HISTORY_SIZE;
	hc->target_throughput_ratio = (gdouble) HILL_CLIMBING_BIAS;
	hc->target_signal_to_noise_ratio = (gdouble) HILL_CLIMBING_TARGET_SIGNAL_TO_NOISE_RATIO;
	hc->max_change_per_second = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SECOND;
	hc->max_change_per_sample = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SAMPLE;
	hc->sample_interval_low = HILL_CLIMBING_SAMPLE_INTERVAL_LOW;
	hc->sample_interval_high = HILL_CLIMBING_SAMPLE_INTERVAL_HIGH;
	hc->throughput_error_smoothing_factor = (gdouble) HILL_CLIMBING_ERROR_SMOOTHING_FACTOR;
	hc->gain_exponent = (gdouble) HILL_CLIMBING_GAIN_EXPONENT;
	hc->max_sample_error = (gdouble) HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT;
	hc->current_control_setting = 0;
	hc->total_samples = 0;
	hc->last_thread_count = 0;
	hc->average_throughput_noise = 0;
	hc->elapsed_since_last_change = 0;
	hc->accumulated_completion_count = 0;
	hc->accumulated_sample_duration = 0;
	hc->samples = g_new0 (gdouble, hc->samples_to_measure);
	hc->thread_counts = g_new0 (gdouble, hc->samples_to_measure);
	hc->random_interval_generator = rand_create ();
	hc->current_sample_interval = rand_next (&hc->random_interval_generator, hc->sample_interval_low, hc->sample_interval_high);

	if (!(threads_per_cpu_env = g_getenv ("MONO_THREADS_PER_CPU")))
	threads_per_cpu = CLAMP (atoi (threads_per_cpu_env), 1, 50);

	threads_count = mono_cpu_count () * threads_per_cpu;

	worker.limit_worker_min = threads_count;

/* Mobile targets keep the hard cap near 200 threads to limit memory use. */
#if defined (HOST_ANDROID) || defined (HOST_IOS)
	worker.limit_worker_max = CLAMP (threads_count * 100, MIN (threads_count, 200), MAX (threads_count, 200));
	worker.limit_worker_max = threads_count * 100;

	worker.counters._.max_working = worker.limit_worker_min;

	worker.cpu_usage_state = g_new0 (MonoCpuUsageState, 1);

	worker.suspended = FALSE;

	worker.monitor_status = MONITOR_STATUS_NOT_RUNNING;
/* Drop the init-time reference; `destroy` runs when the count reaches zero. */
mono_threadpool_worker_cleanup (void)
	mono_refcount_dec (&worker);
/* Lock-free counters for pending work items, maintained with CAS loops. */
/* work_item_push: increment the pending work item count. */
work_item_push (void)
	old = InterlockedRead (&worker.work_items_count);
	} while (InterlockedCompareExchange (&worker.work_items_count, new, old) != old);

/* work_item_try_pop: decrement the count if non-zero; the (elided) early
 * exit returns failure when no work is pending. */
work_item_try_pop (void)
	old = InterlockedRead (&worker.work_items_count);
	} while (InterlockedCompareExchange (&worker.work_items_count, new, old) != old);

/* work_item_count: snapshot of the pending work item count. */
work_item_count (void)
	return InterlockedRead (&worker.work_items_count);
static void worker_request (void);

/* Public entry point: record one work request and kick a worker, guarded by
 * a refcount so requests during shutdown are ignored. */
mono_threadpool_worker_request (void)
	if (!mono_refcount_tryinc (&worker))

	mono_refcount_dec (&worker);
/* Interrupt callback installed around the park wait: on runtime shutdown it
 * broadcasts parked_threads_cond so every parked worker wakes up promptly. */
worker_wait_interrupt (gpointer unused)
	/* If the runtime is not shutting down, we are not using this mechanism to wake up a unparked thread, and if the
	 * runtime is shutting down, then we need to wake up ALL the threads.
	 * It might be a bit wasteful, but I witnessed shutdown hang where the main thread would abort and then wait for all
	 * background threads to exit (see mono_thread_manage). This would go wrong because not all threadpool threads would
	 * be unparked. It would end up getting unstucked because of the timeout, but that would delay shutdown by 5-60s. */
	if (!mono_runtime_is_shutting_down ())

	if (!mono_refcount_tryinc (&worker))

	mono_coop_mutex_lock (&worker.parked_threads_lock);
	mono_coop_cond_broadcast (&worker.parked_threads_cond);
	mono_coop_mutex_unlock (&worker.parked_threads_lock);

	mono_refcount_dec (&worker);
/* worker_park: idle a worker on parked_threads_cond with a randomized
 * timeout (5s-60s, to avoid thundering-herd wakeups), transitioning the
 * counters working -> parked and back. */
/* return TRUE if timeout, FALSE otherwise (worker unpark or interrupt) */
	gboolean timeout = FALSE;
	gboolean interrupted = FALSE;

	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] worker parking",
		GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())));

	mono_coop_mutex_lock (&worker.parked_threads_lock);

	if (!mono_runtime_is_shutting_down ()) {
		/* Lazily-created, shared PRNG for the park timeouts. */
		static gpointer rand_handle = NULL;
		MonoInternalThread *thread;
		ThreadPoolWorkerCounter counter;

			rand_handle = rand_create ();
			g_assert (rand_handle);

		thread = mono_thread_internal_current ();

		COUNTER_ATOMIC (counter, {
			counter._.working --;

		worker.parked_threads_count += 1;

		/* Ensure shutdown can interrupt this wait (see worker_wait_interrupt). */
		mono_thread_info_install_interrupt (worker_wait_interrupt, NULL, &interrupted);

		/* Non-zero return from the timed wait means the timeout elapsed. */
		if (mono_coop_cond_timedwait (&worker.parked_threads_cond, &worker.parked_threads_lock, rand_next (&rand_handle, 5 * 1000, 60 * 1000)) != 0)

		mono_thread_info_uninstall_interrupt (&interrupted);

		worker.parked_threads_count -= 1;

		COUNTER_ATOMIC (counter, {
			counter._.working ++;

	mono_coop_mutex_unlock (&worker.parked_threads_lock);

	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] worker unparking, timeout? %s interrupted? %s",
		GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())), timeout ? "yes" : "no", interrupted ? "yes" : "no");
/* Wake one parked worker, if any; returns TRUE on success (a signal was
 * sent), FALSE when no thread is parked. */
worker_try_unpark (void)
	gboolean res = FALSE;

	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try unpark worker",
		GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())));

	mono_coop_mutex_lock (&worker.parked_threads_lock);
	if (worker.parked_threads_count > 0) {
		mono_coop_cond_signal (&worker.parked_threads_cond);
	mono_coop_mutex_unlock (&worker.parked_threads_lock);

	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try unpark worker, success? %s",
		GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())), res ? "yes" : "no");
/* Main loop of a threadpool worker thread: moves itself from `starting` to
 * `working`, then repeatedly pops work items (parking when the queue is
 * empty) until shutdown or a park timeout ends the thread. */
worker_thread (gpointer unused)
	MonoInternalThread *thread;
	ThreadPoolWorkerCounter counter;

	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] worker starting",
		GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())));

	if (!mono_refcount_tryinc (&worker))

	COUNTER_ATOMIC (counter, {
		counter._.starting --;
		counter._.working ++;

	thread = mono_thread_internal_current ();

	while (!mono_runtime_is_shutting_down ()) {
		if (mono_thread_interruption_checkpoint ())

		if (!work_item_try_pop ()) {
			/* No work: park; a timeout (vs. an unpark) exits the loop. */
			timeout = worker_park ();

		mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] worker executing",
			GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())));

	COUNTER_ATOMIC (counter, {
		counter._.working --;

	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] worker finishing",
		GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())));

	mono_refcount_dec (&worker);
/* Try to spawn a new worker thread. Fails (returns without creating) when:
 * the runtime is shutting down, the per-second creation budget
 * (WORKER_CREATION_MAX_PER_SEC) is spent, or `working` already reached
 * `max_working`. Serialized by worker_creation_lock. */
worker_try_create (void)
	MonoInternalThread *thread;
	gint64 current_ticks;
	ThreadPoolWorkerCounter counter;

	if (mono_runtime_is_shutting_down ())

	mono_coop_mutex_lock (&worker.worker_creation_lock);

	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker",
		GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())));

	current_ticks = mono_100ns_ticks ();
	if (0 == current_ticks) {
		g_warning ("failed to get 100ns ticks");
	/* 100ns ticks -> whole seconds; reset the budget each new second. */
	now = current_ticks / (10 * 1000 * 1000);
	if (worker.worker_creation_current_second != now) {
		worker.worker_creation_current_second = now;
		worker.worker_creation_current_count = 0;

	g_assert (worker.worker_creation_current_count <= WORKER_CREATION_MAX_PER_SEC);
	if (worker.worker_creation_current_count == WORKER_CREATION_MAX_PER_SEC) {
		mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: maximum number of worker created per second reached, current count = %d",
			GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())), worker.worker_creation_current_count);
		mono_coop_mutex_unlock (&worker.worker_creation_lock);

	COUNTER_ATOMIC (counter, {
		if (counter._.working >= counter._.max_working) {
			mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: maximum number of working threads reached",
				GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())));
			mono_coop_mutex_unlock (&worker.worker_creation_lock);
		counter._.starting ++;

	thread = mono_thread_create_internal (mono_get_root_domain (), worker_thread, NULL, MONO_THREAD_CREATE_FLAGS_THREADPOOL, &error);
		mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: could not create thread due to %s",
			GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())), mono_error_get_message (&error));
		mono_error_cleanup (&error);

		/* Thread creation failed: undo the `starting` increment. */
		COUNTER_ATOMIC (counter, {
			counter._.starting --;

		mono_coop_mutex_unlock (&worker.worker_creation_lock);

	worker.worker_creation_current_count += 1;

	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, created %p, now = %d count = %d",
		GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())), (gpointer) thread->tid, now, worker.worker_creation_current_count);

	mono_coop_mutex_unlock (&worker.worker_creation_lock);
static void monitor_ensure_running (void);

/* Dispatch one work request: ensure the monitor thread is running, then
 * prefer waking a parked worker over creating a brand-new thread. */
worker_request (void)
	/* No-op while the pool is suspended by the debugger. */
	if (worker.suspended)

	monitor_ensure_running ();

	if (worker_try_unpark ()) {
		mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, unparked",
			GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())));

	if (worker_try_create ()) {
		mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, created",
			GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())));

	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, failed",
		GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())));
/* Decide whether the monitor thread should do another iteration. The thread
 * keeps running while there is pending work, and is kept alive for at least
 * MONITOR_MINIMAL_LIFETIME after its last useful run (100ns-tick compare);
 * it transitions monitor_status to NOT_RUNNING on its way out. */
monitor_should_keep_running (void)
	static gint64 last_should_keep_running = -1;

	g_assert (worker.monitor_status == MONITOR_STATUS_WAITING_FOR_REQUEST || worker.monitor_status == MONITOR_STATUS_REQUESTED);

	if (InterlockedExchange (&worker.monitor_status, MONITOR_STATUS_WAITING_FOR_REQUEST) == MONITOR_STATUS_WAITING_FOR_REQUEST) {
		gboolean should_keep_running = TRUE, force_should_keep_running = FALSE;

		if (mono_runtime_is_shutting_down ()) {
			should_keep_running = FALSE;
			if (work_item_count () == 0)
				should_keep_running = FALSE;

		if (!should_keep_running) {
			/* Minimal lifetime not reached yet: force another iteration. */
			if (last_should_keep_running == -1 || mono_100ns_ticks () - last_should_keep_running < MONITOR_MINIMAL_LIFETIME * 1000 * 10) {
				should_keep_running = force_should_keep_running = TRUE;

	if (should_keep_running) {
		if (last_should_keep_running == -1 || !force_should_keep_running)
			last_should_keep_running = mono_100ns_ticks ();
		last_should_keep_running = -1;
		if (InterlockedCompareExchange (&worker.monitor_status, MONITOR_STATUS_NOT_RUNNING, MONITOR_STATUS_WAITING_FOR_REQUEST) == MONITOR_STATUS_WAITING_FOR_REQUEST)

	g_assert (worker.monitor_status == MONITOR_STATUS_WAITING_FOR_REQUEST || worker.monitor_status == MONITOR_STATUS_REQUESTED);
/* TRUE when enough time has passed since the last dequeue to suspect
 * starvation. The threshold is one MONITOR_INTERVAL when CPU is mostly
 * idle, and scales with max_working when the CPU is busy. */
monitor_sufficient_delay_since_last_dequeue (void)
	if (worker.cpu_usage < CPU_USAGE_LOW) {
		threshold = MONITOR_INTERVAL;
		ThreadPoolWorkerCounter counter;
		counter = COUNTER_READ ();
		threshold = counter._.max_working * MONITOR_INTERVAL * 2;

	return mono_msec_ticks () >= worker.heuristic_last_dequeue + threshold;
static void hill_climbing_force_change (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition);

/* Watchdog loop: roughly every MONITOR_INTERVAL ms it checks for worker
 * starvation (work queued but no recent dequeues); on starvation it bumps
 * max_working, notifies the heuristic, and unparks or creates a worker. */
monitor_thread (gpointer unused)
	MonoInternalThread *internal;

	if (!mono_refcount_tryinc (&worker))

	internal = mono_thread_internal_current ();

	/* Prime the CPU usage baseline for later delta measurements. */
	mono_cpu_usage (worker.cpu_usage_state);

	// printf ("monitor_thread: start\n");

	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, started",
		GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())));

		ThreadPoolWorkerCounter counter;
		gboolean limit_worker_max_reached;
		gint32 interval_left = MONITOR_INTERVAL;
		gint32 awake = 0; /* number of spurious awakes we tolerate before doing a round of rebalancing */

		g_assert (worker.monitor_status != MONITOR_STATUS_NOT_RUNNING);

		// counter = COUNTER_READ ();
		// printf ("monitor_thread: starting = %d working = %d parked = %d max_working = %d\n",
		//	counter._.starting, counter._.working, counter._.parked, counter._.max_working);

			gboolean alerted = FALSE;

			if (mono_runtime_is_shutting_down ())

			/* Sleep the remainder of the interval, tolerating up to 10 alerts. */
			ts = mono_msec_ticks ();
			if (mono_thread_info_sleep (interval_left, &alerted) == 0)
			interval_left -= mono_msec_ticks () - ts;

			mono_thread_interruption_checkpoint ();
		} while (interval_left > 0 && ++awake < 10);

		if (mono_runtime_is_shutting_down ())

		if (worker.suspended)

		if (work_item_count () == 0)

		worker.cpu_usage = mono_cpu_usage (worker.cpu_usage_state);

		if (!monitor_sufficient_delay_since_last_dequeue ())

		limit_worker_max_reached = FALSE;

		COUNTER_ATOMIC (counter, {
			if (counter._.max_working >= worker.limit_worker_max) {
				limit_worker_max_reached = TRUE;
			counter._.max_working ++;

		if (limit_worker_max_reached)

		hill_climbing_force_change (counter._.max_working, TRANSITION_STARVATION);

		/* A handful of attempts to get a thread actually running. */
		for (i = 0; i < 5; ++i) {
			if (mono_runtime_is_shutting_down ())

			if (worker_try_unpark ()) {
				mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, unparked",
					GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())));

			if (worker_try_create ()) {
				mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, created",
					GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())));

	} while (monitor_should_keep_running ());

	// printf ("monitor_thread: stop\n");

	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, finished",
		GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())));

	mono_refcount_dec (&worker);
/* Make sure a monitor thread exists (or is already requested). The CAS on
 * monitor_status arbitrates between concurrent callers; only the winner of
 * the NOT_RUNNING -> REQUESTED transition creates the thread. */
monitor_ensure_running (void)
	switch (worker.monitor_status) {
	case MONITOR_STATUS_REQUESTED:
		// printf ("monitor_thread: requested\n");
	case MONITOR_STATUS_WAITING_FOR_REQUEST:
		// printf ("monitor_thread: waiting for request\n");
		InterlockedCompareExchange (&worker.monitor_status, MONITOR_STATUS_REQUESTED, MONITOR_STATUS_WAITING_FOR_REQUEST);
	case MONITOR_STATUS_NOT_RUNNING:
		// printf ("monitor_thread: not running\n");
		if (mono_runtime_is_shutting_down ())
		if (InterlockedCompareExchange (&worker.monitor_status, MONITOR_STATUS_REQUESTED, MONITOR_STATUS_NOT_RUNNING) == MONITOR_STATUS_NOT_RUNNING) {
			// printf ("monitor_thread: creating\n");
			if (!mono_thread_create_internal (mono_get_root_domain (), monitor_thread, NULL, MONO_THREAD_CREATE_FLAGS_THREADPOOL | MONO_THREAD_CREATE_FLAGS_SMALL_STACK, &error)) {
				// printf ("monitor_thread: creating failed\n");
				/* Roll back the status so a later caller can retry. */
				worker.monitor_status = MONITOR_STATUS_NOT_RUNNING;
				mono_error_cleanup (&error);
				mono_refcount_dec (&worker);
	default: g_assert_not_reached ();
/* Record a thread count change in the heuristic state: remember the new
 * count, pick a fresh randomized sample interval, and reset the cumulative
 * per-thread-count statistics. `transition` labels why the change happened. */
hill_climbing_change_thread_count (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition)
	ThreadPoolHillClimbing *hc;

	hc = &worker.heuristic_hill_climbing;

	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] hill climbing, change max number of threads %d",
		GUINT_TO_POINTER (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ())), new_thread_count);

	hc->last_thread_count = new_thread_count;
	hc->current_sample_interval = rand_next (&hc->random_interval_generator, hc->sample_interval_low, hc->sample_interval_high);
	hc->elapsed_since_last_change = 0;
	hc->completions_since_last_change = 0;
/* Externally force the thread count to `new_thread_count` (e.g. starvation
 * detected by the monitor): shift the control setting by the delta and
 * record the change. No-op if the count is already current. */
hill_climbing_force_change (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition)
	ThreadPoolHillClimbing *hc;

	hc = &worker.heuristic_hill_climbing;

	if (new_thread_count != hc->last_thread_count) {
		hc->current_control_setting += new_thread_count - hc->last_thread_count;
		hill_climbing_change_thread_count (new_thread_count, transition);
/* Extract the complex amplitude of the frequency component with the given
 * `period` from the most recent `sample_count` entries of the circular
 * `samples` buffer, using a Goertzel-style recurrence
 * (q0 = coeff*q1 - q2 + x). The result is normalized by sample_count. */
static double_complex
hill_climbing_get_wave_component (gdouble *samples, guint sample_count, gdouble period)
	ThreadPoolHillClimbing *hc;
	gdouble w, cosine, sine, coeff, q0, q1, q2;

	g_assert (sample_count >= period);
	g_assert (period >= 2);

	hc = &worker.heuristic_hill_climbing;

	w = 2.0 * M_PI / period;
	coeff = 2.0 * cosine;

	for (i = 0; i < sample_count; ++i) {
		q0 = coeff * q1 - q2 + samples [(hc->total_samples - sample_count + i) % hc->samples_to_measure];

	return mono_double_complex_scalar_div (mono_double_complex_make (q1 - q2 * cosine, (q2 * sine)), ((gdouble)sample_count));
/* Core of the hill climbing heuristic (ported from the CoreCLR thread
 * pool): given the current thread count, the duration of the last sample
 * and the number of completed work items, decide the next thread count and
 * the delay (*adjustment_interval) before the next adjustment.
 * It injects a small square "wave" into the thread count, measures the
 * throughput response at the wave frequency via Fourier analysis, and moves
 * the control setting in the direction that improves throughput. */
hill_climbing_update (gint16 current_thread_count, guint32 sample_duration, gint32 completions, gint64 *adjustment_interval)
	ThreadPoolHillClimbing *hc;
	ThreadPoolHeuristicStateTransition transition;
	gdouble throughput_error_estimate;
	gint new_thread_wave_magnitude;
	gint new_thread_count;
	double_complex thread_wave_component;
	double_complex throughput_wave_component;
	double_complex ratio;

	g_assert (adjustment_interval);

	hc = &worker.heuristic_hill_climbing;

	/* If someone changed the thread count without telling us, update our records accordingly. */
	if (current_thread_count != hc->last_thread_count)
		hill_climbing_force_change (current_thread_count, TRANSITION_INITIALIZING);

	/* Update the cumulative stats for this thread count */
	hc->elapsed_since_last_change += sample_duration;
	hc->completions_since_last_change += completions;

	/* Add in any data we've already collected about this sample */
	sample_duration += hc->accumulated_sample_duration;
	completions += hc->accumulated_completion_count;

	/* We need to make sure we're collecting reasonably accurate data. Since we're just counting the end
	 * of each work item, we are goinng to be missing some data about what really happened during the
	 * sample interval. The count produced by each thread includes an initial work item that may have
	 * started well before the start of the interval, and each thread may have been running some new
	 * work item for some time before the end of the interval, which did not yet get counted. So
	 * our count is going to be off by +/- threadCount workitems.
	 *
	 * The exception is that the thread that reported to us last time definitely wasn't running any work
	 * at that time, and the thread that's reporting now definitely isn't running a work item now. So
	 * we really only need to consider threadCount-1 threads.
	 *
	 * Thus the percent error in our count is +/- (threadCount-1)/numCompletions.
	 *
	 * We cannot rely on the frequency-domain analysis we'll be doing later to filter out this error, because
	 * of the way it accumulates over time. If this sample is off by, say, 33% in the negative direction,
	 * then the next one likely will be too. The one after that will include the sum of the completions
	 * we missed in the previous samples, and so will be 33% positive. So every three samples we'll have
	 * two "low" samples and one "high" sample. This will appear as periodic variation right in the frequency
	 * range we're targeting, which will not be filtered by the frequency-domain translation. */
	if (hc->total_samples > 0 && ((current_thread_count - 1.0) / completions) >= hc->max_sample_error) {
		/* Not accurate enough yet. Let's accumulate the data so
		 * far, and tell the ThreadPoolWorker to collect a little more. */
		hc->accumulated_sample_duration = sample_duration;
		hc->accumulated_completion_count = completions;
		*adjustment_interval = 10;
		return current_thread_count;

	/* We've got enouugh data for our sample; reset our accumulators for next time. */
	hc->accumulated_sample_duration = 0;
	hc->accumulated_completion_count = 0;

	/* Add the current thread count and throughput sample to our history. */
	throughput = ((gdouble) completions) / sample_duration;

	sample_index = hc->total_samples % hc->samples_to_measure;
	hc->samples [sample_index] = throughput;
	hc->thread_counts [sample_index] = current_thread_count;
	hc->total_samples ++;

	/* Set up defaults for our metrics. */
	thread_wave_component = mono_double_complex_make(0, 0);
	throughput_wave_component = mono_double_complex_make(0, 0);
	throughput_error_estimate = 0;
	ratio = mono_double_complex_make(0, 0);

	transition = TRANSITION_WARMUP;

	/* How many samples will we use? It must be at least the three wave periods we're looking for, and it must also
	 * be a whole multiple of the primary wave's period; otherwise the frequency we're looking for will fall between
	 * two frequency bands in the Fourier analysis, and we won't be able to measure it accurately. */
	sample_count = ((gint) MIN (hc->total_samples - 1, hc->samples_to_measure) / hc->wave_period) * hc->wave_period;

	if (sample_count > hc->wave_period) {
		gdouble average_throughput;
		gdouble average_thread_count;
		gdouble sample_sum = 0;
		gdouble thread_sum = 0;

		/* Average the throughput and thread count samples, so we can scale the wave magnitudes later. */
		for (i = 0; i < sample_count; ++i) {
			guint j = (hc->total_samples - sample_count + i) % hc->samples_to_measure;
			sample_sum += hc->samples [j];
			thread_sum += hc->thread_counts [j];

		average_throughput = sample_sum / sample_count;
		average_thread_count = thread_sum / sample_count;

		if (average_throughput > 0 && average_thread_count > 0) {
			gdouble noise_for_confidence, adjacent_period_1, adjacent_period_2;

			/* Calculate the periods of the adjacent frequency bands we'll be using to
			 * measure noise levels. We want the two adjacent Fourier frequency bands. */
			adjacent_period_1 = sample_count / (((gdouble) sample_count) / ((gdouble) hc->wave_period) + 1);
			adjacent_period_2 = sample_count / (((gdouble) sample_count) / ((gdouble) hc->wave_period) - 1);

			/* Get the the three different frequency components of the throughput (scaled by average
			 * throughput). Our "error" estimate (the amount of noise that might be present in the
			 * frequency band we're really interested in) is the average of the adjacent bands. */
			throughput_wave_component = mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->samples, sample_count, hc->wave_period), average_throughput);
			throughput_error_estimate = cabs (mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->samples, sample_count, adjacent_period_1), average_throughput));

			if (adjacent_period_2 <= sample_count) {
				throughput_error_estimate = MAX (throughput_error_estimate, cabs (mono_double_complex_scalar_div (hill_climbing_get_wave_component (
					hc->samples, sample_count, adjacent_period_2), average_throughput)));

			/* Do the same for the thread counts, so we have something to compare to. We don't
			 * measure thread count noise, because there is none; these are exact measurements. */
			thread_wave_component = mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->thread_counts, sample_count, hc->wave_period), average_thread_count);

			/* Update our moving average of the throughput noise. We'll use this
			 * later as feedback to determine the new size of the thread wave. */
			if (hc->average_throughput_noise == 0) {
				hc->average_throughput_noise = throughput_error_estimate;
				/* NOTE(review): CoreCLR's hill climbing computes this EWMA with
				 * (1.0 - throughput_error_smoothing_factor); with `1.0 +` the
				 * average grows without bound. Likely a sign bug — confirm. */
				hc->average_throughput_noise = (hc->throughput_error_smoothing_factor * throughput_error_estimate)
					+ ((1.0 + hc->throughput_error_smoothing_factor) * hc->average_throughput_noise);

			if (cabs (thread_wave_component) > 0) {
				/* Adjust the throughput wave so it's centered around the target wave,
				 * and then calculate the adjusted throughput/thread ratio. */
				ratio = mono_double_complex_div (mono_double_complex_sub (throughput_wave_component, mono_double_complex_scalar_mul(thread_wave_component, hc->target_throughput_ratio)), thread_wave_component);
				transition = TRANSITION_CLIMBING_MOVE;
				ratio = mono_double_complex_make (0, 0);
				transition = TRANSITION_STABILIZING;

			noise_for_confidence = MAX (hc->average_throughput_noise, throughput_error_estimate);
			if (noise_for_confidence > 0) {
				confidence = cabs (thread_wave_component) / noise_for_confidence / hc->target_signal_to_noise_ratio;
				/* there is no noise! */

	/* We use just the real part of the complex ratio we just calculated. If the throughput signal
	 * is exactly in phase with the thread signal, this will be the same as taking the magnitude of
	 * the complex move and moving that far up. If they're 180 degrees out of phase, we'll move
	 * backward (because this indicates that our changes are having the opposite of the intended effect).
	 * If they're 90 degrees out of phase, we won't move at all, because we can't tell wether we're
	 * having a negative or positive effect on throughput. */
	move = creal (ratio);
	move = CLAMP (move, -1.0, 1.0);

	/* Apply our confidence multiplier. */
	move *= CLAMP (confidence, -1.0, 1.0);

	/* Now apply non-linear gain, such that values around zero are attenuated, while higher values
	 * are enhanced. This allows us to move quickly if we're far away from the target, but more slowly
	 * if we're getting close, giving us rapid ramp-up without wild oscillations around the target. */
	gain = hc->max_change_per_second * sample_duration;
	move = pow (fabs (move), hc->gain_exponent) * (move >= 0.0 ? 1 : -1) * gain;
	move = MIN (move, hc->max_change_per_sample);

	/* If the result was positive, and CPU is > 95%, refuse the move. */
	if (move > 0.0 && worker.cpu_usage > CPU_USAGE_HIGH)

	/* Apply the move to our control setting. */
	hc->current_control_setting += move;

	/* Calculate the new thread wave magnitude, which is based on the moving average we've been keeping of the
	 * throughput error. This average starts at zero, so we'll start with a nice safe little wave at first. */
	new_thread_wave_magnitude = (gint)(0.5 + (hc->current_control_setting * hc->average_throughput_noise
		* hc->target_signal_to_noise_ratio * hc->thread_magnitude_multiplier * 2.0));
	new_thread_wave_magnitude = CLAMP (new_thread_wave_magnitude, 1, hc->max_thread_wave_magnitude);

	/* Make sure our control setting is within the ThreadPoolWorker's limits. */
	hc->current_control_setting = CLAMP (hc->current_control_setting, worker.limit_worker_min, worker.limit_worker_max - new_thread_wave_magnitude);

	/* Calculate the new thread count (control setting + square wave). */
	new_thread_count = (gint)(hc->current_control_setting + new_thread_wave_magnitude * ((hc->total_samples / (hc->wave_period / 2)) % 2));

	/* Make sure the new thread count doesn't exceed the ThreadPoolWorker's limits. */
	new_thread_count = CLAMP (new_thread_count, worker.limit_worker_min, worker.limit_worker_max);

	if (new_thread_count != current_thread_count)
		hill_climbing_change_thread_count (new_thread_count, transition);

	/* Back off the sampling rate when we are pinned at the minimum and the
	 * ratio says shrinking would help (nothing useful to measure). */
	if (creal (ratio) < 0.0 && new_thread_count == worker.limit_worker_min)
		*adjustment_interval = (gint)(0.5 + hc->current_sample_interval * (10.0 * MAX (-1.0 * creal (ratio), 1.0)));
		*adjustment_interval = hc->current_sample_interval;

	return new_thread_count;
/* Decide whether the hill-climbing heuristic is due for another adjustment
 * pass: at least heuristic_adjustment_interval ms must have elapsed since the
 * last adjustment (measured against the last-dequeue timestamp), and the pool
 * must not already be over its max_working target.
 * NOTE(review): the return-type line, braces and return statements of this
 * function are missing from this extraction. */
1066 heuristic_should_adjust (void)
/* Enough time elapsed since the previous adjustment? */
1068 if (worker.heuristic_last_dequeue > worker.heuristic_last_adjustment + worker.heuristic_adjustment_interval) {
1069 ThreadPoolWorkerCounter counter;
1070 counter = COUNTER_READ ();
/* Only adjust while the working-thread count is within the current target. */
1071 if (counter._.working <= counter._.max_working)
/* Run one hill-climbing adjustment pass, if the heuristic lock is free.
 * Uses trylock so the work-completion hot path never blocks behind a
 * concurrent adjustment. Drains the completion counter atomically, and only
 * feeds the sample to hill_climbing_update () when the sample window spans at
 * least half the current adjustment interval (shorter samples are too noisy).
 * NOTE(review): interleaved lines (closing braces, the branch body that reacts
 * to a grown target) are missing from this extraction. */
1079 heuristic_adjust (void)
1081 if (mono_coop_mutex_trylock (&worker.heuristic_lock) == 0) {
/* Atomically claim every completion recorded since the last sample. */
1082 gint32 completions = InterlockedExchange (&worker.heuristic_completions, 0);
1083 gint64 sample_end = mono_msec_ticks ();
1084 gint64 sample_duration = sample_end - worker.heuristic_sample_start;
1086 if (sample_duration >= worker.heuristic_adjustment_interval / 2) {
1087 ThreadPoolWorkerCounter counter;
1088 gint16 new_thread_count;
1090 counter = COUNTER_READ ();
/* hill_climbing_update also writes back the next adjustment interval. */
1091 new_thread_count = hill_climbing_update (counter._.max_working, sample_duration, completions, &worker.heuristic_adjustment_interval);
/* Publish the new max_working target via an atomic counter update. */
1093 COUNTER_ATOMIC (counter, {
1094 counter._.max_working = new_thread_count;
/* Target grew relative to the pre-update snapshot (branch body not visible). */
1097 if (new_thread_count > counter._.max_working)
/* Start the next sample window where this one ended. */
1100 worker.heuristic_sample_start = sample_end;
1101 worker.heuristic_last_adjustment = mono_msec_ticks ();
1104 mono_coop_mutex_unlock (&worker.heuristic_lock);
/* Record the completion of one work item for the heuristic: bump the
 * completion counter, stamp the last-dequeue time, and run an adjustment
 * pass when one is due. */
1109 heuristic_notify_work_completed (void)
1111 InterlockedIncrement (&worker.heuristic_completions);
1112 worker.heuristic_last_dequeue = mono_msec_ticks ();
1114 if (heuristic_should_adjust ())
1115 heuristic_adjust ();
/* Public hook invoked after a work item completes. Feeds the completion into
 * the hill-climbing heuristic, then reports whether the calling worker should
 * keep running: TRUE while the working-thread count is within the max_working
 * target, FALSE once the pool is over target. */
1119 mono_threadpool_worker_notify_completed (void)
1121 ThreadPoolWorkerCounter counter;
1123 heuristic_notify_work_completed ();
1125 counter = COUNTER_READ ();
1126 return counter._.working <= counter._.max_working;
/* Return the minimum worker-thread limit. Takes a refcount on the worker so
 * the read cannot race with teardown; the value returned when tryinc fails
 * is on a line not visible in this extraction. */
1130 mono_threadpool_worker_get_min (void)
1134 if (!mono_refcount_tryinc (&worker))
1137 ret = worker.limit_worker_min;
1139 mono_refcount_dec (&worker);
/* Set the minimum worker-thread limit. Rejects non-positive values and values
 * above the current maximum; otherwise stores the new limit under a refcount
 * guard against concurrent teardown. */
1144 mono_threadpool_worker_set_min (gint32 value)
1146 if (value <= 0 || value > worker.limit_worker_max)
1149 if (!mono_refcount_tryinc (&worker))
1152 worker.limit_worker_min = value;
1154 mono_refcount_dec (&worker);
/* Return the maximum worker-thread limit. Takes a refcount on the worker so
 * the read cannot race with teardown; the value returned when tryinc fails
 * is on a line not visible in this extraction. */
1159 mono_threadpool_worker_get_max (void)
1163 if (!mono_refcount_tryinc (&worker))
1166 ret = worker.limit_worker_max;
1168 mono_refcount_dec (&worker);
/* Set the maximum worker-thread limit. Rejects values below the current
 * minimum or below the machine's CPU count; otherwise stores the new limit
 * under a refcount guard against concurrent teardown. */
1173 mono_threadpool_worker_set_max (gint32 value)
1177 cpu_count = mono_cpu_count ();
1178 if (value < worker.limit_worker_min || value < cpu_count)
1181 if (!mono_refcount_tryinc (&worker))
1184 worker.limit_worker_max = value;
1186 mono_refcount_dec (&worker);
/* Suspend or resume the worker pool. Stores the flag under a refcount guard
 * against concurrent teardown.
 * NOTE(review): the lines between the store and the refcount release are
 * missing from this extraction — presumably the resume path wakes parked
 * workers there; confirm against the full source. */
1191 mono_threadpool_worker_set_suspended (gboolean suspended)
1193 if (!mono_refcount_tryinc (&worker))
1196 worker.suspended = suspended;
1200 mono_refcount_dec (&worker);