mono/metadata/threadpool-worker-default.c
1 /**
2  * \file
3  * native threadpool worker
4  *
5  * Author:
6  *      Ludovic Henry (ludovic.henry@xamarin.com)
7  *
8  * Licensed under the MIT license. See LICENSE file in the project root for full license information.
9  */
10
11 #include <stdlib.h>
12 #define _USE_MATH_DEFINES // needed by MSVC to define math constants
13 #include <math.h>
14 #include <config.h>
15 #include <glib.h>
16
17 #include <mono/metadata/class-internals.h>
18 #include <mono/metadata/exception.h>
19 #include <mono/metadata/gc-internals.h>
20 #include <mono/metadata/object.h>
21 #include <mono/metadata/object-internals.h>
22 #include <mono/metadata/threadpool.h>
23 #include <mono/metadata/threadpool-worker.h>
24 #include <mono/metadata/threadpool-io.h>
25 #include <mono/metadata/w32event.h>
26 #include <mono/utils/atomic.h>
27 #include <mono/utils/mono-compiler.h>
28 #include <mono/utils/mono-complex.h>
29 #include <mono/utils/mono-logger.h>
30 #include <mono/utils/mono-logger-internals.h>
31 #include <mono/utils/mono-proclib.h>
32 #include <mono/utils/mono-threads.h>
33 #include <mono/utils/mono-time.h>
34 #include <mono/utils/mono-rand.h>
35 #include <mono/utils/refcount.h>
36 #include <mono/utils/w32api.h>
37
38 #define CPU_USAGE_LOW 80
39 #define CPU_USAGE_HIGH 95
40
41 #define MONITOR_INTERVAL 500 // ms
42 #define MONITOR_MINIMAL_LIFETIME 60 * 1000 // ms
43
44 #define WORKER_CREATION_MAX_PER_SEC 10
45
46 /* The exponent to apply to the gain. 1.0 means to use linear gain,
47  * higher values will enhance large moves and damp small ones.
48  * default: 2.0 */
49 #define HILL_CLIMBING_GAIN_EXPONENT 2.0
50
51 /* The 'cost' of a thread. 0 means drive for increased throughput regardless
52  * of thread count, higher values bias more against higher thread counts.
53  * default: 0.15 */
54 #define HILL_CLIMBING_BIAS 0.15
55
56 #define HILL_CLIMBING_WAVE_PERIOD 4
57 #define HILL_CLIMBING_MAX_WAVE_MAGNITUDE 20
58 #define HILL_CLIMBING_WAVE_MAGNITUDE_MULTIPLIER 1.0
59 #define HILL_CLIMBING_WAVE_HISTORY_SIZE 8
60 #define HILL_CLIMBING_TARGET_SIGNAL_TO_NOISE_RATIO 3.0
61 #define HILL_CLIMBING_MAX_CHANGE_PER_SECOND 4
62 #define HILL_CLIMBING_MAX_CHANGE_PER_SAMPLE 20
63 #define HILL_CLIMBING_SAMPLE_INTERVAL_LOW 10
64 #define HILL_CLIMBING_SAMPLE_INTERVAL_HIGH 200
65 #define HILL_CLIMBING_ERROR_SMOOTHING_FACTOR 0.01
66 #define HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT 0.15
67
68 typedef enum {
69         TRANSITION_WARMUP,
70         TRANSITION_INITIALIZING,
71         TRANSITION_RANDOM_MOVE,
72         TRANSITION_CLIMBING_MOVE,
73         TRANSITION_CHANGE_POINT,
74         TRANSITION_STABILIZING,
75         TRANSITION_STARVATION,
76         TRANSITION_THREAD_TIMED_OUT,
77         TRANSITION_UNDEFINED,
78 } ThreadPoolHeuristicStateTransition;
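/* The transition passed to hill_climbing_change_thread_count () records *why*
 * the max-working target moved, but nothing in this file branches on it
 * afterwards; it is carried along purely as bookkeeping for the hill-climbing
 * heuristic. */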
79
80 typedef struct {
81         gint32 wave_period;
82         gint32 samples_to_measure;
83         gdouble target_throughput_ratio;
84         gdouble target_signal_to_noise_ratio;
85         gdouble max_change_per_second;
86         gdouble max_change_per_sample;
87         gint32 max_thread_wave_magnitude;
88         gint32 sample_interval_low;
89         gdouble thread_magnitude_multiplier;
90         gint32 sample_interval_high;
91         gdouble throughput_error_smoothing_factor;
92         gdouble gain_exponent;
93         gdouble max_sample_error;
94
95         gdouble current_control_setting;
96         gint64 total_samples;
97         gint16 last_thread_count;
98         gdouble elapsed_since_last_change;
99         gdouble completions_since_last_change;
100
101         gdouble average_throughput_noise;
102
103         gdouble *samples;
104         gdouble *thread_counts;
105
106         guint32 current_sample_interval;
107         gpointer random_interval_generator;
108
109         gint32 accumulated_completion_count;
110         gdouble accumulated_sample_duration;
111 } ThreadPoolHillClimbing;
112
113 typedef union {
114         struct {
115                 gint16 max_working; /* determined by heuristic */
116                 gint16 starting; /* starting, but not yet in worker_thread */
117                 gint16 working; /* executing worker_thread */
118                 gint16 parked; /* parked */
119         } _;
120         gint64 as_gint64;
121 } ThreadPoolWorkerCounter;
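/* The four 16-bit counters are overlaid on a single 64-bit word so that a
 * consistent snapshot can be taken with one InterlockedRead64 and updated with
 * one InterlockedCompareExchange64 (see COUNTER_READ and COUNTER_ATOMIC
 * below), without taking a lock around the counter bookkeeping. */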
122
123 typedef struct {
124         MonoRefCount ref;
125
126         MonoThreadPoolWorkerCallback callback;
127
128         ThreadPoolWorkerCounter counters;
129
130         MonoCoopMutex parked_threads_lock;
131         gint32 parked_threads_count;
132         MonoCoopCond parked_threads_cond;
133
134         volatile gint32 work_items_count;
135
136         guint32 worker_creation_current_second;
137         guint32 worker_creation_current_count;
138         MonoCoopMutex worker_creation_lock;
139
140         gint32 heuristic_completions;
141         gint64 heuristic_sample_start;
142         gint64 heuristic_last_dequeue; // ms
143         gint64 heuristic_last_adjustment; // ms
144         gint64 heuristic_adjustment_interval; // ms
145         ThreadPoolHillClimbing heuristic_hill_climbing;
146         MonoCoopMutex heuristic_lock;
147
148         gint32 limit_worker_min;
149         gint32 limit_worker_max;
150
151         MonoCpuUsageState *cpu_usage_state;
152         gint32 cpu_usage;
153
154         /* suspended by the debugger */
155         gboolean suspended;
156
157         gint32 monitor_status;
158 } ThreadPoolWorker;
159
160 enum {
161         MONITOR_STATUS_REQUESTED,
162         MONITOR_STATUS_WAITING_FOR_REQUEST,
163         MONITOR_STATUS_NOT_RUNNING,
164 };
165
166 static ThreadPoolWorker worker;
167
168 #define COUNTER_CHECK(counter) \
169         do { \
170                 g_assert (counter._.max_working > 0); \
171                 g_assert (counter._.starting >= 0); \
172                 g_assert (counter._.working >= 0); \
173         } while (0)
174
175 #define COUNTER_ATOMIC(var,block) \
176         do { \
177                 ThreadPoolWorkerCounter __old; \
178                 do { \
179                         __old = COUNTER_READ (); \
180                         (var) = __old; \
181                         { block; } \
182                         COUNTER_CHECK (var); \
183                 } while (InterlockedCompareExchange64 (&worker.counters.as_gint64, (var).as_gint64, __old.as_gint64) != __old.as_gint64); \
184         } while (0)
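/* COUNTER_ATOMIC is a compare-and-swap retry loop: the block mutates a local
 * copy of the counter and the CAS publishes it only if no other thread updated
 * the shared word in the meantime; on contention the snapshot is re-read and
 * the block re-runs. Typical use (as in worker_thread below):
 *
 *     ThreadPoolWorkerCounter counter;
 *     COUNTER_ATOMIC (counter, {
 *             counter._.starting --;
 *             counter._.working ++;
 *     });
 *
 * Note that a `break` inside the block jumps out of the retry loop before the
 * CAS runs, leaving the shared counter untouched; monitor_thread relies on
 * this to bail out once limit_worker_max has been reached. */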
185
186 static inline ThreadPoolWorkerCounter
187 COUNTER_READ (void)
188 {
189         ThreadPoolWorkerCounter counter;
190         counter.as_gint64 = InterlockedRead64 (&worker.counters.as_gint64);
191         return counter;
192 }
193
194 static gpointer
195 rand_create (void)
196 {
197         mono_rand_open ();
198         return mono_rand_init (NULL, 0);
199 }
200
201 static guint32
202 rand_next (gpointer *handle, guint32 min, guint32 max)
203 {
204         MonoError error;
205         guint32 val;
206         mono_rand_try_get_uint32 (handle, &val, min, max, &error);
207         // FIXME handle error
208         mono_error_assert_ok (&error);
209         return val;
210 }
211
212 static void
213 destroy (gpointer data)
214 {
215         mono_coop_mutex_destroy (&worker.parked_threads_lock);
216         mono_coop_cond_destroy (&worker.parked_threads_cond);
217
218         mono_coop_mutex_destroy (&worker.worker_creation_lock);
219
220         mono_coop_mutex_destroy (&worker.heuristic_lock);
221
222         g_free (worker.cpu_usage_state);
223 }
224
225 void
226 mono_threadpool_worker_init (MonoThreadPoolWorkerCallback callback)
227 {
228         ThreadPoolHillClimbing *hc;
229         const char *threads_per_cpu_env;
230         gint threads_per_cpu;
231         gint threads_count;
232
233         mono_refcount_init (&worker, destroy);
234
235         worker.callback = callback;
236
237         mono_coop_mutex_init (&worker.parked_threads_lock);
238         worker.parked_threads_count = 0;
239         mono_coop_cond_init (&worker.parked_threads_cond);
240
241         worker.worker_creation_current_second = -1;
242         mono_coop_mutex_init (&worker.worker_creation_lock);
243
244         worker.heuristic_adjustment_interval = 10;
245         mono_coop_mutex_init (&worker.heuristic_lock);
246
247         mono_rand_open ();
248
249         hc = &worker.heuristic_hill_climbing;
250
251         hc->wave_period = HILL_CLIMBING_WAVE_PERIOD;
252         hc->max_thread_wave_magnitude = HILL_CLIMBING_MAX_WAVE_MAGNITUDE;
253         hc->thread_magnitude_multiplier = (gdouble) HILL_CLIMBING_WAVE_MAGNITUDE_MULTIPLIER;
254         hc->samples_to_measure = hc->wave_period * HILL_CLIMBING_WAVE_HISTORY_SIZE;
255         hc->target_throughput_ratio = (gdouble) HILL_CLIMBING_BIAS;
256         hc->target_signal_to_noise_ratio = (gdouble) HILL_CLIMBING_TARGET_SIGNAL_TO_NOISE_RATIO;
257         hc->max_change_per_second = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SECOND;
258         hc->max_change_per_sample = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SAMPLE;
259         hc->sample_interval_low = HILL_CLIMBING_SAMPLE_INTERVAL_LOW;
260         hc->sample_interval_high = HILL_CLIMBING_SAMPLE_INTERVAL_HIGH;
261         hc->throughput_error_smoothing_factor = (gdouble) HILL_CLIMBING_ERROR_SMOOTHING_FACTOR;
262         hc->gain_exponent = (gdouble) HILL_CLIMBING_GAIN_EXPONENT;
263         hc->max_sample_error = (gdouble) HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT;
264         hc->current_control_setting = 0;
265         hc->total_samples = 0;
266         hc->last_thread_count = 0;
267         hc->average_throughput_noise = 0;
268         hc->elapsed_since_last_change = 0;
269         hc->accumulated_completion_count = 0;
270         hc->accumulated_sample_duration = 0;
271         hc->samples = g_new0 (gdouble, hc->samples_to_measure);
272         hc->thread_counts = g_new0 (gdouble, hc->samples_to_measure);
273         hc->random_interval_generator = rand_create ();
274         hc->current_sample_interval = rand_next (&hc->random_interval_generator, hc->sample_interval_low, hc->sample_interval_high);
275
276         if (!(threads_per_cpu_env = g_getenv ("MONO_THREADS_PER_CPU")))
277                 threads_per_cpu = 1;
278         else
279                 threads_per_cpu = CLAMP (atoi (threads_per_cpu_env), 1, 50);
280
281         threads_count = mono_cpu_count () * threads_per_cpu;
282
283         worker.limit_worker_min = threads_count;
284
285 #if defined (PLATFORM_ANDROID) || defined (HOST_IOS)
286         worker.limit_worker_max = CLAMP (threads_count * 100, MIN (threads_count, 200), MAX (threads_count, 200));
287 #else
288         worker.limit_worker_max = threads_count * 100;
289 #endif
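        /* Worked example for the limits above (hypothetical machine, with
         * MONO_THREADS_PER_CPU unset): 4 CPUs give threads_per_cpu = 1 and
         * threads_count = 4, so limit_worker_min = 4 and limit_worker_max = 400
         * on desktop/server targets; on Android/iOS the max is instead clamped
         * to CLAMP (400, 4, 200) = 200. */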
290
291         worker.counters._.max_working = worker.limit_worker_min;
292
293         worker.cpu_usage_state = g_new0 (MonoCpuUsageState, 1);
294
295         worker.suspended = FALSE;
296
297         worker.monitor_status = MONITOR_STATUS_NOT_RUNNING;
298 }
299
300 void
301 mono_threadpool_worker_cleanup (void)
302 {
303         mono_refcount_dec (&worker);
304 }
305
306 static void
307 work_item_push (void)
308 {
309         gint32 old, new;
310
311         do {
312                 old = InterlockedRead (&worker.work_items_count);
313                 g_assert (old >= 0);
314
315                 new = old + 1;
316         } while (InterlockedCompareExchange (&worker.work_items_count, new, old) != old);
317 }
318
319 static gboolean
320 work_item_try_pop (void)
321 {
322         gint32 old, new;
323
324         do {
325                 old = InterlockedRead (&worker.work_items_count);
326                 g_assert (old >= 0);
327
328                 if (old == 0)
329                         return FALSE;
330
331                 new = old - 1;
332         } while (InterlockedCompareExchange (&worker.work_items_count, new, old) != old);
333
334         return TRUE;
335 }
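/* work_items_count is only a counter of pending requests, not a queue: the
 * actual work items live in whatever structure the threadpool layer that
 * supplied `callback` maintains, and worker.callback () is what dequeues and
 * runs them. The CAS loops above just keep the count consistent so that
 * work_item_try_pop () can cheaply answer "is there anything left to do?". */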
336
337 static gint32
338 work_item_count (void)
339 {
340         return InterlockedRead (&worker.work_items_count);
341 }
342
343 static void worker_request (void);
344
345 void
346 mono_threadpool_worker_request (void)
347 {
348         if (!mono_refcount_tryinc (&worker))
349                 return;
350
351         work_item_push ();
352
353         worker_request ();
354
355         mono_refcount_dec (&worker);
356 }
357
358 static void
359 worker_wait_interrupt (gpointer unused)
360 {
361         /* If the runtime is not shutting down, we are not using this mechanism to unpark threads, and if the
362          * runtime is shutting down, then we need to wake up ALL the threads.
363          * It might be a bit wasteful, but I witnessed a shutdown hang where the main thread would abort and then wait for all
364          * background threads to exit (see mono_thread_manage). This would go wrong because not all threadpool threads would
365          * be unparked. They would eventually get unstuck thanks to the park timeout, but that would delay shutdown by 5-60s. */
366         if (!mono_runtime_is_shutting_down ())
367                 return;
368
369         if (!mono_refcount_tryinc (&worker))
370                 return;
371
372         mono_coop_mutex_lock (&worker.parked_threads_lock);
373         mono_coop_cond_broadcast (&worker.parked_threads_cond);
374         mono_coop_mutex_unlock (&worker.parked_threads_lock);
375
376         mono_refcount_dec (&worker);
377 }
378
379 /* return TRUE if timeout, FALSE otherwise (worker unpark or interrupt) */
380 static gboolean
381 worker_park (void)
382 {
383         gboolean timeout = FALSE;
384         gboolean interrupted = FALSE;
385
386         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker parking", mono_native_thread_id_get ());
387
388         mono_coop_mutex_lock (&worker.parked_threads_lock);
389
390         if (!mono_runtime_is_shutting_down ()) {
391                 static gpointer rand_handle = NULL;
392                 MonoInternalThread *thread;
393                 ThreadPoolWorkerCounter counter;
394
395                 if (!rand_handle)
396                         rand_handle = rand_create ();
397                 g_assert (rand_handle);
398
399                 thread = mono_thread_internal_current ();
400                 g_assert (thread);
401
402                 COUNTER_ATOMIC (counter, {
403                         counter._.working --;
404                         counter._.parked ++;
405                 });
406
407                 worker.parked_threads_count += 1;
408
409                 mono_thread_info_install_interrupt (worker_wait_interrupt, NULL, &interrupted);
410                 if (interrupted)
411                         goto done;
412
413                 if (mono_coop_cond_timedwait (&worker.parked_threads_cond, &worker.parked_threads_lock, rand_next (&rand_handle, 5 * 1000, 60 * 1000)) != 0)
414                         timeout = TRUE;
415
416                 mono_thread_info_uninstall_interrupt (&interrupted);
417
418 done:
419                 worker.parked_threads_count -= 1;
420
421                 COUNTER_ATOMIC (counter, {
422                         counter._.working ++;
423                         counter._.parked --;
424                 });
425         }
426
427         mono_coop_mutex_unlock (&worker.parked_threads_lock);
428
429         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker unparking, timeout? %s interrupted? %s",
430                 mono_native_thread_id_get (), timeout ? "yes" : "no", interrupted ? "yes" : "no");
431
432         return timeout;
433 }
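/* A parked worker waits on parked_threads_cond with a timeout drawn uniformly
 * from [5s, 60s) via rand_next () above. Spreading the timeouts out this way
 * presumably avoids a whole batch of idle workers timing out and exiting at the
 * same instant; a worker that does time out returns TRUE here, its
 * worker_thread loop breaks, and the thread terminates. */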
434
435 static gboolean
436 worker_try_unpark (void)
437 {
438         gboolean res = FALSE;
439
440         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try unpark worker", mono_native_thread_id_get ());
441
442         mono_coop_mutex_lock (&worker.parked_threads_lock);
443         if (worker.parked_threads_count > 0) {
444                 mono_coop_cond_signal (&worker.parked_threads_cond);
445                 res = TRUE;
446         }
447         mono_coop_mutex_unlock (&worker.parked_threads_lock);
448
449         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try unpark worker, success? %s", mono_native_thread_id_get (), res ? "yes" : "no");
450
451         return res;
452 }
453
454 static gsize WINAPI
455 worker_thread (gpointer unused)
456 {
457         MonoInternalThread *thread;
458         ThreadPoolWorkerCounter counter;
459
460         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker starting", mono_native_thread_id_get ());
461
462         if (!mono_refcount_tryinc (&worker))
463                 return 0;
464
465         COUNTER_ATOMIC (counter, {
466                 counter._.starting --;
467                 counter._.working ++;
468         });
469
470         thread = mono_thread_internal_current ();
471         g_assert (thread);
472
473         while (!mono_runtime_is_shutting_down ()) {
474                 if (mono_thread_interruption_checkpoint ())
475                         continue;
476
477                 if (!work_item_try_pop ()) {
478                         gboolean timeout;
479
480                         timeout = worker_park ();
481                         if (timeout)
482                                 break;
483
484                         continue;
485                 }
486
487                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] worker executing",
488                         mono_native_thread_id_get ());
489
490                 worker.callback ();
491         }
492
493         COUNTER_ATOMIC (counter, {
494                 counter._.working --;
495         });
496
497         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker finishing", mono_native_thread_id_get ());
498
499         mono_refcount_dec (&worker);
500
501         return 0;
502 }
503
504 static gboolean
505 worker_try_create (void)
506 {
507         MonoError error;
508         MonoInternalThread *thread;
509         gint64 current_ticks;
510         gint32 now;
511         ThreadPoolWorkerCounter counter;
512
513         if (mono_runtime_is_shutting_down ())
514                 return FALSE;
515
516         mono_coop_mutex_lock (&worker.worker_creation_lock);
517
518         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker", mono_native_thread_id_get ());
519
520         current_ticks = mono_100ns_ticks ();
521         if (0 == current_ticks) {
522                 g_warning ("failed to get 100ns ticks");
523         } else {
524                 now = current_ticks / (10 * 1000 * 1000);
525                 if (worker.worker_creation_current_second != now) {
526                         worker.worker_creation_current_second = now;
527                         worker.worker_creation_current_count = 0;
528                 } else {
529                         g_assert (worker.worker_creation_current_count <= WORKER_CREATION_MAX_PER_SEC);
530                         if (worker.worker_creation_current_count == WORKER_CREATION_MAX_PER_SEC) {
531                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: maximum number of workers created per second reached, current count = %d",
532                                         mono_native_thread_id_get (), worker.worker_creation_current_count);
533                                 mono_coop_mutex_unlock (&worker.worker_creation_lock);
534                                 return FALSE;
535                         }
536                 }
537         }
538
539         COUNTER_ATOMIC (counter, {
540                 if (counter._.working >= counter._.max_working) {
541                         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: maximum number of working threads reached",
542                                 mono_native_thread_id_get ());
543                         mono_coop_mutex_unlock (&worker.worker_creation_lock);
544                         return FALSE;
545                 }
546                 counter._.starting ++;
547         });
548
549         thread = mono_thread_create_internal (mono_get_root_domain (), worker_thread, NULL, MONO_THREAD_CREATE_FLAGS_THREADPOOL, &error);
550         if (!thread) {
551                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: could not create thread due to %s", mono_native_thread_id_get (), mono_error_get_message (&error));
552                 mono_error_cleanup (&error);
553
554                 COUNTER_ATOMIC (counter, {
555                         counter._.starting --;
556                 });
557
558                 mono_coop_mutex_unlock (&worker.worker_creation_lock);
559
560                 return FALSE;
561         }
562
563         worker.worker_creation_current_count += 1;
564
565         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, created %p, now = %d count = %d",
566                 mono_native_thread_id_get (), (gpointer) thread->tid, now, worker.worker_creation_current_count);
567
568         mono_coop_mutex_unlock (&worker.worker_creation_lock);
569         return TRUE;
570 }
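/* Creation is throttled to WORKER_CREATION_MAX_PER_SEC (10) new workers per
 * wall-clock second: `now` is the current second (100ns ticks divided by
 * 10 * 1000 * 1000), the per-second count is reset whenever the second changes,
 * and once the count hits the cap worker_try_create () fails and the caller
 * (worker_request or the monitor thread) has to try again later. */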
571
572 static void monitor_ensure_running (void);
573
574 static void
575 worker_request (void)
576 {
577         if (worker.suspended)
578                 return;
579
580         monitor_ensure_running ();
581
582         if (worker_try_unpark ()) {
583                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, unparked", mono_native_thread_id_get ());
584                 return;
585         }
586
587         if (worker_try_create ()) {
588                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, created", mono_native_thread_id_get ());
589                 return;
590         }
591
592         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, failed", mono_native_thread_id_get ());
593 }
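/* Request path, in order of preference: make sure the monitor thread is
 * running as a backstop against starvation, then wake an already-parked worker
 * (cheap), and only if none is parked try to create a new thread (subject to
 * the per-second throttle and the max_working heuristic). */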
594
595 static gboolean
596 monitor_should_keep_running (void)
597 {
598         static gint64 last_should_keep_running = -1;
599
600         g_assert (worker.monitor_status == MONITOR_STATUS_WAITING_FOR_REQUEST || worker.monitor_status == MONITOR_STATUS_REQUESTED);
601
602         if (InterlockedExchange (&worker.monitor_status, MONITOR_STATUS_WAITING_FOR_REQUEST) == MONITOR_STATUS_WAITING_FOR_REQUEST) {
603                 gboolean should_keep_running = TRUE, force_should_keep_running = FALSE;
604
605                 if (mono_runtime_is_shutting_down ()) {
606                         should_keep_running = FALSE;
607                 } else {
608                         if (work_item_count () == 0)
609                                 should_keep_running = FALSE;
610
611                         if (!should_keep_running) {
612                                 if (last_should_keep_running == -1 || mono_100ns_ticks () - last_should_keep_running < MONITOR_MINIMAL_LIFETIME * 1000 * 10) {
613                                         should_keep_running = force_should_keep_running = TRUE;
614                                 }
615                         }
616                 }
617
618                 if (should_keep_running) {
619                         if (last_should_keep_running == -1 || !force_should_keep_running)
620                                 last_should_keep_running = mono_100ns_ticks ();
621                 } else {
622                         last_should_keep_running = -1;
623                         if (InterlockedCompareExchange (&worker.monitor_status, MONITOR_STATUS_NOT_RUNNING, MONITOR_STATUS_WAITING_FOR_REQUEST) == MONITOR_STATUS_WAITING_FOR_REQUEST)
624                                 return FALSE;
625                 }
626         }
627
628         g_assert (worker.monitor_status == MONITOR_STATUS_WAITING_FOR_REQUEST || worker.monitor_status == MONITOR_STATUS_REQUESTED);
629
630         return TRUE;
631 }
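/* MONITOR_MINIMAL_LIFETIME is expressed in milliseconds while
 * mono_100ns_ticks () returns 100ns units, hence the "* 1000 * 10" conversion
 * above: the monitor keeps itself alive for at least 60 seconds after it last
 * had a real reason to run, even if the work queue is momentarily empty. */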
632
633 static gboolean
634 monitor_sufficient_delay_since_last_dequeue (void)
635 {
636         gint64 threshold;
637
638         if (worker.cpu_usage < CPU_USAGE_LOW) {
639                 threshold = MONITOR_INTERVAL;
640         } else {
641                 ThreadPoolWorkerCounter counter;
642                 counter = COUNTER_READ ();
643                 threshold = counter._.max_working * MONITOR_INTERVAL * 2;
644         }
645
646         return mono_msec_ticks () >= worker.heuristic_last_dequeue + threshold;
647 }
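/* Example (hypothetical numbers): with CPU usage under CPU_USAGE_LOW (80%),
 * the monitor considers injecting threads once nothing has been dequeued for
 * MONITOR_INTERVAL (500ms). Under heavier CPU load the threshold scales with
 * the current target, e.g. max_working = 8 gives 8 * 500 * 2 = 8000ms, so
 * starvation has to persist much longer before extra threads are forced in. */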
648
649 static void hill_climbing_force_change (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition);
650
651 static gsize WINAPI
652 monitor_thread (gpointer unused)
653 {
654         MonoInternalThread *internal;
655         guint i;
656
657         if (!mono_refcount_tryinc (&worker))
658                 return 0;
659
660         internal = mono_thread_internal_current ();
661         g_assert (internal);
662
663         mono_cpu_usage (worker.cpu_usage_state);
664
665         // printf ("monitor_thread: start\n");
666
667         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, started", mono_native_thread_id_get ());
668
669         do {
670                 ThreadPoolWorkerCounter counter;
671                 gboolean limit_worker_max_reached;
672                 gint32 interval_left = MONITOR_INTERVAL;
673                 gint32 awake = 0; /* number of spurious awakes we tolerate before doing a round of rebalancing */
674
675                 g_assert (worker.monitor_status != MONITOR_STATUS_NOT_RUNNING);
676
677                 // counter = COUNTER_READ ();
678                 // printf ("monitor_thread: starting = %d working = %d parked = %d max_working = %d\n",
679                 //      counter._.starting, counter._.working, counter._.parked, counter._.max_working);
680
681                 do {
682                         gint64 ts;
683                         gboolean alerted = FALSE;
684
685                         if (mono_runtime_is_shutting_down ())
686                                 break;
687
688                         ts = mono_msec_ticks ();
689                         if (mono_thread_info_sleep (interval_left, &alerted) == 0)
690                                 break;
691                         interval_left -= mono_msec_ticks () - ts;
692
693                         mono_thread_interruption_checkpoint ();
694                 } while (interval_left > 0 && ++awake < 10);
695
696                 if (mono_runtime_is_shutting_down ())
697                         continue;
698
699                 if (worker.suspended)
700                         continue;
701
702                 if (work_item_count () == 0)
703                         continue;
704
705                 worker.cpu_usage = mono_cpu_usage (worker.cpu_usage_state);
706
707                 if (!monitor_sufficient_delay_since_last_dequeue ())
708                         continue;
709
710                 limit_worker_max_reached = FALSE;
711
712                 COUNTER_ATOMIC (counter, {
713                         if (counter._.max_working >= worker.limit_worker_max) {
714                                 limit_worker_max_reached = TRUE;
715                                 break;
716                         }
717                         counter._.max_working ++;
718                 });
719
720                 if (limit_worker_max_reached)
721                         continue;
722
723                 hill_climbing_force_change (counter._.max_working, TRANSITION_STARVATION);
724
725                 for (i = 0; i < 5; ++i) {
726                         if (mono_runtime_is_shutting_down ())
727                                 break;
728
729                         if (worker_try_unpark ()) {
730                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, unparked", mono_native_thread_id_get ());
731                                 break;
732                         }
733
734                         if (worker_try_create ()) {
735                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, created", mono_native_thread_id_get ());
736                                 break;
737                         }
738                 }
739         } while (monitor_should_keep_running ());
740
741         // printf ("monitor_thread: stop\n");
742
743         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, finished", mono_native_thread_id_get ());
744
745         mono_refcount_dec (&worker);
746         return 0;
747 }
748
749 static void
750 monitor_ensure_running (void)
751 {
752         MonoError error;
753         for (;;) {
754                 switch (worker.monitor_status) {
755                 case MONITOR_STATUS_REQUESTED:
756                         // printf ("monitor_thread: requested\n");
757                         return;
758                 case MONITOR_STATUS_WAITING_FOR_REQUEST:
759                         // printf ("monitor_thread: waiting for request\n");
760                         InterlockedCompareExchange (&worker.monitor_status, MONITOR_STATUS_REQUESTED, MONITOR_STATUS_WAITING_FOR_REQUEST);
761                         break;
762                 case MONITOR_STATUS_NOT_RUNNING:
763                         // printf ("monitor_thread: not running\n");
764                         if (mono_runtime_is_shutting_down ())
765                                 return;
766                         if (InterlockedCompareExchange (&worker.monitor_status, MONITOR_STATUS_REQUESTED, MONITOR_STATUS_NOT_RUNNING) == MONITOR_STATUS_NOT_RUNNING) {
767                                 // printf ("monitor_thread: creating\n");
768                                 if (!mono_thread_create_internal (mono_get_root_domain (), monitor_thread, NULL, MONO_THREAD_CREATE_FLAGS_THREADPOOL | MONO_THREAD_CREATE_FLAGS_SMALL_STACK, &error)) {
769                                         // printf ("monitor_thread: creating failed\n");
770                                         worker.monitor_status = MONITOR_STATUS_NOT_RUNNING;
771                                         mono_error_cleanup (&error);
772                                         mono_refcount_dec (&worker);
773                                 }
774                                 return;
775                         }
776                         break;
777                 default: g_assert_not_reached ();
778                 }
779         }
780 }
781
782 static void
783 hill_climbing_change_thread_count (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition)
784 {
785         ThreadPoolHillClimbing *hc;
786
787         hc = &worker.heuristic_hill_climbing;
788
789         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] hill climbing, change max number of threads %d", mono_native_thread_id_get (), new_thread_count);
790
791         hc->last_thread_count = new_thread_count;
792         hc->current_sample_interval = rand_next (&hc->random_interval_generator, hc->sample_interval_low, hc->sample_interval_high);
793         hc->elapsed_since_last_change = 0;
794         hc->completions_since_last_change = 0;
795 }
796
797 static void
798 hill_climbing_force_change (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition)
799 {
800         ThreadPoolHillClimbing *hc;
801
802         hc = &worker.heuristic_hill_climbing;
803
804         if (new_thread_count != hc->last_thread_count) {
805                 hc->current_control_setting += new_thread_count - hc->last_thread_count;
806                 hill_climbing_change_thread_count (new_thread_count, transition);
807         }
808 }
809
810 static double_complex
811 hill_climbing_get_wave_component (gdouble *samples, guint sample_count, gdouble period)
812 {
813         ThreadPoolHillClimbing *hc;
814         gdouble w, cosine, sine, coeff, q0, q1, q2;
815         guint i;
816
817         g_assert (sample_count >= period);
818         g_assert (period >= 2);
819
820         hc = &worker.heuristic_hill_climbing;
821
822         w = 2.0 * M_PI / period;
823         cosine = cos (w);
824         sine = sin (w);
825         coeff = 2.0 * cosine;
826         q0 = q1 = q2 = 0;
827
828         for (i = 0; i < sample_count; ++i) {
829                 q0 = coeff * q1 - q2 + samples [(hc->total_samples - sample_count + i) % hc->samples_to_measure];
830                 q2 = q1;
831                 q1 = q0;
832         }
833
834         return mono_double_complex_scalar_div (mono_double_complex_make (q1 - q2 * cosine, (q2 * sine)), ((gdouble)sample_count));
835 }
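/* The recurrence above (q0 = coeff * q1 - q2 + sample, with coeff = 2cos(w))
 * is the Goertzel algorithm: it evaluates a single discrete Fourier component
 * at frequency 1/period without computing a full DFT, which is all the hill
 * climbing needs to compare the injected thread-count wave against the
 * measured throughput wave. */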
836
837 static gint16
838 hill_climbing_update (gint16 current_thread_count, guint32 sample_duration, gint32 completions, gint64 *adjustment_interval)
839 {
840         ThreadPoolHillClimbing *hc;
841         ThreadPoolHeuristicStateTransition transition;
842         gdouble throughput;
843         gdouble throughput_error_estimate;
844         gdouble confidence;
845         gdouble move;
846         gdouble gain;
847         gint sample_index;
848         gint sample_count;
849         gint new_thread_wave_magnitude;
850         gint new_thread_count;
851         double_complex thread_wave_component;
852         double_complex throughput_wave_component;
853         double_complex ratio;
854
855         g_assert (adjustment_interval);
856
857         hc = &worker.heuristic_hill_climbing;
858
859         /* If someone changed the thread count without telling us, update our records accordingly. */
860         if (current_thread_count != hc->last_thread_count)
861                 hill_climbing_force_change (current_thread_count, TRANSITION_INITIALIZING);
862
863         /* Update the cumulative stats for this thread count */
864         hc->elapsed_since_last_change += sample_duration;
865         hc->completions_since_last_change += completions;
866
867         /* Add in any data we've already collected about this sample */
868         sample_duration += hc->accumulated_sample_duration;
869         completions += hc->accumulated_completion_count;
870
871         /* We need to make sure we're collecting reasonably accurate data. Since we're just counting the end
872          * of each work item, we are going to be missing some data about what really happened during the
873          * sample interval. The count produced by each thread includes an initial work item that may have
874          * started well before the start of the interval, and each thread may have been running some new
875          * work item for some time before the end of the interval, which did not yet get counted. So
876          * our count is going to be off by +/- threadCount workitems.
877          *
878          * The exception is that the thread that reported to us last time definitely wasn't running any work
879          * at that time, and the thread that's reporting now definitely isn't running a work item now. So
880          * we really only need to consider threadCount-1 threads.
881          *
882          * Thus the percent error in our count is +/- (threadCount-1)/numCompletions.
883          *
884          * We cannot rely on the frequency-domain analysis we'll be doing later to filter out this error, because
885          * of the way it accumulates over time. If this sample is off by, say, 33% in the negative direction,
886          * then the next one likely will be too. The one after that will include the sum of the completions
887          * we missed in the previous samples, and so will be 33% positive. So every three samples we'll have
888          * two "low" samples and one "high" sample. This will appear as periodic variation right in the frequency
889          * range we're targeting, which will not be filtered by the frequency-domain translation. */
890         if (hc->total_samples > 0 && ((current_thread_count - 1.0) / completions) >= hc->max_sample_error) {
891                 /* Not accurate enough yet. Let's accumulate the data so
892                  * far, and tell the ThreadPoolWorker to collect a little more. */
893                 hc->accumulated_sample_duration = sample_duration;
894                 hc->accumulated_completion_count = completions;
895                 *adjustment_interval = 10;
896                 return current_thread_count;
897         }
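        /* Worked example (hypothetical numbers): with current_thread_count = 10
         * and completions = 50, the relative count error is (10 - 1) / 50 = 0.18,
         * which is above max_sample_error (0.15), so the sample is accumulated and
         * re-checked roughly 10ms later; at 100 completions the error drops to
         * 0.09 and the sample is accepted below. */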
898
899         /* We've got enough data for our sample; reset our accumulators for next time. */
900         hc->accumulated_sample_duration = 0;
901         hc->accumulated_completion_count = 0;
902
903         /* Add the current thread count and throughput sample to our history. */
904         throughput = ((gdouble) completions) / sample_duration;
905
906         sample_index = hc->total_samples % hc->samples_to_measure;
907         hc->samples [sample_index] = throughput;
908         hc->thread_counts [sample_index] = current_thread_count;
909         hc->total_samples ++;
910
911         /* Set up defaults for our metrics. */
912         thread_wave_component = mono_double_complex_make(0, 0);
913         throughput_wave_component = mono_double_complex_make(0, 0);
914         throughput_error_estimate = 0;
915         ratio = mono_double_complex_make(0, 0);
916         confidence = 0;
917
918         transition = TRANSITION_WARMUP;
919
920         /* How many samples will we use? It must be at least the three wave periods we're looking for, and it must also
921          * be a whole multiple of the primary wave's period; otherwise the frequency we're looking for will fall between
922          * two frequency bands in the Fourier analysis, and we won't be able to measure it accurately. */
923         sample_count = ((gint) MIN (hc->total_samples - 1, hc->samples_to_measure) / hc->wave_period) * hc->wave_period;
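        /* e.g. with 30 usable samples and the default wave_period of 4 this gives
         * sample_count = (30 / 4) * 4 = 28, i.e. exactly 7 whole wave periods. */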
924
925         if (sample_count > hc->wave_period) {
926                 guint i;
927                 gdouble average_throughput;
928                 gdouble average_thread_count;
929                 gdouble sample_sum = 0;
930                 gdouble thread_sum = 0;
931
932                 /* Average the throughput and thread count samples, so we can scale the wave magnitudes later. */
933                 for (i = 0; i < sample_count; ++i) {
934                         guint j = (hc->total_samples - sample_count + i) % hc->samples_to_measure;
935                         sample_sum += hc->samples [j];
936                         thread_sum += hc->thread_counts [j];
937                 }
938
939                 average_throughput = sample_sum / sample_count;
940                 average_thread_count = thread_sum / sample_count;
941
942                 if (average_throughput > 0 && average_thread_count > 0) {
943                         gdouble noise_for_confidence, adjacent_period_1, adjacent_period_2;
944
945                         /* Calculate the periods of the adjacent frequency bands we'll be using to
946                          * measure noise levels. We want the two adjacent Fourier frequency bands. */
947                         adjacent_period_1 = sample_count / (((gdouble) sample_count) / ((gdouble) hc->wave_period) + 1);
948                         adjacent_period_2 = sample_count / (((gdouble) sample_count) / ((gdouble) hc->wave_period) - 1);
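                        /* e.g. with sample_count = 28 and wave_period = 4 (so 7 periods fit),
                         * the adjacent bands have periods 28 / (7 + 1) = 3.5 and
                         * 28 / (7 - 1) ~= 4.67. */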
949
950                         /* Get the three different frequency components of the throughput (scaled by average
951                          * throughput). Our "error" estimate (the amount of noise that might be present in the
952                          * frequency band we're really interested in) is the average of the adjacent bands. */
953                         throughput_wave_component = mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->samples, sample_count, hc->wave_period), average_throughput);
954                         throughput_error_estimate = cabs (mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->samples, sample_count, adjacent_period_1), average_throughput));
955
956                         if (adjacent_period_2 <= sample_count) {
957                                 throughput_error_estimate = MAX (throughput_error_estimate, cabs (mono_double_complex_scalar_div (hill_climbing_get_wave_component (
958                                         hc->samples, sample_count, adjacent_period_2), average_throughput)));
959                         }
960
961                         /* Do the same for the thread counts, so we have something to compare to. We don't
962                          * measure thread count noise, because there is none; these are exact measurements. */
963                         thread_wave_component = mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->thread_counts, sample_count, hc->wave_period), average_thread_count);
964
965                         /* Update our moving average of the throughput noise. We'll use this
966                          * later as feedback to determine the new size of the thread wave. */
967                         if (hc->average_throughput_noise == 0) {
968                                 hc->average_throughput_noise = throughput_error_estimate;
969                         } else {
970                                 hc->average_throughput_noise = (hc->throughput_error_smoothing_factor * throughput_error_estimate)
971                                         + ((1.0 - hc->throughput_error_smoothing_factor) * hc->average_throughput_noise);
972                         }
973
974                         if (cabs (thread_wave_component) > 0) {
975                                 /* Adjust the throughput wave so it's centered around the target wave,
976                                  * and then calculate the adjusted throughput/thread ratio. */
977                                 ratio = mono_double_complex_div (mono_double_complex_sub (throughput_wave_component, mono_double_complex_scalar_mul(thread_wave_component, hc->target_throughput_ratio)), thread_wave_component);
978                                 transition = TRANSITION_CLIMBING_MOVE;
979                         } else {
980                                 ratio = mono_double_complex_make (0, 0);
981                                 transition = TRANSITION_STABILIZING;
982                         }
983
984                         noise_for_confidence = MAX (hc->average_throughput_noise, throughput_error_estimate);
985                         if (noise_for_confidence > 0) {
986                                 confidence = cabs (thread_wave_component) / noise_for_confidence / hc->target_signal_to_noise_ratio;
987                         } else {
988                                 /* there is no noise! */
989                                 confidence = 1.0;
990                         }
991                 }
992         }
993
994         /* We use just the real part of the complex ratio we just calculated. If the throughput signal
995          * is exactly in phase with the thread signal, this will be the same as taking the magnitude of
996          * the complex move and moving that far up. If they're 180 degrees out of phase, we'll move
997          * backward (because this indicates that our changes are having the opposite of the intended effect).
998          * If they're 90 degrees out of phase, we won't move at all, because we can't tell whether we're
999          * having a negative or positive effect on throughput. */
1000         move = creal (ratio);
1001         move = CLAMP (move, -1.0, 1.0);
1002
1003         /* Apply our confidence multiplier. */
1004         move *= CLAMP (confidence, -1.0, 1.0);
1005
1006         /* Now apply non-linear gain, such that values around zero are attenuated, while higher values
1007          * are enhanced. This allows us to move quickly if we're far away from the target, but more slowly
1008          * if we're getting close, giving us rapid ramp-up without wild oscillations around the target. */
1009         gain = hc->max_change_per_second * sample_duration;
1010         move = pow (fabs (move), hc->gain_exponent) * (move >= 0.0 ? 1 : -1) * gain;
1011         move = MIN (move, hc->max_change_per_sample);
1012
1013         /* If the result was positive, and CPU is > 95%, refuse the move. */
1014         if (move > 0.0 && worker.cpu_usage > CPU_USAGE_HIGH)
1015                 move = 0.0;
1016
1017         /* Apply the move to our control setting. */
1018         hc->current_control_setting += move;
1019
1020         /* Calculate the new thread wave magnitude, which is based on the moving average we've been keeping of the
1021          * throughput error.  This average starts at zero, so we'll start with a nice safe little wave at first. */
1022         new_thread_wave_magnitude = (gint)(0.5 + (hc->current_control_setting * hc->average_throughput_noise
1023                 * hc->target_signal_to_noise_ratio * hc->thread_magnitude_multiplier * 2.0));
1024         new_thread_wave_magnitude = CLAMP (new_thread_wave_magnitude, 1, hc->max_thread_wave_magnitude);
1025
1026         /* Make sure our control setting is within the ThreadPoolWorker's limits. */
1027         hc->current_control_setting = CLAMP (hc->current_control_setting, worker.limit_worker_min, worker.limit_worker_max - new_thread_wave_magnitude);
1028
1029         /* Calculate the new thread count (control setting + square wave). */
1030         new_thread_count = (gint)(hc->current_control_setting + new_thread_wave_magnitude * ((hc->total_samples / (hc->wave_period / 2)) % 2));
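        /* The "(total_samples / (wave_period / 2)) % 2" factor is a square wave
         * toggling between 0 and 1 every wave_period / 2 samples (every 2 samples
         * with the default period of 4), so the target oscillates between
         * current_control_setting and current_control_setting + magnitude. That
         * oscillation is the probe signal the Fourier analysis above measures. */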
1031
1032         /* Make sure the new thread count doesn't exceed the ThreadPoolWorker's limits. */
1033         new_thread_count = CLAMP (new_thread_count, worker.limit_worker_min, worker.limit_worker_max);
1034
1035         if (new_thread_count != current_thread_count)
1036                 hill_climbing_change_thread_count (new_thread_count, transition);
1037
1038         if (creal (ratio) < 0.0 && new_thread_count == worker.limit_worker_min)
1039                 *adjustment_interval = (gint)(0.5 + hc->current_sample_interval * (10.0 * MAX (-1.0 * creal (ratio), 1.0)));
1040         else
1041                 *adjustment_interval = hc->current_sample_interval;
1042
1043         return new_thread_count;
1044 }
1045
1046 static gboolean
1047 heuristic_should_adjust (void)
1048 {
1049         if (worker.heuristic_last_dequeue > worker.heuristic_last_adjustment + worker.heuristic_adjustment_interval) {
1050                 ThreadPoolWorkerCounter counter;
1051                 counter = COUNTER_READ ();
1052                 if (counter._.working <= counter._.max_working)
1053                         return TRUE;
1054         }
1055
1056         return FALSE;
1057 }
1058
1059 static void
1060 heuristic_adjust (void)
1061 {
1062         if (mono_coop_mutex_trylock (&worker.heuristic_lock) == 0) {
1063                 gint32 completions = InterlockedExchange (&worker.heuristic_completions, 0);
1064                 gint64 sample_end = mono_msec_ticks ();
1065                 gint64 sample_duration = sample_end - worker.heuristic_sample_start;
1066
1067                 if (sample_duration >= worker.heuristic_adjustment_interval / 2) {
1068                         ThreadPoolWorkerCounter counter;
1069                         gint16 new_thread_count;
1070
1071                         counter = COUNTER_READ ();
1072                         new_thread_count = hill_climbing_update (counter._.max_working, sample_duration, completions, &worker.heuristic_adjustment_interval);
1073
1074                         COUNTER_ATOMIC (counter, {
1075                                 counter._.max_working = new_thread_count;
1076                         });
1077
1078                         if (new_thread_count > counter._.max_working)
1079                                 worker_request ();
1080
1081                         worker.heuristic_sample_start = sample_end;
1082                         worker.heuristic_last_adjustment = mono_msec_ticks ();
1083                 }
1084
1085                 mono_coop_mutex_unlock (&worker.heuristic_lock);
1086         }
1087 }
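/* Only one completing worker at a time runs the hill-climbing update: if the
 * trylock on heuristic_lock does not succeed (non-zero return above), the
 * caller simply skips the adjustment instead of blocking, and a later
 * completion will retry. */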
1088
1089 static void
1090 heuristic_notify_work_completed (void)
1091 {
1092         InterlockedIncrement (&worker.heuristic_completions);
1093         worker.heuristic_last_dequeue = mono_msec_ticks ();
1094
1095         if (heuristic_should_adjust ())
1096                 heuristic_adjust ();
1097 }
1098
1099 gboolean
1100 mono_threadpool_worker_notify_completed (void)
1101 {
1102         ThreadPoolWorkerCounter counter;
1103
1104         heuristic_notify_work_completed ();
1105
1106         counter = COUNTER_READ ();
1107         return counter._.working <= counter._.max_working;
1108 }
1109
1110 gint32
1111 mono_threadpool_worker_get_min (void)
1112 {
1113         gint32 ret;
1114
1115         if (!mono_refcount_tryinc (&worker))
1116                 return 0;
1117
1118         ret = worker.limit_worker_min;
1119
1120         mono_refcount_dec (&worker);
1121         return ret;
1122 }
1123
1124 gboolean
1125 mono_threadpool_worker_set_min (gint32 value)
1126 {
1127         if (value <= 0 || value > worker.limit_worker_max)
1128                 return FALSE;
1129
1130         if (!mono_refcount_tryinc (&worker))
1131                 return FALSE;
1132
1133         worker.limit_worker_min = value;
1134
1135         mono_refcount_dec (&worker);
1136         return TRUE;
1137 }
1138
1139 gint32
1140 mono_threadpool_worker_get_max (void)
1141 {
1142         gint32 ret;
1143
1144         if (!mono_refcount_tryinc (&worker))
1145                 return 0;
1146
1147         ret = worker.limit_worker_max;
1148
1149         mono_refcount_dec (&worker);
1150         return ret;
1151 }
1152
1153 gboolean
1154 mono_threadpool_worker_set_max (gint32 value)
1155 {
1156         gint32 cpu_count;
1157
1158         cpu_count = mono_cpu_count ();
1159         if (value < worker.limit_worker_min || value < cpu_count)
1160                 return FALSE;
1161
1162         if (!mono_refcount_tryinc (&worker))
1163                 return FALSE;
1164
1165         worker.limit_worker_max = value;
1166
1167         mono_refcount_dec (&worker);
1168         return TRUE;
1169 }
1170
1171 void
1172 mono_threadpool_worker_set_suspended (gboolean suspended)
1173 {
1174         if (!mono_refcount_tryinc (&worker))
1175                 return;
1176
1177         worker.suspended = suspended;
1178         if (!suspended)
1179                 worker_request ();
1180
1181         mono_refcount_dec (&worker);
1182 }