[threadpool-ms] Fix race condition on domain unload (#3592)
[mono.git] / mono / metadata / threadpool-ms.c
1 /*
2  * threadpool-ms.c: Microsoft threadpool runtime support
3  *
4  * Author:
5  *      Ludovic Henry (ludovic.henry@xamarin.com)
6  *
7  * Copyright 2015 Xamarin, Inc (http://www.xamarin.com)
8  * Licensed under the MIT license. See LICENSE file in the project root for full license information.
9  */
10
11 //
12 // Copyright (c) Microsoft. All rights reserved.
13 // Licensed under the MIT license. See LICENSE file in the project root for full license information.
14 //
15 // Files:
16 //  - src/vm/comthreadpool.cpp
17 //  - src/vm/win32threadpool.cpp
18 //  - src/vm/threadpoolrequest.cpp
19 //  - src/vm/hillclimbing.cpp
20 //
21 // Ported from C++ to C and adjusted to Mono runtime
22
23 #include <stdlib.h>
24 #define _USE_MATH_DEFINES // needed by MSVC to define math constants
25 #include <math.h>
26 #include <config.h>
27 #include <glib.h>
28
29 #include <mono/metadata/class-internals.h>
30 #include <mono/metadata/exception.h>
31 #include <mono/metadata/gc-internals.h>
32 #include <mono/metadata/object.h>
33 #include <mono/metadata/object-internals.h>
34 #include <mono/metadata/threadpool-ms.h>
35 #include <mono/metadata/threadpool-ms-io.h>
36 #include <mono/metadata/w32event.h>
37 #include <mono/utils/atomic.h>
38 #include <mono/utils/mono-compiler.h>
39 #include <mono/utils/mono-complex.h>
40 #include <mono/utils/mono-lazy-init.h>
41 #include <mono/utils/mono-logger.h>
42 #include <mono/utils/mono-logger-internals.h>
43 #include <mono/utils/mono-proclib.h>
44 #include <mono/utils/mono-threads.h>
45 #include <mono/utils/mono-time.h>
46 #include <mono/utils/mono-rand.h>
47
48 #define CPU_USAGE_LOW 80
49 #define CPU_USAGE_HIGH 95
50
51 #define MONITOR_INTERVAL 500 // ms
52 #define MONITOR_MINIMAL_LIFETIME 60 * 1000 // ms
53
54 #define WORKER_CREATION_MAX_PER_SEC 10
55
56 /* The exponent to apply to the gain. 1.0 means to use linear gain,
57  * higher values will enhance large moves and damp small ones.
58  * default: 2.0 */
59 #define HILL_CLIMBING_GAIN_EXPONENT 2.0
60
61 /* The 'cost' of a thread. 0 means drive for increased throughput regardless
62  * of thread count, higher values bias more against higher thread counts.
63  * default: 0.15 */
64 #define HILL_CLIMBING_BIAS 0.15
65
66 #define HILL_CLIMBING_WAVE_PERIOD 4
67 #define HILL_CLIMBING_MAX_WAVE_MAGNITUDE 20
68 #define HILL_CLIMBING_WAVE_MAGNITUDE_MULTIPLIER 1.0
69 #define HILL_CLIMBING_WAVE_HISTORY_SIZE 8
70 #define HILL_CLIMBING_TARGET_SIGNAL_TO_NOISE_RATIO 3.0
71 #define HILL_CLIMBING_MAX_CHANGE_PER_SECOND 4
72 #define HILL_CLIMBING_MAX_CHANGE_PER_SAMPLE 20
73 #define HILL_CLIMBING_SAMPLE_INTERVAL_LOW 10
74 #define HILL_CLIMBING_SAMPLE_INTERVAL_HIGH 200
75 #define HILL_CLIMBING_ERROR_SMOOTHING_FACTOR 0.01
76 #define HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT 0.15
77
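/* The four counters below are packed into a single 64-bit value so that they
 * can all be read and updated together atomically with one CAS (see
 * COUNTER_READ and COUNTER_ATOMIC below). */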
78 typedef union {
79         struct {
80                 gint16 max_working; /* determined by heuristic */
81                 gint16 active; /* executing worker_thread */
82                 gint16 working; /* actively executing worker_thread, not parked */
83                 gint16 parked; /* parked */
84         } _;
85         gint64 as_gint64;
86 } ThreadPoolCounter;
87
88 typedef struct {
89         MonoDomain *domain;
90         gint32 outstanding_request;
91 } ThreadPoolDomain;
92
93 typedef MonoInternalThread ThreadPoolWorkingThread;
94
95 typedef struct {
96         gint32 wave_period;
97         gint32 samples_to_measure;
98         gdouble target_throughput_ratio;
99         gdouble target_signal_to_noise_ratio;
100         gdouble max_change_per_second;
101         gdouble max_change_per_sample;
102         gint32 max_thread_wave_magnitude;
103         gint32 sample_interval_low;
104         gdouble thread_magnitude_multiplier;
105         gint32 sample_interval_high;
106         gdouble throughput_error_smoothing_factor;
107         gdouble gain_exponent;
108         gdouble max_sample_error;
109
110         gdouble current_control_setting;
111         gint64 total_samples;
112         gint16 last_thread_count;
113         gdouble elapsed_since_last_change;
114         gdouble completions_since_last_change;
115
116         gdouble average_throughput_noise;
117
118         gdouble *samples;
119         gdouble *thread_counts;
120
121         guint32 current_sample_interval;
122         gpointer random_interval_generator;
123
124         gint32 accumulated_completion_count;
125         gdouble accumulated_sample_duration;
126 } ThreadPoolHillClimbing;
127
128 typedef struct {
129         ThreadPoolCounter counters;
130
131         GPtrArray *domains; // ThreadPoolDomain* []
132         MonoCoopMutex domains_lock;
133
134         GPtrArray *working_threads; // ThreadPoolWorkingThread* []
135         gint32 parked_threads_count;
136         MonoCoopCond parked_threads_cond;
137         MonoCoopMutex active_threads_lock; /* protect access to working_threads and parked_threads */
138
139         guint32 worker_creation_current_second;
140         guint32 worker_creation_current_count;
141         MonoCoopMutex worker_creation_lock;
142
143         gint32 heuristic_completions;
144         gint64 heuristic_sample_start;
145         gint64 heuristic_last_dequeue; // ms
146         gint64 heuristic_last_adjustment; // ms
147         gint64 heuristic_adjustment_interval; // ms
148         ThreadPoolHillClimbing heuristic_hill_climbing;
149         MonoCoopMutex heuristic_lock;
150
151         gint32 limit_worker_min;
152         gint32 limit_worker_max;
153         gint32 limit_io_min;
154         gint32 limit_io_max;
155
156         MonoCpuUsageState *cpu_usage_state;
157         gint32 cpu_usage;
158
159         /* suspended by the debugger */
160         gboolean suspended;
161 } ThreadPool;
162
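/* Coordinates domain unload with in-flight threadpool jobs: the worker that
 * finishes the last job of an unloading domain signals `cond`, and `ref` is a
 * reference count; whoever drops it to 0 destroys the condition variable and
 * frees the structure (see worker_thread). */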
163 typedef struct {
164         gint32 ref;
165         MonoCoopCond cond;
166 } ThreadPoolDomainCleanupSemaphore;
167
168 typedef enum {
169         TRANSITION_WARMUP,
170         TRANSITION_INITIALIZING,
171         TRANSITION_RANDOM_MOVE,
172         TRANSITION_CLIMBING_MOVE,
173         TRANSITION_CHANGE_POINT,
174         TRANSITION_STABILIZING,
175         TRANSITION_STARVATION,
176         TRANSITION_THREAD_TIMED_OUT,
177         TRANSITION_UNDEFINED,
178 } ThreadPoolHeuristicStateTransition;
179
180 static mono_lazy_init_t status = MONO_LAZY_INIT_STATUS_NOT_INITIALIZED;
181
182 enum {
183         MONITOR_STATUS_REQUESTED,
184         MONITOR_STATUS_WAITING_FOR_REQUEST,
185         MONITOR_STATUS_NOT_RUNNING,
186 };
187
188 static gint32 monitor_status = MONITOR_STATUS_NOT_RUNNING;
189
190 static ThreadPool* threadpool;
191
192 #define COUNTER_CHECK(counter) \
193         do { \
194                 g_assert (counter._.max_working > 0); \
195                 g_assert (counter._.working >= 0); \
196                 g_assert (counter._.active >= 0); \
197         } while (0)
198
199 #define COUNTER_READ() (InterlockedRead64 (&threadpool->counters.as_gint64))
200
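/* Snapshot the counters into `var`, let `block` modify the local copy, then
 * publish it with a compare-and-swap; if another thread changed the counters
 * in the meantime, the snapshot is refreshed and `block` runs again. */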
201 #define COUNTER_ATOMIC(var,block) \
202         do { \
203                 ThreadPoolCounter __old; \
204                 do { \
205                         g_assert (threadpool); \
206                         __old.as_gint64 = COUNTER_READ (); \
207                         (var) = __old; \
208                         { block; } \
209                         COUNTER_CHECK (var); \
210                 } while (InterlockedCompareExchange64 (&threadpool->counters.as_gint64, (var).as_gint64, __old.as_gint64) != __old.as_gint64); \
211         } while (0)
212
213 #define COUNTER_TRY_ATOMIC(res,var,block) \
214         do { \
215                 ThreadPoolCounter __old; \
216                 do { \
217                         g_assert (threadpool); \
218                         __old.as_gint64 = COUNTER_READ (); \
219                         (var) = __old; \
220                         (res) = FALSE; \
221                         { block; } \
222                         COUNTER_CHECK (var); \
223                         (res) = InterlockedCompareExchange64 (&threadpool->counters.as_gint64, (var).as_gint64, __old.as_gint64) == __old.as_gint64; \
224                 } while (0); \
225         } while (0)
226
227 static gpointer
228 rand_create (void)
229 {
230         mono_rand_open ();
231         return mono_rand_init (NULL, 0);
232 }
233
234 static guint32
235 rand_next (gpointer *handle, guint32 min, guint32 max)
236 {
237         MonoError error;
238         guint32 val;
239         mono_rand_try_get_uint32 (handle, &val, min, max, &error);
240         // FIXME handle error
241         mono_error_assert_ok (&error);
242         return val;
243 }
244
245 static void
246 rand_free (gpointer handle)
247 {
248         mono_rand_close (handle);
249 }
250
251 static void
252 initialize (void)
253 {
254         ThreadPoolHillClimbing *hc;
255         const char *threads_per_cpu_env;
256         gint threads_per_cpu;
257         gint threads_count;
258
259         g_assert (!threadpool);
260         threadpool = g_new0 (ThreadPool, 1);
261         g_assert (threadpool);
262
263         threadpool->domains = g_ptr_array_new ();
264         mono_coop_mutex_init (&threadpool->domains_lock);
265
266         threadpool->parked_threads_count = 0;
267         mono_coop_cond_init (&threadpool->parked_threads_cond);
268         threadpool->working_threads = g_ptr_array_new ();
269         mono_coop_mutex_init (&threadpool->active_threads_lock);
270
271         threadpool->worker_creation_current_second = -1;
272         mono_coop_mutex_init (&threadpool->worker_creation_lock);
273
274         threadpool->heuristic_adjustment_interval = 10;
275         mono_coop_mutex_init (&threadpool->heuristic_lock);
276
277         mono_rand_open ();
278
279         hc = &threadpool->heuristic_hill_climbing;
280
281         hc->wave_period = HILL_CLIMBING_WAVE_PERIOD;
282         hc->max_thread_wave_magnitude = HILL_CLIMBING_MAX_WAVE_MAGNITUDE;
283         hc->thread_magnitude_multiplier = (gdouble) HILL_CLIMBING_WAVE_MAGNITUDE_MULTIPLIER;
284         hc->samples_to_measure = hc->wave_period * HILL_CLIMBING_WAVE_HISTORY_SIZE;
285         hc->target_throughput_ratio = (gdouble) HILL_CLIMBING_BIAS;
286         hc->target_signal_to_noise_ratio = (gdouble) HILL_CLIMBING_TARGET_SIGNAL_TO_NOISE_RATIO;
287         hc->max_change_per_second = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SECOND;
288         hc->max_change_per_sample = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SAMPLE;
289         hc->sample_interval_low = HILL_CLIMBING_SAMPLE_INTERVAL_LOW;
290         hc->sample_interval_high = HILL_CLIMBING_SAMPLE_INTERVAL_HIGH;
291         hc->throughput_error_smoothing_factor = (gdouble) HILL_CLIMBING_ERROR_SMOOTHING_FACTOR;
292         hc->gain_exponent = (gdouble) HILL_CLIMBING_GAIN_EXPONENT;
293         hc->max_sample_error = (gdouble) HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT;
294         hc->current_control_setting = 0;
295         hc->total_samples = 0;
296         hc->last_thread_count = 0;
297         hc->average_throughput_noise = 0;
298         hc->elapsed_since_last_change = 0;
299         hc->accumulated_completion_count = 0;
300         hc->accumulated_sample_duration = 0;
301         hc->samples = g_new0 (gdouble, hc->samples_to_measure);
302         hc->thread_counts = g_new0 (gdouble, hc->samples_to_measure);
303         hc->random_interval_generator = rand_create ();
304         hc->current_sample_interval = rand_next (&hc->random_interval_generator, hc->sample_interval_low, hc->sample_interval_high);
305
306         if (!(threads_per_cpu_env = g_getenv ("MONO_THREADS_PER_CPU")))
307                 threads_per_cpu = 1;
308         else
309                 threads_per_cpu = CLAMP (atoi (threads_per_cpu_env), 1, 50);
310
311         threads_count = mono_cpu_count () * threads_per_cpu;
312
313         threadpool->limit_worker_min = threadpool->limit_io_min = threads_count;
314
315 #if defined (PLATFORM_ANDROID) || defined (HOST_IOS)
316         threadpool->limit_worker_max = threadpool->limit_io_max = CLAMP (threads_count * 100, MIN (threads_count, 200), MAX (threads_count, 200));
317 #else
318         threadpool->limit_worker_max = threadpool->limit_io_max = threads_count * 100;
319 #endif
320
321         threadpool->counters._.max_working = threadpool->limit_worker_min;
322
323         threadpool->cpu_usage_state = g_new0 (MonoCpuUsageState, 1);
324
325         threadpool->suspended = FALSE;
326 }
327
328 static void worker_kill (ThreadPoolWorkingThread *thread);
329
330 static void
331 cleanup (void)
332 {
333         guint i;
334
335         /* we assume throughout the code that we are
336          * cleaning up only when the runtime is shutting down */
337         g_assert (mono_runtime_is_shutting_down ());
338
339         while (monitor_status != MONITOR_STATUS_NOT_RUNNING)
340                 mono_thread_info_sleep (1, NULL);
341
342         mono_coop_mutex_lock (&threadpool->active_threads_lock);
343
344         /* stop all threadpool->working_threads */
345         for (i = 0; i < threadpool->working_threads->len; ++i)
346                 worker_kill ((ThreadPoolWorkingThread*) g_ptr_array_index (threadpool->working_threads, i));
347
348         /* unpark all threadpool->parked_threads */
349         mono_coop_cond_broadcast (&threadpool->parked_threads_cond);
350
351         mono_coop_mutex_unlock (&threadpool->active_threads_lock);
352 }
353
354 gboolean
355 mono_threadpool_ms_enqueue_work_item (MonoDomain *domain, MonoObject *work_item, MonoError *error)
356 {
357         static MonoClass *threadpool_class = NULL;
358         static MonoMethod *unsafe_queue_custom_work_item_method = NULL;
359         MonoDomain *current_domain;
360         MonoBoolean f;
361         gpointer args [2];
362
363         mono_error_init (error);
364         g_assert (work_item);
365
366         if (!threadpool_class)
367                 threadpool_class = mono_class_load_from_name (mono_defaults.corlib, "System.Threading", "ThreadPool");
368
369         if (!unsafe_queue_custom_work_item_method)
370                 unsafe_queue_custom_work_item_method = mono_class_get_method_from_name (threadpool_class, "UnsafeQueueCustomWorkItem", 2);
371         g_assert (unsafe_queue_custom_work_item_method);
372
373         f = FALSE;
374
375         args [0] = (gpointer) work_item;
376         args [1] = (gpointer) &f;
377
378         current_domain = mono_domain_get ();
379         if (current_domain == domain) {
380                 mono_runtime_invoke_checked (unsafe_queue_custom_work_item_method, NULL, args, error);
381                 return_val_if_nok (error, FALSE);
382         } else {
383                 mono_thread_push_appdomain_ref (domain);
384                 if (mono_domain_set (domain, FALSE)) {
385                         mono_runtime_invoke_checked (unsafe_queue_custom_work_item_method, NULL, args, error);
386                         if (!is_ok (error)) {
387                                 mono_thread_pop_appdomain_ref ();
388                                 return FALSE;
389                         }
390                         mono_domain_set (current_domain, TRUE);
391                 }
392                 mono_thread_pop_appdomain_ref ();
393         }
394         return TRUE;
395 }
396
397 /* LOCKING: threadpool->domains_lock must be held */
398 static void
399 domain_add (ThreadPoolDomain *tpdomain)
400 {
401         guint i, len;
402
403         g_assert (tpdomain);
404
405         len = threadpool->domains->len;
406         for (i = 0; i < len; ++i) {
407                 if (g_ptr_array_index (threadpool->domains, i) == tpdomain)
408                         break;
409         }
410
411         if (i == len)
412                 g_ptr_array_add (threadpool->domains, tpdomain);
413 }
414
415 /* LOCKING: threadpool->domains_lock must be held */
416 static gboolean
417 domain_remove (ThreadPoolDomain *tpdomain)
418 {
419         g_assert (tpdomain);
420         return g_ptr_array_remove (threadpool->domains, tpdomain);
421 }
422
423 /* LOCKING: threadpool->domains_lock must be held */
424 static ThreadPoolDomain *
425 domain_get (MonoDomain *domain, gboolean create)
426 {
427         ThreadPoolDomain *tpdomain = NULL;
428         guint i;
429
430         g_assert (domain);
431
432         for (i = 0; i < threadpool->domains->len; ++i) {
433                 tpdomain = (ThreadPoolDomain *)g_ptr_array_index (threadpool->domains, i);
434                 if (tpdomain->domain == domain)
435                         return tpdomain;
436         }
437
438         if (create) {
439                 ThreadPoolDomainCleanupSemaphore *cleanup_semaphore;
440                 cleanup_semaphore = g_new0 (ThreadPoolDomainCleanupSemaphore, 1);
441                 cleanup_semaphore->ref = 2;
442                 mono_coop_cond_init (&cleanup_semaphore->cond);
443
444                 g_assert (!domain->cleanup_semaphore);
445                 domain->cleanup_semaphore = cleanup_semaphore;
446
447                 tpdomain = g_new0 (ThreadPoolDomain, 1);
448                 tpdomain->domain = domain;
449                 domain_add (tpdomain);
450         }
451
452         return tpdomain;
453 }
454
455 static void
456 domain_free (ThreadPoolDomain *tpdomain)
457 {
458         g_free (tpdomain);
459 }
460
461 /* LOCKING: threadpool->domains_lock must be held */
462 static gboolean
463 domain_any_has_request (void)
464 {
465         guint i;
466
467         for (i = 0; i < threadpool->domains->len; ++i) {
468                 ThreadPoolDomain *tmp = (ThreadPoolDomain *)g_ptr_array_index (threadpool->domains, i);
469                 if (tmp->outstanding_request > 0)
470                         return TRUE;
471         }
472
473         return FALSE;
474 }
475
476 /* LOCKING: threadpool->domains_lock must be held */
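/* Return the next domain after `current` (round-robin over threadpool->domains)
 * that still has an outstanding work request, or NULL if there is none.
 * `current` may be NULL to start the scan from the beginning. */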
477 static ThreadPoolDomain *
478 domain_get_next (ThreadPoolDomain *current)
479 {
480         ThreadPoolDomain *tpdomain = NULL;
481         guint len;
482
483         len = threadpool->domains->len;
484         if (len > 0) {
485                 guint i, current_idx = -1;
486                 if (current) {
487                         for (i = 0; i < len; ++i) {
488                                 if (current == g_ptr_array_index (threadpool->domains, i)) {
489                                         current_idx = i;
490                                         break;
491                                 }
492                         }
493                         g_assert (current_idx != (guint)-1);
494                 }
495                 for (i = current_idx + 1; i < len + current_idx + 1; ++i) {
496                         ThreadPoolDomain *tmp = (ThreadPoolDomain *)g_ptr_array_index (threadpool->domains, i % len);
497                         if (tmp->outstanding_request > 0) {
498                                 tpdomain = tmp;
499                                 break;
500                         }
501                 }
502         }
503
504         return tpdomain;
505 }
506
507 static void
508 worker_wait_interrupt (gpointer data)
509 {
510         mono_coop_mutex_lock (&threadpool->active_threads_lock);
511         mono_coop_cond_signal (&threadpool->parked_threads_cond);
512         mono_coop_mutex_unlock (&threadpool->active_threads_lock);
513 }
514
515 /* return TRUE if timeout, FALSE otherwise (worker unpark or interrupt) */
516 static gboolean
517 worker_park (void)
518 {
519         gboolean timeout = FALSE;
520
521         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] current worker parking", mono_native_thread_id_get ());
522
523         mono_gc_set_skip_thread (TRUE);
524
525         mono_coop_mutex_lock (&threadpool->active_threads_lock);
526
527         if (!mono_runtime_is_shutting_down ()) {
528                 static gpointer rand_handle = NULL;
529                 MonoInternalThread *thread_internal;
530                 gboolean interrupted = FALSE;
531
532                 if (!rand_handle)
533                         rand_handle = rand_create ();
534                 g_assert (rand_handle);
535
536                 thread_internal = mono_thread_internal_current ();
537                 g_assert (thread_internal);
538
539                 threadpool->parked_threads_count += 1;
540                 g_ptr_array_remove_fast (threadpool->working_threads, thread_internal);
541
542                 mono_thread_info_install_interrupt (worker_wait_interrupt, NULL, &interrupted);
543                 if (interrupted)
544                         goto done;
545
546                 if (mono_coop_cond_timedwait (&threadpool->parked_threads_cond, &threadpool->active_threads_lock, rand_next (&rand_handle, 5 * 1000, 60 * 1000)) != 0)
547                         timeout = TRUE;
548
549                 mono_thread_info_uninstall_interrupt (&interrupted);
550
551 done:
552                 g_ptr_array_add (threadpool->working_threads, thread_internal);
553                 threadpool->parked_threads_count -= 1;
554         }
555
556         mono_coop_mutex_unlock (&threadpool->active_threads_lock);
557
558         mono_gc_set_skip_thread (FALSE);
559
560         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] current worker unparking, timeout? %s", mono_native_thread_id_get (), timeout ? "yes" : "no");
561
562         return timeout;
563 }
564
565 static gboolean
566 worker_try_unpark (void)
567 {
568         gboolean res = FALSE;
569
570         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try unpark worker", mono_native_thread_id_get ());
571
572         mono_coop_mutex_lock (&threadpool->active_threads_lock);
573         if (threadpool->parked_threads_count > 0) {
574                 mono_coop_cond_signal (&threadpool->parked_threads_cond);
575                 res = TRUE;
576         }
577         mono_coop_mutex_unlock (&threadpool->active_threads_lock);
578
579         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try unpark worker, success? %s", mono_native_thread_id_get (), res ? "yes" : "no");
580
581         return res;
582 }
583
584 static void
585 worker_kill (ThreadPoolWorkingThread *thread)
586 {
587         if (thread == mono_thread_internal_current ())
588                 return;
589
590         mono_thread_internal_stop ((MonoInternalThread*) thread);
591 }
592
593 static void
594 worker_thread (gpointer data)
595 {
596         MonoError error;
597         MonoInternalThread *thread;
598         ThreadPoolDomain *tpdomain, *previous_tpdomain;
599         ThreadPoolCounter counter;
600         gboolean retire = FALSE;
601
602         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker starting", mono_native_thread_id_get ());
603
604         g_assert (threadpool);
605
606         thread = mono_thread_internal_current ();
607         g_assert (thread);
608
609         mono_thread_set_name_internal (thread, mono_string_new (mono_get_root_domain (), "Threadpool worker"), FALSE, &error);
610         mono_error_assert_ok (&error);
611
612         mono_coop_mutex_lock (&threadpool->active_threads_lock);
613         g_ptr_array_add (threadpool->working_threads, thread);
614         mono_coop_mutex_unlock (&threadpool->active_threads_lock);
615
616         previous_tpdomain = NULL;
617
618         mono_coop_mutex_lock (&threadpool->domains_lock);
619
620         while (!mono_runtime_is_shutting_down ()) {
621                 tpdomain = NULL;
622
623                 if ((thread->state & (ThreadState_StopRequested | ThreadState_SuspendRequested)) != 0) {
624                         mono_coop_mutex_unlock (&threadpool->domains_lock);
625                         mono_thread_interruption_checkpoint ();
626                         mono_coop_mutex_lock (&threadpool->domains_lock);
627                 }
628
629                 if (retire || !(tpdomain = domain_get_next (previous_tpdomain))) {
630                         gboolean timeout;
631
632                         COUNTER_ATOMIC (counter, {
633                                 counter._.working --;
634                                 counter._.parked ++;
635                         });
636
637                         mono_coop_mutex_unlock (&threadpool->domains_lock);
638                         timeout = worker_park ();
639                         mono_coop_mutex_lock (&threadpool->domains_lock);
640
641                         COUNTER_ATOMIC (counter, {
642                                 counter._.working ++;
643                                 counter._.parked --;
644                         });
645
646                         if (timeout)
647                                 break;
648
649                         if (retire)
650                                 retire = FALSE;
651
652                         /* The tpdomain->domain might have been unloaded while this thread was parked */
653                         previous_tpdomain = NULL;
654
655                         continue;
656                 }
657
658                 tpdomain->outstanding_request --;
659                 g_assert (tpdomain->outstanding_request >= 0);
660
661                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] worker running in domain %p, outstanding_request = %d",
662                         mono_native_thread_id_get (), tpdomain->domain, tpdomain->outstanding_request);
663
664                 g_assert (tpdomain->domain);
665                 g_assert (tpdomain->domain->threadpool_jobs >= 0);
666                 tpdomain->domain->threadpool_jobs ++;
667
668                 mono_coop_mutex_unlock (&threadpool->domains_lock);
669
670                 mono_thread_push_appdomain_ref (tpdomain->domain);
671                 if (mono_domain_set (tpdomain->domain, FALSE)) {
672                         MonoObject *exc = NULL, *res;
673
674                         res = mono_runtime_try_invoke (mono_defaults.threadpool_perform_wait_callback_method, NULL, NULL, &exc, &error);
675                         if (exc || !mono_error_ok(&error)) {
676                                 if (exc == NULL)
677                                         exc = (MonoObject *) mono_error_convert_to_exception (&error);
678                                 else
679                                         mono_error_cleanup (&error);
680                                 mono_thread_internal_unhandled_exception (exc);
681                         } else if (res && *(MonoBoolean*) mono_object_unbox (res) == FALSE)
682                                 retire = TRUE;
683
684                         mono_thread_clr_state (thread, (MonoThreadState)~ThreadState_Background);
685                         if (!mono_thread_test_state (thread , ThreadState_Background))
686                                 ves_icall_System_Threading_Thread_SetState (thread, ThreadState_Background);
687
688                         mono_domain_set (mono_get_root_domain (), TRUE);
689                 }
690                 mono_thread_pop_appdomain_ref ();
691
692                 mono_coop_mutex_lock (&threadpool->domains_lock);
693
694                 tpdomain->domain->threadpool_jobs --;
695                 g_assert (tpdomain->domain->threadpool_jobs >= 0);
696
697                 if (tpdomain->domain->threadpool_jobs == 0 && mono_domain_is_unloading (tpdomain->domain)) {
698                         ThreadPoolDomainCleanupSemaphore *cleanup_semaphore;
699                         gboolean removed;
700
701                         removed = domain_remove (tpdomain);
702                         g_assert (removed);
703
704                         cleanup_semaphore = (ThreadPoolDomainCleanupSemaphore*) tpdomain->domain->cleanup_semaphore;
705                         g_assert (cleanup_semaphore);
706
707                         mono_coop_cond_signal (&cleanup_semaphore->cond);
708
709                         if (InterlockedDecrement (&cleanup_semaphore->ref) == 0) {
710                                 mono_coop_cond_destroy (&cleanup_semaphore->cond);
711                                 g_free (cleanup_semaphore);
712                                 tpdomain->domain->cleanup_semaphore = NULL;
713                         }
714
715                         domain_free (tpdomain);
716                         tpdomain = NULL;
717                 }
718
719                 previous_tpdomain = tpdomain;
720         }
721
722         mono_coop_mutex_unlock (&threadpool->domains_lock);
723
724         mono_coop_mutex_lock (&threadpool->active_threads_lock);
725         g_ptr_array_remove_fast (threadpool->working_threads, thread);
726         mono_coop_mutex_unlock (&threadpool->active_threads_lock);
727
728         COUNTER_ATOMIC (counter, {
729                 counter._.working--;
730                 counter._.active --;
731         });
732
733         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker finishing", mono_native_thread_id_get ());
734 }
735
736 static gboolean
737 worker_try_create (void)
738 {
739         ThreadPoolCounter counter;
740         MonoInternalThread *thread;
741         gint64 current_ticks;
742         gint32 now;
743
744         mono_coop_mutex_lock (&threadpool->worker_creation_lock);
745
746         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker", mono_native_thread_id_get ());
747         current_ticks = mono_100ns_ticks ();
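        /* convert 100ns ticks to whole seconds: worker creation is rate-limited
         * to WORKER_CREATION_MAX_PER_SEC per wall-clock second */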
748         now = current_ticks / (10 * 1000 * 1000);
749         if (0 == current_ticks) {
750                 g_warning ("failed to get 100ns ticks");
751         } else {
752                 if (threadpool->worker_creation_current_second != now) {
753                         threadpool->worker_creation_current_second = now;
754                         threadpool->worker_creation_current_count = 0;
755                 } else {
756                         g_assert (threadpool->worker_creation_current_count <= WORKER_CREATION_MAX_PER_SEC);
757                         if (threadpool->worker_creation_current_count == WORKER_CREATION_MAX_PER_SEC) {
758                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: maximum number of workers created per second reached, current count = %d",
759                                         mono_native_thread_id_get (), threadpool->worker_creation_current_count);
760                                 mono_coop_mutex_unlock (&threadpool->worker_creation_lock);
761                                 return FALSE;
762                         }
763                 }
764         }
765
766         COUNTER_ATOMIC (counter, {
767                 if (counter._.working >= counter._.max_working) {
768                         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: maximum number of working threads reached",
769                                 mono_native_thread_id_get ());
770                         mono_coop_mutex_unlock (&threadpool->worker_creation_lock);
771                         return FALSE;
772                 }
773                 counter._.working ++;
774                 counter._.active ++;
775         });
776
777         MonoError error;
778         if ((thread = mono_thread_create_internal (mono_get_root_domain (), worker_thread, NULL, TRUE, 0, &error)) != NULL) {
779                 threadpool->worker_creation_current_count += 1;
780
781                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, created %p, now = %d count = %d", mono_native_thread_id_get (), thread->tid, now, threadpool->worker_creation_current_count);
782                 mono_coop_mutex_unlock (&threadpool->worker_creation_lock);
783                 return TRUE;
784         }
785
786         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: could not create thread due to %s", mono_native_thread_id_get (), mono_error_get_message (&error));
787         mono_error_cleanup (&error);
788
789         COUNTER_ATOMIC (counter, {
790                 counter._.working --;
791                 counter._.active --;
792         });
793
794         mono_coop_mutex_unlock (&threadpool->worker_creation_lock);
795         return FALSE;
796 }
797
798 static void monitor_ensure_running (void);
799
800 static gboolean
801 worker_request (MonoDomain *domain)
802 {
803         ThreadPoolDomain *tpdomain;
804
805         g_assert (domain);
806         g_assert (threadpool);
807
808         if (mono_runtime_is_shutting_down ())
809                 return FALSE;
810
811         mono_coop_mutex_lock (&threadpool->domains_lock);
812
813         /* synchronize check with worker_thread */
814         if (mono_domain_is_unloading (domain)) {
815                 mono_coop_mutex_unlock (&threadpool->domains_lock);
816                 return FALSE;
817         }
818
819         tpdomain = domain_get (domain, TRUE);
820         g_assert (tpdomain);
821         tpdomain->outstanding_request ++;
822
823         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, domain = %p, outstanding_request = %d",
824                 mono_native_thread_id_get (), tpdomain->domain, tpdomain->outstanding_request);
825
826         mono_coop_mutex_unlock (&threadpool->domains_lock);
827
828         if (threadpool->suspended)
829                 return FALSE;
830
831         monitor_ensure_running ();
832
833         if (worker_try_unpark ()) {
834                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, unparked", mono_native_thread_id_get ());
835                 return TRUE;
836         }
837
838         if (worker_try_create ()) {
839                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, created", mono_native_thread_id_get ());
840                 return TRUE;
841         }
842
843         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, failed", mono_native_thread_id_get ());
844         return FALSE;
845 }
846
847 static gboolean
848 monitor_should_keep_running (void)
849 {
850         static gint64 last_should_keep_running = -1;
851
852         g_assert (monitor_status == MONITOR_STATUS_WAITING_FOR_REQUEST || monitor_status == MONITOR_STATUS_REQUESTED);
853
854         if (InterlockedExchange (&monitor_status, MONITOR_STATUS_WAITING_FOR_REQUEST) == MONITOR_STATUS_WAITING_FOR_REQUEST) {
855                 gboolean should_keep_running = TRUE, force_should_keep_running = FALSE;
856
857                 if (mono_runtime_is_shutting_down ()) {
858                         should_keep_running = FALSE;
859                 } else {
860                         mono_coop_mutex_lock (&threadpool->domains_lock);
861                         if (!domain_any_has_request ())
862                                 should_keep_running = FALSE;
863                         mono_coop_mutex_unlock (&threadpool->domains_lock);
864
865                         if (!should_keep_running) {
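                                /* MONITOR_MINIMAL_LIFETIME is in ms while mono_100ns_ticks ()
                                 * returns 100ns units, hence the * 1000 * 10 conversion */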
866                                 if (last_should_keep_running == -1 || mono_100ns_ticks () - last_should_keep_running < MONITOR_MINIMAL_LIFETIME * 1000 * 10) {
867                                         should_keep_running = force_should_keep_running = TRUE;
868                                 }
869                         }
870                 }
871
872                 if (should_keep_running) {
873                         if (last_should_keep_running == -1 || !force_should_keep_running)
874                                 last_should_keep_running = mono_100ns_ticks ();
875                 } else {
876                         last_should_keep_running = -1;
877                         if (InterlockedCompareExchange (&monitor_status, MONITOR_STATUS_NOT_RUNNING, MONITOR_STATUS_WAITING_FOR_REQUEST) == MONITOR_STATUS_WAITING_FOR_REQUEST)
878                                 return FALSE;
879                 }
880         }
881
882         g_assert (monitor_status == MONITOR_STATUS_WAITING_FOR_REQUEST || monitor_status == MONITOR_STATUS_REQUESTED);
883
884         return TRUE;
885 }
886
887 static gboolean
888 monitor_sufficient_delay_since_last_dequeue (void)
889 {
890         gint64 threshold;
891
892         g_assert (threadpool);
893
894         if (threadpool->cpu_usage < CPU_USAGE_LOW) {
895                 threshold = MONITOR_INTERVAL;
896         } else {
897                 ThreadPoolCounter counter;
898                 counter.as_gint64 = COUNTER_READ();
899                 threshold = counter._.max_working * MONITOR_INTERVAL * 2;
900         }
901
902         return mono_msec_ticks () >= threadpool->heuristic_last_dequeue + threshold;
903 }
904
905 static void hill_climbing_force_change (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition);
906
907 static void
908 monitor_thread (void)
909 {
910         MonoInternalThread *current_thread = mono_thread_internal_current ();
911         guint i;
912
913         mono_cpu_usage (threadpool->cpu_usage_state);
914
915         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, started", mono_native_thread_id_get ());
916
917         do {
918                 ThreadPoolCounter counter;
919                 gboolean limit_worker_max_reached;
920                 gint32 interval_left = MONITOR_INTERVAL;
921                 gint32 awake = 0; /* number of spurious awakes we tolerate before doing a round of rebalancing */
922
923                 g_assert (monitor_status != MONITOR_STATUS_NOT_RUNNING);
924
925                 mono_gc_set_skip_thread (TRUE);
926
927                 do {
928                         gint64 ts;
929                         gboolean alerted = FALSE;
930
931                         if (mono_runtime_is_shutting_down ())
932                                 break;
933
934                         ts = mono_msec_ticks ();
935                         if (mono_thread_info_sleep (interval_left, &alerted) == 0)
936                                 break;
937                         interval_left -= mono_msec_ticks () - ts;
938
939                         mono_gc_set_skip_thread (FALSE);
940                         if ((current_thread->state & (ThreadState_StopRequested | ThreadState_SuspendRequested)) != 0)
941                                 mono_thread_interruption_checkpoint ();
942                         mono_gc_set_skip_thread (TRUE);
943                 } while (interval_left > 0 && ++awake < 10);
944
945                 mono_gc_set_skip_thread (FALSE);
946
947                 if (threadpool->suspended)
948                         continue;
949
950                 if (mono_runtime_is_shutting_down ())
951                         continue;
952
953                 mono_coop_mutex_lock (&threadpool->domains_lock);
954                 if (!domain_any_has_request ()) {
955                         mono_coop_mutex_unlock (&threadpool->domains_lock);
956                         continue;
957                 }
958                 mono_coop_mutex_unlock (&threadpool->domains_lock);
959
960                 threadpool->cpu_usage = mono_cpu_usage (threadpool->cpu_usage_state);
961
962                 if (!monitor_sufficient_delay_since_last_dequeue ())
963                         continue;
964
965                 limit_worker_max_reached = FALSE;
966
967                 COUNTER_ATOMIC (counter, {
968                         if (counter._.max_working >= threadpool->limit_worker_max) {
969                                 limit_worker_max_reached = TRUE;
970                                 break;
971                         }
972                         counter._.max_working ++;
973                 });
974
975                 if (limit_worker_max_reached)
976                         continue;
977
978                 hill_climbing_force_change (counter._.max_working, TRANSITION_STARVATION);
979
980                 for (i = 0; i < 5; ++i) {
981                         if (mono_runtime_is_shutting_down ())
982                                 break;
983
984                         if (worker_try_unpark ()) {
985                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, unparked", mono_native_thread_id_get ());
986                                 break;
987                         }
988
989                         if (worker_try_create ()) {
990                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, created", mono_native_thread_id_get ());
991                                 break;
992                         }
993                 }
994         } while (monitor_should_keep_running ());
995
996         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, finished", mono_native_thread_id_get ());
997 }
998
999 static void
1000 monitor_ensure_running (void)
1001 {
1002         MonoError error;
1003         for (;;) {
1004                 switch (monitor_status) {
1005                 case MONITOR_STATUS_REQUESTED:
1006                         return;
1007                 case MONITOR_STATUS_WAITING_FOR_REQUEST:
1008                         InterlockedCompareExchange (&monitor_status, MONITOR_STATUS_REQUESTED, MONITOR_STATUS_WAITING_FOR_REQUEST);
1009                         break;
1010                 case MONITOR_STATUS_NOT_RUNNING:
1011                         if (mono_runtime_is_shutting_down ())
1012                                 return;
1013                         if (InterlockedCompareExchange (&monitor_status, MONITOR_STATUS_REQUESTED, MONITOR_STATUS_NOT_RUNNING) == MONITOR_STATUS_NOT_RUNNING) {
1014                                 if (!mono_thread_create_internal (mono_get_root_domain (), monitor_thread, NULL, TRUE, SMALL_STACK, &error)) {
1015                                         monitor_status = MONITOR_STATUS_NOT_RUNNING;
1016                                         mono_error_cleanup (&error);
1017                                 }
1018                                 return;
1019                         }
1020                         break;
1021                 default: g_assert_not_reached ();
1022                 }
1023         }
1024 }
1025
1026 static void
1027 hill_climbing_change_thread_count (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition)
1028 {
1029         ThreadPoolHillClimbing *hc;
1030
1031         g_assert (threadpool);
1032
1033         hc = &threadpool->heuristic_hill_climbing;
1034
1035         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] hill climbing, change max number of threads %d", mono_native_thread_id_get (), new_thread_count);
1036
1037         hc->last_thread_count = new_thread_count;
1038         hc->current_sample_interval = rand_next (&hc->random_interval_generator, hc->sample_interval_low, hc->sample_interval_high);
1039         hc->elapsed_since_last_change = 0;
1040         hc->completions_since_last_change = 0;
1041 }
1042
1043 static void
1044 hill_climbing_force_change (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition)
1045 {
1046         ThreadPoolHillClimbing *hc;
1047
1048         g_assert (threadpool);
1049
1050         hc = &threadpool->heuristic_hill_climbing;
1051
1052         if (new_thread_count != hc->last_thread_count) {
1053                 hc->current_control_setting += new_thread_count - hc->last_thread_count;
1054                 hill_climbing_change_thread_count (new_thread_count, transition);
1055         }
1056 }
1057
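/* Measure the component of `samples` at the given period using the Goertzel
 * recurrence (q0 = coeff * q1 - q2 + x[i]); the result is returned as a
 * complex amplitude normalized by the number of samples. */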
1058 static double_complex
1059 hill_climbing_get_wave_component (gdouble *samples, guint sample_count, gdouble period)
1060 {
1061         ThreadPoolHillClimbing *hc;
1062         gdouble w, cosine, sine, coeff, q0, q1, q2;
1063         guint i;
1064
1065         g_assert (threadpool);
1066         g_assert (sample_count >= period);
1067         g_assert (period >= 2);
1068
1069         hc = &threadpool->heuristic_hill_climbing;
1070
1071         w = 2.0 * M_PI / period;
1072         cosine = cos (w);
1073         sine = sin (w);
1074         coeff = 2.0 * cosine;
1075         q0 = q1 = q2 = 0;
1076
1077         for (i = 0; i < sample_count; ++i) {
1078                 q0 = coeff * q1 - q2 + samples [(hc->total_samples - sample_count + i) % hc->samples_to_measure];
1079                 q2 = q1;
1080                 q1 = q0;
1081         }
1082
1083         return mono_double_complex_scalar_div (mono_double_complex_make (q1 - q2 * cosine, (q2 * sine)), ((gdouble)sample_count));
1084 }
1085
1086 static gint16
1087 hill_climbing_update (gint16 current_thread_count, guint32 sample_duration, gint32 completions, gint64 *adjustment_interval)
1088 {
1089         ThreadPoolHillClimbing *hc;
1090         ThreadPoolHeuristicStateTransition transition;
1091         gdouble throughput;
1092         gdouble throughput_error_estimate;
1093         gdouble confidence;
1094         gdouble move;
1095         gdouble gain;
1096         gint sample_index;
1097         gint sample_count;
1098         gint new_thread_wave_magnitude;
1099         gint new_thread_count;
1100         double_complex thread_wave_component;
1101         double_complex throughput_wave_component;
1102         double_complex ratio;
1103
1104         g_assert (threadpool);
1105         g_assert (adjustment_interval);
1106
1107         hc = &threadpool->heuristic_hill_climbing;
1108
1109         /* If someone changed the thread count without telling us, update our records accordingly. */
1110         if (current_thread_count != hc->last_thread_count)
1111                 hill_climbing_force_change (current_thread_count, TRANSITION_INITIALIZING);
1112
1113         /* Update the cumulative stats for this thread count */
1114         hc->elapsed_since_last_change += sample_duration;
1115         hc->completions_since_last_change += completions;
1116
1117         /* Add in any data we've already collected about this sample */
1118         sample_duration += hc->accumulated_sample_duration;
1119         completions += hc->accumulated_completion_count;
1120
1121         /* We need to make sure we're collecting reasonably accurate data. Since we're just counting the end
1122          * of each work item, we are going to be missing some data about what really happened during the
1123          * sample interval. The count produced by each thread includes an initial work item that may have
1124          * started well before the start of the interval, and each thread may have been running some new
1125          * work item for some time before the end of the interval, which did not yet get counted. So
1126          * our count is going to be off by +/- threadCount workitems.
1127          *
1128          * The exception is that the thread that reported to us last time definitely wasn't running any work
1129          * at that time, and the thread that's reporting now definitely isn't running a work item now. So
1130          * we really only need to consider threadCount-1 threads.
1131          *
1132          * Thus the percent error in our count is +/- (threadCount-1)/numCompletions.
1133          *
1134          * We cannot rely on the frequency-domain analysis we'll be doing later to filter out this error, because
1135          * of the way it accumulates over time. If this sample is off by, say, 33% in the negative direction,
1136          * then the next one likely will be too. The one after that will include the sum of the completions
1137          * we missed in the previous samples, and so will be 33% positive. So every three samples we'll have
1138          * two "low" samples and one "high" sample. This will appear as periodic variation right in the frequency
1139          * range we're targeting, which will not be filtered by the frequency-domain translation. */
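        /* For example, with 4 threads and only 10 completions in the sample, the
         * error bound is (4 - 1) / 10 = 30%, which exceeds max_sample_error (15%),
         * so the sample is accumulated below rather than used right away. */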
1140         if (hc->total_samples > 0 && ((current_thread_count - 1.0) / completions) >= hc->max_sample_error) {
1141                 /* Not accurate enough yet. Let's accumulate the data so
1142                  * far, and tell the ThreadPool to collect a little more. */
1143                 hc->accumulated_sample_duration = sample_duration;
1144                 hc->accumulated_completion_count = completions;
1145                 *adjustment_interval = 10;
1146                 return current_thread_count;
1147         }
1148
1149         /* We've got enough data for our sample; reset our accumulators for next time. */
1150         hc->accumulated_sample_duration = 0;
1151         hc->accumulated_completion_count = 0;
1152
1153         /* Add the current thread count and throughput sample to our history. */
1154         throughput = ((gdouble) completions) / sample_duration;
1155
1156         sample_index = hc->total_samples % hc->samples_to_measure;
1157         hc->samples [sample_index] = throughput;
1158         hc->thread_counts [sample_index] = current_thread_count;
1159         hc->total_samples ++;
1160
1161         /* Set up defaults for our metrics. */
1162         thread_wave_component = mono_double_complex_make(0, 0);
1163         throughput_wave_component = mono_double_complex_make(0, 0);
1164         throughput_error_estimate = 0;
1165         ratio = mono_double_complex_make(0, 0);
1166         confidence = 0;
1167
1168         transition = TRANSITION_WARMUP;
1169
1170         /* How many samples will we use? It must be at least the three wave periods we're looking for, and it must also
1171          * be a whole multiple of the primary wave's period; otherwise the frequency we're looking for will fall between
1172          * two frequency bands in the Fourier analysis, and we won't be able to measure it accurately. */
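        /* For example, with the default wave_period of 4 and 30 usable samples,
         * sample_count is rounded down to 28 (7 whole wave periods). */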
1173         sample_count = ((gint) MIN (hc->total_samples - 1, hc->samples_to_measure) / hc->wave_period) * hc->wave_period;
1174
1175         if (sample_count > hc->wave_period) {
1176                 guint i;
1177                 gdouble average_throughput;
1178                 gdouble average_thread_count;
1179                 gdouble sample_sum = 0;
1180                 gdouble thread_sum = 0;
1181
1182                 /* Average the throughput and thread count samples, so we can scale the wave magnitudes later. */
1183                 for (i = 0; i < sample_count; ++i) {
1184                         guint j = (hc->total_samples - sample_count + i) % hc->samples_to_measure;
1185                         sample_sum += hc->samples [j];
1186                         thread_sum += hc->thread_counts [j];
1187                 }
1188
1189                 average_throughput = sample_sum / sample_count;
1190                 average_thread_count = thread_sum / sample_count;
1191
1192                 if (average_throughput > 0 && average_thread_count > 0) {
1193                         gdouble noise_for_confidence, adjacent_period_1, adjacent_period_2;
1194
1195                         /* Calculate the periods of the adjacent frequency bands we'll be using to
1196                          * measure noise levels. We want the two adjacent Fourier frequency bands. */
1197                         adjacent_period_1 = sample_count / (((gdouble) sample_count) / ((gdouble) hc->wave_period) + 1);
1198                         adjacent_period_2 = sample_count / (((gdouble) sample_count) / ((gdouble) hc->wave_period) - 1);
1199
1200                         /* Get the three different frequency components of the throughput (scaled by average
1201                          * throughput). Our "error" estimate (the amount of noise that might be present in the
1202                          * frequency band we're really interested in) is the average of the adjacent bands. */
1203                         throughput_wave_component = mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->samples, sample_count, hc->wave_period), average_throughput);
1204                         throughput_error_estimate = cabs (mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->samples, sample_count, adjacent_period_1), average_throughput));
1205
1206                         if (adjacent_period_2 <= sample_count) {
1207                                 throughput_error_estimate = MAX (throughput_error_estimate, cabs (mono_double_complex_scalar_div (hill_climbing_get_wave_component (
1208                                         hc->samples, sample_count, adjacent_period_2), average_throughput)));
1209                         }
1210
1211                         /* Do the same for the thread counts, so we have something to compare to. We don't
1212                          * measure thread count noise, because there is none; these are exact measurements. */
1213                         thread_wave_component = mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->thread_counts, sample_count, hc->wave_period), average_thread_count);
1214
1215                         /* Update our moving average of the throughput noise. We'll use this
1216                          * later as feedback to determine the new size of the thread wave. */
1217                         if (hc->average_throughput_noise == 0) {
1218                                 hc->average_throughput_noise = throughput_error_estimate;
1219                         } else {
1220                                 hc->average_throughput_noise = (hc->throughput_error_smoothing_factor * throughput_error_estimate)
1221                                         + ((1.0 - hc->throughput_error_smoothing_factor) * hc->average_throughput_noise);
1222                         }
1223
1224                         if (cabs (thread_wave_component) > 0) {
1225                                 /* Adjust the throughput wave so it's centered around the target wave,
1226                                  * and then calculate the adjusted throughput/thread ratio. */
1227                                 ratio = mono_double_complex_div (mono_double_complex_sub (throughput_wave_component, mono_double_complex_scalar_mul(thread_wave_component, hc->target_throughput_ratio)), thread_wave_component);
1228                                 transition = TRANSITION_CLIMBING_MOVE;
1229                         } else {
1230                                 ratio = mono_double_complex_make (0, 0);
1231                                 transition = TRANSITION_STABILIZING;
1232                         }
1233
1234                         noise_for_confidence = MAX (hc->average_throughput_noise, throughput_error_estimate);
1235                         if (noise_for_confidence > 0) {
1236                                 confidence = cabs (thread_wave_component) / noise_for_confidence / hc->target_signal_to_noise_ratio;
1237                         } else {
1238                                 /* there is no noise! */
1239                                 confidence = 1.0;
1240                         }
1241                 }
1242         }
1243
1244         /* We use just the real part of the complex ratio we just calculated. If the throughput signal
1245          * is exactly in phase with the thread signal, this will be the same as taking the magnitude of
1246          * the complex move and moving that far up. If they're 180 degrees out of phase, we'll move
1247          * backward (because this indicates that our changes are having the opposite of the intended effect).
1248          * If they're 90 degrees out of phase, we won't move at all, because we can't tell wether we're
1249          * If they're 90 degrees out of phase, we won't move at all, because we can't tell whether we're
1250         move = creal (ratio);
1251         move = CLAMP (move, -1.0, 1.0);
1252
1253         /* Apply our confidence multiplier. */
1254         move *= CLAMP (confidence, -1.0, 1.0);
1255
1256         /* Now apply non-linear gain, such that values around zero are attenuated, while higher values
1257          * are enhanced. This allows us to move quickly if we're far away from the target, but more slowly
1258          * if we're getting close, giving us rapid ramp-up without wild oscillations around the target. */
1259         gain = hc->max_change_per_second * sample_duration;
1260         move = pow (fabs (move), hc->gain_exponent) * (move >= 0.0 ? 1 : -1) * gain;
1261         move = MIN (move, hc->max_change_per_sample);
1262
1263         /* If the result was positive, and CPU is > 95%, refuse the move. */
1264         if (move > 0.0 && threadpool->cpu_usage > CPU_USAGE_HIGH)
1265                 move = 0.0;
1266
1267         /* Apply the move to our control setting. */
1268         hc->current_control_setting += move;
1269
1270         /* Calculate the new thread wave magnitude, which is based on the moving average we've been keeping of the
1271          * throughput error.  This average starts at zero, so we'll start with a nice safe little wave at first. */
1272         new_thread_wave_magnitude = (gint)(0.5 + (hc->current_control_setting * hc->average_throughput_noise
1273                 * hc->target_signal_to_noise_ratio * hc->thread_magnitude_multiplier * 2.0));
1274         new_thread_wave_magnitude = CLAMP (new_thread_wave_magnitude, 1, hc->max_thread_wave_magnitude);
1275
1276         /* Make sure our control setting is within the ThreadPool's limits. */
1277         hc->current_control_setting = CLAMP (hc->current_control_setting, threadpool->limit_worker_min, threadpool->limit_worker_max - new_thread_wave_magnitude);
1278
1279         /* Calculate the new thread count (control setting + square wave). */
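	/* Illustrative numbers: with a control setting of 8.3, a wave magnitude of 2 and a wave period of 4,
	 * samples 0-1 yield (gint) 8.3 = 8 threads, samples 2-3 yield (gint) 10.3 = 10, then back to 8, and
	 * so on: the square wave the hill-climbing algorithm measures throughput against. */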
1280         new_thread_count = (gint)(hc->current_control_setting + new_thread_wave_magnitude * ((hc->total_samples / (hc->wave_period / 2)) % 2));
1281
1282         /* Make sure the new thread count doesn't exceed the ThreadPool's limits. */
1283         new_thread_count = CLAMP (new_thread_count, threadpool->limit_worker_min, threadpool->limit_worker_max);
1284
1285         if (new_thread_count != current_thread_count)
1286                 hill_climbing_change_thread_count (new_thread_count, transition);
1287
1288         if (creal (ratio) < 0.0 && new_thread_count == threadpool->limit_worker_min)
1289 		*adjustment_interval = (gint)(0.5 + hc->current_sample_interval * (10.0 * MIN (-1.0 * creal (ratio), 1.0)));
1290         else
1291                 *adjustment_interval = hc->current_sample_interval;
1292
1293         return new_thread_count;
1294 }
1295
1296 static void
1297 heuristic_notify_work_completed (void)
1298 {
1299         g_assert (threadpool);
1300
1301         InterlockedIncrement (&threadpool->heuristic_completions);
1302         threadpool->heuristic_last_dequeue = mono_msec_ticks ();
1303 }
1304
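/* An adjustment is due once a work item has been dequeued after the current adjustment interval has
 * elapsed since the last adjustment, and only while the number of working threads has not already
 * exceeded the max_working target. */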
1305 static gboolean
1306 heuristic_should_adjust (void)
1307 {
1308         g_assert (threadpool);
1309
1310         if (threadpool->heuristic_last_dequeue > threadpool->heuristic_last_adjustment + threadpool->heuristic_adjustment_interval) {
1311                 ThreadPoolCounter counter;
1312 		counter.as_gint64 = COUNTER_READ ();
1313                 if (counter._.working <= counter._.max_working)
1314                         return TRUE;
1315         }
1316
1317         return FALSE;
1318 }
1319
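/* Recompute the max_working target through the hill-climbing heuristic. The trylock (which returns 0 on
 * success) ensures only one thread runs an adjustment at a time; the others simply skip it and try again
 * on a later completion. */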
1320 static void
1321 heuristic_adjust (void)
1322 {
1323         g_assert (threadpool);
1324
1325         if (mono_coop_mutex_trylock (&threadpool->heuristic_lock) == 0) {
1326                 gint32 completions = InterlockedExchange (&threadpool->heuristic_completions, 0);
1327                 gint64 sample_end = mono_msec_ticks ();
1328                 gint64 sample_duration = sample_end - threadpool->heuristic_sample_start;
1329
1330                 if (sample_duration >= threadpool->heuristic_adjustment_interval / 2) {
1331                         ThreadPoolCounter counter;
1332                         gint16 new_thread_count;
1333
1334                         counter.as_gint64 = COUNTER_READ ();
1335                         new_thread_count = hill_climbing_update (counter._.max_working, sample_duration, completions, &threadpool->heuristic_adjustment_interval);
1336
1337                         COUNTER_ATOMIC (counter, { counter._.max_working = new_thread_count; });
1338
1339                         if (new_thread_count > counter._.max_working)
1340                                 worker_request (mono_domain_get ());
1341
1342                         threadpool->heuristic_sample_start = sample_end;
1343                         threadpool->heuristic_last_adjustment = mono_msec_ticks ();
1344                 }
1345
1346                 mono_coop_mutex_unlock (&threadpool->heuristic_lock);
1347         }
1348 }
1349
1350 void
1351 mono_threadpool_ms_cleanup (void)
1352 {
1353         #ifndef DISABLE_SOCKETS
1354                 mono_threadpool_ms_io_cleanup ();
1355         #endif
1356         mono_lazy_cleanup (&status, cleanup);
1357 }
1358
1359 MonoAsyncResult *
1360 mono_threadpool_ms_begin_invoke (MonoDomain *domain, MonoObject *target, MonoMethod *method, gpointer *params, MonoError *error)
1361 {
1362         static MonoClass *async_call_klass = NULL;
1363         MonoMethodMessage *message;
1364         MonoAsyncResult *async_result;
1365         MonoAsyncCall *async_call;
1366         MonoDelegate *async_callback = NULL;
1367         MonoObject *state = NULL;
1368
1369         if (!async_call_klass)
1370                 async_call_klass = mono_class_load_from_name (mono_defaults.corlib, "System", "MonoAsyncCall");
1371
1372         mono_lazy_initialize (&status, initialize);
1373
1374         mono_error_init (error);
1375
1376         message = mono_method_call_message_new (method, params, mono_get_delegate_invoke (method->klass), (params != NULL) ? (&async_callback) : NULL, (params != NULL) ? (&state) : NULL, error);
1377         return_val_if_nok (error, NULL);
1378
1379         async_call = (MonoAsyncCall*) mono_object_new_checked (domain, async_call_klass, error);
1380         return_val_if_nok (error, NULL);
1381
1382         MONO_OBJECT_SETREF (async_call, msg, message);
1383         MONO_OBJECT_SETREF (async_call, state, state);
1384
1385         if (async_callback) {
1386                 MONO_OBJECT_SETREF (async_call, cb_method, mono_get_delegate_invoke (((MonoObject*) async_callback)->vtable->klass));
1387                 MONO_OBJECT_SETREF (async_call, cb_target, async_callback);
1388         }
1389
1390         async_result = mono_async_result_new (domain, NULL, async_call->state, NULL, (MonoObject*) async_call, error);
1391         return_val_if_nok (error, NULL);
1392         MONO_OBJECT_SETREF (async_result, async_delegate, target);
1393
1394         mono_threadpool_ms_enqueue_work_item (domain, (MonoObject*) async_result, error);
1395         return_val_if_nok (error, NULL);
1396
1397         return async_result;
1398 }
1399
1400 MonoObject *
1401 mono_threadpool_ms_end_invoke (MonoAsyncResult *ares, MonoArray **out_args, MonoObject **exc, MonoError *error)
1402 {
1403         MonoAsyncCall *ac;
1404
1405         mono_error_init (error);
1406         g_assert (exc);
1407         g_assert (out_args);
1408
1409         *exc = NULL;
1410         *out_args = NULL;
1411
1412         /* check if already finished */
1413         mono_monitor_enter ((MonoObject*) ares);
1414
1415         if (ares->endinvoke_called) {
1416 		mono_error_set_invalid_operation (error, "Delegate EndInvoke method called more than once");
1417                 mono_monitor_exit ((MonoObject*) ares);
1418                 return NULL;
1419         }
1420
1421         ares->endinvoke_called = 1;
1422
1423         /* wait until we are really finished */
1424         if (ares->completed) {
1425                 mono_monitor_exit ((MonoObject *) ares);
1426         } else {
1427                 gpointer wait_event;
1428                 if (ares->handle) {
1429                         wait_event = mono_wait_handle_get_handle ((MonoWaitHandle*) ares->handle);
1430                 } else {
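			/* No wait handle exists yet: create an event under the monitor and publish it through
			 * ares->handle, so the thread that completes the async call can find and signal it. */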
1431                         wait_event = mono_w32event_create (TRUE, FALSE);
1430 			g_assert (wait_event);
1433                         MonoWaitHandle *wait_handle = mono_wait_handle_new (mono_object_domain (ares), wait_event, error);
1434                         if (!is_ok (error)) {
1435                                 CloseHandle (wait_event);
1436                                 return NULL;
1437                         }
1438                         MONO_OBJECT_SETREF (ares, handle, (MonoObject*) wait_handle);
1439                 }
1440                 mono_monitor_exit ((MonoObject*) ares);
1441                 MONO_ENTER_GC_SAFE;
1442                 WaitForSingleObjectEx (wait_event, INFINITE, TRUE);
1443                 MONO_EXIT_GC_SAFE;
1444         }
1445
1446         ac = (MonoAsyncCall*) ares->object_data;
1447         g_assert (ac);
1448
1449         *exc = ac->msg->exc; /* FIXME: GC add write barrier */
1450         *out_args = ac->out_args;
1451         return ac->res;
1452 }
1453
1454 gboolean
1455 mono_threadpool_ms_remove_domain_jobs (MonoDomain *domain, int timeout)
1456 {
1457         gint64 end;
1458         ThreadPoolDomain *tpdomain;
1459         ThreadPoolDomainCleanupSemaphore *cleanup_semaphore;
1460         gboolean ret;
1461
1462         g_assert (domain);
1463         g_assert (timeout >= -1);
1464
1465         g_assert (mono_domain_is_unloading (domain));
1466
1467         if (timeout != -1)
1468                 end = mono_msec_ticks () + timeout;
1469
1470 #ifndef DISABLE_SOCKETS
1471         mono_threadpool_ms_io_remove_domain_jobs (domain);
1472         if (timeout != -1) {
1473                 if (mono_msec_ticks () > end)
1474                         return FALSE;
1475         }
1476 #endif
1477
1478 	/*
1479 	 * There might still be worker threads out there that are about to execute work items from the given
1480 	 * domain. We avoid that by waiting on a semaphore which is pulsed by the thread that brings the
1481 	 * domain's outstanding requests down to zero. The semaphore is only created for domains which queued
1482 	 * threadpool jobs. We always wait on the semaphore rather than checking that domain->threadpool_jobs
1483 	 * is 0, because there may be pending outstanding requests which will still create new jobs.
1484 	 * The semaphore is signaled once the threadpool domain has been removed from the list, at which point
1485 	 * we know no more jobs for the domain will be processed.
1486 	 */
1487
1488 	mono_lazy_initialize (&status, initialize);
1489 	mono_coop_mutex_lock (&threadpool->domains_lock);
1490
1491         tpdomain = domain_get (domain, FALSE);
1492         if (!tpdomain || tpdomain->outstanding_request == 0) {
1493 		mono_coop_mutex_unlock (&threadpool->domains_lock);
1494                 return TRUE;
1495         }
1496
1497         g_assert (domain->cleanup_semaphore);
1498         cleanup_semaphore = (ThreadPoolDomainCleanupSemaphore*) domain->cleanup_semaphore;
1499
1500         ret = TRUE;
1501
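	/* Block until the domain's outstanding requests have drained; with a finite timeout we wait on the
	 * condition variable in slices so we can give up once the deadline computed above has passed. */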
1502         do {
1503                 if (timeout == -1) {
1504                         mono_coop_cond_wait (&cleanup_semaphore->cond, &threadpool->domains_lock);
1505                 } else {
1506                         gint64 now;
1507                         gint res;
1508
1509 			now = mono_msec_ticks ();
1510                         if (now > end) {
1511                                 ret = FALSE;
1512                                 break;
1513                         }
1514
1515                         res = mono_coop_cond_timedwait (&cleanup_semaphore->cond, &threadpool->domains_lock, end - now);
1516                         if (res != 0) {
1517                                 ret = FALSE;
1518                                 break;
1519                         }
1520                 }
1521         } while (tpdomain->outstanding_request != 0);
1522
1523         if (InterlockedDecrement (&cleanup_semaphore->ref) == 0) {
1524                 mono_coop_cond_destroy (&cleanup_semaphore->cond);
1525                 g_free (cleanup_semaphore);
1526                 domain->cleanup_semaphore = NULL;
1527         }
1528
1529 	mono_coop_mutex_unlock (&threadpool->domains_lock);
1530
1531         return ret;
1532 }
1533
1534 void
1535 mono_threadpool_ms_suspend (void)
1536 {
1537         if (threadpool)
1538                 threadpool->suspended = TRUE;
1539 }
1540
1541 void
1542 mono_threadpool_ms_resume (void)
1543 {
1544         if (threadpool)
1545                 threadpool->suspended = FALSE;
1546 }
1547
1548 void
1549 ves_icall_System_Threading_ThreadPool_GetAvailableThreadsNative (gint32 *worker_threads, gint32 *completion_port_threads)
1550 {
1551         ThreadPoolCounter counter;
1552
1553         if (!worker_threads || !completion_port_threads)
1554                 return;
1555
1556         mono_lazy_initialize (&status, initialize);
1557
1558         counter.as_gint64 = COUNTER_READ ();
1559
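	/* Worker availability is the configured maximum minus the threads currently alive; IO (completion
	 * port) threads are not tracked in the counter, so the configured maximum is reported as-is. */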
1560         *worker_threads = MAX (0, threadpool->limit_worker_max - counter._.active);
1561         *completion_port_threads = threadpool->limit_io_max;
1562 }
1563
1564 void
1565 ves_icall_System_Threading_ThreadPool_GetMinThreadsNative (gint32 *worker_threads, gint32 *completion_port_threads)
1566 {
1567         if (!worker_threads || !completion_port_threads)
1568                 return;
1569
1570         mono_lazy_initialize (&status, initialize);
1571
1572         *worker_threads = threadpool->limit_worker_min;
1573         *completion_port_threads = threadpool->limit_io_min;
1574 }
1575
1576 void
1577 ves_icall_System_Threading_ThreadPool_GetMaxThreadsNative (gint32 *worker_threads, gint32 *completion_port_threads)
1578 {
1579         if (!worker_threads || !completion_port_threads)
1580                 return;
1581
1582         mono_lazy_initialize (&status, initialize);
1583
1584         *worker_threads = threadpool->limit_worker_max;
1585         *completion_port_threads = threadpool->limit_io_max;
1586 }
1587
1588 MonoBoolean
1589 ves_icall_System_Threading_ThreadPool_SetMinThreadsNative (gint32 worker_threads, gint32 completion_port_threads)
1590 {
1591         mono_lazy_initialize (&status, initialize);
1592
1593         if (worker_threads <= 0 || worker_threads > threadpool->limit_worker_max)
1594                 return FALSE;
1595         if (completion_port_threads <= 0 || completion_port_threads > threadpool->limit_io_max)
1596                 return FALSE;
1597
1598         threadpool->limit_worker_min = worker_threads;
1599         threadpool->limit_io_min = completion_port_threads;
1600
1601         return TRUE;
1602 }
1603
1604 MonoBoolean
1605 ves_icall_System_Threading_ThreadPool_SetMaxThreadsNative (gint32 worker_threads, gint32 completion_port_threads)
1606 {
1607         gint cpu_count = mono_cpu_count ();
1608
1609         mono_lazy_initialize (&status, initialize);
1610
1611         if (worker_threads < threadpool->limit_worker_min || worker_threads < cpu_count)
1612                 return FALSE;
1613         if (completion_port_threads < threadpool->limit_io_min || completion_port_threads < cpu_count)
1614                 return FALSE;
1615
1616         threadpool->limit_worker_max = worker_threads;
1617         threadpool->limit_io_max = completion_port_threads;
1618
1619         return TRUE;
1620 }
1621
1622 void
1623 ves_icall_System_Threading_ThreadPool_InitializeVMTp (MonoBoolean *enable_worker_tracking)
1624 {
1625         if (enable_worker_tracking) {
1626 		// TODO: implement some kind of switch to make it possible to use it
1627                 *enable_worker_tracking = FALSE;
1628         }
1629
1630         mono_lazy_initialize (&status, initialize);
1631 }
1632
1633 MonoBoolean
1634 ves_icall_System_Threading_ThreadPool_NotifyWorkItemComplete (void)
1635 {
1636         ThreadPoolCounter counter;
1637
1638         if (mono_domain_is_unloading (mono_domain_get ()) || mono_runtime_is_shutting_down ())
1639                 return FALSE;
1640
1641         heuristic_notify_work_completed ();
1642
1643         if (heuristic_should_adjust ())
1644                 heuristic_adjust ();
1645
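	/* The return value tells the managed worker whether it should keep processing work items:
	 * FALSE once we are over the current max_working target, or the domain/runtime is going away. */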
1646         counter.as_gint64 = COUNTER_READ ();
1647         return counter._.working <= counter._.max_working;
1648 }
1649
1650 void
1651 ves_icall_System_Threading_ThreadPool_NotifyWorkItemProgressNative (void)
1652 {
1653         heuristic_notify_work_completed ();
1654
1655         if (heuristic_should_adjust ())
1656                 heuristic_adjust ();
1657 }
1658
1659 void
1660 ves_icall_System_Threading_ThreadPool_ReportThreadStatus (MonoBoolean is_working)
1661 {
1662         // TODO
1663         MonoError error;
1664         mono_error_set_not_implemented (&error, "");
1665         mono_error_set_pending_exception (&error);
1666 }
1667
1668 MonoBoolean
1669 ves_icall_System_Threading_ThreadPool_RequestWorkerThread (void)
1670 {
1671         return worker_request (mono_domain_get ());
1672 }
1673
1674 MonoBoolean G_GNUC_UNUSED
1675 ves_icall_System_Threading_ThreadPool_PostQueuedCompletionStatus (MonoNativeOverlapped *native_overlapped)
1676 {
1677 	/* This copies the behavior of the current Mono implementation */
1678         MonoError error;
1679         mono_error_set_not_implemented (&error, "");
1680         mono_error_set_pending_exception (&error);
1681         return FALSE;
1682 }
1683
1684 MonoBoolean G_GNUC_UNUSED
1685 ves_icall_System_Threading_ThreadPool_BindIOCompletionCallbackNative (gpointer file_handle)
1686 {
1687 	/* This copies the behavior of the current Mono implementation */
1688         return TRUE;
1689 }
1690
1691 MonoBoolean G_GNUC_UNUSED
1692 ves_icall_System_Threading_ThreadPool_IsThreadPoolHosted (void)
1693 {
1694         return FALSE;
1695 }