Merge pull request #2781 from alexanderkyte/inflated_method_header_leak
[mono.git] / mono / metadata / threadpool-ms.c
1 /*
2  * threadpool-ms.c: Microsoft threadpool runtime support
3  *
4  * Author:
5  *      Ludovic Henry (ludovic.henry@xamarin.com)
6  *
7  * Copyright 2015 Xamarin, Inc (http://www.xamarin.com)
8  */
9
10 //
11 // Copyright (c) Microsoft. All rights reserved.
12 // Licensed under the MIT license. See LICENSE file in the project root for full license information.
13 //
14 // Files:
15 //  - src/vm/comthreadpool.cpp
16 //  - src/vm/win32threadpool.cpp
17 //  - src/vm/threadpoolrequest.cpp
18 //  - src/vm/hillclimbing.cpp
19 //
20 // Ported from C++ to C and adjusted to the Mono runtime
21
22 #include <stdlib.h>
23 #define _USE_MATH_DEFINES // needed by MSVC to define math constants
24 #include <math.h>
25 #include <config.h>
26 #include <glib.h>
27
28 #include <mono/metadata/class-internals.h>
29 #include <mono/metadata/exception.h>
30 #include <mono/metadata/gc-internals.h>
31 #include <mono/metadata/object.h>
32 #include <mono/metadata/object-internals.h>
33 #include <mono/metadata/threadpool-ms.h>
34 #include <mono/metadata/threadpool-ms-io.h>
35 #include <mono/utils/atomic.h>
36 #include <mono/utils/mono-compiler.h>
37 #include <mono/utils/mono-complex.h>
38 #include <mono/utils/mono-lazy-init.h>
39 #include <mono/utils/mono-logger.h>
40 #include <mono/utils/mono-logger-internals.h>
41 #include <mono/utils/mono-proclib.h>
42 #include <mono/utils/mono-threads.h>
43 #include <mono/utils/mono-time.h>
44 #include <mono/utils/mono-rand.h>
45
46 #define CPU_USAGE_LOW 80
47 #define CPU_USAGE_HIGH 95
48
49 #define MONITOR_INTERVAL 500 // ms
50 #define MONITOR_MINIMAL_LIFETIME 60 * 1000 // ms
51
52 #define WORKER_CREATION_MAX_PER_SEC 10
53
54 /* The exponent to apply to the gain. 1.0 means to use linear gain,
55  * higher values will enhance large moves and damp small ones.
56  * default: 2.0 */
57 #define HILL_CLIMBING_GAIN_EXPONENT 2.0
58
59 /* The 'cost' of a thread. 0 means drive for increased throughput regardless
60  * of thread count, higher values bias more against higher thread counts.
61  * default: 0.15 */
62 #define HILL_CLIMBING_BIAS 0.15
63
64 #define HILL_CLIMBING_WAVE_PERIOD 4
65 #define HILL_CLIMBING_MAX_WAVE_MAGNITUDE 20
66 #define HILL_CLIMBING_WAVE_MAGNITUDE_MULTIPLIER 1.0
67 #define HILL_CLIMBING_WAVE_HISTORY_SIZE 8
68 #define HILL_CLIMBING_TARGET_SIGNAL_TO_NOISE_RATIO 3.0
69 #define HILL_CLIMBING_MAX_CHANGE_PER_SECOND 4
70 #define HILL_CLIMBING_MAX_CHANGE_PER_SAMPLE 20
71 #define HILL_CLIMBING_SAMPLE_INTERVAL_LOW 10
72 #define HILL_CLIMBING_SAMPLE_INTERVAL_HIGH 200
73 #define HILL_CLIMBING_ERROR_SMOOTHING_FACTOR 0.01
74 #define HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT 0.15
75
76 typedef union {
77         struct {
78                 gint16 max_working; /* determined by heuristic */
79                 gint16 active; /* executing worker_thread */
80                 gint16 working; /* actively executing worker_thread, not parked */
81                 gint16 parked; /* parked */
82         } _;
83         gint64 as_gint64;
84 } ThreadPoolCounter;
85
86 typedef struct {
87         MonoDomain *domain;
88         gint32 outstanding_request;
89 } ThreadPoolDomain;
90
91 typedef MonoInternalThread ThreadPoolWorkingThread;
92
93 typedef struct {
94         gint32 wave_period;
95         gint32 samples_to_measure;
96         gdouble target_throughput_ratio;
97         gdouble target_signal_to_noise_ratio;
98         gdouble max_change_per_second;
99         gdouble max_change_per_sample;
100         gint32 max_thread_wave_magnitude;
101         gint32 sample_interval_low;
102         gdouble thread_magnitude_multiplier;
103         gint32 sample_interval_high;
104         gdouble throughput_error_smoothing_factor;
105         gdouble gain_exponent;
106         gdouble max_sample_error;
107
108         gdouble current_control_setting;
109         gint64 total_samples;
110         gint16 last_thread_count;
111         gdouble elapsed_since_last_change;
112         gdouble completions_since_last_change;
113
114         gdouble average_throughput_noise;
115
116         gdouble *samples;
117         gdouble *thread_counts;
118
119         guint32 current_sample_interval;
120         gpointer random_interval_generator;
121
122         gint32 accumulated_completion_count;
123         gdouble accumulated_sample_duration;
124 } ThreadPoolHillClimbing;
125
126 typedef struct {
127         ThreadPoolCounter counters;
128
129         GPtrArray *domains; // ThreadPoolDomain* []
130         MonoCoopMutex domains_lock;
131
132         GPtrArray *working_threads; // ThreadPoolWorkingThread* []
133         gint32 parked_threads_count;
134         MonoCoopCond parked_threads_cond;
135         MonoCoopMutex active_threads_lock; /* protect access to working_threads and parked_threads */
136
137         guint32 worker_creation_current_second;
138         guint32 worker_creation_current_count;
139         MonoCoopMutex worker_creation_lock;
140
141         gint32 heuristic_completions;
142         guint32 heuristic_sample_start;
143         guint32 heuristic_last_dequeue; // ms
144         guint32 heuristic_last_adjustment; // ms
145         guint32 heuristic_adjustment_interval; // ms
146         ThreadPoolHillClimbing heuristic_hill_climbing;
147         MonoCoopMutex heuristic_lock;
148
149         gint32 limit_worker_min;
150         gint32 limit_worker_max;
151         gint32 limit_io_min;
152         gint32 limit_io_max;
153
154         MonoCpuUsageState *cpu_usage_state;
155         gint32 cpu_usage;
156
157         /* suspended by the debugger */
158         gboolean suspended;
159 } ThreadPool;
160
161 typedef enum {
162         TRANSITION_WARMUP,
163         TRANSITION_INITIALIZING,
164         TRANSITION_RANDOM_MOVE,
165         TRANSITION_CLIMBING_MOVE,
166         TRANSITION_CHANGE_POINT,
167         TRANSITION_STABILIZING,
168         TRANSITION_STARVATION,
169         TRANSITION_THREAD_TIMED_OUT,
170         TRANSITION_UNDEFINED,
171 } ThreadPoolHeuristicStateTransition;
172
173 static mono_lazy_init_t status = MONO_LAZY_INIT_STATUS_NOT_INITIALIZED;
174
175 enum {
176         MONITOR_STATUS_REQUESTED,
177         MONITOR_STATUS_WAITING_FOR_REQUEST,
178         MONITOR_STATUS_NOT_RUNNING,
179 };
180
181 static gint32 monitor_status = MONITOR_STATUS_NOT_RUNNING;
182
183 static ThreadPool* threadpool;
184
185 #define COUNTER_CHECK(counter) \
186         do { \
187                 g_assert (counter._.max_working > 0); \
188                 g_assert (counter._.working >= 0); \
189                 g_assert (counter._.active >= 0); \
190         } while (0)
191
192 #define COUNTER_READ() (InterlockedRead64 (&threadpool->counters.as_gint64))
193
194 #define COUNTER_ATOMIC(var,block) \
195         do { \
196                 ThreadPoolCounter __old; \
197                 do { \
198                         g_assert (threadpool); \
199                         __old.as_gint64 = COUNTER_READ (); \
200                         (var) = __old; \
201                         { block; } \
202                         COUNTER_CHECK (var); \
203                 } while (InterlockedCompareExchange64 (&threadpool->counters.as_gint64, (var).as_gint64, __old.as_gint64) != __old.as_gint64); \
204         } while (0)
205
206 #define COUNTER_TRY_ATOMIC(res,var,block) \
207         do { \
208                 ThreadPoolCounter __old; \
209                 do { \
210                         g_assert (threadpool); \
211                         __old.as_gint64 = COUNTER_READ (); \
212                         (var) = __old; \
213                         (res) = FALSE; \
214                         { block; } \
215                         COUNTER_CHECK (var); \
216                         (res) = InterlockedCompareExchange64 (&threadpool->counters.as_gint64, (var).as_gint64, __old.as_gint64) == __old.as_gint64; \
217                 } while (0); \
218         } while (0)
219
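/* Added sketch (not part of the original source): the counter macros above implement
 * a lock-free read-modify-write over the packed 64-bit counter. The block passed to
 * COUNTER_ATOMIC runs on a private copy and is retried until the compare-and-swap
 * publishes it, e.g.:
 *
 *     ThreadPoolCounter counter;
 *     COUNTER_ATOMIC (counter, {
 *             counter._.working ++;
 *             counter._.active ++;
 *     });
 *
 * If another thread updates threadpool->counters between COUNTER_READ () and the
 * InterlockedCompareExchange64, the exchange fails and the block runs again against
 * the fresh value. */
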
220 static gpointer
221 rand_create (void)
222 {
223         mono_rand_open ();
224         return mono_rand_init (NULL, 0);
225 }
226
227 static guint32
228 rand_next (gpointer *handle, guint32 min, guint32 max)
229 {
230         MonoError error;
231         guint32 val;
232         mono_rand_try_get_uint32 (handle, &val, min, max, &error);
233         // FIXME handle error
234         mono_error_assert_ok (&error);
235         return val;
236 }
237
238 static void
239 rand_free (gpointer handle)
240 {
241         mono_rand_close (handle);
242 }
243
244 static void
245 initialize (void)
246 {
247         ThreadPoolHillClimbing *hc;
248         const char *threads_per_cpu_env;
249         gint threads_per_cpu;
250         gint threads_count;
251
252         g_assert (!threadpool);
253         threadpool = g_new0 (ThreadPool, 1);
254         g_assert (threadpool);
255
256         threadpool->domains = g_ptr_array_new ();
257         mono_coop_mutex_init (&threadpool->domains_lock);
258
259         threadpool->parked_threads_count = 0;
260         mono_coop_cond_init (&threadpool->parked_threads_cond);
261         threadpool->working_threads = g_ptr_array_new ();
262         mono_coop_mutex_init (&threadpool->active_threads_lock);
263
264         threadpool->worker_creation_current_second = -1;
265         mono_coop_mutex_init (&threadpool->worker_creation_lock);
266
267         threadpool->heuristic_adjustment_interval = 10;
268         mono_coop_mutex_init (&threadpool->heuristic_lock);
269
270         mono_rand_open ();
271
272         hc = &threadpool->heuristic_hill_climbing;
273
274         hc->wave_period = HILL_CLIMBING_WAVE_PERIOD;
275         hc->max_thread_wave_magnitude = HILL_CLIMBING_MAX_WAVE_MAGNITUDE;
276         hc->thread_magnitude_multiplier = (gdouble) HILL_CLIMBING_WAVE_MAGNITUDE_MULTIPLIER;
277         hc->samples_to_measure = hc->wave_period * HILL_CLIMBING_WAVE_HISTORY_SIZE;
278         hc->target_throughput_ratio = (gdouble) HILL_CLIMBING_BIAS;
279         hc->target_signal_to_noise_ratio = (gdouble) HILL_CLIMBING_TARGET_SIGNAL_TO_NOISE_RATIO;
280         hc->max_change_per_second = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SECOND;
281         hc->max_change_per_sample = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SAMPLE;
282         hc->sample_interval_low = HILL_CLIMBING_SAMPLE_INTERVAL_LOW;
283         hc->sample_interval_high = HILL_CLIMBING_SAMPLE_INTERVAL_HIGH;
284         hc->throughput_error_smoothing_factor = (gdouble) HILL_CLIMBING_ERROR_SMOOTHING_FACTOR;
285         hc->gain_exponent = (gdouble) HILL_CLIMBING_GAIN_EXPONENT;
286         hc->max_sample_error = (gdouble) HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT;
287         hc->current_control_setting = 0;
288         hc->total_samples = 0;
289         hc->last_thread_count = 0;
290         hc->average_throughput_noise = 0;
291         hc->elapsed_since_last_change = 0;
292         hc->accumulated_completion_count = 0;
293         hc->accumulated_sample_duration = 0;
294         hc->samples = g_new0 (gdouble, hc->samples_to_measure);
295         hc->thread_counts = g_new0 (gdouble, hc->samples_to_measure);
296         hc->random_interval_generator = rand_create ();
297         hc->current_sample_interval = rand_next (&hc->random_interval_generator, hc->sample_interval_low, hc->sample_interval_high);
298
299         if (!(threads_per_cpu_env = g_getenv ("MONO_THREADS_PER_CPU")))
300                 threads_per_cpu = 1;
301         else
302                 threads_per_cpu = CLAMP (atoi (threads_per_cpu_env), 1, 50);
303
304         threads_count = mono_cpu_count () * threads_per_cpu;
305
306         threadpool->limit_worker_min = threadpool->limit_io_min = threads_count;
307
308 #if defined (PLATFORM_ANDROID) || defined (HOST_IOS)
309         threadpool->limit_worker_max = threadpool->limit_io_max = CLAMP (threads_count * 100, MIN (threads_count, 200), MAX (threads_count, 200));
310 #else
311         threadpool->limit_worker_max = threadpool->limit_io_max = threads_count * 100;
312 #endif
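        /* Added example (not in the original): on an 8-core machine with
         * MONO_THREADS_PER_CPU unset, threads_per_cpu = 1 and threads_count = 8, so
         * limit_worker_min = limit_io_min = 8 and, outside Android/iOS,
         * limit_worker_max = limit_io_max = 8 * 100 = 800. */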
313
314         threadpool->counters._.max_working = threadpool->limit_worker_min;
315
316         threadpool->cpu_usage_state = g_new0 (MonoCpuUsageState, 1);
317
318         threadpool->suspended = FALSE;
319 }
320
321 static void worker_kill (ThreadPoolWorkingThread *thread);
322
323 static void
324 cleanup (void)
325 {
326         guint i;
327
328         /* we assume throughout the code that we are
329          * cleaning up only if the runtime is shutting down */
330         g_assert (mono_runtime_is_shutting_down ());
331
332         while (monitor_status != MONITOR_STATUS_NOT_RUNNING)
333                 mono_thread_info_sleep (1, NULL);
334
335         mono_coop_mutex_lock (&threadpool->active_threads_lock);
336
337         /* stop all threadpool->working_threads */
338         for (i = 0; i < threadpool->working_threads->len; ++i)
339                 worker_kill ((ThreadPoolWorkingThread*) g_ptr_array_index (threadpool->working_threads, i));
340
341         /* unpark all threadpool->parked_threads */
342         mono_coop_cond_broadcast (&threadpool->parked_threads_cond);
343
344         mono_coop_mutex_unlock (&threadpool->active_threads_lock);
345 }
346
347 gboolean
348 mono_threadpool_ms_enqueue_work_item (MonoDomain *domain, MonoObject *work_item, MonoError *error)
349 {
350         static MonoClass *threadpool_class = NULL;
351         static MonoMethod *unsafe_queue_custom_work_item_method = NULL;
352         MonoDomain *current_domain;
353         MonoBoolean f;
354         gpointer args [2];
355
356         mono_error_init (error);
357         g_assert (work_item);
358
359         if (!threadpool_class)
360                 threadpool_class = mono_class_load_from_name (mono_defaults.corlib, "System.Threading", "ThreadPool");
361
362         if (!unsafe_queue_custom_work_item_method)
363                 unsafe_queue_custom_work_item_method = mono_class_get_method_from_name (threadpool_class, "UnsafeQueueCustomWorkItem", 2);
364         g_assert (unsafe_queue_custom_work_item_method);
365
366         f = FALSE;
367
368         args [0] = (gpointer) work_item;
369         args [1] = (gpointer) &f;
370
371         current_domain = mono_domain_get ();
372         if (current_domain == domain) {
373                 mono_runtime_invoke_checked (unsafe_queue_custom_work_item_method, NULL, args, error);
374                 return_val_if_nok (error, FALSE);
375         } else {
376                 mono_thread_push_appdomain_ref (domain);
377                 if (mono_domain_set (domain, FALSE)) {
378                         mono_runtime_invoke_checked (unsafe_queue_custom_work_item_method, NULL, args, error);
379                         if (!is_ok (error)) {
380                                 mono_thread_pop_appdomain_ref ();
381                                 return FALSE;
382                         }
383                         mono_domain_set (current_domain, TRUE);
384                 }
385                 mono_thread_pop_appdomain_ref ();
386         }
387         return TRUE;
388 }
389
390 /* LOCKING: threadpool->domains_lock must be held */
391 static void
392 domain_add (ThreadPoolDomain *tpdomain)
393 {
394         guint i, len;
395
396         g_assert (tpdomain);
397
398         len = threadpool->domains->len;
399         for (i = 0; i < len; ++i) {
400                 if (g_ptr_array_index (threadpool->domains, i) == tpdomain)
401                         break;
402         }
403
404         if (i == len)
405                 g_ptr_array_add (threadpool->domains, tpdomain);
406 }
407
408 /* LOCKING: threadpool->domains_lock must be held */
409 static gboolean
410 domain_remove (ThreadPoolDomain *tpdomain)
411 {
412         g_assert (tpdomain);
413         return g_ptr_array_remove (threadpool->domains, tpdomain);
414 }
415
416 /* LOCKING: threadpool->domains_lock must be held */
417 static ThreadPoolDomain *
418 domain_get (MonoDomain *domain, gboolean create)
419 {
420         ThreadPoolDomain *tpdomain = NULL;
421         guint i;
422
423         g_assert (domain);
424
425         for (i = 0; i < threadpool->domains->len; ++i) {
426                 tpdomain = (ThreadPoolDomain *)g_ptr_array_index (threadpool->domains, i);
427                 if (tpdomain->domain == domain)
428                         return tpdomain;
429         }
430
431         if (create) {
432                 tpdomain = g_new0 (ThreadPoolDomain, 1);
433                 tpdomain->domain = domain;
434                 domain_add (tpdomain);
435         }
436
437         return tpdomain;
438 }
439
440 static void
441 domain_free (ThreadPoolDomain *tpdomain)
442 {
443         g_free (tpdomain);
444 }
445
446 /* LOCKING: threadpool->domains_lock must be held */
447 static gboolean
448 domain_any_has_request (void)
449 {
450         guint i;
451
452         for (i = 0; i < threadpool->domains->len; ++i) {
453                 ThreadPoolDomain *tmp = (ThreadPoolDomain *)g_ptr_array_index (threadpool->domains, i);
454                 if (tmp->outstanding_request > 0)
455                         return TRUE;
456         }
457
458         return FALSE;
459 }
460
461 /* LOCKING: threadpool->domains_lock must be held */
462 static ThreadPoolDomain *
463 domain_get_next (ThreadPoolDomain *current)
464 {
465         ThreadPoolDomain *tpdomain = NULL;
466         guint len;
467
468         len = threadpool->domains->len;
469         if (len > 0) {
470                 guint i; gint current_idx = -1;
471                 if (current) {
472                         for (i = 0; i < len; ++i) {
473                                 if (current == g_ptr_array_index (threadpool->domains, i)) {
474                                         current_idx = i;
475                                         break;
476                                 }
477                         }
478                         g_assert (current_idx >= 0);
479                 }
480                 for (i = current_idx + 1; i < len + current_idx + 1; ++i) {
481                         ThreadPoolDomain *tmp = (ThreadPoolDomain *)g_ptr_array_index (threadpool->domains, i % len);
482                         if (tmp->outstanding_request > 0) {
483                                 tpdomain = tmp;
484                                 break;
485                         }
486                 }
487         }
488
489         return tpdomain;
490 }
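
/* Added example (not in the original): domain_get_next () scans the domains array
 * round-robin, starting right after the previously served domain. With domains
 * [A, B, C], current = B and only A having outstanding_request > 0, the scan visits
 * C then A and returns A; with current == NULL it simply starts at index 0. */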
491
492 static void
493 worker_wait_interrupt (gpointer data)
494 {
495         mono_coop_mutex_lock (&threadpool->active_threads_lock);
496         mono_coop_cond_signal (&threadpool->parked_threads_cond);
497         mono_coop_mutex_unlock (&threadpool->active_threads_lock);
498 }
499
500 /* return TRUE if timeout, FALSE otherwise (worker unpark or interrupt) */
501 static gboolean
502 worker_park (void)
503 {
504         gboolean timeout = FALSE;
505
506         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] current worker parking", mono_native_thread_id_get ());
507
508         mono_gc_set_skip_thread (TRUE);
509
510         mono_coop_mutex_lock (&threadpool->active_threads_lock);
511
512         if (!mono_runtime_is_shutting_down ()) {
513                 static gpointer rand_handle = NULL;
514                 MonoInternalThread *thread_internal;
515                 gboolean interrupted = FALSE;
516
517                 if (!rand_handle)
518                         rand_handle = rand_create ();
519                 g_assert (rand_handle);
520
521                 thread_internal = mono_thread_internal_current ();
522                 g_assert (thread_internal);
523
524                 threadpool->parked_threads_count += 1;
525                 g_ptr_array_remove_fast (threadpool->working_threads, thread_internal);
526
527                 mono_thread_info_install_interrupt (worker_wait_interrupt, NULL, &interrupted);
528                 if (interrupted)
529                         goto done;
530
531                 if (mono_coop_cond_timedwait (&threadpool->parked_threads_cond, &threadpool->active_threads_lock, rand_next (&rand_handle, 5 * 1000, 60 * 1000)) != 0)
532                         timeout = TRUE;
533
534                 mono_thread_info_uninstall_interrupt (&interrupted);
535
536 done:
537                 g_ptr_array_add (threadpool->working_threads, thread_internal);
538                 threadpool->parked_threads_count -= 1;
539         }
540
541         mono_coop_mutex_unlock (&threadpool->active_threads_lock);
542
543         mono_gc_set_skip_thread (FALSE);
544
545         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] current worker unparking, timeout? %s", mono_native_thread_id_get (), timeout ? "yes" : "no");
546
547         return timeout;
548 }
549
550 static gboolean
551 worker_try_unpark (void)
552 {
553         gboolean res = FALSE;
554
555         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try unpark worker", mono_native_thread_id_get ());
556
557         mono_coop_mutex_lock (&threadpool->active_threads_lock);
558         if (threadpool->parked_threads_count > 0) {
559                 mono_coop_cond_signal (&threadpool->parked_threads_cond);
560                 res = TRUE;
561         }
562         mono_coop_mutex_unlock (&threadpool->active_threads_lock);
563
564         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try unpark worker, success? %s", mono_native_thread_id_get (), res ? "yes" : "no");
565
566         return res;
567 }
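
/* Added summary (not in the original): worker_park () and worker_try_unpark () form a
 * handshake over parked_threads_cond, under active_threads_lock. A parked worker waits
 * with a randomized 5-60 s timeout; worker_try_unpark () only signals when
 * parked_threads_count > 0, so a signal is never wasted on an empty parking lot, e.g.:
 *
 *     worker A: worker_park ()       -> parked_threads_count = 1, waits on the cond
 *     worker B: worker_try_unpark () -> sees count > 0, signals, returns TRUE
 *     worker A: wakes up, returns FALSE (no timeout) and goes back to look for work
 */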
568
569 static void
570 worker_kill (ThreadPoolWorkingThread *thread)
571 {
572         if (thread == mono_thread_internal_current ())
573                 return;
574
575         mono_thread_internal_stop ((MonoInternalThread*) thread);
576 }
577
578 static void
579 worker_thread (gpointer data)
580 {
581         MonoError error;
582         MonoInternalThread *thread;
583         ThreadPoolDomain *tpdomain, *previous_tpdomain;
584         ThreadPoolCounter counter;
585         gboolean retire = FALSE;
586
587         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker starting", mono_native_thread_id_get ());
588
589         g_assert (threadpool);
590
591         thread = mono_thread_internal_current ();
592         g_assert (thread);
593
594         mono_thread_set_name_internal (thread, mono_string_new (mono_domain_get (), "Threadpool worker"), FALSE);
595
596         mono_coop_mutex_lock (&threadpool->active_threads_lock);
597         g_ptr_array_add (threadpool->working_threads, thread);
598         mono_coop_mutex_unlock (&threadpool->active_threads_lock);
599
600         previous_tpdomain = NULL;
601
602         mono_coop_mutex_lock (&threadpool->domains_lock);
603
604         while (!mono_runtime_is_shutting_down ()) {
605                 tpdomain = NULL;
606
607                 if ((thread->state & (ThreadState_StopRequested | ThreadState_SuspendRequested)) != 0) {
608                         mono_coop_mutex_unlock (&threadpool->domains_lock);
609                         mono_thread_interruption_checkpoint ();
610                         mono_coop_mutex_lock (&threadpool->domains_lock);
611                 }
612
613                 if (retire || !(tpdomain = domain_get_next (previous_tpdomain))) {
614                         gboolean timeout;
615
616                         COUNTER_ATOMIC (counter, {
617                                 counter._.working --;
618                                 counter._.parked ++;
619                         });
620
621                         mono_coop_mutex_unlock (&threadpool->domains_lock);
622                         timeout = worker_park ();
623                         mono_coop_mutex_lock (&threadpool->domains_lock);
624
625                         COUNTER_ATOMIC (counter, {
626                                 counter._.working ++;
627                                 counter._.parked --;
628                         });
629
630                         if (timeout)
631                                 break;
632
633                         if (retire)
634                                 retire = FALSE;
635
636                         continue;
637                 }
638
639                 tpdomain->outstanding_request --;
640                 g_assert (tpdomain->outstanding_request >= 0);
641
642                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] worker running in domain %p, outstanding_request = %d",
643                         mono_native_thread_id_get (), tpdomain->domain, tpdomain->outstanding_request);
644
645                 g_assert (tpdomain->domain);
646                 g_assert (tpdomain->domain->threadpool_jobs >= 0);
647                 tpdomain->domain->threadpool_jobs ++;
648
649                 mono_coop_mutex_unlock (&threadpool->domains_lock);
650
651                 mono_thread_push_appdomain_ref (tpdomain->domain);
652                 if (mono_domain_set (tpdomain->domain, FALSE)) {
653                         MonoObject *exc = NULL, *res;
654
655                         res = mono_runtime_try_invoke (mono_defaults.threadpool_perform_wait_callback_method, NULL, NULL, &exc, &error);
656                         if (exc || !mono_error_ok(&error)) {
657                                 if (exc == NULL)
658                                         exc = (MonoObject *) mono_error_convert_to_exception (&error);
659                                 else
660                                         mono_error_cleanup (&error);
661                                 mono_thread_internal_unhandled_exception (exc);
662                         } else if (res && *(MonoBoolean*) mono_object_unbox (res) == FALSE)
663                                 retire = TRUE;
664
665                         mono_thread_clr_state (thread, (MonoThreadState)~ThreadState_Background);
666                         if (!mono_thread_test_state (thread , ThreadState_Background))
667                                 ves_icall_System_Threading_Thread_SetState (thread, ThreadState_Background);
668
669                         mono_domain_set (mono_get_root_domain (), TRUE);
670                 }
671                 mono_thread_pop_appdomain_ref ();
672
673                 mono_coop_mutex_lock (&threadpool->domains_lock);
674
675                 tpdomain->domain->threadpool_jobs --;
676                 g_assert (tpdomain->domain->threadpool_jobs >= 0);
677
678                 if (tpdomain->domain->threadpool_jobs == 0 && mono_domain_is_unloading (tpdomain->domain)) {
679                         gboolean removed = domain_remove (tpdomain);
680                         g_assert (removed);
681                         if (tpdomain->domain->cleanup_semaphore)
682                                 ReleaseSemaphore (tpdomain->domain->cleanup_semaphore, 1, NULL);
683                         domain_free (tpdomain);
684                         tpdomain = NULL;
685                 }
686
687                 previous_tpdomain = tpdomain;
688         }
689
690         mono_coop_mutex_unlock (&threadpool->domains_lock);
691
692         mono_coop_mutex_lock (&threadpool->active_threads_lock);
693         g_ptr_array_remove_fast (threadpool->working_threads, thread);
694         mono_coop_mutex_unlock (&threadpool->active_threads_lock);
695
696         COUNTER_ATOMIC (counter, {
697                 counter._.working--;
698                 counter._.active --;
699         });
700
701         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker finishing", mono_native_thread_id_get ());
702 }
703
704 static gboolean
705 worker_try_create (void)
706 {
707         ThreadPoolCounter counter;
708         MonoInternalThread *thread;
709         gint32 now;
710
711         mono_coop_mutex_lock (&threadpool->worker_creation_lock);
712
713         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker", mono_native_thread_id_get ());
714
715         if ((now = mono_100ns_ticks () / 10 / 1000 / 1000) == 0) {
716                 g_warning ("failed to get 100ns ticks");
717         } else {
718                 if (threadpool->worker_creation_current_second != now) {
719                         threadpool->worker_creation_current_second = now;
720                         threadpool->worker_creation_current_count = 0;
721                 } else {
722                         g_assert (threadpool->worker_creation_current_count <= WORKER_CREATION_MAX_PER_SEC);
723                         if (threadpool->worker_creation_current_count == WORKER_CREATION_MAX_PER_SEC) {
724                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: maximum number of workers created per second reached, current count = %d",
725                                         mono_native_thread_id_get (), threadpool->worker_creation_current_count);
726                                 mono_coop_mutex_unlock (&threadpool->worker_creation_lock);
727                                 return FALSE;
728                         }
729                 }
730         }
731
732         COUNTER_ATOMIC (counter, {
733                 if (counter._.working >= counter._.max_working) {
734                         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: maximum number of working threads reached",
735                                 mono_native_thread_id_get ());
736                         mono_coop_mutex_unlock (&threadpool->worker_creation_lock);
737                         return FALSE;
738                 }
739                 counter._.working ++;
740                 counter._.active ++;
741         });
742
743         if ((thread = mono_thread_create_internal (mono_get_root_domain (), worker_thread, NULL, TRUE, 0)) != NULL) {
744                 threadpool->worker_creation_current_count += 1;
745
746                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, created %p, now = %d count = %d", mono_native_thread_id_get (), thread->tid, now, threadpool->worker_creation_current_count);
747                 mono_coop_mutex_unlock (&threadpool->worker_creation_lock);
748                 return TRUE;
749         }
750
751         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: could not create thread", mono_native_thread_id_get ());
752
753         COUNTER_ATOMIC (counter, {
754                 counter._.working --;
755                 counter._.active --;
756         });
757
758         mono_coop_mutex_unlock (&threadpool->worker_creation_lock);
759         return FALSE;
760 }
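
/* Added example (not in the original): worker creation is rate-limited per wall-clock
 * second, 'now' being mono_100ns_ticks () converted to seconds. Within one second at
 * most WORKER_CREATION_MAX_PER_SEC (10) workers are created; the 11th attempt in that
 * same second returns FALSE early, and the budget resets once 'now' advances. */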
761
762 static void monitor_ensure_running (void);
763
764 static gboolean
765 worker_request (MonoDomain *domain)
766 {
767         ThreadPoolDomain *tpdomain;
768
769         g_assert (domain);
770         g_assert (threadpool);
771
772         if (mono_runtime_is_shutting_down ())
773                 return FALSE;
774
775         mono_coop_mutex_lock (&threadpool->domains_lock);
776
777         /* synchronize check with worker_thread */
778         if (mono_domain_is_unloading (domain)) {
779                 mono_coop_mutex_unlock (&threadpool->domains_lock);
780                 return FALSE;
781         }
782
783         tpdomain = domain_get (domain, TRUE);
784         g_assert (tpdomain);
785         tpdomain->outstanding_request ++;
786
787         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, domain = %p, outstanding_request = %d",
788                 mono_native_thread_id_get (), tpdomain->domain, tpdomain->outstanding_request);
789
790         mono_coop_mutex_unlock (&threadpool->domains_lock);
791
792         if (threadpool->suspended)
793                 return FALSE;
794
795         monitor_ensure_running ();
796
797         if (worker_try_unpark ()) {
798                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, unparked", mono_native_thread_id_get ());
799                 return TRUE;
800         }
801
802         if (worker_try_create ()) {
803                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, created", mono_native_thread_id_get ());
804                 return TRUE;
805         }
806
807         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, failed", mono_native_thread_id_get ());
808         return FALSE;
809 }
810
811 static gboolean
812 monitor_should_keep_running (void)
813 {
814         static gint64 last_should_keep_running = -1;
815
816         g_assert (monitor_status == MONITOR_STATUS_WAITING_FOR_REQUEST || monitor_status == MONITOR_STATUS_REQUESTED);
817
818         if (InterlockedExchange (&monitor_status, MONITOR_STATUS_WAITING_FOR_REQUEST) == MONITOR_STATUS_WAITING_FOR_REQUEST) {
819                 gboolean should_keep_running = TRUE, force_should_keep_running = FALSE;
820
821                 if (mono_runtime_is_shutting_down ()) {
822                         should_keep_running = FALSE;
823                 } else {
824                         mono_coop_mutex_lock (&threadpool->domains_lock);
825                         if (!domain_any_has_request ())
826                                 should_keep_running = FALSE;
827                         mono_coop_mutex_unlock (&threadpool->domains_lock);
828
829                         if (!should_keep_running) {
830                                 if (last_should_keep_running == -1 || mono_100ns_ticks () - last_should_keep_running < MONITOR_MINIMAL_LIFETIME * 1000 * 10) {
831                                         should_keep_running = force_should_keep_running = TRUE;
832                                 }
833                         }
834                 }
835
836                 if (should_keep_running) {
837                         if (last_should_keep_running == -1 || !force_should_keep_running)
838                                 last_should_keep_running = mono_100ns_ticks ();
839                 } else {
840                         last_should_keep_running = -1;
841                         if (InterlockedCompareExchange (&monitor_status, MONITOR_STATUS_NOT_RUNNING, MONITOR_STATUS_WAITING_FOR_REQUEST) == MONITOR_STATUS_WAITING_FOR_REQUEST)
842                                 return FALSE;
843                 }
844         }
845
846         g_assert (monitor_status == MONITOR_STATUS_WAITING_FOR_REQUEST || monitor_status == MONITOR_STATUS_REQUESTED);
847
848         return TRUE;
849 }
850
851 static gboolean
852 monitor_sufficient_delay_since_last_dequeue (void)
853 {
854         guint32 threshold;
855
856         g_assert (threadpool);
857
858         if (threadpool->cpu_usage < CPU_USAGE_LOW) {
859                 threshold = MONITOR_INTERVAL;
860         } else {
861                 ThreadPoolCounter counter;
862                 counter.as_gint64 = COUNTER_READ();
863                 threshold = counter._.max_working * MONITOR_INTERVAL * 2;
864         }
865
866         return mono_msec_ticks () >= threadpool->heuristic_last_dequeue + threshold;
867 }
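
/* Added example (not in the original): with CPU usage below CPU_USAGE_LOW (80%) the
 * monitor only requires MONITOR_INTERVAL (500 ms) since the last dequeue before it
 * considers injecting a thread; under higher CPU usage and e.g. max_working = 8 the
 * threshold becomes 8 * 500 * 2 = 8000 ms, so threads are injected far more
 * reluctantly. */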
868
869 static void hill_climbing_force_change (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition);
870
871 static void
872 monitor_thread (void)
873 {
874         MonoInternalThread *current_thread = mono_thread_internal_current ();
875         guint i;
876
877         mono_cpu_usage (threadpool->cpu_usage_state);
878
879         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, started", mono_native_thread_id_get ());
880
881         do {
882                 ThreadPoolCounter counter;
883                 gboolean limit_worker_max_reached;
884                 gint32 interval_left = MONITOR_INTERVAL;
885                 gint32 awake = 0; /* number of spurious awakes we tolerate before doing a round of rebalancing */
886
887                 g_assert (monitor_status != MONITOR_STATUS_NOT_RUNNING);
888
889                 mono_gc_set_skip_thread (TRUE);
890
891                 do {
892                         guint32 ts;
893                         gboolean alerted = FALSE;
894
895                         if (mono_runtime_is_shutting_down ())
896                                 break;
897
898                         ts = mono_msec_ticks ();
899                         if (mono_thread_info_sleep (interval_left, &alerted) == 0)
900                                 break;
901                         interval_left -= mono_msec_ticks () - ts;
902
903                         mono_gc_set_skip_thread (FALSE);
904                         if ((current_thread->state & (ThreadState_StopRequested | ThreadState_SuspendRequested)) != 0)
905                                 mono_thread_interruption_checkpoint ();
906                         mono_gc_set_skip_thread (TRUE);
907                 } while (interval_left > 0 && ++awake < 10);
908
909                 mono_gc_set_skip_thread (FALSE);
910
911                 if (threadpool->suspended)
912                         continue;
913
914                 if (mono_runtime_is_shutting_down ())
915                         continue;
916
917                 mono_coop_mutex_lock (&threadpool->domains_lock);
918                 if (!domain_any_has_request ()) {
919                         mono_coop_mutex_unlock (&threadpool->domains_lock);
920                         continue;
921                 }
922                 mono_coop_mutex_unlock (&threadpool->domains_lock);
923
924                 threadpool->cpu_usage = mono_cpu_usage (threadpool->cpu_usage_state);
925
926                 if (!monitor_sufficient_delay_since_last_dequeue ())
927                         continue;
928
929                 limit_worker_max_reached = FALSE;
930
931                 COUNTER_ATOMIC (counter, {
932                         if (counter._.max_working >= threadpool->limit_worker_max) {
933                                 limit_worker_max_reached = TRUE;
934                                 break;
935                         }
936                         counter._.max_working ++;
937                 });
938
939                 if (limit_worker_max_reached)
940                         continue;
941
942                 hill_climbing_force_change (counter._.max_working, TRANSITION_STARVATION);
943
944                 for (i = 0; i < 5; ++i) {
945                         if (mono_runtime_is_shutting_down ())
946                                 break;
947
948                         if (worker_try_unpark ()) {
949                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, unparked", mono_native_thread_id_get ());
950                                 break;
951                         }
952
953                         if (worker_try_create ()) {
954                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, created", mono_native_thread_id_get ());
955                                 break;
956                         }
957                 }
958         } while (monitor_should_keep_running ());
959
960         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, finished", mono_native_thread_id_get ());
961 }
962
963 static void
964 monitor_ensure_running (void)
965 {
966         for (;;) {
967                 switch (monitor_status) {
968                 case MONITOR_STATUS_REQUESTED:
969                         return;
970                 case MONITOR_STATUS_WAITING_FOR_REQUEST:
971                         InterlockedCompareExchange (&monitor_status, MONITOR_STATUS_REQUESTED, MONITOR_STATUS_WAITING_FOR_REQUEST);
972                         break;
973                 case MONITOR_STATUS_NOT_RUNNING:
974                         if (mono_runtime_is_shutting_down ())
975                                 return;
976                         if (InterlockedCompareExchange (&monitor_status, MONITOR_STATUS_REQUESTED, MONITOR_STATUS_NOT_RUNNING) == MONITOR_STATUS_NOT_RUNNING) {
977                                 if (!mono_thread_create_internal (mono_get_root_domain (), monitor_thread, NULL, TRUE, SMALL_STACK))
978                                         monitor_status = MONITOR_STATUS_NOT_RUNNING;
979                                 return;
980                         }
981                         break;
982                 default: g_assert_not_reached ();
983                 }
984         }
985 }
986
987 static void
988 hill_climbing_change_thread_count (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition)
989 {
990         ThreadPoolHillClimbing *hc;
991
992         g_assert (threadpool);
993
994         hc = &threadpool->heuristic_hill_climbing;
995
996         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] hill climbing, change max number of threads %d", mono_native_thread_id_get (), new_thread_count);
997
998         hc->last_thread_count = new_thread_count;
999         hc->current_sample_interval = rand_next (&hc->random_interval_generator, hc->sample_interval_low, hc->sample_interval_high);
1000         hc->elapsed_since_last_change = 0;
1001         hc->completions_since_last_change = 0;
1002 }
1003
1004 static void
1005 hill_climbing_force_change (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition)
1006 {
1007         ThreadPoolHillClimbing *hc;
1008
1009         g_assert (threadpool);
1010
1011         hc = &threadpool->heuristic_hill_climbing;
1012
1013         if (new_thread_count != hc->last_thread_count) {
1014                 hc->current_control_setting += new_thread_count - hc->last_thread_count;
1015                 hill_climbing_change_thread_count (new_thread_count, transition);
1016         }
1017 }
1018
1019 static double_complex
1020 hill_climbing_get_wave_component (gdouble *samples, guint sample_count, gdouble period)
1021 {
1022         ThreadPoolHillClimbing *hc;
1023         gdouble w, cosine, sine, coeff, q0, q1, q2;
1024         guint i;
1025
1026         g_assert (threadpool);
1027         g_assert (sample_count >= period);
1028         g_assert (period >= 2);
1029
1030         hc = &threadpool->heuristic_hill_climbing;
1031
1032         w = 2.0 * M_PI / period;
1033         cosine = cos (w);
1034         sine = sin (w);
1035         coeff = 2.0 * cosine;
1036         q0 = q1 = q2 = 0;
1037
1038         for (i = 0; i < sample_count; ++i) {
1039                 q0 = coeff * q1 - q2 + samples [(hc->total_samples - sample_count + i) % hc->samples_to_measure];
1040                 q2 = q1;
1041                 q1 = q0;
1042         }
1043
1044         return mono_double_complex_scalar_div (mono_double_complex_make (q1 - q2 * cosine, (q2 * sine)), ((gdouble)sample_count));
1045 }
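
/* Added note (not in the original): the loop above is the Goertzel recurrence, a
 * single-bin DFT that extracts the complex amplitude of the frequency whose period is
 * 'period' from the sample window. A minimal standalone sketch of the same computation
 * (hypothetical helper, assuming a plain contiguous buffer instead of the ring-buffer
 * indexing used above):
 *
 *     static double_complex
 *     goertzel (const gdouble *x, guint n, gdouble period)
 *     {
 *             gdouble w, c, s, q0 = 0, q1 = 0, q2 = 0;
 *             guint i;
 *
 *             w = 2.0 * M_PI / period;
 *             c = cos (w);
 *             s = sin (w);
 *             for (i = 0; i < n; ++i) {
 *                     q0 = 2.0 * c * q1 - q2 + x [i];
 *                     q2 = q1;
 *                     q1 = q0;
 *             }
 *             return mono_double_complex_scalar_div (mono_double_complex_make (q1 - q2 * c, q2 * s), (gdouble) n);
 *     }
 */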
1046
1047 static gint16
1048 hill_climbing_update (gint16 current_thread_count, guint32 sample_duration, gint32 completions, guint32 *adjustment_interval)
1049 {
1050         ThreadPoolHillClimbing *hc;
1051         ThreadPoolHeuristicStateTransition transition;
1052         gdouble throughput;
1053         gdouble throughput_error_estimate;
1054         gdouble confidence;
1055         gdouble move;
1056         gdouble gain;
1057         gint sample_index;
1058         gint sample_count;
1059         gint new_thread_wave_magnitude;
1060         gint new_thread_count;
1061         double_complex thread_wave_component;
1062         double_complex throughput_wave_component;
1063         double_complex ratio;
1064
1065         g_assert (threadpool);
1066         g_assert (adjustment_interval);
1067
1068         hc = &threadpool->heuristic_hill_climbing;
1069
1070         /* If someone changed the thread count without telling us, update our records accordingly. */
1071         if (current_thread_count != hc->last_thread_count)
1072                 hill_climbing_force_change (current_thread_count, TRANSITION_INITIALIZING);
1073
1074         /* Update the cumulative stats for this thread count */
1075         hc->elapsed_since_last_change += sample_duration;
1076         hc->completions_since_last_change += completions;
1077
1078         /* Add in any data we've already collected about this sample */
1079         sample_duration += hc->accumulated_sample_duration;
1080         completions += hc->accumulated_completion_count;
1081
1082         /* We need to make sure we're collecting reasonably accurate data. Since we're just counting the end
1083          * of each work item, we are going to be missing some data about what really happened during the
1084          * sample interval. The count produced by each thread includes an initial work item that may have
1085          * started well before the start of the interval, and each thread may have been running some new
1086          * work item for some time before the end of the interval, which did not yet get counted. So
1087          * our count is going to be off by +/- threadCount workitems.
1088          *
1089          * The exception is that the thread that reported to us last time definitely wasn't running any work
1090          * at that time, and the thread that's reporting now definitely isn't running a work item now. So
1091          * we really only need to consider threadCount-1 threads.
1092          *
1093          * Thus the percent error in our count is +/- (threadCount-1)/numCompletions.
1094          *
1095          * We cannot rely on the frequency-domain analysis we'll be doing later to filter out this error, because
1096          * of the way it accumulates over time. If this sample is off by, say, 33% in the negative direction,
1097          * then the next one likely will be too. The one after that will include the sum of the completions
1098          * we missed in the previous samples, and so will be 33% positive. So every three samples we'll have
1099          * two "low" samples and one "high" sample. This will appear as periodic variation right in the frequency
1100          * range we're targeting, which will not be filtered by the frequency-domain translation. */
1101         if (hc->total_samples > 0 && ((current_thread_count - 1.0) / completions) >= hc->max_sample_error) {
1102                 /* Not accurate enough yet. Let's accumulate the data so
1103                  * far, and tell the ThreadPool to collect a little more. */
1104                 hc->accumulated_sample_duration = sample_duration;
1105                 hc->accumulated_completion_count = completions;
1106                 *adjustment_interval = 10;
1107                 return current_thread_count;
1108         }
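        /* Added example (not in the original): once past the very first sample, with
         * 10 threads and only 50 completions in the interval the error bound is
         * (10 - 1) / 50 = 0.18 > HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT (0.15), so the
         * data is accumulated and the caller is asked to come back ~10 ms later. */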
1109
1110         /* We've got enough data for our sample; reset our accumulators for next time. */
1111         hc->accumulated_sample_duration = 0;
1112         hc->accumulated_completion_count = 0;
1113
1114         /* Add the current thread count and throughput sample to our history. */
1115         throughput = ((gdouble) completions) / sample_duration;
1116
1117         sample_index = hc->total_samples % hc->samples_to_measure;
1118         hc->samples [sample_index] = throughput;
1119         hc->thread_counts [sample_index] = current_thread_count;
1120         hc->total_samples ++;
1121
1122         /* Set up defaults for our metrics. */
1123         thread_wave_component = mono_double_complex_make(0, 0);
1124         throughput_wave_component = mono_double_complex_make(0, 0);
1125         throughput_error_estimate = 0;
1126         ratio = mono_double_complex_make(0, 0);
1127         confidence = 0;
1128
1129         transition = TRANSITION_WARMUP;
1130
1131         /* How many samples will we use? It must be at least the three wave periods we're looking for, and it must also
1132          * be a whole multiple of the primary wave's period; otherwise the frequency we're looking for will fall between
1133          * two frequency bands in the Fourier analysis, and we won't be able to measure it accurately. */
1134         sample_count = ((gint) MIN (hc->total_samples - 1, hc->samples_to_measure) / hc->wave_period) * hc->wave_period;
1135
1136         if (sample_count > hc->wave_period) {
1137                 guint i;
1138                 gdouble average_throughput;
1139                 gdouble average_thread_count;
1140                 gdouble sample_sum = 0;
1141                 gdouble thread_sum = 0;
1142
1143                 /* Average the throughput and thread count samples, so we can scale the wave magnitudes later. */
1144                 for (i = 0; i < sample_count; ++i) {
1145                         guint j = (hc->total_samples - sample_count + i) % hc->samples_to_measure;
1146                         sample_sum += hc->samples [j];
1147                         thread_sum += hc->thread_counts [j];
1148                 }
1149
1150                 average_throughput = sample_sum / sample_count;
1151                 average_thread_count = thread_sum / sample_count;
1152
1153                 if (average_throughput > 0 && average_thread_count > 0) {
1154                         gdouble noise_for_confidence, adjacent_period_1, adjacent_period_2;
1155
1156                         /* Calculate the periods of the adjacent frequency bands we'll be using to
1157                          * measure noise levels. We want the two adjacent Fourier frequency bands. */
1158                         adjacent_period_1 = sample_count / (((gdouble) sample_count) / ((gdouble) hc->wave_period) + 1);
1159                         adjacent_period_2 = sample_count / (((gdouble) sample_count) / ((gdouble) hc->wave_period) - 1);
1160
1161                         /* Get the three different frequency components of the throughput (scaled by average
1162                          * throughput). Our "error" estimate (the amount of noise that might be present in the
1163                          * frequency band we're really interested in) is the average of the adjacent bands. */
1164                         throughput_wave_component = mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->samples, sample_count, hc->wave_period), average_throughput);
1165                         throughput_error_estimate = cabs (mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->samples, sample_count, adjacent_period_1), average_throughput));
1166
1167                         if (adjacent_period_2 <= sample_count) {
1168                                 throughput_error_estimate = MAX (throughput_error_estimate, cabs (mono_double_complex_scalar_div (hill_climbing_get_wave_component (
1169                                         hc->samples, sample_count, adjacent_period_2), average_throughput)));
1170                         }
1171
1172                         /* Do the same for the thread counts, so we have something to compare to. We don't
1173                          * measure thread count noise, because there is none; these are exact measurements. */
1174                         thread_wave_component = mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->thread_counts, sample_count, hc->wave_period), average_thread_count);
1175
1176                         /* Update our moving average of the throughput noise. We'll use this
1177                          * later as feedback to determine the new size of the thread wave. */
1178                         if (hc->average_throughput_noise == 0) {
1179                                 hc->average_throughput_noise = throughput_error_estimate;
1180                         } else {
1181                                 hc->average_throughput_noise = (hc->throughput_error_smoothing_factor * throughput_error_estimate)
1182                                         + ((1.0 - hc->throughput_error_smoothing_factor) * hc->average_throughput_noise);
1183                         }
1184
1185                         if (cabs (thread_wave_component) > 0) {
1186                                 /* Adjust the throughput wave so it's centered around the target wave,
1187                                  * and then calculate the adjusted throughput/thread ratio. */
1188                                 ratio = mono_double_complex_div (mono_double_complex_sub (throughput_wave_component, mono_double_complex_scalar_mul(thread_wave_component, hc->target_throughput_ratio)), thread_wave_component);
1189                                 transition = TRANSITION_CLIMBING_MOVE;
1190                         } else {
1191                                 ratio = mono_double_complex_make (0, 0);
1192                                 transition = TRANSITION_STABILIZING;
1193                         }
1194
1195                         noise_for_confidence = MAX (hc->average_throughput_noise, throughput_error_estimate);
1196                         if (noise_for_confidence > 0) {
1197                                 confidence = cabs (thread_wave_component) / noise_for_confidence / hc->target_signal_to_noise_ratio;
1198                         } else {
1199                                 /* there is no noise! */
1200                                 confidence = 1.0;
1201                         }
1202                 }
1203         }
1204
1205         /* We use just the real part of the complex ratio we just calculated. If the throughput signal
1206          * is exactly in phase with the thread signal, this will be the same as taking the magnitude of
1207          * the complex move and moving that far up. If they're 180 degrees out of phase, we'll move
1208          * backward (because this indicates that our changes are having the opposite of the intended effect).
1209          * If they're 90 degrees out of phase, we won't move at all, because we can't tell whether we're
1210          * having a negative or positive effect on throughput. */
1211         move = creal (ratio);
1212         move = CLAMP (move, -1.0, 1.0);
1213
1214         /* Apply our confidence multiplier. */
1215         move *= CLAMP (confidence, -1.0, 1.0);
1216
1217         /* Now apply non-linear gain, such that values around zero are attenuated, while higher values
1218          * are enhanced. This allows us to move quickly if we're far away from the target, but more slowly
1219          * if we're getting close, giving us rapid ramp-up without wild oscillations around the target. */
1220         gain = hc->max_change_per_second * sample_duration;
1221         move = pow (fabs (move), hc->gain_exponent) * (move >= 0.0 ? 1 : -1) * gain;
1222         move = MIN (move, hc->max_change_per_sample);
1223
1224         /* If the result was positive, and CPU is > 95%, refuse the move. */
1225         if (move > 0.0 && threadpool->cpu_usage > CPU_USAGE_HIGH)
1226                 move = 0.0;
1227
1228         /* Apply the move to our control setting. */
1229         hc->current_control_setting += move;
1230
1231         /* Calculate the new thread wave magnitude, which is based on the moving average we've been keeping of the
1232          * throughput error.  This average starts at zero, so we'll start with a nice safe little wave at first. */
1233         new_thread_wave_magnitude = (gint)(0.5 + (hc->current_control_setting * hc->average_throughput_noise
1234                 * hc->target_signal_to_noise_ratio * hc->thread_magnitude_multiplier * 2.0));
1235         new_thread_wave_magnitude = CLAMP (new_thread_wave_magnitude, 1, hc->max_thread_wave_magnitude);
1236
1237         /* Make sure our control setting is within the ThreadPool's limits. */
1238         hc->current_control_setting = CLAMP (hc->current_control_setting, threadpool->limit_worker_min, threadpool->limit_worker_max - new_thread_wave_magnitude);
1239
1240         /* Calculate the new thread count (control setting + square wave). */
1241         new_thread_count = (gint)(hc->current_control_setting + new_thread_wave_magnitude * ((hc->total_samples / (hc->wave_period / 2)) % 2));
1242
1243         /* Make sure the new thread count doesn't exceed the ThreadPool's limits. */
1244         new_thread_count = CLAMP (new_thread_count, threadpool->limit_worker_min, threadpool->limit_worker_max);
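        /* Added example (not in the original): with wave_period = 4 the term
         * ((total_samples / 2) % 2) toggles between 0 and 1 every two samples, so with
         * e.g. current_control_setting = 12.3 and a wave magnitude of 2 the published
         * thread count alternates between 12 and 14; that square wave is the probe
         * signal the Fourier analysis above looks for in the throughput. */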
1245
1246         if (new_thread_count != current_thread_count)
1247                 hill_climbing_change_thread_count (new_thread_count, transition);
1248
1249         if (creal (ratio) < 0.0 && new_thread_count == threadpool->limit_worker_min)
1250                 *adjustment_interval = (gint)(0.5 + hc->current_sample_interval * (10.0 * MAX (-1.0 * creal (ratio), 1.0)));
1251         else
1252                 *adjustment_interval = hc->current_sample_interval;
1253
1254         return new_thread_count;
1255 }
1256
1257 static void
1258 heuristic_notify_work_completed (void)
1259 {
1260         g_assert (threadpool);
1261
1262         InterlockedIncrement (&threadpool->heuristic_completions);
1263         threadpool->heuristic_last_dequeue = mono_msec_ticks ();
1264 }
1265
1266 static gboolean
1267 heuristic_should_adjust (void)
1268 {
1269         g_assert (threadpool);
1270
1271         if (threadpool->heuristic_last_dequeue > threadpool->heuristic_last_adjustment + threadpool->heuristic_adjustment_interval) {
1272                 ThreadPoolCounter counter;
1273                 counter.as_gint64 = COUNTER_READ();
1274                 if (counter._.working <= counter._.max_working)
1275                         return TRUE;
1276         }
1277
1278         return FALSE;
1279 }
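
/* Minimal sketch of the lock-free counter update pattern used above (illustrative only — the real
 * COUNTER_READ / COUNTER_ATOMIC macros are defined earlier in this file; this #if 0 block merely
 * shows the usual compare-and-swap shape such macros take, assuming InterlockedRead64 and
 * InterlockedCompareExchange64 from mono/utils/atomic.h). */
#if 0
static void
example_set_max_working (volatile gint64 *counter_location, gint16 new_max_working)
{
	ThreadPoolCounter old_counter, new_counter;

	do {
		/* Read all four gint16 fields atomically through the packed gint64. */
		old_counter.as_gint64 = InterlockedRead64 (counter_location);
		new_counter = old_counter;
		new_counter._.max_working = new_max_working;
		/* Publish only if nobody changed the counter in the meantime. */
	} while (InterlockedCompareExchange64 (counter_location, new_counter.as_gint64, old_counter.as_gint64) != old_counter.as_gint64);
}
#endif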
1280
1281 static void
1282 heuristic_adjust (void)
1283 {
1284         g_assert (threadpool);
1285
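             /* mono_coop_mutex_trylock returns 0 on success; if another thread is already adjusting,
              * we simply skip this sample rather than block. */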
1286         if (mono_coop_mutex_trylock (&threadpool->heuristic_lock) == 0) {
1287                 gint32 completions = InterlockedExchange (&threadpool->heuristic_completions, 0);
1288                 guint32 sample_end = mono_msec_ticks ();
1289                 guint32 sample_duration = sample_end - threadpool->heuristic_sample_start;
1290
1291                 if (sample_duration >= threadpool->heuristic_adjustment_interval / 2) {
1292                         ThreadPoolCounter counter;
1293                         gint16 new_thread_count;
                             gint16 previous_max_working;
1294
1295                         counter.as_gint64 = COUNTER_READ ();
                             previous_max_working = counter._.max_working;
1296                         new_thread_count = hill_climbing_update (previous_max_working, sample_duration, completions, &threadpool->heuristic_adjustment_interval);
1297
1298                         COUNTER_ATOMIC (counter, { counter._.max_working = new_thread_count; });
1299
                             /* COUNTER_ATOMIC updates the local copy of the counter, so compare against the
                              * value read before the update to detect whether the heuristic raised the maximum. */
1300                         if (new_thread_count > previous_max_working)
1301                                 worker_request (mono_domain_get ());
1302
1303                         threadpool->heuristic_sample_start = sample_end;
1304                         threadpool->heuristic_last_adjustment = mono_msec_ticks ();
1305                 }
1306
1307                 mono_coop_mutex_unlock (&threadpool->heuristic_lock);
1308         }
1309 }
1310
1311 void
1312 mono_threadpool_ms_cleanup (void)
1313 {
1314 #ifndef DISABLE_SOCKETS
1315         mono_threadpool_ms_io_cleanup ();
1316 #endif
1317         mono_lazy_cleanup (&status, cleanup);
1318 }
1319
1320 MonoAsyncResult *
1321 mono_threadpool_ms_begin_invoke (MonoDomain *domain, MonoObject *target, MonoMethod *method, gpointer *params, MonoError *error)
1322 {
1323         static MonoClass *async_call_klass = NULL;
1324         MonoMethodMessage *message;
1325         MonoAsyncResult *async_result;
1326         MonoAsyncCall *async_call;
1327         MonoDelegate *async_callback = NULL;
1328         MonoObject *state = NULL;
1329
1330         if (!async_call_klass)
1331                 async_call_klass = mono_class_load_from_name (mono_defaults.corlib, "System", "MonoAsyncCall");
1332
1333         mono_lazy_initialize (&status, initialize);
1334
1335         mono_error_init (error);
1336
1337         message = mono_method_call_message_new (method, params, mono_get_delegate_invoke (method->klass), (params != NULL) ? (&async_callback) : NULL, (params != NULL) ? (&state) : NULL);
1338
1339         async_call = (MonoAsyncCall*) mono_object_new_checked (domain, async_call_klass, error);
1340         return_val_if_nok (error, NULL);
1341
1342         MONO_OBJECT_SETREF (async_call, msg, message);
1343         MONO_OBJECT_SETREF (async_call, state, state);
1344
1345         if (async_callback) {
1346                 MONO_OBJECT_SETREF (async_call, cb_method, mono_get_delegate_invoke (((MonoObject*) async_callback)->vtable->klass));
1347                 MONO_OBJECT_SETREF (async_call, cb_target, async_callback);
1348         }
1349
1350         async_result = mono_async_result_new (domain, NULL, async_call->state, NULL, (MonoObject*) async_call);
1351         MONO_OBJECT_SETREF (async_result, async_delegate, target);
1352
1353         mono_threadpool_ms_enqueue_work_item (domain, (MonoObject*) async_result, error);
1354         return_val_if_nok (error, NULL);
1355
1356         return async_result;
1357 }
1358
1359 MonoObject *
1360 mono_threadpool_ms_end_invoke (MonoAsyncResult *ares, MonoArray **out_args, MonoObject **exc)
1361 {
1362         MonoAsyncCall *ac;
1363
1364         g_assert (exc);
1365         g_assert (out_args);
1366
1367         *exc = NULL;
1368         *out_args = NULL;
1369
1370         /* check if already finished */
1371         mono_monitor_enter ((MonoObject*) ares);
1372
1373         if (ares->endinvoke_called) {
1374                 *exc = (MonoObject*) mono_get_exception_invalid_operation (NULL);
1375                 mono_monitor_exit ((MonoObject*) ares);
1376                 return NULL;
1377         }
1378
1379         ares->endinvoke_called = 1;
1380
1381         /* wait until we are really finished */
1382         if (ares->completed) {
1383                 mono_monitor_exit ((MonoObject *) ares);
1384         } else {
1385                 gpointer wait_event;
1386                 if (ares->handle) {
1387                         wait_event = mono_wait_handle_get_handle ((MonoWaitHandle*) ares->handle);
1388                 } else {
1389                         wait_event = CreateEvent (NULL, TRUE, FALSE, NULL);
1390                         g_assert (wait_event);
1391                         MONO_OBJECT_SETREF (ares, handle, (MonoObject*) mono_wait_handle_new (mono_object_domain (ares), wait_event));
1392                 }
1393                 mono_monitor_exit ((MonoObject*) ares);
1394                 MONO_PREPARE_BLOCKING;
1395                 WaitForSingleObjectEx (wait_event, INFINITE, TRUE);
1396                 MONO_FINISH_BLOCKING;
1397         }
1398
1399         ac = (MonoAsyncCall*) ares->object_data;
1400         g_assert (ac);
1401
1402         *exc = ac->msg->exc; /* FIXME: GC add write barrier */
1403         *out_args = ac->out_args;
1404         return ac->res;
1405 }
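
/* Usage sketch (illustrative only, not compiled): how a caller would pair the two entry points
 * above. The domain, target, method and params values are placeholders supplied by the caller. */
#if 0
static MonoObject*
example_invoke_through_threadpool (MonoDomain *domain, MonoObject *target, MonoMethod *method, gpointer *params)
{
	MonoError error;
	MonoAsyncResult *ares;
	MonoArray *out_args = NULL;
	MonoObject *exc = NULL;

	/* Queue the delegate invocation on the threadpool. */
	ares = mono_threadpool_ms_begin_invoke (domain, target, method, params, &error);
	if (!mono_error_ok (&error)) {
		mono_error_cleanup (&error);
		return NULL;
	}

	/* ... other work could happen here while the delegate runs on a pool thread ... */

	/* Block until the delegate has finished and collect its result, out args and exception. */
	return mono_threadpool_ms_end_invoke (ares, &out_args, &exc);
}
#endif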
1406
1407 gboolean
1408 mono_threadpool_ms_remove_domain_jobs (MonoDomain *domain, int timeout)
1409 {
1410         gboolean res = TRUE;
1411         guint32 start;
1412         gpointer sem;
1413
1414         g_assert (domain);
1415         g_assert (timeout >= -1);
1416
1417         g_assert (mono_domain_is_unloading (domain));
1418
1419         if (timeout != -1)
1420                 start = mono_msec_ticks ();
1421
1422 #ifndef DISABLE_SOCKETS
1423         mono_threadpool_ms_io_remove_domain_jobs (domain);
1424         if (timeout != -1) {
1425                 timeout -= mono_msec_ticks () - start;
1426                 if (timeout < 0)
1427                         return FALSE;
1428         }
1429 #endif
1430
1431         /*
1432          * There might be some threads out there that are about to execute work from the given domain.
1433          * We wait for them by setting up a semaphore that is pulsed by the thread that brings the domain's job count to zero.
1434          */
1435         sem = domain->cleanup_semaphore = CreateSemaphore (NULL, 0, 1, NULL);
1436
1437         /*
1438          * The memory barrier here is required to have global ordering between assigning to cleanup_semaphore
1439          * and reading threadpool_jobs. Otherwise this thread could read a stale version of threadpool_jobs
1440          * and wait forever.
1441          */
1442         mono_memory_write_barrier ();
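         /* Release side (expected elsewhere in this file): when a worker finishes the last queued job of an
          * unloading domain, it decrements domain->threadpool_jobs and, seeing cleanup_semaphore set,
          * releases the semaphore so the loop below can stop waiting. */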
1443
1444         while (domain->threadpool_jobs) {
1445                 MONO_PREPARE_BLOCKING;
1446                 WaitForSingleObject (sem, timeout);
1447                 MONO_FINISH_BLOCKING;
1448                 if (timeout != -1) {
1449                         timeout -= mono_msec_ticks () - start;
1450                         if (timeout <= 0) {
1451                                 res = FALSE;
1452                                 break;
1453                         }
1454                 }
1455         }
1456
1457         domain->cleanup_semaphore = NULL;
1458         CloseHandle (sem);
1459
1460         return res;
1461 }
1462
1463 void
1464 mono_threadpool_ms_suspend (void)
1465 {
1466         if (threadpool)
1467                 threadpool->suspended = TRUE;
1468 }
1469
1470 void
1471 mono_threadpool_ms_resume (void)
1472 {
1473         if (threadpool)
1474                 threadpool->suspended = FALSE;
1475 }
1476
1477 void
1478 ves_icall_System_Threading_ThreadPool_GetAvailableThreadsNative (gint32 *worker_threads, gint32 *completion_port_threads)
1479 {
1480         ThreadPoolCounter counter;
1481
1482         if (!worker_threads || !completion_port_threads)
1483                 return;
1484
1485         mono_lazy_initialize (&status, initialize);
1486
1487         counter.as_gint64 = COUNTER_READ ();
1488
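         /* Available worker threads are reported as the max limit minus the threads currently active;
          * e.g. (illustrative numbers) limit_worker_max = 200 with 8 active workers reports 192. */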
1489         *worker_threads = MAX (0, threadpool->limit_worker_max - counter._.active);
1490         *completion_port_threads = threadpool->limit_io_max;
1491 }
1492
1493 void
1494 ves_icall_System_Threading_ThreadPool_GetMinThreadsNative (gint32 *worker_threads, gint32 *completion_port_threads)
1495 {
1496         if (!worker_threads || !completion_port_threads)
1497                 return;
1498
1499         mono_lazy_initialize (&status, initialize);
1500
1501         *worker_threads = threadpool->limit_worker_min;
1502         *completion_port_threads = threadpool->limit_io_min;
1503 }
1504
1505 void
1506 ves_icall_System_Threading_ThreadPool_GetMaxThreadsNative (gint32 *worker_threads, gint32 *completion_port_threads)
1507 {
1508         if (!worker_threads || !completion_port_threads)
1509                 return;
1510
1511         mono_lazy_initialize (&status, initialize);
1512
1513         *worker_threads = threadpool->limit_worker_max;
1514         *completion_port_threads = threadpool->limit_io_max;
1515 }
1516
1517 MonoBoolean
1518 ves_icall_System_Threading_ThreadPool_SetMinThreadsNative (gint32 worker_threads, gint32 completion_port_threads)
1519 {
1520         mono_lazy_initialize (&status, initialize);
1521
1522         if (worker_threads <= 0 || worker_threads > threadpool->limit_worker_max)
1523                 return FALSE;
1524         if (completion_port_threads <= 0 || completion_port_threads > threadpool->limit_io_max)
1525                 return FALSE;
1526
1527         threadpool->limit_worker_min = worker_threads;
1528         threadpool->limit_io_min = completion_port_threads;
1529
1530         return TRUE;
1531 }
1532
1533 MonoBoolean
1534 ves_icall_System_Threading_ThreadPool_SetMaxThreadsNative (gint32 worker_threads, gint32 completion_port_threads)
1535 {
1536         gint cpu_count = mono_cpu_count ();
1537
1538         mono_lazy_initialize (&status, initialize);
1539
1540         if (worker_threads < threadpool->limit_worker_min || worker_threads < cpu_count)
1541                 return FALSE;
1542         if (completion_port_threads < threadpool->limit_io_min || completion_port_threads < cpu_count)
1543                 return FALSE;
1544
1545         threadpool->limit_worker_max = worker_threads;
1546         threadpool->limit_io_max = completion_port_threads;
1547
1548         return TRUE;
1549 }
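
/* Validation example (illustrative numbers): on an 8-CPU machine, SetMaxThreadsNative (4, 4) is
 * rejected because both values must be at least mono_cpu_count () and no lower than the current
 * minimums; SetMinThreadsNative likewise rejects values that are <= 0 or above the current maximums. */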
1550
1551 void
1552 ves_icall_System_Threading_ThreadPool_InitializeVMTp (MonoBoolean *enable_worker_tracking)
1553 {
1554         if (enable_worker_tracking) {
1555                 // TODO: implement some kind of switch to make it possible to enable worker tracking
1556                 *enable_worker_tracking = FALSE;
1557         }
1558
1559         mono_lazy_initialize (&status, initialize);
1560 }
1561
1562 MonoBoolean
1563 ves_icall_System_Threading_ThreadPool_NotifyWorkItemComplete (void)
1564 {
1565         ThreadPoolCounter counter;
1566
1567         if (mono_domain_is_unloading (mono_domain_get ()) || mono_runtime_is_shutting_down ())
1568                 return FALSE;
1569
1570         heuristic_notify_work_completed ();
1571
1572         if (heuristic_should_adjust ())
1573                 heuristic_adjust ();
1574
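         /* Returning FALSE signals that more workers are running than the heuristic currently wants,
          * so the calling worker should stop picking up further work items. */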
1575         counter.as_gint64 = COUNTER_READ ();
1576         return counter._.working <= counter._.max_working;
1577 }
1578
1579 void
1580 ves_icall_System_Threading_ThreadPool_NotifyWorkItemProgressNative (void)
1581 {
1582         heuristic_notify_work_completed ();
1583
1584         if (heuristic_should_adjust ())
1585                 heuristic_adjust ();
1586 }
1587
1588 void
1589 ves_icall_System_Threading_ThreadPool_ReportThreadStatus (MonoBoolean is_working)
1590 {
1591         // TODO
1592         MonoError error;
1593         mono_error_set_not_implemented (&error, "");
1594         mono_error_set_pending_exception (&error);
1595 }
1596
1597 MonoBoolean
1598 ves_icall_System_Threading_ThreadPool_RequestWorkerThread (void)
1599 {
1600         return worker_request (mono_domain_get ());
1601 }
1602
1603 MonoBoolean G_GNUC_UNUSED
1604 ves_icall_System_Threading_ThreadPool_PostQueuedCompletionStatus (MonoNativeOverlapped *native_overlapped)
1605 {
1606         /* This copies the behavior of the current Mono implementation */
1607         MonoError error;
1608         mono_error_set_not_implemented (&error, "");
1609         mono_error_set_pending_exception (&error);
1610         return FALSE;
1611 }
1612
1613 MonoBoolean G_GNUC_UNUSED
1614 ves_icall_System_Threading_ThreadPool_BindIOCompletionCallbackNative (gpointer file_handle)
1615 {
1616         /* This copies the behavior of the current Mono implementation */
1617         return TRUE;
1618 }
1619
1620 MonoBoolean G_GNUC_UNUSED
1621 ves_icall_System_Threading_ThreadPool_IsThreadPoolHosted (void)
1622 {
1623         return FALSE;
1624 }