[threadpool] Make ThreadPoolWorker statically allocated
[mono.git] / mono / metadata / threadpool-worker-default.c
1 /*
2  * threadpool-worker-default.c: native threadpool worker
3  *
4  * Author:
5  *      Ludovic Henry (ludovic.henry@xamarin.com)
6  *
7  * Licensed under the MIT license. See LICENSE file in the project root for full license information.
8  */
9
10 #include <stdlib.h>
11 #define _USE_MATH_DEFINES // needed by MSVC to define math constants
12 #include <math.h>
13 #include <config.h>
14 #include <glib.h>
15
16 #include <mono/metadata/class-internals.h>
17 #include <mono/metadata/exception.h>
18 #include <mono/metadata/gc-internals.h>
19 #include <mono/metadata/object.h>
20 #include <mono/metadata/object-internals.h>
21 #include <mono/metadata/threadpool.h>
22 #include <mono/metadata/threadpool-worker.h>
23 #include <mono/metadata/threadpool-io.h>
24 #include <mono/metadata/w32event.h>
25 #include <mono/utils/atomic.h>
26 #include <mono/utils/mono-compiler.h>
27 #include <mono/utils/mono-complex.h>
28 #include <mono/utils/mono-logger.h>
29 #include <mono/utils/mono-logger-internals.h>
30 #include <mono/utils/mono-proclib.h>
31 #include <mono/utils/mono-threads.h>
32 #include <mono/utils/mono-time.h>
33 #include <mono/utils/mono-rand.h>
34 #include <mono/utils/refcount.h>
35 #include <mono/utils/w32api.h>
36
37 #define CPU_USAGE_LOW 80
38 #define CPU_USAGE_HIGH 95
39
40 #define MONITOR_INTERVAL 500 // ms
41 #define MONITOR_MINIMAL_LIFETIME 60 * 1000 // ms
42
43 #define WORKER_CREATION_MAX_PER_SEC 10
44
45 /* The exponent to apply to the gain. 1.0 means to use linear gain,
46  * higher values will enhance large moves and damp small ones.
47  * default: 2.0 */
48 #define HILL_CLIMBING_GAIN_EXPONENT 2.0
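/* For example, since the gain is applied below as pow (fabs (move), gain_exponent) * sign,
 * an exponent of 2.0 turns a raw move of 0.5 into 0.25 while leaving 1.0 unchanged
 * (before gain scaling), damping small moves relative to large ones. */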
49
50 /* The 'cost' of a thread. 0 means drive for increased throughput regardless
51  * of thread count, higher values bias more against higher thread counts.
52  * default: 0.15 */
53 #define HILL_CLIMBING_BIAS 0.15
54
55 #define HILL_CLIMBING_WAVE_PERIOD 4
56 #define HILL_CLIMBING_MAX_WAVE_MAGNITUDE 20
57 #define HILL_CLIMBING_WAVE_MAGNITUDE_MULTIPLIER 1.0
58 #define HILL_CLIMBING_WAVE_HISTORY_SIZE 8
59 #define HILL_CLIMBING_TARGET_SIGNAL_TO_NOISE_RATIO 3.0
60 #define HILL_CLIMBING_MAX_CHANGE_PER_SECOND 4
61 #define HILL_CLIMBING_MAX_CHANGE_PER_SAMPLE 20
62 #define HILL_CLIMBING_SAMPLE_INTERVAL_LOW 10
63 #define HILL_CLIMBING_SAMPLE_INTERVAL_HIGH 200
64 #define HILL_CLIMBING_ERROR_SMOOTHING_FACTOR 0.01
65 #define HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT 0.15
66
67 typedef enum {
68         TRANSITION_WARMUP,
69         TRANSITION_INITIALIZING,
70         TRANSITION_RANDOM_MOVE,
71         TRANSITION_CLIMBING_MOVE,
72         TRANSITION_CHANGE_POINT,
73         TRANSITION_STABILIZING,
74         TRANSITION_STARVATION,
75         TRANSITION_THREAD_TIMED_OUT,
76         TRANSITION_UNDEFINED,
77 } ThreadPoolHeuristicStateTransition;
78
79 typedef struct {
80         gint32 wave_period;
81         gint32 samples_to_measure;
82         gdouble target_throughput_ratio;
83         gdouble target_signal_to_noise_ratio;
84         gdouble max_change_per_second;
85         gdouble max_change_per_sample;
86         gint32 max_thread_wave_magnitude;
87         gint32 sample_interval_low;
88         gdouble thread_magnitude_multiplier;
89         gint32 sample_interval_high;
90         gdouble throughput_error_smoothing_factor;
91         gdouble gain_exponent;
92         gdouble max_sample_error;
93
94         gdouble current_control_setting;
95         gint64 total_samples;
96         gint16 last_thread_count;
97         gdouble elapsed_since_last_change;
98         gdouble completions_since_last_change;
99
100         gdouble average_throughput_noise;
101
102         gdouble *samples;
103         gdouble *thread_counts;
104
105         guint32 current_sample_interval;
106         gpointer random_interval_generator;
107
108         gint32 accumulated_completion_count;
109         gdouble accumulated_sample_duration;
110 } ThreadPoolHillClimbing;
111
112 typedef struct {
113         MonoThreadPoolWorkerCallback callback;
114         gpointer data;
115 } ThreadPoolWorkItem;
116
117 typedef union {
118         struct {
119                 gint16 max_working; /* determined by heuristic */
120                 gint16 starting; /* starting, but not yet in worker_thread */
121                 gint16 working; /* executing worker_thread */
122                 gint16 parked; /* parked */
123         } _;
124         gint64 as_gint64;
125 } ThreadPoolWorkerCounter;
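/* The four 16-bit counters are packed into one 64-bit value so a consistent snapshot
 * can be taken with InterlockedRead64 and updated with a single compare-and-swap
 * (see COUNTER_READ and COUNTER_ATOMIC below). */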
126
127 typedef MonoInternalThread ThreadPoolWorkerThread;
128
129 typedef struct {
130         MonoRefCount ref;
131
132         ThreadPoolWorkerCounter counters;
133
134         GPtrArray *threads; // ThreadPoolWorkerThread* []
135         MonoCoopMutex threads_lock; /* protect access to working_threads and parked_threads */
136         gint32 parked_threads_count;
137         MonoCoopCond parked_threads_cond;
138         MonoCoopCond threads_exit_cond;
139
140         ThreadPoolWorkItem *work_items; // ThreadPoolWorkItem []
141         gint32 work_items_count;
142         gint32 work_items_size;
143         MonoCoopMutex work_items_lock;
144
145         guint32 worker_creation_current_second;
146         guint32 worker_creation_current_count;
147         MonoCoopMutex worker_creation_lock;
148
149         gint32 heuristic_completions;
150         gint64 heuristic_sample_start;
151         gint64 heuristic_last_dequeue; // ms
152         gint64 heuristic_last_adjustment; // ms
153         gint64 heuristic_adjustment_interval; // ms
154         ThreadPoolHillClimbing heuristic_hill_climbing;
155         MonoCoopMutex heuristic_lock;
156
157         gint32 limit_worker_min;
158         gint32 limit_worker_max;
159
160         MonoCpuUsageState *cpu_usage_state;
161         gint32 cpu_usage;
162
163         /* suspended by the debugger */
164         gboolean suspended;
165
166         gint32 monitor_status;
167 } ThreadPoolWorker;
168
169 enum {
170         MONITOR_STATUS_REQUESTED,
171         MONITOR_STATUS_WAITING_FOR_REQUEST,
172         MONITOR_STATUS_NOT_RUNNING,
173 };
174
175 static ThreadPoolWorker worker;
176
177 #define COUNTER_CHECK(counter) \
178         do { \
179                 g_assert (counter._.max_working > 0); \
180                 g_assert (counter._.starting >= 0); \
181                 g_assert (counter._.working >= 0); \
182         } while (0)
183
184 #define COUNTER_ATOMIC(var,block) \
185         do { \
186                 ThreadPoolWorkerCounter __old; \
187                 do { \
188                         __old = COUNTER_READ (); \
189                         (var) = __old; \
190                         { block; } \
191                         COUNTER_CHECK (var); \
192                 } while (InterlockedCompareExchange64 (&worker.counters.as_gint64, (var).as_gint64, __old.as_gint64) != __old.as_gint64); \
193         } while (0)
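/* Typical usage, mirroring the call sites below: copy the current counter into a local,
 * mutate the copy inside the block, and retry until the compare-and-swap succeeds
 * without another thread racing in between:
 *
 *     ThreadPoolWorkerCounter counter;
 *     COUNTER_ATOMIC (counter, {
 *             counter._.starting ++;
 *     });
 */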
194
195 static inline ThreadPoolWorkerCounter
196 COUNTER_READ (void)
197 {
198         ThreadPoolWorkerCounter counter;
199         counter.as_gint64 = InterlockedRead64 (&worker.counters.as_gint64);
200         return counter;
201 }
202
203 static gpointer
204 rand_create (void)
205 {
206         mono_rand_open ();
207         return mono_rand_init (NULL, 0);
208 }
209
210 static guint32
211 rand_next (gpointer *handle, guint32 min, guint32 max)
212 {
213         MonoError error;
214         guint32 val;
215         mono_rand_try_get_uint32 (handle, &val, min, max, &error);
216         // FIXME handle error
217         mono_error_assert_ok (&error);
218         return val;
219 }
220
221 static void
222 destroy (gpointer data)
223 {
224 #if 0
225         mono_coop_mutex_destroy (&worker.threads_lock);
226         mono_coop_cond_destroy (&worker.parked_threads_cond);
227
228         mono_coop_mutex_destroy (&worker.work_items_lock);
229
230         mono_coop_mutex_destroy (&worker.worker_creation_lock);
231
232         mono_coop_mutex_destroy (&worker.heuristic_lock);
233
234         g_free (worker.cpu_usage_state);
235 #endif
236 }
237
238 void
239 mono_threadpool_worker_init (void)
240 {
241         ThreadPoolHillClimbing *hc;
242         const char *threads_per_cpu_env;
243         gint threads_per_cpu;
244         gint threads_count;
245
246         mono_refcount_init (&worker, destroy);
247
248         worker.threads = g_ptr_array_new ();
249         mono_coop_mutex_init (&worker.threads_lock);
250         worker.parked_threads_count = 0;
251         mono_coop_cond_init (&worker.parked_threads_cond);
252         mono_coop_cond_init (&worker.threads_exit_cond);
253
254         /* worker.work_items_size is initialized to 0 */
255         mono_coop_mutex_init (&worker.work_items_lock);
256
257         worker.worker_creation_current_second = -1;
258         mono_coop_mutex_init (&worker.worker_creation_lock);
259
260         worker.heuristic_adjustment_interval = 10;
261         mono_coop_mutex_init (&worker.heuristic_lock);
262
263         mono_rand_open ();
264
265         hc = &worker.heuristic_hill_climbing;
266
267         hc->wave_period = HILL_CLIMBING_WAVE_PERIOD;
268         hc->max_thread_wave_magnitude = HILL_CLIMBING_MAX_WAVE_MAGNITUDE;
269         hc->thread_magnitude_multiplier = (gdouble) HILL_CLIMBING_WAVE_MAGNITUDE_MULTIPLIER;
270         hc->samples_to_measure = hc->wave_period * HILL_CLIMBING_WAVE_HISTORY_SIZE;
271         hc->target_throughput_ratio = (gdouble) HILL_CLIMBING_BIAS;
272         hc->target_signal_to_noise_ratio = (gdouble) HILL_CLIMBING_TARGET_SIGNAL_TO_NOISE_RATIO;
273         hc->max_change_per_second = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SECOND;
274         hc->max_change_per_sample = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SAMPLE;
275         hc->sample_interval_low = HILL_CLIMBING_SAMPLE_INTERVAL_LOW;
276         hc->sample_interval_high = HILL_CLIMBING_SAMPLE_INTERVAL_HIGH;
277         hc->throughput_error_smoothing_factor = (gdouble) HILL_CLIMBING_ERROR_SMOOTHING_FACTOR;
278         hc->gain_exponent = (gdouble) HILL_CLIMBING_GAIN_EXPONENT;
279         hc->max_sample_error = (gdouble) HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT;
280         hc->current_control_setting = 0;
281         hc->total_samples = 0;
282         hc->last_thread_count = 0;
283         hc->average_throughput_noise = 0;
284         hc->elapsed_since_last_change = 0;
285         hc->accumulated_completion_count = 0;
286         hc->accumulated_sample_duration = 0;
287         hc->samples = g_new0 (gdouble, hc->samples_to_measure);
288         hc->thread_counts = g_new0 (gdouble, hc->samples_to_measure);
289         hc->random_interval_generator = rand_create ();
290         hc->current_sample_interval = rand_next (&hc->random_interval_generator, hc->sample_interval_low, hc->sample_interval_high);
291
292         if (!(threads_per_cpu_env = g_getenv ("MONO_THREADS_PER_CPU")))
293                 threads_per_cpu = 1;
294         else
295                 threads_per_cpu = CLAMP (atoi (threads_per_cpu_env), 1, 50);
296
297         threads_count = mono_cpu_count () * threads_per_cpu;
298
299         worker.limit_worker_min = threads_count;
300
301 #if defined (PLATFORM_ANDROID) || defined (HOST_IOS)
302         worker.limit_worker_max = CLAMP (threads_count * 100, MIN (threads_count, 200), MAX (threads_count, 200));
303 #else
304         worker.limit_worker_max = threads_count * 100;
305 #endif
306
307         worker.counters._.max_working = worker.limit_worker_min;
308
309         worker.cpu_usage_state = g_new0 (MonoCpuUsageState, 1);
310
311         worker.suspended = FALSE;
312
313         worker.monitor_status = MONITOR_STATUS_NOT_RUNNING;
314 }
315
316 void
317 mono_threadpool_worker_cleanup (void)
318 {
319         MonoInternalThread *current;
320
321         /* throughout the code we assume that we are cleaning
322          * up only when the runtime is shutting down */
323         g_assert (mono_runtime_is_shutting_down ());
324
325         current = mono_thread_internal_current ();
326
327         while (worker.monitor_status != MONITOR_STATUS_NOT_RUNNING)
328                 mono_thread_info_sleep (1, NULL);
329
330         mono_coop_mutex_lock (&worker.threads_lock);
331
332         /* unpark all worker.parked_threads */
333         mono_coop_cond_broadcast (&worker.parked_threads_cond);
334
335 #if 0
336         for (;;) {
337                 ThreadPoolWorkerCounter counter;
338
339                 counter = COUNTER_READ ();
340                 if (counter._.starting + counter._.working + counter._.parked == 0)
341                         break;
342
343                 if (counter._.starting + counter._.working + counter._.parked == 1) {
344                         if (worker.threads->len == 1 && g_ptr_array_index (worker.threads, 0) == current) {
345                                 /* We are waiting on ourselves */
346                                 break;
347                         }
348                 }
349
350                 mono_coop_cond_wait (&worker.threads_exit_cond, &worker.threads_lock);
351         }
352 #endif
353
354         mono_coop_mutex_unlock (&worker.threads_lock);
355
356         mono_refcount_dec (&worker);
357 }
358
359 static void
360 work_item_lock (void)
361 {
362         mono_coop_mutex_lock (&worker.work_items_lock);
363 }
364
365 static void
366 work_item_unlock (void)
367 {
368         mono_coop_mutex_unlock (&worker.work_items_lock);
369 }
370
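/* work_items is used as a LIFO stack protected by work_items_lock: items are pushed at
 * and popped from the end, and the backing array grows and shrinks in chunks of 64 slots. */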
371 static void
372 work_item_push (MonoThreadPoolWorkerCallback callback, gpointer data)
373 {
374         ThreadPoolWorkItem work_item;
375
376         g_assert (callback);
377
378         work_item.callback = callback;
379         work_item.data = data;
380
381         work_item_lock ();
382
383         g_assert (worker.work_items_count <= worker.work_items_size);
384
385         if (G_UNLIKELY (worker.work_items_count == worker.work_items_size)) {
386                 worker.work_items_size += 64;
387                 worker.work_items = g_renew (ThreadPoolWorkItem, worker.work_items, worker.work_items_size);
388         }
389
390         g_assert (worker.work_items);
391
392         worker.work_items [worker.work_items_count ++] = work_item;
393
394         // printf ("[push] worker.work_items = %p, worker.work_items_count = %d, worker.work_items_size = %d\n",
395         //      worker.work_items, worker.work_items_count, worker.work_items_size);
396
397         work_item_unlock ();
398 }
399
400 static gboolean
401 work_item_try_pop (ThreadPoolWorkItem *work_item)
402 {
403         g_assert (work_item);
404
405         work_item_lock ();
406
407         // printf ("[pop]  worker.work_items = %p, worker.work_items_count = %d, worker.work_items_size = %d\n",
408         //      worker.work_items, worker.work_items_count, worker.work_items_size);
409
410         if (worker.work_items_count == 0) {
411                 work_item_unlock ();
412                 return FALSE;
413         }
414
415         *work_item = worker.work_items [-- worker.work_items_count];
416
417         if (G_UNLIKELY (worker.work_items_count >= 64 * 3 && worker.work_items_count < worker.work_items_size / 2)) {
418                 worker.work_items_size -= 64;
419                 worker.work_items = g_renew (ThreadPoolWorkItem, worker.work_items, worker.work_items_size);
420         }
421
422         work_item_unlock ();
423
424         return TRUE;
425 }
426
427 static gint32
428 work_item_count (void)
429 {
430         gint32 count;
431
432         work_item_lock ();
433         count = worker.work_items_count;
434         work_item_unlock ();
435
436         return count;
437 }
438
439 static void worker_request (void);
440
441 void
442 mono_threadpool_worker_enqueue (MonoThreadPoolWorkerCallback callback, gpointer data)
443 {
444         work_item_push (callback, data);
445
446         worker_request ();
447 }
448
449 static void
450 worker_wait_interrupt (gpointer unused)
451 {
452         mono_coop_mutex_lock (&worker.threads_lock);
453         mono_coop_cond_signal (&worker.parked_threads_cond);
454         mono_coop_mutex_unlock (&worker.threads_lock);
455
456         mono_refcount_dec (&worker);
457 }
458
459 /* return TRUE if timeout, FALSE otherwise (worker unpark or interrupt) */
460 static gboolean
461 worker_park (void)
462 {
463         gboolean timeout = FALSE;
464
465         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] worker parking", mono_native_thread_id_get ());
466
467         mono_coop_mutex_lock (&worker.threads_lock);
468
469         if (!mono_runtime_is_shutting_down ()) {
470                 static gpointer rand_handle = NULL;
471                 MonoInternalThread *thread;
472                 gboolean interrupted = FALSE;
473                 ThreadPoolWorkerCounter counter;
474
475                 if (!rand_handle)
476                         rand_handle = rand_create ();
477                 g_assert (rand_handle);
478
479                 thread = mono_thread_internal_current ();
480                 g_assert (thread);
481
482                 COUNTER_ATOMIC (counter, {
483                         counter._.working --;
484                         counter._.parked ++;
485                 });
486
487                 worker.parked_threads_count += 1;
488
489                 mono_refcount_inc (&worker);
490                 mono_thread_info_install_interrupt (worker_wait_interrupt, NULL, &interrupted);
491                 if (interrupted) {
492                         mono_refcount_dec (&worker);
493                         goto done;
494                 }
495
496                 if (mono_coop_cond_timedwait (&worker.parked_threads_cond, &worker.threads_lock, rand_next (&rand_handle, 5 * 1000, 60 * 1000)) != 0)
497                         timeout = TRUE;
498
499                 mono_thread_info_uninstall_interrupt (&interrupted);
500                 if (!interrupted)
501                         mono_refcount_dec (&worker);
502
503 done:
504                 worker.parked_threads_count -= 1;
505
506                 COUNTER_ATOMIC (counter, {
507                         counter._.working ++;
508                         counter._.parked --;
509                 });
510         }
511
512         mono_coop_mutex_unlock (&worker.threads_lock);
513
514         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] worker unparking, timeout? %s", mono_native_thread_id_get (), timeout ? "yes" : "no");
515
516         return timeout;
517 }
518
519 static gboolean
520 worker_try_unpark (void)
521 {
522         gboolean res = FALSE;
523
524         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try unpark worker", mono_native_thread_id_get ());
525
526         mono_coop_mutex_lock (&worker.threads_lock);
527         if (worker.parked_threads_count > 0) {
528                 mono_coop_cond_signal (&worker.parked_threads_cond);
529                 res = TRUE;
530         }
531         mono_coop_mutex_unlock (&worker.threads_lock);
532
533         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try unpark worker, success? %s", mono_native_thread_id_get (), res ? "yes" : "no");
534
535         return res;
536 }
537
538 static gsize WINAPI
539 worker_thread (gpointer unused)
540 {
541         MonoInternalThread *thread;
542         ThreadPoolWorkerCounter counter;
543
544         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker starting", mono_native_thread_id_get ());
545
546         COUNTER_ATOMIC (counter, {
547                 counter._.starting --;
548                 counter._.working ++;
549         });
550
551         thread = mono_thread_internal_current ();
552         g_assert (thread);
553
554         mono_coop_mutex_lock (&worker.threads_lock);
555         g_ptr_array_add (worker.threads, thread);
556         mono_coop_mutex_unlock (&worker.threads_lock);
557
558         while (!mono_runtime_is_shutting_down ()) {
559                 ThreadPoolWorkItem work_item;
560
561                 if (mono_thread_interruption_checkpoint ())
562                         continue;
563
564                 if (!work_item_try_pop (&work_item)) {
565                         gboolean timeout;
566
567                         timeout = worker_park ();
568                         if (timeout)
569                                 break;
570
571                         continue;
572                 }
573
574                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] worker executing %p (%p)",
575                         mono_native_thread_id_get (), work_item.callback, work_item.data);
576
577                 work_item.callback (work_item.data);
578         }
579
580         mono_coop_mutex_lock (&worker.threads_lock);
581
582         COUNTER_ATOMIC (counter, {
583                 counter._.working --;
584         });
585
586         g_ptr_array_remove (worker.threads, thread);
587
588         mono_coop_cond_signal (&worker.threads_exit_cond);
589
590         mono_coop_mutex_unlock (&worker.threads_lock);
591
592         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker finishing", mono_native_thread_id_get ());
593
594         mono_refcount_dec (&worker);
595
596         return 0;
597 }
598
599 static gboolean
600 worker_try_create (void)
601 {
602         MonoError error;
603         MonoInternalThread *thread;
604         gint64 current_ticks;
605         gint32 now;
606         ThreadPoolWorkerCounter counter;
607
608         if (mono_runtime_is_shutting_down ())
609                 return FALSE;
610
611         mono_coop_mutex_lock (&worker.worker_creation_lock);
612
613         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker", mono_native_thread_id_get ());
614
615         current_ticks = mono_100ns_ticks ();
616         if (0 == current_ticks) {
617                 g_warning ("failed to get 100ns ticks");
618         } else {
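                /* mono_100ns_ticks () counts 100ns units, 10,000,000 per second, so `now` identifies
                 * the current second for the WORKER_CREATION_MAX_PER_SEC rate limit below. */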
619                 now = current_ticks / (10 * 1000 * 1000);
620                 if (worker.worker_creation_current_second != now) {
621                         worker.worker_creation_current_second = now;
622                         worker.worker_creation_current_count = 0;
623                 } else {
624                         g_assert (worker.worker_creation_current_count <= WORKER_CREATION_MAX_PER_SEC);
625                         if (worker.worker_creation_current_count == WORKER_CREATION_MAX_PER_SEC) {
626                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: maximum number of workers created per second reached, current count = %d",
627                                         mono_native_thread_id_get (), worker.worker_creation_current_count);
628                                 mono_coop_mutex_unlock (&worker.worker_creation_lock);
629                                 return FALSE;
630                         }
631                 }
632         }
633
634         COUNTER_ATOMIC (counter, {
635                 if (counter._.working >= counter._.max_working) {
636                         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: maximum number of working threads reached",
637                                 mono_native_thread_id_get ());
638                         mono_coop_mutex_unlock (&worker.worker_creation_lock);
639                         return FALSE;
640                 }
641                 counter._.starting ++;
642         });
643
644         mono_refcount_inc (&worker);
645         thread = mono_thread_create_internal (mono_get_root_domain (), worker_thread, NULL, TRUE, 0, &error);
646         if (!thread) {
647                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: could not create thread due to %s", mono_native_thread_id_get (), mono_error_get_message (&error));
648                 mono_error_cleanup (&error);
649
650                 COUNTER_ATOMIC (counter, {
651                         counter._.starting --;
652                 });
653
654                 mono_coop_mutex_unlock (&worker.worker_creation_lock);
655
656                 mono_refcount_dec (&worker);
657
658                 return FALSE;
659         }
660
661         worker.worker_creation_current_count += 1;
662
663         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, created %p, now = %d count = %d",
664                 mono_native_thread_id_get (), (gpointer) thread->tid, now, worker.worker_creation_current_count);
665
666         mono_coop_mutex_unlock (&worker.worker_creation_lock);
667         return TRUE;
668 }
669
670 static void monitor_ensure_running (void);
671
672 static void
673 worker_request (void)
674 {
675         if (worker.suspended)
676                 return;
677
678         monitor_ensure_running ();
679
680         if (worker_try_unpark ()) {
681                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, unparked", mono_native_thread_id_get ());
682                 return;
683         }
684
685         if (worker_try_create ()) {
686                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, created", mono_native_thread_id_get ());
687                 return;
688         }
689
690         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, failed", mono_native_thread_id_get ());
691 }
692
693 static gboolean
694 monitor_should_keep_running (void)
695 {
696         static gint64 last_should_keep_running = -1;
697
698         g_assert (worker.monitor_status == MONITOR_STATUS_WAITING_FOR_REQUEST || worker.monitor_status == MONITOR_STATUS_REQUESTED);
699
700         if (InterlockedExchange (&worker.monitor_status, MONITOR_STATUS_WAITING_FOR_REQUEST) == MONITOR_STATUS_WAITING_FOR_REQUEST) {
701                 gboolean should_keep_running = TRUE, force_should_keep_running = FALSE;
702
703                 if (mono_runtime_is_shutting_down ()) {
704                         should_keep_running = FALSE;
705                 } else {
706                         if (work_item_count () == 0)
707                                 should_keep_running = FALSE;
708
709                         if (!should_keep_running) {
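                                /* MONITOR_MINIMAL_LIFETIME is in ms while mono_100ns_ticks () counts 100ns
                                 * units, hence the * 1000 * 10 conversion in the comparison below. */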
710                                 if (last_should_keep_running == -1 || mono_100ns_ticks () - last_should_keep_running < MONITOR_MINIMAL_LIFETIME * 1000 * 10) {
711                                         should_keep_running = force_should_keep_running = TRUE;
712                                 }
713                         }
714                 }
715
716                 if (should_keep_running) {
717                         if (last_should_keep_running == -1 || !force_should_keep_running)
718                                 last_should_keep_running = mono_100ns_ticks ();
719                 } else {
720                         last_should_keep_running = -1;
721                         if (InterlockedCompareExchange (&worker.monitor_status, MONITOR_STATUS_NOT_RUNNING, MONITOR_STATUS_WAITING_FOR_REQUEST) == MONITOR_STATUS_WAITING_FOR_REQUEST)
722                                 return FALSE;
723                 }
724         }
725
726         g_assert (worker.monitor_status == MONITOR_STATUS_WAITING_FOR_REQUEST || worker.monitor_status == MONITOR_STATUS_REQUESTED);
727
728         return TRUE;
729 }
730
731 static gboolean
732 monitor_sufficient_delay_since_last_dequeue (void)
733 {
734         gint64 threshold;
735
736         if (worker.cpu_usage < CPU_USAGE_LOW) {
737                 threshold = MONITOR_INTERVAL;
738         } else {
739                 ThreadPoolWorkerCounter counter;
740                 counter = COUNTER_READ ();
741                 threshold = counter._.max_working * MONITOR_INTERVAL * 2;
742         }
743
744         return mono_msec_ticks () >= worker.heuristic_last_dequeue + threshold;
745 }
746
747 static void hill_climbing_force_change (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition);
748
749 static gsize WINAPI
750 monitor_thread (gpointer unused)
751 {
752         MonoInternalThread *internal;
753         guint i;
754
755         internal = mono_thread_internal_current ();
756         g_assert (internal);
757
758         mono_cpu_usage (worker.cpu_usage_state);
759
760         // printf ("monitor_thread: start\n");
761
762         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, started", mono_native_thread_id_get ());
763
764         do {
765                 ThreadPoolWorkerCounter counter;
766                 gboolean limit_worker_max_reached;
767                 gint32 interval_left = MONITOR_INTERVAL;
768                 gint32 awake = 0; /* number of spurious awakes we tolerate before doing a round of rebalancing */
769
770                 g_assert (worker.monitor_status != MONITOR_STATUS_NOT_RUNNING);
771
772                 // counter = COUNTER_READ ();
773                 // printf ("monitor_thread: starting = %d working = %d parked = %d max_working = %d\n",
774                 //      counter._.starting, counter._.working, counter._.parked, counter._.max_working);
775
776                 do {
777                         gint64 ts;
778                         gboolean alerted = FALSE;
779
780                         if (mono_runtime_is_shutting_down ())
781                                 break;
782
783                         ts = mono_msec_ticks ();
784                         if (mono_thread_info_sleep (interval_left, &alerted) == 0)
785                                 break;
786                         interval_left -= mono_msec_ticks () - ts;
787
788                         g_assert (!(internal->state & ThreadState_StopRequested));
789                         mono_thread_interruption_checkpoint ();
790                 } while (interval_left > 0 && ++awake < 10);
791
792                 if (mono_runtime_is_shutting_down ())
793                         continue;
794
795                 if (worker.suspended)
796                         continue;
797
798                 if (work_item_count () == 0)
799                         continue;
800
801                 worker.cpu_usage = mono_cpu_usage (worker.cpu_usage_state);
802
803                 if (!monitor_sufficient_delay_since_last_dequeue ())
804                         continue;
805
806                 limit_worker_max_reached = FALSE;
807
808                 COUNTER_ATOMIC (counter, {
809                         if (counter._.max_working >= worker.limit_worker_max) {
810                                 limit_worker_max_reached = TRUE;
811                                 break;
812                         }
813                         counter._.max_working ++;
814                 });
815
816                 if (limit_worker_max_reached)
817                         continue;
818
819                 hill_climbing_force_change (counter._.max_working, TRANSITION_STARVATION);
820
821                 for (i = 0; i < 5; ++i) {
822                         if (mono_runtime_is_shutting_down ())
823                                 break;
824
825                         if (worker_try_unpark ()) {
826                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, unparked", mono_native_thread_id_get ());
827                                 break;
828                         }
829
830                         if (worker_try_create ()) {
831                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, created", mono_native_thread_id_get ());
832                                 break;
833                         }
834                 }
835         } while (monitor_should_keep_running ());
836
837         // printf ("monitor_thread: stop\n");
838
839         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, finished", mono_native_thread_id_get ());
840
841         return 0;
842 }
843
844 static void
845 monitor_ensure_running (void)
846 {
847         MonoError error;
848         for (;;) {
849                 switch (worker.monitor_status) {
850                 case MONITOR_STATUS_REQUESTED:
851                         // printf ("monitor_thread: requested\n");
852                         return;
853                 case MONITOR_STATUS_WAITING_FOR_REQUEST:
854                         // printf ("monitor_thread: waiting for request\n");
855                         InterlockedCompareExchange (&worker.monitor_status, MONITOR_STATUS_REQUESTED, MONITOR_STATUS_WAITING_FOR_REQUEST);
856                         break;
857                 case MONITOR_STATUS_NOT_RUNNING:
858                         // printf ("monitor_thread: not running\n");
859                         if (mono_runtime_is_shutting_down ())
860                                 return;
861                         if (InterlockedCompareExchange (&worker.monitor_status, MONITOR_STATUS_REQUESTED, MONITOR_STATUS_NOT_RUNNING) == MONITOR_STATUS_NOT_RUNNING) {
862                                 // printf ("monitor_thread: creating\n");
863                                 if (!mono_thread_create_internal (mono_get_root_domain (), monitor_thread, NULL, TRUE, SMALL_STACK, &error)) {
864                                         // printf ("monitor_thread: creating failed\n");
865                                         worker.monitor_status = MONITOR_STATUS_NOT_RUNNING;
866                                         mono_error_cleanup (&error);
867                                 }
868                                 return;
869                         }
870                         break;
871                 default: g_assert_not_reached ();
872                 }
873         }
874 }
875
876 static void
877 hill_climbing_change_thread_count (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition)
878 {
879         ThreadPoolHillClimbing *hc;
880
881         hc = &worker.heuristic_hill_climbing;
882
883         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] hill climbing, change max number of threads %d", mono_native_thread_id_get (), new_thread_count);
884
885         hc->last_thread_count = new_thread_count;
886         hc->current_sample_interval = rand_next (&hc->random_interval_generator, hc->sample_interval_low, hc->sample_interval_high);
887         hc->elapsed_since_last_change = 0;
888         hc->completions_since_last_change = 0;
889 }
890
891 static void
892 hill_climbing_force_change (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition)
893 {
894         ThreadPoolHillClimbing *hc;
895
896         hc = &worker.heuristic_hill_climbing;
897
898         if (new_thread_count != hc->last_thread_count) {
899                 hc->current_control_setting += new_thread_count - hc->last_thread_count;
900                 hill_climbing_change_thread_count (new_thread_count, transition);
901         }
902 }
903
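/* Extract the Fourier component of `samples` at the given period, over the most recent
 * sample_count entries. The recurrence below is essentially the Goertzel algorithm: a
 * one-bin DFT computed in a single pass with O(1) state (q0, q1, q2). */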
904 static double_complex
905 hill_climbing_get_wave_component (gdouble *samples, guint sample_count, gdouble period)
906 {
907         ThreadPoolHillClimbing *hc;
908         gdouble w, cosine, sine, coeff, q0, q1, q2;
909         guint i;
910
911         g_assert (sample_count >= period);
912         g_assert (period >= 2);
913
914         hc = &worker.heuristic_hill_climbing;
915
916         w = 2.0 * M_PI / period;
917         cosine = cos (w);
918         sine = sin (w);
919         coeff = 2.0 * cosine;
920         q0 = q1 = q2 = 0;
921
922         for (i = 0; i < sample_count; ++i) {
923                 q0 = coeff * q1 - q2 + samples [(hc->total_samples - sample_count + i) % hc->samples_to_measure];
924                 q2 = q1;
925                 q1 = q0;
926         }
927
928         return mono_double_complex_scalar_div (mono_double_complex_make (q1 - q2 * cosine, (q2 * sine)), ((gdouble)sample_count));
929 }
930
931 static gint16
932 hill_climbing_update (gint16 current_thread_count, guint32 sample_duration, gint32 completions, gint64 *adjustment_interval)
933 {
934         ThreadPoolHillClimbing *hc;
935         ThreadPoolHeuristicStateTransition transition;
936         gdouble throughput;
937         gdouble throughput_error_estimate;
938         gdouble confidence;
939         gdouble move;
940         gdouble gain;
941         gint sample_index;
942         gint sample_count;
943         gint new_thread_wave_magnitude;
944         gint new_thread_count;
945         double_complex thread_wave_component;
946         double_complex throughput_wave_component;
947         double_complex ratio;
948
949         g_assert (adjustment_interval);
950
951         hc = &worker.heuristic_hill_climbing;
952
953         /* If someone changed the thread count without telling us, update our records accordingly. */
954         if (current_thread_count != hc->last_thread_count)
955                 hill_climbing_force_change (current_thread_count, TRANSITION_INITIALIZING);
956
957         /* Update the cumulative stats for this thread count */
958         hc->elapsed_since_last_change += sample_duration;
959         hc->completions_since_last_change += completions;
960
961         /* Add in any data we've already collected about this sample */
962         sample_duration += hc->accumulated_sample_duration;
963         completions += hc->accumulated_completion_count;
964
965         /* We need to make sure we're collecting reasonably accurate data. Since we're just counting the end
966          * of each work item, we are going to be missing some data about what really happened during the
967          * sample interval. The count produced by each thread includes an initial work item that may have
968          * started well before the start of the interval, and each thread may have been running some new
969          * work item for some time before the end of the interval, which did not yet get counted. So
970          * our count is going to be off by +/- threadCount work items.
971          *
972          * The exception is that the thread that reported to us last time definitely wasn't running any work
973          * at that time, and the thread that's reporting now definitely isn't running a work item now. So
974          * we really only need to consider threadCount-1 threads.
975          *
976          * Thus the percent error in our count is +/- (threadCount-1)/numCompletions.
977          *
978          * We cannot rely on the frequency-domain analysis we'll be doing later to filter out this error, because
979          * of the way it accumulates over time. If this sample is off by, say, 33% in the negative direction,
980          * then the next one likely will be too. The one after that will include the sum of the completions
981          * we missed in the previous samples, and so will be 33% positive. So every three samples we'll have
982          * two "low" samples and one "high" sample. This will appear as periodic variation right in the frequency
983          * range we're targeting, which will not be filtered by the frequency-domain translation. */
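        /* For example, with 10 worker threads and only 30 completions in this sample the bound is
         * (10 - 1) / 30 = 0.3, above max_sample_error (0.15), so we keep accumulating. */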
984         if (hc->total_samples > 0 && ((current_thread_count - 1.0) / completions) >= hc->max_sample_error) {
985                 /* Not accurate enough yet. Let's accumulate the data so
986                  * far, and tell the ThreadPoolWorker to collect a little more. */
987                 hc->accumulated_sample_duration = sample_duration;
988                 hc->accumulated_completion_count = completions;
989                 *adjustment_interval = 10;
990                 return current_thread_count;
991         }
992
993         /* We've got enough data for our sample; reset our accumulators for next time. */
994         hc->accumulated_sample_duration = 0;
995         hc->accumulated_completion_count = 0;
996
997         /* Add the current thread count and throughput sample to our history. */
998         throughput = ((gdouble) completions) / sample_duration;
999
1000         sample_index = hc->total_samples % hc->samples_to_measure;
1001         hc->samples [sample_index] = throughput;
1002         hc->thread_counts [sample_index] = current_thread_count;
1003         hc->total_samples ++;
1004
1005         /* Set up defaults for our metrics. */
1006         thread_wave_component = mono_double_complex_make(0, 0);
1007         throughput_wave_component = mono_double_complex_make(0, 0);
1008         throughput_error_estimate = 0;
1009         ratio = mono_double_complex_make(0, 0);
1010         confidence = 0;
1011
1012         transition = TRANSITION_WARMUP;
1013
1014         /* How many samples will we use? It must be at least the three wave periods we're looking for, and it must also
1015          * be a whole multiple of the primary wave's period; otherwise the frequency we're looking for will fall between
1016          * two frequency bands in the Fourier analysis, and we won't be able to measure it accurately. */
1017         sample_count = ((gint) MIN (hc->total_samples - 1, hc->samples_to_measure) / hc->wave_period) * hc->wave_period;
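        /* e.g. with wave_period = 4 and 30 usable samples, this keeps (30 / 4) * 4 = 28 samples,
         * a whole number of wave periods. */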
1018
1019         if (sample_count > hc->wave_period) {
1020                 guint i;
1021                 gdouble average_throughput;
1022                 gdouble average_thread_count;
1023                 gdouble sample_sum = 0;
1024                 gdouble thread_sum = 0;
1025
1026                 /* Average the throughput and thread count samples, so we can scale the wave magnitudes later. */
1027                 for (i = 0; i < sample_count; ++i) {
1028                         guint j = (hc->total_samples - sample_count + i) % hc->samples_to_measure;
1029                         sample_sum += hc->samples [j];
1030                         thread_sum += hc->thread_counts [j];
1031                 }
1032
1033                 average_throughput = sample_sum / sample_count;
1034                 average_thread_count = thread_sum / sample_count;
1035
1036                 if (average_throughput > 0 && average_thread_count > 0) {
1037                         gdouble noise_for_confidence, adjacent_period_1, adjacent_period_2;
1038
1039                         /* Calculate the periods of the adjacent frequency bands we'll be using to
1040                          * measure noise levels. We want the two adjacent Fourier frequency bands. */
1041                         adjacent_period_1 = sample_count / (((gdouble) sample_count) / ((gdouble) hc->wave_period) + 1);
1042                         adjacent_period_2 = sample_count / (((gdouble) sample_count) / ((gdouble) hc->wave_period) - 1);
1043
1044                          * Get the three different frequency components of the throughput (scaled by average
1045                          * throughput). Our "error" estimate (the amount of noise that might be present in the
1046                          * frequency band we're really interested in) is the average of the adjacent bands. */
1047                         throughput_wave_component = mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->samples, sample_count, hc->wave_period), average_throughput);
1048                         throughput_error_estimate = cabs (mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->samples, sample_count, adjacent_period_1), average_throughput));
1049
1050                         if (adjacent_period_2 <= sample_count) {
1051                                 throughput_error_estimate = MAX (throughput_error_estimate, cabs (mono_double_complex_scalar_div (hill_climbing_get_wave_component (
1052                                         hc->samples, sample_count, adjacent_period_2), average_throughput)));
1053                         }
1054
1055                         /* Do the same for the thread counts, so we have something to compare to. We don't
1056                          * measure thread count noise, because there is none; these are exact measurements. */
1057                         thread_wave_component = mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->thread_counts, sample_count, hc->wave_period), average_thread_count);
1058
1059                         /* Update our moving average of the throughput noise. We'll use this
1060                          * later as feedback to determine the new size of the thread wave. */
1061                         if (hc->average_throughput_noise == 0) {
1062                                 hc->average_throughput_noise = throughput_error_estimate;
1063                         } else {
1064                                 hc->average_throughput_noise = (hc->throughput_error_smoothing_factor * throughput_error_estimate)
1065                                         + ((1.0 + hc->throughput_error_smoothing_factor) * hc->average_throughput_noise);
1066                         }
1067
1068                         if (cabs (thread_wave_component) > 0) {
1069                                 /* Adjust the throughput wave so it's centered around the target wave,
1070                                  * and then calculate the adjusted throughput/thread ratio. */
1071                                 ratio = mono_double_complex_div (mono_double_complex_sub (throughput_wave_component, mono_double_complex_scalar_mul(thread_wave_component, hc->target_throughput_ratio)), thread_wave_component);
1072                                 transition = TRANSITION_CLIMBING_MOVE;
1073                         } else {
1074                                 ratio = mono_double_complex_make (0, 0);
1075                                 transition = TRANSITION_STABILIZING;
1076                         }
1077
1078                         noise_for_confidence = MAX (hc->average_throughput_noise, throughput_error_estimate);
1079                         if (noise_for_confidence > 0) {
1080                                 confidence = cabs (thread_wave_component) / noise_for_confidence / hc->target_signal_to_noise_ratio;
1081                         } else {
1082                                 /* there is no noise! */
1083                                 confidence = 1.0;
1084                         }
1085                 }
1086         }
1087
1088         /* We use just the real part of the complex ratio we just calculated. If the throughput signal
1089          * is exactly in phase with the thread signal, this will be the same as taking the magnitude of
1090          * the complex move and moving that far up. If they're 180 degrees out of phase, we'll move
1091          * backward (because this indicates that our changes are having the opposite of the intended effect).
1092          * If they're 90 degrees out of phase, we won't move at all, because we can't tell whether we're
1093          * having a negative or positive effect on throughput. */
1094         move = creal (ratio);
1095         move = CLAMP (move, -1.0, 1.0);
1096
1097         /* Apply our confidence multiplier. */
1098         move *= CLAMP (confidence, -1.0, 1.0);
1099
1100         /* Now apply non-linear gain, such that values around zero are attenuated, while higher values
1101          * are enhanced. This allows us to move quickly if we're far away from the target, but more slowly
1102          * if we're getting close, giving us rapid ramp-up without wild oscillations around the target. */
1103         gain = hc->max_change_per_second * sample_duration;
1104         move = pow (fabs (move), hc->gain_exponent) * (move >= 0.0 ? 1 : -1) * gain;
1105         move = MIN (move, hc->max_change_per_sample);
1106
1107         /* If the result was positive, and CPU is > 95%, refuse the move. */
1108         if (move > 0.0 && worker.cpu_usage > CPU_USAGE_HIGH)
1109                 move = 0.0;
1110
1111         /* Apply the move to our control setting. */
1112         hc->current_control_setting += move;
1113
1114         /* Calculate the new thread wave magnitude, which is based on the moving average we've been keeping of the
1115          * throughput error.  This average starts at zero, so we'll start with a nice safe little wave at first. */
1116         new_thread_wave_magnitude = (gint)(0.5 + (hc->current_control_setting * hc->average_throughput_noise
1117                 * hc->target_signal_to_noise_ratio * hc->thread_magnitude_multiplier * 2.0));
1118         new_thread_wave_magnitude = CLAMP (new_thread_wave_magnitude, 1, hc->max_thread_wave_magnitude);
1119
1120         /* Make sure our control setting is within the ThreadPoolWorker's limits. */
1121         hc->current_control_setting = CLAMP (hc->current_control_setting, worker.limit_worker_min, worker.limit_worker_max - new_thread_wave_magnitude);
1122
1123         /* Calculate the new thread count (control setting + square wave). */
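        /* ((total_samples / (wave_period / 2)) % 2) toggles between 0 and 1 every half wave
         * period, so the magnitude is added as a square wave on top of the control setting. */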
1124         new_thread_count = (gint)(hc->current_control_setting + new_thread_wave_magnitude * ((hc->total_samples / (hc->wave_period / 2)) % 2));
1125
1126         /* Make sure the new thread count doesn't exceed the ThreadPoolWorker's limits. */
1127         new_thread_count = CLAMP (new_thread_count, worker.limit_worker_min, worker.limit_worker_max);
1128
1129         if (new_thread_count != current_thread_count)
1130                 hill_climbing_change_thread_count (new_thread_count, transition);
1131
1132         if (creal (ratio) < 0.0 && new_thread_count == worker.limit_worker_min)
1133                 *adjustment_interval = (gint)(0.5 + hc->current_sample_interval * (10.0 * MAX (-1.0 * creal (ratio), 1.0)));
1134         else
1135                 *adjustment_interval = hc->current_sample_interval;
1136
1137         return new_thread_count;
1138 }
1139
1140 static gboolean
1141 heuristic_should_adjust (void)
1142 {
1143         if (worker.heuristic_last_dequeue > worker.heuristic_last_adjustment + worker.heuristic_adjustment_interval) {
1144                 ThreadPoolWorkerCounter counter;
1145                 counter = COUNTER_READ ();
1146                 if (counter._.working <= counter._.max_working)
1147                         return TRUE;
1148         }
1149
1150         return FALSE;
1151 }
1152
1153 static void
1154 heuristic_adjust (void)
1155 {
1156         if (mono_coop_mutex_trylock (&worker.heuristic_lock) == 0) {
1157                 gint32 completions = InterlockedExchange (&worker.heuristic_completions, 0);
1158                 gint64 sample_end = mono_msec_ticks ();
1159                 gint64 sample_duration = sample_end - worker.heuristic_sample_start;
1160
1161                 if (sample_duration >= worker.heuristic_adjustment_interval / 2) {
1162                         ThreadPoolWorkerCounter counter;
1163                         gint16 new_thread_count;
1164
1165                         counter = COUNTER_READ ();
1166                         new_thread_count = hill_climbing_update (counter._.max_working, sample_duration, completions, &worker.heuristic_adjustment_interval);
1167
1168                         COUNTER_ATOMIC (counter, {
1169                                 counter._.max_working = new_thread_count;
1170                         });
1171
1172                         if (new_thread_count > counter._.max_working)
1173                                 worker_request ();
1174
1175                         worker.heuristic_sample_start = sample_end;
1176                         worker.heuristic_last_adjustment = mono_msec_ticks ();
1177                 }
1178
1179                 mono_coop_mutex_unlock (&worker.heuristic_lock);
1180         }
1181 }
1182
1183 static void
1184 heuristic_notify_work_completed (void)
1185 {
1186         InterlockedIncrement (&worker.heuristic_completions);
1187         worker.heuristic_last_dequeue = mono_msec_ticks ();
1188
1189         if (heuristic_should_adjust ())
1190                 heuristic_adjust ();
1191 }
1192
1193 gboolean
1194 mono_threadpool_worker_notify_completed (void)
1195 {
1196         ThreadPoolWorkerCounter counter;
1197
1198         heuristic_notify_work_completed ();
1199
1200         counter = COUNTER_READ ();
1201         return counter._.working <= counter._.max_working;
1202 }
1203
1204 gint32
1205 mono_threadpool_worker_get_min (void)
1206 {
1207         return worker.limit_worker_min;
1208 }
1209
1210 gboolean
1211 mono_threadpool_worker_set_min (gint32 value)
1212 {
1213         if (value <= 0 || value > worker.limit_worker_max)
1214                 return FALSE;
1215
1216         worker.limit_worker_min = value;
1217         return TRUE;
1218 }
1219
1220 gint32
1221 mono_threadpool_worker_get_max (void)
1222 {
1223         return worker.limit_worker_max;
1224 }
1225
1226 gboolean
1227 mono_threadpool_worker_set_max (gint32 value)
1228 {
1229         gint32 cpu_count;
1230
1231         cpu_count = mono_cpu_count ();
1232         if (value < worker.limit_worker_min || value < cpu_count)
1233                 return FALSE;
1234
1238         worker.limit_worker_max = value;
1239         return TRUE;
1240 }
1241
1242 void
1243 mono_threadpool_worker_set_suspended (gboolean suspended)
1244 {
1245         worker.suspended = suspended;
1246         if (!suspended)
1247                 worker_request ();
1248 }