1 /**
2  * \file
3  * native threadpool worker
4  *
5  * Author:
6  *      Ludovic Henry (ludovic.henry@xamarin.com)
7  *
8  * Licensed under the MIT license. See LICENSE file in the project root for full license information.
9  */
10
11 #include <stdlib.h>
12 #define _USE_MATH_DEFINES // needed by MSVC to define math constants
13 #include <math.h>
14 #include <config.h>
15 #include <glib.h>
16
17 #include <mono/metadata/class-internals.h>
18 #include <mono/metadata/exception.h>
19 #include <mono/metadata/gc-internals.h>
20 #include <mono/metadata/object.h>
21 #include <mono/metadata/object-internals.h>
22 #include <mono/metadata/threadpool.h>
23 #include <mono/metadata/threadpool-worker.h>
24 #include <mono/metadata/threadpool-io.h>
25 #include <mono/metadata/w32event.h>
26 #include <mono/utils/atomic.h>
27 #include <mono/utils/mono-compiler.h>
28 #include <mono/utils/mono-complex.h>
29 #include <mono/utils/mono-logger.h>
30 #include <mono/utils/mono-logger-internals.h>
31 #include <mono/utils/mono-proclib.h>
32 #include <mono/utils/mono-threads.h>
33 #include <mono/utils/mono-time.h>
34 #include <mono/utils/mono-rand.h>
35 #include <mono/utils/refcount.h>
36 #include <mono/utils/w32api.h>
37
38 #define CPU_USAGE_LOW 80
39 #define CPU_USAGE_HIGH 95
40
41 #define MONITOR_INTERVAL 500 // ms
42 #define MONITOR_MINIMAL_LIFETIME (60 * 1000) // ms
43
44 #define WORKER_CREATION_MAX_PER_SEC 10
45
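/* Hill-climbing heuristic overview (see hill_climbing_update below): the
 * controller perturbs the maximum number of working threads with a small
 * square wave and measures how throughput responds at that same frequency.
 * The complex ratio between the throughput wave and the thread-count wave is
 * used as a gradient estimate to decide whether adding threads helps. The
 * constants below tune that controller; they appear to match the defaults of
 * the .NET thread pool hill-climbing algorithm that this code closely follows. */
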
46 /* The exponent to apply to the gain. 1.0 means to use linear gain;
47  * higher values enhance large moves and damp small ones.
48  * default: 2.0 */
49 #define HILL_CLIMBING_GAIN_EXPONENT 2.0
50
51 /* The 'cost' of a thread. 0 means drive for increased throughput regardless
52  * of thread count; higher values bias more against higher thread counts.
53  * default: 0.15 */
54 #define HILL_CLIMBING_BIAS 0.15
55
56 #define HILL_CLIMBING_WAVE_PERIOD 4
57 #define HILL_CLIMBING_MAX_WAVE_MAGNITUDE 20
58 #define HILL_CLIMBING_WAVE_MAGNITUDE_MULTIPLIER 1.0
59 #define HILL_CLIMBING_WAVE_HISTORY_SIZE 8
60 #define HILL_CLIMBING_TARGET_SIGNAL_TO_NOISE_RATIO 3.0
61 #define HILL_CLIMBING_MAX_CHANGE_PER_SECOND 4
62 #define HILL_CLIMBING_MAX_CHANGE_PER_SAMPLE 20
63 #define HILL_CLIMBING_SAMPLE_INTERVAL_LOW 10
64 #define HILL_CLIMBING_SAMPLE_INTERVAL_HIGH 200
65 #define HILL_CLIMBING_ERROR_SMOOTHING_FACTOR 0.01
66 #define HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT 0.15
67
68 typedef enum {
69         TRANSITION_WARMUP,
70         TRANSITION_INITIALIZING,
71         TRANSITION_RANDOM_MOVE,
72         TRANSITION_CLIMBING_MOVE,
73         TRANSITION_CHANGE_POINT,
74         TRANSITION_STABILIZING,
75         TRANSITION_STARVATION,
76         TRANSITION_THREAD_TIMED_OUT,
77         TRANSITION_UNDEFINED,
78 } ThreadPoolHeuristicStateTransition;
79
80 typedef struct {
81         gint32 wave_period;
82         gint32 samples_to_measure;
83         gdouble target_throughput_ratio;
84         gdouble target_signal_to_noise_ratio;
85         gdouble max_change_per_second;
86         gdouble max_change_per_sample;
87         gint32 max_thread_wave_magnitude;
88         gint32 sample_interval_low;
89         gdouble thread_magnitude_multiplier;
90         gint32 sample_interval_high;
91         gdouble throughput_error_smoothing_factor;
92         gdouble gain_exponent;
93         gdouble max_sample_error;
94
95         gdouble current_control_setting;
96         gint64 total_samples;
97         gint16 last_thread_count;
98         gdouble elapsed_since_last_change;
99         gdouble completions_since_last_change;
100
101         gdouble average_throughput_noise;
102
103         gdouble *samples;
104         gdouble *thread_counts;
105
106         guint32 current_sample_interval;
107         gpointer random_interval_generator;
108
109         gint32 accumulated_completion_count;
110         gdouble accumulated_sample_duration;
111 } ThreadPoolHillClimbing;
112
113 typedef struct {
114         MonoThreadPoolWorkerCallback callback;
115         gpointer data;
116 } ThreadPoolWorkItem;
117
118 typedef union {
119         struct {
120                 gint16 max_working; /* determined by heuristic */
121                 gint16 starting; /* starting, but not yet in worker_thread */
122                 gint16 working; /* executing worker_thread */
123                 gint16 parked; /* parked */
124         } _;
125         gint64 as_gint64;
126 } ThreadPoolWorkerCounter;
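/* The four 16-bit counters are packed into a single 64-bit value (as_gint64)
 * so they can be read and updated together with one atomic operation; see
 * COUNTER_READ and COUNTER_ATOMIC below. */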
127
128 typedef struct {
129         MonoRefCount ref;
130
131         ThreadPoolWorkerCounter counters;
132
133         MonoCoopMutex parked_threads_lock;
134         gint32 parked_threads_count;
135         MonoCoopCond parked_threads_cond;
136
137         ThreadPoolWorkItem *work_items; // ThreadPoolWorkItem []
138         gint32 work_items_count;
139         gint32 work_items_size;
140         MonoCoopMutex work_items_lock;
141
142         guint32 worker_creation_current_second;
143         guint32 worker_creation_current_count;
144         MonoCoopMutex worker_creation_lock;
145
146         gint32 heuristic_completions;
147         gint64 heuristic_sample_start;
148         gint64 heuristic_last_dequeue; // ms
149         gint64 heuristic_last_adjustment; // ms
150         gint64 heuristic_adjustment_interval; // ms
151         ThreadPoolHillClimbing heuristic_hill_climbing;
152         MonoCoopMutex heuristic_lock;
153
154         gint32 limit_worker_min;
155         gint32 limit_worker_max;
156
157         MonoCpuUsageState *cpu_usage_state;
158         gint32 cpu_usage;
159
160         /* suspended by the debugger */
161         gboolean suspended;
162
163         gint32 monitor_status;
164 } ThreadPoolWorker;
165
166 enum {
167         MONITOR_STATUS_REQUESTED,
168         MONITOR_STATUS_WAITING_FOR_REQUEST,
169         MONITOR_STATUS_NOT_RUNNING,
170 };
171
172 static ThreadPoolWorker worker;
173
174 #define COUNTER_CHECK(counter) \
175         do { \
176                 g_assert (counter._.max_working > 0); \
177                 g_assert (counter._.starting >= 0); \
178                 g_assert (counter._.working >= 0); \
179         } while (0)
180
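
/* COUNTER_ATOMIC implements a lock-free read-modify-write: it snapshots the
 * packed counter, lets `block' mutate the local copy, validates it, and
 * retries until the compare-and-swap succeeds, i.e. until no other thread
 * changed the counter in between. Typical usage (as in worker_thread below):
 *
 *     ThreadPoolWorkerCounter counter;
 *     COUNTER_ATOMIC (counter, {
 *             counter._.starting --;
 *             counter._.working ++;
 *     });
 */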
181 #define COUNTER_ATOMIC(var,block) \
182         do { \
183                 ThreadPoolWorkerCounter __old; \
184                 do { \
185                         __old = COUNTER_READ (); \
186                         (var) = __old; \
187                         { block; } \
188                         COUNTER_CHECK (var); \
189                 } while (InterlockedCompareExchange64 (&worker.counters.as_gint64, (var).as_gint64, __old.as_gint64) != __old.as_gint64); \
190         } while (0)
191
192 static inline ThreadPoolWorkerCounter
193 COUNTER_READ (void)
194 {
195         ThreadPoolWorkerCounter counter;
196         counter.as_gint64 = InterlockedRead64 (&worker.counters.as_gint64);
197         return counter;
198 }
199
200 static gpointer
201 rand_create (void)
202 {
203         mono_rand_open ();
204         return mono_rand_init (NULL, 0);
205 }
206
207 static guint32
208 rand_next (gpointer *handle, guint32 min, guint32 max)
209 {
210         MonoError error;
211         guint32 val;
212         mono_rand_try_get_uint32 (handle, &val, min, max, &error);
213         // FIXME handle error
214         mono_error_assert_ok (&error);
215         return val;
216 }
217
218 static void
219 destroy (gpointer data)
220 {
221         mono_coop_mutex_destroy (&worker.parked_threads_lock);
222         mono_coop_cond_destroy (&worker.parked_threads_cond);
223
224         mono_coop_mutex_destroy (&worker.work_items_lock);
225
226         mono_coop_mutex_destroy (&worker.worker_creation_lock);
227
228         mono_coop_mutex_destroy (&worker.heuristic_lock);
229
230         g_free (worker.cpu_usage_state);
231 }
232
233 void
234 mono_threadpool_worker_init (void)
235 {
236         ThreadPoolHillClimbing *hc;
237         const char *threads_per_cpu_env;
238         gint threads_per_cpu;
239         gint threads_count;
240
241         mono_refcount_init (&worker, destroy);
242
243         mono_coop_mutex_init (&worker.parked_threads_lock);
244         worker.parked_threads_count = 0;
245         mono_coop_cond_init (&worker.parked_threads_cond);
246
247         /* worker.work_items_size is initialized to 0 */
248         mono_coop_mutex_init (&worker.work_items_lock);
249
250         worker.worker_creation_current_second = -1;
251         mono_coop_mutex_init (&worker.worker_creation_lock);
252
253         worker.heuristic_adjustment_interval = 10;
254         mono_coop_mutex_init (&worker.heuristic_lock);
255
256         mono_rand_open ();
257
258         hc = &worker.heuristic_hill_climbing;
259
260         hc->wave_period = HILL_CLIMBING_WAVE_PERIOD;
261         hc->max_thread_wave_magnitude = HILL_CLIMBING_MAX_WAVE_MAGNITUDE;
262         hc->thread_magnitude_multiplier = (gdouble) HILL_CLIMBING_WAVE_MAGNITUDE_MULTIPLIER;
263         hc->samples_to_measure = hc->wave_period * HILL_CLIMBING_WAVE_HISTORY_SIZE;
264         hc->target_throughput_ratio = (gdouble) HILL_CLIMBING_BIAS;
265         hc->target_signal_to_noise_ratio = (gdouble) HILL_CLIMBING_TARGET_SIGNAL_TO_NOISE_RATIO;
266         hc->max_change_per_second = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SECOND;
267         hc->max_change_per_sample = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SAMPLE;
268         hc->sample_interval_low = HILL_CLIMBING_SAMPLE_INTERVAL_LOW;
269         hc->sample_interval_high = HILL_CLIMBING_SAMPLE_INTERVAL_HIGH;
270         hc->throughput_error_smoothing_factor = (gdouble) HILL_CLIMBING_ERROR_SMOOTHING_FACTOR;
271         hc->gain_exponent = (gdouble) HILL_CLIMBING_GAIN_EXPONENT;
272         hc->max_sample_error = (gdouble) HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT;
273         hc->current_control_setting = 0;
274         hc->total_samples = 0;
275         hc->last_thread_count = 0;
276         hc->average_throughput_noise = 0;
277         hc->elapsed_since_last_change = 0;
278         hc->accumulated_completion_count = 0;
279         hc->accumulated_sample_duration = 0;
280         hc->samples = g_new0 (gdouble, hc->samples_to_measure);
281         hc->thread_counts = g_new0 (gdouble, hc->samples_to_measure);
282         hc->random_interval_generator = rand_create ();
283         hc->current_sample_interval = rand_next (&hc->random_interval_generator, hc->sample_interval_low, hc->sample_interval_high);
284
285         if (!(threads_per_cpu_env = g_getenv ("MONO_THREADS_PER_CPU")))
286                 threads_per_cpu = 1;
287         else
288                 threads_per_cpu = CLAMP (atoi (threads_per_cpu_env), 1, 50);
289
290         threads_count = mono_cpu_count () * threads_per_cpu;
291
292         worker.limit_worker_min = threads_count;
293
294 #if defined (PLATFORM_ANDROID) || defined (HOST_IOS)
295         worker.limit_worker_max = CLAMP (threads_count * 100, MIN (threads_count, 200), MAX (threads_count, 200));
296 #else
297         worker.limit_worker_max = threads_count * 100;
298 #endif
299
300         worker.counters._.max_working = worker.limit_worker_min;
301
302         worker.cpu_usage_state = g_new0 (MonoCpuUsageState, 1);
303
304         worker.suspended = FALSE;
305
306         worker.monitor_status = MONITOR_STATUS_NOT_RUNNING;
307 }
308
309 void
310 mono_threadpool_worker_cleanup (void)
311 {
312         mono_refcount_dec (&worker);
313 }
314
315 static void
316 work_item_lock (void)
317 {
318         mono_coop_mutex_lock (&worker.work_items_lock);
319 }
320
321 static void
322 work_item_unlock (void)
323 {
324         mono_coop_mutex_unlock (&worker.work_items_lock);
325 }
326
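/* Work items are kept in a growable array used as a LIFO stack and protected
 * by work_items_lock: work_item_push appends at the end, work_item_try_pop
 * takes the most recently pushed item, and the array grows and shrinks in
 * chunks of 64 entries. */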
327 static void
328 work_item_push (MonoThreadPoolWorkerCallback callback, gpointer data)
329 {
330         ThreadPoolWorkItem work_item;
331
332         g_assert (callback);
333
334         work_item.callback = callback;
335         work_item.data = data;
336
337         work_item_lock ();
338
339         g_assert (worker.work_items_count <= worker.work_items_size);
340
341         if (G_UNLIKELY (worker.work_items_count == worker.work_items_size)) {
342                 worker.work_items_size += 64;
343                 worker.work_items = g_renew (ThreadPoolWorkItem, worker.work_items, worker.work_items_size);
344         }
345
346         g_assert (worker.work_items);
347
348         worker.work_items [worker.work_items_count ++] = work_item;
349
350         // printf ("[push] worker.work_items = %p, worker.work_items_count = %d, worker.work_items_size = %d\n",
351         //      worker.work_items, worker.work_items_count, worker.work_items_size);
352
353         work_item_unlock ();
354 }
355
356 static gboolean
357 work_item_try_pop (ThreadPoolWorkItem *work_item)
358 {
359         g_assert (work_item);
360
361         work_item_lock ();
362
363         // printf ("[pop]  worker.work_items = %p, worker.work_items_count = %d, worker.work_items_size = %d\n",
364         //      worker.work_items, worker.work_items_count, worker.work_items_size);
365
366         if (worker.work_items_count == 0) {
367                 work_item_unlock ();
368                 return FALSE;
369         }
370
371         *work_item = worker.work_items [-- worker.work_items_count];
372
373         if (G_UNLIKELY (worker.work_items_count >= 64 * 3 && worker.work_items_count < worker.work_items_size / 2)) {
374                 worker.work_items_size -= 64;
375                 worker.work_items = g_renew (ThreadPoolWorkItem, worker.work_items, worker.work_items_size);
376         }
377
378         work_item_unlock ();
379
380         return TRUE;
381 }
382
383 static gint32
384 work_item_count (void)
385 {
386         gint32 count;
387
388         work_item_lock ();
389         count = worker.work_items_count;
390         work_item_unlock ();
391
392         return count;
393 }
394
395 static void worker_request (void);
396
397 void
398 mono_threadpool_worker_enqueue (MonoThreadPoolWorkerCallback callback, gpointer data)
399 {
400         if (!mono_refcount_tryinc (&worker))
401                 return;
402
403         work_item_push (callback, data);
404
405         worker_request ();
406
407         mono_refcount_dec (&worker);
408 }
409
410 static void
411 worker_wait_interrupt (gpointer unused)
412 {
413         /* If the runtime is not shutting down, we are not using this mechanism to wake up a parked thread; if the
414          * runtime is shutting down, then we need to wake up ALL the threads.
415          * It might be a bit wasteful, but I witnessed a shutdown hang where the main thread would abort and then wait for all
416          * background threads to exit (see mono_thread_manage). This would go wrong because not all threadpool threads would
417          * be unparked. They would eventually get unstuck because of the timeout, but that would delay shutdown by 5-60s. */
418         if (!mono_runtime_is_shutting_down ())
419                 return;
420
421         if (!mono_refcount_tryinc (&worker))
422                 return;
423
424         mono_coop_mutex_lock (&worker.parked_threads_lock);
425         mono_coop_cond_broadcast (&worker.parked_threads_cond);
426         mono_coop_mutex_unlock (&worker.parked_threads_lock);
427
428         mono_refcount_dec (&worker);
429 }
430
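/* Parking protocol: an idle worker decrements `working', increments `parked'
 * and waits on parked_threads_cond with a randomized timeout between 5 and 60
 * seconds. worker_try_unpark signals the condition to wake one parked worker;
 * a timeout makes the worker thread exit (see worker_thread), which is how the
 * pool gradually retires idle threads. */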
431 /* return TRUE if timeout, FALSE otherwise (worker unpark or interrupt) */
432 static gboolean
433 worker_park (void)
434 {
435         gboolean timeout = FALSE;
436         gboolean interrupted = FALSE;
437
438         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker parking", mono_native_thread_id_get ());
439
440         mono_coop_mutex_lock (&worker.parked_threads_lock);
441
442         if (!mono_runtime_is_shutting_down ()) {
443                 static gpointer rand_handle = NULL;
444                 MonoInternalThread *thread;
445                 ThreadPoolWorkerCounter counter;
446
447                 if (!rand_handle)
448                         rand_handle = rand_create ();
449                 g_assert (rand_handle);
450
451                 thread = mono_thread_internal_current ();
452                 g_assert (thread);
453
454                 COUNTER_ATOMIC (counter, {
455                         counter._.working --;
456                         counter._.parked ++;
457                 });
458
459                 worker.parked_threads_count += 1;
460
461                 mono_thread_info_install_interrupt (worker_wait_interrupt, NULL, &interrupted);
462                 if (interrupted)
463                         goto done;
464
465                 if (mono_coop_cond_timedwait (&worker.parked_threads_cond, &worker.parked_threads_lock, rand_next (&rand_handle, 5 * 1000, 60 * 1000)) != 0)
466                         timeout = TRUE;
467
468                 mono_thread_info_uninstall_interrupt (&interrupted);
469
470 done:
471                 worker.parked_threads_count -= 1;
472
473                 COUNTER_ATOMIC (counter, {
474                         counter._.working ++;
475                         counter._.parked --;
476                 });
477         }
478
479         mono_coop_mutex_unlock (&worker.parked_threads_lock);
480
481         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker unparking, timeout? %s interrupted? %s",
482                 mono_native_thread_id_get (), timeout ? "yes" : "no", interrupted ? "yes" : "no");
483
484         return timeout;
485 }
486
487 static gboolean
488 worker_try_unpark (void)
489 {
490         gboolean res = FALSE;
491
492         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try unpark worker", mono_native_thread_id_get ());
493
494         mono_coop_mutex_lock (&worker.parked_threads_lock);
495         if (worker.parked_threads_count > 0) {
496                 mono_coop_cond_signal (&worker.parked_threads_cond);
497                 res = TRUE;
498         }
499         mono_coop_mutex_unlock (&worker.parked_threads_lock);
500
501         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try unpark worker, success? %s", mono_native_thread_id_get (), res ? "yes" : "no");
502
503         return res;
504 }
505
506 static gsize WINAPI
507 worker_thread (gpointer unused)
508 {
509         MonoInternalThread *thread;
510         ThreadPoolWorkerCounter counter;
511
512         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker starting", mono_native_thread_id_get ());
513
514         if (!mono_refcount_tryinc (&worker))
515                 return 0;
516
517         COUNTER_ATOMIC (counter, {
518                 counter._.starting --;
519                 counter._.working ++;
520         });
521
522         thread = mono_thread_internal_current ();
523         g_assert (thread);
524
525         while (!mono_runtime_is_shutting_down ()) {
526                 ThreadPoolWorkItem work_item;
527
528                 if (mono_thread_interruption_checkpoint ())
529                         continue;
530
531                 if (!work_item_try_pop (&work_item)) {
532                         gboolean timeout;
533
534                         timeout = worker_park ();
535                         if (timeout)
536                                 break;
537
538                         continue;
539                 }
540
541                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] worker executing %p (%p)",
542                         mono_native_thread_id_get (), work_item.callback, work_item.data);
543
544                 work_item.callback (work_item.data);
545         }
546
547         COUNTER_ATOMIC (counter, {
548                 counter._.working --;
549         });
550
551         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker finishing", mono_native_thread_id_get ());
552
553         mono_refcount_dec (&worker);
554
555         return 0;
556 }
557
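/* Worker creation is throttled: at most WORKER_CREATION_MAX_PER_SEC threads
 * are started per wall-clock second (tracked via 100ns ticks), and no thread
 * is started once `working' has reached `max_working', the limit computed by
 * the hill-climbing heuristic. */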
558 static gboolean
559 worker_try_create (void)
560 {
561         MonoError error;
562         MonoInternalThread *thread;
563         gint64 current_ticks;
564         gint32 now;
565         ThreadPoolWorkerCounter counter;
566
567         if (mono_runtime_is_shutting_down ())
568                 return FALSE;
569
570         mono_coop_mutex_lock (&worker.worker_creation_lock);
571
572         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker", mono_native_thread_id_get ());
573
574         current_ticks = mono_100ns_ticks ();
575         if (0 == current_ticks) {
576                 g_warning ("failed to get 100ns ticks");
577         } else {
578                 now = current_ticks / (10 * 1000 * 1000);
579                 if (worker.worker_creation_current_second != now) {
580                         worker.worker_creation_current_second = now;
581                         worker.worker_creation_current_count = 0;
582                 } else {
583                         g_assert (worker.worker_creation_current_count <= WORKER_CREATION_MAX_PER_SEC);
584                         if (worker.worker_creation_current_count == WORKER_CREATION_MAX_PER_SEC) {
585                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: maximum number of workers created per second reached, current count = %d",
586                                         mono_native_thread_id_get (), worker.worker_creation_current_count);
587                                 mono_coop_mutex_unlock (&worker.worker_creation_lock);
588                                 return FALSE;
589                         }
590                 }
591         }
592
593         COUNTER_ATOMIC (counter, {
594                 if (counter._.working >= counter._.max_working) {
595                         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: maximum number of working threads reached",
596                                 mono_native_thread_id_get ());
597                         mono_coop_mutex_unlock (&worker.worker_creation_lock);
598                         return FALSE;
599                 }
600                 counter._.starting ++;
601         });
602
603         thread = mono_thread_create_internal (mono_get_root_domain (), worker_thread, NULL, MONO_THREAD_CREATE_FLAGS_THREADPOOL, &error);
604         if (!thread) {
605                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: could not create thread due to %s", mono_native_thread_id_get (), mono_error_get_message (&error));
606                 mono_error_cleanup (&error);
607
608                 COUNTER_ATOMIC (counter, {
609                         counter._.starting --;
610                 });
611
612                 mono_coop_mutex_unlock (&worker.worker_creation_lock);
613
614                 return FALSE;
615         }
616
617         worker.worker_creation_current_count += 1;
618
619         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, created %p, now = %d count = %d",
620                 mono_native_thread_id_get (), (gpointer) thread->tid, now, worker.worker_creation_current_count);
621
622         mono_coop_mutex_unlock (&worker.worker_creation_lock);
623         return TRUE;
624 }
625
626 static void monitor_ensure_running (void);
627
628 static void
629 worker_request (void)
630 {
631         if (worker.suspended)
632                 return;
633
634         monitor_ensure_running ();
635
636         if (worker_try_unpark ()) {
637                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, unparked", mono_native_thread_id_get ());
638                 return;
639         }
640
641         if (worker_try_create ()) {
642                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, created", mono_native_thread_id_get ());
643                 return;
644         }
645
646         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, failed", mono_native_thread_id_get ());
647 }
648
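/* The monitor thread wakes up roughly every MONITOR_INTERVAL ms and injects
 * extra workers when work items are queued but nothing has been dequeued for
 * a while (a starvation heuristic). monitor_status cycles between NOT_RUNNING,
 * REQUESTED and WAITING_FOR_REQUEST; monitor_should_keep_running keeps the
 * thread alive for roughly MONITOR_MINIMAL_LIFETIME after the last request
 * before letting it exit. */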
649 static gboolean
650 monitor_should_keep_running (void)
651 {
652         static gint64 last_should_keep_running = -1;
653
654         g_assert (worker.monitor_status == MONITOR_STATUS_WAITING_FOR_REQUEST || worker.monitor_status == MONITOR_STATUS_REQUESTED);
655
656         if (InterlockedExchange (&worker.monitor_status, MONITOR_STATUS_WAITING_FOR_REQUEST) == MONITOR_STATUS_WAITING_FOR_REQUEST) {
657                 gboolean should_keep_running = TRUE, force_should_keep_running = FALSE;
658
659                 if (mono_runtime_is_shutting_down ()) {
660                         should_keep_running = FALSE;
661                 } else {
662                         if (work_item_count () == 0)
663                                 should_keep_running = FALSE;
664
665                         if (!should_keep_running) {
666                                 if (last_should_keep_running == -1 || mono_100ns_ticks () - last_should_keep_running < MONITOR_MINIMAL_LIFETIME * 1000 * 10) {
667                                         should_keep_running = force_should_keep_running = TRUE;
668                                 }
669                         }
670                 }
671
672                 if (should_keep_running) {
673                         if (last_should_keep_running == -1 || !force_should_keep_running)
674                                 last_should_keep_running = mono_100ns_ticks ();
675                 } else {
676                         last_should_keep_running = -1;
677                         if (InterlockedCompareExchange (&worker.monitor_status, MONITOR_STATUS_NOT_RUNNING, MONITOR_STATUS_WAITING_FOR_REQUEST) == MONITOR_STATUS_WAITING_FOR_REQUEST)
678                                 return FALSE;
679                 }
680         }
681
682         g_assert (worker.monitor_status == MONITOR_STATUS_WAITING_FOR_REQUEST || worker.monitor_status == MONITOR_STATUS_REQUESTED);
683
684         return TRUE;
685 }
686
687 static gboolean
688 monitor_sufficient_delay_since_last_dequeue (void)
689 {
690         gint64 threshold;
691
692         if (worker.cpu_usage < CPU_USAGE_LOW) {
693                 threshold = MONITOR_INTERVAL;
694         } else {
695                 ThreadPoolWorkerCounter counter;
696                 counter = COUNTER_READ ();
697                 threshold = counter._.max_working * MONITOR_INTERVAL * 2;
698         }
699
700         return mono_msec_ticks () >= worker.heuristic_last_dequeue + threshold;
701 }
702
703 static void hill_climbing_force_change (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition);
704
705 static gsize WINAPI
706 monitor_thread (gpointer unused)
707 {
708         MonoInternalThread *internal;
709         guint i;
710
711         if (!mono_refcount_tryinc (&worker))
712                 return 0;
713
714         internal = mono_thread_internal_current ();
715         g_assert (internal);
716
717         mono_cpu_usage (worker.cpu_usage_state);
718
719         // printf ("monitor_thread: start\n");
720
721         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, started", mono_native_thread_id_get ());
722
723         do {
724                 ThreadPoolWorkerCounter counter;
725                 gboolean limit_worker_max_reached;
726                 gint32 interval_left = MONITOR_INTERVAL;
727                 gint32 awake = 0; /* number of spurious wakeups so far; we tolerate up to 10 before doing a round of rebalancing */
728
729                 g_assert (worker.monitor_status != MONITOR_STATUS_NOT_RUNNING);
730
731                 // counter = COUNTER_READ ();
732                 // printf ("monitor_thread: starting = %d working = %d parked = %d max_working = %d\n",
733                 //      counter._.starting, counter._.working, counter._.parked, counter._.max_working);
734
735                 do {
736                         gint64 ts;
737                         gboolean alerted = FALSE;
738
739                         if (mono_runtime_is_shutting_down ())
740                                 break;
741
742                         ts = mono_msec_ticks ();
743                         if (mono_thread_info_sleep (interval_left, &alerted) == 0)
744                                 break;
745                         interval_left -= mono_msec_ticks () - ts;
746
747                         mono_thread_interruption_checkpoint ();
748                 } while (interval_left > 0 && ++awake < 10);
749
750                 if (mono_runtime_is_shutting_down ())
751                         continue;
752
753                 if (worker.suspended)
754                         continue;
755
756                 if (work_item_count () == 0)
757                         continue;
758
759                 worker.cpu_usage = mono_cpu_usage (worker.cpu_usage_state);
760
761                 if (!monitor_sufficient_delay_since_last_dequeue ())
762                         continue;
763
764                 limit_worker_max_reached = FALSE;
765
766                 COUNTER_ATOMIC (counter, {
767                         if (counter._.max_working >= worker.limit_worker_max) {
768                                 limit_worker_max_reached = TRUE;
769                                 break;
770                         }
771                         counter._.max_working ++;
772                 });
773
774                 if (limit_worker_max_reached)
775                         continue;
776
777                 hill_climbing_force_change (counter._.max_working, TRANSITION_STARVATION);
778
779                 for (i = 0; i < 5; ++i) {
780                         if (mono_runtime_is_shutting_down ())
781                                 break;
782
783                         if (worker_try_unpark ()) {
784                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, unparked", mono_native_thread_id_get ());
785                                 break;
786                         }
787
788                         if (worker_try_create ()) {
789                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, created", mono_native_thread_id_get ());
790                                 break;
791                         }
792                 }
793         } while (monitor_should_keep_running ());
794
795         // printf ("monitor_thread: stop\n");
796
797         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, finished", mono_native_thread_id_get ());
798
799         mono_refcount_dec (&worker);
800         return 0;
801 }
802
803 static void
804 monitor_ensure_running (void)
805 {
806         MonoError error;
807         for (;;) {
808                 switch (worker.monitor_status) {
809                 case MONITOR_STATUS_REQUESTED:
810                         // printf ("monitor_thread: requested\n");
811                         return;
812                 case MONITOR_STATUS_WAITING_FOR_REQUEST:
813                         // printf ("monitor_thread: waiting for request\n");
814                         InterlockedCompareExchange (&worker.monitor_status, MONITOR_STATUS_REQUESTED, MONITOR_STATUS_WAITING_FOR_REQUEST);
815                         break;
816                 case MONITOR_STATUS_NOT_RUNNING:
817                         // printf ("monitor_thread: not running\n");
818                         if (mono_runtime_is_shutting_down ())
819                                 return;
820                         if (InterlockedCompareExchange (&worker.monitor_status, MONITOR_STATUS_REQUESTED, MONITOR_STATUS_NOT_RUNNING) == MONITOR_STATUS_NOT_RUNNING) {
821                                 // printf ("monitor_thread: creating\n");
822                                 if (!mono_thread_create_internal (mono_get_root_domain (), monitor_thread, NULL, MONO_THREAD_CREATE_FLAGS_THREADPOOL | MONO_THREAD_CREATE_FLAGS_SMALL_STACK, &error)) {
823                                         // printf ("monitor_thread: creating failed\n");
824                                         worker.monitor_status = MONITOR_STATUS_NOT_RUNNING;
825                                         mono_error_cleanup (&error);
826                                         mono_refcount_dec (&worker);
827                                 }
828                                 return;
829                         }
830                         break;
831                 default: g_assert_not_reached ();
832                 }
833         }
834 }
835
836 static void
837 hill_climbing_change_thread_count (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition)
838 {
839         ThreadPoolHillClimbing *hc;
840
841         hc = &worker.heuristic_hill_climbing;
842
843         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] hill climbing, change max number of threads %d", mono_native_thread_id_get (), new_thread_count);
844
845         hc->last_thread_count = new_thread_count;
846         hc->current_sample_interval = rand_next (&hc->random_interval_generator, hc->sample_interval_low, hc->sample_interval_high);
847         hc->elapsed_since_last_change = 0;
848         hc->completions_since_last_change = 0;
849 }
850
851 static void
852 hill_climbing_force_change (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition)
853 {
854         ThreadPoolHillClimbing *hc;
855
856         hc = &worker.heuristic_hill_climbing;
857
858         if (new_thread_count != hc->last_thread_count) {
859                 hc->current_control_setting += new_thread_count - hc->last_thread_count;
860                 hill_climbing_change_thread_count (new_thread_count, transition);
861         }
862 }
863
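/* Extract the complex amplitude of a single frequency component from the
 * sample history. The recurrence below (q0 = coeff * q1 - q2 + sample) is a
 * Goertzel-style evaluation of one DFT bin at frequency 1/period; the result
 * is normalized by the number of samples. */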
864 static double_complex
865 hill_climbing_get_wave_component (gdouble *samples, guint sample_count, gdouble period)
866 {
867         ThreadPoolHillClimbing *hc;
868         gdouble w, cosine, sine, coeff, q0, q1, q2;
869         guint i;
870
871         g_assert (sample_count >= period);
872         g_assert (period >= 2);
873
874         hc = &worker.heuristic_hill_climbing;
875
876         w = 2.0 * M_PI / period;
877         cosine = cos (w);
878         sine = sin (w);
879         coeff = 2.0 * cosine;
880         q0 = q1 = q2 = 0;
881
882         for (i = 0; i < sample_count; ++i) {
883                 q0 = coeff * q1 - q2 + samples [(hc->total_samples - sample_count + i) % hc->samples_to_measure];
884                 q2 = q1;
885                 q1 = q0;
886         }
887
888         return mono_double_complex_scalar_div (mono_double_complex_make (q1 - q2 * cosine, (q2 * sine)), ((gdouble)sample_count));
889 }
890
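/* One iteration of the hill-climbing controller. Roughly:
 *   1. fold the new (duration, completions) sample into the history, or keep
 *      accumulating if the sample is still too noisy (max_sample_error);
 *   2. run the wave analysis on the recent throughput and thread-count
 *      samples to estimate how throughput responds to the injected thread
 *      wave, along with a noise/confidence estimate;
 *   3. turn the real part of the adjusted throughput-to-thread wave ratio
 *      into a move, scaled by confidence and a non-linear gain, and add it to
 *      the control setting;
 *   4. return the new thread count (control setting plus the next value of
 *      the square wave), clamped to [limit_worker_min, limit_worker_max], and
 *      the next sampling interval through *adjustment_interval. */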
891 static gint16
892 hill_climbing_update (gint16 current_thread_count, guint32 sample_duration, gint32 completions, gint64 *adjustment_interval)
893 {
894         ThreadPoolHillClimbing *hc;
895         ThreadPoolHeuristicStateTransition transition;
896         gdouble throughput;
897         gdouble throughput_error_estimate;
898         gdouble confidence;
899         gdouble move;
900         gdouble gain;
901         gint sample_index;
902         gint sample_count;
903         gint new_thread_wave_magnitude;
904         gint new_thread_count;
905         double_complex thread_wave_component;
906         double_complex throughput_wave_component;
907         double_complex ratio;
908
909         g_assert (adjustment_interval);
910
911         hc = &worker.heuristic_hill_climbing;
912
913         /* If someone changed the thread count without telling us, update our records accordingly. */
914         if (current_thread_count != hc->last_thread_count)
915                 hill_climbing_force_change (current_thread_count, TRANSITION_INITIALIZING);
916
917         /* Update the cumulative stats for this thread count */
918         hc->elapsed_since_last_change += sample_duration;
919         hc->completions_since_last_change += completions;
920
921         /* Add in any data we've already collected about this sample */
922         sample_duration += hc->accumulated_sample_duration;
923         completions += hc->accumulated_completion_count;
924
925         /* We need to make sure we're collecting reasonably accurate data. Since we're just counting the end
926          * of each work item, we are going to be missing some data about what really happened during the
927          * sample interval. The count produced by each thread includes an initial work item that may have
928          * started well before the start of the interval, and each thread may have been running some new
929          * work item for some time before the end of the interval, which did not yet get counted. So
930          * our count is going to be off by +/- threadCount workitems.
931          *
932          * The exception is that the thread that reported to us last time definitely wasn't running any work
933          * at that time, and the thread that's reporting now definitely isn't running a work item now. So
934          * we really only need to consider threadCount-1 threads.
935          *
936          * Thus the percent error in our count is +/- (threadCount-1)/numCompletions.
937          *
938          * We cannot rely on the frequency-domain analysis we'll be doing later to filter out this error, because
939          * of the way it accumulates over time. If this sample is off by, say, 33% in the negative direction,
940          * then the next one likely will be too. The one after that will include the sum of the completions
941          * we missed in the previous samples, and so will be 33% positive. So every three samples we'll have
942          * two "low" samples and one "high" sample. This will appear as periodic variation right in the frequency
943          * range we're targeting, which will not be filtered by the frequency-domain translation. */
944         if (hc->total_samples > 0 && ((current_thread_count - 1.0) / completions) >= hc->max_sample_error) {
945                 /* Not accurate enough yet. Let's accumulate the data so
946                  * far, and tell the ThreadPoolWorker to collect a little more. */
947                 hc->accumulated_sample_duration = sample_duration;
948                 hc->accumulated_completion_count = completions;
949                 *adjustment_interval = 10;
950                 return current_thread_count;
951         }
952
953         /* We've got enough data for our sample; reset our accumulators for next time. */
954         hc->accumulated_sample_duration = 0;
955         hc->accumulated_completion_count = 0;
956
957         /* Add the current thread count and throughput sample to our history. */
958         throughput = ((gdouble) completions) / sample_duration;
959
960         sample_index = hc->total_samples % hc->samples_to_measure;
961         hc->samples [sample_index] = throughput;
962         hc->thread_counts [sample_index] = current_thread_count;
963         hc->total_samples ++;
964
965         /* Set up defaults for our metrics. */
966         thread_wave_component = mono_double_complex_make(0, 0);
967         throughput_wave_component = mono_double_complex_make(0, 0);
968         throughput_error_estimate = 0;
969         ratio = mono_double_complex_make(0, 0);
970         confidence = 0;
971
972         transition = TRANSITION_WARMUP;
973
974         /* How many samples will we use? It must be at least the three wave periods we're looking for, and it must also
975          * be a whole multiple of the primary wave's period; otherwise the frequency we're looking for will fall between
976          * two frequency bands in the Fourier analysis, and we won't be able to measure it accurately. */
977         sample_count = ((gint) MIN (hc->total_samples - 1, hc->samples_to_measure) / hc->wave_period) * hc->wave_period;
978
979         if (sample_count > hc->wave_period) {
980                 guint i;
981                 gdouble average_throughput;
982                 gdouble average_thread_count;
983                 gdouble sample_sum = 0;
984                 gdouble thread_sum = 0;
985
986                 /* Average the throughput and thread count samples, so we can scale the wave magnitudes later. */
987                 for (i = 0; i < sample_count; ++i) {
988                         guint j = (hc->total_samples - sample_count + i) % hc->samples_to_measure;
989                         sample_sum += hc->samples [j];
990                         thread_sum += hc->thread_counts [j];
991                 }
992
993                 average_throughput = sample_sum / sample_count;
994                 average_thread_count = thread_sum / sample_count;
995
996                 if (average_throughput > 0 && average_thread_count > 0) {
997                         gdouble noise_for_confidence, adjacent_period_1, adjacent_period_2;
998
999                         /* Calculate the periods of the adjacent frequency bands we'll be using to
1000                          * measure noise levels. We want the two adjacent Fourier frequency bands. */
1001                         adjacent_period_1 = sample_count / (((gdouble) sample_count) / ((gdouble) hc->wave_period) + 1);
1002                         adjacent_period_2 = sample_count / (((gdouble) sample_count) / ((gdouble) hc->wave_period) - 1);
1003
1004                         /* Get the three different frequency components of the throughput (scaled by average
1005                          * throughput). Our "error" estimate (the amount of noise that might be present in the
1006                          * frequency band we're really interested in) is the average of the adjacent bands. */
1007                         throughput_wave_component = mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->samples, sample_count, hc->wave_period), average_throughput);
1008                         throughput_error_estimate = cabs (mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->samples, sample_count, adjacent_period_1), average_throughput));
1009
1010                         if (adjacent_period_2 <= sample_count) {
1011                                 throughput_error_estimate = MAX (throughput_error_estimate, cabs (mono_double_complex_scalar_div (hill_climbing_get_wave_component (
1012                                         hc->samples, sample_count, adjacent_period_2), average_throughput)));
1013                         }
1014
1015                         /* Do the same for the thread counts, so we have something to compare to. We don't
1016                          * measure thread count noise, because there is none; these are exact measurements. */
1017                         thread_wave_component = mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->thread_counts, sample_count, hc->wave_period), average_thread_count);
1018
1019                         /* Update our moving average of the throughput noise. We'll use this
1020                          * later as feedback to determine the new size of the thread wave. */
1021                         if (hc->average_throughput_noise == 0) {
1022                                 hc->average_throughput_noise = throughput_error_estimate;
1023                         } else {
1024                                 hc->average_throughput_noise = (hc->throughput_error_smoothing_factor * throughput_error_estimate)
1025                                         + ((1.0 - hc->throughput_error_smoothing_factor) * hc->average_throughput_noise);
1026                         }
1027
1028                         if (cabs (thread_wave_component) > 0) {
1029                                 /* Adjust the throughput wave so it's centered around the target wave,
1030                                  * and then calculate the adjusted throughput/thread ratio. */
1031                                 ratio = mono_double_complex_div (mono_double_complex_sub (throughput_wave_component, mono_double_complex_scalar_mul(thread_wave_component, hc->target_throughput_ratio)), thread_wave_component);
1032                                 transition = TRANSITION_CLIMBING_MOVE;
1033                         } else {
1034                                 ratio = mono_double_complex_make (0, 0);
1035                                 transition = TRANSITION_STABILIZING;
1036                         }
1037
1038                         noise_for_confidence = MAX (hc->average_throughput_noise, throughput_error_estimate);
1039                         if (noise_for_confidence > 0) {
1040                                 confidence = cabs (thread_wave_component) / noise_for_confidence / hc->target_signal_to_noise_ratio;
1041                         } else {
1042                                 /* there is no noise! */
1043                                 confidence = 1.0;
1044                         }
1045                 }
1046         }
1047
1048         /* We use just the real part of the complex ratio we just calculated. If the throughput signal
1049          * is exactly in phase with the thread signal, this will be the same as taking the magnitude of
1050          * the complex move and moving that far up. If they're 180 degrees out of phase, we'll move
1051          * backward (because this indicates that our changes are having the opposite of the intended effect).
1052          * If they're 90 degrees out of phase, we won't move at all, because we can't tell whether we're
1053          * having a negative or positive effect on throughput. */
1054         move = creal (ratio);
1055         move = CLAMP (move, -1.0, 1.0);
1056
1057         /* Apply our confidence multiplier. */
1058         move *= CLAMP (confidence, -1.0, 1.0);
1059
1060         /* Now apply non-linear gain, such that values around zero are attenuated, while higher values
1061          * are enhanced. This allows us to move quickly if we're far away from the target, but more slowly
1062          * if we're getting close, giving us rapid ramp-up without wild oscillations around the target. */
1063         gain = hc->max_change_per_second * sample_duration;
1064         move = pow (fabs (move), hc->gain_exponent) * (move >= 0.0 ? 1 : -1) * gain;
1065         move = MIN (move, hc->max_change_per_sample);
1066
1067         /* If the result was positive, and CPU is > 95%, refuse the move. */
1068         if (move > 0.0 && worker.cpu_usage > CPU_USAGE_HIGH)
1069                 move = 0.0;
1070
1071         /* Apply the move to our control setting. */
1072         hc->current_control_setting += move;
1073
1074         /* Calculate the new thread wave magnitude, which is based on the moving average we've been keeping of the
1075          * throughput error.  This average starts at zero, so we'll start with a nice safe little wave at first. */
1076         new_thread_wave_magnitude = (gint)(0.5 + (hc->current_control_setting * hc->average_throughput_noise
1077                 * hc->target_signal_to_noise_ratio * hc->thread_magnitude_multiplier * 2.0));
1078         new_thread_wave_magnitude = CLAMP (new_thread_wave_magnitude, 1, hc->max_thread_wave_magnitude);
1079
1080         /* Make sure our control setting is within the ThreadPoolWorker's limits. */
1081         hc->current_control_setting = CLAMP (hc->current_control_setting, worker.limit_worker_min, worker.limit_worker_max - new_thread_wave_magnitude);
1082
1083         /* Calculate the new thread count (control setting + square wave). */
1084         new_thread_count = (gint)(hc->current_control_setting + new_thread_wave_magnitude * ((hc->total_samples / (hc->wave_period / 2)) % 2));
1085
1086         /* Make sure the new thread count doesn't exceed the ThreadPoolWorker's limits. */
1087         new_thread_count = CLAMP (new_thread_count, worker.limit_worker_min, worker.limit_worker_max);
1088
1089         if (new_thread_count != current_thread_count)
1090                 hill_climbing_change_thread_count (new_thread_count, transition);
1091
1092         if (creal (ratio) < 0.0 && new_thread_count == worker.limit_worker_min)
1093                 *adjustment_interval = (gint)(0.5 + hc->current_sample_interval * (10.0 * MAX (-1.0 * creal (ratio), 1.0)));
1094         else
1095                 *adjustment_interval = hc->current_sample_interval;
1096
1097         return new_thread_count;
1098 }
1099
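/* The heuristic is driven by work-item completion: every completed work item
 * increments heuristic_completions, and once heuristic_adjustment_interval ms
 * have passed since the last adjustment (and the pool is not oversubscribed),
 * heuristic_adjust feeds the completion rate into hill_climbing_update to pick
 * a new max_working value. */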
1100 static gboolean
1101 heuristic_should_adjust (void)
1102 {
1103         if (worker.heuristic_last_dequeue > worker.heuristic_last_adjustment + worker.heuristic_adjustment_interval) {
1104                 ThreadPoolWorkerCounter counter;
1105                 counter = COUNTER_READ ();
1106                 if (counter._.working <= counter._.max_working)
1107                         return TRUE;
1108         }
1109
1110         return FALSE;
1111 }
1112
1113 static void
1114 heuristic_adjust (void)
1115 {
1116         if (mono_coop_mutex_trylock (&worker.heuristic_lock) == 0) {
1117                 gint32 completions = InterlockedExchange (&worker.heuristic_completions, 0);
1118                 gint64 sample_end = mono_msec_ticks ();
1119                 gint64 sample_duration = sample_end - worker.heuristic_sample_start;
1120
1121                 if (sample_duration >= worker.heuristic_adjustment_interval / 2) {
1122                         ThreadPoolWorkerCounter counter;
1123                         gint16 new_thread_count;
1124
1125                         counter = COUNTER_READ ();
1126                         new_thread_count = hill_climbing_update (counter._.max_working, sample_duration, completions, &worker.heuristic_adjustment_interval);
1127
1128                         COUNTER_ATOMIC (counter, {
1129                                 counter._.max_working = new_thread_count;
1130                         });
1131
1132                         if (new_thread_count > counter._.max_working)
1133                                 worker_request ();
1134
1135                         worker.heuristic_sample_start = sample_end;
1136                         worker.heuristic_last_adjustment = mono_msec_ticks ();
1137                 }
1138
1139                 mono_coop_mutex_unlock (&worker.heuristic_lock);
1140         }
1141 }
1142
1143 static void
1144 heuristic_notify_work_completed (void)
1145 {
1146         InterlockedIncrement (&worker.heuristic_completions);
1147         worker.heuristic_last_dequeue = mono_msec_ticks ();
1148
1149         if (heuristic_should_adjust ())
1150                 heuristic_adjust ();
1151 }
1152
1153 gboolean
1154 mono_threadpool_worker_notify_completed (void)
1155 {
1156         ThreadPoolWorkerCounter counter;
1157
1158         heuristic_notify_work_completed ();
1159
1160         counter = COUNTER_READ ();
1161         return counter._.working <= counter._.max_working;
1162 }
1163
1164 gint32
1165 mono_threadpool_worker_get_min (void)
1166 {
1167         gint32 ret;
1168
1169         if (!mono_refcount_tryinc (&worker))
1170                 return 0;
1171
1172         ret = worker.limit_worker_min;
1173
1174         mono_refcount_dec (&worker);
1175         return ret;
1176 }
1177
1178 gboolean
1179 mono_threadpool_worker_set_min (gint32 value)
1180 {
1181         if (value <= 0 || value > worker.limit_worker_max)
1182                 return FALSE;
1183
1184         if (!mono_refcount_tryinc (&worker))
1185                 return FALSE;
1186
1187         worker.limit_worker_min = value;
1188
1189         mono_refcount_dec (&worker);
1190         return TRUE;
1191 }
1192
1193 gint32
1194 mono_threadpool_worker_get_max (void)
1195 {
1196         gint32 ret;
1197
1198         if (!mono_refcount_tryinc (&worker))
1199                 return 0;
1200
1201         ret = worker.limit_worker_max;
1202
1203         mono_refcount_dec (&worker);
1204         return ret;
1205 }
1206
1207 gboolean
1208 mono_threadpool_worker_set_max (gint32 value)
1209 {
1210         gint32 cpu_count;
1211
1212         cpu_count = mono_cpu_count ();
1213         if (value < worker.limit_worker_min || value < cpu_count)
1214                 return FALSE;
1215
1216         if (!mono_refcount_tryinc (&worker))
1217                 return FALSE;
1218
1219         worker.limit_worker_max = value;
1220
1221         mono_refcount_dec (&worker);
1222         return TRUE;
1223 }
1224
1225 void
1226 mono_threadpool_worker_set_suspended (gboolean suspended)
1227 {
1228         if (!mono_refcount_tryinc (&worker))
1229                 return;
1230
1231         worker.suspended = suspended;
1232         if (!suspended)
1233                 worker_request ();
1234
1235         mono_refcount_dec (&worker);
1236 }