[threads] Remove ThreadState_StopRequested (#4462)
mono.git: mono/metadata/threadpool-worker-default.c
1 /*
2  * threadpool-worker-default.c: native threadpool worker
3  *
4  * Author:
5  *      Ludovic Henry (ludovic.henry@xamarin.com)
6  *
7  * Licensed under the MIT license. See LICENSE file in the project root for full license information.
8  */
9
10 #include <stdlib.h>
11 #define _USE_MATH_DEFINES // needed by MSVC to define math constants
12 #include <math.h>
13 #include <config.h>
14 #include <glib.h>
15
16 #include <mono/metadata/class-internals.h>
17 #include <mono/metadata/exception.h>
18 #include <mono/metadata/gc-internals.h>
19 #include <mono/metadata/object.h>
20 #include <mono/metadata/object-internals.h>
21 #include <mono/metadata/threadpool.h>
22 #include <mono/metadata/threadpool-worker.h>
23 #include <mono/metadata/threadpool-io.h>
24 #include <mono/metadata/w32event.h>
25 #include <mono/utils/atomic.h>
26 #include <mono/utils/mono-compiler.h>
27 #include <mono/utils/mono-complex.h>
28 #include <mono/utils/mono-logger.h>
29 #include <mono/utils/mono-logger-internals.h>
30 #include <mono/utils/mono-proclib.h>
31 #include <mono/utils/mono-threads.h>
32 #include <mono/utils/mono-time.h>
33 #include <mono/utils/mono-rand.h>
34 #include <mono/utils/refcount.h>
35 #include <mono/utils/w32api.h>
36
37 #define CPU_USAGE_LOW 80
38 #define CPU_USAGE_HIGH 95
39
40 #define MONITOR_INTERVAL 500 // ms
41 #define MONITOR_MINIMAL_LIFETIME 60 * 1000 // ms
42
43 #define WORKER_CREATION_MAX_PER_SEC 10
44
45 /* The exponent to apply to the gain. 1.0 means to use linear gain,
46  * higher values will enhance large moves and damp small ones.
47  * default: 2.0 */
48 #define HILL_CLIMBING_GAIN_EXPONENT 2.0
49
50 /* The 'cost' of a thread. 0 means drive for increased throughput regardless
51  * of thread count, higher values bias more against higher thread counts.
52  * default: 0.15 */
53 #define HILL_CLIMBING_BIAS 0.15
54
55 #define HILL_CLIMBING_WAVE_PERIOD 4
56 #define HILL_CLIMBING_MAX_WAVE_MAGNITUDE 20
57 #define HILL_CLIMBING_WAVE_MAGNITUDE_MULTIPLIER 1.0
58 #define HILL_CLIMBING_WAVE_HISTORY_SIZE 8
59 #define HILL_CLIMBING_TARGET_SIGNAL_TO_NOISE_RATIO 3.0
60 #define HILL_CLIMBING_MAX_CHANGE_PER_SECOND 4
61 #define HILL_CLIMBING_MAX_CHANGE_PER_SAMPLE 20
62 #define HILL_CLIMBING_SAMPLE_INTERVAL_LOW 10
63 #define HILL_CLIMBING_SAMPLE_INTERVAL_HIGH 200
64 #define HILL_CLIMBING_ERROR_SMOOTHING_FACTOR 0.01
65 #define HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT 0.15
66
67 typedef enum {
68         TRANSITION_WARMUP,
69         TRANSITION_INITIALIZING,
70         TRANSITION_RANDOM_MOVE,
71         TRANSITION_CLIMBING_MOVE,
72         TRANSITION_CHANGE_POINT,
73         TRANSITION_STABILIZING,
74         TRANSITION_STARVATION,
75         TRANSITION_THREAD_TIMED_OUT,
76         TRANSITION_UNDEFINED,
77 } ThreadPoolHeuristicStateTransition;
78
79 typedef struct {
80         gint32 wave_period;
81         gint32 samples_to_measure;
82         gdouble target_throughput_ratio;
83         gdouble target_signal_to_noise_ratio;
84         gdouble max_change_per_second;
85         gdouble max_change_per_sample;
86         gint32 max_thread_wave_magnitude;
87         gint32 sample_interval_low;
88         gdouble thread_magnitude_multiplier;
89         gint32 sample_interval_high;
90         gdouble throughput_error_smoothing_factor;
91         gdouble gain_exponent;
92         gdouble max_sample_error;
93
94         gdouble current_control_setting;
95         gint64 total_samples;
96         gint16 last_thread_count;
97         gdouble elapsed_since_last_change;
98         gdouble completions_since_last_change;
99
100         gdouble average_throughput_noise;
101
102         gdouble *samples;
103         gdouble *thread_counts;
104
105         guint32 current_sample_interval;
106         gpointer random_interval_generator;
107
108         gint32 accumulated_completion_count;
109         gdouble accumulated_sample_duration;
110 } ThreadPoolHillClimbing;
111
112 typedef struct {
113         MonoThreadPoolWorkerCallback callback;
114         gpointer data;
115 } ThreadPoolWorkItem;
116
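/* The four counters below are packed into a single 64-bit value so that they can be read and
 * updated together with one atomic operation (see COUNTER_READ and COUNTER_ATOMIC below). */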
117 typedef union {
118         struct {
119                 gint16 max_working; /* determined by heuristic */
120                 gint16 starting; /* starting, but not yet in worker_thread */
121                 gint16 working; /* executing worker_thread */
122                 gint16 parked; /* parked */
123         } _;
124         gint64 as_gint64;
125 } ThreadPoolWorkerCounter;
126
127 typedef struct {
128         MonoRefCount ref;
129
130         ThreadPoolWorkerCounter counters;
131
132         MonoCoopMutex parked_threads_lock;
133         gint32 parked_threads_count;
134         MonoCoopCond parked_threads_cond;
135
136         ThreadPoolWorkItem *work_items; // ThreadPoolWorkItem []
137         gint32 work_items_count;
138         gint32 work_items_size;
139         MonoCoopMutex work_items_lock;
140
141         guint32 worker_creation_current_second;
142         guint32 worker_creation_current_count;
143         MonoCoopMutex worker_creation_lock;
144
145         gint32 heuristic_completions;
146         gint64 heuristic_sample_start;
147         gint64 heuristic_last_dequeue; // ms
148         gint64 heuristic_last_adjustment; // ms
149         gint64 heuristic_adjustment_interval; // ms
150         ThreadPoolHillClimbing heuristic_hill_climbing;
151         MonoCoopMutex heuristic_lock;
152
153         gint32 limit_worker_min;
154         gint32 limit_worker_max;
155
156         MonoCpuUsageState *cpu_usage_state;
157         gint32 cpu_usage;
158
159         /* suspended by the debugger */
160         gboolean suspended;
161
162         gint32 monitor_status;
163 } ThreadPoolWorker;
164
165 enum {
166         MONITOR_STATUS_REQUESTED,
167         MONITOR_STATUS_WAITING_FOR_REQUEST,
168         MONITOR_STATUS_NOT_RUNNING,
169 };
170
171 static ThreadPoolWorker worker;
172
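/* COUNTER_ATOMIC is a compare-and-swap retry loop: the caller's `block` mutates a private copy of
 * the counters, and the copy is only published if no other thread modified worker.counters in the
 * meantime; otherwise the whole block is re-executed against a fresh snapshot. */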
173 #define COUNTER_CHECK(counter) \
174         do { \
175                 g_assert (counter._.max_working > 0); \
176                 g_assert (counter._.starting >= 0); \
177                 g_assert (counter._.working >= 0); \
178         } while (0)
179
180 #define COUNTER_ATOMIC(var,block) \
181         do { \
182                 ThreadPoolWorkerCounter __old; \
183                 do { \
184                         __old = COUNTER_READ (); \
185                         (var) = __old; \
186                         { block; } \
187                         COUNTER_CHECK (var); \
188                 } while (InterlockedCompareExchange64 (&worker.counters.as_gint64, (var).as_gint64, __old.as_gint64) != __old.as_gint64); \
189         } while (0)
190
191 static inline ThreadPoolWorkerCounter
192 COUNTER_READ (void)
193 {
194         ThreadPoolWorkerCounter counter;
195         counter.as_gint64 = InterlockedRead64 (&worker.counters.as_gint64);
196         return counter;
197 }
198
199 static gpointer
200 rand_create (void)
201 {
202         mono_rand_open ();
203         return mono_rand_init (NULL, 0);
204 }
205
206 static guint32
207 rand_next (gpointer *handle, guint32 min, guint32 max)
208 {
209         MonoError error;
210         guint32 val;
211         mono_rand_try_get_uint32 (handle, &val, min, max, &error);
212         // FIXME handle error
213         mono_error_assert_ok (&error);
214         return val;
215 }
216
217 static void
218 destroy (gpointer data)
219 {
220         mono_coop_mutex_destroy (&worker.parked_threads_lock);
221         mono_coop_cond_destroy (&worker.parked_threads_cond);
222
223         mono_coop_mutex_destroy (&worker.work_items_lock);
224
225         mono_coop_mutex_destroy (&worker.worker_creation_lock);
226
227         mono_coop_mutex_destroy (&worker.heuristic_lock);
228
229         g_free (worker.cpu_usage_state);
230 }
231
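/* Initialize the worker singleton: locks, counters and the hill-climbing constants.
 * limit_worker_min defaults to mono_cpu_count () * MONO_THREADS_PER_CPU (1 when the environment
 * variable is unset, clamped to [1,50]); limit_worker_max is 100x that, with an additional clamp
 * around 200 threads on Android/iOS. */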
232 void
233 mono_threadpool_worker_init (void)
234 {
235         ThreadPoolHillClimbing *hc;
236         const char *threads_per_cpu_env;
237         gint threads_per_cpu;
238         gint threads_count;
239
240         mono_refcount_init (&worker, destroy);
241
242         mono_coop_mutex_init (&worker.parked_threads_lock);
243         worker.parked_threads_count = 0;
244         mono_coop_cond_init (&worker.parked_threads_cond);
245
246         /* worker.work_items_size is inited to 0 */
247         mono_coop_mutex_init (&worker.work_items_lock);
248
249         worker.worker_creation_current_second = -1;
250         mono_coop_mutex_init (&worker.worker_creation_lock);
251
252         worker.heuristic_adjustment_interval = 10;
253         mono_coop_mutex_init (&worker.heuristic_lock);
254
255         mono_rand_open ();
256
257         hc = &worker.heuristic_hill_climbing;
258
259         hc->wave_period = HILL_CLIMBING_WAVE_PERIOD;
260         hc->max_thread_wave_magnitude = HILL_CLIMBING_MAX_WAVE_MAGNITUDE;
261         hc->thread_magnitude_multiplier = (gdouble) HILL_CLIMBING_WAVE_MAGNITUDE_MULTIPLIER;
262         hc->samples_to_measure = hc->wave_period * HILL_CLIMBING_WAVE_HISTORY_SIZE;
263         hc->target_throughput_ratio = (gdouble) HILL_CLIMBING_BIAS;
264         hc->target_signal_to_noise_ratio = (gdouble) HILL_CLIMBING_TARGET_SIGNAL_TO_NOISE_RATIO;
265         hc->max_change_per_second = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SECOND;
266         hc->max_change_per_sample = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SAMPLE;
267         hc->sample_interval_low = HILL_CLIMBING_SAMPLE_INTERVAL_LOW;
268         hc->sample_interval_high = HILL_CLIMBING_SAMPLE_INTERVAL_HIGH;
269         hc->throughput_error_smoothing_factor = (gdouble) HILL_CLIMBING_ERROR_SMOOTHING_FACTOR;
270         hc->gain_exponent = (gdouble) HILL_CLIMBING_GAIN_EXPONENT;
271         hc->max_sample_error = (gdouble) HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT;
272         hc->current_control_setting = 0;
273         hc->total_samples = 0;
274         hc->last_thread_count = 0;
275         hc->average_throughput_noise = 0;
276         hc->elapsed_since_last_change = 0;
277         hc->accumulated_completion_count = 0;
278         hc->accumulated_sample_duration = 0;
279         hc->samples = g_new0 (gdouble, hc->samples_to_measure);
280         hc->thread_counts = g_new0 (gdouble, hc->samples_to_measure);
281         hc->random_interval_generator = rand_create ();
282         hc->current_sample_interval = rand_next (&hc->random_interval_generator, hc->sample_interval_low, hc->sample_interval_high);
283
284         if (!(threads_per_cpu_env = g_getenv ("MONO_THREADS_PER_CPU")))
285                 threads_per_cpu = 1;
286         else
287                 threads_per_cpu = CLAMP (atoi (threads_per_cpu_env), 1, 50);
288
289         threads_count = mono_cpu_count () * threads_per_cpu;
290
291         worker.limit_worker_min = threads_count;
292
293 #if defined (PLATFORM_ANDROID) || defined (HOST_IOS)
294         worker.limit_worker_max = CLAMP (threads_count * 100, MIN (threads_count, 200), MAX (threads_count, 200));
295 #else
296         worker.limit_worker_max = threads_count * 100;
297 #endif
298
299         worker.counters._.max_working = worker.limit_worker_min;
300
301         worker.cpu_usage_state = g_new0 (MonoCpuUsageState, 1);
302
303         worker.suspended = FALSE;
304
305         worker.monitor_status = MONITOR_STATUS_NOT_RUNNING;
306 }
307
308 void
309 mono_threadpool_worker_cleanup (void)
310 {
311         mono_refcount_dec (&worker);
312 }
313
314 static void
315 work_item_lock (void)
316 {
317         mono_coop_mutex_lock (&worker.work_items_lock);
318 }
319
320 static void
321 work_item_unlock (void)
322 {
323         mono_coop_mutex_unlock (&worker.work_items_lock);
324 }
325
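/* Work items are kept in a simple LIFO array protected by work_items_lock; the array grows and
 * shrinks in chunks of 64 entries as items are pushed and popped. */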
326 static void
327 work_item_push (MonoThreadPoolWorkerCallback callback, gpointer data)
328 {
329         ThreadPoolWorkItem work_item;
330
331         g_assert (callback);
332
333         work_item.callback = callback;
334         work_item.data = data;
335
336         work_item_lock ();
337
338         g_assert (worker.work_items_count <= worker.work_items_size);
339
340         if (G_UNLIKELY (worker.work_items_count == worker.work_items_size)) {
341                 worker.work_items_size += 64;
342                 worker.work_items = g_renew (ThreadPoolWorkItem, worker.work_items, worker.work_items_size);
343         }
344
345         g_assert (worker.work_items);
346
347         worker.work_items [worker.work_items_count ++] = work_item;
348
349         // printf ("[push] worker.work_items = %p, worker.work_items_count = %d, worker.work_items_size = %d\n",
350         //      worker.work_items, worker.work_items_count, worker.work_items_size);
351
352         work_item_unlock ();
353 }
354
355 static gboolean
356 work_item_try_pop (ThreadPoolWorkItem *work_item)
357 {
358         g_assert (work_item);
359
360         work_item_lock ();
361
362         // printf ("[pop]  worker.work_items = %p, worker.work_items_count = %d, worker.work_items_size = %d\n",
363         //      worker.work_items, worker.work_items_count, worker.work_items_size);
364
365         if (worker.work_items_count == 0) {
366                 work_item_unlock ();
367                 return FALSE;
368         }
369
370         *work_item = worker.work_items [-- worker.work_items_count];
371
372         if (G_UNLIKELY (worker.work_items_count >= 64 * 3 && worker.work_items_count < worker.work_items_size / 2)) {
373                 worker.work_items_size -= 64;
374                 worker.work_items = g_renew (ThreadPoolWorkItem, worker.work_items, worker.work_items_size);
375         }
376
377         work_item_unlock ();
378
379         return TRUE;
380 }
381
382 static gint32
383 work_item_count (void)
384 {
385         gint32 count;
386
387         work_item_lock ();
388         count = worker.work_items_count;
389         work_item_unlock ();
390
391         return count;
392 }
393
394 static void worker_request (void);
395
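/* Public entry point: queue `callback (data)` for execution on a threadpool worker and make sure
 * at least one worker is awake (or newly created) to service it. Illustrative usage, with a
 * hypothetical callback supplied by the caller:
 *
 *     static void my_job (gpointer data) { / * do work * / }
 *     ...
 *     mono_threadpool_worker_enqueue (my_job, my_data);
 */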
396 void
397 mono_threadpool_worker_enqueue (MonoThreadPoolWorkerCallback callback, gpointer data)
398 {
399         if (!mono_refcount_tryinc (&worker))
400                 return;
401
402         work_item_push (callback, data);
403
404         worker_request ();
405
406         mono_refcount_dec (&worker);
407 }
408
409 static void
410 worker_wait_interrupt (gpointer unused)
411 {
412         /* If the runtime is not shutting down, this interrupt is not the mechanism used to unpark a thread (worker_try_unpark
413          * signals the condition variable for that); but if the runtime is shutting down, then we need to wake up ALL the threads.
414          * It might be a bit wasteful, but shutdown hangs have been witnessed where the main thread would abort and then wait for all
415          * background threads to exit (see mono_thread_manage). This would go wrong because not all threadpool threads would
416          * be unparked. They would eventually get unstuck thanks to the wait timeout, but that would delay shutdown by 5-60s. */
417         if (!mono_runtime_is_shutting_down ())
418                 return;
419
420         if (!mono_refcount_tryinc (&worker))
421                 return;
422
423         mono_coop_mutex_lock (&worker.parked_threads_lock);
424         mono_coop_cond_broadcast (&worker.parked_threads_cond);
425         mono_coop_mutex_unlock (&worker.parked_threads_lock);
426
427         mono_refcount_dec (&worker);
428 }
429
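/* Park the current worker on parked_threads_cond until it is signalled by worker_try_unpark, the
 * runtime shuts down, or a 5-60s timeout elapses (the timeout is randomized, presumably so idle
 * workers do not all retire at the same moment). The working/parked counters are updated on entry
 * and exit. */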
430 /* return TRUE if timeout, FALSE otherwise (worker unpark or interrupt) */
431 static gboolean
432 worker_park (void)
433 {
434         gboolean timeout = FALSE;
435         gboolean interrupted = FALSE;
436
437         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker parking", mono_native_thread_id_get ());
438
439         mono_coop_mutex_lock (&worker.parked_threads_lock);
440
441         if (!mono_runtime_is_shutting_down ()) {
442                 static gpointer rand_handle = NULL;
443                 MonoInternalThread *thread;
444                 ThreadPoolWorkerCounter counter;
445
446                 if (!rand_handle)
447                         rand_handle = rand_create ();
448                 g_assert (rand_handle);
449
450                 thread = mono_thread_internal_current ();
451                 g_assert (thread);
452
453                 COUNTER_ATOMIC (counter, {
454                         counter._.working --;
455                         counter._.parked ++;
456                 });
457
458                 worker.parked_threads_count += 1;
459
460                 mono_thread_info_install_interrupt (worker_wait_interrupt, NULL, &interrupted);
461                 if (interrupted)
462                         goto done;
463
464                 if (mono_coop_cond_timedwait (&worker.parked_threads_cond, &worker.parked_threads_lock, rand_next (&rand_handle, 5 * 1000, 60 * 1000)) != 0)
465                         timeout = TRUE;
466
467                 mono_thread_info_uninstall_interrupt (&interrupted);
468
469 done:
470                 worker.parked_threads_count -= 1;
471
472                 COUNTER_ATOMIC (counter, {
473                         counter._.working ++;
474                         counter._.parked --;
475                 });
476         }
477
478         mono_coop_mutex_unlock (&worker.parked_threads_lock);
479
480         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker unparking, timeout? %s interrupted? %s",
481                 mono_native_thread_id_get (), timeout ? "yes" : "no", interrupted ? "yes" : "no");
482
483         return timeout;
484 }
485
486 static gboolean
487 worker_try_unpark (void)
488 {
489         gboolean res = FALSE;
490
491         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try unpark worker", mono_native_thread_id_get ());
492
493         mono_coop_mutex_lock (&worker.parked_threads_lock);
494         if (worker.parked_threads_count > 0) {
495                 mono_coop_cond_signal (&worker.parked_threads_cond);
496                 res = TRUE;
497         }
498         mono_coop_mutex_unlock (&worker.parked_threads_lock);
499
500         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try unpark worker, success? %s", mono_native_thread_id_get (), res ? "yes" : "no");
501
502         return res;
503 }
504
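/* Main loop of a threadpool worker: repeatedly pop and run work items; when the queue is empty,
 * park. A park that ends by timeout (rather than by being woken up) retires the thread. */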
505 static gsize WINAPI
506 worker_thread (gpointer unused)
507 {
508         MonoInternalThread *thread;
509         ThreadPoolWorkerCounter counter;
510
511         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker starting", mono_native_thread_id_get ());
512
513         if (!mono_refcount_tryinc (&worker))
514                 return 0;
515
516         COUNTER_ATOMIC (counter, {
517                 counter._.starting --;
518                 counter._.working ++;
519         });
520
521         thread = mono_thread_internal_current ();
522         g_assert (thread);
523
524         while (!mono_runtime_is_shutting_down ()) {
525                 ThreadPoolWorkItem work_item;
526
527                 if (mono_thread_interruption_checkpoint ())
528                         continue;
529
530                 if (!work_item_try_pop (&work_item)) {
531                         gboolean timeout;
532
533                         timeout = worker_park ();
534                         if (timeout)
535                                 break;
536
537                         continue;
538                 }
539
540                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] worker executing %p (%p)",
541                         mono_native_thread_id_get (), work_item.callback, work_item.data);
542
543                 work_item.callback (work_item.data);
544         }
545
546         COUNTER_ATOMIC (counter, {
547                 counter._.working --;
548         });
549
550         mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_THREADPOOL, "[%p] worker finishing", mono_native_thread_id_get ());
551
552         mono_refcount_dec (&worker);
553
554         return 0;
555 }
556
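/* Try to start a new worker thread. Creation is throttled to WORKER_CREATION_MAX_PER_SEC per
 * wall-clock second, and refused once counter._.working has reached counter._.max_working (the
 * limit computed by the hill-climbing heuristic). */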
557 static gboolean
558 worker_try_create (void)
559 {
560         MonoError error;
561         MonoInternalThread *thread;
562         gint64 current_ticks;
563         gint32 now;
564         ThreadPoolWorkerCounter counter;
565
566         if (mono_runtime_is_shutting_down ())
567                 return FALSE;
568
569         mono_coop_mutex_lock (&worker.worker_creation_lock);
570
571         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker", mono_native_thread_id_get ());
572
573         current_ticks = mono_100ns_ticks ();
574         if (0 == current_ticks) {
575                 g_warning ("failed to get 100ns ticks");
576         } else {
577                 now = current_ticks / (10 * 1000 * 1000);
578                 if (worker.worker_creation_current_second != now) {
579                         worker.worker_creation_current_second = now;
580                         worker.worker_creation_current_count = 0;
581                 } else {
582                         g_assert (worker.worker_creation_current_count <= WORKER_CREATION_MAX_PER_SEC);
583                         if (worker.worker_creation_current_count == WORKER_CREATION_MAX_PER_SEC) {
584                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: maximum number of workers created per second reached, current count = %d",
585                                         mono_native_thread_id_get (), worker.worker_creation_current_count);
586                                 mono_coop_mutex_unlock (&worker.worker_creation_lock);
587                                 return FALSE;
588                         }
589                 }
590         }
591
592         COUNTER_ATOMIC (counter, {
593                 if (counter._.working >= counter._.max_working) {
594                         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: maximum number of working threads reached",
595                                 mono_native_thread_id_get ());
596                         mono_coop_mutex_unlock (&worker.worker_creation_lock);
597                         return FALSE;
598                 }
599                 counter._.starting ++;
600         });
601
602         thread = mono_thread_create_internal (mono_get_root_domain (), worker_thread, NULL, MONO_THREAD_CREATE_FLAGS_THREADPOOL, &error);
603         if (!thread) {
604                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, failed: could not create thread due to %s", mono_native_thread_id_get (), mono_error_get_message (&error));
605                 mono_error_cleanup (&error);
606
607                 COUNTER_ATOMIC (counter, {
608                         counter._.starting --;
609                 });
610
611                 mono_coop_mutex_unlock (&worker.worker_creation_lock);
612
613                 return FALSE;
614         }
615
616         worker.worker_creation_current_count += 1;
617
618         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] try create worker, created %p, now = %d count = %d",
619                 mono_native_thread_id_get (), (gpointer) thread->tid, now, worker.worker_creation_current_count);
620
621         mono_coop_mutex_unlock (&worker.worker_creation_lock);
622         return TRUE;
623 }
624
625 static void monitor_ensure_running (void);
626
627 static void
628 worker_request (void)
629 {
630         if (worker.suspended)
631                 return;
632
633         monitor_ensure_running ();
634
635         if (worker_try_unpark ()) {
636                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, unparked", mono_native_thread_id_get ());
637                 return;
638         }
639
640         if (worker_try_create ()) {
641                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, created", mono_native_thread_id_get ());
642                 return;
643         }
644
645         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] request worker, failed", mono_native_thread_id_get ());
646 }
647
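/* Decide whether the monitor thread should do another round: it keeps running as long as worker
 * requests keep coming in or work items are queued and, even when idle, for at least
 * MONITOR_MINIMAL_LIFETIME after the last time it was needed; it stops when the runtime shuts
 * down or it has been idle longer than that. */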
648 static gboolean
649 monitor_should_keep_running (void)
650 {
651         static gint64 last_should_keep_running = -1;
652
653         g_assert (worker.monitor_status == MONITOR_STATUS_WAITING_FOR_REQUEST || worker.monitor_status == MONITOR_STATUS_REQUESTED);
654
655         if (InterlockedExchange (&worker.monitor_status, MONITOR_STATUS_WAITING_FOR_REQUEST) == MONITOR_STATUS_WAITING_FOR_REQUEST) {
656                 gboolean should_keep_running = TRUE, force_should_keep_running = FALSE;
657
658                 if (mono_runtime_is_shutting_down ()) {
659                         should_keep_running = FALSE;
660                 } else {
661                         if (work_item_count () == 0)
662                                 should_keep_running = FALSE;
663
664                         if (!should_keep_running) {
665                                 if (last_should_keep_running == -1 || mono_100ns_ticks () - last_should_keep_running < MONITOR_MINIMAL_LIFETIME * 1000 * 10) {
666                                         should_keep_running = force_should_keep_running = TRUE;
667                                 }
668                         }
669                 }
670
671                 if (should_keep_running) {
672                         if (last_should_keep_running == -1 || !force_should_keep_running)
673                                 last_should_keep_running = mono_100ns_ticks ();
674                 } else {
675                         last_should_keep_running = -1;
676                         if (InterlockedCompareExchange (&worker.monitor_status, MONITOR_STATUS_NOT_RUNNING, MONITOR_STATUS_WAITING_FOR_REQUEST) == MONITOR_STATUS_WAITING_FOR_REQUEST)
677                                 return FALSE;
678                 }
679         }
680
681         g_assert (worker.monitor_status == MONITOR_STATUS_WAITING_FOR_REQUEST || worker.monitor_status == MONITOR_STATUS_REQUESTED);
682
683         return TRUE;
684 }
685
686 static gboolean
687 monitor_sufficient_delay_since_last_dequeue (void)
688 {
689         gint64 threshold;
690
691         if (worker.cpu_usage < CPU_USAGE_LOW) {
692                 threshold = MONITOR_INTERVAL;
693         } else {
694                 ThreadPoolWorkerCounter counter;
695                 counter = COUNTER_READ ();
696                 threshold = counter._.max_working * MONITOR_INTERVAL * 2;
697         }
698
699         return mono_msec_ticks () >= worker.heuristic_last_dequeue + threshold;
700 }
701
702 static void hill_climbing_force_change (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition);
703
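/* The monitor thread wakes up every MONITOR_INTERVAL ms and handles starvation: if work items are
 * queued and nothing has been dequeued recently (the tolerated delay grows with CPU usage), it
 * bumps max_working and then unparks or creates a worker so the queue keeps draining even when
 * all current workers are blocked. */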
704 static gsize WINAPI
705 monitor_thread (gpointer unused)
706 {
707         MonoInternalThread *internal;
708         guint i;
709
710         if (!mono_refcount_tryinc (&worker))
711                 return 0;
712
713         internal = mono_thread_internal_current ();
714         g_assert (internal);
715
716         mono_cpu_usage (worker.cpu_usage_state);
717
718         // printf ("monitor_thread: start\n");
719
720         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, started", mono_native_thread_id_get ());
721
722         do {
723                 ThreadPoolWorkerCounter counter;
724                 gboolean limit_worker_max_reached;
725                 gint32 interval_left = MONITOR_INTERVAL;
726                 gint32 awake = 0; /* number of spurious awakes we tolerate before doing a round of rebalancing */
727
728                 g_assert (worker.monitor_status != MONITOR_STATUS_NOT_RUNNING);
729
730                 // counter = COUNTER_READ ();
731                 // printf ("monitor_thread: starting = %d working = %d parked = %d max_working = %d\n",
732                 //      counter._.starting, counter._.working, counter._.parked, counter._.max_working);
733
734                 do {
735                         gint64 ts;
736                         gboolean alerted = FALSE;
737
738                         if (mono_runtime_is_shutting_down ())
739                                 break;
740
741                         ts = mono_msec_ticks ();
742                         if (mono_thread_info_sleep (interval_left, &alerted) == 0)
743                                 break;
744                         interval_left -= mono_msec_ticks () - ts;
745
746                         mono_thread_interruption_checkpoint ();
747                 } while (interval_left > 0 && ++awake < 10);
748
749                 if (mono_runtime_is_shutting_down ())
750                         continue;
751
752                 if (worker.suspended)
753                         continue;
754
755                 if (work_item_count () == 0)
756                         continue;
757
758                 worker.cpu_usage = mono_cpu_usage (worker.cpu_usage_state);
759
760                 if (!monitor_sufficient_delay_since_last_dequeue ())
761                         continue;
762
763                 limit_worker_max_reached = FALSE;
764
765                 COUNTER_ATOMIC (counter, {
766                         if (counter._.max_working >= worker.limit_worker_max) {
767                                 limit_worker_max_reached = TRUE;
768                                 break;
769                         }
770                         counter._.max_working ++;
771                 });
772
773                 if (limit_worker_max_reached)
774                         continue;
775
776                 hill_climbing_force_change (counter._.max_working, TRANSITION_STARVATION);
777
778                 for (i = 0; i < 5; ++i) {
779                         if (mono_runtime_is_shutting_down ())
780                                 break;
781
782                         if (worker_try_unpark ()) {
783                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, unparked", mono_native_thread_id_get ());
784                                 break;
785                         }
786
787                         if (worker_try_create ()) {
788                                 mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, created", mono_native_thread_id_get ());
789                                 break;
790                         }
791                 }
792         } while (monitor_should_keep_running ());
793
794         // printf ("monitor_thread: stop\n");
795
796         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] monitor thread, finished", mono_native_thread_id_get ());
797
798         mono_refcount_dec (&worker);
799         return 0;
800 }
801
802 static void
803 monitor_ensure_running (void)
804 {
805         MonoError error;
806         for (;;) {
807                 switch (worker.monitor_status) {
808                 case MONITOR_STATUS_REQUESTED:
809                         // printf ("monitor_thread: requested\n");
810                         return;
811                 case MONITOR_STATUS_WAITING_FOR_REQUEST:
812                         // printf ("monitor_thread: waiting for request\n");
813                         InterlockedCompareExchange (&worker.monitor_status, MONITOR_STATUS_REQUESTED, MONITOR_STATUS_WAITING_FOR_REQUEST);
814                         break;
815                 case MONITOR_STATUS_NOT_RUNNING:
816                         // printf ("monitor_thread: not running\n");
817                         if (mono_runtime_is_shutting_down ())
818                                 return;
819                         if (InterlockedCompareExchange (&worker.monitor_status, MONITOR_STATUS_REQUESTED, MONITOR_STATUS_NOT_RUNNING) == MONITOR_STATUS_NOT_RUNNING) {
820                                 // printf ("monitor_thread: creating\n");
821                                 if (!mono_thread_create_internal (mono_get_root_domain (), monitor_thread, NULL, MONO_THREAD_CREATE_FLAGS_THREADPOOL | MONO_THREAD_CREATE_FLAGS_SMALL_STACK, &error)) {
822                                         // printf ("monitor_thread: creating failed\n");
823                                         worker.monitor_status = MONITOR_STATUS_NOT_RUNNING;
824                                         mono_error_cleanup (&error);
825                                         mono_refcount_dec (&worker);
826                                 }
827                                 return;
828                         }
829                         break;
830                 default: g_assert_not_reached ();
831                 }
832         }
833 }
834
835 static void
836 hill_climbing_change_thread_count (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition)
837 {
838         ThreadPoolHillClimbing *hc;
839
840         hc = &worker.heuristic_hill_climbing;
841
842         mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_THREADPOOL, "[%p] hill climbing, change max number of threads %d", mono_native_thread_id_get (), new_thread_count);
843
844         hc->last_thread_count = new_thread_count;
845         hc->current_sample_interval = rand_next (&hc->random_interval_generator, hc->sample_interval_low, hc->sample_interval_high);
846         hc->elapsed_since_last_change = 0;
847         hc->completions_since_last_change = 0;
848 }
849
850 static void
851 hill_climbing_force_change (gint16 new_thread_count, ThreadPoolHeuristicStateTransition transition)
852 {
853         ThreadPoolHillClimbing *hc;
854
855         hc = &worker.heuristic_hill_climbing;
856
857         if (new_thread_count != hc->last_thread_count) {
858                 hc->current_control_setting += new_thread_count - hc->last_thread_count;
859                 hill_climbing_change_thread_count (new_thread_count, transition);
860         }
861 }
862
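/* Measure the component of `samples` at the given period using a Goertzel-style recurrence, i.e.
 * a single bin of a discrete Fourier transform, returned as a complex number scaled by the sample
 * count. This is how the hill-climbing code separates the throughput variation it caused itself
 * (at the thread-count wave period) from background noise (adjacent periods). */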
863 static double_complex
864 hill_climbing_get_wave_component (gdouble *samples, guint sample_count, gdouble period)
865 {
866         ThreadPoolHillClimbing *hc;
867         gdouble w, cosine, sine, coeff, q0, q1, q2;
868         guint i;
869
870         g_assert (sample_count >= period);
871         g_assert (period >= 2);
872
873         hc = &worker.heuristic_hill_climbing;
874
875         w = 2.0 * M_PI / period;
876         cosine = cos (w);
877         sine = sin (w);
878         coeff = 2.0 * cosine;
879         q0 = q1 = q2 = 0;
880
881         for (i = 0; i < sample_count; ++i) {
882                 q0 = coeff * q1 - q2 + samples [(hc->total_samples - sample_count + i) % hc->samples_to_measure];
883                 q2 = q1;
884                 q1 = q0;
885         }
886
887         return mono_double_complex_scalar_div (mono_double_complex_make (q1 - q2 * cosine, (q2 * sine)), ((gdouble)sample_count));
888 }
889
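/* Core of the hill-climbing heuristic (a port of the CLR threadpool's thread-injection logic): a
 * small square wave is superimposed on the thread count, the resulting throughput is analysed at
 * the wave frequency and at adjacent frequencies (noise), and the control setting is moved in the
 * direction indicated by the signal-to-noise-weighted throughput/thread-count ratio. Returns the
 * new target for max_working and sets *adjustment_interval to the delay before the next
 * adjustment. */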
890 static gint16
891 hill_climbing_update (gint16 current_thread_count, guint32 sample_duration, gint32 completions, gint64 *adjustment_interval)
892 {
893         ThreadPoolHillClimbing *hc;
894         ThreadPoolHeuristicStateTransition transition;
895         gdouble throughput;
896         gdouble throughput_error_estimate;
897         gdouble confidence;
898         gdouble move;
899         gdouble gain;
900         gint sample_index;
901         gint sample_count;
902         gint new_thread_wave_magnitude;
903         gint new_thread_count;
904         double_complex thread_wave_component;
905         double_complex throughput_wave_component;
906         double_complex ratio;
907
908         g_assert (adjustment_interval);
909
910         hc = &worker.heuristic_hill_climbing;
911
912         /* If someone changed the thread count without telling us, update our records accordingly. */
913         if (current_thread_count != hc->last_thread_count)
914                 hill_climbing_force_change (current_thread_count, TRANSITION_INITIALIZING);
915
916         /* Update the cumulative stats for this thread count */
917         hc->elapsed_since_last_change += sample_duration;
918         hc->completions_since_last_change += completions;
919
920         /* Add in any data we've already collected about this sample */
921         sample_duration += hc->accumulated_sample_duration;
922         completions += hc->accumulated_completion_count;
923
924         /* We need to make sure we're collecting reasonably accurate data. Since we're just counting the end
925          * of each work item, we are going to be missing some data about what really happened during the
926          * sample interval. The count produced by each thread includes an initial work item that may have
927          * started well before the start of the interval, and each thread may have been running some new
928          * work item for some time before the end of the interval, which did not yet get counted. So
929          * our count is going to be off by +/- threadCount workitems.
930          *
931          * The exception is that the thread that reported to us last time definitely wasn't running any work
932          * at that time, and the thread that's reporting now definitely isn't running a work item now. So
933          * we really only need to consider threadCount-1 threads.
934          *
935          * Thus the percent error in our count is +/- (threadCount-1)/numCompletions.
936          *
937          * We cannot rely on the frequency-domain analysis we'll be doing later to filter out this error, because
938          * of the way it accumulates over time. If this sample is off by, say, 33% in the negative direction,
939          * then the next one likely will be too. The one after that will include the sum of the completions
940          * we missed in the previous samples, and so will be 33% positive. So every three samples we'll have
941          * two "low" samples and one "high" sample. This will appear as periodic variation right in the frequency
942          * range we're targeting, which will not be filtered by the frequency-domain translation. */
943         if (hc->total_samples > 0 && ((current_thread_count - 1.0) / completions) >= hc->max_sample_error) {
944                 /* Not accurate enough yet. Let's accumulate the data so
945                  * far, and tell the ThreadPoolWorker to collect a little more. */
946                 hc->accumulated_sample_duration = sample_duration;
947                 hc->accumulated_completion_count = completions;
948                 *adjustment_interval = 10;
949                 return current_thread_count;
950         }
951
952         /* We've got enough data for our sample; reset our accumulators for next time. */
953         hc->accumulated_sample_duration = 0;
954         hc->accumulated_completion_count = 0;
955
956         /* Add the current thread count and throughput sample to our history. */
957         throughput = ((gdouble) completions) / sample_duration;
958
959         sample_index = hc->total_samples % hc->samples_to_measure;
960         hc->samples [sample_index] = throughput;
961         hc->thread_counts [sample_index] = current_thread_count;
962         hc->total_samples ++;
963
964         /* Set up defaults for our metrics. */
965         thread_wave_component = mono_double_complex_make(0, 0);
966         throughput_wave_component = mono_double_complex_make(0, 0);
967         throughput_error_estimate = 0;
968         ratio = mono_double_complex_make(0, 0);
969         confidence = 0;
970
971         transition = TRANSITION_WARMUP;
972
973         /* How many samples will we use? It must be at least the three wave periods we're looking for, and it must also
974          * be a whole multiple of the primary wave's period; otherwise the frequency we're looking for will fall between
975          * two frequency bands in the Fourier analysis, and we won't be able to measure it accurately. */
976         sample_count = ((gint) MIN (hc->total_samples - 1, hc->samples_to_measure) / hc->wave_period) * hc->wave_period;
977
978         if (sample_count > hc->wave_period) {
979                 guint i;
980                 gdouble average_throughput;
981                 gdouble average_thread_count;
982                 gdouble sample_sum = 0;
983                 gdouble thread_sum = 0;
984
985                 /* Average the throughput and thread count samples, so we can scale the wave magnitudes later. */
986                 for (i = 0; i < sample_count; ++i) {
987                         guint j = (hc->total_samples - sample_count + i) % hc->samples_to_measure;
988                         sample_sum += hc->samples [j];
989                         thread_sum += hc->thread_counts [j];
990                 }
991
992                 average_throughput = sample_sum / sample_count;
993                 average_thread_count = thread_sum / sample_count;
994
995                 if (average_throughput > 0 && average_thread_count > 0) {
996                         gdouble noise_for_confidence, adjacent_period_1, adjacent_period_2;
997
998                         /* Calculate the periods of the adjacent frequency bands we'll be using to
999                          * measure noise levels. We want the two adjacent Fourier frequency bands. */
1000                         adjacent_period_1 = sample_count / (((gdouble) sample_count) / ((gdouble) hc->wave_period) + 1);
1001                         adjacent_period_2 = sample_count / (((gdouble) sample_count) / ((gdouble) hc->wave_period) - 1);
1002
1003                         /* Get the three different frequency components of the throughput (scaled by average
1004                          * throughput). Our "error" estimate (the amount of noise that might be present in the
1005                          * frequency band we're really interested in) is the average of the adjacent bands. */
1006                         throughput_wave_component = mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->samples, sample_count, hc->wave_period), average_throughput);
1007                         throughput_error_estimate = cabs (mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->samples, sample_count, adjacent_period_1), average_throughput));
1008
1009                         if (adjacent_period_2 <= sample_count) {
1010                                 throughput_error_estimate = MAX (throughput_error_estimate, cabs (mono_double_complex_scalar_div (hill_climbing_get_wave_component (
1011                                         hc->samples, sample_count, adjacent_period_2), average_throughput)));
1012                         }
1013
1014                         /* Do the same for the thread counts, so we have something to compare to. We don't
1015                          * measure thread count noise, because there is none; these are exact measurements. */
1016                         thread_wave_component = mono_double_complex_scalar_div (hill_climbing_get_wave_component (hc->thread_counts, sample_count, hc->wave_period), average_thread_count);
1017
1018                         /* Update our moving average of the throughput noise. We'll use this
1019                          * later as feedback to determine the new size of the thread wave. */
1020                         if (hc->average_throughput_noise == 0) {
1021                                 hc->average_throughput_noise = throughput_error_estimate;
1022                         } else {
1023                                 hc->average_throughput_noise = (hc->throughput_error_smoothing_factor * throughput_error_estimate)
1024                                         + ((1.0 - hc->throughput_error_smoothing_factor) * hc->average_throughput_noise);
1025                         }
1026
1027                         if (cabs (thread_wave_component) > 0) {
1028                                 /* Adjust the throughput wave so it's centered around the target wave,
1029                                  * and then calculate the adjusted throughput/thread ratio. */
1030                                 ratio = mono_double_complex_div (mono_double_complex_sub (throughput_wave_component, mono_double_complex_scalar_mul(thread_wave_component, hc->target_throughput_ratio)), thread_wave_component);
1031                                 transition = TRANSITION_CLIMBING_MOVE;
1032                         } else {
1033                                 ratio = mono_double_complex_make (0, 0);
1034                                 transition = TRANSITION_STABILIZING;
1035                         }
1036
1037                         noise_for_confidence = MAX (hc->average_throughput_noise, throughput_error_estimate);
1038                         if (noise_for_confidence > 0) {
1039                                 confidence = cabs (thread_wave_component) / noise_for_confidence / hc->target_signal_to_noise_ratio;
1040                         } else {
1041                                 /* there is no noise! */
1042                                 confidence = 1.0;
1043                         }
1044                 }
1045         }
1046
1047         /* We use just the real part of the complex ratio we just calculated. If the throughput signal
1048          * is exactly in phase with the thread signal, this will be the same as taking the magnitude of
1049          * the complex move and moving that far up. If they're 180 degrees out of phase, we'll move
1050          * backward (because this indicates that our changes are having the opposite of the intended effect).
1051          * If they're 90 degrees out of phase, we won't move at all, because we can't tell whether we're
1052          * having a negative or positive effect on throughput. */
1053         move = creal (ratio);
1054         move = CLAMP (move, -1.0, 1.0);
1055
1056         /* Apply our confidence multiplier. */
1057         move *= CLAMP (confidence, -1.0, 1.0);
1058
1059         /* Now apply non-linear gain, such that values around zero are attenuated, while higher values
1060          * are enhanced. This allows us to move quickly if we're far away from the target, but more slowly
1061          * if we're getting close, giving us rapid ramp-up without wild oscillations around the target. */
1062         gain = hc->max_change_per_second * sample_duration;
1063         move = pow (fabs (move), hc->gain_exponent) * (move >= 0.0 ? 1 : -1) * gain;
1064         move = MIN (move, hc->max_change_per_sample);
1065
1066         /* If the result was positive, and CPU is > 95%, refuse the move. */
1067         if (move > 0.0 && worker.cpu_usage > CPU_USAGE_HIGH)
1068                 move = 0.0;
1069
1070         /* Apply the move to our control setting. */
1071         hc->current_control_setting += move;
1072
1073         /* Calculate the new thread wave magnitude, which is based on the moving average we've been keeping of the
1074          * throughput error.  This average starts at zero, so we'll start with a nice safe little wave at first. */
1075         new_thread_wave_magnitude = (gint)(0.5 + (hc->current_control_setting * hc->average_throughput_noise
1076                 * hc->target_signal_to_noise_ratio * hc->thread_magnitude_multiplier * 2.0));
1077         new_thread_wave_magnitude = CLAMP (new_thread_wave_magnitude, 1, hc->max_thread_wave_magnitude);
1078
1079         /* Make sure our control setting is within the ThreadPoolWorker's limits. */
1080         hc->current_control_setting = CLAMP (hc->current_control_setting, worker.limit_worker_min, worker.limit_worker_max - new_thread_wave_magnitude);
1081
1082         /* Calculate the new thread count (control setting + square wave). */
1083         new_thread_count = (gint)(hc->current_control_setting + new_thread_wave_magnitude * ((hc->total_samples / (hc->wave_period / 2)) % 2));
1084
1085         /* Make sure the new thread count doesn't exceed the ThreadPoolWorker's limits. */
1086         new_thread_count = CLAMP (new_thread_count, worker.limit_worker_min, worker.limit_worker_max);
1087
1088         if (new_thread_count != current_thread_count)
1089                 hill_climbing_change_thread_count (new_thread_count, transition);
1090
1091         if (creal (ratio) < 0.0 && new_thread_count == worker.limit_worker_min)
1092                 *adjustment_interval = (gint)(0.5 + hc->current_sample_interval * (10.0 * MAX (-1.0 * creal (ratio), 1.0)));
1093         else
1094                 *adjustment_interval = hc->current_sample_interval;
1095
1096         return new_thread_count;
1097 }
1098
1099 static gboolean
1100 heuristic_should_adjust (void)
1101 {
1102         if (worker.heuristic_last_dequeue > worker.heuristic_last_adjustment + worker.heuristic_adjustment_interval) {
1103                 ThreadPoolWorkerCounter counter;
1104                 counter = COUNTER_READ ();
1105                 if (counter._.working <= counter._.max_working)
1106                         return TRUE;
1107         }
1108
1109         return FALSE;
1110 }
1111
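/* Run one hill-climbing sample: under heuristic_lock (trylock, so at most one thread adjusts at a
 * time), measure completions and elapsed time since the last sample, feed them to
 * hill_climbing_update, publish the new max_working, and request an extra worker if the limit
 * grew. */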
1112 static void
1113 heuristic_adjust (void)
1114 {
1115         if (mono_coop_mutex_trylock (&worker.heuristic_lock) == 0) {
1116                 gint32 completions = InterlockedExchange (&worker.heuristic_completions, 0);
1117                 gint64 sample_end = mono_msec_ticks ();
1118                 gint64 sample_duration = sample_end - worker.heuristic_sample_start;
1119
1120                 if (sample_duration >= worker.heuristic_adjustment_interval / 2) {
1121                         ThreadPoolWorkerCounter counter;
1122                         gint16 new_thread_count, prev_max_working;
1123
1124                         counter = COUNTER_READ ();
1125                         new_thread_count = hill_climbing_update (counter._.max_working, sample_duration, completions, &worker.heuristic_adjustment_interval);
1126                         prev_max_working = counter._.max_working; /* counter._.max_working is overwritten just below */
1127                         COUNTER_ATOMIC (counter, {
1128                                 counter._.max_working = new_thread_count;
1129                         });
1130
1131                         if (new_thread_count > prev_max_working)
1132                                 worker_request ();
1133
1134                         worker.heuristic_sample_start = sample_end;
1135                         worker.heuristic_last_adjustment = mono_msec_ticks ();
1136                 }
1137
1138                 mono_coop_mutex_unlock (&worker.heuristic_lock);
1139         }
1140 }
1141
1142 static void
1143 heuristic_notify_work_completed (void)
1144 {
1145         InterlockedIncrement (&worker.heuristic_completions);
1146         worker.heuristic_last_dequeue = mono_msec_ticks ();
1147
1148         if (heuristic_should_adjust ())
1149                 heuristic_adjust ();
1150 }
1151
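/* Called by the threadpool after a work item completes: feeds the completion into the heuristic
 * and returns whether the number of working threads is still within max_working. */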
1152 gboolean
1153 mono_threadpool_worker_notify_completed (void)
1154 {
1155         ThreadPoolWorkerCounter counter;
1156
1157         heuristic_notify_work_completed ();
1158
1159         counter = COUNTER_READ ();
1160         return counter._.working <= counter._.max_working;
1161 }
1162
1163 gint32
1164 mono_threadpool_worker_get_min (void)
1165 {
1166         gint32 ret;
1167
1168         if (!mono_refcount_tryinc (&worker))
1169                 return 0;
1170
1171         ret = worker.limit_worker_min;
1172
1173         mono_refcount_dec (&worker);
1174         return ret;
1175 }
1176
1177 gboolean
1178 mono_threadpool_worker_set_min (gint32 value)
1179 {
1180         if (value <= 0 || value > worker.limit_worker_max)
1181                 return FALSE;
1182
1183         if (!mono_refcount_tryinc (&worker))
1184                 return FALSE;
1185
1186         worker.limit_worker_min = value;
1187
1188         mono_refcount_dec (&worker);
1189         return TRUE;
1190 }
1191
1192 gint32
1193 mono_threadpool_worker_get_max (void)
1194 {
1195         gint32 ret;
1196
1197         if (!mono_refcount_tryinc (&worker))
1198                 return 0;
1199
1200         ret = worker.limit_worker_max;
1201
1202         mono_refcount_dec (&worker);
1203         return ret;
1204 }
1205
1206 gboolean
1207 mono_threadpool_worker_set_max (gint32 value)
1208 {
1209         gint32 cpu_count;
1210
1211         cpu_count = mono_cpu_count ();
1212         if (value < worker.limit_worker_min || value < cpu_count)
1213                 return FALSE;
1214
1215         if (!mono_refcount_tryinc (&worker))
1216                 return FALSE;
1217
1218         worker.limit_worker_max = value;
1219
1220         mono_refcount_dec (&worker);
1221         return TRUE;
1222 }
1223
1224 void
1225 mono_threadpool_worker_set_suspended (gboolean suspended)
1226 {
1227         if (!mono_refcount_tryinc (&worker))
1228                 return;
1229
1230         worker.suspended = suspended;
1231         if (!suspended)
1232                 worker_request ();
1233
1234         mono_refcount_dec (&worker);
1235 }