2 * threadpool.c: global thread pool
5 * Dietmar Maurer (dietmar@ximian.com)
6 * Gonzalo Paniagua Javier (gonzalo@ximian.com)
8 * Copyright 2001-2003 Ximian, Inc (http://www.ximian.com)
9 * Copyright 2004-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2001 Xamarin Inc (http://www.xamarin.com)
16 #include <mono/metadata/profiler-private.h>
17 #include <mono/metadata/threads.h>
18 #include <mono/metadata/threads-types.h>
19 #include <mono/metadata/threadpool-internals.h>
20 #include <mono/metadata/exception.h>
21 #include <mono/metadata/environment.h>
22 #include <mono/metadata/mono-config.h>
23 #include <mono/metadata/mono-mlist.h>
24 #include <mono/metadata/mono-perfcounters.h>
25 #include <mono/metadata/socket-io.h>
26 #include <mono/metadata/mono-cq.h>
27 #include <mono/metadata/mono-wsq.h>
28 #include <mono/metadata/mono-ptr-array.h>
29 #include <mono/metadata/object-internals.h>
30 #include <mono/io-layer/io-layer.h>
31 #include <mono/utils/mono-time.h>
32 #include <mono/utils/mono-proclib.h>
33 #include <mono/utils/mono-semaphore.h>
34 #include <mono/utils/atomic.h>
36 #ifdef HAVE_SYS_TIME_H
39 #include <sys/types.h>
46 #ifdef HAVE_SYS_SOCKET_H
47 #include <sys/socket.h>
49 #include <mono/utils/mono-poll.h>
51 #include <sys/epoll.h>
54 #include <sys/event.h>
58 #ifndef DISABLE_SOCKETS
59 #include "mono/io-layer/socket-wrappers.h"
62 #include "threadpool.h"
63 #include "threadpool-ms.h"
64 #include "threadpool-ms-io.h"
67 use_ms_threadpool (void)
69 static gboolean use_ms_tp = -1;
70 const gchar *mono_threadpool_env;
73 else if (!(mono_threadpool_env = g_getenv ("MONO_THREADPOOL")))
74 return use_ms_tp = FALSE;
75 else if (strcmp (mono_threadpool_env, "microsoft") == 0)
76 return use_ms_tp = TRUE;
78 return use_ms_tp = FALSE;
/*
 * NOTE(review): this region of the listing is a partial extract -- the
 * embedded original line numbers jump (81, 82, 84, 87, ...), so source
 * lines are missing (enum/struct headers, braces) and the fragment does
 * not compile as-is.  Code left byte-identical; comments only.
 */
/* True when the thread has a stop or suspend request pending. */
81 #define THREAD_WANTS_A_BREAK(t) ((t->state & (ThreadState_StopRequested | \
82 ThreadState_SuspendRequested)) != 0)
84 /* DEBUG: prints tp data every 2s */
87 /* mono_thread_pool_init called */
88 static volatile int tp_inited;
/* Tail of the monitor-state enum; AWAKE member presumably precedes -- confirm. */
98 MONITOR_STATE_FALLING_ASLEEP,
99 MONITOR_STATE_SLEEPING
102 static SocketIOData socket_io_data;
/* Fields of the ThreadPool struct (struct header missing from extract). */
106 MonoCQ *queue; /* GC root */
108 volatile gint waiting; /* threads waiting for a work item */
111 volatile gint pool_status; /* 0 -> not initialized, 1 -> initialized, 2 -> cleaning up */
112 /* min, max, n and busy -> Interlocked */
113 volatile gint min_threads;
114 volatile gint max_threads;
115 volatile gint nthreads;
116 volatile gint busy_threads;
118 void (*async_invoke) (gpointer data);
119 void *pc_nitems; /* Performance counter for total number of items in added */
120 void *pc_nthreads; /* Performance counter for total number of active threads */
122 volatile gint destroy_thread;
124 volatile gint32 njobs;
126 volatile gint32 nexecuted;
/* The two global pools: regular work items and socket/pipe I/O items. */
130 static ThreadPool async_tp;
131 static ThreadPool async_io_tp;
/* Forward declarations for the internal threadpool machinery. */
133 static void async_invoke_thread (gpointer data);
134 static MonoObject *mono_async_invoke (ThreadPool *tp, MonoAsyncResult *ares);
135 static void threadpool_free_queue (ThreadPool *tp);
136 static void threadpool_append_job (ThreadPool *tp, MonoObject *ar);
137 static void threadpool_append_jobs (ThreadPool *tp, MonoObject **jobs, gint njobs);
138 static void threadpool_init (ThreadPool *tp, int min_threads, int max_threads, void (*async_invoke) (gpointer));
139 static void threadpool_start_idle_threads (ThreadPool *tp);
140 static void threadpool_kill_idle_threads (ThreadPool *tp);
141 static gboolean threadpool_start_thread (ThreadPool *tp);
142 static void threadpool_kill_thread (ThreadPool *tp);
143 static void monitor_thread (gpointer data);
144 static int get_event_from_state (MonoSocketAsyncResult *state);
/* Lazily-resolved managed classes used to route jobs (see socket_io_filter). */
146 static MonoClass *async_call_klass;
147 static MonoClass *socket_async_call_klass;
148 static MonoClass *process_async_call_klass;
/* Worker-thread registry and per-thread work-stealing queues. */
150 static GPtrArray *threads;
151 mono_mutex_t threads_lock;
152 static GPtrArray *wsqs;
153 mono_mutex_t wsqs_lock;
154 static gboolean suspended;
/* State shared with the monitor (rebalancing) thread. */
156 static volatile gint32 monitor_njobs = 0;
157 static volatile gint32 monitor_state;
158 static MonoSemType monitor_sem;
159 static MonoInternalThread *monitor_internal_thread;
/* Embedding-API hooks (mono_thread_pool_set_func_hooks etc. -- presumably). */
162 static MonoThreadPoolFunc tp_start_func;
163 static MonoThreadPoolFunc tp_finish_func;
164 static gpointer tp_hooks_user_data;
165 static MonoThreadPoolItemFunc tp_item_begin_func;
166 static MonoThreadPoolItemFunc tp_item_end_func;
167 static gpointer tp_item_user_data;
/* Fragment of the AIO operation enum (most members missing from extract). */
177 AIO_OP_RECV_JUST_CALLBACK,
178 AIO_OP_SEND_JUST_CALLBACK,
182 AIO_OP_ACCEPTRECEIVE,
183 AIO_OP_RECEIVE_BUFFERS,
/* Backend selection: poll is always available; epoll/kqueue when configured. */
188 // #include <mono/metadata/tpool-poll.c>
189 gpointer tp_poll_init (SocketIOData *data);
192 #include <mono/metadata/tpool-epoll.c>
193 #elif defined(USE_KQUEUE_FOR_THREADPOOL)
194 #include <mono/metadata/tpool-kqueue.c>
197 * Functions to check whenever a class is given system class. We need to cache things in MonoDomain since some of the
198 * assemblies can be unloaded.
202 is_system_type (MonoDomain *domain, MonoClass *klass)
204 if (domain->system_image == NULL)
205 domain->system_image = mono_image_loaded ("System");
207 return klass->image == domain->system_image;
211 is_corlib_type (MonoDomain *domain, MonoClass *klass)
213 return klass->image == mono_defaults.corlib;
217 * Note that we call it is_socket_type() where 'socket' refers to the image
218 * that contains the System.Net.Sockets.Socket type.
221 is_socket_type (MonoDomain *domain, MonoClass *klass)
223 return is_system_type (domain, klass);
/*
 * NOTE(review): partial extract -- embedded line numbers jump, so lines
 * are missing (macro interior at 227/230-235, function headers, the
 * return MONO_POLLIN/MONO_POLLOUT lines of get_event_from_state, the
 * #else separating the DISABLE_SOCKETS stubs from the real versions).
 * Code left byte-identical; comments only.
 */
/* Cached "is klass the named type?" check; result memoized through *loc. */
226 #define check_type_cached(domain, ASSEMBLY, _class, _namespace, _name, loc) do { \
228 return *loc == _class; \
229 if (is_##ASSEMBLY##_type (domain, _class) && !strcmp (_name, _class->name) && !strcmp (_namespace, _class->name_space)) { \
236 #define check_corlib_type_cached(domain, _class, _namespace, _name, loc) check_type_cached (domain, corlib, _class, _namespace, _name, loc)
238 #define check_socket_type_cached(domain, _class, _namespace, _name, loc) check_type_cached (domain, socket, _class, _namespace, _name, loc)
240 #define check_system_type_cached(domain, _class, _namespace, _name, loc) check_type_cached (domain, system, _class, _namespace, _name, loc)
/* Identity checks for well-known managed types; caches live in MonoDomain. */
243 is_corlib_asyncresult (MonoDomain *domain, MonoClass *klass)
245 check_corlib_type_cached (domain, klass, "System.Runtime.Remoting.Messaging", "AsyncResult", &domain->corlib_asyncresult_class);
249 is_socket (MonoDomain *domain, MonoClass *klass)
251 check_socket_type_cached (domain, klass, "System.Net.Sockets", "Socket", &domain->socket_class);
255 is_socketasyncresult (MonoDomain *domain, MonoClass *klass)
/* NOTE(review): cache is a function-local static here, not per-domain --
 * presumably safe only because the type lives in the System image. */
257 static MonoClass *socket_async_result_klass = NULL;
258 check_system_type_cached (domain, klass, "System.Net.Sockets", "SocketAsyncResult", &socket_async_result_klass);
262 is_socketasynccall (MonoDomain *domain, MonoClass *klass)
/* SocketAsyncCall is a nested type inside Socket. */
264 return (klass->nested_in &&
265 is_socket (domain, klass->nested_in) &&
266 !strcmp (klass->name, "SocketAsyncCall"));
270 is_appdomainunloaded_exception (MonoDomain *domain, MonoClass *klass)
272 check_corlib_type_cached (domain, klass, "System", "AppDomainUnloadedException", &domain->ad_unloaded_ex_class);
276 is_sd_process (MonoDomain *domain, MonoClass *klass)
278 check_system_type_cached (domain, klass, "System.Diagnostics", "Process", &domain->process_class);
282 is_sdp_asyncreadhandler (MonoDomain *domain, MonoClass *klass)
/* AsyncReadHandler is nested inside System.Diagnostics.Process. */
285 return (klass->nested_in &&
286 is_sd_process (domain, klass->nested_in) &&
287 !strcmp (klass->name, "AsyncReadHandler"));
/* No-op stubs when sockets support is compiled out. */
291 #ifdef DISABLE_SOCKETS
294 socket_io_cleanup (SocketIOData *data)
299 get_event_from_state (MonoSocketAsyncResult *state)
301 g_assert_not_reached ();
306 get_events_from_list (MonoMList *list)
/* Real implementations (the #else of DISABLE_SOCKETS is missing above). */
/* Shuts down the I/O backend once; inited must be 2 (fully initialized). */
314 socket_io_cleanup (SocketIOData *data)
316 mono_mutex_lock (&data->io_lock);
317 if (data->inited != 2) {
318 mono_mutex_unlock (&data->io_lock);
322 data->shutdown (data->event_data);
323 mono_mutex_unlock (&data->io_lock);
/* Maps an AIO operation to the poll event it waits on (MONO_POLLIN for
 * reads/accepts, MONO_POLLOUT for writes/connects -- return lines missing). */
327 get_event_from_state (MonoSocketAsyncResult *state)
329 switch (state->operation) {
332 case AIO_OP_RECV_JUST_CALLBACK:
333 case AIO_OP_RECEIVEFROM:
334 case AIO_OP_READPIPE:
335 case AIO_OP_ACCEPTRECEIVE:
336 case AIO_OP_RECEIVE_BUFFERS:
339 case AIO_OP_SEND_JUST_CALLBACK:
342 case AIO_OP_SEND_BUFFERS:
343 case AIO_OP_DISCONNECT:
345 default: /* Should never happen */
346 g_message ("get_event_from_state: unknown value in switch!!!");
352 get_events_from_list (MonoMList *list)
354 MonoSocketAsyncResult *state;
357 while (list && (state = (MonoSocketAsyncResult *)mono_mlist_get_data (list))) {
358 events |= get_event_from_state (state);
359 list = mono_mlist_next (list);
/*
 * Convenience wrappers around the socket icalls used by the I/O worker.
 * NOTE(review): the trailing ';' inside each expansion is a known wart --
 * callers must not add their own semicolon-sensitive context around it.
 */
365 #define ICALL_RECV(x) ves_icall_System_Net_Sockets_Socket_Receive_internal (\
366 (SOCKET)(gssize)x->handle, x->buffer, x->offset, x->size,\
367 x->socket_flags, &x->error);
369 #define ICALL_SEND(x) ves_icall_System_Net_Sockets_Socket_Send_internal (\
370 (SOCKET)(gssize)x->handle, x->buffer, x->offset, x->size,\
371 x->socket_flags, &x->error);
373 #endif /* !DISABLE_SOCKETS */
376 threadpool_jobs_inc (MonoObject *obj)
379 InterlockedIncrement (&obj->vtable->domain->threadpool_jobs);
383 threadpool_jobs_dec (MonoObject *obj)
391 domain = obj->vtable->domain;
392 remaining_jobs = InterlockedDecrement (&domain->threadpool_jobs);
393 if (remaining_jobs == 0 && domain->cleanup_semaphore) {
394 ReleaseSemaphore (domain->cleanup_semaphore, 1, NULL);
/*
 * NOTE(review): partial extract -- embedded line numbers jump, so many
 * lines (function headers, braces, returns, locals) are missing and this
 * region does not compile as-is.  Code left byte-identical; comments only.
 */
/* get_io_event: unlinks and returns the first async result in *list whose
 * pending operation matches @event (MONO_POLLIN/MONO_POLLOUT). */
401 get_io_event (MonoMList **list, gint event)
411 state = mono_mlist_get_data (current);
412 if (get_event_from_state ((MonoSocketAsyncResult *) state) == event)
417 current = mono_mlist_next (current);
422 mono_mlist_set_next (prev, mono_mlist_next (current));
424 *list = mono_mlist_next (*list);
432 * select/poll wake up when a socket is closed, but epoll just removes
433 * the socket from its internal list without notification.
/* mono_thread_pool_remove_socket: drops @sock from the I/O state table and
 * re-queues its pending operations as callback-only jobs. */
436 mono_thread_pool_remove_socket (int sock)
439 MonoSocketAsyncResult *state;
/* Delegate to the MS-style pool when MONO_THREADPOOL=microsoft. */
442 if (use_ms_threadpool ()) {
443 #ifndef DISABLE_SOCKETS
444 mono_threadpool_ms_io_remove_socket (sock);
449 if (socket_io_data.inited == 0)
452 mono_mutex_lock (&socket_io_data.io_lock);
453 if (socket_io_data.sock_to_state == NULL) {
454 mono_mutex_unlock (&socket_io_data.io_lock);
457 list = mono_g_hash_table_lookup (socket_io_data.sock_to_state, GINT_TO_POINTER (sock));
459 mono_g_hash_table_remove (socket_io_data.sock_to_state, GINT_TO_POINTER (sock));
460 mono_mutex_unlock (&socket_io_data.io_lock);
/* Downgrade pending recv/send to "just run the callback" since the fd is gone. */
463 state = (MonoSocketAsyncResult *) mono_mlist_get_data (list);
464 if (state->operation == AIO_OP_RECEIVE)
465 state->operation = AIO_OP_RECV_JUST_CALLBACK;
466 else if (state->operation == AIO_OP_SEND)
467 state->operation = AIO_OP_SEND_JUST_CALLBACK;
469 ares = get_io_event (&list, MONO_POLLIN);
470 threadpool_append_job (&async_io_tp, ares);
472 ares = get_io_event (&list, MONO_POLLOUT);
473 threadpool_append_job (&async_io_tp, ares);
/* init_event_system: picks epoll/kqueue/poll; epoll falls back to poll
 * when initialization fails. */
479 init_event_system (SocketIOData *data)
482 if (data->event_system == EPOLL_BACKEND) {
483 data->event_data = tp_epoll_init (data);
484 if (data->event_data == NULL) {
485 if (g_getenv ("MONO_DEBUG"))
486 g_message ("Falling back to poll()");
487 data->event_system = POLL_BACKEND;
490 #elif defined(USE_KQUEUE_FOR_THREADPOOL)
491 if (data->event_system == KQUEUE_BACKEND)
492 data->event_data = tp_kqueue_init (data);
494 if (data->event_system == POLL_BACKEND)
495 data->event_data = tp_poll_init (data);
/* socket_io_init: one-time init of the I/O subsystem; data->inited goes
 * 0 -> 1 (initializing, won by CAS) -> 2 (ready); 3 means cleaned up. */
499 socket_io_init (SocketIOData *data)
503 if (data->inited >= 2) // 2 -> initialized, 3-> cleaned up
506 inited = InterlockedCompareExchange (&data->inited, 1, 0);
509 if (data->inited >= 2)
515 mono_mutex_lock (&data->io_lock);
516 data->sock_to_state = mono_g_hash_table_new_type (g_direct_hash, g_direct_equal, MONO_HASH_VALUE_GC);
518 data->event_system = EPOLL_BACKEND;
519 #elif defined(USE_KQUEUE_FOR_THREADPOOL)
520 data->event_system = KQUEUE_BACKEND;
522 data->event_system = POLL_BACKEND;
/* MONO_DISABLE_AIO forces the portable poll backend. */
524 if (g_getenv ("MONO_DISABLE_AIO") != NULL)
525 data->event_system = POLL_BACKEND;
527 init_event_system (data);
/* Spawn the backend wait loop and a first I/O worker. */
528 mono_thread_create_internal (mono_get_root_domain (), data->wait, data, TRUE, SMALL_STACK);
529 mono_mutex_unlock (&data->io_lock);
531 threadpool_start_thread (&async_io_tp);
/* socket_io_add: registers @state's pending operation with the backend. */
535 socket_io_add (MonoAsyncResult *ares, MonoSocketAsyncResult *state)
538 SocketIOData *data = &socket_io_data;
543 socket_io_init (&socket_io_data);
544 if (mono_runtime_is_shutting_down () || data->inited == 3 || data->sock_to_state == NULL)
546 if (async_tp.pool_status == 2)
549 MONO_OBJECT_SETREF (state, ares, ares);
551 fd = GPOINTER_TO_INT (state->handle);
552 mono_mutex_lock (&data->io_lock);
553 if (data->sock_to_state == NULL) {
554 mono_mutex_unlock (&data->io_lock);
/* Append to (or create) the per-fd list of pending operations. */
557 list = mono_g_hash_table_lookup (data->sock_to_state, GINT_TO_POINTER (fd));
559 list = mono_mlist_alloc ((MonoObject*)state);
562 list = mono_mlist_append (list, (MonoObject*)state);
566 mono_g_hash_table_replace (data->sock_to_state, state->handle, list);
567 ievt = get_events_from_list (list);
568 /* The modify function leaves the io_lock critical section. */
569 data->modify (data, fd, state->operation, ievt, is_new);
/* socket_io_filter: TRUE when (target, state) is a socket/process async
 * call that must be routed to the I/O pool rather than the regular pool. */
572 #ifndef DISABLE_SOCKETS
574 socket_io_filter (MonoObject *target, MonoObject *state)
577 MonoSocketAsyncResult *sock_res;
581 if (target == NULL || state == NULL)
584 domain = target->vtable->domain;
585 klass = target->vtable->klass;
586 if (socket_async_call_klass == NULL && is_socketasynccall (domain, klass))
587 socket_async_call_klass = klass;
589 if (process_async_call_klass == NULL && is_sdp_asyncreadhandler (domain, klass))
590 process_async_call_klass = klass;
592 if (klass != socket_async_call_klass && klass != process_async_call_klass)
595 sock_res = (MonoSocketAsyncResult *) state;
596 op = sock_res->operation;
597 if (op < AIO_OP_FIRST || op >= AIO_OP_LAST)
602 #endif /* !DISABLE_SOCKETS */
604 /* Returns the exception thrown when invoking, if any */
/* mono_async_invoke: runs one queued async result and updates the job
 * counters the monitor heuristic feeds on. */
606 mono_async_invoke (ThreadPool *tp, MonoAsyncResult *ares)
608 MonoObject *exc = NULL;
610 mono_async_result_invoke (ares, &exc);
613 InterlockedDecrement (&tp->njobs);
616 InterlockedIncrement (&tp->nexecuted);
/* Last global job: let the monitor thread start falling asleep. */
618 if (InterlockedDecrement (&monitor_njobs) == 0)
619 monitor_state = MONITOR_STATE_FALLING_ASLEEP;
/* threadpool_start_idle_threads: CAS-grows nthreads up to min_threads,
 * spawning one worker per successful increment. */
625 threadpool_start_idle_threads (ThreadPool *tp)
630 stack_size = (!tp->is_io) ? 0 : SMALL_STACK;
634 if (n >= tp->min_threads)
636 if (InterlockedCompareExchange (&tp->nthreads, n + 1, n) == n)
639 #ifndef DISABLE_PERFCOUNTERS
640 mono_perfcounter_update_value (tp->pc_nthreads, TRUE, 1);
642 mono_thread_create_internal (mono_get_root_domain (), tp->async_invoke, tp, TRUE, stack_size);
648 threadpool_init (ThreadPool *tp, int min_threads, int max_threads, void (*async_invoke) (gpointer))
650 memset (tp, 0, sizeof (ThreadPool));
651 tp->min_threads = min_threads;
652 tp->max_threads = max_threads;
653 tp->async_invoke = async_invoke;
654 tp->queue = mono_cq_create ();
655 MONO_SEM_INIT (&tp->new_job, 0);
/*
 * NOTE(review): partial extract -- embedded line numbers jump; headers,
 * locals and braces are missing.  Code left byte-identical; comments only.
 */
/* init_perf_counter: builds the managed strings and asks the perf-counter
 * subsystem for a counter instance ("." == local machine). */
658 #ifndef DISABLE_PERFCOUNTERS
660 init_perf_counter (const char *category, const char *counter)
662 MonoString *category_str;
663 MonoString *counter_str;
669 if (category == NULL || counter == NULL)
671 root = mono_get_root_domain ();
672 category_str = mono_string_new (root, category);
673 counter_str = mono_string_new (root, counter);
674 machine = mono_string_new (root, ".");
675 return mono_perfcounter_get_impl (category_str, counter_str, NULL, machine, &type, &custom);
/* print_pool_info: DEBUG dump of one pool's counters (racy reads via
 * no-op InterlockedCompareExchange are intentional: they just fetch). */
681 print_pool_info (ThreadPool *tp)
684 // if (tp->tail - tp->head == 0)
687 g_print ("Pool status? %d\n", InterlockedCompareExchange (&tp->pool_status, 0, 0));
688 g_print ("Min. threads: %d\n", InterlockedCompareExchange (&tp->min_threads, 0, 0));
689 g_print ("Max. threads: %d\n", InterlockedCompareExchange (&tp->max_threads, 0, 0));
690 g_print ("nthreads: %d\n", InterlockedCompareExchange (&tp->nthreads, 0, 0));
691 g_print ("busy threads: %d\n", InterlockedCompareExchange (&tp->busy_threads, 0, 0));
692 g_print ("Waiting: %d\n", InterlockedCompareExchange (&tp->waiting, 0, 0));
693 g_print ("Queued: %d\n", (tp->tail - tp->head));
694 if (tp == &async_tp) {
696 mono_mutex_lock (&wsqs_lock);
697 for (i = 0; i < wsqs->len; i++) {
698 g_print ("\tWSQ %d: %d\n", i, mono_wsq_count (g_ptr_array_index (wsqs, i)));
700 mono_mutex_unlock (&wsqs_lock);
702 g_print ("\tSockets: %d\n", mono_g_hash_table_size (socket_io_data.sock_to_state));
704 g_print ("-------------\n");
/* signal_handler: SIGALRM-driven DEBUG dump of both pools. */
708 signal_handler (int signo)
713 g_print ("\n-----Non-IO-----\n");
714 print_pool_info (tp);
716 g_print ("\n-----IO-----\n");
717 print_pool_info (tp);
/* Monitor tuning knobs: sample period (ms), history depth, and idle
 * iterations before the monitor goes to sleep. */
722 #define SAMPLES_PERIOD 500
723 #define HISTORY_SIZE 10
724 /* number of iteration without any jobs
725 in the queue before going to sleep */
726 #define NUM_WAITING_ITERATIONS 10
/* Return-value contract of monitor_heuristic (doc fragment): */
736 * - 1 if the number of threads should increase
737 * - 0 if it should not change
738 * - -1 if it should decrease
739 * - -2 in case of error
/*
 * NOTE(review): partial extract -- embedded line numbers jump; locals,
 * braces and several statements are missing, so this does not compile
 * as-is.  Code left byte-identical; comments only.
 */
/* monitor_heuristic: hill-climbing decision (+1/0/-1) on the worker count,
 * comparing the current sample period's throughput to the best recorded one. */
742 monitor_heuristic (gint16 *current, gint16 *history_size, SamplesHistory *history, ThreadPool *tp)
745 gint8 decision G_GNUC_UNUSED;
747 gboolean all_waitsleepjoin;
748 MonoInternalThread *thread;
751 * The following heuristic tries to approach the optimal number of threads to maximize jobs throughput. To
752 * achieve this, it simply stores the number of jobs executed (nexecuted), the number of Threads (nthreads)
753 * and the decision (nthreads_diff) for the past HISTORY_SIZE periods of time, each period being of
754 * duration SAMPLES_PERIOD ms. This history gives us an insight into what happened, and to see if we should
755 * increase or reduce the number of threads by comparing the last period (current) to the best one.
757 * The algorithm can be describe as following :
758 * - if we have a better throughput than the best period : we should either increase the number of threads
759 * in case we already have more threads, either reduce the number of threads if we have less threads; this
760 * is equivalent to move away from the number of threads of the best period, because we are currently better
761 * - if we have a worse throughput than the best period : we should either decrease the number of threads if
762 * we have more threads, either increase the number of threads if we have less threads; this is equivalent
763 * to get closer to the number of threads of the best period, because we are currently worse
/* Advance the circular history cursor and snapshot this period's sample.
 * NOTE(review): "% *history_size" (not % HISTORY_SIZE) looks suspicious
 * while the history is still growing -- confirm against the original. */
766 *history_size = MIN (*history_size + 1, HISTORY_SIZE);
767 cur = *current = (*current + 1) % *history_size;
769 history [cur].nthreads = tp->nthreads;
770 history [cur].nexecuted = InterlockedExchange (&tp->nexecuted, 0);
773 /* if we have waiting thread in the pool, then do not create a new one */
774 history [cur].nthreads_diff = tp->waiting > 1 ? -1 : 0;
776 } else if (tp->nthreads < tp->min_threads) {
777 history [cur].nthreads_diff = 1;
779 } else if (*history_size <= 1) {
780 /* first iteration, let's add a thread by default */
781 history [cur].nthreads_diff = 1;
/* Starvation check: if every registered worker is in WaitSleepJoin, the
 * pool may be deadlocked on its own tasks -- force a new thread. */
784 mono_mutex_lock (&threads_lock);
785 if (threads == NULL) {
786 mono_mutex_unlock (&threads_lock);
789 all_waitsleepjoin = TRUE;
790 for (i = 0; i < threads->len; ++i) {
791 thread = g_ptr_array_index (threads, i);
792 if (!(thread->state & ThreadState_WaitSleepJoin)) {
793 all_waitsleepjoin = FALSE;
797 mono_mutex_unlock (&threads_lock);
799 if (all_waitsleepjoin) {
800 /* we might be in a condition of starvation/deadlock with tasks waiting for each others */
801 history [cur].nthreads_diff = 1;
/* Find the best past period (excluding the current slot). */
804 max = cur == 0 ? 1 : 0;
805 for (i = 0; i < *history_size; i++) {
808 if (history [i].nexecuted > history [max].nexecuted)
812 if (history [cur].nexecuted >= history [max].nexecuted) {
813 /* we improved the situation, let's continue ! */
814 history [cur].nthreads_diff = history [cur].nthreads >= history [max].nthreads ? 1 : -1;
817 /* we made it worse, let's return to previous situation */
818 history [cur].nthreads_diff = history [cur].nthreads >= history [max].nthreads ? -1 : 1;
825 printf ("monitor_thread: decision: %1d, history [current]: {nexecuted: %5d, nthreads: %3d, waiting: %2d, nthreads_diff: %2d}, history [max]: {nexecuted: %5d, nthreads: %3d}\n",
826 decision, history [cur].nexecuted, history [cur].nthreads, tp->waiting, history [cur].nthreads_diff, history [max].nexecuted, history [max].nthreads);
829 return history [cur].nthreads_diff;
/*
 * NOTE(review): partial extract -- embedded line numbers jump; loop
 * headers, braces and several statements are missing.  Code left
 * byte-identical; comments only.
 */
/* monitor_thread: periodic rebalancer for both pools; sleeps on
 * monitor_sem when idle and applies monitor_heuristic when awake. */
833 monitor_thread (gpointer unused)
835 ThreadPool *pools [2];
836 MonoInternalThread *thread;
840 gint8 num_waiting_iterations = 0;
842 gint16 history_size = 0, current = -1;
/* NOTE(review): plain malloc with no NULL check visible; also no free is
 * visible in this extract -- confirm lifetime in the original. */
843 SamplesHistory *history = malloc (sizeof (SamplesHistory) * HISTORY_SIZE);
845 pools [0] = &async_tp;
846 pools [1] = &async_io_tp;
847 thread = mono_thread_internal_current ();
848 ves_icall_System_Threading_Thread_SetName_internal (thread, mono_string_new (mono_domain_get (), "Threadpool monitor"));
/* Interruptible sleep for the sample period, tolerating spurious wakes. */
851 i = 10; //number of spurious awakes we tolerate before doing a round of rebalancing.
854 ts = mono_msec_ticks ();
855 if (SleepEx (ms, TRUE) == 0)
857 ms -= (mono_msec_ticks () - ts);
858 if (mono_runtime_is_shutting_down ())
860 if (THREAD_WANTS_A_BREAK (thread))
861 mono_thread_interruption_checkpoint ();
862 } while (ms > 0 && i--);
864 if (mono_runtime_is_shutting_down ())
870 /* threadpool is cleaning up */
871 if (async_tp.pool_status == 2 || async_io_tp.pool_status == 2)
/* AWAKE -> FALLING_ASLEEP -> SLEEPING state machine; SLEEPING is only
 * ever entered via the CAS below, hence unreachable in the switch. */
874 switch (monitor_state) {
875 case MONITOR_STATE_AWAKE:
876 num_waiting_iterations = 0;
878 case MONITOR_STATE_FALLING_ASLEEP:
879 if (++num_waiting_iterations == NUM_WAITING_ITERATIONS) {
880 if (monitor_state == MONITOR_STATE_FALLING_ASLEEP && InterlockedCompareExchange (&monitor_state, MONITOR_STATE_SLEEPING, MONITOR_STATE_FALLING_ASLEEP) == MONITOR_STATE_FALLING_ASLEEP) {
881 MONO_SEM_WAIT (&monitor_sem);
883 num_waiting_iterations = 0;
889 case MONITOR_STATE_SLEEPING:
890 g_assert_not_reached ();
/* Rebalance each pool: start a worker if work is queued with nobody
 * waiting, otherwise follow the heuristic's +1/-1 decision. */
893 for (i = 0; i < 2; i++) {
898 if (!tp->waiting && mono_cq_count (tp->queue) > 0)
899 threadpool_start_thread (tp);
/* NOTE(review): "¤t" below is a mis-decoded "&current" (the HTML
 * entity &curren; swallowed the text) -- restore "&current" when fixing. */
901 gint8 nthreads_diff = monitor_heuristic (¤t, &history_size, history, tp);
903 if (nthreads_diff == 1)
904 threadpool_start_thread (tp);
905 else if (nthreads_diff == -1)
906 threadpool_kill_thread (tp);
/*
 * NOTE(review): partial extract -- embedded line numbers jump; function
 * headers, braces, locals and returns are missing.  Code left
 * byte-identical; comments only.
 */
/* mono_thread_pool_init_tls: per-thread setup; delegates to the MS pool
 * when selected (rest of the body missing from extract). */
913 mono_thread_pool_init_tls (void)
915 if (use_ms_threadpool ()) {
916 mono_threadpool_ms_init_tls ();
/* mono_thread_pool_init: one-time process-wide initialization, guarded by
 * the tp_inited CAS. */
924 mono_thread_pool_init (void)
926 gint threads_per_cpu = 1;
931 if (use_ms_threadpool ()) {
932 mono_threadpool_ms_init ();
936 cpu_count = mono_cpu_count ();
941 result = InterlockedCompareExchange (&tp_inited, 1, 0);
950 MONO_GC_REGISTER_ROOT_FIXED (socket_io_data.sock_to_state);
951 mono_mutex_init_recursive (&socket_io_data.io_lock);
/* MONO_THREADS_PER_CPU scales the non-IO pool's minimum size. */
952 if (g_getenv ("MONO_THREADS_PER_CPU") != NULL) {
953 threads_per_cpu = atoi (g_getenv ("MONO_THREADS_PER_CPU"));
954 if (threads_per_cpu < 1)
958 thread_count = MIN (cpu_count * threads_per_cpu, 100 * cpu_count);
959 threadpool_init (&async_tp, thread_count, MAX (100 * cpu_count, thread_count), async_invoke_thread);
960 threadpool_init (&async_io_tp, cpu_count * 2, cpu_count * 4, async_invoke_thread);
961 async_io_tp.is_io = TRUE;
963 async_call_klass = mono_class_from_name (mono_defaults.corlib, "System", "MonoAsyncCall");
964 g_assert (async_call_klass);
966 mono_mutex_init (&threads_lock);
967 threads = g_ptr_array_sized_new (thread_count);
970 mono_mutex_init_recursive (&wsqs_lock);
971 wsqs = g_ptr_array_sized_new (MAX (100 * cpu_count, thread_count));
973 #ifndef DISABLE_PERFCOUNTERS
974 async_tp.pc_nitems = init_perf_counter ("Mono Threadpool", "Work Items Added");
975 g_assert (async_tp.pc_nitems);
977 async_io_tp.pc_nitems = init_perf_counter ("Mono Threadpool", "IO Work Items Added");
978 g_assert (async_io_tp.pc_nitems);
980 async_tp.pc_nthreads = init_perf_counter ("Mono Threadpool", "# of Threads");
981 g_assert (async_tp.pc_nthreads);
983 async_io_tp.pc_nthreads = init_perf_counter ("Mono Threadpool", "# of IO Threads");
984 g_assert (async_io_tp.pc_nthreads);
/* DEBUG build: SIGALRM dumps pool stats (see signal_handler). */
988 signal (SIGALRM, signal_handler);
992 MONO_SEM_INIT (&monitor_sem, 0);
993 monitor_state = MONITOR_STATE_AWAKE;
997 static MonoAsyncResult *
998 create_simple_asyncresult (MonoObject *target, MonoObject *state)
1000 MonoDomain *domain = mono_domain_get ();
1001 MonoAsyncResult *ares;
1003 /* Don't call mono_async_result_new() to avoid capturing the context */
1004 ares = (MonoAsyncResult *) mono_object_new (domain, mono_defaults.asyncresult_class);
1005 MONO_OBJECT_SETREF (ares, async_delegate, target);
1006 MONO_OBJECT_SETREF (ares, async_state, state);
/*
 * NOTE(review): partial extract -- embedded line numbers jump; function
 * headers, braces, locals, returns and #else/#endif lines are missing.
 * Code left byte-identical; comments only.
 */
/* icall_append_io_job: queues an I/O-bound async call on the I/O pool
 * (or the MS pool when selected). */
1011 icall_append_io_job (MonoObject *target, MonoSocketAsyncResult *state)
1013 MonoAsyncResult *ares;
1015 ares = create_simple_asyncresult (target, (MonoObject *) state);
1017 if (use_ms_threadpool ()) {
1018 #ifndef DISABLE_SOCKETS
1019 mono_threadpool_ms_io_add (ares, state);
1024 socket_io_add (ares, state);
/* mono_thread_pool_add: public entry to queue an async delegate call;
 * socket/process calls are diverted to the I/O machinery. */
1028 mono_thread_pool_add (MonoObject *target, MonoMethodMessage *msg, MonoDelegate *async_callback,
1032 MonoAsyncResult *ares;
1035 if (use_ms_threadpool ())
1036 return mono_threadpool_ms_add (target, msg, async_callback, state);
1038 domain = mono_domain_get ();
1040 ac = (MonoAsyncCall*)mono_object_new (domain, async_call_klass);
1041 MONO_OBJECT_SETREF (ac, msg, msg);
1042 MONO_OBJECT_SETREF (ac, state, state);
1044 if (async_callback) {
1045 ac->cb_method = mono_get_delegate_invoke (((MonoObject *)async_callback)->vtable->klass);
1046 MONO_OBJECT_SETREF (ac, cb_target, async_callback);
1049 ares = mono_async_result_new (domain, NULL, ac->state, NULL, (MonoObject*)ac);
1050 MONO_OBJECT_SETREF (ares, async_delegate, target);
1052 #ifndef DISABLE_SOCKETS
1053 if (socket_io_filter (target, state)) {
1054 socket_io_add (ares, (MonoSocketAsyncResult *) state);
1058 threadpool_append_job (&async_tp, (MonoObject *) ares);
/* mono_thread_pool_finish: EndInvoke -- waits for completion (lazily
 * creating the wait handle) and returns exception/out-args. */
1063 mono_thread_pool_finish (MonoAsyncResult *ares, MonoArray **out_args, MonoObject **exc)
1068 if (use_ms_threadpool ()) {
1069 return mono_threadpool_ms_finish (ares, out_args, exc);
1075 /* check if already finished */
1076 mono_monitor_enter ((MonoObject *) ares);
/* Calling EndInvoke twice is an InvalidOperationException. */
1078 if (ares->endinvoke_called) {
1079 *exc = (MonoObject *) mono_get_exception_invalid_operation (NULL);
1080 mono_monitor_exit ((MonoObject *) ares);
1084 ares->endinvoke_called = 1;
1085 /* wait until we are really finished */
1086 if (!ares->completed) {
1087 if (ares->handle == NULL) {
1088 wait_event = CreateEvent (NULL, TRUE, FALSE, NULL);
1089 g_assert(wait_event != 0);
1090 MONO_OBJECT_SETREF (ares, handle, (MonoObject *) mono_wait_handle_new (mono_object_domain (ares), wait_event));
1092 wait_event = mono_wait_handle_get_handle ((MonoWaitHandle *) ares->handle);
/* Drop the monitor before blocking so the completing thread can signal. */
1094 mono_monitor_exit ((MonoObject *) ares);
1095 WaitForSingleObjectEx (wait_event, INFINITE, TRUE);
1097 mono_monitor_exit ((MonoObject *) ares);
1100 ac = (MonoAsyncCall *) ares->object_data;
1101 g_assert (ac != NULL);
1102 *exc = ac->msg->exc; /* FIXME: GC add write barrier */
1103 *out_args = ac->out_args;
/* threadpool_kill_idle_threads: caps max_threads to 0 and posts new_job
 * repeatedly so sleeping workers wake up and exit. */
1109 threadpool_kill_idle_threads (ThreadPool *tp)
1113 n = (gint) InterlockedCompareExchange (&tp->max_threads, 0, -1);
1116 MONO_SEM_POST (&tp->new_job);
/* mono_thread_pool_cleanup: shutdown path; pool_status 1 -> 2 transition
 * gates each pool's teardown. */
1121 mono_thread_pool_cleanup (void)
1123 if (use_ms_threadpool ()) {
1124 mono_threadpool_ms_cleanup ();
1128 if (InterlockedExchange (&async_io_tp.pool_status, 2) == 1) {
1129 socket_io_cleanup (&socket_io_data); /* Empty when DISABLE_SOCKETS is defined */
1130 threadpool_kill_idle_threads (&async_io_tp);
1133 if (async_io_tp.queue != NULL) {
1134 MONO_SEM_DESTROY (&async_io_tp.new_job);
1135 threadpool_free_queue (&async_io_tp);
1139 if (InterlockedExchange (&async_tp.pool_status, 2) == 1) {
1140 threadpool_kill_idle_threads (&async_tp);
1141 threadpool_free_queue (&async_tp);
1145 mono_mutex_lock (&threads_lock);
1147 g_ptr_array_free (threads, FALSE);
1149 mono_mutex_unlock (&threads_lock);
1153 mono_mutex_lock (&wsqs_lock);
1154 mono_wsq_cleanup ();
1156 g_ptr_array_free (wsqs, TRUE);
1158 mono_mutex_unlock (&wsqs_lock);
1159 MONO_SEM_DESTROY (&async_tp.new_job);
1162 MONO_SEM_DESTROY (&monitor_sem);
/* threadpool_start_thread: CAS-grows nthreads below max_threads and spawns
 * one worker, registering it in the threads array.
 * NOTE(review): two identical mono_thread_create_internal calls appear at
 * original lines 1179 and 1182 with lines 1177-1181 partly missing --
 * almost certainly alternative branches (#if/#else) in the original, not
 * a double spawn; confirm before restoring. */
1166 threadpool_start_thread (ThreadPool *tp)
1170 MonoInternalThread *thread;
1172 stack_size = (!tp->is_io) ? 0 : SMALL_STACK;
1173 while (!mono_runtime_is_shutting_down () && (n = tp->nthreads) < tp->max_threads) {
1174 if (InterlockedCompareExchange (&tp->nthreads, n + 1, n) == n) {
1175 #ifndef DISABLE_PERFCOUNTERS
1176 mono_perfcounter_update_value (tp->pc_nthreads, TRUE, 1);
1179 thread = mono_thread_create_internal (mono_get_root_domain (), tp->async_invoke, tp, TRUE, stack_size);
1181 mono_mutex_lock (&threads_lock);
1182 thread = mono_thread_create_internal (mono_get_root_domain (), tp->async_invoke, tp, TRUE, stack_size);
1183 g_assert (threads != NULL);
1184 g_ptr_array_add (threads, thread);
1185 mono_mutex_unlock (&threads_lock);
1195 pulse_on_new_job (ThreadPool *tp)
1198 MONO_SEM_POST (&tp->new_job);
1202 threadpool_kill_thread (ThreadPool *tp)
1204 if (tp->destroy_thread == 0 && InterlockedCompareExchange (&tp->destroy_thread, 1, 0) == 0)
1205 pulse_on_new_job (tp);
1209 icall_append_job (MonoObject *ar)
1211 threadpool_append_jobs (&async_tp, &ar, 1);
1215 threadpool_append_job (ThreadPool *tp, MonoObject *ar)
1217 threadpool_append_jobs (tp, &ar, 1);
1221 threadpool_append_async_io_jobs (MonoObject **jobs, gint njobs)
1223 threadpool_append_jobs (&async_io_tp, jobs, njobs);
/*
 * NOTE(review): partial extract -- embedded line numbers jump; headers,
 * braces, locals and returns are missing.  Code left byte-identical;
 * comments only.
 */
/* threadpool_append_jobs: core enqueue path.  First caller transitions
 * pool_status 0 -> 1 and boots the monitor thread; jobs then go to the
 * local work-stealing queue when possible, else the shared queue. */
1227 threadpool_append_jobs (ThreadPool *tp, MonoObject **jobs, gint njobs)
1232 if (mono_runtime_is_shutting_down ())
1235 if (tp->pool_status == 0 && InterlockedCompareExchange (&tp->pool_status, 1, 0) == 0) {
1237 monitor_internal_thread = mono_thread_create_internal (mono_get_root_domain (), monitor_thread, NULL, TRUE, SMALL_STACK);
1238 monitor_internal_thread->flags |= MONO_THREAD_FLAG_DONT_MANAGE;
1239 threadpool_start_thread (tp);
1241 /* Create on demand up to min_threads to avoid startup penalty for apps that don't use
1242 * the threadpool that much
1244 if (mono_config_is_server_mode ()) {
1245 mono_thread_create_internal (mono_get_root_domain (), threadpool_start_idle_threads, tp, TRUE, SMALL_STACK);
/* Wake the monitor if it was (falling) asleep. */
1249 InterlockedAdd (&monitor_njobs, njobs);
1251 if (monitor_state == MONITOR_STATE_SLEEPING && InterlockedCompareExchange (&monitor_state, MONITOR_STATE_AWAKE, MONITOR_STATE_SLEEPING) == MONITOR_STATE_SLEEPING)
1252 MONO_SEM_POST (&monitor_sem);
1254 if (monitor_state == MONITOR_STATE_FALLING_ASLEEP)
1255 InterlockedCompareExchange (&monitor_state, MONITOR_STATE_AWAKE, MONITOR_STATE_FALLING_ASLEEP);
1257 for (i = 0; i < njobs; i++) {
1259 if (ar == NULL || mono_domain_is_unloading (ar->vtable->domain))
1260 continue; /* Might happen when cleaning domain jobs */
1261 threadpool_jobs_inc (ar);
1262 #ifndef DISABLE_PERFCOUNTERS
1263 mono_perfcounter_update_value (tp->pc_nitems, TRUE, 1);
/* Prefer the current thread's WSQ for non-IO jobs; fall back to the CQ. */
1265 if (!tp->is_io && mono_wsq_local_push (ar))
1268 mono_cq_enqueue (tp->queue, ar);
1272 InterlockedAdd (&tp->njobs, njobs);
/* Wake up to min(njobs, max_threads) waiting workers. */
1275 for (i = 0; tp->waiting > 0 && i < MIN(njobs, tp->max_threads); i++)
1276 pulse_on_new_job (tp);
/* threadpool_clear_queue: drains the shared queue during a domain unload,
 * re-queueing jobs that belong to other domains. */
1280 threadpool_clear_queue (ThreadPool *tp, MonoDomain *domain)
1283 MonoMList *other = NULL;
1284 MonoCQ *queue = tp->queue;
1289 while (mono_cq_dequeue (queue, &obj)) {
1292 if (obj->vtable->domain != domain)
1293 other = mono_mlist_prepend (other, obj);
1294 threadpool_jobs_dec (obj);
1297 if (mono_runtime_is_shutting_down ())
1301 threadpool_append_job (tp, (MonoObject *) mono_mlist_get_data (other));
1302 other = mono_mlist_next (other);
/* remove_sockstate_for_domain: GHashTable foreach_remove callback that
 * detaches pending socket state belonging to the unloading domain. */
1307 remove_sockstate_for_domain (gpointer key, gpointer value, gpointer user_data)
1309 MonoMList *list = value;
1310 gboolean remove = FALSE;
1312 MonoObject *data = mono_mlist_get_data (list);
1313 if (mono_object_domain (data) == user_data) {
1315 mono_mlist_set_data (list, NULL);
1317 list = mono_mlist_next (list);
1319 //FIXME is there some sort of additional unregistration we need to perform here?
1324 * Clean up the threadpool of all domain jobs.
1325 * Can only be called as part of the domain unloading process as
1326 * it will wait for all jobs to be visible to the interruption code.
/* mono_thread_pool_remove_domain_jobs: clears queued jobs for @domain and
 * blocks (up to @timeout ms) until in-flight jobs drain to zero. */
1329 mono_thread_pool_remove_domain_jobs (MonoDomain *domain, int timeout)
1335 if (use_ms_threadpool ()) {
1336 return mono_threadpool_ms_remove_domain_jobs (domain, timeout);
1342 g_assert (domain->state == MONO_APPDOMAIN_UNLOADING);
1344 threadpool_clear_queue (&async_tp, domain);
1345 threadpool_clear_queue (&async_io_tp, domain);
1347 mono_mutex_lock (&socket_io_data.io_lock);
1348 if (socket_io_data.sock_to_state)
1349 mono_g_hash_table_foreach_remove (socket_io_data.sock_to_state, remove_sockstate_for_domain, domain);
1351 mono_mutex_unlock (&socket_io_data.io_lock);
1354 * There might be some threads out that could be about to execute stuff from the given domain.
1355 * We avoid that by setting up a semaphore to be pulsed by the thread that reaches zero.
1357 sem_handle = CreateSemaphore (NULL, 0, 1, NULL);
1359 domain->cleanup_semaphore = sem_handle;
1361 * The memory barrier here is required to have global ordering between assigning to cleanup_semaphone
1362 * and reading threadpool_jobs.
1363 * Otherwise this thread could read a stale version of threadpool_jobs and wait forever.
1365 mono_memory_write_barrier ();
1367 if (domain->threadpool_jobs && timeout != -1)
1368 start_time = mono_msec_ticks ();
1369 while (domain->threadpool_jobs) {
1370 WaitForSingleObject (sem_handle, timeout);
1371 if (timeout != -1 && (mono_msec_ticks () - start_time) > timeout) {
1377 domain->cleanup_semaphore = NULL;
1378 CloseHandle (sem_handle);
/*
 * threadpool_free_queue:
 * Destroy @tp's concurrent job queue. Presumably only safe once no more
 * producers/consumers touch tp->queue — TODO confirm against callers
 * (not visible in this excerpt).
 */
1383 threadpool_free_queue (ThreadPool *tp)
1385 mono_cq_destroy (tp->queue);
/*
 * mono_thread_pool_is_queue_array:
 * Tell the GC whether @o is one of the threadpool's internal queue arrays.
 * Delegates to the MS-style pool when that implementation is active; the
 * legacy implementation body is mostly commented out here (sgen FIXME).
 */
1390 mono_thread_pool_is_queue_array (MonoArray *o)
1392 if (use_ms_threadpool ()) {
1393 return mono_threadpool_ms_is_queue_array (o);
1396 // gpointer obj = o;
1398 // FIXME: need some fix in sgen code.
/*
 * NOTE(review): the function header for this block is not visible in this
 * excerpt; from the body it registers a newly created per-thread
 * work-stealing queue (wsq) in the global 'wsqs' array — presumably
 * add_wsq(); verify against the full file.
 * All access to 'wsqs' happens under wsqs_lock.
 */
1408 mono_mutex_lock (&wsqs_lock);
1409 wsq = mono_wsq_create ();
/* Early unlock path (condition line missing from this excerpt). */
1411 mono_mutex_unlock (&wsqs_lock);
/* Reuse a NULL slot left behind by a removed queue, if any. */
1414 for (i = 0; i < wsqs->len; i++) {
1415 if (g_ptr_array_index (wsqs, i) == NULL) {
1416 wsqs->pdata [i] = wsq;
1417 mono_mutex_unlock (&wsqs_lock);
/* No free slot: grow the array. */
1421 g_ptr_array_add (wsqs, wsq);
1422 mono_mutex_unlock (&wsqs_lock);
/*
 * remove_wsq:
 * Unregister a per-thread work-stealing queue from the global 'wsqs'
 * array, draining and destroying it when the runtime is shutting down.
 * NOTE(review): several lines (prologue, NULL checks, braces) are missing
 * from this excerpt.
 */
1427 remove_wsq (MonoWSQ *wsq)
1434 mono_mutex_lock (&wsqs_lock);
/* Early-exit unlock path (its condition is not visible here). */
1436 mono_mutex_unlock (&wsqs_lock);
/* O(1) removal; order of 'wsqs' does not matter. */
1439 g_ptr_array_remove_fast (wsqs, wsq);
1442 * Only clean this up when shutting down, any other case will error out
1443 * if we're removing a queue that still has work items.
1445 if (mono_runtime_is_shutting_down ()) {
/* Drop the pending-job count for every item still in the local queue. */
1446 while (mono_wsq_local_pop (&data)) {
1447 threadpool_jobs_dec (data);
1451 mono_wsq_destroy (wsq);
1452 mono_mutex_unlock (&wsqs_lock);
/*
 * try_steal:
 * Attempt to steal one work item from some other thread's work-stealing
 * queue into *@data. Skips @local_wsq (our own queue) and empty queues.
 * When @retry is TRUE the visible do/while keeps retrying while 'ms'
 * stays under 11 — 'ms' appears to be a growing steal timeout, but its
 * initialization/increment lines are missing from this excerpt.
 */
1456 try_steal (MonoWSQ *local_wsq, gpointer *data, gboolean retry)
/* Nothing to do without queues, without an out-slot, or if the caller
 * already holds an item. */
1461 if (wsqs == NULL || data == NULL || *data != NULL)
1466 if (mono_runtime_is_shutting_down ())
1469 mono_mutex_lock (&wsqs_lock);
/* 'wsqs' is re-checked each iteration in case it was torn down. */
1470 for (i = 0; wsqs != NULL && i < wsqs->len; i++) {
1473 wsq = wsqs->pdata [i];
/* Never steal from ourselves, and skip queues with no items. */
1474 if (wsq == local_wsq || mono_wsq_count (wsq) == 0)
1476 mono_wsq_try_steal (wsqs->pdata [i], data, ms);
/* Stop as soon as one item has been obtained. */
1477 if (*data != NULL) {
1478 mono_mutex_unlock (&wsqs_lock);
1482 mono_mutex_unlock (&wsqs_lock);
1484 } while (retry && ms < 11);
/*
 * dequeue_or_steal:
 * Try to obtain one job for a worker thread: first from @tp's shared
 * concurrent queue, then (for non-IO pools only) by stealing from other
 * threads' work-stealing queues. Returns whether *@data was filled.
 * NOTE(review): the early-return body for the shutdown/!queue case is
 * missing from this excerpt.
 */
1488 dequeue_or_steal (ThreadPool *tp, gpointer *data, MonoWSQ *local_wsq)
1490 MonoCQ *queue = tp->queue;
1491 if (mono_runtime_is_shutting_down () || !queue)
1493 mono_cq_dequeue (queue, (MonoObject **) data);
/* IO pool has no work-stealing; only fall back to stealing when the
 * shared queue yielded nothing. */
1494 if (!tp->is_io && !*data)
1495 try_steal (local_wsq, data, FALSE);
1496 return (*data != NULL);
/*
 * should_i_die:
 * Decide whether the calling worker thread should exit. A thread dies
 * only when a shrink request is pending (destroy_thread == 1, claimed
 * atomically so exactly one thread consumes it) AND the pool is above
 * its minimum thread count.
 */
1500 should_i_die (ThreadPool *tp)
1502 gboolean result = FALSE;
/* Cheap pre-check, then CAS to claim the single pending destroy token. */
1503 if (tp->destroy_thread == 1 && InterlockedCompareExchange (&tp->destroy_thread, 0, 1) == 1)
1504 result = (tp->nthreads > tp->min_threads);
/*
 * set_tp_thread_info:
 * One-time setup for a new worker thread: notify the profiler that the
 * thread started and give it a human-readable name that distinguishes
 * IO workers from regular workers.
 */
1509 set_tp_thread_info (ThreadPool *tp)
1512 MonoInternalThread *thread = mono_thread_internal_current ();
1514 mono_profiler_thread_start (thread->tid);
1515 name = (tp->is_io) ? "IO Threadpool worker" : "Threadpool worker";
/* FALSE: presumably "don't make the name permanent/managed-visible" —
 * TODO confirm against mono_thread_set_name_internal's signature. */
1516 mono_thread_set_name_internal (thread, mono_string_new (mono_domain_get (), name), FALSE);
/*
 * clear_thread_state:
 * Reset the worker's thread-state bits after running user code, keeping
 * only the Background flag — and restoring Background if the invoked
 * callback cleared it.
 */
1520 clear_thread_state (void)
1522 MonoInternalThread *thread = mono_thread_internal_current ();
1523 /* If the callee changes the background status, set it back to TRUE */
/* Clear every state bit except Background. */
1524 mono_thread_clr_state (thread , ~ThreadState_Background);
1525 if (!mono_thread_test_state (thread , ThreadState_Background))
1526 ves_icall_System_Threading_Thread_SetState (thread, ThreadState_Background);
/*
 * check_for_interruption_critical:
 * Service a pending thread interruption (abort/suspend/stop request)
 * from a worker that is normally marked as "skip" for the GC while it
 * idles. The skip flag must be dropped before touching managed state
 * (the interruption checkpoint) and restored afterwards.
 */
1530 check_for_interruption_critical (void)
1532 MonoInternalThread *thread;
1533 /*RULE NUMBER ONE OF SKIP_THREAD: NEVER POKE MANAGED STATE.*/
1534 mono_gc_set_skip_thread (FALSE);
1536 thread = mono_thread_internal_current ();
/* THREAD_WANTS_A_BREAK tests StopRequested|SuspendRequested (see macro
 * in the file header). */
1537 if (THREAD_WANTS_A_BREAK (thread))
1538 mono_thread_interruption_checkpoint ();
1540 /*RULE NUMBER TWO OF SKIP_THREAD: READ RULE NUMBER ONE.*/
1541 mono_gc_set_skip_thread (TRUE);
/*
 * fire_profiler_thread_end:
 * Notify the profiler that the current worker thread is exiting.
 */
1545 fire_profiler_thread_end (void)
1547 MonoInternalThread *thread = mono_thread_internal_current ();
1548 mono_profiler_thread_end (thread->tid);
/*
 * async_invoke_thread:
 * Main loop of a threadpool worker thread. Repeatedly: take a job
 * (MonoAsyncResult), switch to its domain, invoke it, then look for more
 * work in the local work-stealing queue / shared queue, napping on the
 * pool semaphore when idle, and finally retire the thread when told to
 * die or when the runtime shuts down.
 * NOTE(review): this excerpt is heavily gapped (many original lines are
 * missing, including most braces and declarations); comments below are
 * limited to what the visible lines establish.
 */
1552 async_invoke_thread (gpointer data)
/* Per-thread setup: profiler notification, thread name. */
1564 set_tp_thread_info (tp);
/* Optional embedder hook fired when a pool thread starts. */
1567 tp_start_func (tp_hooks_user_data);
1571 MonoAsyncResult *ar;
1573 gboolean is_io_task;
1578 ar = (MonoAsyncResult *) data;
/* A job is in hand: this thread counts as busy until it finishes. */
1580 InterlockedIncrement (&tp->busy_threads);
1581 domain = ((MonoObject *)ar)->vtable->domain;
1582 #ifndef DISABLE_SOCKETS
/* Distinguish plain corlib AsyncResults from IO (socket) results. */
1583 klass = ((MonoObject *) data)->vtable->klass;
1584 is_io_task = !is_corlib_asyncresult (domain, klass);
1587 MonoSocketAsyncResult *state = (MonoSocketAsyncResult *) data;
1588 is_socket = is_socketasyncresult (domain, klass);
/* Perform the actual socket operation for IO jobs before dispatching
 * the managed callback. (Other AIO_OP_* cases are not visible here.) */
1590 switch (state->operation) {
1591 case AIO_OP_RECEIVE:
1592 state->total = ICALL_RECV (state);
1595 state->total = ICALL_SEND (state);
1600 /* worker threads invokes methods in different domains,
1601 * so we need to set the right domain here */
/* Skip the job entirely if its domain is dying or the runtime is going
 * down; just drop the pending-job count. */
1604 if (mono_domain_is_unloading (domain) || mono_runtime_is_shutting_down ()) {
1605 threadpool_jobs_dec ((MonoObject *)ar);
1608 InterlockedDecrement (&tp->busy_threads);
/* Pin the domain so it cannot finish unloading while we run its code. */
1610 mono_thread_push_appdomain_ref (domain);
/* threadpool_jobs_dec returning TRUE here triggers an early bail-out
 * (body lines missing from this excerpt) — presumably the domain
 * finished draining; TODO confirm against the full file. */
1611 if (threadpool_jobs_dec ((MonoObject *)ar)) {
1614 mono_thread_pop_appdomain_ref ();
1615 InterlockedDecrement (&tp->busy_threads);
/* Switch to the job's domain; FALSE = do not force. */
1619 if (mono_domain_set (domain, FALSE)) {
/* Optional embedder hooks around each work item. */
1622 if (tp_item_begin_func)
1623 tp_item_begin_func (tp_item_user_data);
1625 exc = mono_async_invoke (tp, ar);
1626 if (tp_item_end_func)
1627 tp_item_end_func (tp_item_user_data);
/* Route any unhandled exception through runtime policy. */
1629 mono_internal_thread_unhandled_exception (exc);
/* For completed socket IO on the IO pool, queue the user callback as a
 * fresh job on the regular pool. */
1630 if (is_socket && tp->is_io) {
1631 MonoSocketAsyncResult *state = (MonoSocketAsyncResult *) data;
1633 if (state->completed && state->callback) {
1634 MonoAsyncResult *cb_ares;
1635 cb_ares = create_simple_asyncresult ((MonoObject *) state->callback,
1636 (MonoObject *) state);
1637 icall_append_job ((MonoObject *) cb_ares);
/* Back to the root domain (TRUE = force) and unpin the job's domain. */
1640 mono_domain_set (mono_get_root_domain (), TRUE);
1642 mono_thread_pop_appdomain_ref ();
1643 InterlockedDecrement (&tp->busy_threads);
/* Restore Background-only thread state after user code ran. */
1644 clear_thread_state ();
1650 must_die = should_i_die (tp);
1652 mono_wsq_suspend (wsq);
/* Prefer the local work-stealing queue (non-IO pools only); fall back
 * to the shared queue / stealing. */
1654 if (tp->is_io || !mono_wsq_local_pop (&data))
1655 dequeue_or_steal (tp, &data, wsq);
/* Idle loop: up to 4 "naps" waiting for new work before considering
 * retirement. */
1659 while (!must_die && !data && n_naps < 4) {
1662 InterlockedIncrement (&tp->waiting);
1664 // Another thread may have added a job into its wsq since the last call to dequeue_or_steal
1665 // Check all the queues again before entering the wait loop
1666 dequeue_or_steal (tp, &data, wsq);
1668 InterlockedDecrement (&tp->waiting);
/* Mark the thread skippable for the GC while it blocks on the
 * semaphore (no managed state may be touched in this window). */
1672 mono_gc_set_skip_thread (TRUE);
1674 #if defined(__OpenBSD__)
/* OpenBSD: untimed wait (mono_sem_timedwait presumably unreliable
 * there — TODO confirm); elsewhere: 2s timed wait per nap. */
1675 while (mono_cq_count (tp->queue) == 0 && (res = mono_sem_wait (&tp->new_job, TRUE)) == -1) {// && errno == EINTR) {
1677 while (mono_cq_count (tp->queue) == 0 && (res = mono_sem_timedwait (&tp->new_job, 2000, TRUE)) == -1) {// && errno == EINTR) {
1679 if (mono_runtime_is_shutting_down ())
/* EINTR path: service pending interruptions safely. */
1681 check_for_interruption_critical ();
1683 InterlockedDecrement (&tp->waiting);
1685 mono_gc_set_skip_thread (FALSE);
1687 if (mono_runtime_is_shutting_down ())
1689 must_die = should_i_die (tp);
1690 dequeue_or_steal (tp, &data, wsq);
/* Last chance: drain our own wsq even though we were told to die; if
 * that yields work, re-arm the destroy request and wake another thread
 * to honor it instead. */
1694 if (!data && !tp->is_io && !mono_runtime_is_shutting_down ()) {
1695 mono_wsq_local_pop (&data);
1696 if (data && must_die) {
1697 InterlockedCompareExchange (&tp->destroy_thread, 1, 0);
1698 pulse_on_new_job (tp);
/* Retirement: shrink nthreads via CAS unless that would go below the
 * pool minimum (shutdown overrides the minimum). */
1707 down = mono_runtime_is_shutting_down ();
1708 if (!down && nt <= tp->min_threads)
1710 if (down || InterlockedCompareExchange (&tp->nthreads, nt - 1, nt) == nt) {
1711 #ifndef DISABLE_PERFCOUNTERS
1712 mono_perfcounter_update_value (tp->pc_nthreads, TRUE, -1);
1718 fire_profiler_thread_end ();
/* Optional embedder hook fired right before the thread exits. */
1721 tp_finish_func (tp_hooks_user_data);
/* Drop this thread from the global registry of pool threads. */
1725 mono_mutex_lock (&threads_lock);
1727 g_ptr_array_remove_fast (threads, mono_thread_current ()->internal_thread);
1728 mono_mutex_unlock (&threads_lock);
/* The loop above never falls through to here. */
1738 g_assert_not_reached ();
/*
 * Icall: report how many more threads each pool could still put to work
 * (max minus currently-busy), for workers and IO ("completion port")
 * threads respectively. Reads are unsynchronized snapshots.
 */
1742 ves_icall_System_Threading_ThreadPool_GetAvailableThreads (gint *workerThreads, gint *completionPortThreads)
1744 *workerThreads = async_tp.max_threads - async_tp.busy_threads;
1745 *completionPortThreads = async_io_tp.max_threads - async_io_tp.busy_threads;
/*
 * Icall: return the configured maximum thread counts for the worker pool
 * and the IO pool.
 */
1749 ves_icall_System_Threading_ThreadPool_GetMaxThreads (gint *workerThreads, gint *completionPortThreads)
1751 *workerThreads = async_tp.max_threads;
1752 *completionPortThreads = async_io_tp.max_threads;
/*
 * Icall: return the configured minimum thread counts for the worker pool
 * and the IO pool.
 */
1756 ves_icall_System_Threading_ThreadPool_GetMinThreads (gint *workerThreads, gint *completionPortThreads)
1758 *workerThreads = async_tp.min_threads;
1759 *completionPortThreads = async_io_tp.min_threads;
/*
 * Icall: set the minimum thread counts. Rejects values that are
 * non-positive or exceed the corresponding pool's maximum (the rejecting
 * return lines are missing from this excerpt). If the new minimum is
 * above the current thread count, spawn an idle-thread starter to grow
 * the pool toward it.
 */
1763 ves_icall_System_Threading_ThreadPool_SetMinThreads (gint workerThreads, gint completionPortThreads)
1766 gint max_io_threads;
1768 max_threads = async_tp.max_threads;
/* Minimum must stay within (0, max]. */
1769 if (workerThreads <= 0 || workerThreads > max_threads)
1772 max_io_threads = async_io_tp.max_threads;
1773 if (completionPortThreads <= 0 || completionPortThreads > max_io_threads)
/* Atomic publish of the new minimums. */
1776 InterlockedExchange (&async_tp.min_threads, workerThreads);
1777 InterlockedExchange (&async_io_tp.min_threads, completionPortThreads);
/* Grow each pool asynchronously if it is under the new minimum. */
1778 if (workerThreads > async_tp.nthreads)
1779 mono_thread_create_internal (mono_get_root_domain (), threadpool_start_idle_threads, &async_tp, TRUE, SMALL_STACK);
1780 if (completionPortThreads > async_io_tp.nthreads)
1781 mono_thread_create_internal (mono_get_root_domain (), threadpool_start_idle_threads, &async_io_tp, TRUE, SMALL_STACK);
/*
 * Icall: set the maximum thread counts. Rejects values below the
 * corresponding pool's minimum or below the CPU count (the rejecting
 * return lines are missing from this excerpt).
 */
1786 ves_icall_System_Threading_ThreadPool_SetMaxThreads (gint workerThreads, gint completionPortThreads)
1789 gint min_io_threads;
1792 cpu_count = mono_cpu_count ();
1793 min_threads = async_tp.min_threads;
/* Maximum may not go below the minimum nor below the CPU count. */
1794 if (workerThreads < min_threads || workerThreads < cpu_count)
1797 /* We don't really have the concept of completion ports. Do we care here? */
1798 min_io_threads = async_io_tp.min_threads;
1799 if (completionPortThreads < min_io_threads || completionPortThreads < cpu_count)
/* Atomic publish of the new maximums. */
1802 InterlockedExchange (&async_tp.max_threads, workerThreads);
1803 InterlockedExchange (&async_io_tp.max_threads, completionPortThreads);
1808 * mono_install_threadpool_thread_hooks
1809 * @start_func: the function to be called right after a new threadpool thread is created. Can be NULL.
1810 * @finish_func: the function to be called right before a thredpool thread is exiting. Can be NULL.
1811 * @user_data: argument passed to @start_func and @finish_func.
1813 * @start_fun will be called right after a threadpool thread is created and @finish_func right before a threadpool thread exits.
1814 * The calls will be made from the thread itself.
1817 mono_install_threadpool_thread_hooks (MonoThreadPoolFunc start_func, MonoThreadPoolFunc finish_func, gpointer user_data)
/* Plain stores into file-scope hook slots; no synchronization is visible
 * here, so presumably callers install hooks before pool threads start. */
1819 tp_start_func = start_func;
1820 tp_finish_func = finish_func;
1821 tp_hooks_user_data = user_data;
1825 * mono_install_threadpool_item_hooks
1826 * @begin_func: the function to be called before a threadpool work item processing starts.
1827 * @end_func: the function to be called after a threadpool work item is finished.
1828 * @user_data: argument passed to @begin_func and @end_func.
1830 * The calls will be made from the thread itself and from the same AppDomain
1831 * where the work item was executed.
1835 mono_install_threadpool_item_hooks (MonoThreadPoolItemFunc begin_func, MonoThreadPoolItemFunc end_func, gpointer user_data)
/* Plain stores into file-scope hook slots (consumed around each work
 * item in async_invoke_thread). */
1837 tp_item_begin_func = begin_func;
1838 tp_item_end_func = end_func;
1839 tp_item_user_data = user_data;
/*
 * mono_internal_thread_unhandled_exception:
 * Apply the runtime's unhandled-exception policy to @exc raised on an
 * internal (pool) thread. ThreadAbortException and exceptions from an
 * already-unloaded AppDomain are not reported; aborts are reset so the
 * worker thread can keep running.
 */
1843 mono_internal_thread_unhandled_exception (MonoObject* exc)
/* Only act under the "current" policy (exceptions kill/report on the
 * thread where they happened). */
1845 if (mono_runtime_unhandled_exception_policy_get () == MONO_UNHANDLED_POLICY_CURRENT) {
1849 klass = exc->vtable->klass;
1850 unloaded = is_appdomainunloaded_exception (exc->vtable->domain, klass);
1851 if (!unloaded && klass != mono_defaults.threadabortexception_class) {
1852 mono_unhandled_exception (exc);
/* Exit-code 1 means "terminate the process on unhandled exception";
 * the exit call line is missing from this excerpt. */
1853 if (mono_environment_exitcode_get () == 1)
/* Swallow thread aborts: reset the abort so the pool thread survives. */
1856 if (klass == mono_defaults.threadabortexception_class)
1857 mono_thread_internal_reset_abort (mono_thread_internal_current ());
1862 * Suspend creation of new threads.
1865 mono_thread_pool_suspend (void)
/* Delegate to the MS-style pool when it is the active implementation. */
1867 if (use_ms_threadpool ()) {
1868 mono_threadpool_ms_suspend ();
1875 * Resume creation of new threads.
1878 mono_thread_pool_resume (void)
1880 if (use_ms_threadpool ()) {
1881 mono_threadpool_ms_resume ();