2 * threadpool.c: global thread pool
5 * Dietmar Maurer (dietmar@ximian.com)
6 * Gonzalo Paniagua Javier (gonzalo@ximian.com)
8 * Copyright 2001-2003 Ximian, Inc (http://www.ximian.com)
9 * Copyright 2004-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2001 Xamarin Inc (http://www.xamarin.com)
16 #include <mono/metadata/profiler-private.h>
17 #include <mono/metadata/threads.h>
18 #include <mono/metadata/threads-types.h>
19 #include <mono/metadata/threadpool-internals.h>
20 #include <mono/metadata/exception.h>
21 #include <mono/metadata/environment.h>
22 #include <mono/metadata/mono-config.h>
23 #include <mono/metadata/mono-mlist.h>
24 #include <mono/metadata/mono-perfcounters.h>
25 #include <mono/metadata/socket-io.h>
26 #include <mono/metadata/mono-cq.h>
27 #include <mono/metadata/mono-wsq.h>
28 #include <mono/metadata/mono-ptr-array.h>
29 #include <mono/metadata/object-internals.h>
30 #include <mono/io-layer/io-layer.h>
31 #include <mono/utils/mono-time.h>
32 #include <mono/utils/mono-proclib.h>
33 #include <mono/utils/mono-semaphore.h>
34 #include <mono/utils/atomic.h>
36 #ifdef HAVE_SYS_TIME_H
39 #include <sys/types.h>
46 #ifdef HAVE_SYS_SOCKET_H
47 #include <sys/socket.h>
49 #include <mono/utils/mono-poll.h>
51 #include <sys/epoll.h>
54 #include <sys/event.h>
58 #ifndef DISABLE_SOCKETS
59 #include "mono/io-layer/socket-wrappers.h"
62 #include "threadpool.h"
63 #include "threadpool-ms.h"
64 #include "threadpool-ms-io.h"
/*
 * use_ms_threadpool:
 * Decides, once, whether the Microsoft-style threadpool implementation
 * (threadpool-ms.c) should be used, caching the answer in use_ms_tp.
 * Driven by the MONO_THREADPOOL environment variable: unset => FALSE,
 * "microsoft" => TRUE, anything else => FALSE.
 * NOTE(review): sampled dump — the function's opening brace and the first
 * branch of the if-chain (presumably the "already computed" check) are
 * not visible here.
 */
67 use_ms_threadpool (void)
69 static gboolean use_ms_tp = -1;
70 const gchar *mono_threadpool_env;
/* -1 marks "not yet computed"; gboolean is an int typedef, so this is legal if unusual */
73 else if (!(mono_threadpool_env = g_getenv ("MONO_THREADPOOL")))
74 return use_ms_tp = FALSE;
75 else if (strcmp (mono_threadpool_env, "microsoft") == 0)
76 return use_ms_tp = TRUE;
78 return use_ms_tp = FALSE;
/* True when the thread has a pending stop/suspend request and should hit an
 * interruption checkpoint. */
81 #define THREAD_WANTS_A_BREAK(t) ((t->state & (ThreadState_StopRequested | \
82 ThreadState_SuspendRequested)) != 0)
84 /* DEBUG: prints tp data every 2s */
87 /* mono_thread_pool_init called */
88 static volatile int tp_inited;
/* Monitor thread state machine values (enum body partially sampled):
 * AWAKE -> FALLING_ASLEEP -> SLEEPING; see monitor_thread/monitor_heuristic. */
98 MONITOR_STATE_FALLING_ASLEEP,
99 MONITOR_STATE_SLEEPING
/* Shared state for the async socket-IO subsystem (poll/epoll/kqueue). */
102 static SocketIOData socket_io_data;
/* ThreadPool struct fields (struct header not visible in this sample). */
106 MonoCQ *queue; /* GC root */
108 volatile gint waiting; /* threads waiting for a work item */
111 volatile gint pool_status; /* 0 -> not initialized, 1 -> initialized, 2 -> cleaning up */
112 /* min, max, n and busy -> Interlocked */
113 volatile gint min_threads;
114 volatile gint max_threads;
115 volatile gint nthreads;
116 volatile gint busy_threads;
/* Per-pool worker entry point (async_invoke_thread for both pools here). */
118 void (*async_invoke) (gpointer data);
119 void *pc_nitems; /* Performance counter for total number of items in added */
120 void *pc_nthreads; /* Performance counter for total number of active threads */
/* Set to 1 by threadpool_kill_thread to ask one idle worker to exit. */
122 volatile gint destroy_thread;
124 volatile gint32 njobs;
/* Jobs executed in the current monitor sampling period; reset by the heuristic. */
126 volatile gint32 nexecuted;
/* The two global pools: one for CPU-bound async work, one for socket IO. */
130 static ThreadPool async_tp;
131 static ThreadPool async_io_tp;
/* Forward declarations for the pool machinery defined below. */
133 static void async_invoke_thread (gpointer data);
134 static MonoObject *mono_async_invoke (ThreadPool *tp, MonoAsyncResult *ares);
135 static void threadpool_free_queue (ThreadPool *tp);
136 static void threadpool_append_job (ThreadPool *tp, MonoObject *ar);
137 static void threadpool_append_jobs (ThreadPool *tp, MonoObject **jobs, gint njobs);
138 static void threadpool_init (ThreadPool *tp, int min_threads, int max_threads, void (*async_invoke) (gpointer));
139 static void threadpool_start_idle_threads (ThreadPool *tp);
140 static void threadpool_kill_idle_threads (ThreadPool *tp);
141 static gboolean threadpool_start_thread (ThreadPool *tp);
142 static void threadpool_kill_thread (ThreadPool *tp);
143 static void monitor_thread (gpointer data);
144 static int get_event_from_state (MonoSocketAsyncResult *state);
/* Lazily-resolved classes used to route jobs to the right pool. */
146 static MonoClass *async_call_klass;
147 static MonoClass *socket_async_call_klass;
148 static MonoClass *process_async_call_klass;
/* All live worker threads (for the starvation check) and per-worker
 * work-stealing queues, each guarded by its own lock. */
150 static GPtrArray *threads;
151 mono_mutex_t threads_lock;
152 static GPtrArray *wsqs;
153 mono_mutex_t wsqs_lock;
154 static gboolean suspended;
/* Monitor-thread bookkeeping: outstanding job count, sleep state machine,
 * and the semaphore used to wake a sleeping monitor. */
156 static volatile gint32 monitor_njobs = 0;
157 static volatile gint32 monitor_state;
158 static MonoSemType monitor_sem;
159 static MonoInternalThread *monitor_internal_thread;
/* User-installable hooks around pool-thread start/finish and item execution. */
162 static MonoThreadPoolFunc tp_start_func;
163 static MonoThreadPoolFunc tp_finish_func;
164 static gpointer tp_hooks_user_data;
165 static MonoThreadPoolItemFunc tp_item_begin_func;
166 static MonoThreadPoolItemFunc tp_item_end_func;
167 static gpointer tp_item_user_data;
/* Fragment of the AIO operation enum (full list not sampled). The
 * *_JUST_CALLBACK variants invoke the callback without touching the socket
 * (used when a socket is forcibly removed). */
177 AIO_OP_RECV_JUST_CALLBACK,
178 AIO_OP_SEND_JUST_CALLBACK,
182 AIO_OP_ACCEPTRECEIVE,
183 AIO_OP_RECEIVE_BUFFERS,
/* IO backend selection: poll is always available; epoll/kqueue are compiled
 * in depending on the platform. */
188 // #include <mono/metadata/tpool-poll.c>
189 gpointer tp_poll_init (SocketIOData *data);
192 #include <mono/metadata/tpool-epoll.c>
193 #elif defined(USE_KQUEUE_FOR_THREADPOOL)
194 #include <mono/metadata/tpool-kqueue.c>
197 * Functions to check whenever a class is given system class. We need to cache things in MonoDomain since some of the
198 * assemblies can be unloaded.
/* Returns TRUE if klass lives in the "System" assembly image of this domain
 * (image pointer cached per-domain because assemblies can unload). */
202 is_system_type (MonoDomain *domain, MonoClass *klass)
204 if (domain->system_image == NULL)
205 domain->system_image = mono_image_loaded ("System");
207 return klass->image == domain->system_image;
/* Returns TRUE if klass is defined in mscorlib. */
211 is_corlib_type (MonoDomain *domain, MonoClass *klass)
213 return klass->image == mono_defaults.corlib;
217 * Note that we call it is_socket_type() where 'socket' refers to the image
218 * that contains the System.Net.Sockets.Socket type.
221 is_socket_type (MonoDomain *domain, MonoClass *klass)
223 return is_system_type (domain, klass);
/* Caches the matched class pointer in *loc (a per-domain slot) so repeated
 * checks are a single pointer compare. Macro body partially sampled. */
226 #define check_type_cached(domain, ASSEMBLY, _class, _namespace, _name, loc) do { \
228 return *loc == _class; \
229 if (is_##ASSEMBLY##_type (domain, _class) && !strcmp (_name, _class->name) && !strcmp (_namespace, _class->name_space)) { \
236 #define check_corlib_type_cached(domain, _class, _namespace, _name, loc) check_type_cached (domain, corlib, _class, _namespace, _name, loc)
238 #define check_socket_type_cached(domain, _class, _namespace, _name, loc) check_type_cached (domain, socket, _class, _namespace, _name, loc)
240 #define check_system_type_cached(domain, _class, _namespace, _name, loc) check_type_cached (domain, system, _class, _namespace, _name, loc)
/* TRUE if klass is System.Runtime.Remoting.Messaging.AsyncResult (cached). */
243 is_corlib_asyncresult (MonoDomain *domain, MonoClass *klass)
245 check_corlib_type_cached (domain, klass, "System.Runtime.Remoting.Messaging", "AsyncResult", &domain->corlib_asyncresult_class);
/* TRUE if klass is System.Net.Sockets.Socket (cached per domain). */
249 is_socket (MonoDomain *domain, MonoClass *klass)
251 check_socket_type_cached (domain, klass, "System.Net.Sockets", "Socket", &domain->socket_class);
/* TRUE if klass is the nested Socket+SocketAsyncResult type. */
255 is_socketasyncresult (MonoDomain *domain, MonoClass *klass)
257 return (klass->nested_in &&
258 is_socket (domain, klass->nested_in) &&
259 !strcmp (klass->name, "SocketAsyncResult"));
/* TRUE if klass is the nested Socket+SocketAsyncCall delegate type. */
263 is_socketasynccall (MonoDomain *domain, MonoClass *klass)
265 return (klass->nested_in &&
266 is_socket (domain, klass->nested_in) &&
267 !strcmp (klass->name, "SocketAsyncCall"));
/* TRUE if klass is System.AppDomainUnloadedException (cached). */
271 is_appdomainunloaded_exception (MonoDomain *domain, MonoClass *klass)
273 check_corlib_type_cached (domain, klass, "System", "AppDomainUnloadedException", &domain->ad_unloaded_ex_class);
/* TRUE if klass is System.Diagnostics.Process (cached). */
277 is_sd_process (MonoDomain *domain, MonoClass *klass)
279 check_system_type_cached (domain, klass, "System.Diagnostics", "Process", &domain->process_class);
/* TRUE if klass is the nested Process+AsyncReadHandler type. */
283 is_sdp_asyncreadhandler (MonoDomain *domain, MonoClass *klass)
286 return (klass->nested_in &&
287 is_sd_process (domain, klass->nested_in) &&
288 !strcmp (klass->name, "AsyncReadHandler"));
/* With DISABLE_SOCKETS, the socket-IO helpers become unreachable stubs. */
292 #ifdef DISABLE_SOCKETS
295 socket_io_cleanup (SocketIOData *data)
300 get_event_from_state (MonoSocketAsyncResult *state)
302 g_assert_not_reached ();
307 get_events_from_list (MonoMList *list)
/* Real implementations (the #else branch). */
/* Shuts down the IO backend if it was fully initialized (inited == 2);
 * holds io_lock across the backend shutdown call. */
315 socket_io_cleanup (SocketIOData *data)
317 mono_mutex_lock (&data->io_lock);
318 if (data->inited != 2) {
319 mono_mutex_unlock (&data->io_lock);
323 data->shutdown (data->event_data);
324 mono_mutex_unlock (&data->io_lock);
/* Maps an AIO operation to the poll event it waits on: receive-like ops
 * => MONO_POLLIN, send-like ops => MONO_POLLOUT (return statements for the
 * grouped cases are not visible in this sample). */
328 get_event_from_state (MonoSocketAsyncResult *state)
330 switch (state->operation) {
333 case AIO_OP_RECV_JUST_CALLBACK:
334 case AIO_OP_RECEIVEFROM:
335 case AIO_OP_READPIPE:
336 case AIO_OP_ACCEPTRECEIVE:
337 case AIO_OP_RECEIVE_BUFFERS:
340 case AIO_OP_SEND_JUST_CALLBACK:
343 case AIO_OP_SEND_BUFFERS:
344 case AIO_OP_DISCONNECT:
346 default: /* Should never happen */
347 g_message ("get_event_from_state: unknown value in switch!!!");
/* ORs together the poll events of every async result queued on one socket. */
353 get_events_from_list (MonoMList *list)
355 MonoSocketAsyncResult *state;
358 while (list && (state = (MonoSocketAsyncResult *)mono_mlist_get_data (list))) {
359 events |= get_event_from_state (state);
360 list = mono_mlist_next (list);
/* Convenience wrappers around the socket receive/send icalls. */
366 #define ICALL_RECV(x) ves_icall_System_Net_Sockets_Socket_Receive_internal (\
367 (SOCKET)(gssize)x->handle, x->buffer, x->offset, x->size,\
368 x->socket_flags, &x->error);
370 #define ICALL_SEND(x) ves_icall_System_Net_Sockets_Socket_Send_internal (\
371 (SOCKET)(gssize)x->handle, x->buffer, x->offset, x->size,\
372 x->socket_flags, &x->error);
374 #endif /* !DISABLE_SOCKETS */
/* Bumps the per-domain outstanding-job counter (used by domain unload to
 * wait for pending pool work). */
377 threadpool_jobs_inc (MonoObject *obj)
380 InterlockedIncrement (&obj->vtable->domain->threadpool_jobs);
/* Decrements the per-domain job counter; when it reaches zero and a domain
 * is waiting on cleanup_semaphore (see mono_thread_pool_remove_domain_jobs),
 * releases the waiter. */
384 threadpool_jobs_dec (MonoObject *obj)
392 domain = obj->vtable->domain;
393 remaining_jobs = InterlockedDecrement (&domain->threadpool_jobs);
394 if (remaining_jobs == 0 && domain->cleanup_semaphore) {
395 ReleaseSemaphore (domain->cleanup_semaphore, 1, NULL);
/* Finds and unlinks the first queued async result on *list whose operation
 * maps to 'event' (MONO_POLLIN/MONO_POLLOUT). Mutates *list when the match
 * is the head. Body heavily sampled; return/declaration lines not visible. */
402 get_io_event (MonoMList **list, gint event)
412 state = mono_mlist_get_data (current);
413 if (get_event_from_state ((MonoSocketAsyncResult *) state) == event)
418 current = mono_mlist_next (current);
423 mono_mlist_set_next (prev, mono_mlist_next (current));
425 *list = mono_mlist_next (*list);
433 * select/poll wake up when a socket is closed, but epoll just removes
434 * the socket from its internal list without notification.
/* Called when a socket is closed: pulls its pending async results out of
 * sock_to_state, converts pending recv/send ops to *_JUST_CALLBACK so the
 * managed callbacks still fire, and queues them on the IO pool. */
437 mono_thread_pool_remove_socket (int sock)
440 MonoSocketAsyncResult *state;
/* Delegate entirely to the MS implementation when selected. */
443 if (use_ms_threadpool ()) {
444 #ifndef DISABLE_SOCKETS
445 mono_threadpool_ms_io_remove_socket (sock);
/* Nothing to do if socket IO was never initialized. */
450 if (socket_io_data.inited == 0)
453 mono_mutex_lock (&socket_io_data.io_lock);
454 if (socket_io_data.sock_to_state == NULL) {
455 mono_mutex_unlock (&socket_io_data.io_lock);
458 list = mono_g_hash_table_lookup (socket_io_data.sock_to_state, GINT_TO_POINTER (sock));
460 mono_g_hash_table_remove (socket_io_data.sock_to_state, GINT_TO_POINTER (sock));
461 mono_mutex_unlock (&socket_io_data.io_lock);
/* Downgrade real IO ops to callback-only so workers won't touch the
 * (now closed) socket. NOTE(review): only the list head is visible being
 * rewritten here; the loop over the rest is not sampled. */
464 state = (MonoSocketAsyncResult *) mono_mlist_get_data (list);
465 if (state->operation == AIO_OP_RECEIVE)
466 state->operation = AIO_OP_RECV_JUST_CALLBACK;
467 else if (state->operation == AIO_OP_SEND)
468 state->operation = AIO_OP_SEND_JUST_CALLBACK;
/* Drain both read and write events into the IO pool queue. */
470 ares = get_io_event (&list, MONO_POLLIN);
471 threadpool_append_job (&async_io_tp, ares);
473 ares = get_io_event (&list, MONO_POLLOUT);
474 threadpool_append_job (&async_io_tp, ares);
/* Initializes the selected IO backend (epoll or kqueue when compiled in),
 * falling back to poll() if epoll initialization fails. */
480 init_event_system (SocketIOData *data)
483 if (data->event_system == EPOLL_BACKEND) {
484 data->event_data = tp_epoll_init (data);
485 if (data->event_data == NULL) {
486 if (g_getenv ("MONO_DEBUG"))
487 g_message ("Falling back to poll()");
488 data->event_system = POLL_BACKEND;
491 #elif defined(USE_KQUEUE_FOR_THREADPOOL)
492 if (data->event_system == KQUEUE_BACKEND)
493 data->event_data = tp_kqueue_init (data);
495 if (data->event_system == POLL_BACKEND)
496 data->event_data = tp_poll_init (data);
/* One-time lazy initialization of the socket-IO subsystem: races are
 * resolved with an interlocked CAS on data->inited (0 -> 1 while
 * initializing; 2 means done, 3 means cleaned up). Creates the backend
 * wait thread and makes sure the IO pool has at least one worker. */
500 socket_io_init (SocketIOData *data)
504 if (data->inited >= 2) // 2 -> initialized, 3-> cleaned up
507 inited = InterlockedCompareExchange (&data->inited, 1, 0);
/* NOTE(review): the loser of the CAS presumably spins/waits here; those
 * lines are not visible in this sample. */
510 if (data->inited >= 2)
516 mono_mutex_lock (&data->io_lock);
517 data->sock_to_state = mono_g_hash_table_new_type (g_direct_hash, g_direct_equal, MONO_HASH_VALUE_GC);
/* Pick the best available backend; MONO_DISABLE_AIO forces plain poll(). */
519 data->event_system = EPOLL_BACKEND;
520 #elif defined(USE_KQUEUE_FOR_THREADPOOL)
521 data->event_system = KQUEUE_BACKEND;
523 data->event_system = POLL_BACKEND;
525 if (g_getenv ("MONO_DISABLE_AIO") != NULL)
526 data->event_system = POLL_BACKEND;
528 init_event_system (data);
529 mono_thread_create_internal (mono_get_root_domain (), data->wait, data, TRUE, SMALL_STACK);
530 mono_mutex_unlock (&data->io_lock);
/* Makes sure an IO thread is around to process results. */
532 threadpool_start_thread (&async_io_tp);
/* Registers an async socket operation: links state to its MonoAsyncResult,
 * appends it to the socket's per-fd list in sock_to_state, and tells the
 * backend (data->modify) which poll events to watch. */
536 socket_io_add (MonoAsyncResult *ares, MonoSocketAsyncResult *state)
539 SocketIOData *data = &socket_io_data;
544 socket_io_init (&socket_io_data);
/* Bail out during shutdown / after IO cleanup / while the pool cleans up. */
545 if (mono_runtime_is_shutting_down () || data->inited == 3 || data->sock_to_state == NULL)
547 if (async_tp.pool_status == 2)
550 MONO_OBJECT_SETREF (state, ares, ares);
552 fd = GPOINTER_TO_INT (state->handle);
553 mono_mutex_lock (&data->io_lock);
554 if (data->sock_to_state == NULL) {
555 mono_mutex_unlock (&data->io_lock);
/* First operation on this fd allocates the list; later ones append. */
558 list = mono_g_hash_table_lookup (data->sock_to_state, GINT_TO_POINTER (fd));
560 list = mono_mlist_alloc ((MonoObject*)state);
563 list = mono_mlist_append (list, (MonoObject*)state);
567 mono_g_hash_table_replace (data->sock_to_state, state->handle, list);
568 ievt = get_events_from_list (list);
569 /* The modify function leaves the io_lock critical section. */
570 data->modify (data, fd, state->operation, ievt, is_new);
573 #ifndef DISABLE_SOCKETS
/* Returns TRUE when a queued delegate call is really a socket/process IO
 * operation (SocketAsyncCall or Process.AsyncReadHandler) with a valid AIO
 * op code, so it can be routed through the IO pool instead. */
575 socket_io_filter (MonoObject *target, MonoObject *state)
578 MonoSocketAsyncResult *sock_res;
582 if (target == NULL || state == NULL)
585 domain = target->vtable->domain;
586 klass = target->vtable->klass;
/* Resolve and cache the two special delegate classes on first sight. */
587 if (socket_async_call_klass == NULL && is_socketasynccall (domain, klass))
588 socket_async_call_klass = klass;
590 if (process_async_call_klass == NULL && is_sdp_asyncreadhandler (domain, klass))
591 process_async_call_klass = klass;
593 if (klass != socket_async_call_klass && klass != process_async_call_klass)
596 sock_res = (MonoSocketAsyncResult *) state;
597 op = sock_res->operation;
598 if (op < AIO_OP_FIRST || op >= AIO_OP_LAST)
603 #endif /* !DISABLE_SOCKETS */
605 /* Returns the exception thrown when invoking, if any */
/* Executes one async result on a pool thread, updating the pool's job and
 * throughput counters, and flipping the monitor toward sleep when the last
 * outstanding job completes. Body partially sampled. */
607 mono_async_invoke (ThreadPool *tp, MonoAsyncResult *ares)
609 MonoObject *exc = NULL;
611 mono_async_result_invoke (ares, &exc);
614 InterlockedDecrement (&tp->njobs);
/* Feeds the monitor heuristic's per-period throughput sample. */
617 InterlockedIncrement (&tp->nexecuted);
619 if (InterlockedDecrement (&monitor_njobs) == 0)
620 monitor_state = MONITOR_STATE_FALLING_ASLEEP;
/* Spins up workers until min_threads are running; the nthreads slot is
 * claimed with a CAS before each thread is created. IO workers get a small
 * stack, CPU workers the default. */
626 threadpool_start_idle_threads (ThreadPool *tp)
631 stack_size = (!tp->is_io) ? 0 : SMALL_STACK;
635 if (n >= tp->min_threads)
637 if (InterlockedCompareExchange (&tp->nthreads, n + 1, n) == n)
640 #ifndef DISABLE_PERFCOUNTERS
641 mono_perfcounter_update_value (tp->pc_nthreads, TRUE, 1);
643 mono_thread_create_internal (mono_get_root_domain (), tp->async_invoke, tp, TRUE, stack_size);
/* Zeroes and configures one ThreadPool: bounds, worker entry point,
 * concurrent job queue and the new-job semaphore. */
649 threadpool_init (ThreadPool *tp, int min_threads, int max_threads, void (*async_invoke) (gpointer))
651 memset (tp, 0, sizeof (ThreadPool));
652 tp->min_threads = min_threads;
653 tp->max_threads = max_threads;
654 tp->async_invoke = async_invoke;
655 tp->queue = mono_cq_create ();
656 MONO_SEM_INIT (&tp->new_job, 0);
659 #ifndef DISABLE_PERFCOUNTERS
/* Looks up (or creates) a performance counter instance on the local
 * machine (".") in the root domain; returns the counter impl handle. */
661 init_perf_counter (const char *category, const char *counter)
663 MonoString *category_str;
664 MonoString *counter_str;
670 if (category == NULL || counter == NULL)
672 root = mono_get_root_domain ();
673 category_str = mono_string_new (root, category);
674 counter_str = mono_string_new (root, counter);
675 machine = mono_string_new (root, ".");
676 return mono_perfcounter_get_impl (category_str, counter_str, NULL, machine, &type, &custom);
/* DEBUG: dumps one pool's counters to stdout; counters are read with
 * no-op InterlockedCompareExchange(x, 0, 0) to get a coherent volatile
 * read. For the CPU pool it also prints each work-stealing queue depth. */
682 print_pool_info (ThreadPool *tp)
685 // if (tp->tail - tp->head == 0)
688 g_print ("Pool status? %d\n", InterlockedCompareExchange (&tp->pool_status, 0, 0));
689 g_print ("Min. threads: %d\n", InterlockedCompareExchange (&tp->min_threads, 0, 0));
690 g_print ("Max. threads: %d\n", InterlockedCompareExchange (&tp->max_threads, 0, 0));
691 g_print ("nthreads: %d\n", InterlockedCompareExchange (&tp->nthreads, 0, 0));
692 g_print ("busy threads: %d\n", InterlockedCompareExchange (&tp->busy_threads, 0, 0));
693 g_print ("Waiting: %d\n", InterlockedCompareExchange (&tp->waiting, 0, 0));
694 g_print ("Queued: %d\n", (tp->tail - tp->head));
695 if (tp == &async_tp) {
697 mono_mutex_lock (&wsqs_lock);
698 for (i = 0; i < wsqs->len; i++) {
699 g_print ("\tWSQ %d: %d\n", i, mono_wsq_count (g_ptr_array_index (wsqs, i)));
701 mono_mutex_unlock (&wsqs_lock);
703 g_print ("\tSockets: %d\n", mono_g_hash_table_size (socket_io_data.sock_to_state));
705 g_print ("-------------\n");
/* DEBUG: SIGALRM handler that dumps both pools (see mono_thread_pool_init). */
709 signal_handler (int signo)
714 g_print ("\n-----Non-IO-----\n");
715 print_pool_info (tp);
717 g_print ("\n-----IO-----\n");
718 print_pool_info (tp);
/* Monitor tuning knobs: sample period (ms), how many periods of history we
 * keep, and how many empty iterations before the monitor goes to sleep. */
723 #define SAMPLES_PERIOD 500
724 #define HISTORY_SIZE 10
725 /* number of iteration without any jobs
726 in the queue before going to sleep */
727 #define NUM_WAITING_ITERATIONS 10
/* Return value of monitor_heuristic (per the original docs): */
737 * - 1 if the number of threads should increase
738 * - 0 if it should not change
739 * - -1 if it should decrease
740 * - -2 in case of error
743 monitor_heuristic (gint16 *current, gint16 *history_size, SamplesHistory *history, ThreadPool *tp)
746 gint8 decision G_GNUC_UNUSED;
748 gboolean all_waitsleepjoin;
749 MonoInternalThread *thread;
752 * The following heuristic tries to approach the optimal number of threads to maximize jobs throughput. To
753 * achieve this, it simply stores the number of jobs executed (nexecuted), the number of Threads (nthreads)
754 * and the decision (nthreads_diff) for the past HISTORY_SIZE periods of time, each period being of
755 * duration SAMPLES_PERIOD ms. This history gives us an insight into what happened, and to see if we should
756 * increase or reduce the number of threads by comparing the last period (current) to the best one.
758 * The algorithm can be describe as following :
759 * - if we have a better throughput than the best period : we should either increase the number of threads
760 * in case we already have more threads, either reduce the number of threads if we have less threads; this
761 * is equivalent to move away from the number of threads of the best period, because we are currently better
762 * - if we have a worse throughput than the best period : we should either decrease the number of threads if
763 * we have more threads, either increase the number of threads if we have less threads; this is equivalent
764 * to get closer to the number of threads of the best period, because we are currently worse
/* Record this period's sample (thread count + executed jobs, which is
 * atomically swapped back to zero for the next period). */
767 *history_size = MIN (*history_size + 1, HISTORY_SIZE);
768 cur = *current = (*current + 1) % *history_size;
770 history [cur].nthreads = tp->nthreads;
771 history [cur].nexecuted = InterlockedExchange (&tp->nexecuted, 0);
774 /* if we have waiting thread in the pool, then do not create a new one */
775 history [cur].nthreads_diff = tp->waiting > 1 ? -1 : 0;
777 } else if (tp->nthreads < tp->min_threads) {
778 history [cur].nthreads_diff = 1;
780 } else if (*history_size <= 1) {
781 /* first iteration, let's add a thread by default */
782 history [cur].nthreads_diff = 1;
/* Starvation/deadlock check: if every pool thread is in WaitSleepJoin,
 * jobs may be waiting on each other, so force a new thread. */
785 mono_mutex_lock (&threads_lock);
786 if (threads == NULL) {
787 mono_mutex_unlock (&threads_lock);
790 all_waitsleepjoin = TRUE;
791 for (i = 0; i < threads->len; ++i) {
792 thread = g_ptr_array_index (threads, i);
793 if (!(thread->state & ThreadState_WaitSleepJoin)) {
794 all_waitsleepjoin = FALSE;
798 mono_mutex_unlock (&threads_lock);
800 if (all_waitsleepjoin) {
801 /* we might be in a condition of starvation/deadlock with tasks waiting for each others */
802 history [cur].nthreads_diff = 1;
/* Find the best-throughput period in the history (excluding the current
 * slot — note the sample at original line 807-812 is partly missing). */
805 max = cur == 0 ? 1 : 0;
806 for (i = 0; i < *history_size; i++) {
809 if (history [i].nexecuted > history [max].nexecuted)
813 if (history [cur].nexecuted >= history [max].nexecuted) {
814 /* we improved the situation, let's continue ! */
815 history [cur].nthreads_diff = history [cur].nthreads >= history [max].nthreads ? 1 : -1;
818 /* we made it worse, let's return to previous situation */
819 history [cur].nthreads_diff = history [cur].nthreads >= history [max].nthreads ? -1 : 1;
826 printf ("monitor_thread: decision: %1d, history [current]: {nexecuted: %5d, nthreads: %3d, waiting: %2d, nthreads_diff: %2d}, history [max]: {nexecuted: %5d, nthreads: %3d}\n",
827 decision, history [cur].nexecuted, history [cur].nthreads, tp->waiting, history [cur].nthreads_diff, history [max].nexecuted, history [max].nthreads);
830 return history [cur].nthreads_diff;
/* Background monitor: wakes every SAMPLES_PERIOD ms, and for each pool
 * either rescues a stalled queue (no waiter but queued work) or applies the
 * heuristic to grow/shrink the thread count. Goes to sleep on monitor_sem
 * after NUM_WAITING_ITERATIONS empty periods; producers wake it via a CAS
 * on monitor_state (see threadpool_append_jobs). */
834 monitor_thread (gpointer unused)
836 ThreadPool *pools [2];
837 MonoInternalThread *thread;
841 gint8 num_waiting_iterations = 0;
843 gint16 history_size = 0, current = -1;
/* NOTE(review): heuristic history is malloc'd once and never freed — the
 * monitor lives for the process lifetime, so this is intentional. */
844 SamplesHistory *history = malloc (sizeof (SamplesHistory) * HISTORY_SIZE);
846 pools [0] = &async_tp;
847 pools [1] = &async_io_tp;
848 thread = mono_thread_internal_current ();
849 ves_icall_System_Threading_Thread_SetName_internal (thread, mono_string_new (mono_domain_get (), "Threadpool monitor"));
/* Interruptible sleep: keep sleeping until the full period has elapsed,
 * tolerating up to i spurious wakeups, honoring shutdown and break requests. */
852 i = 10; //number of spurious awakes we tolerate before doing a round of rebalancing.
855 ts = mono_msec_ticks ();
856 if (SleepEx (ms, TRUE) == 0)
858 ms -= (mono_msec_ticks () - ts);
859 if (mono_runtime_is_shutting_down ())
861 if (THREAD_WANTS_A_BREAK (thread))
862 mono_thread_interruption_checkpoint ();
863 } while (ms > 0 && i--);
865 if (mono_runtime_is_shutting_down ())
871 /* threadpool is cleaning up */
872 if (async_tp.pool_status == 2 || async_io_tp.pool_status == 2)
/* Sleep state machine: FALLING_ASLEEP transitions to SLEEPING via CAS so a
 * racing producer can cancel it; SLEEPING inside the loop is impossible. */
875 switch (monitor_state) {
876 case MONITOR_STATE_AWAKE:
877 num_waiting_iterations = 0;
879 case MONITOR_STATE_FALLING_ASLEEP:
880 if (++num_waiting_iterations == NUM_WAITING_ITERATIONS) {
881 if (monitor_state == MONITOR_STATE_FALLING_ASLEEP && InterlockedCompareExchange (&monitor_state, MONITOR_STATE_SLEEPING, MONITOR_STATE_FALLING_ASLEEP) == MONITOR_STATE_FALLING_ASLEEP) {
882 MONO_SEM_WAIT (&monitor_sem);
884 num_waiting_iterations = 0;
890 case MONITOR_STATE_SLEEPING:
891 g_assert_not_reached ();
/* Rebalance both pools. */
894 for (i = 0; i < 2; i++) {
899 if (!tp->waiting && mono_cq_count (tp->queue) > 0)
900 threadpool_start_thread (tp);
902 gint8 nthreads_diff = monitor_heuristic (&current, &history_size, history, tp);
904 if (nthreads_diff == 1)
905 threadpool_start_thread (tp);
906 else if (nthreads_diff == -1)
907 threadpool_kill_thread (tp);
/* Per-thread TLS setup; delegates to the MS implementation when selected. */
914 mono_thread_pool_init_tls (void)
916 if (use_ms_threadpool ()) {
917 mono_threadpool_ms_init_tls ();
/* Process-wide threadpool initialization (guarded by a CAS on tp_inited):
 * sizes both pools from the CPU count and MONO_THREADS_PER_CPU, sets up the
 * socket-IO GC root and lock, class caches, thread/WSQ registries, perf
 * counters, the debug SIGALRM handler and the monitor semaphore/state. */
925 mono_thread_pool_init (void)
927 gint threads_per_cpu = 1;
932 if (use_ms_threadpool ()) {
933 mono_threadpool_ms_init ();
937 cpu_count = mono_cpu_count ();
/* Only the first caller proceeds; the CAS loser's wait is not sampled. */
942 result = InterlockedCompareExchange (&tp_inited, 1, 0);
951 MONO_GC_REGISTER_ROOT_FIXED (socket_io_data.sock_to_state);
952 mono_mutex_init_recursive (&socket_io_data.io_lock);
953 if (g_getenv ("MONO_THREADS_PER_CPU") != NULL) {
954 threads_per_cpu = atoi (g_getenv ("MONO_THREADS_PER_CPU"));
955 if (threads_per_cpu < 1)
/* CPU pool: up to 100 threads per CPU; IO pool: 2-4 threads per CPU. */
959 thread_count = MIN (cpu_count * threads_per_cpu, 100 * cpu_count);
960 threadpool_init (&async_tp, thread_count, MAX (100 * cpu_count, thread_count), async_invoke_thread);
961 threadpool_init (&async_io_tp, cpu_count * 2, cpu_count * 4, async_invoke_thread);
962 async_io_tp.is_io = TRUE;
964 async_call_klass = mono_class_from_name (mono_defaults.corlib, "System", "MonoAsyncCall");
965 g_assert (async_call_klass);
967 mono_mutex_init (&threads_lock);
968 threads = g_ptr_array_sized_new (thread_count);
971 mono_mutex_init_recursive (&wsqs_lock);
972 wsqs = g_ptr_array_sized_new (MAX (100 * cpu_count, thread_count));
974 #ifndef DISABLE_PERFCOUNTERS
975 async_tp.pc_nitems = init_perf_counter ("Mono Threadpool", "Work Items Added");
976 g_assert (async_tp.pc_nitems);
978 async_io_tp.pc_nitems = init_perf_counter ("Mono Threadpool", "IO Work Items Added");
979 g_assert (async_io_tp.pc_nitems);
981 async_tp.pc_nthreads = init_perf_counter ("Mono Threadpool", "# of Threads");
982 g_assert (async_tp.pc_nthreads);
984 async_io_tp.pc_nthreads = init_perf_counter ("Mono Threadpool", "# of IO Threads");
985 g_assert (async_io_tp.pc_nthreads);
/* DEBUG-only: SIGALRM triggers the pool-info dump (see signal_handler). */
989 signal (SIGALRM, signal_handler);
993 MONO_SEM_INIT (&monitor_sem, 0);
994 monitor_state = MONITOR_STATE_AWAKE;
998 static MonoAsyncResult *
/* Builds a minimal MonoAsyncResult (delegate + state only) for IO jobs. */
999 create_simple_asyncresult (MonoObject *target, MonoObject *state)
1001 MonoDomain *domain = mono_domain_get ();
1002 MonoAsyncResult *ares;
1004 /* Don't call mono_async_result_new() to avoid capturing the context */
1005 ares = (MonoAsyncResult *) mono_object_new (domain, mono_defaults.asyncresult_class);
1006 MONO_OBJECT_SETREF (ares, async_delegate, target);
1007 MONO_OBJECT_SETREF (ares, async_state, state);
/* Managed icall: queue an async socket operation on the IO machinery,
 * routing to the MS implementation when selected. */
1012 icall_append_io_job (MonoObject *target, MonoSocketAsyncResult *state)
1014 MonoAsyncResult *ares;
1016 ares = create_simple_asyncresult (target, (MonoObject *) state);
1018 if (use_ms_threadpool ()) {
1019 #ifndef DISABLE_SOCKETS
1020 mono_threadpool_ms_io_add (ares, state);
1025 socket_io_add (ares, state);
/* Public entry point for BeginInvoke-style calls: wraps the message and
 * callback in a MonoAsyncCall + MonoAsyncResult, then queues it either on
 * the socket-IO machinery (when socket_io_filter matches) or the CPU pool. */
1029 mono_thread_pool_add (MonoObject *target, MonoMethodMessage *msg, MonoDelegate *async_callback,
1033 MonoAsyncResult *ares;
1036 if (use_ms_threadpool ())
1037 return mono_threadpool_ms_add (target, msg, async_callback, state);
1039 domain = mono_domain_get ();
1041 ac = (MonoAsyncCall*)mono_object_new (domain, async_call_klass);
1042 MONO_OBJECT_SETREF (ac, msg, msg);
1043 MONO_OBJECT_SETREF (ac, state, state);
1045 if (async_callback) {
1046 ac->cb_method = mono_get_delegate_invoke (((MonoObject *)async_callback)->vtable->klass);
1047 MONO_OBJECT_SETREF (ac, cb_target, async_callback);
1050 ares = mono_async_result_new (domain, NULL, ac->state, NULL, (MonoObject*)ac);
1051 MONO_OBJECT_SETREF (ares, async_delegate, target);
1053 #ifndef DISABLE_SOCKETS
/* Socket/process IO delegates bypass the CPU pool and go to the IO pool. */
1054 if (socket_io_filter (target, state)) {
1055 socket_io_add (ares, (MonoSocketAsyncResult *) state);
1059 threadpool_append_job (&async_tp, (MonoObject *) ares);
/* Implements EndInvoke: rejects a second EndInvoke on the same result,
 * blocks on a lazily-created wait handle until the job completes, then
 * returns the call's out-args and captured exception. */
1064 mono_thread_pool_finish (MonoAsyncResult *ares, MonoArray **out_args, MonoObject **exc)
1069 if (use_ms_threadpool ()) {
1070 return mono_threadpool_ms_finish (ares, out_args, exc);
1076 /* check if already finished */
1077 mono_monitor_enter ((MonoObject *) ares);
1079 if (ares->endinvoke_called) {
1080 *exc = (MonoObject *) mono_get_exception_invalid_operation (NULL);
1081 mono_monitor_exit ((MonoObject *) ares);
1085 ares->endinvoke_called = 1;
1086 /* wait until we are really finished */
1087 if (!ares->completed) {
1088 if (ares->handle == NULL) {
/* Manual-reset event so all waiters are released on completion. */
1089 wait_event = CreateEvent (NULL, TRUE, FALSE, NULL);
1090 g_assert(wait_event != 0);
1091 MONO_OBJECT_SETREF (ares, handle, (MonoObject *) mono_wait_handle_new (mono_object_domain (ares), wait_event));
1093 wait_event = mono_wait_handle_get_handle ((MonoWaitHandle *) ares->handle);
/* Drop the monitor before blocking; alertable wait allows interruption. */
1095 mono_monitor_exit ((MonoObject *) ares);
1096 WaitForSingleObjectEx (wait_event, INFINITE, TRUE);
1098 mono_monitor_exit ((MonoObject *) ares);
1101 ac = (MonoAsyncCall *) ares->object_data;
1102 g_assert (ac != NULL);
1103 *exc = ac->msg->exc; /* FIXME: GC add write barrier */
1104 *out_args = ac->out_args;
/* Asks every idle worker to exit by posting the new-job semaphore once per
 * possible thread (max_threads read with a no-op CAS for a coherent value). */
1110 threadpool_kill_idle_threads (ThreadPool *tp)
1114 n = (gint) InterlockedCompareExchange (&tp->max_threads, 0, -1);
1117 MONO_SEM_POST (&tp->new_job);
/* Runtime-shutdown teardown: flips each pool's status 1 -> 2 (cleaning up)
 * with an atomic exchange, stops socket IO, kills idle workers, and frees
 * queues, thread/WSQ registries and semaphores. */
1122 mono_thread_pool_cleanup (void)
1124 if (use_ms_threadpool ()) {
1125 mono_threadpool_ms_cleanup ();
1129 if (InterlockedExchange (&async_io_tp.pool_status, 2) == 1) {
1130 socket_io_cleanup (&socket_io_data); /* Empty when DISABLE_SOCKETS is defined */
1131 threadpool_kill_idle_threads (&async_io_tp);
1134 if (async_io_tp.queue != NULL) {
1135 MONO_SEM_DESTROY (&async_io_tp.new_job);
1136 threadpool_free_queue (&async_io_tp);
1140 if (InterlockedExchange (&async_tp.pool_status, 2) == 1) {
1141 threadpool_kill_idle_threads (&async_tp);
1142 threadpool_free_queue (&async_tp);
1146 mono_mutex_lock (&threads_lock);
/* FALSE: free only the array, not the MonoInternalThread elements. */
1148 g_ptr_array_free (threads, FALSE);
1150 mono_mutex_unlock (&threads_lock);
1154 mono_mutex_lock (&wsqs_lock);
1155 mono_wsq_cleanup ();
1157 g_ptr_array_free (wsqs, TRUE);
1159 mono_mutex_unlock (&wsqs_lock);
1160 MONO_SEM_DESTROY (&async_tp.new_job);
1163 MONO_SEM_DESTROY (&monitor_sem);
/* Starts one new worker if the pool is below max_threads, claiming the
 * nthreads slot via CAS; registers the new thread in the global list.
 * NOTE(review): lines 1180 and 1183 both show a thread-creation call — the
 * surrounding (unsampled) preprocessor/conditional context decides which
 * path runs; do not assume two threads are created. */
1167 threadpool_start_thread (ThreadPool *tp)
1171 MonoInternalThread *thread;
1173 stack_size = (!tp->is_io) ? 0 : SMALL_STACK;
1174 while (!mono_runtime_is_shutting_down () && (n = tp->nthreads) < tp->max_threads) {
1175 if (InterlockedCompareExchange (&tp->nthreads, n + 1, n) == n) {
1176 #ifndef DISABLE_PERFCOUNTERS
1177 mono_perfcounter_update_value (tp->pc_nthreads, TRUE, 1);
1180 thread = mono_thread_create_internal (mono_get_root_domain (), tp->async_invoke, tp, TRUE, stack_size);
1182 mono_mutex_lock (&threads_lock);
1183 thread = mono_thread_create_internal (mono_get_root_domain (), tp->async_invoke, tp, TRUE, stack_size);
1184 g_assert (threads != NULL);
1185 g_ptr_array_add (threads, thread);
1186 mono_mutex_unlock (&threads_lock);
/* Wakes one worker waiting on the pool's new-job semaphore. */
1196 pulse_on_new_job (ThreadPool *tp)
1199 MONO_SEM_POST (&tp->new_job);
/* Asks one worker to exit: sets destroy_thread (CAS-guarded so only one
 * request is outstanding) and pulses the semaphore so a waiter notices. */
1203 threadpool_kill_thread (ThreadPool *tp)
1205 if (tp->destroy_thread == 0 && InterlockedCompareExchange (&tp->destroy_thread, 1, 0) == 0)
1206 pulse_on_new_job (tp);
/* Managed icall: queue one job on the CPU pool. */
1210 icall_append_job (MonoObject *ar)
1212 threadpool_append_jobs (&async_tp, &ar, 1);
/* Single-job convenience wrapper. */
1216 threadpool_append_job (ThreadPool *tp, MonoObject *ar)
1218 threadpool_append_jobs (tp, &ar, 1);
/* Batch-queue jobs on the IO pool (used by the IO backends). */
1222 threadpool_append_async_io_jobs (MonoObject **jobs, gint njobs)
1224 threadpool_append_jobs (&async_io_tp, jobs, njobs);
/* Core enqueue path: lazily starts the pool (first caller wins the CAS on
 * pool_status), wakes the monitor, pushes each job (preferring the caller's
 * work-stealing queue for CPU jobs), and pulses waiting workers. */
1228 threadpool_append_jobs (ThreadPool *tp, MonoObject **jobs, gint njobs)
1233 if (mono_runtime_is_shutting_down ())
1236 if (tp->pool_status == 0 && InterlockedCompareExchange (&tp->pool_status, 1, 0) == 0) {
1238 monitor_internal_thread = mono_thread_create_internal (mono_get_root_domain (), monitor_thread, NULL, TRUE, SMALL_STACK);
1239 monitor_internal_thread->flags |= MONO_THREAD_FLAG_DONT_MANAGE;
1240 threadpool_start_thread (tp);
1242 /* Create on demand up to min_threads to avoid startup penalty for apps that don't use
1243 * the threadpool that much
1245 if (mono_config_is_server_mode ()) {
1246 mono_thread_create_internal (mono_get_root_domain (), threadpool_start_idle_threads, tp, TRUE, SMALL_STACK);
1250 InterlockedAdd (&monitor_njobs, njobs);
/* Wake or keep awake the monitor, racing its sleep transitions via CAS. */
1252 if (monitor_state == MONITOR_STATE_SLEEPING && InterlockedCompareExchange (&monitor_state, MONITOR_STATE_AWAKE, MONITOR_STATE_SLEEPING) == MONITOR_STATE_SLEEPING)
1253 MONO_SEM_POST (&monitor_sem);
1255 if (monitor_state == MONITOR_STATE_FALLING_ASLEEP)
1256 InterlockedCompareExchange (&monitor_state, MONITOR_STATE_AWAKE, MONITOR_STATE_FALLING_ASLEEP);
1258 for (i = 0; i < njobs; i++) {
1260 if (ar == NULL || mono_domain_is_unloading (ar->vtable->domain))
1261 continue; /* Might happen when cleaning domain jobs */
1262 threadpool_jobs_inc (ar);
1263 #ifndef DISABLE_PERFCOUNTERS
1264 mono_perfcounter_update_value (tp->pc_nitems, TRUE, 1);
/* CPU jobs go to the caller's local WSQ when possible; otherwise (and for
 * all IO jobs) into the shared concurrent queue. */
1266 if (!tp->is_io && mono_wsq_local_push (ar))
1269 mono_cq_enqueue (tp->queue, ar);
1273 InterlockedAdd (&tp->njobs, njobs);
/* Wake at most min(njobs, max_threads) waiting workers. */
1276 for (i = 0; tp->waiting > 0 && i < MIN(njobs, tp->max_threads); i++)
1277 pulse_on_new_job (tp);
/* Removes all jobs belonging to 'domain' from the pool queue: drains the
 * whole queue, drops matching jobs (decrementing their domain's counter),
 * and re-queues jobs from other domains unless the runtime is shutting down. */
1281 threadpool_clear_queue (ThreadPool *tp, MonoDomain *domain)
1284 MonoMList *other = NULL;
1285 MonoCQ *queue = tp->queue;
1290 while (mono_cq_dequeue (queue, &obj)) {
1293 if (obj->vtable->domain != domain)
1294 other = mono_mlist_prepend (other, obj);
1295 threadpool_jobs_dec (obj);
1298 if (mono_runtime_is_shutting_down ())
1302 threadpool_append_job (tp, (MonoObject *) mono_mlist_get_data (other));
1303 other = mono_mlist_next (other);
/* GHashTable foreach-remove callback: clears sock_to_state entries whose
 * data belongs to the unloading domain (data pointer nulled so the list
 * node no longer roots the object). */
1308 remove_sockstate_for_domain (gpointer key, gpointer value, gpointer user_data)
1310 MonoMList *list = value;
1311 gboolean remove = FALSE;
1313 MonoObject *data = mono_mlist_get_data (list);
1314 if (mono_object_domain (data) == user_data) {
1316 mono_mlist_set_data (list, NULL);
1318 list = mono_mlist_next (list);
1320 //FIXME is there some sort of additional unregistration we need to perform here?
1325 * Clean up the threadpool of all domain jobs.
1326 * Can only be called as part of the domain unloading process as
1327 * it will wait for all jobs to be visible to the interruption code.
/*
 * mono_thread_pool_remove_domain_jobs:
 * Purge all queued work belonging to @domain from both pools and wait
 * (up to @timeout ms, -1 = infinite) until in-flight jobs drain.
 * Must only run while @domain is unloading (asserted below).
 */
1330 mono_thread_pool_remove_domain_jobs (MonoDomain *domain, int timeout)
1336 	if (use_ms_threadpool ()) {
		/* MONO_THREADPOOL=microsoft: delegate to the MS-style pool */
1337 		return mono_threadpool_ms_remove_domain_jobs (domain, timeout);
1343 	g_assert (domain->state == MONO_APPDOMAIN_UNLOADING);
	/* drop this domain's jobs from both the worker and the IO pool queues */
1345 	threadpool_clear_queue (&async_tp, domain);
1346 	threadpool_clear_queue (&async_io_tp, domain);
	/* scrub socket-IO state referencing the dying domain, under io_lock */
1348 	mono_mutex_lock (&socket_io_data.io_lock);
1349 	if (socket_io_data.sock_to_state)
1350 		mono_g_hash_table_foreach_remove (socket_io_data.sock_to_state, remove_sockstate_for_domain, domain);
1352 	mono_mutex_unlock (&socket_io_data.io_lock);
1355 	 * There might be some threads out that could be about to execute stuff from the given domain.
1356 	 * We avoid that by setting up a semaphore to be pulsed by the thread that reaches zero.
1358 	sem_handle = CreateSemaphore (NULL, 0, 1, NULL);
1360 	domain->cleanup_semaphore = sem_handle;
1362 	 * The memory barrier here is required to have global ordering between assigning to cleanup_semaphore
1363 	 * and reading threadpool_jobs.
1364 	 * Otherwise this thread could read a stale version of threadpool_jobs and wait forever.
1366 	mono_memory_write_barrier ();
	/* wait for the last worker touching this domain to pulse the semaphore */
1368 	if (domain->threadpool_jobs && timeout != -1)
1369 		start_time = mono_msec_ticks ();
1370 	while (domain->threadpool_jobs) {
1371 		WaitForSingleObject (sem_handle, timeout);
1372 		if (timeout != -1 && (mono_msec_ticks () - start_time) > timeout) {
1378 	domain->cleanup_semaphore = NULL;
1379 	CloseHandle (sem_handle);
/* Destroy the pool's concurrent job queue (shutdown-time cleanup). */
1384 threadpool_free_queue (ThreadPool *tp)
1386 	mono_cq_destroy (tp->queue);
/*
 * mono_thread_pool_is_queue_array:
 * Returns whether @o is one of the threadpool's internal queue arrays
 * (used by the GC). Delegates to the MS pool when that is selected.
 */
1391 mono_thread_pool_is_queue_array (MonoArray *o)
1393 	if (use_ms_threadpool ()) {
1394 		return mono_threadpool_ms_is_queue_array (o);
1397 //	gpointer obj = o;
1399 	// FIXME: need some fix in sgen code.
	/* NOTE(review): the signature is elided here; in Mono this is add_wsq —
	 * it creates a work-stealing queue for the calling thread and registers
	 * it in the global 'wsqs' array, reusing a NULL slot when one exists,
	 * otherwise appending. All array access happens under wsqs_lock. */
1409 	mono_mutex_lock (&wsqs_lock);
1410 	wsq = mono_wsq_create ();
1412 	mono_mutex_unlock (&wsqs_lock);
	/* prefer recycling a slot freed by remove_wsq */
1415 	for (i = 0; i < wsqs->len; i++) {
1416 		if (g_ptr_array_index (wsqs, i) == NULL) {
1417 			wsqs->pdata [i] = wsq;
1418 			mono_mutex_unlock (&wsqs_lock);
	/* no free slot: grow the array */
1422 	g_ptr_array_add (wsqs, wsq);
1423 	mono_mutex_unlock (&wsqs_lock);
/*
 * remove_wsq:
 * Unregister @wsq from the global 'wsqs' array. During runtime shutdown
 * any jobs still sitting in the queue are popped and their domain job
 * counts decremented before the queue itself is destroyed.
 */
1428 remove_wsq (MonoWSQ *wsq)
1435 	mono_mutex_lock (&wsqs_lock);
1437 	mono_mutex_unlock (&wsqs_lock);
1440 	g_ptr_array_remove_fast (wsqs, wsq);
1443 	 * Only clean this up when shutting down, any other case will error out
1444 	 * if we're removing a queue that still has work items.
1446 	if (mono_runtime_is_shutting_down ()) {
1447 		while (mono_wsq_local_pop (&data)) {
1448 			threadpool_jobs_dec (data);
1452 		mono_wsq_destroy (wsq);
1453 	mono_mutex_unlock (&wsqs_lock);
/*
 * try_steal:
 * Attempt to steal one job into *@data from another thread's
 * work-stealing queue (skipping @local_wsq and empty queues).
 * When @retry is TRUE the scan repeats with a growing 'ms' wait
 * budget until a job is found or ms reaches 11.
 */
1457 try_steal (MonoWSQ *local_wsq, gpointer *data, gboolean retry)
	/* nothing to do if there are no queues or *data is already filled */
1462 	if (wsqs == NULL || data == NULL || *data != NULL)
1467 		if (mono_runtime_is_shutting_down ())
1470 		mono_mutex_lock (&wsqs_lock);
1471 		for (i = 0; wsqs != NULL && i < wsqs->len; i++) {
1474 			wsq = wsqs->pdata [i];
1475 			if (wsq == local_wsq || mono_wsq_count (wsq) == 0)
1477 			mono_wsq_try_steal (wsqs->pdata [i], data, ms);
1478 			if (*data != NULL) {
				/* got one: release the lock and bail out */
1479 				mono_mutex_unlock (&wsqs_lock);
1483 		mono_mutex_unlock (&wsqs_lock);
1485 	} while (retry && ms < 11);
/*
 * dequeue_or_steal:
 * Fetch the next job for a worker: first from @tp's shared queue, then
 * (for the non-IO pool) by stealing from other threads' WSQs.
 * Returns TRUE when *@data was filled.
 */
1489 dequeue_or_steal (ThreadPool *tp, gpointer *data, MonoWSQ *local_wsq)
1491 	MonoCQ *queue = tp->queue;
1492 	if (mono_runtime_is_shutting_down () || !queue)
1494 	mono_cq_dequeue (queue, (MonoObject **) data);
1495 	if (!tp->is_io && !*data)
1496 		try_steal (local_wsq, data, FALSE);
1497 	return (*data != NULL);
/*
 * should_i_die:
 * Returns TRUE when this worker should exit: a single pending
 * destroy_thread request is consumed atomically (CAS 1 -> 0) and the
 * pool still has more threads than its configured minimum.
 */
1501 should_i_die (ThreadPool *tp)
1503 	gboolean result = FALSE;
1504 	if (tp->destroy_thread == 1 && InterlockedCompareExchange (&tp->destroy_thread, 0, 1) == 1)
1505 		result = (tp->nthreads > tp->min_threads);
/*
 * set_tp_thread_info:
 * Announce the current thread to the profiler and give it a
 * human-readable name ("IO Threadpool worker" / "Threadpool worker").
 */
1510 set_tp_thread_info (ThreadPool *tp)
1513 	MonoInternalThread *thread = mono_thread_internal_current ();
1515 	mono_profiler_thread_start (thread->tid);
1516 	name = (tp->is_io) ? "IO Threadpool worker" : "Threadpool worker";
1517 	mono_thread_set_name_internal (thread, mono_string_new (mono_domain_get (), name), FALSE);
/*
 * clear_thread_state:
 * Reset the worker's thread state after running user code, restoring
 * the Background flag if the callee cleared it.
 */
1521 clear_thread_state (void)
1523 	MonoInternalThread *thread = mono_thread_internal_current ();
1524 	/* If the callee changes the background status, set it back to TRUE */
1525 	mono_thread_clr_state (thread , ~ThreadState_Background);
1526 	if (!mono_thread_test_state (thread , ThreadState_Background))
1527 		ves_icall_System_Threading_Thread_SetState (thread, ThreadState_Background);
/*
 * check_for_interruption_critical:
 * Service a pending stop/suspend request while the worker is idle.
 * The thread normally runs in GC skip mode while parked; it must leave
 * that mode before touching managed state and re-enter it afterwards.
 */
1531 check_for_interruption_critical (void)
1533 	MonoInternalThread *thread;
1534 	/*RULE NUMBER ONE OF SKIP_THREAD: NEVER POKE MANAGED STATE.*/
1535 	mono_gc_set_skip_thread (FALSE);
1537 	thread = mono_thread_internal_current ();
1538 	if (THREAD_WANTS_A_BREAK (thread))
1539 		mono_thread_interruption_checkpoint ();
1541 	/*RULE NUMBER TWO OF SKIP_THREAD: READ RULE NUMBER ONE.*/
1542 	mono_gc_set_skip_thread (TRUE);
/* Notify the profiler that this worker thread is about to exit. */
1546 fire_profiler_thread_end (void)
1548 	MonoInternalThread *thread = mono_thread_internal_current ();
1549 	mono_profiler_thread_end (thread->tid);
/*
 * async_invoke_thread:
 * Main loop of a threadpool worker. Repeatedly: take a job (shared
 * queue, local WSQ, or steal), switch to its AppDomain, invoke it,
 * then idle-wait on the pool's new_job semaphore. The thread retires
 * when should_i_die() says so or the runtime shuts down.
 */
1553 async_invoke_thread (gpointer data)
1565 	set_tp_thread_info (tp);
	/* user-installed thread-start hook, if any */
1568 		tp_start_func (tp_hooks_user_data);
1572 		MonoAsyncResult *ar;
1574 		gboolean is_io_task;
1579 		ar = (MonoAsyncResult *) data;
1581 			InterlockedIncrement (&tp->busy_threads);
1582 			domain = ((MonoObject *)ar)->vtable->domain;
1583 #ifndef DISABLE_SOCKETS
1584 			klass = ((MonoObject *) data)->vtable->klass;
1585 			is_io_task = !is_corlib_asyncresult (domain, klass);
1588 				MonoSocketAsyncResult *state = (MonoSocketAsyncResult *) data;
1589 				is_socket = is_socketasyncresult (domain, klass);
				/* socket IO jobs: perform the actual recv/send here */
1591 				switch (state->operation) {
1592 				case AIO_OP_RECEIVE:
1593 					state->total = ICALL_RECV (state);
1596 					state->total = ICALL_SEND (state);
1601 			/* worker threads invokes methods in different domains,
1602 			 * so we need to set the right domain here */
1605 			if (mono_domain_is_unloading (domain) || mono_runtime_is_shutting_down ()) {
				/* the job's domain is going away: drop the job */
1606 				threadpool_jobs_dec ((MonoObject *)ar);
1609 				InterlockedDecrement (&tp->busy_threads);
1611 				mono_thread_push_appdomain_ref (domain);
1612 				if (threadpool_jobs_dec ((MonoObject *)ar)) {
1615 					mono_thread_pop_appdomain_ref ();
1616 					InterlockedDecrement (&tp->busy_threads);
1620 				if (mono_domain_set (domain, FALSE)) {
					/* user-installed per-item hooks around the invoke */
1623 					if (tp_item_begin_func)
1624 						tp_item_begin_func (tp_item_user_data);
1626 					exc = mono_async_invoke (tp, ar);
1627 					if (tp_item_end_func)
1628 						tp_item_end_func (tp_item_user_data);
1630 						mono_internal_thread_unhandled_exception (exc);
1631 					if (is_socket && tp->is_io) {
1632 						MonoSocketAsyncResult *state = (MonoSocketAsyncResult *) data;
						/* completed socket op with a callback: queue the
						 * callback as a fresh job on the worker pool */
1634 						if (state->completed && state->callback) {
1635 							MonoAsyncResult *cb_ares;
1636 							cb_ares = create_simple_asyncresult ((MonoObject *) state->callback,
1637 												 (MonoObject *) state);
1638 							icall_append_job ((MonoObject *) cb_ares);
1641 					mono_domain_set (mono_get_root_domain (), TRUE);
1643 				mono_thread_pop_appdomain_ref ();
1644 				InterlockedDecrement (&tp->busy_threads);
1645 				clear_thread_state ();
		/* job finished: decide whether to keep running, then look for more work */
1651 		must_die = should_i_die (tp);
1653 			mono_wsq_suspend (wsq);
		/* prefer the local WSQ (worker pool only), then shared queue / steal */
1655 		if (tp->is_io || !mono_wsq_local_pop (&data))
1656 			dequeue_or_steal (tp, &data, wsq);
		/* idle phase: nap up to 4 times waiting for new jobs */
1660 		while (!must_die && !data && n_naps < 4) {
1663 			InterlockedIncrement (&tp->waiting);
1665 			// Another thread may have added a job into its wsq since the last call to dequeue_or_steal
1666 			// Check all the queues again before entering the wait loop
1667 			dequeue_or_steal (tp, &data, wsq);
1669 				InterlockedDecrement (&tp->waiting);
			/* parked: GC may skip this thread while it blocks on new_job */
1673 			mono_gc_set_skip_thread (TRUE);
1675 #if defined(__OpenBSD__)
1676 			while (mono_cq_count (tp->queue) == 0 && (res = mono_sem_wait (&tp->new_job, TRUE)) == -1) {// && errno == EINTR) {
1678 			while (mono_cq_count (tp->queue) == 0 && (res = mono_sem_timedwait (&tp->new_job, 2000, TRUE)) == -1) {// && errno == EINTR) {
1680 				if (mono_runtime_is_shutting_down ())
1682 				check_for_interruption_critical ();
1684 			InterlockedDecrement (&tp->waiting);
1686 			mono_gc_set_skip_thread (FALSE);
1688 			if (mono_runtime_is_shutting_down ())
1690 			must_die = should_i_die (tp);
1691 			dequeue_or_steal (tp, &data, wsq);
		/* last chance: drain our own WSQ before retiring; if we must die but
		 * still hold a job, pass the destroy request on to another worker */
1695 		if (!data && !tp->is_io && !mono_runtime_is_shutting_down ()) {
1696 			mono_wsq_local_pop (&data);
1697 			if (data && must_die) {
1698 				InterlockedCompareExchange (&tp->destroy_thread, 1, 0);
1699 				pulse_on_new_job (tp);
		/* retire: atomically drop nthreads (unless that would go below min) */
1708 			down = mono_runtime_is_shutting_down ();
1709 			if (!down && nt <= tp->min_threads)
1711 			if (down || InterlockedCompareExchange (&tp->nthreads, nt - 1, nt) == nt) {
1712 #ifndef DISABLE_PERFCOUNTERS
1713 				mono_perfcounter_update_value (tp->pc_nthreads, TRUE, -1);
1719 				fire_profiler_thread_end ();
				/* user-installed thread-exit hook, if any */
1722 					tp_finish_func (tp_hooks_user_data);
1726 				mono_mutex_lock (&threads_lock);
1728 					g_ptr_array_remove_fast (threads, mono_thread_current ()->internal_thread);
1729 				mono_mutex_unlock (&threads_lock);
	/* the loop above only exits via explicit thread-exit paths */
1739 	g_assert_not_reached ();
/* Icall: number of additional worker / IO-completion threads that could
 * currently become busy (max minus busy, per pool). */
1743 ves_icall_System_Threading_ThreadPool_GetAvailableThreads (gint *workerThreads, gint *completionPortThreads)
1745 	*workerThreads = async_tp.max_threads - async_tp.busy_threads;
1746 	*completionPortThreads = async_io_tp.max_threads - async_io_tp.busy_threads;
/* Icall: report the configured maximum thread counts for both pools. */
1750 ves_icall_System_Threading_ThreadPool_GetMaxThreads (gint *workerThreads, gint *completionPortThreads)
1752 	*workerThreads = async_tp.max_threads;
1753 	*completionPortThreads = async_io_tp.max_threads;
/* Icall: report the configured minimum thread counts for both pools. */
1757 ves_icall_System_Threading_ThreadPool_GetMinThreads (gint *workerThreads, gint *completionPortThreads)
1759 	*workerThreads = async_tp.min_threads;
1760 	*completionPortThreads = async_io_tp.min_threads;
/*
 * Icall: set the minimum thread counts for the worker and IO pools.
 * Values must be positive and not exceed the corresponding maximum;
 * if the new minimum is above the current thread count, a helper
 * thread is spawned to bring idle threads up to the minimum.
 */
1764 ves_icall_System_Threading_ThreadPool_SetMinThreads (gint workerThreads, gint completionPortThreads)
1767 	gint max_io_threads;
	/* validate against the current maxima */
1769 	max_threads = async_tp.max_threads;
1770 	if (workerThreads <= 0 || workerThreads > max_threads)
1773 	max_io_threads = async_io_tp.max_threads;
1774 	if (completionPortThreads <= 0 || completionPortThreads > max_io_threads)
1777 	InterlockedExchange (&async_tp.min_threads, workerThreads);
1778 	InterlockedExchange (&async_io_tp.min_threads, completionPortThreads);
	/* grow each pool up to its new minimum if it is currently below it */
1779 	if (workerThreads > async_tp.nthreads)
1780 		mono_thread_create_internal (mono_get_root_domain (), threadpool_start_idle_threads, &async_tp, TRUE, SMALL_STACK);
1781 	if (completionPortThreads > async_io_tp.nthreads)
1782 		mono_thread_create_internal (mono_get_root_domain (), threadpool_start_idle_threads, &async_io_tp, TRUE, SMALL_STACK);
/*
 * Icall: set the maximum thread counts for the worker and IO pools.
 * New maxima must be at least the current minimum and at least the
 * CPU count; otherwise the request is rejected (elided early return).
 */
1787 ves_icall_System_Threading_ThreadPool_SetMaxThreads (gint workerThreads, gint completionPortThreads)
1790 	gint min_io_threads;
1793 	cpu_count = mono_cpu_count ();
1794 	min_threads = async_tp.min_threads;
1795 	if (workerThreads < min_threads || workerThreads < cpu_count)
1798 	/* We don't really have the concept of completion ports. Do we care here? */
1799 	min_io_threads = async_io_tp.min_threads;
1800 	if (completionPortThreads < min_io_threads || completionPortThreads < cpu_count)
1803 	InterlockedExchange (&async_tp.max_threads, workerThreads);
1804 	InterlockedExchange (&async_io_tp.max_threads, completionPortThreads);
1809 * mono_install_threadpool_thread_hooks
1810 * @start_func: the function to be called right after a new threadpool thread is created. Can be NULL.
1811  * @finish_func: the function to be called right before a threadpool thread is exiting. Can be NULL.
1812 * @user_data: argument passed to @start_func and @finish_func.
1814  * @start_func will be called right after a threadpool thread is created and @finish_func right before a threadpool thread exits.
1815 * The calls will be made from the thread itself.
/* Store the embedder-provided thread lifecycle hooks; each worker calls
 * start_func right after starting and finish_func right before exiting. */
1818 mono_install_threadpool_thread_hooks (MonoThreadPoolFunc start_func, MonoThreadPoolFunc finish_func, gpointer user_data)
1820 	tp_start_func = start_func;
1821 	tp_finish_func = finish_func;
1822 	tp_hooks_user_data = user_data;
1826 * mono_install_threadpool_item_hooks
1827 * @begin_func: the function to be called before a threadpool work item processing starts.
1828 * @end_func: the function to be called after a threadpool work item is finished.
1829 * @user_data: argument passed to @begin_func and @end_func.
1831 * The calls will be made from the thread itself and from the same AppDomain
1832 * where the work item was executed.
/* Store the embedder-provided per-work-item hooks, invoked around each
 * job from the executing worker thread in the job's AppDomain. */
1836 mono_install_threadpool_item_hooks (MonoThreadPoolItemFunc begin_func, MonoThreadPoolItemFunc end_func, gpointer user_data)
1838 	tp_item_begin_func = begin_func;
1839 	tp_item_end_func = end_func;
1840 	tp_item_user_data = user_data;
/*
 * mono_internal_thread_unhandled_exception:
 * Handle an exception that escaped a threadpool job. Under the
 * MONO_UNHANDLED_POLICY_CURRENT policy, non-abort, non-domain-unload
 * exceptions are reported (and may terminate the process depending on
 * the exit-code setting); a ThreadAbortException instead resets the
 * abort state of the current thread.
 */
1844 mono_internal_thread_unhandled_exception (MonoObject* exc)
1846 	if (mono_runtime_unhandled_exception_policy_get () == MONO_UNHANDLED_POLICY_CURRENT) {
1850 		klass = exc->vtable->klass;
1851 		unloaded = is_appdomainunloaded_exception (exc->vtable->domain, klass);
1852 		if (!unloaded && klass != mono_defaults.threadabortexception_class) {
1853 			mono_unhandled_exception (exc);
1854 			if (mono_environment_exitcode_get () == 1)
1857 		if (klass == mono_defaults.threadabortexception_class)
1858 			mono_thread_internal_reset_abort (mono_thread_internal_current ());
1863 * Suspend creation of new threads.
/* Suspend creation of new threadpool threads; delegates to the MS-style
 * pool when MONO_THREADPOOL=microsoft is selected. */
1866 mono_thread_pool_suspend (void)
1868 	if (use_ms_threadpool ()) {
1869 		mono_threadpool_ms_suspend ();
1876 * Resume creation of new threads.
1879 mono_thread_pool_resume (void)
1881 if (use_ms_threadpool ()) {
1882 mono_threadpool_ms_resume ();