2 * monitor.c: Monitor locking functions
5 * Dick Porter (dick@ximian.com)
7 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/monitor.h>
14 #include <mono/metadata/threads-types.h>
15 #include <mono/metadata/exception.h>
16 #include <mono/metadata/threads.h>
17 #include <mono/io-layer/io-layer.h>
18 #include <mono/metadata/object-internals.h>
19 #include <mono/metadata/gc-internal.h>
20 #include <mono/utils/mono-time.h>
22 /*#define LOCK_DEBUG(a) do { a; } while (0)*/
26 * The monitor implementation here is based on
27 * http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf and
28 * http://www.research.ibm.com/people/d/dfb/papers/Bacon98Thin.ps
30 * The Dice paper describes a technique for saving lock record space
31 * by returning records to a free list when they become unused. That
32 * sounds like unnecessary complexity to me, though if it becomes
33 * clear that unused lock records are taking up lots of space or we
34 * need to shave more time off by avoiding a malloc then we can always
35 * implement the free list idea later. The timeout parameter to
36 * try_enter voids some of the assumptions about the reference count
37 * field in Dice's implementation too. In his version, the thread
38 * attempting to lock a contended object will block until it succeeds,
39 * so the reference count will never be decremented while an object is
42 * Bacon's thin locks have a fast path that doesn't need a lock record
43 * for the common case of locking an unlocked or shallow-nested
44 * object, but the technique relies on encoding the thread ID in 15
45 * bits (to avoid too much per-object space overhead.) Unfortunately
46 * I don't think it's possible to reliably encode a pthread_t into 15
47 * bits. (The JVM implementation used seems to have a 15-bit
48 * per-thread identifier available.)
50 * This implementation then combines Dice's basic lock model with
51 * Bacon's simplification of keeping a lock record for the lifetime of
/* Inflated lock record attached to a locked (or hashed) object.
 * NOTE(review): this excerpt shows only part of the struct - other fields
 * (nest, data, hash_code, entry_sem, wait_list) are referenced elsewhere
 * in this file but their declarations are not visible here. */
struct _MonoThreadsSync
	gsize owner;			/* thread ID of the current holder; 0 when the lock is free */
#ifdef HAVE_MOVING_COLLECTOR
	/* number of threads currently blocked on entry_sem trying to acquire this lock */
	volatile gint32 entry_count;
typedef struct _MonitorArray MonitorArray;

/* A chunk of lock records; chunks are chained and never returned to the OS
 * (mon_new scans them for collected records before allocating a new chunk). */
struct _MonitorArray {
	/* trailing variable-length array of lock records */
	MonoThreadsSync monitors [MONO_ZERO_LEN_ARRAY];
/* Serializes access to the monitor allocator state below. */
#define mono_monitor_allocator_lock() EnterCriticalSection (&monitor_mutex)
#define mono_monitor_allocator_unlock() LeaveCriticalSection (&monitor_mutex)
static CRITICAL_SECTION monitor_mutex;
/* Singly-linked list of unused lock records, chained through ->data
 * (see mon_finalize / mon_new). */
static MonoThreadsSync *monitor_freelist;
/* List of every MonitorArray chunk allocated so far. */
static MonitorArray *monitor_allocated;
/* Number of lock records to put in the next allocated MonitorArray. */
static int array_size = 16;
/* Per-thread cache of pthread_self (), filled in by mono_monitor_init_tls (). */
static __thread gsize tls_pthread_self MONO_TLS_FAST;

#ifndef PLATFORM_WIN32
/* Fast path: read the cached pthread id straight out of TLS. */
#define GetCurrentThreadId() tls_pthread_self
/*
 * The usual problem: we can't replace GetCurrentThreadId () with a macro because
 * it is in a public header.
 */
/* Fallback: call pthread_self () on every use.  NOTE(review): the two
 * #defines above are alternatives selected by preprocessor conditions that
 * are not visible in this excerpt (presumably HAVE_KW_THREAD). */
#define GetCurrentThreadId() ((gsize)pthread_self ())
/* Initialize the lock that protects the monitor allocator state. */
mono_monitor_init (void)
	InitializeCriticalSection (&monitor_mutex);
mono_monitor_cleanup (void)
	/* Deliberately left commented out - NOTE(review): presumably other
	 * threads can still race into the monitor code during shutdown, so the
	 * critical section must stay valid; confirm before re-enabling. */
	/*DeleteCriticalSection (&monitor_mutex);*/
/*
 * mono_monitor_init_tls:
 *
 * Setup TLS variables used by the monitor code for the current thread.
 * Must run on each thread before it uses managed monitors.
 */
mono_monitor_init_tls (void)
#if !defined(PLATFORM_WIN32) && defined(HAVE_KW_THREAD)
	/* cache the pthread id so GetCurrentThreadId () is a cheap TLS read */
	tls_pthread_self = pthread_self ();
/* Scan every allocated MonitorArray to decide whether @mon points into one
 * of them (i.e. is a valid lock record address).
 * LOCKING: assumes the arrays are stable (monitor_mutex held or the world
 * stopped).  NOTE(review): the return statements are not visible in this
 * excerpt. */
monitor_is_on_freelist (MonoThreadsSync *mon)
	MonitorArray *marray;
	for (marray = monitor_allocated; marray; marray = marray->next) {
		if (mon >= marray->monitors && mon < &marray->monitors [marray->num_monitors])
/*
 * mono_locks_dump:
 * @include_untaken: if TRUE, also list inflated locks that are not
 * currently held.
 *
 * Print a report on stdout of the managed locks currently held by
 * threads. If @include_untaken is specified, list also inflated locks
 * which are unheld.
 * This is supposed to be used in debuggers like gdb.
 */
mono_locks_dump (gboolean include_untaken)
	int used = 0, on_freelist = 0, to_recycle = 0, total = 0, num_arrays = 0;
	MonoThreadsSync *mon;
	MonitorArray *marray;
	/* freelist entries are chained through ->data */
	for (mon = monitor_freelist; mon; mon = mon->data)
	for (marray = monitor_allocated; marray; marray = marray->next) {
		total += marray->num_monitors;
		for (i = 0; i < marray->num_monitors; ++i) {
			mon = &marray->monitors [i];
			/* NULL ->data means this record is unused or was collected */
			if (mon->data == NULL) {
				if (i < marray->num_monitors - 1)
			/* in-use record: ->data holds the GC weak link to the object */
			if (!monitor_is_on_freelist (mon->data)) {
				MonoObject *holder = mono_gc_weak_link_get (&mon->data);
				g_print ("Lock %p in object %p held by thread %p, nest level: %d\n",
					mon, holder, (void*)mon->owner, mon->nest);
				g_print ("\tWaiting on semaphore %p: %d\n", mon->entry_sem, mon->entry_count);
			} else if (include_untaken) {
				g_print ("Lock %p in object %p untaken\n", mon, holder);
	g_print ("Total locks (in %d array(s)): %d, used: %d, on freelist: %d, to recycle: %d\n",
		num_arrays, total, used, on_freelist, to_recycle);
/* LOCKING: this is called with monitor_mutex held */
/* Recycle the lock record @mon after the object that owned it has been
 * collected: close its semaphore and push it back onto the freelist. */
mon_finalize (MonoThreadsSync *mon)
	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": Finalizing sync %p", mon));

	if (mon->entry_sem != NULL) {
		CloseHandle (mon->entry_sem);
		mon->entry_sem = NULL;
	/* If this isn't empty then something is seriously broken - it
	 * means a thread is still waiting on the object that owned
	 * this lock, but the object has been finalized.
	 */
	g_assert (mon->wait_list == NULL);

	mon->entry_count = 0;
	/* owner and nest are set in mon_new, no need to zero them out */

	/* push onto the freelist, reusing ->data as the link field */
	mon->data = monitor_freelist;
	monitor_freelist = mon;
204 /* LOCKING: this is called with monitor_mutex held */
205 static MonoThreadsSync *
208 MonoThreadsSync *new;
210 if (!monitor_freelist) {
211 MonitorArray *marray;
213 /* see if any sync block has been collected */
215 for (marray = monitor_allocated; marray; marray = marray->next) {
216 for (i = 0; i < marray->num_monitors; ++i) {
217 if (marray->monitors [i].data == NULL) {
218 new = &marray->monitors [i];
219 new->data = monitor_freelist;
220 monitor_freelist = new;
223 /* small perf tweak to avoid scanning all the blocks */
227 /* need to allocate a new array of monitors */
228 if (!monitor_freelist) {
230 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": allocating more monitors: %d", array_size));
231 marray = g_malloc0 (sizeof (MonoArray) + array_size * sizeof (MonoThreadsSync));
232 marray->num_monitors = array_size;
234 /* link into the freelist */
235 for (i = 0; i < marray->num_monitors - 1; ++i) {
236 marray->monitors [i].data = &marray->monitors [i + 1];
238 marray->monitors [i].data = NULL; /* the last one */
239 monitor_freelist = &marray->monitors [0];
240 /* we happend the marray instead of prepending so that
241 * the collecting loop above will need to scan smaller arrays first
243 if (!monitor_allocated) {
244 monitor_allocated = marray;
246 last = monitor_allocated;
254 new = monitor_freelist;
255 monitor_freelist = new->data;
/*
 * Format of the lock word:
 * thinhash | fathash | data
 *
 * thinhash is the lower bit: if set data is the shifted hashcode of the object.
 * fathash is another bit: if set the hash code is stored in the MonoThreadsSync
 *   struct pointed to by data.
 * if neither bit is set and data is non-NULL, data is a MonoThreadsSync
 */
	/* pointer view of the lock word (union member; the union declaration
	 * itself is not visible in this excerpt) */
	MonoThreadsSync *sync;

	LOCK_WORD_THIN_HASH = 1,
	LOCK_WORD_FAT_HASH = 1 << 1,
	LOCK_WORD_BITS_MASK = 0x3,
	LOCK_WORD_HASH_SHIFT = 2

/* objects are at least 8-byte aligned, so the low 3 address bits carry no
 * information and can be shifted away when hashing */
#define MONO_OBJECT_ALIGNMENT_SHIFT 3
/*
 * mono_object_hash:
 * @obj: an object
 *
 * Calculate a hash code for @obj that is constant while @obj is alive.
 * With a moving collector the address cannot be used directly, so the hash
 * is computed once and cached in the lock word (thin hash) or in the
 * inflated lock record (fat hash).
 */
mono_object_hash (MonoObject* obj)
#ifdef HAVE_MOVING_COLLECTOR
	lw.sync = obj->synchronisation;
	if (lw.lock_word & LOCK_WORD_THIN_HASH) {
		/* hash already stored inline in the lock word */
		/*g_print ("fast thin hash %d for obj %p store\n", (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT, obj);*/
		return (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT;
	if (lw.lock_word & LOCK_WORD_FAT_HASH) {
		/* strip the flag bits to recover the MonoThreadsSync pointer */
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		/*g_print ("fast fat hash %d for obj %p store\n", lw.sync->hash_code, obj);*/
		return lw.sync->hash_code;
	/*
	 * while we are inside this function, the GC will keep this object pinned,
	 * since we are in the unmanaged stack. Thanks to this and to the hash
	 * function that depends only on the address, we can ignore the races if
	 * another thread computes the hash at the same time, because it'll end up
	 * with the same value.
	 */
	hash = (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
	/* clear the top bits as they can be discarded */
	hash &= ~(LOCK_WORD_BITS_MASK << 30);
	/* no hash flags were set, so it must be a MonoThreadsSync pointer if not NULL */
	lw.sync->hash_code = hash;
	/*g_print ("storing hash code %d for obj %p in sync %p\n", hash, obj, lw.sync);*/
	lw.lock_word |= LOCK_WORD_FAT_HASH;
	/* this is safe since we don't deflate locks */
	obj->synchronisation = lw.sync;
	/*g_print ("storing thin hash code %d for obj %p\n", hash, obj);*/
	lw.lock_word = LOCK_WORD_THIN_HASH | (hash << LOCK_WORD_HASH_SHIFT);
	/* publish the thin hash only if the lock word is still NULL */
	if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, NULL) == NULL)
	/*g_print ("failed store\n");*/
	/* someone set the hash flag or someone inflated the object */
	lw.sync = obj->synchronisation;
	if (lw.lock_word & LOCK_WORD_THIN_HASH)
	lw.lock_word &= ~LOCK_WORD_BITS_MASK;
	lw.sync->hash_code = hash;
	lw.lock_word |= LOCK_WORD_FAT_HASH;
	/* this is safe since we don't deflate locks */
	obj->synchronisation = lw.sync;
	/*
	 * Wang's address-based hash function:
	 * http://www.concentric.net/~Ttwang/tech/addrhash.htm
	 * (non-moving collector: the address itself is stable, so hash it
	 * directly without caching)
	 */
	return (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
/* If allow_interruption==TRUE, the method will be interrupted if abort or suspend
 * is requested. In this case it returns -1.
 *
 * Return value appears to be 1 = acquired, 0 = timed out, -1 = interrupted
 * (callers compare against 1; the debug messages below mention returning
 * FALSE on timeout) - TODO confirm against the non-visible return sites.
 */
mono_monitor_try_enter_internal (MonoObject *obj, guint32 ms, gboolean allow_interruption)
	MonoThreadsSync *mon;
	gsize id = GetCurrentThreadId ();
	guint32 then = 0, now, delta;

	LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION
		": (%d) Trying to lock object %p (%d ms)", id, obj, ms));

	if (G_UNLIKELY (!obj)) {
		mono_raise_exception (mono_get_exception_argument_null ("obj"));

	mon = obj->synchronisation;

	/* If the object has never been locked... */
	if (G_UNLIKELY (mon == NULL)) {
		mono_monitor_allocator_lock ();
		/* publish the new lock record only if no other thread raced us
		 * (the allocation of mon is not visible in this excerpt) */
		if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, mon, NULL) == NULL) {
			mono_gc_weak_link_add (&mon->data, obj);
			mono_monitor_allocator_unlock ();
			/* Successfully locked */
#ifdef HAVE_MOVING_COLLECTOR
		lw.sync = obj->synchronisation;
		if (lw.lock_word & LOCK_WORD_THIN_HASH) {
			MonoThreadsSync *oldlw = lw.sync;
			/* move the already calculated hash */
			mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
			lw.lock_word |= LOCK_WORD_FAT_HASH;
			if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
				mono_gc_weak_link_add (&mon->data, obj);
				mono_monitor_allocator_unlock ();
				/* Successfully locked */
			mono_monitor_allocator_unlock ();
		} else if (lw.lock_word & LOCK_WORD_FAT_HASH) {
			mono_monitor_allocator_unlock ();
			/* get the old lock without the fat hash bit */
			lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		mono_monitor_allocator_unlock ();
		mon = obj->synchronisation;
		mono_monitor_allocator_unlock ();
		mon = obj->synchronisation;

#ifdef HAVE_MOVING_COLLECTOR
	/* the lock word holds only a thin hash: inflate the lock, carrying
	 * the hash over into the new record */
	if (lw.lock_word & LOCK_WORD_THIN_HASH) {
		MonoThreadsSync *oldlw = lw.sync;
		mono_monitor_allocator_lock ();
		/* move the already calculated hash */
		mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
		lw.lock_word |= LOCK_WORD_FAT_HASH;
		if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
			mono_gc_weak_link_add (&mon->data, obj);
			mono_monitor_allocator_unlock ();
			/* Successfully locked */
		mono_monitor_allocator_unlock ();

#ifdef HAVE_MOVING_COLLECTOR
	lw.lock_word &= ~LOCK_WORD_BITS_MASK;

	/* If the object has previously been locked but isn't now... */

	/* This case differs from Dice's case 3 because we don't
	 * deflate locks or cache unused lock records
	 */
	if (G_LIKELY (mon->owner == 0)) {
		/* Try to install our ID in the owner field, nest
		 * should have been left at 1 by the previous unlock
		 */
		if (G_LIKELY (InterlockedCompareExchangePointer ((gpointer *)&mon->owner, (gpointer)id, 0) == 0)) {
			g_assert (mon->nest == 1);

	/* If the object is currently locked by this thread... */
	if (mon->owner == id) {

	/* The object must be locked by someone else... */

	/* If ms is 0 we don't block, but just fail straight away */
	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) timed out, returning FALSE", id));

	/* The slow path begins here. We need to make sure theres a
	 * semaphore handle (creating it if necessary), and block on it.
	 */
	if (mon->entry_sem == NULL) {
		/* Create the semaphore */
		sem = CreateSemaphore (NULL, 0, 0x7fffffff, NULL);
		g_assert (sem != NULL);
		/* install it atomically; lose the race gracefully */
		if (InterlockedCompareExchangePointer ((gpointer*)&mon->entry_sem, sem, NULL) != NULL) {
			/* Someone else just put a handle here */

	/* If we need to time out, record a timestamp and adjust ms,
	 * because WaitForSingleObject doesn't tell us how long it waited.
	 *
	 * Don't block forever here, because theres a chance the owner
	 * thread released the lock while we were creating the
	 * semaphore: we would not get the wakeup. Using the event
	 * handle technique from pulse/wait would involve locking the
	 * lock struct and therefore slowing down the fast path.
	 */
	if (ms != INFINITE) {
		then = mono_msec_ticks ();

	/* advertise that we are blocked, so exit() knows to release the semaphore */
	InterlockedIncrement (&mon->entry_count);

	thread = mono_thread_current ();

	mono_thread_set_state (thread, ThreadState_WaitSleepJoin);

	ret = WaitForSingleObjectEx (mon->entry_sem, waitms, allow_interruption);

	mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);

	InterlockedDecrement (&mon->entry_count);

	if (ms != INFINITE) {
		now = mono_msec_ticks ();

		/* The counter must have wrapped around */
		LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
			": wrapped around! now=0x%x then=0x%x", now, then));

		now += (0xffffffff - then);

		LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": wrap rejig: now=0x%x then=0x%x delta=0x%x", now, then, now-then));

	/* spurious wakeup / non-interrupting APC with time left: retry */
	if ((ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) && ms > 0) {
	if (ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) {
		/* Infinite wait, so just try again */

	if (ret == WAIT_OBJECT_0) {
		/* retry from the top */

	/* We must have timed out */
	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) timed out waiting, returning FALSE", id));

	if (ret == WAIT_IO_COMPLETION)
/* Blocking Monitor.Enter: wait forever, not interruptible.  Returns TRUE
 * once the lock is acquired. */
mono_monitor_enter (MonoObject *obj)
	return mono_monitor_try_enter_internal (obj, INFINITE, FALSE) == 1;
/* Monitor.TryEnter with a timeout of @ms milliseconds, not interruptible.
 * Returns TRUE if the lock was acquired within the timeout. */
mono_monitor_try_enter (MonoObject *obj, guint32 ms)
	return mono_monitor_try_enter_internal (obj, ms, FALSE) == 1;
/* Monitor.Exit: release one nesting level of the lock on @obj held by the
 * calling thread; an Exit on a never-locked object is silently ignored
 * (matching MS behaviour). */
mono_monitor_exit (MonoObject *obj)
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Unlocking %p", GetCurrentThreadId (), obj));

	if (G_UNLIKELY (!obj)) {
		mono_raise_exception (mono_get_exception_argument_null ("obj"));

	mon = obj->synchronisation;

#ifdef HAVE_MOVING_COLLECTOR
	/* a thin-hash-only lock word means the object was never locked */
	if (lw.lock_word & LOCK_WORD_THIN_HASH)
	/* strip the fat-hash flag bits to recover the sync pointer */
	lw.lock_word &= ~LOCK_WORD_BITS_MASK;

	if (G_UNLIKELY (mon == NULL)) {
		/* No one ever used Enter. Just ignore the Exit request as MS does */
	if (G_UNLIKELY (mon->owner != GetCurrentThreadId ())) {

	nest = mon->nest - 1;

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
		": (%d) Object %p is now unlocked", GetCurrentThreadId (), obj));

	/* object is now unlocked, leave nest==1 so we don't
	 * need to set it when the lock is reacquired
	 */

	/* Do the wakeup stuff. It's possible that the last
	 * blocking thread gave up waiting just before we
	 * release the semaphore resulting in a futile wakeup
	 * next time there's contention for this object, but
	 * it means we don't have to waste time locking the
	 * struct.
	 */
	if (mon->entry_count > 0) {
		ReleaseSemaphore (mon->entry_sem, 1, NULL);

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
		": (%d) Object %p is now locked %d times", GetCurrentThreadId (), obj, nest));
/* icall backing Monitor.TryEnter: interruptible variant; runs a thread
 * interruption checkpoint after an interrupted (-1) attempt. */
ves_icall_System_Threading_Monitor_Monitor_try_enter (MonoObject *obj, guint32 ms)
	res = mono_monitor_try_enter_internal (obj, ms, TRUE);
	mono_thread_interruption_checkpoint ();
/* Returns TRUE when the calling thread currently owns the monitor of @obj. */
ves_icall_System_Threading_Monitor_Monitor_test_owner (MonoObject *obj)
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
		": Testing if %p is owned by thread %d", obj, GetCurrentThreadId()));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	/* a thin-hash-only lock word means the object was never locked */
	if (lw.lock_word & LOCK_WORD_THIN_HASH)
	/* strip the flag bits to recover the sync pointer */
	lw.lock_word &= ~LOCK_WORD_BITS_MASK;
	if(mon->owner==GetCurrentThreadId ()) {
/* Returns TRUE when the monitor of @obj is currently held by any thread. */
ves_icall_System_Threading_Monitor_Monitor_test_synchronised (MonoObject *obj)
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION
		": (%d) Testing if %p is owned by any thread", GetCurrentThreadId (), obj));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	/* a thin-hash-only lock word means the object was never locked */
	if (lw.lock_word & LOCK_WORD_THIN_HASH)
	/* strip the flag bits to recover the sync pointer */
	lw.lock_word &= ~LOCK_WORD_BITS_MASK;
	/* owner == 0 means unheld */
	if (mon->owner != 0) {
/* All wait list manipulation in the pulse, pulseall and wait
 * functions happens while the monitor lock is held, so we don't need
 * any extra struct locking
 */

/* Monitor.Pulse: wake the first thread queued on @obj's wait list.  The
 * caller must own the monitor, otherwise a SynchronizationLockException
 * is raised. */
ves_icall_System_Threading_Monitor_Monitor_pulse (MonoObject *obj)
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Pulsing %p",
		GetCurrentThreadId (), obj));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	if (lw.lock_word & LOCK_WORD_THIN_HASH) {
		/* thin-hash-only word: the object has no lock record */
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
	lw.lock_word &= ~LOCK_WORD_BITS_MASK;
	mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
	if (mon->owner != GetCurrentThreadId ()) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) %d threads waiting",
		GetCurrentThreadId (), g_slist_length (mon->wait_list)));

	if (mon->wait_list != NULL) {
		LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
			": (%d) signalling and dequeuing handle %p",
			GetCurrentThreadId (), mon->wait_list->data));
		/* signal the head waiter's private event and drop it from the queue */
		SetEvent (mon->wait_list->data);
		mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
/* Monitor.PulseAll: wake every thread queued on @obj's wait list.  The
 * caller must own the monitor, otherwise a SynchronizationLockException
 * is raised. */
ves_icall_System_Threading_Monitor_Monitor_pulse_all (MonoObject *obj)
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION ": (%d) Pulsing all %p",
		GetCurrentThreadId (), obj));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	if (lw.lock_word & LOCK_WORD_THIN_HASH) {
		/* thin-hash-only word: the object has no lock record */
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
	lw.lock_word &= ~LOCK_WORD_BITS_MASK;
	mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
	if (mon->owner != GetCurrentThreadId ()) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) %d threads waiting",
		GetCurrentThreadId (), g_slist_length (mon->wait_list)));

	/* drain the whole queue, signalling each waiter's private event */
	while (mon->wait_list != NULL) {
		LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
			": (%d) signalling and dequeuing handle %p",
			GetCurrentThreadId (), mon->wait_list->data));
		SetEvent (mon->wait_list->data);
		mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
/* Monitor.Wait: queue a private auto-reset event on the monitor's wait
 * list, release the lock, block on the event (woken by Pulse/PulseAll or
 * timeout), then regain the lock with the saved nest count.
 * NOTE(review): this excerpt ends before the function does; the trailing
 * cleanup/return is not visible here. */
ves_icall_System_Threading_Monitor_Monitor_wait (MonoObject *obj, guint32 ms)
	MonoThreadsSync *mon;
	gboolean success = FALSE;
	MonoThread *thread = mono_thread_current ();

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
		": (%d) Trying to wait for %p with timeout %dms",
		GetCurrentThreadId (), obj, ms));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	if (lw.lock_word & LOCK_WORD_THIN_HASH) {
		/* thin-hash-only word: the object has no lock record */
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
	lw.lock_word &= ~LOCK_WORD_BITS_MASK;
	mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
	if (mon->owner != GetCurrentThreadId ()) {
		mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));

	/* Do this WaitSleepJoin check before creating the event handle */
	mono_thread_current_check_pending_interrupt ();

	event = CreateEvent (NULL, FALSE, FALSE, NULL);
	mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to set up wait event"));

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) queuing handle %p",
		GetCurrentThreadId (), event));

	mono_thread_current_check_pending_interrupt ();

	mono_thread_set_state (thread, ThreadState_WaitSleepJoin);

	mon->wait_list = g_slist_append (mon->wait_list, event);

	/* Save the nest count, and release the lock */
	mono_monitor_exit (obj);

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Unlocked %p lock %p",
		GetCurrentThreadId (), obj, mon));

	/* There's no race between unlocking mon and waiting for the
	 * event, because auto reset events are sticky, and this event
	 * is private to this thread. Therefore even if the event was
	 * signalled before we wait, we still succeed.
	 */
	ret = WaitForSingleObjectEx (event, ms, TRUE);

	/* Reset the thread state fairly early, so we don't have to worry
	 * about the monitor error checking
	 */
	mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);

	if (mono_thread_interruption_requested ()) {

	/* Regain the lock with the previous nest count */
	regain = mono_monitor_try_enter_internal (obj, INFINITE, TRUE);
	mono_thread_interruption_checkpoint ();
	} while (regain == -1);

	/* Something went wrong, so throw a
	 * SynchronizationLockException
	 */
	mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to regain lock"));

	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Regained %p lock %p",
		GetCurrentThreadId (), obj, mon));

	if (ret == WAIT_TIMEOUT) {
		/* Poll the event again, just in case it was signalled
		 * while we were trying to regain the monitor lock
		 */
		ret = WaitForSingleObjectEx (event, 0, FALSE);

	/* Pulse will have popped our event from the queue if it signalled
	 * us, so we only do it here if the wait timed out.
	 *
	 * This avoids a race condition where the thread holding the
	 * lock can Pulse several times before the WaitForSingleObject
	 * returns. If we popped the queue here then this event might
	 * be signalled more than once, thereby starving another
	 * thread.
	 */
	if (ret == WAIT_OBJECT_0) {
		LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Success",
			GetCurrentThreadId ()));
	LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Wait failed, dequeuing handle %p",
		GetCurrentThreadId (), event));
	/* No pulse, so we have to remove ourself from the
	 * wait queue
	 */
	mon->wait_list = g_slist_remove (mon->wait_list, event);