2 * monitor.c: Monitor locking functions
5 * Dick Porter (dick@ximian.com)
7 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/monitor.h>
14 #include <mono/metadata/threads-types.h>
15 #include <mono/metadata/exception.h>
16 #include <mono/metadata/threads.h>
17 #include <mono/io-layer/io-layer.h>
18 #include <mono/metadata/object-internals.h>
19 #include <mono/metadata/gc-internal.h>
20 #include <mono/utils/mono-time.h>
23 #define G_LIKELY(a) (a)
24 #define G_UNLIKELY(a) (a)
27 /*#define LOCK_DEBUG(a) do { a; } while (0)*/
31 * The monitor implementation here is based on
32 * http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf and
33 * http://www.research.ibm.com/people/d/dfb/papers/Bacon98Thin.ps
35 * The Dice paper describes a technique for saving lock record space
36 * by returning records to a free list when they become unused. That
37 * sounds like unnecessary complexity to me, though if it becomes
38 * clear that unused lock records are taking up lots of space or we
39 * need to shave more time off by avoiding a malloc then we can always
40 * implement the free list idea later. The timeout parameter to
41 * try_enter voids some of the assumptions about the reference count
42 * field in Dice's implementation too. In his version, the thread
43 * attempting to lock a contended object will block until it succeeds,
44 * so the reference count will never be decremented while an object is
47 * Bacon's thin locks have a fast path that doesn't need a lock record
48 * for the common case of locking an unlocked or shallow-nested
49 * object, but the technique relies on encoding the thread ID in 15
50 * bits (to avoid too much per-object space overhead.) Unfortunately
51 * I don't think it's possible to reliably encode a pthread_t into 15
52 * bits. (The JVM implementation used seems to have a 15-bit
53 * per-thread identifier available.)
55 * This implementation then combines Dice's basic lock model with
56 * Bacon's simplification of keeping a lock record for the lifetime of
/* Per-object "inflated" lock record.  NOTE(review): this listing is
 * incomplete - fields referenced elsewhere in the file (nest,
 * hash_code, entry_sem, wait_list, data) are not visible here;
 * confirm against the full source before relying on layout. */
60 struct _MonoThreadsSync
62 gsize owner; /* thread ID */
64 #ifdef HAVE_MOVING_COLLECTOR
/* Number of threads currently blocked on (or about to block on)
 * entry_sem; updated with Interlocked ops in try_enter_internal. */
67 volatile gint32 entry_count;
73 typedef struct _MonitorArray MonitorArray;
/* Chunk of lock records, allocated in bulk by mon_new; chained via a
 * next pointer (not visible in this listing). */
75 struct _MonitorArray {
78 MonoThreadsSync monitors [MONO_ZERO_LEN_ARRAY];
/* Global allocator state for lock records; all of it is guarded by
 * monitor_mutex via the two macros below. */
81 #define mono_monitor_allocator_lock() EnterCriticalSection (&monitor_mutex)
82 #define mono_monitor_allocator_unlock() LeaveCriticalSection (&monitor_mutex)
83 static CRITICAL_SECTION monitor_mutex;
/* Free list of unused MonoThreadsSync records, singly linked through
 * their data field (see mon_finalize / mon_new). */
84 static MonoThreadsSync *monitor_freelist;
/* List of every MonitorArray chunk ever allocated; scanned by
 * mon_new to recycle collected sync blocks. */
85 static MonitorArray *monitor_allocated;
/* Size (in records) of the next MonitorArray chunk to allocate. */
86 static int array_size = 16;
/* One-time runtime startup hook: initialize the allocator lock. */
89 mono_monitor_init (void)
91 InitializeCriticalSection (&monitor_mutex);
95 mono_monitor_cleanup (void)
/* Intentionally a no-op: the critical section is left alive.
 * NOTE(review): presumably because threads may still touch monitors
 * during shutdown - confirm before re-enabling the delete. */
97 /*DeleteCriticalSection (&monitor_mutex);*/
/* Helper for mono_locks_dump.  NOTE(review): the visible test only
 * checks that mon points inside some allocated MonitorArray chunk;
 * the remainder of the function (walking the free list proper) is
 * not visible in this listing. */
101 monitor_is_on_freelist (MonoThreadsSync *mon)
103 MonitorArray *marray;
104 for (marray = monitor_allocated; marray; marray = marray->next) {
105 if (mon >= marray->monitors && mon < &marray->monitors [marray->num_monitors])
115 * Print a report on stdout of the managed locks currently held by
116 * threads. If @include_untaken is specified, list also inflated locks
118 * This is supposed to be used in debuggers like gdb.
/* Debug-only diagnostic; walks the free list and every MonitorArray
 * chunk, printing each live lock record.  Not thread-safe by design
 * (meant to be invoked from a stopped process in gdb). */
121 mono_locks_dump (gboolean include_untaken)
124 int used = 0, on_freelist = 0, to_recycle = 0, total = 0, num_arrays = 0;
125 MonoThreadsSync *mon;
126 MonitorArray *marray;
/* Free-list entries are linked through the data field. */
127 for (mon = monitor_freelist; mon; mon = mon->data)
129 for (marray = monitor_allocated; marray; marray = marray->next) {
130 total += marray->num_monitors;
132 for (i = 0; i < marray->num_monitors; ++i) {
133 mon = &marray->monitors [i];
/* data == NULL means the record was collected / never used. */
134 if (mon->data == NULL) {
135 if (i < marray->num_monitors - 1)
138 if (!monitor_is_on_freelist (mon->data)) {
/* data holds a GC weak link back to the owning object. */
139 MonoObject *holder = mono_gc_weak_link_get (&mon->data);
141 g_print ("Lock %p in object %p held by thread %p, nest level: %d\n",
142 mon, holder, (void*)mon->owner, mon->nest);
144 g_print ("\tWaiting on semaphore %p: %d\n", mon->entry_sem, mon->entry_count);
145 } else if (include_untaken) {
146 g_print ("Lock %p in object %p untaken\n", mon, holder);
153 g_print ("Total locks (in %d array(s)): %d, used: %d, on freelist: %d, to recycle: %d\n",
154 num_arrays, total, used, on_freelist, to_recycle);
157 /* LOCKING: this is called with monitor_mutex held */
/* Recycle a lock record whose owning object has been finalized:
 * release its OS semaphore and push it back on monitor_freelist. */
159 mon_finalize (MonoThreadsSync *mon)
161 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": Finalizing sync %p", mon));
163 if (mon->entry_sem != NULL) {
164 CloseHandle (mon->entry_sem);
165 mon->entry_sem = NULL;
167 /* If this isn't empty then something is seriously broken - it
168 * means a thread is still waiting on the object that owned
169 * this lock, but the object has been finalized.
171 g_assert (mon->wait_list == NULL);
173 mon->entry_count = 0;
174 /* owner and nest are set in mon_new, no need to zero them out */
/* Push onto the free list; data doubles as the free-list link. */
176 mon->data = monitor_freelist;
177 monitor_freelist = mon;
180 /* LOCKING: this is called with monitor_mutex held */
/* Allocate (or recycle) a MonoThreadsSync lock record.  If the free
 * list is empty, first try to reclaim records whose weak links were
 * cleared by the GC; failing that, allocate a fresh MonitorArray
 * chunk of array_size records and thread it onto the free list. */
181 static MonoThreadsSync *
184 MonoThreadsSync *new;
186 if (!monitor_freelist) {
187 MonitorArray *marray;
189 /* see if any sync block has been collected */
191 for (marray = monitor_allocated; marray; marray = marray->next) {
192 for (i = 0; i < marray->num_monitors; ++i) {
/* data == NULL: the GC cleared the weak link, record is reusable. */
193 if (marray->monitors [i].data == NULL) {
194 new = &marray->monitors [i];
195 new->data = monitor_freelist;
196 monitor_freelist = new;
199 /* small perf tweak to avoid scanning all the blocks */
203 /* need to allocate a new array of monitors */
204 if (!monitor_freelist) {
206 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": allocating more monitors: %d", array_size));
/* BUGFIX: the header being allocated is a MonitorArray, not the
 * managed-array object MonoArray; the old sizeof (MonoArray) merely
 * over-allocated by accident. */
207 marray = g_malloc0 (sizeof (MonitorArray) + array_size * sizeof (MonoThreadsSync));
208 marray->num_monitors = array_size;
210 /* link into the freelist */
211 for (i = 0; i < marray->num_monitors - 1; ++i) {
212 marray->monitors [i].data = &marray->monitors [i + 1];
214 marray->monitors [i].data = NULL; /* the last one */
215 monitor_freelist = &marray->monitors [0];
216 /* we append the marray instead of prepending so that
217 * the collecting loop above will need to scan smaller arrays first
219 if (!monitor_allocated) {
220 monitor_allocated = marray;
222 last = monitor_allocated;
/* Pop the head of the free list; caller initializes owner/nest. */
230 new = monitor_freelist;
231 monitor_freelist = new->data;
240 * Format of the lock word:
241 * thinhash | fathash | data
243 * thinhash is the lower bit: if set data is the shifted hashcode of the object.
244 * fathash is another bit: if set the hash code is stored in the MonoThreadsSync
245 * struct pointed to by data
246 * if neither bit is set and data is non-NULL, data is a MonoThreadsSync
/* Union member overlaying the synchronisation pointer so the tag
 * bits below can be tested/masked on it as an integer. */
250 MonoThreadsSync *sync;
254 LOCK_WORD_THIN_HASH = 1,
255 LOCK_WORD_FAT_HASH = 1 << 1,
256 LOCK_WORD_BITS_MASK = 0x3,
257 LOCK_WORD_HASH_SHIFT = 2
/* Objects are at least 8-byte aligned, so the low 3 address bits are
 * free entropy to discard when hashing by address. */
260 #define MONO_OBJECT_ALIGNMENT_SHIFT 3
266 * Calculate a hash code for @obj that is constant while @obj is alive.
/* With a moving GC the object's address can change, so the hash is
 * computed once from the address and then cached - either inline in
 * the lock word (thin hash) or in the MonoThreadsSync (fat hash). */
269 mono_object_hash (MonoObject* obj)
271 #ifdef HAVE_MOVING_COLLECTOR
276 lw.sync = obj->synchronisation;
277 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
278 /*g_print ("fast thin hash %d for obj %p store\n", (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT, obj);*/
279 return (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT;
281 if (lw.lock_word & LOCK_WORD_FAT_HASH) {
/* Strip the tag bits to recover the MonoThreadsSync pointer. */
282 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
283 /*g_print ("fast fat hash %d for obj %p store\n", lw.sync->hash_code, obj);*/
284 return lw.sync->hash_code;
287 * while we are inside this function, the GC will keep this object pinned,
288 * since we are in the unmanaged stack. Thanks to this and to the hash
289 * function that depends only on the address, we can ignore the races if
290 * another thread computes the hash at the same time, because it'll end up
291 * with the same value.
/* Knuth multiplicative hash on the alignment-stripped address. */
293 hash = (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
294 /* clear the top bits as they can be discarded */
295 hash &= ~(LOCK_WORD_BITS_MASK << 30);
296 /* no hash flags were set, so it must be a MonoThreadsSync pointer if not NULL */
298 lw.sync->hash_code = hash;
299 /*g_print ("storing hash code %d for obj %p in sync %p\n", hash, obj, lw.sync);*/
300 lw.lock_word |= LOCK_WORD_FAT_HASH;
301 /* this is safe since we don't deflate locks */
302 obj->synchronisation = lw.sync;
304 /*g_print ("storing thin hash code %d for obj %p\n", hash, obj);*/
/* Uninflated object: try to publish the thin hash directly in the
 * lock word with a CAS against NULL. */
305 lw.lock_word = LOCK_WORD_THIN_HASH | (hash << LOCK_WORD_HASH_SHIFT);
306 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, NULL) == NULL)
308 /*g_print ("failed store\n");*/
309 /* someone set the hash flag or someone inflated the object */
310 lw.sync = obj->synchronisation;
311 if (lw.lock_word & LOCK_WORD_THIN_HASH)
313 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
314 lw.sync->hash_code = hash;
315 lw.lock_word |= LOCK_WORD_FAT_HASH;
316 /* this is safe since we don't deflate locks */
317 obj->synchronisation = lw.sync;
322 * Wang's address-based hash function:
323 * http://www.concentric.net/~Ttwang/tech/addrhash.htm
/* Non-moving GC: the address is stable, so hash it directly. */
325 return (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
329 /* If allow_interruption==TRUE, the method will be interrumped if abort or suspend
330 * is requested. In this case it returns -1.
/* Core Monitor.Enter/TryEnter implementation.  Returns 1 on success,
 * 0 on timeout, -1 if interrupted (only when allow_interruption).
 * Fast paths: inflate an unlocked object with a CAS, bump nest on
 * recursive entry, CAS the owner field on an unowned record.  Slow
 * path: block on a lazily-created per-record semaphore and retry. */
333 mono_monitor_try_enter_internal (MonoObject *obj, guint32 ms, gboolean allow_interruption)
335 MonoThreadsSync *mon;
336 gsize id = GetCurrentThreadId ();
338 guint32 then = 0, now, delta;
343 LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION
344 ": (%d) Trying to lock object %p (%d ms)", id, obj, ms));
347 mon = obj->synchronisation;
349 /* If the object has never been locked... */
350 if (G_UNLIKELY (mon == NULL)) {
/* The allocator lock also serializes inflation attempts. */
351 mono_monitor_allocator_lock ();
353 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, mon, NULL) == NULL) {
354 mono_gc_weak_link_add (&mon->data, obj);
355 mono_monitor_allocator_unlock ();
356 /* Successfully locked */
359 #ifdef HAVE_MOVING_COLLECTOR
/* The CAS lost: the word may now hold a thin hash, a fat-hash
 * pointer, or a plain MonoThreadsSync installed by another thread. */
361 lw.sync = obj->synchronisation;
362 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
363 MonoThreadsSync *oldlw = lw.sync;
364 /* move the already calculated hash */
365 mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
367 lw.lock_word |= LOCK_WORD_FAT_HASH;
368 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
369 mono_gc_weak_link_add (&mon->data, obj);
370 mono_monitor_allocator_unlock ();
371 /* Successfully locked */
375 mono_monitor_allocator_unlock ();
378 } else if (lw.lock_word & LOCK_WORD_FAT_HASH) {
380 mono_monitor_allocator_unlock ();
381 /* get the old lock without the fat hash bit */
382 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
386 mono_monitor_allocator_unlock ();
387 mon = obj->synchronisation;
391 mono_monitor_allocator_unlock ();
392 mon = obj->synchronisation;
396 #ifdef HAVE_MOVING_COLLECTOR
/* Object carries only a thin hash: inflate it, migrating the hash
 * into the new record before publishing it with a CAS. */
399 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
400 MonoThreadsSync *oldlw = lw.sync;
401 mono_monitor_allocator_lock ();
403 /* move the already calculated hash */
404 mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
406 lw.lock_word |= LOCK_WORD_FAT_HASH;
407 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
408 mono_gc_weak_link_add (&mon->data, obj);
409 mono_monitor_allocator_unlock ();
410 /* Successfully locked */
414 mono_monitor_allocator_unlock ();
421 #ifdef HAVE_MOVING_COLLECTOR
425 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
430 /* If the object is currently locked by this thread... */
431 if (mon->owner == id) {
436 /* If the object has previously been locked but isn't now... */
438 /* This case differs from Dice's case 3 because we don't
439 * deflate locks or cache unused lock records
441 if (G_LIKELY (mon->owner == 0)) {
442 /* Try to install our ID in the owner field, nest
443 * should have been left at 1 by the previous unlock
446 if (G_LIKELY (InterlockedCompareExchangePointer ((gpointer *)&mon->owner, (gpointer)id, 0) == 0)) {
448 g_assert (mon->nest == 1);
456 /* The object must be locked by someone else... */
458 /* If ms is 0 we don't block, but just fail straight away */
460 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) timed out, returning FALSE", id));
464 /* The slow path begins here. We need to make sure theres a
465 * semaphore handle (creating it if necessary), and block on
/* entry_sem is created lazily; losers of the CAS close their
 * duplicate handle (closing not visible in this listing). */
468 if (mon->entry_sem == NULL) {
469 /* Create the semaphore */
470 sem = CreateSemaphore (NULL, 0, 0x7fffffff, NULL);
471 g_assert (sem != NULL);
472 if (InterlockedCompareExchangePointer ((gpointer*)&mon->entry_sem, sem, NULL) != NULL) {
473 /* Someone else just put a handle here */
478 /* If we need to time out, record a timestamp and adjust ms,
479 * because WaitForSingleObject doesn't tell us how long it
482 * Don't block forever here, because theres a chance the owner
483 * thread released the lock while we were creating the
484 * semaphore: we would not get the wakeup. Using the event
485 * handle technique from pulse/wait would involve locking the
486 * lock struct and therefore slowing down the fast path.
488 if (ms != INFINITE) {
489 then = mono_msec_ticks ();
/* Advertise ourselves as a waiter so exit() knows to signal. */
499 InterlockedIncrement (&mon->entry_count);
501 thread = mono_thread_current ();
503 mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
505 ret = WaitForSingleObjectEx (mon->entry_sem, waitms, allow_interruption);
507 mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
509 InterlockedDecrement (&mon->entry_count);
511 if (ms != INFINITE) {
/* Recompute the remaining timeout from elapsed wall-clock ms. */
512 now = mono_msec_ticks ();
515 /* The counter must have wrapped around */
516 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
517 ": wrapped around! now=0x%x then=0x%x", now, then));
519 now += (0xffffffff - then);
522 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": wrap rejig: now=0x%x then=0x%x delta=0x%x", now, then, now-then));
/* WAIT_IO_COMPLETION (APC delivery) only aborts the wait when the
 * caller opted into interruption; otherwise retry with what's left. */
532 if ((ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) && ms > 0) {
537 if (ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) {
538 /* Infinite wait, so just try again */
543 if (ret == WAIT_OBJECT_0) {
544 /* retry from the top */
548 /* We must have timed out */
549 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) timed out waiting, returning FALSE", id));
551 if (ret == WAIT_IO_COMPLETION)
/* Blocking Monitor.Enter: wait forever, no interruption. */
558 mono_monitor_enter (MonoObject *obj)
560 return mono_monitor_try_enter_internal (obj, INFINITE, FALSE) == 1;
/* Monitor.TryEnter with a millisecond timeout, no interruption. */
564 mono_monitor_try_enter (MonoObject *obj, guint32 ms)
566 return mono_monitor_try_enter_internal (obj, ms, FALSE) == 1;
/* Monitor.Exit: decrement the nest count; on the final release clear
 * the owner and wake one semaphore waiter if any are queued.
 * Silently ignores an Exit on a never-entered object (MS behavior). */
570 mono_monitor_exit (MonoObject *obj)
572 MonoThreadsSync *mon;
575 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Unlocking %p", GetCurrentThreadId (), obj));
577 mon = obj->synchronisation;
579 #ifdef HAVE_MOVING_COLLECTOR
/* A thin-hash word means the lock was never inflated/taken. */
583 if (lw.lock_word & LOCK_WORD_THIN_HASH)
585 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
589 if (G_UNLIKELY (mon == NULL)) {
590 /* No one ever used Enter. Just ignore the Exit request as MS does */
593 if (G_UNLIKELY (mon->owner != GetCurrentThreadId ())) {
597 nest = mon->nest - 1;
599 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
600 ": (%d) Object %p is now unlocked", GetCurrentThreadId (), obj));
602 /* object is now unlocked, leave nest==1 so we don't
603 * need to set it when the lock is reacquired
607 /* Do the wakeup stuff. It's possible that the last
608 * blocking thread gave up waiting just before we
609 * release the semaphore resulting in a futile wakeup
610 * next time there's contention for this object, but
611 * it means we don't have to waste time locking the
614 if (mon->entry_count > 0) {
615 ReleaseSemaphore (mon->entry_sem, 1, NULL);
618 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
619 ": (%d) Object %p is now locked %d times", GetCurrentThreadId (), obj, nest));
/* icall wrapper for Monitor.TryEnter: interruptible; a -1 result
 * (interrupted) is turned into a pending-exception check. */
625 ves_icall_System_Threading_Monitor_Monitor_try_enter (MonoObject *obj, guint32 ms)
630 res = mono_monitor_try_enter_internal (obj, ms, TRUE);
632 mono_thread_interruption_checkpoint ();
/* icall wrapper for Monitor.Exit. */
639 ves_icall_System_Threading_Monitor_Monitor_exit (MonoObject *obj)
641 mono_monitor_exit (obj);
/* icall: does the current thread hold @obj's monitor?  A thin-hash
 * or NULL lock word means nobody does. */
645 ves_icall_System_Threading_Monitor_Monitor_test_owner (MonoObject *obj)
647 MonoThreadsSync *mon;
649 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
650 ": Testing if %p is owned by thread %d", obj, GetCurrentThreadId()));
652 mon = obj->synchronisation;
653 #ifdef HAVE_MOVING_COLLECTOR
657 if (lw.lock_word & LOCK_WORD_THIN_HASH)
659 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
667 if(mon->owner==GetCurrentThreadId ()) {
/* icall: is @obj's monitor held by any thread at all? */
675 ves_icall_System_Threading_Monitor_Monitor_test_synchronised (MonoObject *obj)
677 MonoThreadsSync *mon;
679 LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION
680 ": (%d) Testing if %p is owned by any thread", GetCurrentThreadId (), obj));
682 mon = obj->synchronisation;
683 #ifdef HAVE_MOVING_COLLECTOR
687 if (lw.lock_word & LOCK_WORD_THIN_HASH)
689 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
/* owner is a thread ID; non-zero means someone holds the lock. */
697 if (mon->owner != 0) {
704 /* All wait list manipulation in the pulse, pulseall and wait
705 * functions happens while the monitor lock is held, so we don't need
706 * any extra struct locking
/* icall Monitor.Pulse: signal and dequeue the first waiter's event.
 * Throws SynchronizationLockException unless the calling thread
 * holds @obj's (inflated) monitor. */
710 ves_icall_System_Threading_Monitor_Monitor_pulse (MonoObject *obj)
712 MonoThreadsSync *mon;
714 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Pulsing %p",
715 GetCurrentThreadId (), obj));
717 mon = obj->synchronisation;
718 #ifdef HAVE_MOVING_COLLECTOR
/* Thin-hash word: the monitor was never inflated, so never locked. */
722 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
723 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
726 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
731 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
734 if (mon->owner != GetCurrentThreadId ()) {
735 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
739 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) %d threads waiting",
740 GetCurrentThreadId (), g_slist_length (mon->wait_list)));
742 if (mon->wait_list != NULL) {
743 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
744 ": (%d) signalling and dequeuing handle %p",
745 GetCurrentThreadId (), mon->wait_list->data));
747 SetEvent (mon->wait_list->data);
748 mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
/* icall Monitor.PulseAll: like pulse, but drains the whole wait
 * list, signalling every queued event.  Same ownership checks. */
753 ves_icall_System_Threading_Monitor_Monitor_pulse_all (MonoObject *obj)
755 MonoThreadsSync *mon;
757 LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION ": (%d) Pulsing all %p",
758 GetCurrentThreadId (), obj));
760 mon = obj->synchronisation;
761 #ifdef HAVE_MOVING_COLLECTOR
765 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
766 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
769 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
774 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
777 if (mon->owner != GetCurrentThreadId ()) {
778 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
782 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) %d threads waiting",
783 GetCurrentThreadId (), g_slist_length (mon->wait_list)));
785 while (mon->wait_list != NULL) {
786 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
787 ": (%d) signalling and dequeuing handle %p",
788 GetCurrentThreadId (), mon->wait_list->data));
790 SetEvent (mon->wait_list->data);
791 mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
/* icall Monitor.Wait: queue a private auto-reset event on the
 * monitor's wait list, fully release the lock (saving the nest
 * count), block on the event with timeout @ms, then reacquire the
 * lock and restore nesting.  Returns whether a Pulse signalled us.
 * NOTE(review): this function continues past the end of this listing. */
796 ves_icall_System_Threading_Monitor_Monitor_wait (MonoObject *obj, guint32 ms)
798 MonoThreadsSync *mon;
802 gboolean success = FALSE;
804 MonoThread *thread = mono_thread_current ();
806 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
807 ": (%d) Trying to wait for %p with timeout %dms",
808 GetCurrentThreadId (), obj, ms));
810 mon = obj->synchronisation;
811 #ifdef HAVE_MOVING_COLLECTOR
/* Same ownership validation as pulse/pulse_all. */
815 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
816 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
819 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
824 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
827 if (mon->owner != GetCurrentThreadId ()) {
828 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
832 /* Do this WaitSleepJoin check before creating the event handle */
833 mono_thread_current_check_pending_interrupt ();
835 event = CreateEvent (NULL, FALSE, FALSE, NULL);
837 mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to set up wait event"));
841 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) queuing handle %p",
842 GetCurrentThreadId (), event));
844 mono_thread_current_check_pending_interrupt ();
846 mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
/* Safe to mutate the wait list: we still hold the monitor here. */
848 mon->wait_list = g_slist_append (mon->wait_list, event);
850 /* Save the nest count, and release the lock */
853 mono_monitor_exit (obj);
855 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Unlocked %p lock %p",
856 GetCurrentThreadId (), obj, mon));
858 /* There's no race between unlocking mon and waiting for the
859 * event, because auto reset events are sticky, and this event
860 * is private to this thread. Therefore even if the event was
861 * signalled before we wait, we still succeed.
863 ret = WaitForSingleObjectEx (event, ms, TRUE);
865 /* Reset the thread state fairly early, so we don't have to worry
866 * about the monitor error checking
868 mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
870 if (mono_thread_interruption_requested ()) {
875 /* Regain the lock with the previous nest count */
/* Loop until reacquired; -1 means we were interrupted mid-regain,
 * in which case run the interruption checkpoint and retry. */
877 regain = mono_monitor_try_enter_internal (obj, INFINITE, TRUE);
879 mono_thread_interruption_checkpoint ();
880 } while (regain == -1);
883 /* Something went wrong, so throw a
884 * SynchronizationLockException
887 mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to regain lock"));
893 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Regained %p lock %p",
894 GetCurrentThreadId (), obj, mon));
896 if (ret == WAIT_TIMEOUT) {
897 /* Poll the event again, just in case it was signalled
898 * while we were trying to regain the monitor lock
900 ret = WaitForSingleObjectEx (event, 0, FALSE);
903 /* Pulse will have popped our event from the queue if it signalled
904 * us, so we only do it here if the wait timed out.
906 * This avoids a race condition where the thread holding the
907 * lock can Pulse several times before the WaitForSingleObject
908 * returns. If we popped the queue here then this event might
909 * be signalled more than once, thereby starving another
913 if (ret == WAIT_OBJECT_0) {
914 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Success",
915 GetCurrentThreadId ()));
918 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Wait failed, dequeuing handle %p",
919 GetCurrentThreadId (), event));
920 /* No pulse, so we have to remove ourself from the
923 mon->wait_list = g_slist_remove (mon->wait_list, event);