2 * monitor.c: Monitor locking functions
5 * Dick Porter (dick@ximian.com)
7 * Copyright 2003 Ximian, Inc (http://www.ximian.com)
8 * Copyright 2004-2009 Novell, Inc (http://www.novell.com)
15 #include <mono/metadata/abi-details.h>
16 #include <mono/metadata/monitor.h>
17 #include <mono/metadata/threads-types.h>
18 #include <mono/metadata/exception.h>
19 #include <mono/metadata/threads.h>
20 #include <mono/io-layer/io-layer.h>
21 #include <mono/metadata/object-internals.h>
22 #include <mono/metadata/class-internals.h>
23 #include <mono/metadata/gc-internal.h>
24 #include <mono/metadata/method-builder.h>
25 #include <mono/metadata/debug-helpers.h>
26 #include <mono/metadata/tabledefs.h>
27 #include <mono/metadata/marshal.h>
28 #include <mono/metadata/profiler-private.h>
29 #include <mono/utils/mono-time.h>
30 #include <mono/utils/atomic.h>
33 * Pull the list of opcodes
35 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
39 #include "mono/cil/opcode.def"
44 /*#define LOCK_DEBUG(a) do { a; } while (0)*/
48 * The monitor implementation here is based on
49 * http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf and
50 * http://www.research.ibm.com/people/d/dfb/papers/Bacon98Thin.ps
52 * The Dice paper describes a technique for saving lock record space
53 * by returning records to a free list when they become unused. That
54 * sounds like unnecessary complexity to me, though if it becomes
55 * clear that unused lock records are taking up lots of space or we
56 * need to shave more time off by avoiding a malloc then we can always
57 * implement the free list idea later. The timeout parameter to
58 * try_enter voids some of the assumptions about the reference count
59 * field in Dice's implementation too. In his version, the thread
60 * attempting to lock a contended object will block until it succeeds,
61 * so the reference count will never be decremented while an object is
64 * Bacon's thin locks have a fast path that doesn't need a lock record
65 * for the common case of locking an unlocked or shallow-nested
66 * object, but the technique relies on encoding the thread ID in 15
67 * bits (to avoid too much per-object space overhead.) Unfortunately
68 * I don't think it's possible to reliably encode a pthread_t into 15
69 * bits. (The JVM implementation used seems to have a 15-bit
70 * per-thread identifier available.)
72 * This implementation then combines Dice's basic lock model with
73 * Bacon's simplification of keeping a lock record for the lifetime of
/* Chunked allocator unit for MonoThreadsSync records: each MonitorArray
 * carries a trailing flexible array of monitors handed out via mon_new().
 * NOTE(review): this region is truncated by extraction — the fields used
 * later (next, num_monitors) and the closing brace are not visible here. */
78 typedef struct _MonitorArray MonitorArray;
80 struct _MonitorArray {
83 MonoThreadsSync monitors [MONO_ZERO_LEN_ARRAY];
/* Global allocator state. All freelist/array manipulation below happens
 * under monitor_mutex, taken through these two macros. array_size doubles
 * as the size of the next MonitorArray to allocate (starts at 16; later
 * code presumably grows it — not visible in this extract). */
86 #define mono_monitor_allocator_lock() mono_mutex_lock (&monitor_mutex)
87 #define mono_monitor_allocator_unlock() mono_mutex_unlock (&monitor_mutex)
88 static mono_mutex_t monitor_mutex;
89 static MonoThreadsSync *monitor_freelist;
90 static MonitorArray *monitor_allocated;
91 static int array_size = 16;
/* Platform thread-id: a fast __thread cache of pthread_self() where
 * HAVE_KW_THREAD is available, a direct pthread_self() call otherwise.
 * NOTE(review): the #if/#else/#endif lines selecting between the two
 * #define branches are missing from this extracted view. */
94 static __thread gsize tls_pthread_self MONO_TLS_FAST;
99 #define GetCurrentThreadId() tls_pthread_self
102 * The usual problem: we can't replace GetCurrentThreadId () with a macro because
103 * it is in a public header.
105 #define GetCurrentThreadId() ((gsize)pthread_self ())
/* One-time initialization: create the recursive mutex guarding the
 * monitor allocator (recursive so allocator paths may re-enter). */
110 mono_monitor_init (void)
112 mono_mutex_init_recursive (&monitor_mutex);
/* Shutdown teardown: marks freelist monitors (which hold no GC weak
 * links) with a -1 sentinel in wait_list, then walks the allocated
 * arrays removing the weak links of every monitor not so marked.
 * Note the freelist is threaded through mon->data (see mon_finalize).
 * NOTE(review): the array-walking loop is commented out / fragmented in
 * this view, and the FIXME below says it still crashes under sgen. */
116 mono_monitor_cleanup (void)
118 MonoThreadsSync *mon;
119 /* MonitorArray *marray, *next = NULL; */
121 /*mono_mutex_destroy (&monitor_mutex);*/
123 /* The monitors on the freelist don't have weak links - mark them */
124 for (mon = monitor_freelist; mon; mon = mon->data)
125 mon->wait_list = (gpointer)-1;
127 /* FIXME: This still crashes with sgen (async_read.exe) */
129 for (marray = monitor_allocated; marray; marray = next) {
132 for (i = 0; i < marray->num_monitors; ++i) {
133 mon = &marray->monitors [i];
134 if (mon->wait_list != (gpointer)-1)
135 mono_gc_weak_link_remove (&mon->data);
145 * mono_monitor_init_tls:
147 * Setup TLS variables used by the monitor code for the current thread.
/* Caches pthread_self() into the __thread variable consumed by the fast
 * GetCurrentThreadId() macro above; a no-op on Win32 or without
 * HAVE_KW_THREAD. Must run once per thread before monitor use. */
150 mono_monitor_init_tls (void)
152 #if !defined(HOST_WIN32) && defined(HAVE_KW_THREAD)
153 tls_pthread_self = (gsize) pthread_self ();
/* Debug helper for mono_locks_dump(): linear scan of the allocated
 * arrays to decide whether @mon lies inside one of them (the visible
 * range test); the freelist-membership check itself is in the lines
 * missing from this extract. */
158 monitor_is_on_freelist (MonoThreadsSync *mon)
160 MonitorArray *marray;
161 for (marray = monitor_allocated; marray; marray = marray->next) {
162 if (mon >= marray->monitors && mon < &marray->monitors [marray->num_monitors])
172 * Print a report on stdout of the managed locks currently held by
173 * threads. If @include_untaken is specified, list also inflated locks
175 * This is supposed to be used in debuggers like gdb.
/* Diagnostic-only walk: counts freelist entries, then per-array entries,
 * printing holder object / owning thread / nest level for taken locks.
 * Runs lock-free, so the output is best-effort (debugger use only). */
178 mono_locks_dump (gboolean include_untaken)
181 int used = 0, on_freelist = 0, to_recycle = 0, total = 0, num_arrays = 0;
182 MonoThreadsSync *mon;
183 MonitorArray *marray;
184 for (mon = monitor_freelist; mon; mon = mon->data)
186 for (marray = monitor_allocated; marray; marray = marray->next) {
187 total += marray->num_monitors;
189 for (i = 0; i < marray->num_monitors; ++i) {
190 mon = &marray->monitors [i];
191 if (mon->data == NULL) {
192 if (i < marray->num_monitors - 1)
195 if (!monitor_is_on_freelist (mon->data)) {
196 MonoObject *holder = mono_gc_weak_link_get (&mon->data);
198 g_print ("Lock %p in object %p held by thread %p, nest level: %d\n",
199 mon, holder, (void*)mon->owner, mon->nest);
201 g_print ("\tWaiting on semaphore %p: %d\n", mon->entry_sem, mon->entry_count);
202 } else if (include_untaken) {
203 g_print ("Lock %p in object %p untaken\n", mon, holder);
210 g_print ("Total locks (in %d array(s)): %d, used: %d, on freelist: %d, to recycle: %d\n",
211 num_arrays, total, used, on_freelist, to_recycle);
214 /* LOCKING: this is called with monitor_mutex held */
/* Returns a dead object's sync block to the freelist: closes the entry
 * semaphore, asserts no thread is still waiting (a non-empty wait_list
 * here means a thread waits on a finalized object — fatal), and threads
 * the record onto monitor_freelist via its data field. */
216 mon_finalize (MonoThreadsSync *mon)
218 LOCK_DEBUG (g_message ("%s: Finalizing sync %p", __func__, mon));
220 if (mon->entry_sem != NULL) {
221 CloseHandle (mon->entry_sem);
222 mon->entry_sem = NULL;
224 /* If this isn't empty then something is seriously broken - it
225 * means a thread is still waiting on the object that owned
226 * this lock, but the object has been finalized.
228 g_assert (mon->wait_list == NULL);
230 mon->entry_count = 0;
231 /* owner and nest are set in mon_new, no need to zero them out */
233 mon->data = monitor_freelist;
234 monitor_freelist = mon;
235 #ifndef DISABLE_PERFCOUNTERS
236 mono_perfcounters->gc_sync_blocks--;
240 /* LOCKING: this is called with monitor_mutex held */
/* Allocates a MonoThreadsSync for a newly-inflated lock. Strategy:
 * 1) if the freelist is empty, scan the allocated arrays for records
 *    whose owning object was collected (data == NULL), recycling them
 *    (closing orphaned wait events left by aborted threads);
 * 2) failing that, g_malloc0 a fresh MonitorArray and chain all its
 *    records onto the freelist via the data field;
 * 3) pop the freelist head. Owner/nest initialization happens in the
 *    lines missing from this extract. */
241 static MonoThreadsSync *
244 MonoThreadsSync *new;
246 if (!monitor_freelist) {
247 MonitorArray *marray;
249 /* see if any sync block has been collected */
251 for (marray = monitor_allocated; marray; marray = marray->next) {
252 for (i = 0; i < marray->num_monitors; ++i) {
253 if (marray->monitors [i].data == NULL) {
254 new = &marray->monitors [i];
255 if (new->wait_list) {
256 /* Orphaned events left by aborted threads */
257 while (new->wait_list) {
258 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d): Closing orphaned event %d", GetCurrentThreadId (), new->wait_list->data));
259 CloseHandle (new->wait_list->data);
260 new->wait_list = g_slist_remove (new->wait_list, new->wait_list->data);
263 mono_gc_weak_link_remove (&new->data, FALSE);
264 new->data = monitor_freelist;
265 monitor_freelist = new;
268 /* small perf tweak to avoid scanning all the blocks */
272 /* need to allocate a new array of monitors */
273 if (!monitor_freelist) {
275 LOCK_DEBUG (g_message ("%s: allocating more monitors: %d", __func__, array_size));
/* NOTE(review): sizeof (MonoArray) below looks wrong — this allocation is
 * a MonitorArray header plus its trailing monitors, so it should almost
 * certainly read sizeof (MonitorArray). Confirm against upstream before
 * changing; over-allocation hides the bug, under-allocation would not. */
276 marray = g_malloc0 (sizeof (MonoArray) + array_size * sizeof (MonoThreadsSync));
277 marray->num_monitors = array_size;
279 /* link into the freelist */
280 for (i = 0; i < marray->num_monitors - 1; ++i) {
281 marray->monitors [i].data = &marray->monitors [i + 1];
283 marray->monitors [i].data = NULL; /* the last one */
284 monitor_freelist = &marray->monitors [0];
285 /* we append the marray instead of prepending so that
286 * the collecting loop above will need to scan smaller arrays first
288 if (!monitor_allocated) {
289 monitor_allocated = marray;
291 last = monitor_allocated;
299 new = monitor_freelist;
300 monitor_freelist = new->data;
306 #ifndef DISABLE_PERFCOUNTERS
307 mono_perfcounters->gc_sync_blocks++;
313 * Format of the lock word:
314 * thinhash | fathash | data
316 * thinhash is the lower bit: if set data is the shifted hashcode of the object.
317 * fathash is another bit: if set the hash code is stored in the MonoThreadsSync
318 * struct pointed to by data
319 * if neither bit is set and data is non-NULL, data is a MonoThreadsSync
/* The union below type-puns obj->synchronisation between a pointer and an
 * integer lock word so the two tag bits can be tested/masked. The union
 * opening and its integer member are missing from this extracted view. */
323 MonoThreadsSync *sync;
327 LOCK_WORD_THIN_HASH = 1,
328 LOCK_WORD_FAT_HASH = 1 << 1,
329 LOCK_WORD_BITS_MASK = 0x3,
330 LOCK_WORD_HASH_SHIFT = 2
/* Objects are at least 8-byte aligned, so the low 3 address bits carry no
 * entropy and are shifted out before hashing. */
333 #define MONO_OBJECT_ALIGNMENT_SHIFT 3
339 * Calculate a hash code for @obj that is constant while @obj is alive.
/* With a moving collector the address cannot be the hash, so the hash is
 * computed once (multiplicative hash of the address, tag bits masked off
 * the top) and then stored either inline in the lock word (thin hash) or
 * in the inflated MonoThreadsSync (fat hash). Plain-store races are
 * tolerated because every racer computes the same value; only the
 * thin-hash installation into an unlocked word uses a CAS. */
342 mono_object_hash (MonoObject* obj)
344 #ifdef HAVE_MOVING_COLLECTOR
349 lw.sync = obj->synchronisation;
350 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
351 /*g_print ("fast thin hash %d for obj %p store\n", (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT, obj);*/
352 return (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT;
354 if (lw.lock_word & LOCK_WORD_FAT_HASH) {
355 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
356 /*g_print ("fast fat hash %d for obj %p store\n", lw.sync->hash_code, obj);*/
357 return lw.sync->hash_code;
360 * while we are inside this function, the GC will keep this object pinned,
361 * since we are in the unmanaged stack. Thanks to this and to the hash
362 * function that depends only on the address, we can ignore the races if
363 * another thread computes the hash at the same time, because it'll end up
364 * with the same value.
366 hash = (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
367 /* clear the top bits as they can be discarded */
368 hash &= ~(LOCK_WORD_BITS_MASK << 30);
369 /* no hash flags were set, so it must be a MonoThreadsSync pointer if not NULL */
371 lw.sync->hash_code = hash;
372 /*g_print ("storing hash code %d for obj %p in sync %p\n", hash, obj, lw.sync);*/
373 lw.lock_word |= LOCK_WORD_FAT_HASH;
374 /* this is safe since we don't deflate locks */
375 obj->synchronisation = lw.sync;
377 /*g_print ("storing thin hash code %d for obj %p\n", hash, obj);*/
378 lw.lock_word = LOCK_WORD_THIN_HASH | (hash << LOCK_WORD_HASH_SHIFT);
379 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, NULL) == NULL)
381 /*g_print ("failed store\n");*/
382 /* someone set the hash flag or someone inflated the object */
383 lw.sync = obj->synchronisation;
384 if (lw.lock_word & LOCK_WORD_THIN_HASH)
386 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
387 lw.sync->hash_code = hash;
388 lw.lock_word |= LOCK_WORD_FAT_HASH;
389 /* this is safe since we don't deflate locks */
390 obj->synchronisation = lw.sync;
/* Non-moving-collector fallback: the address itself is stable. */
395 * Wang's address-based hash function:
396 * http://www.concentric.net/~Ttwang/tech/addrhash.htm
398 return (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
402 /* If allow_interruption==TRUE, the method will be interrupted if abort or suspend
403 * is requested. In this case it returns -1.
/*
 * mono_monitor_try_enter_internal:
 * Try to acquire @obj's monitor within @ms milliseconds (INFINITE to
 * block forever). Returns 1 on success, 0 on timeout/failure, and -1
 * when interrupted (only possible if @allow_interruption is TRUE).
 *
 * Structure (per the Dice/Bacon papers referenced at the top of the
 * file): fast path inflates an unlocked object with a fresh sync block
 * via CAS, handles thin/fat-hash lock words, then tries to CAS our
 * thread id into mon->owner; recursion just bumps mon->nest. The
 * contended slow path blocks on a lazily-created semaphore with
 * timeout accounting and abort/suspend handling.
 *
 * NOTE(review): this extracted view is missing many lines (the retry /
 * retry_contended labels, several braces, returns and declarations);
 * the code below is kept byte-identical to the extract.
 */
406 mono_monitor_try_enter_internal (MonoObject *obj, guint32 ms, gboolean allow_interruption)
408 MonoThreadsSync *mon;
409 gsize id = GetCurrentThreadId ();
411 guint32 then = 0, now, delta;
414 MonoInternalThread *thread;
416 LOCK_DEBUG (g_message("%s: (%d) Trying to lock object %p (%d ms)", __func__, id, obj, ms));
418 if (G_UNLIKELY (!obj)) {
419 mono_raise_exception (mono_get_exception_argument_null ("obj"));
424 mon = obj->synchronisation;
426 /* If the object has never been locked... */
427 if (G_UNLIKELY (mon == NULL)) {
428 mono_monitor_allocator_lock ();
430 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, mon, NULL) == NULL) {
431 mono_gc_weak_link_add (&mon->data, obj, FALSE);
432 mono_monitor_allocator_unlock ();
433 /* Successfully locked */
436 #ifdef HAVE_MOVING_COLLECTOR
438 lw.sync = obj->synchronisation;
439 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
440 MonoThreadsSync *oldlw = lw.sync;
441 /* move the already calculated hash */
442 mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
444 lw.lock_word |= LOCK_WORD_FAT_HASH;
445 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
446 mono_gc_weak_link_add (&mon->data, obj, FALSE);
447 mono_monitor_allocator_unlock ();
448 /* Successfully locked */
452 mono_monitor_allocator_unlock ();
455 } else if (lw.lock_word & LOCK_WORD_FAT_HASH) {
457 mono_monitor_allocator_unlock ();
458 /* get the old lock without the fat hash bit */
459 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
463 mono_monitor_allocator_unlock ();
464 mon = obj->synchronisation;
468 mono_monitor_allocator_unlock ();
469 mon = obj->synchronisation;
473 #ifdef HAVE_MOVING_COLLECTOR
476 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
477 MonoThreadsSync *oldlw = lw.sync;
478 mono_monitor_allocator_lock ();
480 /* move the already calculated hash */
481 mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
483 lw.lock_word |= LOCK_WORD_FAT_HASH;
484 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
485 mono_gc_weak_link_add (&mon->data, obj, TRUE);
486 mono_monitor_allocator_unlock ();
487 /* Successfully locked */
491 mono_monitor_allocator_unlock ();
498 #ifdef HAVE_MOVING_COLLECTOR
502 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
507 /* If the object has previously been locked but isn't now... */
509 /* This case differs from Dice's case 3 because we don't
510 * deflate locks or cache unused lock records
512 if (G_LIKELY (mon->owner == 0)) {
513 /* Try to install our ID in the owner field, nest
514 * should have been left at 1 by the previous unlock
517 if (G_LIKELY (InterlockedCompareExchangePointer ((gpointer *)&mon->owner, (gpointer)id, 0) == 0)) {
519 g_assert (mon->nest == 1);
527 /* If the object is currently locked by this thread... */
528 if (mon->owner == id) {
533 /* The object must be locked by someone else... */
534 #ifndef DISABLE_PERFCOUNTERS
535 mono_perfcounters->thread_contentions++;
538 /* If ms is 0 we don't block, but just fail straight away */
540 LOCK_DEBUG (g_message ("%s: (%d) timed out, returning FALSE", __func__, id));
544 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_CONTENTION);
546 /* The slow path begins here. */
548 /* a small amount of duplicated code, but it allows us to insert the profiler
549 * callbacks without impacting the fast path: from here on we don't need to go back to the
550 * retry label, but to retry_contended. At this point mon is already installed in the object
553 /* This case differs from Dice's case 3 because we don't
554 * deflate locks or cache unused lock records
556 if (G_LIKELY (mon->owner == 0)) {
557 /* Try to install our ID in the owner field, nest
558 * should have been left at 1 by the previous unlock
561 if (G_LIKELY (InterlockedCompareExchangePointer ((gpointer *)&mon->owner, (gpointer)id, 0) == 0)) {
563 g_assert (mon->nest == 1);
564 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_DONE);
569 /* If the object is currently locked by this thread... */
570 if (mon->owner == id) {
572 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_DONE);
576 /* We need to make sure there's a semaphore handle (creating it if
577 * necessary), and block on it
579 if (mon->entry_sem == NULL) {
580 /* Create the semaphore */
581 sem = CreateSemaphore (NULL, 0, 0x7fffffff, NULL);
582 g_assert (sem != NULL);
583 if (InterlockedCompareExchangePointer ((gpointer*)&mon->entry_sem, sem, NULL) != NULL) {
584 /* Someone else just put a handle here */
589 /* If we need to time out, record a timestamp and adjust ms,
590 * because WaitForSingleObject doesn't tell us how long it
593 * Don't block forever here, because theres a chance the owner
594 * thread released the lock while we were creating the
595 * semaphore: we would not get the wakeup. Using the event
596 * handle technique from pulse/wait would involve locking the
597 * lock struct and therefore slowing down the fast path.
599 if (ms != INFINITE) {
600 then = mono_msec_ticks ();
610 InterlockedIncrement (&mon->entry_count);
612 #ifndef DISABLE_PERFCOUNTERS
613 mono_perfcounters->thread_queue_len++;
614 mono_perfcounters->thread_queue_max++;
616 thread = mono_thread_internal_current ();
618 mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
621 * We pass TRUE instead of allow_interruption since we have to check for the
622 * StopRequested case below.
624 ret = WaitForSingleObjectEx (mon->entry_sem, waitms, TRUE);
626 mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
628 InterlockedDecrement (&mon->entry_count);
629 #ifndef DISABLE_PERFCOUNTERS
630 mono_perfcounters->thread_queue_len--;
633 if (ms != INFINITE) {
634 now = mono_msec_ticks ();
637 /* The counter must have wrapped around */
638 LOCK_DEBUG (g_message ("%s: wrapped around! now=0x%x then=0x%x", __func__, now, then));
640 now += (0xffffffff - then);
643 LOCK_DEBUG (g_message ("%s: wrap rejig: now=0x%x then=0x%x delta=0x%x", __func__, now, then, now-then));
653 if ((ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) && ms > 0) {
655 goto retry_contended;
658 if (ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) {
659 if (ret == WAIT_IO_COMPLETION && (mono_thread_test_state (mono_thread_internal_current (), (ThreadState_StopRequested|ThreadState_SuspendRequested)))) {
661 * We have to obey a stop/suspend request even if
662 * allow_interruption is FALSE to avoid hangs at shutdown.
664 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_FAIL);
667 /* Infinite wait, so just try again */
668 goto retry_contended;
672 if (ret == WAIT_OBJECT_0) {
673 /* retry from the top */
674 goto retry_contended;
677 /* We must have timed out */
678 LOCK_DEBUG (g_message ("%s: (%d) timed out waiting, returning FALSE", __func__, id));
680 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_FAIL);
682 if (ret == WAIT_IO_COMPLETION)
689 mono_monitor_enter (MonoObject *obj)
691 return mono_monitor_try_enter_internal (obj, INFINITE, FALSE) == 1;
695 mono_monitor_try_enter (MonoObject *obj, guint32 ms)
697 return mono_monitor_try_enter_internal (obj, ms, FALSE) == 1;
/*
 * mono_monitor_exit:
 * Release @obj's monitor. Unwraps thin/fat-hash lock words first;
 * silently ignores an Exit with no matching Enter (matching MS
 * behavior, per the comment below). Decrements the nest count; on the
 * final release, owner is cleared (nest stays 1 for the next acquire)
 * and one waiter, if any, is woken via the entry semaphore.
 * NOTE(review): the owner-clearing store and several braces/returns are
 * in lines missing from this extracted view.
 */
703 MonoThreadsSync *mon;
706 LOCK_DEBUG (g_message ("%s: (%d) Unlocking %p", __func__, GetCurrentThreadId (), obj));
708 if (G_UNLIKELY (!obj)) {
709 mono_raise_exception (mono_get_exception_argument_null ("obj"));
713 mon = obj->synchronisation;
715 #ifdef HAVE_MOVING_COLLECTOR
719 if (lw.lock_word & LOCK_WORD_THIN_HASH)
721 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
725 if (G_UNLIKELY (mon == NULL)) {
726 /* No one ever used Enter. Just ignore the Exit request as MS does */
729 if (G_UNLIKELY (mon->owner != GetCurrentThreadId ())) {
733 nest = mon->nest - 1;
735 LOCK_DEBUG (g_message ("%s: (%d) Object %p is now unlocked", __func__, GetCurrentThreadId (), obj));
737 /* object is now unlocked, leave nest==1 so we don't
738 * need to set it when the lock is reacquired
742 /* Do the wakeup stuff. It's possible that the last
743 * blocking thread gave up waiting just before we
744 * release the semaphore resulting in a futile wakeup
745 * next time there's contention for this object, but
746 * it means we don't have to waste time locking the
749 if (mon->entry_count > 0) {
750 ReleaseSemaphore (mon->entry_sem, 1, NULL);
753 LOCK_DEBUG (g_message ("%s: (%d) Object %p is now locked %d times", __func__, GetCurrentThreadId (), obj, nest));
/* Returns the address of the GC weak link inside @object's inflated
 * sync block (the &sync->data slot), or nothing when the lock word is
 * only a thin hash / NULL — the return statements are in lines missing
 * from this extracted view. Used by the GC to track object liveness. */
759 mono_monitor_get_object_monitor_weak_link (MonoObject *object)
762 MonoThreadsSync *sync = NULL;
764 lw.sync = object->synchronisation;
765 if (lw.lock_word & LOCK_WORD_FAT_HASH) {
766 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
768 } else if (!(lw.lock_word & LOCK_WORD_THIN_HASH)) {
772 if (sync && sync->data)
/*
 * emit_obj_syncp_check:
 * Shared IL prologue for the managed Monitor fastpath wrappers built
 * below. Emits: a null check on the object arg (branch out via
 * *obj_null_branch), an optional early-out when the v4 lockTaken byref
 * arg is already true, a load of obj->synchronisation into @syncp_loc,
 * and — for moving GCs — a thin-hash bailout plus masking of the two
 * lock-word tag bits. Finally branches on syncp being (non-)null per
 * @branch_on_true. Out-params receive branch fixup tokens for the
 * caller to patch.
 */
780 emit_obj_syncp_check (MonoMethodBuilder *mb, int syncp_loc, int *obj_null_branch, int *true_locktaken_branch, int *syncp_true_false_branch,
781 int *thin_hash_branch, gboolean branch_on_true)
788 mono_mb_emit_byte (mb, CEE_LDARG_0);
789 *obj_null_branch = mono_mb_emit_short_branch (mb, CEE_BRFALSE_S);
794 brtrue.s true_locktaken
796 if (true_locktaken_branch) {
797 mono_mb_emit_byte (mb, CEE_LDARG_1);
798 mono_mb_emit_byte (mb, CEE_LDIND_I1);
799 *true_locktaken_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);
805 ldc.i4 MONO_STRUCT_OFFSET(MonoObject, synchronisation) objp off
810 brtrue/false.s syncp_true_false
813 mono_mb_emit_byte (mb, CEE_LDARG_0);
814 mono_mb_emit_byte (mb, CEE_CONV_I);
815 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoObject, synchronisation));
816 mono_mb_emit_byte (mb, CEE_ADD);
817 mono_mb_emit_byte (mb, CEE_LDIND_I);
818 mono_mb_emit_stloc (mb, syncp_loc);
821 if (mono_gc_is_moving ()) {
822 /*check for a thin hash*/
823 mono_mb_emit_ldloc (mb, syncp_loc);
824 mono_mb_emit_icon (mb, 0x01);
825 mono_mb_emit_byte (mb, CEE_CONV_I);
826 mono_mb_emit_byte (mb, CEE_AND);
827 *thin_hash_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);
830 mono_mb_emit_ldloc (mb, syncp_loc);
831 mono_mb_emit_icon (mb, ~0x3);
832 mono_mb_emit_byte (mb, CEE_CONV_I);
833 mono_mb_emit_byte (mb, CEE_AND);
834 mono_mb_emit_stloc (mb, syncp_loc);
836 *thin_hash_branch = 0;
839 mono_mb_emit_ldloc (mb, syncp_loc);
840 *syncp_true_false_branch = mono_mb_emit_short_branch (mb, branch_on_true ? CEE_BRTRUE_S : CEE_BRFALSE_S);
/* Cache of the three generated IL fastpath wrappers (enter, enter-v4,
 * exit); published via register_fastpath() below. */
845 static MonoMethod* monitor_il_fastpaths[3];
/* Returns whether @method is one of the cached fastpath wrappers —
 * the TRUE/FALSE returns are in lines missing from this extract. */
848 mono_monitor_is_il_fastpath_wrapper (MonoMethod *method)
851 for (i = 0; i < 3; ++i) {
852 if (monitor_il_fastpaths [i] == method)
/* Publishes a freshly created wrapper into the fastpath cache. The
 * memory barrier orders the method's construction before the store so
 * racing readers of monitor_il_fastpaths never see a half-built method.
 * Callers use its result (see the register_fastpath(...) call sites
 * below); the return statement is missing from this extracted view. */
866 register_fastpath (MonoMethod *method, int idx)
868 mono_memory_barrier ();
869 monitor_il_fastpaths [idx] = method;
/*
 * mono_monitor_get_fast_enter_method:
 * Builds (once, then caches) an IL wrapper that inlines the
 * uncontended Monitor.Enter fast path: null/lockTaken/syncp checks via
 * emit_obj_syncp_check, then a TLS load of the current thread id, a
 * CompareExchange of owner from 0 to tid (sets lockTaken and returns),
 * a recursion bump of nest when owner == tid, and a fallback managed
 * call to the real @monitor_enter_method for every slow case.
 * Requires TLS_KEY_THREAD support and the (intptr&,intptr,intptr)
 * Interlocked.CompareExchange overload; returns NULL-ish otherwise
 * (exact early returns are in lines missing from this extract).
 */
874 mono_monitor_get_fast_enter_method (MonoMethod *monitor_enter_method)
876 MonoMethodBuilder *mb;
878 static MonoMethod *compare_exchange_method;
879 int obj_null_branch, true_locktaken_branch = 0, syncp_null_branch, has_owner_branch, other_owner_branch, tid_branch, thin_hash_branch;
880 int tid_loc, syncp_loc, owner_loc;
881 gboolean is_v4 = mono_method_signature (monitor_enter_method)->param_count == 2;
882 int fast_path_idx = is_v4 ? FASTPATH_ENTERV4 : FASTPATH_ENTER;
885 /* The !is_v4 version is not used/tested */
888 if (monitor_il_fastpaths [fast_path_idx])
889 return monitor_il_fastpaths [fast_path_idx];
891 if (!mono_get_runtime_callbacks ()->tls_key_supported (TLS_KEY_THREAD))
894 if (!compare_exchange_method) {
895 MonoMethodDesc *desc;
898 desc = mono_method_desc_new ("Interlocked:CompareExchange(intptr&,intptr,intptr)", FALSE);
899 class = mono_class_from_name (mono_defaults.corlib, "System.Threading", "Interlocked");
900 compare_exchange_method = mono_method_desc_search_in_class (desc, class);
901 mono_method_desc_free (desc);
903 if (!compare_exchange_method)
907 mb = mono_mb_new (mono_defaults.monitor_class, is_v4 ? "FastMonitorEnterV4" : "FastMonitorEnter", MONO_WRAPPER_UNKNOWN);
909 mb->method->slot = -1;
910 mb->method->flags = METHOD_ATTRIBUTE_PUBLIC | METHOD_ATTRIBUTE_STATIC |
911 METHOD_ATTRIBUTE_HIDE_BY_SIG | METHOD_ATTRIBUTE_FINAL;
914 tid_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
915 syncp_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
916 owner_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
918 emit_obj_syncp_check (mb, syncp_loc, &obj_null_branch, is_v4 ? &true_locktaken_branch : NULL, &syncp_null_branch, &thin_hash_branch, FALSE);
921 mono. tls thread_tls_offset threadp
922 ldc.i4 MONO_STRUCT_OFFSET(MonoThread, tid) threadp off
927 ldc.i4 MONO_STRUCT_OFFSET(MonoThreadsSync, owner) syncp off
935 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
936 mono_mb_emit_byte (mb, CEE_MONO_TLS);
937 mono_mb_emit_i4 (mb, TLS_KEY_THREAD);
938 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoInternalThread, tid));
939 mono_mb_emit_byte (mb, CEE_ADD);
940 mono_mb_emit_byte (mb, CEE_LDIND_I);
941 mono_mb_emit_stloc (mb, tid_loc);
942 mono_mb_emit_ldloc (mb, syncp_loc);
943 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoThreadsSync, owner));
944 mono_mb_emit_byte (mb, CEE_ADD);
945 mono_mb_emit_byte (mb, CEE_LDIND_I);
946 mono_mb_emit_stloc (mb, owner_loc);
947 mono_mb_emit_ldloc (mb, owner_loc);
948 tid_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);
952 ldc.i4 MONO_STRUCT_OFFSET(MonoThreadsSync, owner) syncp off
955 ldc.i4 0 &owner tid 0
956 call System.Threading.Interlocked.CompareExchange oldowner
961 mono_mb_emit_ldloc (mb, syncp_loc);
962 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoThreadsSync, owner));
963 mono_mb_emit_byte (mb, CEE_ADD);
964 mono_mb_emit_ldloc (mb, tid_loc);
965 mono_mb_emit_byte (mb, CEE_LDC_I4_0);
966 mono_mb_emit_managed_call (mb, compare_exchange_method, NULL);
967 has_owner_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);
970 mono_mb_emit_byte (mb, CEE_LDARG_1);
971 mono_mb_emit_byte (mb, CEE_LDC_I4_1);
972 mono_mb_emit_byte (mb, CEE_STIND_I1);
974 mono_mb_emit_byte (mb, CEE_RET);
982 ldc.i4 MONO_STRUCT_OFFSET(MonoThreadsSync, nest) syncp off
986 ldc.i4 1 &nest nest 1
992 mono_mb_patch_short_branch (mb, tid_branch);
993 mono_mb_emit_ldloc (mb, owner_loc);
994 mono_mb_emit_ldloc (mb, tid_loc);
995 other_owner_branch = mono_mb_emit_short_branch (mb, CEE_BNE_UN_S);
996 mono_mb_emit_ldloc (mb, syncp_loc);
997 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoThreadsSync, nest));
998 mono_mb_emit_byte (mb, CEE_ADD);
999 mono_mb_emit_byte (mb, CEE_DUP);
1000 mono_mb_emit_byte (mb, CEE_LDIND_I4);
1001 mono_mb_emit_byte (mb, CEE_LDC_I4_1);
1002 mono_mb_emit_byte (mb, CEE_ADD);
1003 mono_mb_emit_byte (mb, CEE_STIND_I4);
1006 mono_mb_emit_byte (mb, CEE_LDARG_1);
1007 mono_mb_emit_byte (mb, CEE_LDC_I4_1);
1008 mono_mb_emit_byte (mb, CEE_STIND_I1);
1011 mono_mb_emit_byte (mb, CEE_RET);
1014 obj_null, syncp_null, has_owner, other_owner:
1016 call System.Threading.Monitor.Enter
1020 if (thin_hash_branch)
1021 mono_mb_patch_short_branch (mb, thin_hash_branch);
1022 mono_mb_patch_short_branch (mb, obj_null_branch);
1023 mono_mb_patch_short_branch (mb, syncp_null_branch);
1024 mono_mb_patch_short_branch (mb, has_owner_branch);
1025 mono_mb_patch_short_branch (mb, other_owner_branch);
1026 if (true_locktaken_branch)
1027 mono_mb_patch_short_branch (mb, true_locktaken_branch);
1028 mono_mb_emit_byte (mb, CEE_LDARG_0);
1030 mono_mb_emit_byte (mb, CEE_LDARG_1);
1031 mono_mb_emit_managed_call (mb, monitor_enter_method, NULL);
1032 mono_mb_emit_byte (mb, CEE_RET);
1035 res = register_fastpath (mono_mb_create_method (mb, mono_signature_no_pinvoke (monitor_enter_method), 5), fast_path_idx);
1037 info = mono_image_alloc0 (mono_defaults.corlib, sizeof (WrapperInfo));
1038 info->subtype = is_v4 ? WRAPPER_SUBTYPE_FAST_MONITOR_ENTER_V4 : WRAPPER_SUBTYPE_FAST_MONITOR_ENTER;
1039 mono_marshal_set_wrapper_info (res, info);
/*
 * mono_monitor_get_fast_exit_method:
 * Builds (once, then caches) the IL wrapper inlining the uncontended
 * Monitor.Exit fast path: bail to the managed @monitor_exit_method on
 * null object / thin hash / no sync block / wrong owner / pending
 * waiters (entry_count != 0); otherwise either decrement nest (nested
 * exit) or clear owner with a plain null store (final exit). Mirrors
 * the enter wrapper above, including the TLS_KEY_THREAD requirement.
 */
1046 mono_monitor_get_fast_exit_method (MonoMethod *monitor_exit_method)
1048 MonoMethodBuilder *mb;
1050 int obj_null_branch, has_waiting_branch, has_syncp_branch, owned_branch, nested_branch, thin_hash_branch;
1054 if (monitor_il_fastpaths [FASTPATH_EXIT])
1055 return monitor_il_fastpaths [FASTPATH_EXIT];
1057 if (!mono_get_runtime_callbacks ()->tls_key_supported (TLS_KEY_THREAD))
1060 mb = mono_mb_new (mono_defaults.monitor_class, "FastMonitorExit", MONO_WRAPPER_UNKNOWN);
1062 mb->method->slot = -1;
1063 mb->method->flags = METHOD_ATTRIBUTE_PUBLIC | METHOD_ATTRIBUTE_STATIC |
1064 METHOD_ATTRIBUTE_HIDE_BY_SIG | METHOD_ATTRIBUTE_FINAL;
1067 syncp_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
1069 emit_obj_syncp_check (mb, syncp_loc, &obj_null_branch, NULL, &has_syncp_branch, &thin_hash_branch, TRUE);
1075 mono_mb_emit_byte (mb, CEE_RET);
1080 ldc.i4 MONO_STRUCT_OFFSET(MonoThreadsSync, owner) syncp off
1083 mono. tls thread_tls_offset owner threadp
1084 ldc.i4 MONO_STRUCT_OFFSET(MonoThread, tid) owner threadp off
1090 mono_mb_patch_short_branch (mb, has_syncp_branch);
1091 mono_mb_emit_ldloc (mb, syncp_loc);
1092 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoThreadsSync, owner));
1093 mono_mb_emit_byte (mb, CEE_ADD);
1094 mono_mb_emit_byte (mb, CEE_LDIND_I);
1095 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
1096 mono_mb_emit_byte (mb, CEE_MONO_TLS);
1097 mono_mb_emit_i4 (mb, TLS_KEY_THREAD);
1098 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoInternalThread, tid));
1099 mono_mb_emit_byte (mb, CEE_ADD);
1100 mono_mb_emit_byte (mb, CEE_LDIND_I);
1101 owned_branch = mono_mb_emit_short_branch (mb, CEE_BEQ_S);
1107 mono_mb_emit_byte (mb, CEE_RET);
1112 ldc.i4 MONO_STRUCT_OFFSET(MonoThreadsSync, nest) syncp off
1117 ldc.i4 1 &nest nest nest 1
1118 bgt.un.s nested &nest nest
1121 mono_mb_patch_short_branch (mb, owned_branch);
1122 mono_mb_emit_ldloc (mb, syncp_loc);
1123 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoThreadsSync, nest));
1124 mono_mb_emit_byte (mb, CEE_ADD);
1125 mono_mb_emit_byte (mb, CEE_DUP);
1126 mono_mb_emit_byte (mb, CEE_LDIND_I4);
1127 mono_mb_emit_byte (mb, CEE_DUP);
1128 mono_mb_emit_byte (mb, CEE_LDC_I4_1);
1129 nested_branch = mono_mb_emit_short_branch (mb, CEE_BGT_UN_S);
1135 ldc.i4 MONO_STRUCT_OFFSET(MonoThreadsSync, entry_count) syncp off
1138 brtrue.s has_waiting
1141 mono_mb_emit_byte (mb, CEE_POP);
1142 mono_mb_emit_byte (mb, CEE_POP);
1143 mono_mb_emit_ldloc (mb, syncp_loc);
1144 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoThreadsSync, entry_count));
1145 mono_mb_emit_byte (mb, CEE_ADD);
1146 mono_mb_emit_byte (mb, CEE_LDIND_I4);
1147 has_waiting_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);
1151 ldc.i4 MONO_STRUCT_OFFSET(MonoThreadsSync, owner) syncp off
1158 mono_mb_emit_ldloc (mb, syncp_loc);
1159 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoThreadsSync, owner));
1160 mono_mb_emit_byte (mb, CEE_ADD);
1161 mono_mb_emit_byte (mb, CEE_LDNULL);
1162 mono_mb_emit_byte (mb, CEE_STIND_I);
1163 mono_mb_emit_byte (mb, CEE_RET);
1167 ldc.i4 1 &nest nest 1
1173 mono_mb_patch_short_branch (mb, nested_branch);
1174 mono_mb_emit_byte (mb, CEE_LDC_I4_1);
1175 mono_mb_emit_byte (mb, CEE_SUB);
1176 mono_mb_emit_byte (mb, CEE_STIND_I4);
1177 mono_mb_emit_byte (mb, CEE_RET);
1180 obj_null, has_waiting:
1182 call System.Threading.Monitor.Exit
1186 if (thin_hash_branch)
1187 mono_mb_patch_short_branch (mb, thin_hash_branch);
1188 mono_mb_patch_short_branch (mb, obj_null_branch);
1189 mono_mb_patch_short_branch (mb, has_waiting_branch);
1190 mono_mb_emit_byte (mb, CEE_LDARG_0);
1191 mono_mb_emit_managed_call (mb, monitor_exit_method, NULL);
1192 mono_mb_emit_byte (mb, CEE_RET);
1195 res = register_fastpath (mono_mb_create_method (mb, mono_signature_no_pinvoke (monitor_exit_method), 5), FASTPATH_EXIT);
1198 info = mono_image_alloc0 (mono_defaults.corlib, sizeof (WrapperInfo));
1199 info->subtype = WRAPPER_SUBTYPE_FAST_MONITOR_EXIT;
1200 mono_marshal_set_wrapper_info (res, info);
1206 mono_monitor_get_fast_path (MonoMethod *enter_or_exit)
1208 if (strcmp (enter_or_exit->name, "Enter") == 0)
1209 return mono_monitor_get_fast_enter_method (enter_or_exit);
1210 if (strcmp (enter_or_exit->name, "Exit") == 0)
1211 return mono_monitor_get_fast_exit_method (enter_or_exit);
1212 g_assert_not_reached ();
1217 * mono_monitor_threads_sync_member_offset:
1218 * @owner_offset: returns size and offset of the "owner" member
1219 * @nest_offset: returns size and offset of the "nest" member
1220 * @entry_count_offset: returns size and offset of the "entry_count" member
1222 * Returns the offsets and sizes of three members of the
1223 * MonoThreadsSync struct. The Monitor ASM fastpaths need this.
/* Each out-param packs (offset << 8) | size into one int via
 * ENCODE_OFF_SIZE; `ts` is a local used only for sizeof.
 * NOTE(review): the header comment above says "member_offset" but the
 * function is named "members_offset" — doc typo, not a code issue. */
1226 mono_monitor_threads_sync_members_offset (int *owner_offset, int *nest_offset, int *entry_count_offset)
1230 #define ENCODE_OFF_SIZE(o,s) (((o) << 8) | ((s) & 0xff))
1232 *owner_offset = ENCODE_OFF_SIZE (MONO_STRUCT_OFFSET (MonoThreadsSync, owner), sizeof (ts.owner));
1233 *nest_offset = ENCODE_OFF_SIZE (MONO_STRUCT_OFFSET (MonoThreadsSync, nest), sizeof (ts.nest));
1234 *entry_count_offset = ENCODE_OFF_SIZE (MONO_STRUCT_OFFSET (MonoThreadsSync, entry_count), sizeof (ts.entry_count));
/* Icall behind Monitor.TryEnter: retries the interruptible internal
 * acquire, draining pending thread interruptions (checkpoint) each time
 * it is interrupted (-1), until it succeeds (1) or times out (0). */
1238 ves_icall_System_Threading_Monitor_Monitor_try_enter (MonoObject *obj, guint32 ms)
1243 res = mono_monitor_try_enter_internal (obj, ms, TRUE);
1245 mono_thread_interruption_checkpoint ();
1246 } while (res == -1);
/*
 * Icall for Monitor.TryEnter(obj, ms, ref lockTaken): same retry loop
 * as try_enter, but the outcome is reported through the byref
 * @lockTaken flag so it survives an asynchronous exception raised in
 * the caller after the lock was acquired.
 */
1252 ves_icall_System_Threading_Monitor_Monitor_try_enter_with_atomic_var (MonoObject *obj, guint32 ms, char *lockTaken)
1256 res = mono_monitor_try_enter_internal (obj, ms, TRUE);
1257 /* This means we got interrupted during the wait and didn't get the monitor. */
1259 mono_thread_interruption_checkpoint ();
1260 } while (res == -1);
1261 /* It's safe to do it from here since interruption would happen only on the wrapper. */
1262 *lockTaken = res == 1;
/*
 * Icall for Monitor.test_owner: report whether the CALLING thread
 * currently owns @obj's monitor.
 * NOTE(review): extract has gaps — the early returns for the unlocked
 * and thin-hash cases are not visible here.
 */
1266 ves_icall_System_Threading_Monitor_Monitor_test_owner (MonoObject *obj)
1268 MonoThreadsSync *mon;
1270 LOCK_DEBUG (g_message ("%s: Testing if %p is owned by thread %d", __func__, obj, GetCurrentThreadId()));
1272 mon = obj->synchronisation;
1273 #ifdef HAVE_MOVING_COLLECTOR
/* With a moving collector the sync slot may hold a thin hash rather
 * than a MonoThreadsSync pointer; strip the tag bits before use. */
1277 if (lw.lock_word & LOCK_WORD_THIN_HASH)
1279 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
/* Owned iff the recorded owner tid matches the calling thread's id. */
1287 if(mon->owner==GetCurrentThreadId ()) {
/*
 * Icall for Monitor.test_synchronised: report whether ANY thread
 * currently owns @obj's monitor (owner tid non-zero).
 * NOTE(review): extract has gaps — the early returns for the unlocked
 * and thin-hash cases are not visible here.
 */
1295 ves_icall_System_Threading_Monitor_Monitor_test_synchronised (MonoObject *obj)
1297 MonoThreadsSync *mon;
1299 LOCK_DEBUG (g_message("%s: (%d) Testing if %p is owned by any thread", __func__, GetCurrentThreadId (), obj));
1301 mon = obj->synchronisation;
1302 #ifdef HAVE_MOVING_COLLECTOR
/* Strip thin-hash tag bits from the lock word before dereferencing. */
1306 if (lw.lock_word & LOCK_WORD_THIN_HASH)
1308 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
/* Any non-zero owner tid means some thread holds the monitor. */
1316 if (mon->owner != 0) {
1323 /* All wait list manipulation in the pulse, pulseall and wait
1324 * functions happens while the monitor lock is held, so we don't need
1325 * any extra struct locking
/*
 * Icall for Monitor.Pulse: wake the FIRST waiter queued on @obj's
 * monitor.  The caller must own the monitor; a thin-hash header, a
 * missing sync record, or a foreign owner raises
 * SynchronizationLockException.
 */
1329 ves_icall_System_Threading_Monitor_Monitor_pulse (MonoObject *obj)
1331 MonoThreadsSync *mon;
1333 LOCK_DEBUG (g_message ("%s: (%d) Pulsing %p", __func__, GetCurrentThreadId (), obj));
1335 mon = obj->synchronisation;
1336 #ifdef HAVE_MOVING_COLLECTOR
/* A thin-hash lock word means the object was never locked. */
1340 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
1341 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
1344 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
1349 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
/* Only the owning thread may pulse. */
1352 if (mon->owner != GetCurrentThreadId ()) {
1353 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
1357 LOCK_DEBUG (g_message ("%s: (%d) %d threads waiting", __func__, GetCurrentThreadId (), g_slist_length (mon->wait_list)));
/* Signal the head waiter's event and pop it from the list; the waiter
 * in Monitor.Wait re-acquires the monitor on its own. */
1359 if (mon->wait_list != NULL) {
1360 LOCK_DEBUG (g_message ("%s: (%d) signalling and dequeuing handle %p", __func__, GetCurrentThreadId (), mon->wait_list->data));
1362 SetEvent (mon->wait_list->data);
1363 mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
/*
 * Icall for Monitor.PulseAll: wake EVERY waiter queued on @obj's
 * monitor.  Same ownership checks as pulse; the only difference is the
 * while-loop that drains the whole wait list instead of popping one
 * entry.
 */
1368 ves_icall_System_Threading_Monitor_Monitor_pulse_all (MonoObject *obj)
1370 MonoThreadsSync *mon;
1372 LOCK_DEBUG (g_message("%s: (%d) Pulsing all %p", __func__, GetCurrentThreadId (), obj));
1374 mon = obj->synchronisation;
1375 #ifdef HAVE_MOVING_COLLECTOR
/* A thin-hash lock word means the object was never locked. */
1379 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
1380 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
1383 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
1388 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
/* Only the owning thread may pulse. */
1391 if (mon->owner != GetCurrentThreadId ()) {
1392 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
1396 LOCK_DEBUG (g_message ("%s: (%d) %d threads waiting", __func__, GetCurrentThreadId (), g_slist_length (mon->wait_list)));
/* Signal and dequeue every waiter until the list is empty. */
1398 while (mon->wait_list != NULL) {
1399 LOCK_DEBUG (g_message ("%s: (%d) signalling and dequeuing handle %p", __func__, GetCurrentThreadId (), mon->wait_list->data));
1401 SetEvent (mon->wait_list->data);
1402 mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
/*
 * Icall for Monitor.Wait: atomically release @obj's monitor, wait up to
 * @ms milliseconds for a Pulse/PulseAll on it, then re-acquire the
 * monitor before returning.  Per-waiter signalling uses a private
 * auto-reset Win32 event appended to mon->wait_list.
 * Requires the caller to own the monitor (checked below).
 * NOTE(review): extract has gaps — some declarations (lw, event, ret,
 * regain), braces and returns are not visible here.
 */
1407 ves_icall_System_Threading_Monitor_Monitor_wait (MonoObject *obj, guint32 ms)
1409 MonoThreadsSync *mon;
1413 gboolean success = FALSE;
1415 MonoInternalThread *thread = mono_thread_internal_current ();
1417 LOCK_DEBUG (g_message ("%s: (%d) Trying to wait for %p with timeout %dms", __func__, GetCurrentThreadId (), obj, ms));
1419 mon = obj->synchronisation;
1420 #ifdef HAVE_MOVING_COLLECTOR
/* A thin-hash lock word means the object was never locked. */
1424 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
1425 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
1428 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
1433 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
/* Only the owning thread may wait on the monitor. */
1436 if (mon->owner != GetCurrentThreadId ()) {
1437 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
1441 /* Do this WaitSleepJoin check before creating the event handle */
1442 mono_thread_current_check_pending_interrupt ();
/* Private auto-reset event: signalled by Pulse/PulseAll for this waiter. */
1444 event = CreateEvent (NULL, FALSE, FALSE, NULL);
1445 if (event == NULL) {
1446 mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to set up wait event"));
1450 LOCK_DEBUG (g_message ("%s: (%d) queuing handle %p", __func__, GetCurrentThreadId (), event));
1452 mono_thread_current_check_pending_interrupt ();
1454 mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
/* Queue ourselves while still holding the monitor lock (see the wait
 * list note above the pulse functions). */
1456 mon->wait_list = g_slist_append (mon->wait_list, event);
1458 /* Save the nest count, and release the lock */
1461 mono_monitor_exit (obj);
1463 LOCK_DEBUG (g_message ("%s: (%d) Unlocked %p lock %p", __func__, GetCurrentThreadId (), obj, mon));
1465 /* There's no race between unlocking mon and waiting for the
1466 * event, because auto reset events are sticky, and this event
1467 * is private to this thread. Therefore even if the event was
1468 * signalled before we wait, we still succeed.
/* Alertable wait so APCs/thread interruption can break us out. */
1470 ret = WaitForSingleObjectEx (event, ms, TRUE);
1472 /* Reset the thread state fairly early, so we don't have to worry
1473 * about the monitor error checking
1475 mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
1477 if (mono_thread_interruption_requested ()) {
1479 * Can't remove the event from wait_list, since the monitor is not locked by
1480 * us. So leave it there, mon_new () will delete it when the mon structure
1481 * is placed on the free list.
1482 * FIXME: The caller expects to hold the lock after the wait returns, but it
1483 * doesn't happen in this case:
1484 * http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?FeedbackID=97268
1489 /* Regain the lock with the previous nest count */
/* Same interrupted-retry loop as try_enter: -1 means interrupted. */
1491 regain = mono_monitor_try_enter_internal (obj, INFINITE, TRUE);
1493 mono_thread_interruption_checkpoint ();
1494 } while (regain == -1);
1497 /* Something went wrong, so throw a
1498 * SynchronizationLockException
1500 CloseHandle (event);
1501 mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to regain lock"));
1507 LOCK_DEBUG (g_message ("%s: (%d) Regained %p lock %p", __func__, GetCurrentThreadId (), obj, mon));
1509 if (ret == WAIT_TIMEOUT) {
1510 /* Poll the event again, just in case it was signalled
1511 * while we were trying to regain the monitor lock
/* Zero timeout, non-alertable: pure poll. */
1513 ret = WaitForSingleObjectEx (event, 0, FALSE);
1516 /* Pulse will have popped our event from the queue if it signalled
1517 * us, so we only do it here if the wait timed out.
1519 * This avoids a race condition where the thread holding the
1520 * lock can Pulse several times before the WaitForSingleObject
1521 * returns. If we popped the queue here then this event might
1522 * be signalled more than once, thereby starving another
1526 if (ret == WAIT_OBJECT_0) {
1527 LOCK_DEBUG (g_message ("%s: (%d) Success", __func__, GetCurrentThreadId ()));
1530 LOCK_DEBUG (g_message ("%s: (%d) Wait failed, dequeuing handle %p", __func__, GetCurrentThreadId (), event));
1531 /* No pulse, so we have to remove ourself from the
/* Timed out: we were never pulsed, so dequeue our own event (we hold
 * the monitor again here, so list manipulation is safe). */
1534 mon->wait_list = g_slist_remove (mon->wait_list, event);
1536 CloseHandle (event);