2 * monitor.c: Monitor locking functions
5 * Dick Porter (dick@ximian.com)
7 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/monitor.h>
14 #include <mono/metadata/threads-types.h>
15 #include <mono/metadata/exception.h>
16 #include <mono/metadata/threads.h>
17 #include <mono/io-layer/io-layer.h>
18 #include <mono/metadata/object-internals.h>
19 #include <mono/metadata/gc-internal.h>
21 /*#define LOCK_DEBUG(a) do { a; } while (0)*/
25 * The monitor implementation here is based on
26 * http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf and
27 * http://www.research.ibm.com/people/d/dfb/papers/Bacon98Thin.ps
29 * The Dice paper describes a technique for saving lock record space
30 * by returning records to a free list when they become unused. That
31 * sounds like unnecessary complexity to me, though if it becomes
32 * clear that unused lock records are taking up lots of space or we
33 * need to shave more time off by avoiding a malloc then we can always
34 * implement the free list idea later. The timeout parameter to
35 * try_enter voids some of the assumptions about the reference count
36 * field in Dice's implementation too. In his version, the thread
37 * attempting to lock a contended object will block until it succeeds,
38 * so the reference count will never be decremented while an object is
41 * Bacon's thin locks have a fast path that doesn't need a lock record
42 * for the common case of locking an unlocked or shallow-nested
43 * object, but the technique relies on encoding the thread ID in 15
44 * bits (to avoid too much per-object space overhead.) Unfortunately
45 * I don't think it's possible to reliably encode a pthread_t into 15
46 * bits. (The JVM implementation used seems to have a 15-bit
47 * per-thread identifier available.)
49 * This implementation then combines Dice's basic lock model with
50 * Bacon's simplification of keeping a lock record for the lifetime of
/* Lock record attached to MonoObject->synchronisation once an object is
 * locked (or given a "fat" hash code).  NOTE(review): this extract is
 * line-sampled; fields referenced by the functions below (nest, hash_code,
 * entry_sem, wait_list, data) are not visible here — confirm against the
 * full file. */
54 struct _MonoThreadsSync
56 gsize owner; /* thread ID */
58 #ifdef HAVE_MOVING_COLLECTOR
/* Number of threads currently blocked on entry_sem waiting to enter. */
61 volatile guint32 entry_count;
67 typedef struct _MonitorArray MonitorArray;
/* Chunk of MonoThreadsSync records; chunks are chained (via a 'next' field
 * not visible in this extract) and scanned by mon_new for reclaimable slots. */
69 struct _MonitorArray {
72 MonoThreadsSync monitors [MONO_ZERO_LEN_ARRAY];
/* Global allocator state for lock records; all of it is guarded by
 * monitor_mutex (see the LOCKING comments on mon_finalize/mon_new). */
75 #define mono_monitor_allocator_lock() EnterCriticalSection (&monitor_mutex)
76 #define mono_monitor_allocator_unlock() LeaveCriticalSection (&monitor_mutex)
77 static CRITICAL_SECTION monitor_mutex;
/* Free list of unused lock records, chained through their 'data' field. */
78 static MonoThreadsSync *monitor_freelist;
/* All allocated MonitorArray chunks; appended so smaller chunks come first. */
79 static MonitorArray *monitor_allocated;
/* Size of the next chunk to allocate; presumably grown on each allocation —
 * the growth line is not visible in this extract, TODO confirm. */
80 static int array_size = 16;
/* One-time initialisation of the monitor allocator lock. */
83 mono_monitor_init (void)
85 InitializeCriticalSection (&monitor_mutex);
/* Runtime shutdown hook.  The critical section is deliberately not deleted
 * (the call is left commented out); the reason is not visible in this
 * extract — NOTE(review): confirm against the full file. */
89 mono_monitor_cleanup (void)
91 /*DeleteCriticalSection (&monitor_mutex);*/
94 /* LOCKING: this is called with monitor_mutex held */
/* Return a no-longer-referenced lock record to the free list, closing its
 * entry semaphore first.  Called once the owning object has been collected. */
96 mon_finalize (MonoThreadsSync *mon)
98 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": Finalizing sync %p", mon));
100 if (mon->entry_sem != NULL) {
101 CloseHandle (mon->entry_sem);
102 mon->entry_sem = NULL;
104 /* If this isn't empty then something is seriously broken - it
105 * means a thread is still waiting on the object that owned
106 * this lock, but the object has been finalized.
108 g_assert (mon->wait_list == NULL);
110 mon->entry_count = 0;
111 /* owner and nest are set in mon_new, no need to zero them out */
/* Chain onto the free list through the (now unused) data field. */
113 mon->data = monitor_freelist;
114 monitor_freelist = mon;
117 /* LOCKING: this is called with monitor_mutex held */
118 static MonoThreadsSync *
121 MonoThreadsSync *new;
123 if (!monitor_freelist) {
124 MonitorArray *marray;
126 /* see if any sync block has been collected */
128 for (marray = monitor_allocated; marray; marray = marray->next) {
129 for (i = 0; i < marray->num_monitors; ++i) {
130 if (marray->monitors [i].data == NULL) {
131 new = &marray->monitors [i];
132 new->data = monitor_freelist;
133 monitor_freelist = new;
136 /* small perf tweak to avoid scanning all the blocks */
140 /* need to allocate a new array of monitors */
141 if (!monitor_freelist) {
143 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": allocating more monitors: %d", array_size));
144 marray = g_malloc0 (sizeof (MonoArray) + array_size * sizeof (MonoThreadsSync));
145 marray->num_monitors = array_size;
147 /* link into the freelist */
148 for (i = 0; i < marray->num_monitors - 1; ++i) {
149 marray->monitors [i].data = &marray->monitors [i + 1];
151 marray->monitors [i].data = NULL; /* the last one */
152 monitor_freelist = &marray->monitors [0];
153 /* we happend the marray instead of prepending so that
154 * the collecting loop above will need to scan smaller arrays first
156 if (!monitor_allocated) {
157 monitor_allocated = marray;
159 last = monitor_allocated;
167 new = monitor_freelist;
168 monitor_freelist = new->data;
177  * Format of the lock word:
178  * thinhash | fathash | data
180  * thinhash is the lower bit: if set data is the shifted hashcode of the object.
181  * fathash is another bit: if set the hash code is stored in the MonoThreadsSync
182  * struct pointed to by data
183  * if neither bit is set and data is non-NULL, data is a MonoThreadsSync
/* NOTE(review): this 'sync' member belongs to a LockWord union (pointer
 * overlaid with an integer lock_word) whose declaration is not visible in
 * this extract — confirm against the full file. */
187 MonoThreadsSync *sync;
/* Bit layout constants for the lock word, matching the comment above. */
191 LOCK_WORD_THIN_HASH = 1,
192 LOCK_WORD_FAT_HASH = 1 << 1,
193 LOCK_WORD_BITS_MASK = 0x3,
194 LOCK_WORD_HASH_SHIFT = 2
/* Objects are at least 8-byte aligned, so the low 3 address bits carry no
 * information and are shifted out before hashing. */
197 #define MONO_OBJECT_ALIGNMENT_SHIFT 3
203  * Calculate a hash code for @obj that is constant while @obj is alive.
/* With a moving collector the address cannot be the hash, so the hash is
 * computed once and cached: either shifted into the lock word itself
 * ("thin" hash) or stored in the MonoThreadsSync record ("fat" hash). */
206 mono_object_hash (MonoObject* obj)
208 #ifdef HAVE_MOVING_COLLECTOR
/* Fast paths: a previously stored thin or fat hash is returned directly. */
213 lw.sync = obj->synchronisation;
214 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
215 /*g_print ("fast thin hash %d for obj %p store\n", (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT, obj);*/
216 return (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT;
218 if (lw.lock_word & LOCK_WORD_FAT_HASH) {
219 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
220 /*g_print ("fast fat hash %d for obj %p store\n", lw.sync->hash_code, obj);*/
221 return lw.sync->hash_code;
224  * while we are inside this function, the GC will keep this object pinned,
225  * since we are in the unmanaged stack. Thanks to this and to the hash
226  * function that depends only on the address, we can ignore the races if
227  * another thread computes the hash at the same time, because it'll end up
228  * with the same value.
/* Knuth-style multiplicative hash of the (pinned) address. */
230 hash = (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
231 /* clear the top bits as they can be discarded */
232 hash &= ~(LOCK_WORD_BITS_MASK << 30);
233 /* no hash flags were set, so it must be a MonoThreadsSync pointer if not NULL */
/* Object already inflated: store the hash in the sync record (fat hash). */
235 lw.sync->hash_code = hash;
236 /*g_print ("storing hash code %d for obj %p in sync %p\n", hash, obj, lw.sync);*/
237 lw.lock_word |= LOCK_WORD_FAT_HASH;
238 /* this is safe since we don't deflate locks */
239 obj->synchronisation = lw.sync;
/* Not inflated: try to CAS a thin hash directly into the lock word. */
241 /*g_print ("storing thin hash code %d for obj %p\n", hash, obj);*/
242 lw.lock_word = LOCK_WORD_THIN_HASH | (hash << LOCK_WORD_HASH_SHIFT);
243 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, NULL) == NULL)
245 /*g_print ("failed store\n");*/
246 /* someone set the hash flag or someone inflated the object */
247 lw.sync = obj->synchronisation;
248 if (lw.lock_word & LOCK_WORD_THIN_HASH)
/* CAS lost to an inflater: fall back to storing a fat hash. */
250 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
251 lw.sync->hash_code = hash;
252 lw.lock_word |= LOCK_WORD_FAT_HASH;
253 /* this is safe since we don't deflate locks */
254 obj->synchronisation = lw.sync;
/* Non-moving collector: the address itself is stable, hash it directly. */
259  * Wang's address-based hash function:
260  * http://www.concentric.net/~Ttwang/tech/addrhash.htm
262 return (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
266 /* If allow_interruption==TRUE, the method will be interrupted if abort or suspend
267  * is requested. In this case it returns -1.
/* Core lock-acquire: returns 1 on success, 0 on timeout, and -1 when
 * interrupted (only possible with allow_interruption).  @ms may be 0
 * (try once), a timeout in milliseconds, or INFINITE. */
270 mono_monitor_try_enter_internal (MonoObject *obj, guint32 ms, gboolean allow_interruption)
272 MonoThreadsSync *mon;
273 gsize id = GetCurrentThreadId ();
275 guint32 then = 0, now, delta;
279 LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION
280 ": (%d) Trying to lock object %p (%d ms)", id, obj, ms));
283 mon = obj->synchronisation;
285 /* If the object has never been locked... */
/* Inflate: install a fresh lock record with a CAS; on success register a
 * GC weak link so the record can be reclaimed when the object dies. */
287 mono_monitor_allocator_lock ();
289 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, mon, NULL) == NULL) {
290 mono_gc_weak_link_add (&mon->data, obj);
291 mono_monitor_allocator_unlock ();
292 /* Successfully locked */
295 #ifdef HAVE_MOVING_COLLECTOR
/* CAS lost: if a thin hash was stored meanwhile, migrate it into our new
 * record and retry the install with the fat-hash bit set. */
297 lw.sync = obj->synchronisation;
298 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
299 MonoThreadsSync *oldlw = lw.sync;
300 /* move the already calculated hash */
301 mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
303 lw.lock_word |= LOCK_WORD_FAT_HASH;
304 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
305 mono_gc_weak_link_add (&mon->data, obj);
306 mono_monitor_allocator_unlock ();
307 /* Successfully locked */
311 mono_monitor_allocator_unlock ();
314 } else if (lw.lock_word & LOCK_WORD_FAT_HASH) {
316 mono_monitor_allocator_unlock ();
317 /* get the old lock without the fat hash bit */
318 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
322 mono_monitor_allocator_unlock ();
323 mon = obj->synchronisation;
327 mono_monitor_allocator_unlock ();
328 mon = obj->synchronisation;
332 #ifdef HAVE_MOVING_COLLECTOR
/* Object holds only a thin hash: inflate it here too, preserving the hash. */
335 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
336 MonoThreadsSync *oldlw = lw.sync;
337 mono_monitor_allocator_lock ();
339 /* move the already calculated hash */
340 mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
342 lw.lock_word |= LOCK_WORD_FAT_HASH;
343 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
344 mono_gc_weak_link_add (&mon->data, obj);
345 mono_monitor_allocator_unlock ();
346 /* Successfully locked */
350 mono_monitor_allocator_unlock ();
357 #ifdef HAVE_MOVING_COLLECTOR
361 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
366 /* If the object is currently locked by this thread... */
367 if (mon->owner == id) {
372 /* If the object has previously been locked but isn't now... */
374 /* This case differs from Dice's case 3 because we don't
375  * deflate locks or cache unused lock records
377 if (mon->owner == 0) {
378 /* Try to install our ID in the owner field, nest
379  * should have been left at 1 by the previous unlock
382 if (InterlockedCompareExchangePointer ((gpointer *)&mon->owner, (gpointer)id, 0) == 0) {
384 g_assert (mon->nest == 1);
392 /* The object must be locked by someone else... */
394 /* If ms is 0 we don't block, but just fail straight away */
396 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) timed out, returning FALSE", id));
400 /* The slow path begins here. We need to make sure theres a
401  * semaphore handle (creating it if necessary), and block on
/* Lazily create the contention semaphore; the CAS loser closes its handle
 * (close presumably happens on a line not visible in this extract). */
404 if (mon->entry_sem == NULL) {
405 /* Create the semaphore */
406 sem = CreateSemaphore (NULL, 0, 0x7fffffff, NULL);
407 if (InterlockedCompareExchangePointer ((gpointer*)&mon->entry_sem, sem, NULL) != NULL) {
408 /* Someone else just put a handle here */
413 /* If we need to time out, record a timestamp and adjust ms,
414  * because WaitForSingleObject doesn't tell us how long it
417  * Don't block forever here, because theres a chance the owner
418  * thread released the lock while we were creating the
419  * semaphore: we would not get the wakeup. Using the event
420  * handle technique from pulse/wait would involve locking the
421  * lock struct and therefore slowing down the fast path.
423 if (ms != INFINITE) {
424 then = GetTickCount ();
/* entry_count tells mono_monitor_exit whether a wakeup is needed. */
434 InterlockedIncrement (&mon->entry_count);
435 ret = WaitForSingleObjectEx (mon->entry_sem, waitms, allow_interruption);
436 InterlockedDecrement (&mon->entry_count);
438 if (ms != INFINITE) {
439 now = GetTickCount ();
442 /* The counter must have wrapped around */
443 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
444 ": wrapped around! now=0x%x then=0x%x", now, then));
/* GetTickCount wraps every ~49.7 days; compensate for one wrap. */
446 now += (0xffffffff - then);
449 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": wrap rejig: now=0x%x then=0x%x delta=0x%x", now, then, now-then));
459 if ((ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) && ms > 0) {
464 if (ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) {
465 /* Infinite wait, so just try again */
470 if (ret == WAIT_OBJECT_0) {
471 /* retry from the top */
475 /* We must have timed out */
476 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) timed out waiting, returning FALSE", id));
/* An APC/interruption while allow_interruption was set: report -1. */
478 if (ret == WAIT_IO_COMPLETION)
/* Blocking Monitor.Enter: waits forever, without interruption handling. */
485 mono_monitor_enter (MonoObject *obj)
487 return mono_monitor_try_enter_internal (obj, INFINITE, FALSE) == 1;
/* Monitor.TryEnter with a millisecond timeout; TRUE only on acquisition. */
491 mono_monitor_try_enter (MonoObject *obj, guint32 ms)
493 return mono_monitor_try_enter_internal (obj, ms, FALSE) == 1;
/* Monitor.Exit: decrement the nest count; on full release clear the owner
 * and wake one contending thread if any is blocked on the semaphore. */
497 mono_monitor_exit (MonoObject *obj)
499 MonoThreadsSync *mon;
502 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Unlocking %p", GetCurrentThreadId (), obj));
504 mon = obj->synchronisation;
506 #ifdef HAVE_MOVING_COLLECTOR
/* A thin-hash-only lock word means the object was never inflated/locked. */
510 if (lw.lock_word & LOCK_WORD_THIN_HASH)
512 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
517 /* No one ever used Enter. Just ignore the Exit request as MS does */
520 if (mon->owner != GetCurrentThreadId ()) {
524 nest = mon->nest - 1;
526 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
527 ": (%d) Object %p is now unlocked", GetCurrentThreadId (), obj));
529 /* object is now unlocked, leave nest==1 so we don't
530  * need to set it when the lock is reacquired
534 /* Do the wakeup stuff. It's possible that the last
535  * blocking thread gave up waiting just before we
536  * release the semaphore resulting in a futile wakeup
537  * next time there's contention for this object, but
538  * it means we don't have to waste time locking the
541 if (mon->entry_count > 0) {
542 ReleaseSemaphore (mon->entry_sem, 1, NULL);
545 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
546 ": (%d) Object %p is now locked %d times", GetCurrentThreadId (), obj, nest));
/* Icall behind Monitor.TryEnter: allows interruption (-1 from the internal
 * helper) and runs the thread interruption checkpoint afterwards so a
 * pending abort/suspend is delivered. */
552 ves_icall_System_Threading_Monitor_Monitor_try_enter (MonoObject *obj, guint32 ms)
557 res = mono_monitor_try_enter_internal (obj, ms, TRUE);
559 mono_thread_interruption_checkpoint ();
/* Icall behind Monitor.Exit: thin wrapper over mono_monitor_exit. */
566 ves_icall_System_Threading_Monitor_Monitor_exit (MonoObject *obj)
568 mono_monitor_exit (obj);
/* Returns whether the calling thread currently owns the lock on @obj. */
572 ves_icall_System_Threading_Monitor_Monitor_test_owner (MonoObject *obj)
574 MonoThreadsSync *mon;
576 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
577 ": Testing if %p is owned by thread %d", obj, GetCurrentThreadId()));
579 mon = obj->synchronisation;
580 #ifdef HAVE_MOVING_COLLECTOR
/* Strip hash bits from the lock word before treating it as a pointer. */
584 if (lw.lock_word & LOCK_WORD_THIN_HASH)
586 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
594 if(mon->owner==GetCurrentThreadId ()) {
/* Returns whether *any* thread currently owns the lock on @obj. */
602 ves_icall_System_Threading_Monitor_Monitor_test_synchronised (MonoObject *obj)
604 MonoThreadsSync *mon;
606 LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION
607 ": (%d) Testing if %p is owned by any thread", GetCurrentThreadId (), obj));
609 mon = obj->synchronisation;
610 #ifdef HAVE_MOVING_COLLECTOR
/* Strip hash bits from the lock word before treating it as a pointer. */
614 if (lw.lock_word & LOCK_WORD_THIN_HASH)
616 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
/* owner == 0 means unlocked (see the unlock path in mono_monitor_exit). */
624 if (mon->owner != 0) {
631 /* All wait list manipulation in the pulse, pulseall and wait
632 * functions happens while the monitor lock is held, so we don't need
633 * any extra struct locking
/* Monitor.Pulse: signal and dequeue the first waiter's event, if any.
 * Throws SynchronizationLockException unless the caller holds the lock. */
637 ves_icall_System_Threading_Monitor_Monitor_pulse (MonoObject *obj)
639 MonoThreadsSync *mon;
641 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Pulsing %p",
642 GetCurrentThreadId (), obj));
644 mon = obj->synchronisation;
645 #ifdef HAVE_MOVING_COLLECTOR
/* A thin-hash-only lock word means the object was never locked. */
649 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
650 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
653 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
658 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
661 if (mon->owner != GetCurrentThreadId ()) {
662 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
666 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) %d threads waiting",
667 GetCurrentThreadId (), g_slist_length (mon->wait_list)));
/* Wait list manipulation is safe: the caller holds the monitor (see the
 * comment block above these pulse/wait functions). */
669 if (mon->wait_list != NULL) {
670 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
671 ": (%d) signalling and dequeuing handle %p",
672 GetCurrentThreadId (), mon->wait_list->data));
674 SetEvent (mon->wait_list->data);
675 mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
/* Monitor.PulseAll: signal and dequeue every waiter's event.  Identical to
 * pulse except the if becomes a while over the whole wait list. */
680 ves_icall_System_Threading_Monitor_Monitor_pulse_all (MonoObject *obj)
682 MonoThreadsSync *mon;
684 LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION ": (%d) Pulsing all %p",
685 GetCurrentThreadId (), obj));
687 mon = obj->synchronisation;
688 #ifdef HAVE_MOVING_COLLECTOR
/* A thin-hash-only lock word means the object was never locked. */
692 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
693 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
696 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
701 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
704 if (mon->owner != GetCurrentThreadId ()) {
705 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
709 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) %d threads waiting",
710 GetCurrentThreadId (), g_slist_length (mon->wait_list)));
712 while (mon->wait_list != NULL) {
713 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
714 ": (%d) signalling and dequeuing handle %p",
715 GetCurrentThreadId (), mon->wait_list->data));
717 SetEvent (mon->wait_list->data);
718 mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
723 ves_icall_System_Threading_Monitor_Monitor_wait (MonoObject *obj, guint32 ms)
725 MonoThreadsSync *mon;
729 gboolean success = FALSE;
731 MonoThread *thread = mono_thread_current ();
733 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
734 ": (%d) Trying to wait for %p with timeout %dms",
735 GetCurrentThreadId (), obj, ms));
737 mon = obj->synchronisation;
738 #ifdef HAVE_MOVING_COLLECTOR
742 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
743 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
746 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
751 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
754 if (mon->owner != GetCurrentThreadId ()) {
755 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
759 event = CreateEvent (NULL, FALSE, FALSE, NULL);
761 mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to set up wait event"));
765 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) queuing handle %p",
766 GetCurrentThreadId (), event));
768 mono_monitor_enter (thread->synch_lock);
769 thread->state |= ThreadState_WaitSleepJoin;
770 mono_monitor_exit (thread->synch_lock);
772 mon->wait_list = g_slist_append (mon->wait_list, event);
774 /* Save the nest count, and release the lock */
777 mono_monitor_exit (obj);
779 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Unlocked %p lock %p",
780 GetCurrentThreadId (), obj, mon));
782 /* There's no race between unlocking mon and waiting for the
783 * event, because auto reset events are sticky, and this event
784 * is private to this thread. Therefore even if the event was
785 * signalled before we wait, we still succeed.
787 ret = WaitForSingleObjectEx (event, ms, TRUE);
789 /* Reset the thread state fairly early, so we don't have to worry
790 * about the monitor error checking
792 mono_monitor_enter (thread->synch_lock);
793 thread->state &= ~ThreadState_WaitSleepJoin;
794 mono_monitor_exit (thread->synch_lock);
796 if (mono_thread_interruption_requested ()) {
801 /* Regain the lock with the previous nest count */
803 regain = mono_monitor_try_enter_internal (obj, INFINITE, TRUE);
805 mono_thread_interruption_checkpoint ();
806 } while (regain == -1);
809 /* Something went wrong, so throw a
810 * SynchronizationLockException
813 mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to regain lock"));
819 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Regained %p lock %p",
820 GetCurrentThreadId (), obj, mon));
822 if (ret == WAIT_TIMEOUT) {
823 /* Poll the event again, just in case it was signalled
824 * while we were trying to regain the monitor lock
826 ret = WaitForSingleObjectEx (event, 0, FALSE);
829 /* Pulse will have popped our event from the queue if it signalled
830 * us, so we only do it here if the wait timed out.
832 * This avoids a race condition where the thread holding the
833 * lock can Pulse several times before the WaitForSingleObject
834 * returns. If we popped the queue here then this event might
835 * be signalled more than once, thereby starving another
839 if (ret == WAIT_OBJECT_0) {
840 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Success",
841 GetCurrentThreadId ()));
844 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Wait failed, dequeuing handle %p",
845 GetCurrentThreadId (), event));
846 /* No pulse, so we have to remove ourself from the
849 mon->wait_list = g_slist_remove (mon->wait_list, event);