3 * Monitor locking functions
6 * Dick Porter (dick@ximian.com)
8 * Copyright 2003 Ximian, Inc (http://www.ximian.com)
9 * Copyright 2004-2009 Novell, Inc (http://www.novell.com)
10 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
17 #include <mono/metadata/abi-details.h>
18 #include <mono/metadata/monitor.h>
19 #include <mono/metadata/threads-types.h>
20 #include <mono/metadata/exception.h>
21 #include <mono/metadata/threads.h>
22 #include <mono/metadata/object-internals.h>
23 #include <mono/metadata/class-internals.h>
24 #include <mono/metadata/gc-internals.h>
25 #include <mono/metadata/method-builder.h>
26 #include <mono/metadata/debug-helpers.h>
27 #include <mono/metadata/tabledefs.h>
28 #include <mono/metadata/marshal.h>
29 #include <mono/metadata/w32event.h>
30 #include <mono/utils/mono-threads.h>
31 #include <mono/metadata/profiler-private.h>
32 #include <mono/utils/mono-time.h>
33 #include <mono/utils/atomic.h>
34 #include <mono/utils/w32api.h>
35 #include <mono/utils/mono-os-wait.h>
38 * Pull the list of opcodes
40 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
44 #include "mono/cil/opcode.def"
49 /*#define LOCK_DEBUG(a) do { a; } while (0)*/
53 * The monitor implementation here is based on
54 * http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf and
55 * http://www.research.ibm.com/people/d/dfb/papers/Bacon98Thin.ps
57 * The Dice paper describes a technique for saving lock record space
58 * by returning records to a free list when they become unused. That
59 * sounds like unnecessary complexity to me, though if it becomes
60 * clear that unused lock records are taking up lots of space or we
61 * need to shave more time off by avoiding a malloc then we can always
62 * implement the free list idea later. The timeout parameter to
63 * try_enter voids some of the assumptions about the reference count
64 * field in Dice's implementation too. In his version, the thread
65 * attempting to lock a contended object will block until it succeeds,
66 * so the reference count will never be decremented while an object is
69 * Bacon's thin locks have a fast path that doesn't need a lock record
70 * for the common case of locking an unlocked or shallow-nested
/* Allocator state for inflated monitors.
 * NOTE(review): this dump is missing lines — the visible struct shows only the
 * trailing flexible monitor array; presumably `next` and `num_monitors` fields
 * (referenced by the loops below) precede it — confirm against the full file.
 */
75 typedef struct _MonitorArray MonitorArray;
77 struct _MonitorArray {
/* Trailing open-ended array of sync blocks, allocated in one g_malloc0 chunk. */
80 MonoThreadsSync monitors [MONO_ZERO_LEN_ARRAY];
/* Lock protecting the freelist and the array list below. */
83 #define mono_monitor_allocator_lock() mono_os_mutex_lock (&monitor_mutex)
84 #define mono_monitor_allocator_unlock() mono_os_mutex_unlock (&monitor_mutex)
85 static mono_mutex_t monitor_mutex;
/* Singly linked freelist of unused sync blocks, chained through mon->data. */
86 static MonoThreadsSync *monitor_freelist;
/* List of all allocated MonitorArray chunks. */
87 static MonitorArray *monitor_allocated;
/* Size of the next chunk to allocate; doubled on each growth (see mon_new). */
88 static int array_size = 16;
90 /* MonoThreadsSync status helpers */
93 mon_status_get_owner (guint32 status)
95 return status & OWNER_MASK;
99 mon_status_set_owner (guint32 status, guint32 owner)
101 return (status & ENTRY_COUNT_MASK) | owner;
105 mon_status_get_entry_count (guint32 status)
107 gint32 entry_count = (gint32)((status & ENTRY_COUNT_MASK) >> ENTRY_COUNT_SHIFT);
108 gint32 zero = (gint32)(((guint32)ENTRY_COUNT_ZERO) >> ENTRY_COUNT_SHIFT);
109 return entry_count - zero;
112 static inline guint32
113 mon_status_init_entry_count (guint32 status)
115 return (status & OWNER_MASK) | ENTRY_COUNT_ZERO;
118 static inline guint32
119 mon_status_increment_entry_count (guint32 status)
121 return status + (1 << ENTRY_COUNT_SHIFT);
124 static inline guint32
125 mon_status_decrement_entry_count (guint32 status)
127 return status - (1 << ENTRY_COUNT_SHIFT);
130 static inline gboolean
131 mon_status_have_waiters (guint32 status)
133 return status & ENTRY_COUNT_WAITERS;
136 /* LockWord helpers */
138 static inline MonoThreadsSync*
139 lock_word_get_inflated_lock (LockWord lw)
141 lw.lock_word &= (~LOCK_WORD_STATUS_MASK);
145 static inline gboolean
146 lock_word_is_inflated (LockWord lw)
148 return lw.lock_word & LOCK_WORD_INFLATED;
151 static inline gboolean
152 lock_word_has_hash (LockWord lw)
154 return lw.lock_word & LOCK_WORD_HAS_HASH;
157 static inline LockWord
158 lock_word_set_has_hash (LockWord lw)
161 nlw.lock_word = lw.lock_word | LOCK_WORD_HAS_HASH;
165 static inline gboolean
166 lock_word_is_free (LockWord lw)
168 return !lw.lock_word;
171 static inline gboolean
172 lock_word_is_flat (LockWord lw)
174 /* Return whether the lock is flat or free */
175 return (lw.lock_word & LOCK_WORD_STATUS_MASK) == LOCK_WORD_FLAT;
179 lock_word_get_hash (LockWord lw)
181 return (gint32) (lw.lock_word >> LOCK_WORD_HASH_SHIFT);
185 lock_word_get_nest (LockWord lw)
187 if (lock_word_is_free (lw))
189 /* Inword nest count starts from 0 */
190 return ((lw.lock_word & LOCK_WORD_NEST_MASK) >> LOCK_WORD_NEST_SHIFT) + 1;
193 static inline gboolean
194 lock_word_is_nested (LockWord lw)
196 return lw.lock_word & LOCK_WORD_NEST_MASK;
199 static inline gboolean
200 lock_word_is_max_nest (LockWord lw)
202 return (lw.lock_word & LOCK_WORD_NEST_MASK) == LOCK_WORD_NEST_MASK;
205 static inline LockWord
206 lock_word_increment_nest (LockWord lw)
208 lw.lock_word += 1 << LOCK_WORD_NEST_SHIFT;
212 static inline LockWord
213 lock_word_decrement_nest (LockWord lw)
215 lw.lock_word -= 1 << LOCK_WORD_NEST_SHIFT;
220 lock_word_get_owner (LockWord lw)
222 return lw.lock_word >> LOCK_WORD_OWNER_SHIFT;
225 static inline LockWord
226 lock_word_new_thin_hash (gint32 hash)
229 lw.lock_word = (guint32)hash;
230 lw.lock_word = (lw.lock_word << LOCK_WORD_HASH_SHIFT) | LOCK_WORD_HAS_HASH;
234 static inline LockWord
235 lock_word_new_inflated (MonoThreadsSync *mon)
239 lw.lock_word |= LOCK_WORD_INFLATED;
243 static inline LockWord
244 lock_word_new_flat (gint32 owner)
247 lw.lock_word = owner;
248 lw.lock_word <<= LOCK_WORD_OWNER_SHIFT;
253 mono_monitor_init (void)
255 mono_os_mutex_init_recursive (&monitor_mutex);
/* Runtime shutdown hook. Marks freelist monitors so the (disabled) freeing
 * pass below can tell them apart; the actual g_free of the arrays stays
 * commented out because of the shutdown-ordering problem described in the
 * FIXME. NOTE(review): this dump is missing the comment delimiters around
 * the disabled block — confirm against the full file.
 */
259 mono_monitor_cleanup (void)
261 MonoThreadsSync *mon;
262 /* MonitorArray *marray, *next = NULL; */
264 /*mono_os_mutex_destroy (&monitor_mutex);*/
266 /* The monitors on the freelist don't have weak links - mark them */
267 for (mon = monitor_freelist; mon; mon = (MonoThreadsSync *)mon->data)
268 mon->wait_list = (GSList *)-1;
271 * FIXME: This still crashes with sgen (async_read.exe)
273 * In mini_cleanup() we first call mono_runtime_cleanup(), which calls
274 * mono_monitor_cleanup(), which is supposed to free all monitor memory.
276 * Later in mini_cleanup(), we call mono_domain_free(), which calls
277 * mono_gc_clear_domain(), which frees all weak links associated with objects.
278 * Those weak links reside in the monitor structures, which we've freed earlier.
280 * Unless we fix this dependency in the shutdown sequence this code has to remain
281 * disabled, or at least the call to g_free().
284 for (marray = monitor_allocated; marray; marray = next) {
287 for (i = 0; i < marray->num_monitors; ++i) {
288 mon = &marray->monitors [i];
289 if (mon->wait_list != (gpointer)-1)
290 mono_gc_weak_link_remove (&mon->data);
300 monitor_is_on_freelist (MonoThreadsSync *mon)
302 MonitorArray *marray;
303 for (marray = monitor_allocated; marray; marray = marray->next) {
304 if (mon >= marray->monitors && mon < &marray->monitors [marray->num_monitors])
312 * \param include_untaken Whether to list unheld inflated locks.
313 * Print a report on stdout of the managed locks currently held by
314 * threads. If \p include_untaken is specified, list also inflated locks
316 * This is supposed to be used in debuggers like gdb.
319 mono_locks_dump (gboolean include_untaken)
322 int used = 0, on_freelist = 0, to_recycle = 0, total = 0, num_arrays = 0;
323 MonoThreadsSync *mon;
324 MonitorArray *marray;
325 for (mon = monitor_freelist; mon; mon = (MonoThreadsSync *)mon->data)
327 for (marray = monitor_allocated; marray; marray = marray->next) {
328 total += marray->num_monitors;
330 for (i = 0; i < marray->num_monitors; ++i) {
331 mon = &marray->monitors [i];
332 if (mon->data == NULL) {
333 if (i < marray->num_monitors - 1)
336 if (!monitor_is_on_freelist ((MonoThreadsSync *)mon->data)) {
337 MonoObject *holder = (MonoObject *)mono_gchandle_get_target ((guint32)mon->data);
338 if (mon_status_get_owner (mon->status)) {
339 g_print ("Lock %p in object %p held by thread %d, nest level: %d\n",
340 mon, holder, mon_status_get_owner (mon->status), mon->nest);
342 g_print ("\tWaiting on semaphore %p: %d\n", mon->entry_sem, mon_status_get_entry_count (mon->status));
343 } else if (include_untaken) {
344 g_print ("Lock %p in object %p untaken\n", mon, holder);
351 g_print ("Total locks (in %d array(s)): %d, used: %d, on freelist: %d, to recycle: %d\n",
352 num_arrays, total, used, on_freelist, to_recycle);
355 /* LOCKING: this is called with monitor_mutex held */
357 mon_finalize (MonoThreadsSync *mon)
359 LOCK_DEBUG (g_message ("%s: Finalizing sync %p", __func__, mon));
361 if (mon->entry_sem != NULL) {
362 mono_coop_sem_destroy (mon->entry_sem);
363 g_free (mon->entry_sem);
364 mon->entry_sem = NULL;
366 /* If this isn't empty then something is seriously broken - it
367 * means a thread is still waiting on the object that owned
368 * this lock, but the object has been finalized.
370 g_assert (mon->wait_list == NULL);
372 /* owner and nest are set in mon_new, no need to zero them out */
374 mon->data = monitor_freelist;
375 monitor_freelist = mon;
376 #ifndef DISABLE_PERFCOUNTERS
377 InterlockedDecrement (&mono_perfcounters->gc_sync_blocks);
/* Allocate a sync block owned by thread \p id with nest == 1.
 * Strategy: pop from the freelist; if empty, first scavenge sync blocks whose
 * owning object was collected (their weak GC handle targets NULL), and only
 * then grow by appending a new, doubled-size MonitorArray chunk.
 */
381 /* LOCKING: this is called with monitor_mutex held */
382 static MonoThreadsSync *
385 MonoThreadsSync *new_;
387 if (!monitor_freelist) {
388 MonitorArray *marray;
390 /* see if any sync block has been collected */
392 for (marray = monitor_allocated; marray; marray = marray->next) {
393 for (i = 0; i < marray->num_monitors; ++i) {
/* data is a weak GC handle while in use; a NULL target means the object died. */
394 if (mono_gchandle_get_target ((guint32)marray->monitors [i].data) == NULL) {
395 new_ = &marray->monitors [i];
396 if (new_->wait_list) {
397 /* Orphaned events left by aborted threads */
398 while (new_->wait_list) {
399 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d): Closing orphaned event %d", mono_thread_info_get_small_id (), new_->wait_list->data));
400 mono_w32event_close (new_->wait_list->data);
401 new_->wait_list = g_slist_remove (new_->wait_list, new_->wait_list->data);
/* Release the dead weak handle and push the reclaimed block onto the freelist. */
404 mono_gchandle_free ((guint32)new_->data);
405 new_->data = monitor_freelist;
406 monitor_freelist = new_;
409 /* small perf tweak to avoid scanning all the blocks */
413 /* need to allocate a new array of monitors */
414 if (!monitor_freelist) {
416 LOCK_DEBUG (g_message ("%s: allocating more monitors: %d", __func__, array_size));
417 marray = (MonitorArray *)g_malloc0 (MONO_SIZEOF_MONO_ARRAY + array_size * sizeof (MonoThreadsSync));
418 marray->num_monitors = array_size;
420 /* link into the freelist */
421 for (i = 0; i < marray->num_monitors - 1; ++i) {
422 marray->monitors [i].data = &marray->monitors [i + 1];
424 marray->monitors [i].data = NULL; /* the last one */
425 monitor_freelist = &marray->monitors [0];
426 /* we happend the marray instead of prepending so that
427 * the collecting loop above will need to scan smaller arrays first
429 if (!monitor_allocated) {
430 monitor_allocated = marray;
432 last = monitor_allocated;
/* Pop the head of the freelist and initialize it as owned by \p id. */
440 new_ = monitor_freelist;
441 monitor_freelist = (MonoThreadsSync *)new_->data;
443 new_->status = mon_status_set_owner (0, id);
444 new_->status = mon_status_init_entry_count (new_->status);
448 #ifndef DISABLE_PERFCOUNTERS
449 InterlockedIncrement (&mono_perfcounters->gc_sync_blocks);
454 static MonoThreadsSync*
455 alloc_mon (MonoObject *obj, gint32 id)
457 MonoThreadsSync *mon;
459 mono_monitor_allocator_lock ();
461 mon->data = (void *)(size_t)mono_gchandle_new_weakref (obj, TRUE);
462 mono_monitor_allocator_unlock ();
469 discard_mon (MonoThreadsSync *mon)
471 mono_monitor_allocator_lock ();
472 mono_gchandle_free ((guint32)mon->data);
474 mono_monitor_allocator_unlock ();
478 mono_monitor_inflate_owned (MonoObject *obj, int id)
480 MonoThreadsSync *mon;
481 LockWord nlw, old_lw, tmp_lw;
484 old_lw.sync = obj->synchronisation;
485 LOCK_DEBUG (g_message ("%s: (%d) Inflating owned lock object %p; LW = %p", __func__, id, obj, old_lw.sync));
487 if (lock_word_is_inflated (old_lw)) {
488 /* Someone else inflated the lock in the meantime */
492 mon = alloc_mon (obj, id);
494 nest = lock_word_get_nest (old_lw);
497 nlw = lock_word_new_inflated (mon);
499 mono_memory_write_barrier ();
500 tmp_lw.sync = (MonoThreadsSync *)InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, nlw.sync, old_lw.sync);
501 if (tmp_lw.sync != old_lw.sync) {
502 /* Someone else inflated the lock in the meantime */
508 mono_monitor_inflate (MonoObject *obj)
510 MonoThreadsSync *mon;
511 LockWord nlw, old_lw;
513 LOCK_DEBUG (g_message ("%s: (%d) Inflating lock object %p; LW = %p", __func__, mono_thread_info_get_small_id (), obj, obj->synchronisation));
515 mon = alloc_mon (obj, 0);
517 nlw = lock_word_new_inflated (mon);
519 old_lw.sync = obj->synchronisation;
524 if (lock_word_is_inflated (old_lw)) {
527 #ifdef HAVE_MOVING_COLLECTOR
528 else if (lock_word_has_hash (old_lw)) {
529 mon->hash_code = lock_word_get_hash (old_lw);
530 mon->status = mon_status_set_owner (mon->status, 0);
531 nlw = lock_word_set_has_hash (nlw);
534 else if (lock_word_is_free (old_lw)) {
535 mon->status = mon_status_set_owner (mon->status, 0);
539 mon->status = mon_status_set_owner (mon->status, lock_word_get_owner (old_lw));
540 mon->nest = lock_word_get_nest (old_lw);
542 mono_memory_write_barrier ();
543 tmp_lw.sync = (MonoThreadsSync *)InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, nlw.sync, old_lw.sync);
544 if (tmp_lw.sync == old_lw.sync) {
545 /* Successfully inflated the lock */
549 old_lw.sync = tmp_lw.sync;
552 /* Someone else inflated the lock before us */
556 #define MONO_OBJECT_ALIGNMENT_SHIFT 3
/* Hash-code protocol: with a moving GC the address cannot be the hash, so the
 * hash is computed once from the (currently pinned) address and then cached —
 * either inline in a thin lock word or in the inflated sync block. Racing
 * threads compute the same value, so lost CAS races are harmless.
 * NOTE(review): dump is missing lines (e.g. the !obj early-out and some else
 * arms) — confirm the control flow against the full file.
 */
562 * Calculate a hash code for @obj that is constant while @obj is alive.
565 mono_object_hash (MonoObject* obj)
567 #ifdef HAVE_MOVING_COLLECTOR
572 lw.sync = obj->synchronisation;
574 LOCK_DEBUG (g_message("%s: (%d) Get hash for object %p; LW = %p", __func__, mono_thread_info_get_small_id (), obj, obj->synchronisation));
/* Fast path: hash already computed, stored either in the sync block or inline. */
576 if (lock_word_has_hash (lw)) {
577 if (lock_word_is_inflated (lw)) {
578 return lock_word_get_inflated_lock (lw)->hash_code;
580 return lock_word_get_hash (lw);
584 * while we are inside this function, the GC will keep this object pinned,
585 * since we are in the unmanaged stack. Thanks to this and to the hash
586 * function that depends only on the address, we can ignore the races if
587 * another thread computes the hash at the same time, because it'll end up
588 * with the same value.
/* Knuth multiplicative hash of the (pinned) address. */
590 hash = (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
591 #if SIZEOF_VOID_P == 4
592 /* clear the top bits as they can be discarded */
593 hash &= ~(LOCK_WORD_STATUS_MASK << (32 - LOCK_WORD_STATUS_BITS));
/* Free lock word: try to install a thin hash without inflating. */
595 if (lock_word_is_free (lw)) {
597 lw = lock_word_new_thin_hash (hash);
599 old_lw.sync = (MonoThreadsSync *)InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, NULL);
600 if (old_lw.sync == NULL) {
604 if (lock_word_has_hash (old_lw)) {
605 /* Done by somebody else */
/* CAS lost to a locker: inflate so hash and lock state can coexist. */
609 mono_monitor_inflate (obj);
610 lw.sync = obj->synchronisation;
611 } else if (lock_word_is_flat (lw)) {
612 int id = mono_thread_info_get_small_id ();
613 if (lock_word_get_owner (lw) == id)
614 mono_monitor_inflate_owned (obj, id);
616 mono_monitor_inflate (obj);
617 lw.sync = obj->synchronisation;
620 /* At this point, the lock is inflated */
621 lock_word_get_inflated_lock (lw)->hash_code = hash;
622 lw = lock_word_set_has_hash (lw);
623 mono_memory_write_barrier ();
624 obj->synchronisation = lw.sync;
/* Non-moving GC: the address itself is stable, so just hash it every time. */
628 * Wang's address-based hash function:
629 * http://www.concentric.net/~Ttwang/tech/addrhash.htm
631 return (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
636 mono_monitor_ensure_owned (LockWord lw, guint32 id)
638 if (lock_word_is_flat (lw)) {
639 if (lock_word_get_owner (lw) == id)
641 } else if (lock_word_is_inflated (lw)) {
642 if (mon_status_get_owner (lock_word_get_inflated_lock (lw)->status) == id)
646 mono_set_pending_exception (mono_get_exception_synchronization_lock ("Object synchronization method was called from an unsynchronized block of code."));
651 * When this function is called it has already been established that the
652 * current thread owns the monitor.
655 mono_monitor_exit_inflated (MonoObject *obj)
658 MonoThreadsSync *mon;
661 lw.sync = obj->synchronisation;
662 mon = lock_word_get_inflated_lock (lw);
664 nest = mon->nest - 1;
666 guint32 new_status, old_status, tmp_status;
668 old_status = mon->status;
671 * Release lock and do the wakeup stuff. It's possible that
672 * the last blocking thread gave up waiting just before we
673 * release the semaphore resulting in a negative entry count
674 * and a futile wakeup next time there's contention for this
678 gboolean have_waiters = mon_status_have_waiters (old_status);
680 new_status = mon_status_set_owner (old_status, 0);
682 new_status = mon_status_decrement_entry_count (new_status);
683 tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
684 if (tmp_status == old_status) {
686 mono_coop_sem_post (mon->entry_sem);
689 old_status = tmp_status;
691 LOCK_DEBUG (g_message ("%s: (%d) Object %p is now unlocked", __func__, mono_thread_info_get_small_id (), obj));
693 /* object is now unlocked, leave nest==1 so we don't
694 * need to set it when the lock is reacquired
697 LOCK_DEBUG (g_message ("%s: (%d) Object %p is now locked %d times", __func__, mono_thread_info_get_small_id (), obj, nest));
703 * When this function is called it has already been established that the
704 * current thread owns the monitor.
707 mono_monitor_exit_flat (MonoObject *obj, LockWord old_lw)
709 LockWord new_lw, tmp_lw;
710 if (G_UNLIKELY (lock_word_is_nested (old_lw)))
711 new_lw = lock_word_decrement_nest (old_lw);
713 new_lw.lock_word = 0;
715 tmp_lw.sync = (MonoThreadsSync *)InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, new_lw.sync, old_lw.sync);
716 if (old_lw.sync != tmp_lw.sync) {
717 /* Someone inflated the lock in the meantime */
718 mono_monitor_exit_inflated (obj);
721 LOCK_DEBUG (g_message ("%s: (%d) Object %p is now locked %d times; LW = %p", __func__, mono_thread_info_get_small_id (), obj, lock_word_get_nest (new_lw), obj->synchronisation));
725 mon_decrement_entry_count (MonoThreadsSync *mon)
727 guint32 old_status, tmp_status, new_status;
729 /* Decrement entry count */
730 old_status = mon->status;
732 new_status = mon_status_decrement_entry_count (old_status);
733 tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
734 if (tmp_status == old_status) {
737 old_status = tmp_status;
/* Acquire an already-inflated monitor: returns 1 on success, 0 on timeout,
 * -1 when interrupted (only when allow_interruption). Fast path: CAS our id
 * into the unowned status word. Slow path: register as a waiter (entry count),
 * block on the monitor's lazily-created semaphore, and loop via
 * retry_contended. NOTE(review): this dump is missing lines (labels,
 * braces, some returns) — flow reconstructed in comments only, confirm
 * against the full file.
 */
741 /* If allow_interruption==TRUE, the method will be interrumped if abort or suspend
742 * is requested. In this case it returns -1.
745 mono_monitor_try_enter_inflated (MonoObject *obj, guint32 ms, gboolean allow_interruption, guint32 id)
748 MonoThreadsSync *mon;
750 gint64 then = 0, now, delta;
752 guint32 new_status, old_status, tmp_status;
753 MonoSemTimedwaitRet wait_ret;
754 MonoInternalThread *thread;
755 gboolean interrupted = FALSE;
757 LOCK_DEBUG (g_message("%s: (%d) Trying to lock object %p (%d ms)", __func__, id, obj, ms));
759 if (G_UNLIKELY (!obj)) {
760 mono_set_pending_exception (mono_get_exception_argument_null ("obj"));
764 lw.sync = obj->synchronisation;
765 mon = lock_word_get_inflated_lock (lw);
/* Fast path: lock is unowned, try to CAS our id in. */
767 /* This case differs from Dice's case 3 because we don't
768 * deflate locks or cache unused lock records
770 old_status = mon->status;
771 if (G_LIKELY (mon_status_get_owner (old_status) == 0)) {
772 /* Try to install our ID in the owner field, nest
773 * should have been left at 1 by the previous unlock
776 new_status = mon_status_set_owner (old_status, id);
777 tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
778 if (G_LIKELY (tmp_status == old_status)) {
780 g_assert (mon->nest == 1);
/* Recursive acquisition: only the owner writes nest, no CAS needed. */
788 /* If the object is currently locked by this thread... */
789 if (mon_status_get_owner (old_status) == id) {
794 /* The object must be locked by someone else... */
795 #ifndef DISABLE_PERFCOUNTERS
796 InterlockedIncrement (&mono_perfcounters->thread_contentions);
799 /* If ms is 0 we don't block, but just fail straight away */
801 LOCK_DEBUG (g_message ("%s: (%d) timed out, returning FALSE", __func__, id));
805 MONO_PROFILER_RAISE (monitor_contention, (obj));
807 /* The slow path begins here. */
809 /* a small amount of duplicated code, but it allows us to insert the profiler
810 * callbacks without impacting the fast path: from here on we don't need to go back to the
811 * retry label, but to retry_contended. At this point mon is already installed in the object
814 /* This case differs from Dice's case 3 because we don't
815 * deflate locks or cache unused lock records
817 old_status = mon->status;
818 if (G_LIKELY (mon_status_get_owner (old_status) == 0)) {
819 /* Try to install our ID in the owner field, nest
820 * should have been left at 1 by the previous unlock
823 new_status = mon_status_set_owner (old_status, id);
824 tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
825 if (G_LIKELY (tmp_status == old_status)) {
827 g_assert (mon->nest == 1);
828 MONO_PROFILER_RAISE (monitor_acquired, (obj));
833 /* If the object is currently locked by this thread... */
834 if (mon_status_get_owner (old_status) == id) {
836 MONO_PROFILER_RAISE (monitor_acquired, (obj));
840 /* We need to make sure there's a semaphore handle (creating it if
841 * necessary), and block on it
843 if (mon->entry_sem == NULL) {
844 /* Create the semaphore */
845 sem = g_new0 (MonoCoopSem, 1);
846 mono_coop_sem_init (sem, 0);
/* CAS installs the semaphore once; the loser destroys its copy. */
847 if (InterlockedCompareExchangePointer ((gpointer*)&mon->entry_sem, sem, NULL) != NULL) {
848 /* Someone else just put a handle here */
849 mono_coop_sem_destroy (sem);
855 * We need to register ourselves as waiting if it is the first time we are waiting,
856 * of if we were signaled and failed to acquire the lock.
859 old_status = mon->status;
/* Lock became free while registering: go back and race for it. */
861 if (mon_status_get_owner (old_status) == 0)
862 goto retry_contended;
863 new_status = mon_status_increment_entry_count (old_status);
864 tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
865 if (tmp_status == old_status) {
868 old_status = tmp_status;
/* Record the start time so the remaining timeout can be recomputed on retry. */
872 if (ms != MONO_INFINITE_WAIT) {
873 then = mono_msec_ticks ();
877 #ifndef DISABLE_PERFCOUNTERS
878 InterlockedIncrement (&mono_perfcounters->thread_queue_len);
879 InterlockedIncrement (&mono_perfcounters->thread_queue_max);
881 thread = mono_thread_internal_current ();
884 * If we allow interruption, we check the test state for an abort request before going into sleep.
885 * This is a workaround to the fact that Thread.Abort does non-sticky interruption of semaphores.
887 * Semaphores don't support the sticky interruption with mono_thread_info_install_interrupt.
889 * A better fix would be to switch to wait with something that allows sticky interrupts together
890 * with wrapping it with abort_protected_block_count for the non-alertable cases.
891 * And somehow make this whole dance atomic and not crazy expensive. Good luck.
894 if (allow_interruption) {
/* Pending abort detected before sleeping: treat as an alerted wait. */
895 if (!mono_thread_test_and_set_state (thread, ThreadState_AbortRequested, ThreadState_WaitSleepJoin)) {
896 wait_ret = MONO_SEM_TIMEDWAIT_RET_ALERTED;
900 mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
904 * We pass ALERTABLE instead of allow_interruption since we have to check for the
905 * StopRequested case below.
907 wait_ret = mono_coop_sem_timedwait (mon->entry_sem, waitms, MONO_SEM_FLAGS_ALERTABLE);
909 mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
912 #ifndef DISABLE_PERFCOUNTERS
913 InterlockedDecrement (&mono_perfcounters->thread_queue_len);
/* Alerted but caller disallowed interruption: retry unless shutting down. */
916 if (wait_ret == MONO_SEM_TIMEDWAIT_RET_ALERTED && !allow_interruption) {
919 * We have to obey a stop/suspend request even if
920 * allow_interruption is FALSE to avoid hangs at shutdown.
922 if (!mono_thread_test_state (mono_thread_internal_current (), ThreadState_SuspendRequested | ThreadState_AbortRequested)) {
/* Recompute the remaining timeout from the recorded start time. */
923 if (ms != MONO_INFINITE_WAIT) {
924 now = mono_msec_ticks ();
926 /* it should not overflow before ~30k years */
927 g_assert (now >= then);
936 /* retry from the top */
937 goto retry_contended;
939 } else if (wait_ret == MONO_SEM_TIMEDWAIT_RET_SUCCESS) {
941 /* retry from the top */
942 goto retry_contended;
943 } else if (wait_ret == MONO_SEM_TIMEDWAIT_RET_TIMEDOUT) {
947 /* Timed out or interrupted */
948 mon_decrement_entry_count (mon);
950 MONO_PROFILER_RAISE (monitor_failed, (obj));
952 if (wait_ret == MONO_SEM_TIMEDWAIT_RET_ALERTED) {
953 LOCK_DEBUG (g_message ("%s: (%d) interrupted waiting, returning -1", __func__, id));
955 } else if (wait_ret == MONO_SEM_TIMEDWAIT_RET_TIMEDOUT) {
956 LOCK_DEBUG (g_message ("%s: (%d) timed out waiting, returning FALSE", __func__, id));
959 g_assert_not_reached ();
965 * If allow_interruption == TRUE, the method will be interrupted if abort or suspend
966 * is requested. In this case it returns -1.
969 mono_monitor_try_enter_internal (MonoObject *obj, guint32 ms, gboolean allow_interruption)
972 int id = mono_thread_info_get_small_id ();
974 LOCK_DEBUG (g_message("%s: (%d) Trying to lock object %p (%d ms)", __func__, id, obj, ms));
976 lw.sync = obj->synchronisation;
978 if (G_LIKELY (lock_word_is_free (lw))) {
979 LockWord nlw = lock_word_new_flat (id);
980 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, nlw.sync, NULL) == NULL) {
983 /* Someone acquired it in the meantime or put a hash */
984 mono_monitor_inflate (obj);
985 return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);
987 } else if (lock_word_is_inflated (lw)) {
988 return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);
989 } else if (lock_word_is_flat (lw)) {
990 if (lock_word_get_owner (lw) == id) {
991 if (lock_word_is_max_nest (lw)) {
992 mono_monitor_inflate_owned (obj, id);
993 return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);
995 LockWord nlw, old_lw;
996 nlw = lock_word_increment_nest (lw);
997 old_lw.sync = (MonoThreadsSync *)InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, nlw.sync, lw.sync);
998 if (old_lw.sync != lw.sync) {
999 /* Someone else inflated it in the meantime */
1000 g_assert (lock_word_is_inflated (old_lw));
1001 return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);
1006 mono_monitor_inflate (obj);
1007 return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);
1009 } else if (lock_word_has_hash (lw)) {
1010 mono_monitor_inflate (obj);
1011 return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);
1014 g_assert_not_reached ();
1018 /* This is an icall */
1020 mono_monitor_enter_internal (MonoObject *obj)
1023 gboolean allow_interruption = TRUE;
1024 if (G_UNLIKELY (!obj)) {
1025 mono_set_pending_exception (mono_get_exception_argument_null ("obj"));
1030 * An inquisitive mind could ask what's the deal with this loop.
1031 * It exists to deal with interrupting a monitor enter that happened within an abort-protected block, like a .cctor.
1033 * The thread will be set with a pending abort and the wait might even be interrupted. Either way, once we call mono_thread_interruption_checkpoint,
1034 * it will return NULL meaning we can't be aborted right now. Once that happens we switch to non-alertable.
1037 res = mono_monitor_try_enter_internal (obj, MONO_INFINITE_WAIT, allow_interruption);
1038 /*This means we got interrupted during the wait and didn't got the monitor.*/
1040 MonoException *exc = mono_thread_interruption_checkpoint ();
1042 mono_set_pending_exception (exc);
1045 //we detected a pending interruption but it turned out to be a false positive, we ignore it from now on (this feels like a hack, right?, threads.c should give us less confusing directions)
1046 allow_interruption = FALSE;
1049 } while (res == -1);
1054 * mono_monitor_enter:
1057 mono_monitor_enter (MonoObject *obj)
1059 return mono_monitor_enter_internal (obj);
1062 /* Called from JITted code so we return guint32 instead of gboolean */
1064 mono_monitor_enter_fast (MonoObject *obj)
1066 if (G_UNLIKELY (!obj)) {
1067 /* don't set pending exn on the fast path, just return
1068 * FALSE and let the slow path take care of it. */
1071 return mono_monitor_try_enter_internal (obj, 0, FALSE) == 1;
1075 * mono_monitor_try_enter:
1078 mono_monitor_try_enter (MonoObject *obj, guint32 ms)
1080 if (G_UNLIKELY (!obj)) {
1081 mono_set_pending_exception (mono_get_exception_argument_null ("obj"));
1084 return mono_monitor_try_enter_internal (obj, ms, FALSE) == 1;
1088 * mono_monitor_exit:
1091 mono_monitor_exit (MonoObject *obj)
1095 LOCK_DEBUG (g_message ("%s: (%d) Unlocking %p", __func__, mono_thread_info_get_small_id (), obj));
1097 if (G_UNLIKELY (!obj)) {
1098 mono_set_pending_exception (mono_get_exception_argument_null ("obj"));
1102 lw.sync = obj->synchronisation;
1104 if (!mono_monitor_ensure_owned (lw, mono_thread_info_get_small_id ()))
1107 if (G_UNLIKELY (lock_word_is_inflated (lw)))
1108 mono_monitor_exit_inflated (obj);
1110 mono_monitor_exit_flat (obj, lw);
1114 mono_monitor_get_object_monitor_gchandle (MonoObject *object)
1118 lw.sync = object->synchronisation;
1120 if (lock_word_is_inflated (lw)) {
1121 MonoThreadsSync *mon = lock_word_get_inflated_lock (lw);
1122 return (guint32)mon->data;
1128 * mono_monitor_threads_sync_member_offset:
1129 * @status_offset: returns size and offset of the "status" member
1130 * @nest_offset: returns size and offset of the "nest" member
1132 * Returns the offsets and sizes of two members of the
1133 * MonoThreadsSync struct. The Monitor ASM fastpaths need this.
1136 mono_monitor_threads_sync_members_offset (int *status_offset, int *nest_offset)
1140 #define ENCODE_OFF_SIZE(o,s) (((o) << 8) | ((s) & 0xff))
1142 *status_offset = ENCODE_OFF_SIZE (MONO_STRUCT_OFFSET (MonoThreadsSync, status), sizeof (ts.status));
1143 *nest_offset = ENCODE_OFF_SIZE (MONO_STRUCT_OFFSET (MonoThreadsSync, nest), sizeof (ts.nest));
1147 ves_icall_System_Threading_Monitor_Monitor_try_enter_with_atomic_var (MonoObject *obj, guint32 ms, MonoBoolean *lockTaken)
1150 gboolean allow_interruption = TRUE;
1151 if (G_UNLIKELY (!obj)) {
1152 mono_set_pending_exception (mono_get_exception_argument_null ("obj"));
1156 res = mono_monitor_try_enter_internal (obj, ms, allow_interruption);
1157 /*This means we got interrupted during the wait and didn't got the monitor.*/
1159 MonoException *exc = mono_thread_interruption_checkpoint ();
1161 mono_set_pending_exception (exc);
1164 //we detected a pending interruption but it turned out to be a false positive, we ignore it from now on (this feels like a hack, right?, threads.c should give us less confusing directions)
1165 allow_interruption = FALSE;
1168 } while (res == -1);
1169 /*It's safe to do it from here since interruption would happen only on the wrapper.*/
1170 *lockTaken = res == 1;
1174 * mono_monitor_enter_v4:
1177 mono_monitor_enter_v4 (MonoObject *obj, char *lock_taken)
1179 if (*lock_taken == 1) {
1180 mono_set_pending_exception (mono_get_exception_argument ("lockTaken", "lockTaken is already true"));
1186 ves_icall_System_Threading_Monitor_Monitor_try_enter_with_atomic_var (obj, MONO_INFINITE_WAIT, &taken);
1187 *lock_taken = taken;
1190 /* Called from JITted code */
1192 mono_monitor_enter_v4_internal (MonoObject *obj, MonoBoolean *lock_taken)
1194 if (*lock_taken == 1) {
1195 mono_set_pending_exception (mono_get_exception_argument ("lockTaken", "lockTaken is already true"));
1199 ves_icall_System_Threading_Monitor_Monitor_try_enter_with_atomic_var (obj, MONO_INFINITE_WAIT, lock_taken);
1203 * mono_monitor_enter_v4_fast:
1205 * Same as mono_monitor_enter_v4, but return immediately if the
1206 * monitor cannot be acquired.
1207 * Returns TRUE if the lock was acquired, FALSE otherwise.
1208 * Called from JITted code so we return guint32 instead of gboolean.
1211 mono_monitor_enter_v4_fast (MonoObject *obj, MonoBoolean *lock_taken)
/* lock_taken already true is a caller bug; bail out (error path
 * elided in this excerpt). */
1213 if (*lock_taken == 1)
/* Null object: nothing to lock (return path elided in this excerpt). */
1215 if (G_UNLIKELY (!obj))
/* Zero timeout == pure try: succeed only if the lock is immediately
 * available; interruption is allowed (TRUE). */
1217 gint32 res = mono_monitor_try_enter_internal (obj, 0, TRUE);
1218 *lock_taken = res == 1;
/*
 * Icall: returns whether the *current* thread owns OBJ's monitor, by
 * comparing the stored owner id against this thread's small id.
 */
1223 ves_icall_System_Threading_Monitor_Monitor_test_owner (MonoObject *obj)
1227 LOCK_DEBUG (g_message ("%s: Testing if %p is owned by thread %d", __func__, obj, mono_thread_info_get_small_id()));
/* Snapshot the lock word once; it is read without taking any lock. */
1229 lw.sync = obj->synchronisation;
/* Thin (flat) lock: the owner's small id is encoded inline in the
 * lock word itself. */
1231 if (lock_word_is_flat (lw)) {
1232 return lock_word_get_owner (lw) == mono_thread_info_get_small_id ();
/* Inflated lock: the owner id lives in the MonoThreadsSync status
 * word instead. */
1233 } else if (lock_word_is_inflated (lw)) {
1234 return mon_status_get_owner (lock_word_get_inflated_lock (lw)->status) == mono_thread_info_get_small_id ();
/*
 * Icall: returns whether *any* thread currently holds OBJ's monitor
 * (as opposed to test_owner, which checks for the calling thread).
 */
1241 ves_icall_System_Threading_Monitor_Monitor_test_synchronised (MonoObject *obj)
1245 LOCK_DEBUG (g_message("%s: (%d) Testing if %p is owned by any thread", __func__, mono_thread_info_get_small_id (), obj));
/* Unlocked snapshot of the lock word. */
1247 lw.sync = obj->synchronisation;
/* Thin lock: held unless the word is in the free state. */
1249 if (lock_word_is_flat (lw)) {
1250 return !lock_word_is_free (lw);
/* Inflated lock: held when the status word records a non-zero owner. */
1251 } else if (lock_word_is_inflated (lw)) {
1252 return mon_status_get_owner (lock_word_get_inflated_lock (lw)->status) != 0;
1258 /* All wait list manipulation in the pulse, pulseall and wait
1259 * functions happens while the monitor lock is held, so we don't need
1260 * any extra struct locking
/*
 * Icall backing Monitor.Pulse: wake exactly one thread waiting on
 * OBJ's monitor, if any.  The caller must own the monitor.
 */
1264 ves_icall_System_Threading_Monitor_Monitor_pulse (MonoObject *obj)
1268 MonoThreadsSync *mon;
1270 LOCK_DEBUG (g_message ("%s: (%d) Pulsing %p", __func__, mono_thread_info_get_small_id (), obj));
1272 id = mono_thread_info_get_small_id ();
1273 lw.sync = obj->synchronisation;
/* Pulsing without owning the monitor is an error; bail out if the
 * ownership check fails (error raising handled by ensure_owned). */
1275 if (!mono_monitor_ensure_owned (lw, id))
1278 if (!lock_word_is_inflated (lw)) {
1279 /* No threads waiting. A wait would have inflated the lock */
1283 mon = lock_word_get_inflated_lock (lw);
1285 LOCK_DEBUG (g_message ("%s: (%d) %d threads waiting", __func__, mono_thread_info_get_small_id (), g_slist_length (mon->wait_list)));
/* Wake the head of the wait queue: signal its private event and
 * unlink it, so a later Pulse cannot signal the same waiter twice. */
1287 if (mon->wait_list != NULL) {
1288 LOCK_DEBUG (g_message ("%s: (%d) signalling and dequeuing handle %p", __func__, mono_thread_info_get_small_id (), mon->wait_list->data));
1290 mono_w32event_set (mon->wait_list->data);
1291 mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
/*
 * Icall backing Monitor.PulseAll: wake every thread waiting on OBJ's
 * monitor.  Identical to pulse except the dequeue runs until the wait
 * list is empty.  The caller must own the monitor.
 */
1296 ves_icall_System_Threading_Monitor_Monitor_pulse_all (MonoObject *obj)
1300 MonoThreadsSync *mon;
1302 LOCK_DEBUG (g_message("%s: (%d) Pulsing all %p", __func__, mono_thread_info_get_small_id (), obj));
1304 id = mono_thread_info_get_small_id ();
1305 lw.sync = obj->synchronisation;
/* Only the monitor owner may pulse; bail out otherwise. */
1307 if (!mono_monitor_ensure_owned (lw, id))
1310 if (!lock_word_is_inflated (lw)) {
1311 /* No threads waiting. A wait would have inflated the lock */
1315 mon = lock_word_get_inflated_lock (lw);
1317 LOCK_DEBUG (g_message ("%s: (%d) %d threads waiting", __func__, mono_thread_info_get_small_id (), g_slist_length (mon->wait_list)));
/* Drain the whole queue: signal and unlink each waiter's event. */
1319 while (mon->wait_list != NULL) {
1320 LOCK_DEBUG (g_message ("%s: (%d) signalling and dequeuing handle %p", __func__, mono_thread_info_get_small_id (), mon->wait_list->data));
1322 mono_w32event_set (mon->wait_list->data);
1323 mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
/*
 * Icall backing Monitor.Wait (obj, ms): atomically release OBJ's
 * monitor, block on a per-wait auto-reset event until pulsed or the
 * timeout expires, then reacquire the monitor before returning.
 * The caller must own the monitor.  (Function continues past the end
 * of this excerpt.)
 */
1328 ves_icall_System_Threading_Monitor_Monitor_wait (MonoObject *obj, guint32 ms)
1331 MonoThreadsSync *mon;
1334 MonoW32HandleWaitRet ret;
1335 gboolean success = FALSE;
1337 MonoInternalThread *thread = mono_thread_internal_current ();
1338 int id = mono_thread_info_get_small_id ();
1340 LOCK_DEBUG (g_message ("%s: (%d) Trying to wait for %p with timeout %dms", __func__, mono_thread_info_get_small_id (), obj, ms));
1342 lw.sync = obj->synchronisation;
/* Waiting without owning the monitor is an error; bail out. */
1344 if (!mono_monitor_ensure_owned (lw, id))
/* The wait list lives in the inflated MonoThreadsSync, so a flat
 * lock must be inflated before we can queue ourselves on it. */
1347 if (!lock_word_is_inflated (lw)) {
1348 mono_monitor_inflate_owned (obj, id);
/* Re-read the lock word: inflation replaced it. */
1349 lw.sync = obj->synchronisation;
1352 mon = lock_word_get_inflated_lock (lw);
1354 /* Do this WaitSleepJoin check before creating the event handle */
1355 if (mono_thread_current_check_pending_interrupt ())
/* Private auto-reset event for this wait (non-manual, initially
 * unsignalled). */
1358 event = mono_w32event_create (FALSE, FALSE);
1359 if (event == NULL) {
1360 mono_set_pending_exception (mono_get_exception_synchronization_lock ("Failed to set up wait event"));
1364 LOCK_DEBUG (g_message ("%s: (%d) queuing handle %p", __func__, mono_thread_info_get_small_id (), event));
1366 /* This looks superfluous */
1367 if (mono_thread_current_check_pending_interrupt ()) {
1368 mono_w32event_close (event);
1372 mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
/* Enqueue ourselves; safe because we still hold the monitor here. */
1374 mon->wait_list = g_slist_append (mon->wait_list, event);
1376 /* Save the nest count, and release the lock */
1379 mono_memory_write_barrier ();
1380 mono_monitor_exit_inflated (obj);
1382 LOCK_DEBUG (g_message ("%s: (%d) Unlocked %p lock %p", __func__, mono_thread_info_get_small_id (), obj, mon));
1384 /* There's no race between unlocking mon and waiting for the
1385 * event, because auto reset events are sticky, and this event
1386 * is private to this thread. Therefore even if the event was
1387 * signalled before we wait, we still succeed.
/* Platform-specific alertable wait on the event. */
1391 ret = mono_w32handle_convert_wait_ret (mono_win32_wait_for_single_object_ex (event, ms, TRUE), 1);
1393 ret = mono_w32handle_wait_one (event, ms, TRUE);
1394 #endif /* HOST_WIN32 */
1397 /* Reset the thread state fairly early, so we don't have to worry
1398 * about the monitor error checking
1400 mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
1402 /* Regain the lock with the previous nest count */
/* -1 == interrupted without acquiring: keep retrying, since we must
 * hold the monitor again before we can report anything. */
1404 regain = mono_monitor_try_enter_inflated (obj, MONO_INFINITE_WAIT, TRUE, id);
1405 /* We must regain the lock before handling interruption requests */
1406 } while (regain == -1);
1408 g_assert (regain == 1);
1412 LOCK_DEBUG (g_message ("%s: (%d) Regained %p lock %p", __func__, mono_thread_info_get_small_id (), obj, mon));
1414 if (ret == MONO_W32HANDLE_WAIT_RET_TIMEOUT) {
1415 /* Poll the event again, just in case it was signalled
1416 * while we were trying to regain the monitor lock
/* Zero-timeout, non-alertable poll. */
1420 ret = mono_w32handle_convert_wait_ret (mono_win32_wait_for_single_object_ex (event, 0, FALSE), 1);
1422 ret = mono_w32handle_wait_one (event, 0, FALSE);
1423 #endif /* HOST_WIN32 */
1427 /* Pulse will have popped our event from the queue if it signalled
1428 * us, so we only do it here if the wait timed out.
1430 * This avoids a race condition where the thread holding the
1431 * lock can Pulse several times before the WaitForSingleObject
1432 * returns. If we popped the queue here then this event might
1433 * be signalled more than once, thereby starving another
1437 if (ret == MONO_W32HANDLE_WAIT_RET_SUCCESS_0) {
1438 LOCK_DEBUG (g_message ("%s: (%d) Success", __func__, mono_thread_info_get_small_id ()));
1441 LOCK_DEBUG (g_message ("%s: (%d) Wait failed, dequeuing handle %p", __func__, mono_thread_info_get_small_id (), event));
1442 /* No pulse, so we have to remove ourself from the
/* Timed out: unlink our event ourselves (Pulse never saw it). */
1445 mon->wait_list = g_slist_remove (mon->wait_list, event);
/* The event is per-wait, so it is always closed here. */
1447 mono_w32event_close (event);