2 * monitor.c: Monitor locking functions
5 * Dick Porter (dick@ximian.com)
7 * Copyright 2003 Ximian, Inc (http://www.ximian.com)
8 * Copyright 2004-2009 Novell, Inc (http://www.novell.com)
9 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 #include <mono/metadata/abi-details.h>
17 #include <mono/metadata/monitor.h>
18 #include <mono/metadata/threads-types.h>
19 #include <mono/metadata/exception.h>
20 #include <mono/metadata/threads.h>
21 #include <mono/io-layer/io-layer.h>
22 #include <mono/metadata/object-internals.h>
23 #include <mono/metadata/class-internals.h>
24 #include <mono/metadata/gc-internals.h>
25 #include <mono/metadata/method-builder.h>
26 #include <mono/metadata/debug-helpers.h>
27 #include <mono/metadata/tabledefs.h>
28 #include <mono/metadata/marshal.h>
29 #include <mono/utils/mono-threads.h>
30 #include <mono/metadata/profiler-private.h>
31 #include <mono/utils/mono-time.h>
32 #include <mono/utils/atomic.h>
35 * Pull the list of opcodes
37 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
41 #include "mono/cil/opcode.def"
46 /*#define LOCK_DEBUG(a) do { a; } while (0)*/
50 * The monitor implementation here is based on
51 * http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf and
52 * http://www.research.ibm.com/people/d/dfb/papers/Bacon98Thin.ps
54 * The Dice paper describes a technique for saving lock record space
55 * by returning records to a free list when they become unused. That
56 * sounds like unnecessary complexity to me, though if it becomes
57 * clear that unused lock records are taking up lots of space or we
58 * need to shave more time off by avoiding a malloc then we can always
59 * implement the free list idea later. The timeout parameter to
60 * try_enter voids some of the assumptions about the reference count
61 * field in Dice's implementation too. In his version, the thread
62 * attempting to lock a contended object will block until it succeeds,
63 * so the reference count will never be decremented while an object is
66 * Bacon's thin locks have a fast path that doesn't need a lock record
67 * for the common case of locking an unlocked or shallow-nested
72 typedef struct _MonitorArray MonitorArray;
74 struct _MonitorArray {
77 MonoThreadsSync monitors [MONO_ZERO_LEN_ARRAY];
80 #define mono_monitor_allocator_lock() mono_os_mutex_lock (&monitor_mutex)
81 #define mono_monitor_allocator_unlock() mono_os_mutex_unlock (&monitor_mutex)
82 static mono_mutex_t monitor_mutex;
83 static MonoThreadsSync *monitor_freelist;
84 static MonitorArray *monitor_allocated;
85 static int array_size = 16;
87 /* MonoThreadsSync status helpers */
90 mon_status_get_owner (guint32 status)
92 return status & OWNER_MASK;
96 mon_status_set_owner (guint32 status, guint32 owner)
98 return (status & ENTRY_COUNT_MASK) | owner;
102 mon_status_get_entry_count (guint32 status)
104 gint32 entry_count = (gint32)((status & ENTRY_COUNT_MASK) >> ENTRY_COUNT_SHIFT);
105 gint32 zero = (gint32)(((guint32)ENTRY_COUNT_ZERO) >> ENTRY_COUNT_SHIFT);
106 return entry_count - zero;
109 static inline guint32
110 mon_status_init_entry_count (guint32 status)
112 return (status & OWNER_MASK) | ENTRY_COUNT_ZERO;
115 static inline guint32
116 mon_status_increment_entry_count (guint32 status)
118 return status + (1 << ENTRY_COUNT_SHIFT);
121 static inline guint32
122 mon_status_decrement_entry_count (guint32 status)
124 return status - (1 << ENTRY_COUNT_SHIFT);
127 static inline gboolean
128 mon_status_have_waiters (guint32 status)
130 return status & ENTRY_COUNT_WAITERS;
133 /* LockWord helpers */
135 static inline MonoThreadsSync*
136 lock_word_get_inflated_lock (LockWord lw)
138 lw.lock_word &= (~LOCK_WORD_STATUS_MASK);
142 static inline gboolean
143 lock_word_is_inflated (LockWord lw)
145 return lw.lock_word & LOCK_WORD_INFLATED;
148 static inline gboolean
149 lock_word_has_hash (LockWord lw)
151 return lw.lock_word & LOCK_WORD_HAS_HASH;
154 static inline LockWord
155 lock_word_set_has_hash (LockWord lw)
158 nlw.lock_word = lw.lock_word | LOCK_WORD_HAS_HASH;
162 static inline gboolean
163 lock_word_is_free (LockWord lw)
165 return !lw.lock_word;
168 static inline gboolean
169 lock_word_is_flat (LockWord lw)
171 /* Return whether the lock is flat or free */
172 return (lw.lock_word & LOCK_WORD_STATUS_MASK) == LOCK_WORD_FLAT;
176 lock_word_get_hash (LockWord lw)
178 return (gint32) (lw.lock_word >> LOCK_WORD_HASH_SHIFT);
182 lock_word_get_nest (LockWord lw)
184 if (lock_word_is_free (lw))
186 /* Inword nest count starts from 0 */
187 return ((lw.lock_word & LOCK_WORD_NEST_MASK) >> LOCK_WORD_NEST_SHIFT) + 1;
190 static inline gboolean
191 lock_word_is_nested (LockWord lw)
193 return lw.lock_word & LOCK_WORD_NEST_MASK;
196 static inline gboolean
197 lock_word_is_max_nest (LockWord lw)
199 return (lw.lock_word & LOCK_WORD_NEST_MASK) == LOCK_WORD_NEST_MASK;
202 static inline LockWord
203 lock_word_increment_nest (LockWord lw)
205 lw.lock_word += 1 << LOCK_WORD_NEST_SHIFT;
209 static inline LockWord
210 lock_word_decrement_nest (LockWord lw)
212 lw.lock_word -= 1 << LOCK_WORD_NEST_SHIFT;
217 lock_word_get_owner (LockWord lw)
219 return lw.lock_word >> LOCK_WORD_OWNER_SHIFT;
222 static inline LockWord
223 lock_word_new_thin_hash (gint32 hash)
226 lw.lock_word = (guint32)hash;
227 lw.lock_word = (lw.lock_word << LOCK_WORD_HASH_SHIFT) | LOCK_WORD_HAS_HASH;
231 static inline LockWord
232 lock_word_new_inflated (MonoThreadsSync *mon)
236 lw.lock_word |= LOCK_WORD_INFLATED;
240 static inline LockWord
241 lock_word_new_flat (gint32 owner)
244 lw.lock_word = owner;
245 lw.lock_word <<= LOCK_WORD_OWNER_SHIFT;
250 mono_monitor_init (void)
252 mono_os_mutex_init_recursive (&monitor_mutex);
/*
 * mono_monitor_cleanup:
 * Shutdown-time teardown of the monitor subsystem.  Marks the freelist
 * monitors (they hold no GC weak links) so the disabled sweep below can tell
 * them apart; the sweep itself stays disabled — see the FIXME.
 * NOTE(review): this chunk is a lossy extraction — braces, the #if 0 markers
 * and the g_free calls are missing versus upstream monitor.c; the code lines
 * below are kept verbatim.
 */
256 mono_monitor_cleanup (void)
258 MonoThreadsSync *mon;
259 /* MonitorArray *marray, *next = NULL; */
261 /*mono_os_mutex_destroy (&monitor_mutex);*/
263 /* The monitors on the freelist don't have weak links - mark them */
264 for (mon = monitor_freelist; mon; mon = (MonoThreadsSync *)mon->data)
265 mon->wait_list = (GSList *)-1;
268 * FIXME: This still crashes with sgen (async_read.exe)
270 * In mini_cleanup() we first call mono_runtime_cleanup(), which calls
271 * mono_monitor_cleanup(), which is supposed to free all monitor memory.
273 * Later in mini_cleanup(), we call mono_domain_free(), which calls
274 * mono_gc_clear_domain(), which frees all weak links associated with objects.
275 * Those weak links reside in the monitor structures, which we've freed earlier.
277 * Unless we fix this dependency in the shutdown sequence this code has to remain
278 * disabled, or at least the call to g_free().
/* NOTE(review): the loop below belongs to the disabled block described above;
 * it removes the weak link of every still-live monitor before freeing. */
281 for (marray = monitor_allocated; marray; marray = next) {
284 for (i = 0; i < marray->num_monitors; ++i) {
285 mon = &marray->monitors [i];
286 if (mon->wait_list != (gpointer)-1)
287 mono_gc_weak_link_remove (&mon->data);
297 monitor_is_on_freelist (MonoThreadsSync *mon)
299 MonitorArray *marray;
300 for (marray = monitor_allocated; marray; marray = marray->next) {
301 if (mon >= marray->monitors && mon < &marray->monitors [marray->num_monitors])
/*
 * mono_locks_dump:
 * Print a report on stdout of the managed locks currently held by threads;
 * intended to be invoked by hand from a debugger such as gdb.
 * NOTE(review): lossy extraction — the counter increments (used++,
 * on_freelist++, to_recycle++, num_arrays++), several braces and the `i`
 * declaration are missing versus upstream; code lines kept verbatim.
 */
311 * Print a report on stdout of the managed locks currently held by
312 * threads. If @include_untaken is specified, list also inflated locks
314 * This is supposed to be used in debuggers like gdb.
317 mono_locks_dump (gboolean include_untaken)
320 int used = 0, on_freelist = 0, to_recycle = 0, total = 0, num_arrays = 0;
321 MonoThreadsSync *mon;
322 MonitorArray *marray;
/* Walk the freelist first so freelist records can be excluded below. */
323 for (mon = monitor_freelist; mon; mon = (MonoThreadsSync *)mon->data)
325 for (marray = monitor_allocated; marray; marray = marray->next) {
326 total += marray->num_monitors;
328 for (i = 0; i < marray->num_monitors; ++i) {
329 mon = &marray->monitors [i];
330 if (mon->data == NULL) {
/* data == NULL means "end of freelist chain" only for the very last record. */
331 if (i < marray->num_monitors - 1)
334 if (!monitor_is_on_freelist ((MonoThreadsSync *)mon->data)) {
/* In-use record: data is a weak GC handle to the owning object. */
335 MonoObject *holder = (MonoObject *)mono_gchandle_get_target ((guint32)mon->data);
336 if (mon_status_get_owner (mon->status)) {
337 g_print ("Lock %p in object %p held by thread %d, nest level: %d\n",
338 mon, holder, mon_status_get_owner (mon->status), mon->nest);
340 g_print ("\tWaiting on semaphore %p: %d\n", mon->entry_sem, mon_status_get_entry_count (mon->status));
341 } else if (include_untaken) {
342 g_print ("Lock %p in object %p untaken\n", mon, holder);
349 g_print ("Total locks (in %d array(s)): %d, used: %d, on freelist: %d, to recycle: %d\n",
350 num_arrays, total, used, on_freelist, to_recycle);
353 /* LOCKING: this is called with monitor_mutex held */
355 mon_finalize (MonoThreadsSync *mon)
357 LOCK_DEBUG (g_message ("%s: Finalizing sync %p", __func__, mon));
359 if (mon->entry_sem != NULL) {
360 mono_coop_sem_destroy (mon->entry_sem);
361 g_free (mon->entry_sem);
362 mon->entry_sem = NULL;
364 /* If this isn't empty then something is seriously broken - it
365 * means a thread is still waiting on the object that owned
366 * this lock, but the object has been finalized.
368 g_assert (mon->wait_list == NULL);
370 /* owner and nest are set in mon_new, no need to zero them out */
372 mon->data = monitor_freelist;
373 monitor_freelist = mon;
374 #ifndef DISABLE_PERFCOUNTERS
375 mono_perfcounters->gc_sync_blocks--;
/*
 * mon_new:
 * Allocate a MonoThreadsSync record owned by thread @id.  Tries, in order:
 * the freelist; recycling records whose owning object has been collected
 * (weak handle target is NULL); allocating a new, doubling-size MonitorArray.
 * NOTE(review): lossy extraction — the function signature line, braces, the
 * `break`/`goto` that implements the "perf tweak", the array-size doubling
 * and list-append statements are missing versus upstream; code lines kept
 * verbatim.
 */
379 /* LOCKING: this is called with monitor_mutex held */
380 static MonoThreadsSync *
383 MonoThreadsSync *new_;
385 if (!monitor_freelist) {
386 MonitorArray *marray;
388 /* see if any sync block has been collected */
390 for (marray = monitor_allocated; marray; marray = marray->next) {
391 for (i = 0; i < marray->num_monitors; ++i) {
/* A NULL weak-handle target means the owning object was GC'd; recycle. */
392 if (mono_gchandle_get_target ((guint32)marray->monitors [i].data) == NULL) {
393 new_ = &marray->monitors [i];
394 if (new_->wait_list) {
395 /* Orphaned events left by aborted threads */
396 while (new_->wait_list) {
397 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d): Closing orphaned event %d", mono_thread_info_get_small_id (), new_->wait_list->data));
398 CloseHandle (new_->wait_list->data);
399 new_->wait_list = g_slist_remove (new_->wait_list, new_->wait_list->data);
402 mono_gchandle_free ((guint32)new_->data);
403 new_->data = monitor_freelist;
404 monitor_freelist = new_;
407 /* small perf tweak to avoid scanning all the blocks */
411 /* need to allocate a new array of monitors */
412 if (!monitor_freelist) {
414 LOCK_DEBUG (g_message ("%s: allocating more monitors: %d", __func__, array_size));
415 marray = (MonitorArray *)g_malloc0 (MONO_SIZEOF_MONO_ARRAY + array_size * sizeof (MonoThreadsSync));
416 marray->num_monitors = array_size;
418 /* link into the freelist */
419 for (i = 0; i < marray->num_monitors - 1; ++i) {
420 marray->monitors [i].data = &marray->monitors [i + 1];
422 marray->monitors [i].data = NULL; /* the last one */
423 monitor_freelist = &marray->monitors [0];
424 /* we append the marray instead of prepending so that
425 * the collecting loop above will need to scan smaller arrays first
427 if (!monitor_allocated) {
428 monitor_allocated = marray;
430 last = monitor_allocated;
/* Pop the head of the freelist and initialize owner/entry count. */
438 new_ = monitor_freelist;
439 monitor_freelist = (MonoThreadsSync *)new_->data;
441 new_->status = mon_status_set_owner (0, id);
442 new_->status = mon_status_init_entry_count (new_->status);
446 #ifndef DISABLE_PERFCOUNTERS
447 mono_perfcounters->gc_sync_blocks++;
452 static MonoThreadsSync*
453 alloc_mon (MonoObject *obj, gint32 id)
455 MonoThreadsSync *mon;
457 mono_monitor_allocator_lock ();
459 mon->data = (void *)(size_t)mono_gchandle_new_weakref (obj, TRUE);
460 mono_monitor_allocator_unlock ();
467 discard_mon (MonoThreadsSync *mon)
469 mono_monitor_allocator_lock ();
470 mono_gchandle_free ((guint32)mon->data);
472 mono_monitor_allocator_unlock ();
476 mono_monitor_inflate_owned (MonoObject *obj, int id)
478 MonoThreadsSync *mon;
479 LockWord nlw, old_lw, tmp_lw;
482 old_lw.sync = obj->synchronisation;
483 LOCK_DEBUG (g_message ("%s: (%d) Inflating owned lock object %p; LW = %p", __func__, id, obj, old_lw.sync));
485 if (lock_word_is_inflated (old_lw)) {
486 /* Someone else inflated the lock in the meantime */
490 mon = alloc_mon (obj, id);
492 nest = lock_word_get_nest (old_lw);
495 nlw = lock_word_new_inflated (mon);
497 mono_memory_write_barrier ();
498 tmp_lw.sync = (MonoThreadsSync *)InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, nlw.sync, old_lw.sync);
499 if (tmp_lw.sync != old_lw.sync) {
500 /* Someone else inflated the lock in the meantime */
506 mono_monitor_inflate (MonoObject *obj)
508 MonoThreadsSync *mon;
509 LockWord nlw, old_lw;
511 LOCK_DEBUG (g_message ("%s: (%d) Inflating lock object %p; LW = %p", __func__, mono_thread_info_get_small_id (), obj, obj->synchronisation));
513 mon = alloc_mon (obj, 0);
515 nlw = lock_word_new_inflated (mon);
517 old_lw.sync = obj->synchronisation;
522 if (lock_word_is_inflated (old_lw)) {
525 #ifdef HAVE_MOVING_COLLECTOR
526 else if (lock_word_has_hash (old_lw)) {
527 mon->hash_code = lock_word_get_hash (old_lw);
528 mon->status = mon_status_set_owner (mon->status, 0);
529 nlw = lock_word_set_has_hash (nlw);
532 else if (lock_word_is_free (old_lw)) {
533 mon->status = mon_status_set_owner (mon->status, 0);
537 mon->status = mon_status_set_owner (mon->status, lock_word_get_owner (old_lw));
538 mon->nest = lock_word_get_nest (old_lw);
540 mono_memory_write_barrier ();
541 tmp_lw.sync = (MonoThreadsSync *)InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, nlw.sync, old_lw.sync);
542 if (tmp_lw.sync == old_lw.sync) {
543 /* Successfully inflated the lock */
547 old_lw.sync = tmp_lw.sync;
550 /* Someone else inflated the lock before us */
554 #define MONO_OBJECT_ALIGNMENT_SHIFT 3
/*
 * mono_object_hash:
 * Calculate a hash code for @obj that stays constant for the object's
 * lifetime.  With a moving collector the hash is cached in the lock word
 * (inflating the lock if needed); otherwise the address-derived hash is
 * returned directly.
 * NOTE(review): lossy extraction — the signature line, variable declarations
 * (lw, old_lw, hash), several braces, #else/#endif and return statements are
 * missing versus upstream; code lines kept verbatim.
 */
560 * Calculate a hash code for @obj that is constant while @obj is alive.
563 mono_object_hash (MonoObject* obj)
565 #ifdef HAVE_MOVING_COLLECTOR
570 lw.sync = obj->synchronisation;
572 LOCK_DEBUG (g_message("%s: (%d) Get hash for object %p; LW = %p", __func__, mono_thread_info_get_small_id (), obj, obj->synchronisation));
/* Fast path: hash already cached, either in the record or in the word. */
574 if (lock_word_has_hash (lw)) {
575 if (lock_word_is_inflated (lw)) {
576 return lock_word_get_inflated_lock (lw)->hash_code;
578 return lock_word_get_hash (lw);
582 * while we are inside this function, the GC will keep this object pinned,
583 * since we are in the unmanaged stack. Thanks to this and to the hash
584 * function that depends only on the address, we can ignore the races if
585 * another thread computes the hash at the same time, because it'll end up
586 * with the same value.
/* Knuth multiplicative hash on the (pinned) object address. */
588 hash = (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
589 #if SIZEOF_VOID_P == 4
590 /* clear the top bits as they can be discarded */
591 hash &= ~(LOCK_WORD_STATUS_MASK << (32 - LOCK_WORD_STATUS_BITS));
/* Free word: try to install a thin-hash word without inflating. */
593 if (lock_word_is_free (lw)) {
595 lw = lock_word_new_thin_hash (hash);
597 old_lw.sync = (MonoThreadsSync *)InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, NULL);
598 if (old_lw.sync == NULL) {
602 if (lock_word_has_hash (old_lw)) {
603 /* Done by somebody else */
/* CAS lost to a lock: fall through to inflation below. */
607 mono_monitor_inflate (obj);
608 lw.sync = obj->synchronisation;
609 } else if (lock_word_is_flat (lw)) {
610 int id = mono_thread_info_get_small_id ();
611 if (lock_word_get_owner (lw) == id)
612 mono_monitor_inflate_owned (obj, id);
614 mono_monitor_inflate (obj);
615 lw.sync = obj->synchronisation;
618 /* At this point, the lock is inflated */
619 lock_word_get_inflated_lock (lw)->hash_code = hash;
620 lw = lock_word_set_has_hash (lw);
621 mono_memory_write_barrier ();
622 obj->synchronisation = lw.sync;
626 * Wang's address-based hash function:
627 * http://www.concentric.net/~Ttwang/tech/addrhash.htm
629 return (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
/*
 * mono_monitor_ensure_owned:
 * Check that thread @id owns the lock encoded in @lw; on failure set a
 * pending SynchronizationLockException.
 * NOTE(review): lossy extraction — the return type and the success-path
 * return statements are missing.  Upstream this returns gboolean and callers
 * bail out with `if (!mono_monitor_ensure_owned (...)) return;`, but the
 * call sites visible in this chunk ignore the result — confirm against the
 * full file.  Code lines kept verbatim.
 */
634 mono_monitor_ensure_owned (LockWord lw, guint32 id)
636 if (lock_word_is_flat (lw)) {
637 if (lock_word_get_owner (lw) == id)
639 } else if (lock_word_is_inflated (lw)) {
640 if (mon_status_get_owner (lock_word_get_inflated_lock (lw)->status) == id)
644 mono_set_pending_exception (mono_get_exception_synchronization_lock ("Object synchronization method was called from an unsynchronized block of code."));
648 * When this function is called it has already been established that the
649 * current thread owns the monitor.
652 mono_monitor_exit_inflated (MonoObject *obj)
655 MonoThreadsSync *mon;
658 lw.sync = obj->synchronisation;
659 mon = lock_word_get_inflated_lock (lw);
661 nest = mon->nest - 1;
663 guint32 new_status, old_status, tmp_status;
665 old_status = mon->status;
668 * Release lock and do the wakeup stuff. It's possible that
669 * the last blocking thread gave up waiting just before we
670 * release the semaphore resulting in a negative entry count
671 * and a futile wakeup next time there's contention for this
675 gboolean have_waiters = mon_status_have_waiters (old_status);
677 new_status = mon_status_set_owner (old_status, 0);
679 new_status = mon_status_decrement_entry_count (new_status);
680 tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
681 if (tmp_status == old_status) {
683 mono_coop_sem_post (mon->entry_sem);
686 old_status = tmp_status;
688 LOCK_DEBUG (g_message ("%s: (%d) Object %p is now unlocked", __func__, mono_thread_info_get_small_id (), obj));
690 /* object is now unlocked, leave nest==1 so we don't
691 * need to set it when the lock is reacquired
694 LOCK_DEBUG (g_message ("%s: (%d) Object %p is now locked %d times", __func__, mono_thread_info_get_small_id (), obj, nest));
700 * When this function is called it has already been established that the
701 * current thread owns the monitor.
704 mono_monitor_exit_flat (MonoObject *obj, LockWord old_lw)
706 LockWord new_lw, tmp_lw;
707 if (G_UNLIKELY (lock_word_is_nested (old_lw)))
708 new_lw = lock_word_decrement_nest (old_lw);
710 new_lw.lock_word = 0;
712 tmp_lw.sync = (MonoThreadsSync *)InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, new_lw.sync, old_lw.sync);
713 if (old_lw.sync != tmp_lw.sync) {
714 /* Someone inflated the lock in the meantime */
715 mono_monitor_exit_inflated (obj);
718 LOCK_DEBUG (g_message ("%s: (%d) Object %p is now locked %d times; LW = %p", __func__, mono_thread_info_get_small_id (), obj, lock_word_get_nest (new_lw), obj->synchronisation));
722 mon_decrement_entry_count (MonoThreadsSync *mon)
724 guint32 old_status, tmp_status, new_status;
726 /* Decrement entry count */
727 old_status = mon->status;
729 new_status = mon_status_decrement_entry_count (old_status);
730 tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
731 if (tmp_status == old_status) {
734 old_status = tmp_status;
/*
 * mono_monitor_try_enter_inflated:
 * Contended-acquire slow path for an inflated lock.  Fast-CAS the owner
 * field; on contention register as a waiter, block on the entry semaphore
 * (created lazily), and retry, honoring the @ms timeout and thread
 * abort/stop/suspend requests.  Returns 1 on success, 0 on timeout, -1 when
 * interrupted and @allow_interruption is set.
 * NOTE(review): lossy extraction — the retry/retry_contended labels, many
 * braces, return statements, the waitms computation and the `sem`/`lw`/`i`
 * declarations are missing versus upstream; code lines kept verbatim.
 */
738 /* If allow_interruption==TRUE, the method will be interrupted if abort or suspend
739 * is requested. In this case it returns -1.
742 mono_monitor_try_enter_inflated (MonoObject *obj, guint32 ms, gboolean allow_interruption, guint32 id)
745 MonoThreadsSync *mon;
747 gint64 then = 0, now, delta;
749 guint32 new_status, old_status, tmp_status;
750 MonoSemTimedwaitRet wait_ret;
751 MonoInternalThread *thread;
752 gboolean interrupted = FALSE;
754 LOCK_DEBUG (g_message("%s: (%d) Trying to lock object %p (%d ms)", __func__, id, obj, ms));
756 if (G_UNLIKELY (!obj)) {
757 mono_set_pending_exception (mono_get_exception_argument_null ("obj"));
761 lw.sync = obj->synchronisation;
762 mon = lock_word_get_inflated_lock (lw);
/* --- fast path: uncontended CAS on the owner field --- */
764 /* This case differs from Dice's case 3 because we don't
765 * deflate locks or cache unused lock records
767 old_status = mon->status;
768 if (G_LIKELY (mon_status_get_owner (old_status) == 0)) {
769 /* Try to install our ID in the owner field, nest
770 * should have been left at 1 by the previous unlock
773 new_status = mon_status_set_owner (old_status, id);
774 tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
775 if (G_LIKELY (tmp_status == old_status)) {
777 g_assert (mon->nest == 1);
785 /* If the object is currently locked by this thread... */
786 if (mon_status_get_owner (old_status) == id) {
791 /* The object must be locked by someone else... */
792 #ifndef DISABLE_PERFCOUNTERS
793 mono_perfcounters->thread_contentions++;
796 /* If ms is 0 we don't block, but just fail straight away */
798 LOCK_DEBUG (g_message ("%s: (%d) timed out, returning FALSE", __func__, id));
802 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_CONTENTION);
804 /* The slow path begins here. */
806 /* a small amount of duplicated code, but it allows us to insert the profiler
807 * callbacks without impacting the fast path: from here on we don't need to go back to the
808 * retry label, but to retry_contended. At this point mon is already installed in the object
/* --- retry_contended: second acquire attempt with profiler events --- */
811 /* This case differs from Dice's case 3 because we don't
812 * deflate locks or cache unused lock records
814 old_status = mon->status;
815 if (G_LIKELY (mon_status_get_owner (old_status) == 0)) {
816 /* Try to install our ID in the owner field, nest
817 * should have been left at 1 by the previous unlock
820 new_status = mon_status_set_owner (old_status, id);
821 tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
822 if (G_LIKELY (tmp_status == old_status)) {
824 g_assert (mon->nest == 1);
825 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_DONE);
830 /* If the object is currently locked by this thread... */
831 if (mon_status_get_owner (old_status) == id) {
833 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_DONE);
837 /* We need to make sure there's a semaphore handle (creating it if
838 * necessary), and block on it
840 if (mon->entry_sem == NULL) {
841 /* Create the semaphore */
842 sem = g_new0 (MonoCoopSem, 1);
843 mono_coop_sem_init (sem, 0);
844 if (InterlockedCompareExchangePointer ((gpointer*)&mon->entry_sem, sem, NULL) != NULL) {
845 /* Someone else just put a handle here */
846 mono_coop_sem_destroy (sem);
/* --- register as a waiter (entry count CAS loop) --- */
852 * We need to register ourselves as waiting if it is the first time we are waiting,
853 * or if we were signaled and failed to acquire the lock.
856 old_status = mon->status;
858 if (mon_status_get_owner (old_status) == 0)
859 goto retry_contended;
860 new_status = mon_status_increment_entry_count (old_status);
861 tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
862 if (tmp_status == old_status) {
865 old_status = tmp_status;
869 if (ms != INFINITE) {
870 then = mono_msec_ticks ();
874 #ifndef DISABLE_PERFCOUNTERS
875 mono_perfcounters->thread_queue_len++;
876 mono_perfcounters->thread_queue_max++;
878 thread = mono_thread_internal_current ();
881 * If we allow interruption, we check the test state for an abort request before going into sleep.
882 * This is a workaround to the fact that Thread.Abort does non-sticky interruption of semaphores.
884 * Semaphores don't support the sticky interruption with mono_thread_info_install_interrupt.
886 * A better fix would be to switch to wait with something that allows sticky interrupts together
887 * with wrapping it with abort_protected_block_count for the non-alertable cases.
888 * And somehow make this whole dance atomic and not crazy expensive. Good luck.
891 if (allow_interruption) {
892 if (!mono_thread_test_and_set_state (thread, (MonoThreadState)(ThreadState_StopRequested | ThreadState_AbortRequested), ThreadState_WaitSleepJoin)) {
893 wait_ret = MONO_SEM_TIMEDWAIT_RET_ALERTED;
897 mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
901 * We pass ALERTABLE instead of allow_interruption since we have to check for the
902 * StopRequested case below.
904 wait_ret = mono_coop_sem_timedwait (mon->entry_sem, waitms, MONO_SEM_FLAGS_ALERTABLE);
906 mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
909 #ifndef DISABLE_PERFCOUNTERS
910 mono_perfcounters->thread_queue_len--;
/* --- classify the wait result --- */
913 if (wait_ret == MONO_SEM_TIMEDWAIT_RET_ALERTED && !allow_interruption) {
916 * We have to obey a stop/suspend request even if
917 * allow_interruption is FALSE to avoid hangs at shutdown.
919 if (!mono_thread_test_state (mono_thread_internal_current (), (MonoThreadState)(ThreadState_StopRequested | ThreadState_SuspendRequested | ThreadState_AbortRequested))) {
920 if (ms != INFINITE) {
921 now = mono_msec_ticks ();
923 /* it should not overflow before ~30k years */
924 g_assert (now >= then);
933 /* retry from the top */
934 goto retry_contended;
936 } else if (wait_ret == MONO_SEM_TIMEDWAIT_RET_SUCCESS) {
938 /* retry from the top */
939 goto retry_contended;
940 } else if (wait_ret == MONO_SEM_TIMEDWAIT_RET_TIMEDOUT) {
944 /* Timed out or interrupted */
945 mon_decrement_entry_count (mon);
947 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_FAIL);
949 if (wait_ret == MONO_SEM_TIMEDWAIT_RET_ALERTED) {
950 LOCK_DEBUG (g_message ("%s: (%d) interrupted waiting, returning -1", __func__, id));
952 } else if (wait_ret == MONO_SEM_TIMEDWAIT_RET_TIMEDOUT) {
953 LOCK_DEBUG (g_message ("%s: (%d) timed out waiting, returning FALSE", __func__, id));
956 g_assert_not_reached ();
962 * If allow_interruption == TRUE, the method will be interrupted if abort or suspend
963 * is requested. In this case it returns -1.
966 mono_monitor_try_enter_internal (MonoObject *obj, guint32 ms, gboolean allow_interruption)
969 int id = mono_thread_info_get_small_id ();
971 LOCK_DEBUG (g_message("%s: (%d) Trying to lock object %p (%d ms)", __func__, id, obj, ms));
973 lw.sync = obj->synchronisation;
975 if (G_LIKELY (lock_word_is_free (lw))) {
976 LockWord nlw = lock_word_new_flat (id);
977 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, nlw.sync, NULL) == NULL) {
980 /* Someone acquired it in the meantime or put a hash */
981 mono_monitor_inflate (obj);
982 return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);
984 } else if (lock_word_is_inflated (lw)) {
985 return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);
986 } else if (lock_word_is_flat (lw)) {
987 if (lock_word_get_owner (lw) == id) {
988 if (lock_word_is_max_nest (lw)) {
989 mono_monitor_inflate_owned (obj, id);
990 return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);
992 LockWord nlw, old_lw;
993 nlw = lock_word_increment_nest (lw);
994 old_lw.sync = (MonoThreadsSync *)InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, nlw.sync, lw.sync);
995 if (old_lw.sync != lw.sync) {
996 /* Someone else inflated it in the meantime */
997 g_assert (lock_word_is_inflated (old_lw));
998 return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);
1003 mono_monitor_inflate (obj);
1004 return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);
1006 } else if (lock_word_has_hash (lw)) {
1007 mono_monitor_inflate (obj);
1008 return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);
1011 g_assert_not_reached ();
1016 mono_monitor_enter (MonoObject *obj)
1019 gboolean allow_interruption = TRUE;
1020 if (G_UNLIKELY (!obj)) {
1021 mono_set_pending_exception (mono_get_exception_argument_null ("obj"));
1026 * An inquisitive mind could ask what's the deal with this loop.
1027 * It exists to deal with interrupting a monitor enter that happened within an abort-protected block, like a .cctor.
1029 * The thread will be set with a pending abort and the wait might even be interrupted. Either way, once we call mono_thread_interruption_checkpoint,
1030 * it will return NULL meaning we can't be aborted right now. Once that happens we switch to non-alertable.
1033 res = mono_monitor_try_enter_internal (obj, INFINITE, allow_interruption);
1034 /*This means we got interrupted during the wait and didn't got the monitor.*/
1036 MonoException *exc = mono_thread_interruption_checkpoint ();
1038 mono_set_pending_exception (exc);
1041 //we detected a pending interruption but it turned out to be a false positive, we ignore it from now on (this feels like a hack, right?, threads.c should give us less confusing directions)
1042 allow_interruption = FALSE;
1045 } while (res == -1);
1050 mono_monitor_enter_fast (MonoObject *obj)
1052 if (G_UNLIKELY (!obj)) {
1053 /* don't set pending exn on the fast path, just return
1054 * FALSE and let the slow path take care of it. */
1057 return mono_monitor_try_enter_internal (obj, 0, FALSE) == 1;
1061 mono_monitor_try_enter (MonoObject *obj, guint32 ms)
1063 if (G_UNLIKELY (!obj)) {
1064 mono_set_pending_exception (mono_get_exception_argument_null ("obj"));
1067 return mono_monitor_try_enter_internal (obj, ms, FALSE) == 1;
/*
 * mono_monitor_exit:
 * Release one level of @obj's monitor, dispatching to the inflated or flat
 * exit path.  A null @obj sets a pending ArgumentNullException.
 * NOTE(review): lossy extraction — braces, the early return and the `else`
 * before the flat path are missing.  Also, the mono_monitor_ensure_owned()
 * result appears unchecked here (upstream bails out when it fails) — confirm
 * against the full file.  Code lines kept verbatim.
 */
1071 mono_monitor_exit (MonoObject *obj)
1075 LOCK_DEBUG (g_message ("%s: (%d) Unlocking %p", __func__, mono_thread_info_get_small_id (), obj));
1077 if (G_UNLIKELY (!obj)) {
1078 mono_set_pending_exception (mono_get_exception_argument_null ("obj"));
1082 lw.sync = obj->synchronisation;
1084 mono_monitor_ensure_owned (lw, mono_thread_info_get_small_id ());
1086 if (G_UNLIKELY (lock_word_is_inflated (lw)))
1087 mono_monitor_exit_inflated (obj);
1089 mono_monitor_exit_flat (obj, lw);
1093 mono_monitor_get_object_monitor_gchandle (MonoObject *object)
1097 lw.sync = object->synchronisation;
1099 if (lock_word_is_inflated (lw)) {
1100 MonoThreadsSync *mon = lock_word_get_inflated_lock (lw);
1101 return (guint32)mon->data;
1107 * mono_monitor_threads_sync_member_offset:
1108 * @status_offset: returns size and offset of the "status" member
1109 * @nest_offset: returns size and offset of the "nest" member
1111 * Returns the offsets and sizes of two members of the
1112 * MonoThreadsSync struct. The Monitor ASM fastpaths need this.
1115 mono_monitor_threads_sync_members_offset (int *status_offset, int *nest_offset)
1119 #define ENCODE_OFF_SIZE(o,s) (((o) << 8) | ((s) & 0xff))
1121 *status_offset = ENCODE_OFF_SIZE (MONO_STRUCT_OFFSET (MonoThreadsSync, status), sizeof (ts.status));
1122 *nest_offset = ENCODE_OFF_SIZE (MONO_STRUCT_OFFSET (MonoThreadsSync, nest), sizeof (ts.nest));
1126 ves_icall_System_Threading_Monitor_Monitor_try_enter_with_atomic_var (MonoObject *obj, guint32 ms, char *lockTaken)
1129 gboolean allow_interruption = TRUE;
1130 if (G_UNLIKELY (!obj)) {
1131 mono_set_pending_exception (mono_get_exception_argument_null ("obj"));
1135 res = mono_monitor_try_enter_internal (obj, ms, allow_interruption);
1136 /*This means we got interrupted during the wait and didn't got the monitor.*/
1138 MonoException *exc = mono_thread_interruption_checkpoint ();
1140 mono_set_pending_exception (exc);
1143 //we detected a pending interruption but it turned out to be a false positive, we ignore it from now on (this feels like a hack, right?, threads.c should give us less confusing directions)
1144 allow_interruption = FALSE;
1147 } while (res == -1);
1148 /*It's safe to do it from here since interruption would happen only on the wrapper.*/
1149 *lockTaken = res == 1;
1153 mono_monitor_enter_v4 (MonoObject *obj, char *lock_taken)
1155 if (*lock_taken == 1) {
1156 mono_set_pending_exception (mono_get_exception_argument ("lockTaken", "lockTaken is already true"));
1160 ves_icall_System_Threading_Monitor_Monitor_try_enter_with_atomic_var (obj, INFINITE, lock_taken);
1164 * mono_monitor_enter_v4_fast:
1166 * Same as mono_monitor_enter_v4, but return immediately if the
1167 * monitor cannot be acquired.
1168 * Returns TRUE if the lock was acquired, FALSE otherwise.
1171 mono_monitor_enter_v4_fast (MonoObject *obj, char *lock_taken)
1173 if (*lock_taken == 1)
1175 if (G_UNLIKELY (!obj))
1177 gint32 res = mono_monitor_try_enter_internal (obj, 0, TRUE);
1178 *lock_taken = res == 1;
1183 ves_icall_System_Threading_Monitor_Monitor_test_owner (MonoObject *obj)
1187 LOCK_DEBUG (g_message ("%s: Testing if %p is owned by thread %d", __func__, obj, mono_thread_info_get_small_id()));
1189 lw.sync = obj->synchronisation;
1191 if (lock_word_is_flat (lw)) {
1192 return lock_word_get_owner (lw) == mono_thread_info_get_small_id ();
1193 } else if (lock_word_is_inflated (lw)) {
1194 return mon_status_get_owner (lock_word_get_inflated_lock (lw)->status) == mono_thread_info_get_small_id ();
/*
 * Returns TRUE if *any* thread currently owns obj's monitor (i.e. the
 * object is synchronised), FALSE if it is free.  Mirrors test_owner above
 * but compares against "no owner" instead of the calling thread's id.
 */
1201 ves_icall_System_Threading_Monitor_Monitor_test_synchronised (MonoObject *obj)
1205 LOCK_DEBUG (g_message("%s: (%d) Testing if %p is owned by any thread", __func__, mono_thread_info_get_small_id (), obj));
/* One snapshot of the lock word; decode by form as in test_owner. */
1207 lw.sync = obj->synchronisation;
/* Flat word: locked unless the word encodes the free state. */
1209 if (lock_word_is_flat (lw)) {
1210 return !lock_word_is_free (lw);
/* Inflated: owner id 0 in the status word means no owner. */
1211 } else if (lock_word_is_inflated (lw)) {
1212 return mon_status_get_owner (lock_word_get_inflated_lock (lw)->status) != 0;
1218 /* All wait list manipulation in the pulse, pulseall and wait
1219 * functions happens while the monitor lock is held, so we don't need
1220 * any extra struct locking
/*
 * Monitor.Pulse: wake at most one thread waiting on obj's monitor.
 * The caller must own the monitor (mono_monitor_ensure_owned); because the
 * lock is held, the wait list can be walked without extra locking (see the
 * comment above this group of functions).
 */
1224 ves_icall_System_Threading_Monitor_Monitor_pulse (MonoObject *obj)
1228 MonoThreadsSync *mon;
1230 LOCK_DEBUG (g_message ("%s: (%d) Pulsing %p", __func__, mono_thread_info_get_small_id (), obj));
1232 id = mono_thread_info_get_small_id ();
1233 lw.sync = obj->synchronisation;
/* Validates ownership against our small id; presumably raises
 * SynchronizationLockException otherwise — confirm in ensure_owned. */
1235 mono_monitor_ensure_owned (lw, id);
/* A flat (non-inflated) word proves nobody is waiting: Wait always
 * inflates the lock before queueing, so there is nothing to signal. */
1237 if (!lock_word_is_inflated (lw)) {
1238 /* No threads waiting. A wait would have inflated the lock */
1242 mon = lock_word_get_inflated_lock (lw);
1244 LOCK_DEBUG (g_message ("%s: (%d) %d threads waiting", __func__, mono_thread_info_get_small_id (), g_slist_length (mon->wait_list)));
1246 if (mon->wait_list != NULL) {
1247 LOCK_DEBUG (g_message ("%s: (%d) signalling and dequeuing handle %p", __func__, mono_thread_info_get_small_id (), mon->wait_list->data));
/* Signal the head waiter's private event and unlink it, so a later Pulse
 * cannot signal the same waiter twice. */
1249 SetEvent (mon->wait_list->data);
1250 mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
/*
 * Monitor.PulseAll: wake every thread waiting on obj's monitor.
 * Identical to pulse above except the if becomes a while, signalling and
 * dequeuing until the wait list is empty.  Caller must own the monitor.
 */
1255 ves_icall_System_Threading_Monitor_Monitor_pulse_all (MonoObject *obj)
1259 MonoThreadsSync *mon;
1261 LOCK_DEBUG (g_message("%s: (%d) Pulsing all %p", __func__, mono_thread_info_get_small_id (), obj));
1263 id = mono_thread_info_get_small_id ();
1264 lw.sync = obj->synchronisation;
/* Ownership check, as in pulse. */
1266 mono_monitor_ensure_owned (lw, id);
/* Flat word => no waiters (a Wait would have inflated the lock). */
1268 if (!lock_word_is_inflated (lw)) {
1269 /* No threads waiting. A wait would have inflated the lock */
1273 mon = lock_word_get_inflated_lock (lw);
1275 LOCK_DEBUG (g_message ("%s: (%d) %d threads waiting", __func__, mono_thread_info_get_small_id (), g_slist_length (mon->wait_list)));
/* Drain the whole list: each waiter's event is auto-reset and private, so
 * signalling each exactly once wakes every waiter. */
1277 while (mon->wait_list != NULL) {
1278 LOCK_DEBUG (g_message ("%s: (%d) signalling and dequeuing handle %p", __func__, mono_thread_info_get_small_id (), mon->wait_list->data));
1280 SetEvent (mon->wait_list->data);
1281 mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
1286 ves_icall_System_Threading_Monitor_Monitor_wait (MonoObject *obj, guint32 ms)
1289 MonoThreadsSync *mon;
1293 gboolean success = FALSE;
1295 MonoInternalThread *thread = mono_thread_internal_current ();
1296 int id = mono_thread_info_get_small_id ();
1298 LOCK_DEBUG (g_message ("%s: (%d) Trying to wait for %p with timeout %dms", __func__, mono_thread_info_get_small_id (), obj, ms));
1300 lw.sync = obj->synchronisation;
1302 mono_monitor_ensure_owned (lw, id);
1304 if (!lock_word_is_inflated (lw)) {
1305 mono_monitor_inflate_owned (obj, id);
1306 lw.sync = obj->synchronisation;
1309 mon = lock_word_get_inflated_lock (lw);
1311 /* Do this WaitSleepJoin check before creating the event handle */
1312 if (mono_thread_current_check_pending_interrupt ())
1315 event = CreateEvent (NULL, FALSE, FALSE, NULL);
1316 if (event == NULL) {
1317 mono_set_pending_exception (mono_get_exception_synchronization_lock ("Failed to set up wait event"));
1321 LOCK_DEBUG (g_message ("%s: (%d) queuing handle %p", __func__, mono_thread_info_get_small_id (), event));
1323 /* This looks superfluous */
1324 if (mono_thread_current_check_pending_interrupt ()) {
1325 CloseHandle (event);
1329 mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
1331 mon->wait_list = g_slist_append (mon->wait_list, event);
1333 /* Save the nest count, and release the lock */
1336 mono_memory_write_barrier ();
1337 mono_monitor_exit_inflated (obj);
1339 LOCK_DEBUG (g_message ("%s: (%d) Unlocked %p lock %p", __func__, mono_thread_info_get_small_id (), obj, mon));
1341 /* There's no race between unlocking mon and waiting for the
1342 * event, because auto reset events are sticky, and this event
1343 * is private to this thread. Therefore even if the event was
1344 * signalled before we wait, we still succeed.
1347 ret = WaitForSingleObjectEx (event, ms, TRUE);
1350 /* Reset the thread state fairly early, so we don't have to worry
1351 * about the monitor error checking
1353 mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
1355 /* Regain the lock with the previous nest count */
1357 regain = mono_monitor_try_enter_inflated (obj, INFINITE, TRUE, id);
1358 /* We must regain the lock before handling interruption requests */
1359 } while (regain == -1);
1361 g_assert (regain == 1);
1365 LOCK_DEBUG (g_message ("%s: (%d) Regained %p lock %p", __func__, mono_thread_info_get_small_id (), obj, mon));
1367 if (ret == WAIT_TIMEOUT) {
1368 /* Poll the event again, just in case it was signalled
1369 * while we were trying to regain the monitor lock
1372 ret = WaitForSingleObjectEx (event, 0, FALSE);
1376 /* Pulse will have popped our event from the queue if it signalled
1377 * us, so we only do it here if the wait timed out.
1379 * This avoids a race condition where the thread holding the
1380 * lock can Pulse several times before the WaitForSingleObject
1381 * returns. If we popped the queue here then this event might
1382 * be signalled more than once, thereby starving another
1386 if (ret == WAIT_OBJECT_0) {
1387 LOCK_DEBUG (g_message ("%s: (%d) Success", __func__, mono_thread_info_get_small_id ()));
1390 LOCK_DEBUG (g_message ("%s: (%d) Wait failed, dequeuing handle %p", __func__, mono_thread_info_get_small_id (), event));
1391 /* No pulse, so we have to remove ourself from the
1394 mon->wait_list = g_slist_remove (mon->wait_list, event);
1396 CloseHandle (event);