/*
 * monitor.c: Monitor locking functions
 *
 * Author:
 *	Dick Porter (dick@ximian.com)
 *
 * Copyright 2003 Ximian, Inc (http://www.ximian.com)
 * Copyright 2004-2009 Novell, Inc (http://www.novell.com)
 */
15 #include <mono/metadata/abi-details.h>
16 #include <mono/metadata/monitor.h>
17 #include <mono/metadata/threads-types.h>
18 #include <mono/metadata/exception.h>
19 #include <mono/metadata/threads.h>
20 #include <mono/io-layer/io-layer.h>
21 #include <mono/metadata/object-internals.h>
22 #include <mono/metadata/class-internals.h>
23 #include <mono/metadata/gc-internals.h>
24 #include <mono/metadata/method-builder.h>
25 #include <mono/metadata/debug-helpers.h>
26 #include <mono/metadata/tabledefs.h>
27 #include <mono/metadata/marshal.h>
28 #include <mono/utils/mono-threads.h>
29 #include <mono/metadata/profiler-private.h>
30 #include <mono/utils/mono-time.h>
31 #include <mono/utils/atomic.h>
34 * Pull the list of opcodes
36 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
40 #include "mono/cil/opcode.def"
45 /*#define LOCK_DEBUG(a) do { a; } while (0)*/
49 * The monitor implementation here is based on
50 * http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf and
51 * http://www.research.ibm.com/people/d/dfb/papers/Bacon98Thin.ps
53 * The Dice paper describes a technique for saving lock record space
54 * by returning records to a free list when they become unused. That
55 * sounds like unnecessary complexity to me, though if it becomes
56 * clear that unused lock records are taking up lots of space or we
57 * need to shave more time off by avoiding a malloc then we can always
58 * implement the free list idea later. The timeout parameter to
59 * try_enter voids some of the assumptions about the reference count
60 * field in Dice's implementation too. In his version, the thread
61 * attempting to lock a contended object will block until it succeeds,
62 * so the reference count will never be decremented while an object is
65 * Bacon's thin locks have a fast path that doesn't need a lock record
66 * for the common case of locking an unlocked or shallow-nested
typedef struct _MonitorArray MonitorArray;

/* One malloc'ed chunk of MonoThreadsSync records.
 * NOTE(review): the declaration is truncated in this view — the
 * 'num_monitors' and 'next' members used elsewhere in this file are
 * not visible here; confirm against the full source. */
struct _MonitorArray {
	MonoThreadsSync monitors [MONO_ZERO_LEN_ARRAY];

/* Allocator state below is guarded by monitor_mutex. */
#define mono_monitor_allocator_lock() mono_os_mutex_lock (&monitor_mutex)
#define mono_monitor_allocator_unlock() mono_os_mutex_unlock (&monitor_mutex)
static mono_mutex_t monitor_mutex;
static MonoThreadsSync *monitor_freelist;	/* singly-linked through mon->data */
static MonitorArray *monitor_allocated;		/* list of all allocated chunks */
static int array_size = 16;			/* size of the next chunk to allocate */
86 /* MonoThreadsSync status helpers */
89 mon_status_get_owner (guint32 status)
91 return status & OWNER_MASK;
95 mon_status_set_owner (guint32 status, guint32 owner)
97 return (status & ENTRY_COUNT_MASK) | owner;
101 mon_status_get_entry_count (guint32 status)
103 gint32 entry_count = (gint32)((status & ENTRY_COUNT_MASK) >> ENTRY_COUNT_SHIFT);
104 gint32 zero = (gint32)(((guint32)ENTRY_COUNT_ZERO) >> ENTRY_COUNT_SHIFT);
105 return entry_count - zero;
108 static inline guint32
109 mon_status_init_entry_count (guint32 status)
111 return (status & OWNER_MASK) | ENTRY_COUNT_ZERO;
114 static inline guint32
115 mon_status_increment_entry_count (guint32 status)
117 return status + (1 << ENTRY_COUNT_SHIFT);
120 static inline guint32
121 mon_status_decrement_entry_count (guint32 status)
123 return status - (1 << ENTRY_COUNT_SHIFT);
126 static inline gboolean
127 mon_status_have_waiters (guint32 status)
129 return status & ENTRY_COUNT_WAITERS;
132 /* LockWord helpers */
134 static inline MonoThreadsSync*
135 lock_word_get_inflated_lock (LockWord lw)
137 lw.lock_word &= (~LOCK_WORD_STATUS_MASK);
141 static inline gboolean
142 lock_word_is_inflated (LockWord lw)
144 return lw.lock_word & LOCK_WORD_INFLATED;
147 static inline gboolean
148 lock_word_has_hash (LockWord lw)
150 return lw.lock_word & LOCK_WORD_HAS_HASH;
153 static inline LockWord
154 lock_word_set_has_hash (LockWord lw)
157 nlw.lock_word = lw.lock_word | LOCK_WORD_HAS_HASH;
161 static inline gboolean
162 lock_word_is_free (LockWord lw)
164 return !lw.lock_word;
167 static inline gboolean
168 lock_word_is_flat (LockWord lw)
170 /* Return whether the lock is flat or free */
171 return (lw.lock_word & LOCK_WORD_STATUS_MASK) == LOCK_WORD_FLAT;
175 lock_word_get_hash (LockWord lw)
177 return (gint32) (lw.lock_word >> LOCK_WORD_HASH_SHIFT);
181 lock_word_get_nest (LockWord lw)
183 if (lock_word_is_free (lw))
185 /* Inword nest count starts from 0 */
186 return ((lw.lock_word & LOCK_WORD_NEST_MASK) >> LOCK_WORD_NEST_SHIFT) + 1;
189 static inline gboolean
190 lock_word_is_nested (LockWord lw)
192 return lw.lock_word & LOCK_WORD_NEST_MASK;
195 static inline gboolean
196 lock_word_is_max_nest (LockWord lw)
198 return (lw.lock_word & LOCK_WORD_NEST_MASK) == LOCK_WORD_NEST_MASK;
201 static inline LockWord
202 lock_word_increment_nest (LockWord lw)
204 lw.lock_word += 1 << LOCK_WORD_NEST_SHIFT;
208 static inline LockWord
209 lock_word_decrement_nest (LockWord lw)
211 lw.lock_word -= 1 << LOCK_WORD_NEST_SHIFT;
216 lock_word_get_owner (LockWord lw)
218 return lw.lock_word >> LOCK_WORD_OWNER_SHIFT;
221 static inline LockWord
222 lock_word_new_thin_hash (gint32 hash)
225 lw.lock_word = (guint32)hash;
226 lw.lock_word = (lw.lock_word << LOCK_WORD_HASH_SHIFT) | LOCK_WORD_HAS_HASH;
230 static inline LockWord
231 lock_word_new_inflated (MonoThreadsSync *mon)
235 lw.lock_word |= LOCK_WORD_INFLATED;
239 static inline LockWord
240 lock_word_new_flat (gint32 owner)
243 lw.lock_word = owner;
244 lw.lock_word <<= LOCK_WORD_OWNER_SHIFT;
249 mono_monitor_init (void)
251 mono_os_mutex_init_recursive (&monitor_mutex);
/* Tear down monitor allocator state at runtime shutdown. Freeing the
 * MonitorArray memory itself is deliberately disabled (see the FIXME
 * below): sgen still dereferences the weak links during domain cleanup. */
mono_monitor_cleanup (void)
	MonoThreadsSync *mon;
	/* MonitorArray *marray, *next = NULL; */

	/*mono_os_mutex_destroy (&monitor_mutex);*/

	/* The monitors on the freelist don't have weak links - mark them */
	for (mon = monitor_freelist; mon; mon = (MonoThreadsSync *)mon->data)
		mon->wait_list = (GSList *)-1;

	/*
	 * FIXME: This still crashes with sgen (async_read.exe)
	 *
	 * In mini_cleanup() we first call mono_runtime_cleanup(), which calls
	 * mono_monitor_cleanup(), which is supposed to free all monitor memory.
	 *
	 * Later in mini_cleanup(), we call mono_domain_free(), which calls
	 * mono_gc_clear_domain(), which frees all weak links associated with objects.
	 * Those weak links reside in the monitor structures, which we've freed earlier.
	 *
	 * Unless we fix this dependency in the shutdown sequence this code has to remain
	 * disabled, or at least the call to g_free().
	 */
	/* NOTE(review): the loop below appears inside a disabled region in the
	 * original source — confirm the surrounding #if 0 / comment markers. */
	for (marray = monitor_allocated; marray; marray = next) {
		for (i = 0; i < marray->num_monitors; ++i) {
			mon = &marray->monitors [i];
			/* -1 marks freelist entries, which have no weak link to remove */
			if (mon->wait_list != (gpointer)-1)
				mono_gc_weak_link_remove (&mon->data);
296 monitor_is_on_freelist (MonoThreadsSync *mon)
298 MonitorArray *marray;
299 for (marray = monitor_allocated; marray; marray = marray->next) {
300 if (mon >= marray->monitors && mon < &marray->monitors [marray->num_monitors])
 * Print a report on stdout of the managed locks currently held by
 * threads. If @include_untaken is specified, list also inflated locks
 * This is supposed to be used in debuggers like gdb.
/* NOTE(review): function header (return type, opening brace) and some
 * counter/brace lines are not visible in this extraction. */
mono_locks_dump (gboolean include_untaken)
	int used = 0, on_freelist = 0, to_recycle = 0, total = 0, num_arrays = 0;
	MonoThreadsSync *mon;
	MonitorArray *marray;
	/* Walk the freelist to count unused records */
	for (mon = monitor_freelist; mon; mon = (MonoThreadsSync *)mon->data)
	for (marray = monitor_allocated; marray; marray = marray->next) {
		total += marray->num_monitors;
		for (i = 0; i < marray->num_monitors; ++i) {
			mon = &marray->monitors [i];
			if (mon->data == NULL) {
				/* the last record of an array has data == NULL even when free */
				if (i < marray->num_monitors - 1)
			/* data holds a weak gchandle to the owning object when in use */
			if (!monitor_is_on_freelist ((MonoThreadsSync *)mon->data)) {
				MonoObject *holder = (MonoObject *)mono_gchandle_get_target ((guint32)mon->data);
				if (mon_status_get_owner (mon->status)) {
					g_print ("Lock %p in object %p held by thread %d, nest level: %d\n",
						mon, holder, mon_status_get_owner (mon->status), mon->nest);
					g_print ("\tWaiting on semaphore %p: %d\n", mon->entry_sem, mon_status_get_entry_count (mon->status));
				} else if (include_untaken) {
					g_print ("Lock %p in object %p untaken\n", mon, holder);
	g_print ("Total locks (in %d array(s)): %d, used: %d, on freelist: %d, to recycle: %d\n",
		num_arrays, total, used, on_freelist, to_recycle);
352 /* LOCKING: this is called with monitor_mutex held */
354 mon_finalize (MonoThreadsSync *mon)
356 LOCK_DEBUG (g_message ("%s: Finalizing sync %p", __func__, mon));
358 if (mon->entry_sem != NULL) {
359 CloseHandle (mon->entry_sem);
360 mon->entry_sem = NULL;
362 /* If this isn't empty then something is seriously broken - it
363 * means a thread is still waiting on the object that owned
364 * this lock, but the object has been finalized.
366 g_assert (mon->wait_list == NULL);
368 /* owner and nest are set in mon_new, no need to zero them out */
370 mon->data = monitor_freelist;
371 monitor_freelist = mon;
372 #ifndef DISABLE_PERFCOUNTERS
373 mono_perfcounters->gc_sync_blocks--;
/* Obtain a fresh MonoThreadsSync, owner-initialized to @id: pop the
 * freelist, first refilling it by recycling records whose owning object
 * was collected, or by allocating a new MonitorArray chunk.
 * LOCKING: this is called with monitor_mutex held */
static MonoThreadsSync *
	MonoThreadsSync *new_;
	/* NOTE(review): the 'mon_new (gint32 id)' signature line, opening
	 * brace and several loop/brace lines are not visible here. */

	if (!monitor_freelist) {
		MonitorArray *marray;
		/* see if any sync block has been collected */
		for (marray = monitor_allocated; marray; marray = marray->next) {
			for (i = 0; i < marray->num_monitors; ++i) {
				/* data is a weak gchandle; NULL target means the object died */
				if (mono_gchandle_get_target ((guint32)marray->monitors [i].data) == NULL) {
					new_ = &marray->monitors [i];
					if (new_->wait_list) {
						/* Orphaned events left by aborted threads */
						while (new_->wait_list) {
							LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d): Closing orphaned event %d", mono_thread_info_get_small_id (), new_->wait_list->data));
							CloseHandle (new_->wait_list->data);
							new_->wait_list = g_slist_remove (new_->wait_list, new_->wait_list->data);
					mono_gchandle_free ((guint32)new_->data);
					/* push the recycled record onto the freelist */
					new_->data = monitor_freelist;
					monitor_freelist = new_;
			/* small perf tweak to avoid scanning all the blocks */
		/* need to allocate a new array of monitors */
		if (!monitor_freelist) {
			LOCK_DEBUG (g_message ("%s: allocating more monitors: %d", __func__, array_size));
			marray = (MonitorArray *)g_malloc0 (MONO_SIZEOF_MONO_ARRAY + array_size * sizeof (MonoThreadsSync));
			marray->num_monitors = array_size;
			/* link into the freelist */
			for (i = 0; i < marray->num_monitors - 1; ++i) {
				marray->monitors [i].data = &marray->monitors [i + 1];
			marray->monitors [i].data = NULL; /* the last one */
			monitor_freelist = &marray->monitors [0];
			/* we append the marray instead of prepending so that
			 * the collecting loop above will need to scan smaller arrays first
			 */
			if (!monitor_allocated) {
				monitor_allocated = marray;
				last = monitor_allocated;

	/* Pop the head of the freelist and claim it for @id */
	new_ = monitor_freelist;
	monitor_freelist = (MonoThreadsSync *)new_->data;

	new_->status = mon_status_set_owner (0, id);
	new_->status = mon_status_init_entry_count (new_->status);
#ifndef DISABLE_PERFCOUNTERS
	mono_perfcounters->gc_sync_blocks++;
450 static MonoThreadsSync*
451 alloc_mon (MonoObject *obj, gint32 id)
453 MonoThreadsSync *mon;
455 mono_monitor_allocator_lock ();
457 mon->data = (void *)(size_t)mono_gchandle_new_weakref (obj, TRUE);
458 mono_monitor_allocator_unlock ();
465 discard_mon (MonoThreadsSync *mon)
467 mono_monitor_allocator_lock ();
468 mono_gchandle_free ((guint32)mon->data);
470 mono_monitor_allocator_unlock ();
/* Inflate a flat lock currently owned by this thread (@id): allocate a
 * MonoThreadsSync carrying the current nest count and CAS it into the
 * object's lock word. Losing the CAS means someone else inflated first.
 * NOTE(review): the return/cleanup lines (including the discard_mon call
 * on the failure paths) are not visible in this extraction. */
mono_monitor_inflate_owned (MonoObject *obj, int id)
	MonoThreadsSync *mon;
	LockWord nlw, old_lw, tmp_lw;

	old_lw.sync = obj->synchronisation;
	LOCK_DEBUG (g_message ("%s: (%d) Inflating owned lock object %p; LW = %p", __func__, id, obj, old_lw.sync));

	if (lock_word_is_inflated (old_lw)) {
		/* Someone else inflated the lock in the meantime */

	mon = alloc_mon (obj, id);

	nest = lock_word_get_nest (old_lw);

	nlw = lock_word_new_inflated (mon);

	mono_memory_write_barrier ();
	tmp_lw.sync = (MonoThreadsSync *)InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, nlw.sync, old_lw.sync);
	if (tmp_lw.sync != old_lw.sync) {
		/* Someone else inflated the lock in the meantime */
/* Inflate an arbitrary lock word (free, hash-only, or flat owned by any
 * thread) into a MonoThreadsSync, retrying the CAS until we install our
 * record or observe that someone else inflated it first.
 * NOTE(review): the retry-loop brace lines and the discard_mon call on
 * the losing path are not visible in this extraction. */
mono_monitor_inflate (MonoObject *obj)
	MonoThreadsSync *mon;
	LockWord nlw, old_lw;

	LOCK_DEBUG (g_message ("%s: (%d) Inflating lock object %p; LW = %p", __func__, mono_thread_info_get_small_id (), obj, obj->synchronisation));

	mon = alloc_mon (obj, 0);

	nlw = lock_word_new_inflated (mon);

	old_lw.sync = obj->synchronisation;

	if (lock_word_is_inflated (old_lw)) {
#ifdef HAVE_MOVING_COLLECTOR
	else if (lock_word_has_hash (old_lw)) {
		/* Move the thin hash into the inflated record */
		mon->hash_code = lock_word_get_hash (old_lw);
		mon->status = mon_status_set_owner (mon->status, 0);
		nlw = lock_word_set_has_hash (nlw);
	else if (lock_word_is_free (old_lw)) {
		mon->status = mon_status_set_owner (mon->status, 0);
		/* Flat and owned: carry owner and nest over into the record */
		mon->status = mon_status_set_owner (mon->status, lock_word_get_owner (old_lw));
		mon->nest = lock_word_get_nest (old_lw);

	mono_memory_write_barrier ();
	tmp_lw.sync = (MonoThreadsSync *)InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, nlw.sync, old_lw.sync);
	if (tmp_lw.sync == old_lw.sync) {
		/* Successfully inflated the lock */

	old_lw.sync = tmp_lw.sync;

	/* Someone else inflated the lock before us */
552 #define MONO_OBJECT_ALIGNMENT_SHIFT 3
558 * Calculate a hash code for @obj that is constant while @obj is alive.
/* Compute (and, with a moving GC, persist in the lock word) a hash code
 * for @obj that stays constant for the object's lifetime.
 * NOTE(review): declarations (LockWord lw/old_lw, unsigned hash), several
 * brace/else lines and the returns after the CAS are not visible here. */
mono_object_hash (MonoObject* obj)
#ifdef HAVE_MOVING_COLLECTOR
	lw.sync = obj->synchronisation;

	LOCK_DEBUG (g_message("%s: (%d) Get hash for object %p; LW = %p", __func__, mono_thread_info_get_small_id (), obj, obj->synchronisation));

	if (lock_word_has_hash (lw)) {
		if (lock_word_is_inflated (lw)) {
			return lock_word_get_inflated_lock (lw)->hash_code;
			return lock_word_get_hash (lw);
	/*
	 * while we are inside this function, the GC will keep this object pinned,
	 * since we are in the unmanaged stack. Thanks to this and to the hash
	 * function that depends only on the address, we can ignore the races if
	 * another thread computes the hash at the same time, because it'll end up
	 * with the same value.
	 */
	hash = (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
#if SIZEOF_VOID_P == 4
	/* clear the top bits as they can be discarded */
	hash &= ~(LOCK_WORD_STATUS_MASK << (32 - LOCK_WORD_STATUS_BITS));
	if (lock_word_is_free (lw)) {
		/* Try to stash the hash inline in an otherwise-unused lock word */
		lw = lock_word_new_thin_hash (hash);

		old_lw.sync = (MonoThreadsSync *)InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, NULL);
		if (old_lw.sync == NULL) {

		if (lock_word_has_hash (old_lw)) {
			/* Done by somebody else */

		/* Lost to a locker: fall through to inflation */
		mono_monitor_inflate (obj);
		lw.sync = obj->synchronisation;
	} else if (lock_word_is_flat (lw)) {
		int id = mono_thread_info_get_small_id ();
		if (lock_word_get_owner (lw) == id)
			mono_monitor_inflate_owned (obj, id);
			mono_monitor_inflate (obj);
		lw.sync = obj->synchronisation;

	/* At this point, the lock is inflated */
	lock_word_get_inflated_lock (lw)->hash_code = hash;
	lw = lock_word_set_has_hash (lw);
	mono_memory_write_barrier ();
	obj->synchronisation = lw.sync;
	/*
	 * Wang's address-based hash function:
	 *   http://www.concentric.net/~Ttwang/tech/addrhash.htm
	 */
	return (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
632 mono_monitor_ensure_owned (LockWord lw, guint32 id)
634 if (lock_word_is_flat (lw)) {
635 if (lock_word_get_owner (lw) == id)
637 } else if (lock_word_is_inflated (lw)) {
638 if (mon_status_get_owner (lock_word_get_inflated_lock (lw)->status) == id)
642 mono_set_pending_exception (mono_get_exception_synchronization_lock ("Object synchronization method was called from an unsynchronized block of code."));
/*
 * When this function is called it has already been established that the
 * current thread owns the monitor.
 */
/* Release one level of an inflated lock: decrement nest, and when it
 * reaches zero CAS the owner field to 0, posting the entry semaphore if
 * waiters are queued.
 * NOTE(review): declarations (LockWord lw, guint32 nest), the
 * 'if (nest == 0)' / have_waiters conditionals and loop braces are not
 * visible in this extraction. */
mono_monitor_exit_inflated (MonoObject *obj)
	MonoThreadsSync *mon;

	lw.sync = obj->synchronisation;
	mon = lock_word_get_inflated_lock (lw);

	nest = mon->nest - 1;
		guint32 new_status, old_status, tmp_status;

		old_status = mon->status;

		/*
		 * Release lock and do the wakeup stuff. It's possible that
		 * the last blocking thread gave up waiting just before we
		 * release the semaphore resulting in a negative entry count
		 * and a futile wakeup next time there's contention for this
		 * object.
		 */
			gboolean have_waiters = mon_status_have_waiters (old_status);

			new_status = mon_status_set_owner (old_status, 0);
				new_status = mon_status_decrement_entry_count (new_status);
			tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
			if (tmp_status == old_status) {
					ReleaseSemaphore (mon->entry_sem, 1, NULL);
			old_status = tmp_status;
		LOCK_DEBUG (g_message ("%s: (%d) Object %p is now unlocked", __func__, mono_thread_info_get_small_id (), obj));

		/* object is now unlocked, leave nest==1 so we don't
		 * need to set it when the lock is reacquired
		 */
		LOCK_DEBUG (g_message ("%s: (%d) Object %p is now locked %d times", __func__, mono_thread_info_get_small_id (), obj, nest));
698 * When this function is called it has already been established that the
699 * current thread owns the monitor.
702 mono_monitor_exit_flat (MonoObject *obj, LockWord old_lw)
704 LockWord new_lw, tmp_lw;
705 if (G_UNLIKELY (lock_word_is_nested (old_lw)))
706 new_lw = lock_word_decrement_nest (old_lw);
708 new_lw.lock_word = 0;
710 tmp_lw.sync = (MonoThreadsSync *)InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, new_lw.sync, old_lw.sync);
711 if (old_lw.sync != tmp_lw.sync) {
712 /* Someone inflated the lock in the meantime */
713 mono_monitor_exit_inflated (obj);
716 LOCK_DEBUG (g_message ("%s: (%d) Object %p is now locked %d times; LW = %p", __func__, mono_thread_info_get_small_id (), obj, lock_word_get_nest (new_lw), obj->synchronisation));
720 mon_decrement_entry_count (MonoThreadsSync *mon)
722 guint32 old_status, tmp_status, new_status;
724 /* Decrement entry count */
725 old_status = mon->status;
727 new_status = mon_status_decrement_entry_count (old_status);
728 tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
729 if (tmp_status == old_status) {
732 old_status = tmp_status;
/* If allow_interruption==TRUE, the method will be interrupted if abort or suspend
 * is requested. In this case it returns -1.
 */
/* Acquire an inflated monitor: fast CAS on the owner field, recursion
 * bump when already owned, otherwise register as a waiter and block on
 * the entry semaphore with timeout/interruption handling.
 * Returns 1 on success, 0 on timeout, -1 when interrupted.
 * NOTE(review): several declaration, brace, return and label lines
 * (e.g. 'retry:', 'retry_contended:', the 'mon->nest++' recursion bump,
 * 'waitms' setup) are not visible in this extraction. */
mono_monitor_try_enter_inflated (MonoObject *obj, guint32 ms, gboolean allow_interruption, guint32 id)
	MonoThreadsSync *mon;
	guint32 then = 0, now, delta;
	guint32 new_status, old_status, tmp_status;
	MonoInternalThread *thread;
	gboolean interrupted = FALSE;

	LOCK_DEBUG (g_message("%s: (%d) Trying to lock object %p (%d ms)", __func__, id, obj, ms));

	if (G_UNLIKELY (!obj)) {
		mono_set_pending_exception (mono_get_exception_argument_null ("obj"));

	lw.sync = obj->synchronisation;
	mon = lock_word_get_inflated_lock (lw);

	/* This case differs from Dice's case 3 because we don't
	 * deflate locks or cache unused lock records
	 */
	old_status = mon->status;
	if (G_LIKELY (mon_status_get_owner (old_status) == 0)) {
		/* Try to install our ID in the owner field, nest
		 * should have been left at 1 by the previous unlock
		 */
		new_status = mon_status_set_owner (old_status, id);
		tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
		if (G_LIKELY (tmp_status == old_status)) {
			g_assert (mon->nest == 1);

	/* If the object is currently locked by this thread... */
	if (mon_status_get_owner (old_status) == id) {

	/* The object must be locked by someone else... */
#ifndef DISABLE_PERFCOUNTERS
	mono_perfcounters->thread_contentions++;

	/* If ms is 0 we don't block, but just fail straight away */
		LOCK_DEBUG (g_message ("%s: (%d) timed out, returning FALSE", __func__, id));

	mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_CONTENTION);

	/* The slow path begins here. */

	/* a small amount of duplicated code, but it allows us to insert the profiler
	 * callbacks without impacting the fast path: from here on we don't need to go back to the
	 * retry label, but to retry_contended. At this point mon is already installed in the object
	 */

	/* This case differs from Dice's case 3 because we don't
	 * deflate locks or cache unused lock records
	 */
	old_status = mon->status;
	if (G_LIKELY (mon_status_get_owner (old_status) == 0)) {
		/* Try to install our ID in the owner field, nest
		 * should have been left at 1 by the previous unlock
		 */
		new_status = mon_status_set_owner (old_status, id);
		tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
		if (G_LIKELY (tmp_status == old_status)) {
			g_assert (mon->nest == 1);
			mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_DONE);

	/* If the object is currently locked by this thread... */
	if (mon_status_get_owner (old_status) == id) {
		mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_DONE);

	/* We need to make sure there's a semaphore handle (creating it if
	 * necessary), and block on it
	 */
	if (mon->entry_sem == NULL) {
		/* Create the semaphore */
		sem = CreateSemaphore (NULL, 0, 0x7fffffff, NULL);
		g_assert (sem != NULL);
		if (InterlockedCompareExchangePointer ((gpointer*)&mon->entry_sem, sem, NULL) != NULL) {
			/* Someone else just put a handle here */

	/*
	 * We need to register ourselves as waiting if it is the first time we are waiting,
	 * of if we were signaled and failed to acquire the lock.
	 */
		old_status = mon->status;
		/* Owner may have released the lock since the fast path above */
		if (mon_status_get_owner (old_status) == 0)
			goto retry_contended;
		new_status = mon_status_increment_entry_count (old_status);
		tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
		if (tmp_status == old_status) {
		old_status = tmp_status;

	if (ms != INFINITE) {
		then = mono_msec_ticks ();

#ifndef DISABLE_PERFCOUNTERS
	mono_perfcounters->thread_queue_len++;
	mono_perfcounters->thread_queue_max++;
	thread = mono_thread_internal_current ();

	mono_thread_set_state (thread, ThreadState_WaitSleepJoin);

	/*
	 * We pass TRUE instead of allow_interruption since we have to check for the
	 * StopRequested case below.
	 */
	MONO_PREPARE_BLOCKING;
	ret = WaitForSingleObjectEx (mon->entry_sem, waitms, TRUE);
	MONO_FINISH_BLOCKING;

	mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);

#ifndef DISABLE_PERFCOUNTERS
	mono_perfcounters->thread_queue_len--;

	if (ret == WAIT_IO_COMPLETION && !allow_interruption) {
		/*
		 * We have to obey a stop/suspend request even if
		 * allow_interruption is FALSE to avoid hangs at shutdown.
		 */
		if (!mono_thread_test_state (mono_thread_internal_current (), (MonoThreadState)(ThreadState_StopRequested | ThreadState_SuspendRequested | ThreadState_AbortRequested))) {
			if (ms != INFINITE) {
				now = mono_msec_ticks ();
				/* tick counter can wrap around 32 bits */
					LOCK_DEBUG (g_message ("%s: wrapped around! now=0x%x then=0x%x", __func__, now, then));
					now += (0xffffffff - then);
				LOCK_DEBUG (g_message ("%s: wrap rejig: now=0x%x then=0x%x delta=0x%x", __func__, now, then, now-then));
			/* retry from the top */
			goto retry_contended;
	} else if (ret == WAIT_OBJECT_0) {
		/* retry from the top */
		goto retry_contended;
	} else if (ret == WAIT_TIMEOUT) {

	/* Timed out or interrupted */
	mon_decrement_entry_count (mon);

	mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_FAIL);

	if (ret == WAIT_IO_COMPLETION) {
		LOCK_DEBUG (g_message ("%s: (%d) interrupted waiting, returning -1", __func__, id));
	} else if (ret == WAIT_TIMEOUT) {
		LOCK_DEBUG (g_message ("%s: (%d) timed out waiting, returning FALSE", __func__, id));
		g_assert_not_reached ();
/*
 * If allow_interruption == TRUE, the method will be interrupted if abort or suspend
 * is requested. In this case it returns -1.
 */
/* Monitor-enter entry point: CAS a flat lock word for the uncontended
 * case, bump the nest for flat recursion (inflating at max nest),
 * otherwise inflate and fall through to the inflated slow path.
 * Returns 1 on success, 0 on timeout, -1 when interrupted.
 * NOTE(review): the return type line, 'LockWord lw;' declaration, the
 * 'return 1;' lines after the successful CASes and several brace lines
 * are not visible in this extraction. */
mono_monitor_try_enter_internal (MonoObject *obj, guint32 ms, gboolean allow_interruption)
	int id = mono_thread_info_get_small_id ();

	LOCK_DEBUG (g_message("%s: (%d) Trying to lock object %p (%d ms)", __func__, id, obj, ms));

	if (G_UNLIKELY (!obj)) {
		mono_set_pending_exception (mono_get_exception_argument_null ("obj"));

	lw.sync = obj->synchronisation;

	if (G_LIKELY (lock_word_is_free (lw))) {
		LockWord nlw = lock_word_new_flat (id);
		if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, nlw.sync, NULL) == NULL) {
			/* Someone acquired it in the meantime or put a hash */
			mono_monitor_inflate (obj);
			return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);
	} else if (lock_word_is_inflated (lw)) {
		return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);
	} else if (lock_word_is_flat (lw)) {
		if (lock_word_get_owner (lw) == id) {
			if (lock_word_is_max_nest (lw)) {
				/* nest field saturated: must inflate to keep counting */
				mono_monitor_inflate_owned (obj, id);
				return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);
				LockWord nlw, old_lw;

				nlw = lock_word_increment_nest (lw);
				old_lw.sync = (MonoThreadsSync *)InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, nlw.sync, lw.sync);
				if (old_lw.sync != lw.sync) {
					/* Someone else inflated it in the meantime */
					g_assert (lock_word_is_inflated (old_lw));
					return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);
			/* Flat lock owned by another thread: contention, inflate */
			mono_monitor_inflate (obj);
			return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);
	} else if (lock_word_has_hash (lw)) {
		mono_monitor_inflate (obj);
		return mono_monitor_try_enter_inflated (obj, ms, allow_interruption, id);

	g_assert_not_reached ();
1006 mono_monitor_enter (MonoObject *obj)
1008 return mono_monitor_try_enter_internal (obj, INFINITE, FALSE) == 1;
1012 mono_monitor_enter_fast (MonoObject *obj)
1014 return mono_monitor_try_enter_internal (obj, 0, FALSE) == 1;
1018 mono_monitor_try_enter (MonoObject *obj, guint32 ms)
1020 return mono_monitor_try_enter_internal (obj, ms, FALSE) == 1;
1024 mono_monitor_exit (MonoObject *obj)
1028 LOCK_DEBUG (g_message ("%s: (%d) Unlocking %p", __func__, mono_thread_info_get_small_id (), obj));
1030 if (G_UNLIKELY (!obj)) {
1031 mono_set_pending_exception (mono_get_exception_argument_null ("obj"));
1035 lw.sync = obj->synchronisation;
1037 mono_monitor_ensure_owned (lw, mono_thread_info_get_small_id ());
1039 if (G_UNLIKELY (lock_word_is_inflated (lw)))
1040 mono_monitor_exit_inflated (obj);
1042 mono_monitor_exit_flat (obj, lw);
1046 mono_monitor_get_object_monitor_gchandle (MonoObject *object)
1050 lw.sync = object->synchronisation;
1052 if (lock_word_is_inflated (lw)) {
1053 MonoThreadsSync *mon = lock_word_get_inflated_lock (lw);
1054 return (guint32)mon->data;
1060 * mono_monitor_threads_sync_member_offset:
1061 * @status_offset: returns size and offset of the "status" member
1062 * @nest_offset: returns size and offset of the "nest" member
1064 * Returns the offsets and sizes of two members of the
1065 * MonoThreadsSync struct. The Monitor ASM fastpaths need this.
1068 mono_monitor_threads_sync_members_offset (int *status_offset, int *nest_offset)
1072 #define ENCODE_OFF_SIZE(o,s) (((o) << 8) | ((s) & 0xff))
1074 *status_offset = ENCODE_OFF_SIZE (MONO_STRUCT_OFFSET (MonoThreadsSync, status), sizeof (ts.status));
1075 *nest_offset = ENCODE_OFF_SIZE (MONO_STRUCT_OFFSET (MonoThreadsSync, nest), sizeof (ts.nest));
1079 ves_icall_System_Threading_Monitor_Monitor_try_enter (MonoObject *obj, guint32 ms)
1084 res = mono_monitor_try_enter_internal (obj, ms, TRUE);
1086 MonoException *exc = mono_thread_interruption_checkpoint ();
1088 mono_set_pending_exception (exc);
1092 } while (res == -1);
1098 ves_icall_System_Threading_Monitor_Monitor_try_enter_with_atomic_var (MonoObject *obj, guint32 ms, char *lockTaken)
1102 res = mono_monitor_try_enter_internal (obj, ms, TRUE);
1103 /*This means we got interrupted during the wait and didn't got the monitor.*/
1105 MonoException *exc = mono_thread_interruption_checkpoint ();
1107 mono_set_pending_exception (exc);
1111 } while (res == -1);
1112 /*It's safe to do it from here since interruption would happen only on the wrapper.*/
1113 *lockTaken = res == 1;
1117 mono_monitor_enter_v4 (MonoObject *obj, char *lock_taken)
1119 if (*lock_taken == 1) {
1120 mono_set_pending_exception (mono_get_exception_argument ("lockTaken", "lockTaken is already true"));
1124 ves_icall_System_Threading_Monitor_Monitor_try_enter_with_atomic_var (obj, INFINITE, lock_taken);
1128 * mono_monitor_enter_v4_fast:
1130 * Same as mono_monitor_enter_v4, but return immediately if the
1131 * monitor cannot be acquired.
1132 * Returns TRUE if the lock was acquired, FALSE otherwise.
1135 mono_monitor_enter_v4_fast (MonoObject *obj, char *lock_taken)
1137 if (*lock_taken == 1)
1139 gint32 res = mono_monitor_try_enter_internal (obj, 0, TRUE);
1140 *lock_taken = res == 1;
1145 ves_icall_System_Threading_Monitor_Monitor_test_owner (MonoObject *obj)
1149 LOCK_DEBUG (g_message ("%s: Testing if %p is owned by thread %d", __func__, obj, mono_thread_info_get_small_id()));
1151 lw.sync = obj->synchronisation;
1153 if (lock_word_is_flat (lw)) {
1154 return lock_word_get_owner (lw) == mono_thread_info_get_small_id ();
1155 } else if (lock_word_is_inflated (lw)) {
1156 return mon_status_get_owner (lock_word_get_inflated_lock (lw)->status) == mono_thread_info_get_small_id ();
1163 ves_icall_System_Threading_Monitor_Monitor_test_synchronised (MonoObject *obj)
1167 LOCK_DEBUG (g_message("%s: (%d) Testing if %p is owned by any thread", __func__, mono_thread_info_get_small_id (), obj));
1169 lw.sync = obj->synchronisation;
1171 if (lock_word_is_flat (lw)) {
1172 return !lock_word_is_free (lw);
1173 } else if (lock_word_is_inflated (lw)) {
1174 return mon_status_get_owner (lock_word_get_inflated_lock (lw)->status) != 0;
1180 /* All wait list manipulation in the pulse, pulseall and wait
1181 * functions happens while the monitor lock is held, so we don't need
1182 * any extra struct locking
1186 ves_icall_System_Threading_Monitor_Monitor_pulse (MonoObject *obj)
1190 MonoThreadsSync *mon;
1192 LOCK_DEBUG (g_message ("%s: (%d) Pulsing %p", __func__, mono_thread_info_get_small_id (), obj));
1194 id = mono_thread_info_get_small_id ();
1195 lw.sync = obj->synchronisation;
1197 mono_monitor_ensure_owned (lw, id);
1199 if (!lock_word_is_inflated (lw)) {
1200 /* No threads waiting. A wait would have inflated the lock */
1204 mon = lock_word_get_inflated_lock (lw);
1206 LOCK_DEBUG (g_message ("%s: (%d) %d threads waiting", __func__, mono_thread_info_get_small_id (), g_slist_length (mon->wait_list)));
1208 if (mon->wait_list != NULL) {
1209 LOCK_DEBUG (g_message ("%s: (%d) signalling and dequeuing handle %p", __func__, mono_thread_info_get_small_id (), mon->wait_list->data));
1211 SetEvent (mon->wait_list->data);
1212 mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
1217 ves_icall_System_Threading_Monitor_Monitor_pulse_all (MonoObject *obj)
1221 MonoThreadsSync *mon;
1223 LOCK_DEBUG (g_message("%s: (%d) Pulsing all %p", __func__, mono_thread_info_get_small_id (), obj));
1225 id = mono_thread_info_get_small_id ();
1226 lw.sync = obj->synchronisation;
1228 mono_monitor_ensure_owned (lw, id);
1230 if (!lock_word_is_inflated (lw)) {
1231 /* No threads waiting. A wait would have inflated the lock */
1235 mon = lock_word_get_inflated_lock (lw);
1237 LOCK_DEBUG (g_message ("%s: (%d) %d threads waiting", __func__, mono_thread_info_get_small_id (), g_slist_length (mon->wait_list)));
1239 while (mon->wait_list != NULL) {
1240 LOCK_DEBUG (g_message ("%s: (%d) signalling and dequeuing handle %p", __func__, mono_thread_info_get_small_id (), mon->wait_list->data));
1242 SetEvent (mon->wait_list->data);
1243 mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
1248 ves_icall_System_Threading_Monitor_Monitor_wait (MonoObject *obj, guint32 ms)
1251 MonoThreadsSync *mon;
1255 gboolean success = FALSE;
1257 MonoInternalThread *thread = mono_thread_internal_current ();
1258 int id = mono_thread_info_get_small_id ();
1260 LOCK_DEBUG (g_message ("%s: (%d) Trying to wait for %p with timeout %dms", __func__, mono_thread_info_get_small_id (), obj, ms));
1262 lw.sync = obj->synchronisation;
1264 mono_monitor_ensure_owned (lw, id);
1266 if (!lock_word_is_inflated (lw)) {
1267 mono_monitor_inflate_owned (obj, id);
1268 lw.sync = obj->synchronisation;
1271 mon = lock_word_get_inflated_lock (lw);
1273 /* Do this WaitSleepJoin check before creating the event handle */
1274 mono_thread_current_check_pending_interrupt ();
1276 event = CreateEvent (NULL, FALSE, FALSE, NULL);
1277 if (event == NULL) {
1278 mono_set_pending_exception (mono_get_exception_synchronization_lock ("Failed to set up wait event"));
1282 LOCK_DEBUG (g_message ("%s: (%d) queuing handle %p", __func__, mono_thread_info_get_small_id (), event));
1284 mono_thread_current_check_pending_interrupt ();
1286 mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
1288 mon->wait_list = g_slist_append (mon->wait_list, event);
1290 /* Save the nest count, and release the lock */
1293 mono_memory_write_barrier ();
1294 mono_monitor_exit_inflated (obj);
1296 LOCK_DEBUG (g_message ("%s: (%d) Unlocked %p lock %p", __func__, mono_thread_info_get_small_id (), obj, mon));
1298 /* There's no race between unlocking mon and waiting for the
1299 * event, because auto reset events are sticky, and this event
1300 * is private to this thread. Therefore even if the event was
1301 * signalled before we wait, we still succeed.
1303 MONO_PREPARE_BLOCKING;
1304 ret = WaitForSingleObjectEx (event, ms, TRUE);
1305 MONO_FINISH_BLOCKING;
1307 /* Reset the thread state fairly early, so we don't have to worry
1308 * about the monitor error checking
1310 mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
1312 /* Regain the lock with the previous nest count */
1314 regain = mono_monitor_try_enter_inflated (obj, INFINITE, TRUE, id);
1315 /* We must regain the lock before handling interruption requests */
1316 } while (regain == -1);
1318 g_assert (regain == 1);
1322 LOCK_DEBUG (g_message ("%s: (%d) Regained %p lock %p", __func__, mono_thread_info_get_small_id (), obj, mon));
1324 if (ret == WAIT_TIMEOUT) {
1325 /* Poll the event again, just in case it was signalled
1326 * while we were trying to regain the monitor lock
1328 MONO_PREPARE_BLOCKING;
1329 ret = WaitForSingleObjectEx (event, 0, FALSE);
1330 MONO_FINISH_BLOCKING;
1333 /* Pulse will have popped our event from the queue if it signalled
1334 * us, so we only do it here if the wait timed out.
1336 * This avoids a race condition where the thread holding the
1337 * lock can Pulse several times before the WaitForSingleObject
1338 * returns. If we popped the queue here then this event might
1339 * be signalled more than once, thereby starving another
1343 if (ret == WAIT_OBJECT_0) {
1344 LOCK_DEBUG (g_message ("%s: (%d) Success", __func__, mono_thread_info_get_small_id ()));
1347 LOCK_DEBUG (g_message ("%s: (%d) Wait failed, dequeuing handle %p", __func__, mono_thread_info_get_small_id (), event));
1348 /* No pulse, so we have to remove ourself from the
1351 mon->wait_list = g_slist_remove (mon->wait_list, event);
1353 CloseHandle (event);