2 * monitor.c: Monitor locking functions
5 * Dick Porter (dick@ximian.com)
7 * Copyright 2003 Ximian, Inc (http://www.ximian.com)
8 * Copyright 2004-2009 Novell, Inc (http://www.novell.com)
15 #include <mono/metadata/abi-details.h>
16 #include <mono/metadata/monitor.h>
17 #include <mono/metadata/threads-types.h>
18 #include <mono/metadata/exception.h>
19 #include <mono/metadata/threads.h>
20 #include <mono/io-layer/io-layer.h>
21 #include <mono/metadata/object-internals.h>
22 #include <mono/metadata/class-internals.h>
23 #include <mono/metadata/gc-internal.h>
24 #include <mono/metadata/method-builder.h>
25 #include <mono/metadata/debug-helpers.h>
26 #include <mono/metadata/tabledefs.h>
27 #include <mono/metadata/marshal.h>
28 #include <mono/utils/mono-threads.h>
29 #include <mono/metadata/profiler-private.h>
30 #include <mono/utils/mono-time.h>
31 #include <mono/utils/atomic.h>
34 * Pull the list of opcodes
36 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
40 #include "mono/cil/opcode.def"
45 /*#define LOCK_DEBUG(a) do { a; } while (0)*/
49 * The monitor implementation here is based on
50 * http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf and
51 * http://www.research.ibm.com/people/d/dfb/papers/Bacon98Thin.ps
53 * The Dice paper describes a technique for saving lock record space
54 * by returning records to a free list when they become unused. That
55 * sounds like unnecessary complexity to me, though if it becomes
56 * clear that unused lock records are taking up lots of space or we
57 * need to shave more time off by avoiding a malloc then we can always
58 * implement the free list idea later. The timeout parameter to
59 * try_enter voids some of the assumptions about the reference count
60 * field in Dice's implementation too. In his version, the thread
61 * attempting to lock a contended object will block until it succeeds,
62 * so the reference count will never be decremented while an object is
65 * Bacon's thin locks have a fast path that doesn't need a lock record
66 * for the common case of locking an unlocked or shallow-nested
67 * object, but the technique relies on encoding the thread ID in 15
68 * bits (to avoid too much per-object space overhead.) Unfortunately
69 * I don't think it's possible to reliably encode a pthread_t into 15
70 * bits. (The JVM implementation used seems to have a 15-bit
71 * per-thread identifier available.)
73 * This implementation then combines Dice's basic lock model with
74 * Bacon's simplification of keeping a lock record for the lifetime of
/* Monitors are handed out from chunked arrays; each chunk is a MonitorArray.
 * NOTE(review): the chunk's count and next-link fields are declared on lines
 * not visible in this view — confirm against the full struct definition. */
typedef struct _MonitorArray MonitorArray;

struct _MonitorArray {
	/* Variable-length tail of sync blocks (MONO_ZERO_LEN_ARRAY trick);
	 * the actual element count is fixed when the chunk is allocated. */
	MonoThreadsSync monitors [MONO_ZERO_LEN_ARRAY];

/* Serializes all access to the allocator state below (freelist + chunk list). */
#define mono_monitor_allocator_lock() mono_mutex_lock (&monitor_mutex)
#define mono_monitor_allocator_unlock() mono_mutex_unlock (&monitor_mutex)
static mono_mutex_t monitor_mutex;
/* Singly-linked free list: a free monitor's 'data' field is the next link. */
static MonoThreadsSync *monitor_freelist;
/* Head of the list of every chunk ever allocated. */
static MonitorArray *monitor_allocated;
/* Element count for the next chunk to be allocated. */
static int array_size = 16;
95 mon_status_get_owner (guint32 status)
97 return status & OWNER_MASK;
100 static inline guint32
101 mon_status_set_owner (guint32 status, guint32 owner)
103 return (status & ENTRY_COUNT_MASK) | owner;
107 mon_status_get_entry_count (guint32 status)
109 gint32 entry_count = (gint32)((status & ENTRY_COUNT_MASK) >> ENTRY_COUNT_SHIFT);
110 gint32 zero = (gint32)(((guint32)ENTRY_COUNT_ZERO) >> ENTRY_COUNT_SHIFT);
111 return entry_count - zero;
114 static inline guint32
115 mon_status_init_entry_count (guint32 status)
117 return (status & OWNER_MASK) | ENTRY_COUNT_ZERO;
120 static inline guint32
121 mon_status_increment_entry_count (guint32 status)
123 return status + (1 << ENTRY_COUNT_SHIFT);
126 static inline guint32
127 mon_status_decrement_entry_count (guint32 status)
129 return status - (1 << ENTRY_COUNT_SHIFT);
132 static inline gboolean
133 mon_status_have_waiters (guint32 status)
135 return status & ENTRY_COUNT_WAITERS;
139 mono_monitor_init (void)
141 mono_mutex_init_recursive (&monitor_mutex);
/*
 * Runtime-shutdown hook for the monitor subsystem. Most of the teardown
 * below is disabled (see the FIXME) because of a shutdown-ordering
 * dependency between monitor memory and the GC's weak links.
 */
mono_monitor_cleanup (void)
	MonoThreadsSync *mon;
	/* MonitorArray *marray, *next = NULL; */

	/*mono_mutex_destroy (&monitor_mutex);*/

	/* The monitors on the freelist don't have weak links - mark them */
	for (mon = monitor_freelist; mon; mon = mon->data)
		mon->wait_list = (gpointer)-1;

	/*
	 * FIXME: This still crashes with sgen (async_read.exe)
	 *
	 * In mini_cleanup() we first call mono_runtime_cleanup(), which calls
	 * mono_monitor_cleanup(), which is supposed to free all monitor memory.
	 *
	 * Later in mini_cleanup(), we call mono_domain_free(), which calls
	 * mono_gc_clear_domain(), which frees all weak links associated with objects.
	 * Those weak links reside in the monitor structures, which we've freed earlier.
	 *
	 * Unless we fix this dependency in the shutdown sequence this code has to remain
	 * disabled, or at least the call to g_free().
	 */
	/* NOTE(review): this loop appears to be inside the disabled region
	 * described above — confirm the surrounding #if in the full source. */
	for (marray = monitor_allocated; marray; marray = next) {
		for (i = 0; i < marray->num_monitors; ++i) {
			mon = &marray->monitors [i];
			/* (gpointer)-1 marks freelist entries, which have no weak link */
			if (mon->wait_list != (gpointer)-1)
				mono_gc_weak_link_remove (&mon->data);
/*
 * Report whether @mon is a recycled sync block. Scans every allocated
 * chunk and checks @mon's address against the chunk's slot range;
 * presumably returns TRUE on a hit — the return statements are on lines
 * not visible here, confirm against the full source.
 */
monitor_is_on_freelist (MonoThreadsSync *mon)
	MonitorArray *marray;
	for (marray = monitor_allocated; marray; marray = marray->next) {
		/* address-range test: is @mon one of this chunk's slots? */
		if (mon >= marray->monitors && mon < &marray->monitors [marray->num_monitors])
 * Print a report on stdout of the managed locks currently held by
 * threads. If @include_untaken is specified, list also inflated locks
 * that are currently unheld.
 * This is supposed to be used in debuggers like gdb.
 */
mono_locks_dump (gboolean include_untaken)
	int used = 0, on_freelist = 0, to_recycle = 0, total = 0, num_arrays = 0;
	MonoThreadsSync *mon;
	MonitorArray *marray;
	/* Free monitors chain through their 'data' field. */
	for (mon = monitor_freelist; mon; mon = mon->data)
	for (marray = monitor_allocated; marray; marray = marray->next) {
		total += marray->num_monitors;
		for (i = 0; i < marray->num_monitors; ++i) {
			mon = &marray->monitors [i];
			if (mon->data == NULL) {
				/* slot is unused; the last slot of a chunk is the
				 * freelist terminator, so don't count it as recyclable */
				if (i < marray->num_monitors - 1)
			/* in-use monitor: 'data' is the GC weak link to its object */
			if (!monitor_is_on_freelist (mon->data)) {
				MonoObject *holder = mono_gc_weak_link_get (&mon->data);
				if (mon_status_get_owner (mon->status)) {
					g_print ("Lock %p in object %p held by thread %d, nest level: %d\n",
						mon, holder, mon_status_get_owner (mon->status), mon->nest);
						g_print ("\tWaiting on semaphore %p: %d\n", mon->entry_sem, mon_status_get_entry_count (mon->status));
				} else if (include_untaken) {
					g_print ("Lock %p in object %p untaken\n", mon, holder);
	g_print ("Total locks (in %d array(s)): %d, used: %d, on freelist: %d, to recycle: %d\n",
		num_arrays, total, used, on_freelist, to_recycle);
/* LOCKING: this is called with monitor_mutex held */
/*
 * Recycle @mon onto the freelist: close its entry semaphore and chain it
 * through its 'data' field. The wait list must already be empty.
 */
mon_finalize (MonoThreadsSync *mon)
	LOCK_DEBUG (g_message ("%s: Finalizing sync %p", __func__, mon));

	if (mon->entry_sem != NULL) {
		CloseHandle (mon->entry_sem);
		mon->entry_sem = NULL;

	/* If this isn't empty then something is seriously broken - it
	 * means a thread is still waiting on the object that owned
	 * this lock, but the object has been finalized.
	 */
	g_assert (mon->wait_list == NULL);

	/* owner and nest are set in mon_new, no need to zero them out */

	/* push onto the freelist; 'data' doubles as the next link there */
	mon->data = monitor_freelist;
	monitor_freelist = mon;
#ifndef DISABLE_PERFCOUNTERS
	mono_perfcounters->gc_sync_blocks--;
267 /* LOCKING: this is called with monitor_mutex held */
268 static MonoThreadsSync *
271 MonoThreadsSync *new;
273 if (!monitor_freelist) {
274 MonitorArray *marray;
276 /* see if any sync block has been collected */
278 for (marray = monitor_allocated; marray; marray = marray->next) {
279 for (i = 0; i < marray->num_monitors; ++i) {
280 if (marray->monitors [i].data == NULL) {
281 new = &marray->monitors [i];
282 if (new->wait_list) {
283 /* Orphaned events left by aborted threads */
284 while (new->wait_list) {
285 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d): Closing orphaned event %d", mono_thread_info_get_small_id (), new->wait_list->data));
286 CloseHandle (new->wait_list->data);
287 new->wait_list = g_slist_remove (new->wait_list, new->wait_list->data);
290 mono_gc_weak_link_remove (&new->data, TRUE);
291 new->data = monitor_freelist;
292 monitor_freelist = new;
295 /* small perf tweak to avoid scanning all the blocks */
299 /* need to allocate a new array of monitors */
300 if (!monitor_freelist) {
302 LOCK_DEBUG (g_message ("%s: allocating more monitors: %d", __func__, array_size));
303 marray = g_malloc0 (sizeof (MonoArray) + array_size * sizeof (MonoThreadsSync));
304 marray->num_monitors = array_size;
306 /* link into the freelist */
307 for (i = 0; i < marray->num_monitors - 1; ++i) {
308 marray->monitors [i].data = &marray->monitors [i + 1];
310 marray->monitors [i].data = NULL; /* the last one */
311 monitor_freelist = &marray->monitors [0];
312 /* we happend the marray instead of prepending so that
313 * the collecting loop above will need to scan smaller arrays first
315 if (!monitor_allocated) {
316 monitor_allocated = marray;
318 last = monitor_allocated;
326 new = monitor_freelist;
327 monitor_freelist = new->data;
329 new->status = mon_status_set_owner (0, id);
330 new->status = mon_status_init_entry_count (new->status);
334 #ifndef DISABLE_PERFCOUNTERS
335 mono_perfcounters->gc_sync_blocks++;
 * Format of the lock word:
 * thinhash | fathash | data
 *
 * thinhash is the lower bit: if set data is the shifted hashcode of the object.
 * fathash is another bit: if set the hash code is stored in the MonoThreadsSync
 *   struct pointed to by data
 * if neither bit is set and data is non-NULL, data is a MonoThreadsSync
	/* Overlaid with an integral 'lock_word' view in this union so the tag
	 * bits can be tested/masked arithmetically. */
	MonoThreadsSync *sync;

	LOCK_WORD_THIN_HASH = 1,	/* bit 0: hash stored inline, shifted */
	LOCK_WORD_FAT_HASH = 1 << 1,	/* bit 1: hash lives in the sync struct */
	LOCK_WORD_BITS_MASK = 0x3,	/* both tag bits */
	LOCK_WORD_HASH_SHIFT = 2	/* inline hash occupies the remaining bits */

/* Low address bits discarded when hashing an object pointer; presumably
 * objects are at least 8-byte aligned — TODO confirm. */
#define MONO_OBJECT_ALIGNMENT_SHIFT	3
 * Calculate a hash code for @obj that is constant while @obj is alive.
 */
mono_object_hash (MonoObject* obj)
#ifdef HAVE_MOVING_COLLECTOR
	lw.sync = obj->synchronisation;
	/* Fast path 1: hash already stored inline in the lock word. */
	if (lw.lock_word & LOCK_WORD_THIN_HASH) {
		/*g_print ("fast thin hash %d for obj %p store\n", (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT, obj);*/
		return (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT;
	/* Fast path 2: hash stored in the inflated sync block. */
	if (lw.lock_word & LOCK_WORD_FAT_HASH) {
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		/*g_print ("fast fat hash %d for obj %p store\n", lw.sync->hash_code, obj);*/
		return lw.sync->hash_code;
	/*
	 * while we are inside this function, the GC will keep this object pinned,
	 * since we are in the unmanaged stack. Thanks to this and to the hash
	 * function that depends only on the address, we can ignore the races if
	 * another thread computes the hash at the same time, because it'll end up
	 * with the same value.
	 */
	hash = (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
	/* clear the top bits as they can be discarded */
	hash &= ~(LOCK_WORD_BITS_MASK << 30);
	/* no hash flags were set, so it must be a MonoThreadsSync pointer if not NULL */
		/* object already inflated: record the hash in the sync block */
		lw.sync->hash_code = hash;
		/*g_print ("storing hash code %d for obj %p in sync %p\n", hash, obj, lw.sync);*/
		lw.lock_word |= LOCK_WORD_FAT_HASH;
		/* this is safe since we don't deflate locks */
		obj->synchronisation = lw.sync;
		/*g_print ("storing thin hash code %d for obj %p\n", hash, obj);*/
		lw.lock_word = LOCK_WORD_THIN_HASH | (hash << LOCK_WORD_HASH_SHIFT);
		/* install the thin hash only if nobody raced us with a lock/hash */
		if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, NULL) == NULL)
		/*g_print ("failed store\n");*/
		/* someone set the hash flag or someone inflated the object */
		lw.sync = obj->synchronisation;
		if (lw.lock_word & LOCK_WORD_THIN_HASH)
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
		lw.sync->hash_code = hash;
		lw.lock_word |= LOCK_WORD_FAT_HASH;
		/* this is safe since we don't deflate locks */
		obj->synchronisation = lw.sync;
	/*
	 * Non-moving collector: the address itself is stable, so hash it directly.
	 * Wang's address-based hash function:
	 * http://www.concentric.net/~Ttwang/tech/addrhash.htm
	 */
	return (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
431 mon_decrement_entry_count (MonoThreadsSync *mon)
433 guint32 old_status, tmp_status, new_status;
435 /* Decrement entry count */
436 old_status = mon->status;
438 new_status = mon_status_decrement_entry_count (old_status);
439 tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
440 if (tmp_status == old_status) {
443 old_status = tmp_status;
/* If allow_interruption==TRUE, the method will be interrupted if abort or suspend
 * is requested. In this case it returns -1.
 *
 * Returns 1 when the lock was acquired, 0 on timeout, -1 when interrupted
 * (callers compare against 1 — see mono_monitor_enter/try_enter below).
 */
mono_monitor_try_enter_internal (MonoObject *obj, guint32 ms, gboolean allow_interruption)
	MonoThreadsSync *mon;
	gsize id = mono_thread_info_get_small_id ();
	guint32 then = 0, now, delta;
	guint32 new_status, old_status, tmp_status;
	MonoInternalThread *thread;
	gboolean interrupted = FALSE;

	LOCK_DEBUG (g_message("%s: (%d) Trying to lock object %p (%d ms)", __func__, id, obj, ms));

	if (G_UNLIKELY (!obj)) {
		mono_raise_exception (mono_get_exception_argument_null ("obj"));

	mon = obj->synchronisation;

	/* If the object has never been locked... */
	if (G_UNLIKELY (mon == NULL)) {
		mono_monitor_allocator_lock ();
		/* publish the fresh sync block; only one thread's CAS wins */
		if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, mon, NULL) == NULL) {
			mono_gc_weak_link_add (&mon->data, obj, TRUE);
			mono_monitor_allocator_unlock ();
			/* Successfully locked */
#ifdef HAVE_MOVING_COLLECTOR
		/* CAS lost: the lock word may meanwhile hold a thin or fat hash */
		lw.sync = obj->synchronisation;
		if (lw.lock_word & LOCK_WORD_THIN_HASH) {
			MonoThreadsSync *oldlw = lw.sync;
			/* move the already calculated hash */
			mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
			lw.lock_word |= LOCK_WORD_FAT_HASH;
			if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
				mono_gc_weak_link_add (&mon->data, obj, TRUE);
				mono_monitor_allocator_unlock ();
				/* Successfully locked */
			mono_monitor_allocator_unlock ();
		} else if (lw.lock_word & LOCK_WORD_FAT_HASH) {
			mono_monitor_allocator_unlock ();
			/* get the old lock without the fat hash bit */
			lw.lock_word &= ~LOCK_WORD_BITS_MASK;
			mono_monitor_allocator_unlock ();
			mon = obj->synchronisation;
		mono_monitor_allocator_unlock ();
		mon = obj->synchronisation;

#ifdef HAVE_MOVING_COLLECTOR
	/* existing lock word may be a thin hash: inflate it into a fat lock */
	if (lw.lock_word & LOCK_WORD_THIN_HASH) {
		MonoThreadsSync *oldlw = lw.sync;
		mono_monitor_allocator_lock ();
		/* move the already calculated hash */
		mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
		lw.lock_word |= LOCK_WORD_FAT_HASH;
		if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
			mono_gc_weak_link_add (&mon->data, obj, TRUE);
			mono_monitor_allocator_unlock ();
			/* Successfully locked */
		mono_monitor_allocator_unlock ();

#ifdef HAVE_MOVING_COLLECTOR
	/* strip tag bits to recover the MonoThreadsSync pointer */
	lw.lock_word &= ~LOCK_WORD_BITS_MASK;

	/* If the object has previously been locked but isn't now... */

	/* This case differs from Dice's case 3 because we don't
	 * deflate locks or cache unused lock records
	 */
	old_status = mon->status;
	if (G_LIKELY (mon_status_get_owner (old_status) == 0)) {
		/* Try to install our ID in the owner field, nest
		 * should have been left at 1 by the previous unlock
		 */
		new_status = mon_status_set_owner (old_status, id);
		tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
		if (G_LIKELY (tmp_status == old_status)) {
			g_assert (mon->nest == 1);

	/* If the object is currently locked by this thread... */
	if (mon_status_get_owner (old_status) == id) {

	/* The object must be locked by someone else... */
#ifndef DISABLE_PERFCOUNTERS
	mono_perfcounters->thread_contentions++;

	/* If ms is 0 we don't block, but just fail straight away */
		LOCK_DEBUG (g_message ("%s: (%d) timed out, returning FALSE", __func__, id));

	mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_CONTENTION);

	/* The slow path begins here. */

	/* a small amount of duplicated code, but it allows us to insert the profiler
	 * callbacks without impacting the fast path: from here on we don't need to go back to the
	 * retry label, but to retry_contended. At this point mon is already installed in the object
	 */

	/* This case differs from Dice's case 3 because we don't
	 * deflate locks or cache unused lock records
	 */
	old_status = mon->status;
	if (G_LIKELY (mon_status_get_owner (old_status) == 0)) {
		/* Try to install our ID in the owner field, nest
		 * should have been left at 1 by the previous unlock
		 */
		new_status = mon_status_set_owner (old_status, id);
		tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
		if (G_LIKELY (tmp_status == old_status)) {
			g_assert (mon->nest == 1);
			mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_DONE);

	/* If the object is currently locked by this thread... */
	if (mon_status_get_owner (old_status) == id) {
		mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_DONE);

	/* We need to make sure there's a semaphore handle (creating it if
	 * necessary), and block on it
	 */
	if (mon->entry_sem == NULL) {
		/* Create the semaphore */
		sem = CreateSemaphore (NULL, 0, 0x7fffffff, NULL);
		g_assert (sem != NULL);
		/* only one thread may install its semaphore; losers discard theirs */
		if (InterlockedCompareExchangePointer ((gpointer*)&mon->entry_sem, sem, NULL) != NULL) {
			/* Someone else just put a handle here */

	/*
	 * We need to register ourselves as waiting if it is the first time we are waiting,
	 * or if we were signaled and failed to acquire the lock.
	 */
	old_status = mon->status;
		/* owner vanished while registering: go try to take the lock again */
		if (mon_status_get_owner (old_status) == 0)
			goto retry_contended;
		new_status = mon_status_increment_entry_count (old_status);
		tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
		if (tmp_status == old_status) {
		old_status = tmp_status;

	/* remember when we started so a finite timeout can be decremented */
	if (ms != INFINITE) {
		then = mono_msec_ticks ();

#ifndef DISABLE_PERFCOUNTERS
	mono_perfcounters->thread_queue_len++;
	mono_perfcounters->thread_queue_max++;
	thread = mono_thread_internal_current ();

	mono_thread_set_state (thread, ThreadState_WaitSleepJoin);

	/*
	 * We pass TRUE instead of allow_interruption since we have to check for the
	 * StopRequested case below.
	 */
	MONO_PREPARE_BLOCKING
	ret = WaitForSingleObjectEx (mon->entry_sem, waitms, TRUE);

	mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);

#ifndef DISABLE_PERFCOUNTERS
	mono_perfcounters->thread_queue_len--;

	if (ret == WAIT_IO_COMPLETION && !allow_interruption) {
		/*
		 * We have to obey a stop/suspend request even if
		 * allow_interruption is FALSE to avoid hangs at shutdown.
		 */
		if (!mono_thread_test_state (mono_thread_internal_current (), (ThreadState_StopRequested|ThreadState_SuspendRequested))) {
			if (ms != INFINITE) {
				now = mono_msec_ticks ();
					/* millisecond tick counter wrapped around */
					LOCK_DEBUG (g_message ("%s: wrapped around! now=0x%x then=0x%x", __func__, now, then));
					now += (0xffffffff - then);
					LOCK_DEBUG (g_message ("%s: wrap rejig: now=0x%x then=0x%x delta=0x%x", __func__, now, then, now-then));

			/* retry from the top */
			goto retry_contended;
	} else if (ret == WAIT_OBJECT_0) {
		/* retry from the top */
		goto retry_contended;
	} else if (ret == WAIT_TIMEOUT) {

	/* Timed out or interrupted */
	mon_decrement_entry_count (mon);

	mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_FAIL);

	if (ret == WAIT_IO_COMPLETION) {
		LOCK_DEBUG (g_message ("%s: (%d) interrupted waiting, returning -1", __func__, id));
	} else if (ret == WAIT_TIMEOUT) {
		LOCK_DEBUG (g_message ("%s: (%d) timed out waiting, returning FALSE", __func__, id));
	g_assert_not_reached ();
741 mono_monitor_enter (MonoObject *obj)
743 return mono_monitor_try_enter_internal (obj, INFINITE, FALSE) == 1;
747 mono_monitor_try_enter (MonoObject *obj, guint32 ms)
749 return mono_monitor_try_enter_internal (obj, ms, FALSE) == 1;
/*
 * Release one nesting level of @obj's monitor held by the calling thread.
 * Exiting an object that was never entered is silently ignored (MS semantics).
 */
mono_monitor_exit (MonoObject *obj)
	MonoThreadsSync *mon;
	guint32 new_status, old_status, tmp_status;

	LOCK_DEBUG (g_message ("%s: (%d) Unlocking %p", __func__, mono_thread_info_get_small_id (), obj));

	if (G_UNLIKELY (!obj)) {
		mono_raise_exception (mono_get_exception_argument_null ("obj"));

	mon = obj->synchronisation;

#ifdef HAVE_MOVING_COLLECTOR
	/* a thin-hash word means the lock was never inflated/taken */
	if (lw.lock_word & LOCK_WORD_THIN_HASH)
	lw.lock_word &= ~LOCK_WORD_BITS_MASK;

	if (G_UNLIKELY (mon == NULL)) {
		/* No one ever used Enter. Just ignore the Exit request as MS does */

	old_status = mon->status;
	if (G_UNLIKELY (mon_status_get_owner (old_status) != mono_thread_info_get_small_id ())) {

	nest = mon->nest - 1;

	/*
	 * Release lock and do the wakeup stuff. It's possible that
	 * the last blocking thread gave up waiting just before we
	 * release the semaphore resulting in a negative entry count
	 * and a futile wakeup next time there's contention for this
	 * object.
	 */
		gboolean have_waiters = mon_status_have_waiters (old_status);

		new_status = mon_status_set_owner (old_status, 0);
			new_status = mon_status_decrement_entry_count (new_status);
		tmp_status = InterlockedCompareExchange ((gint32*)&mon->status, new_status, old_status);
		if (tmp_status == old_status) {
			/* CAS won: wake one queued thread if any were waiting */
				ReleaseSemaphore (mon->entry_sem, 1, NULL);
		old_status = tmp_status;
		LOCK_DEBUG (g_message ("%s: (%d) Object %p is now unlocked", __func__, mono_thread_info_get_small_id (), obj));

		/* object is now unlocked, leave nest==1 so we don't
		 * need to set it when the lock is reacquired
		 */
		LOCK_DEBUG (g_message ("%s: (%d) Object %p is now locked %d times", __func__, mono_thread_info_get_small_id (), obj, nest));
/*
 * Return the address of the GC weak-link slot ('data') inside @object's
 * inflated sync block; presumably NULL when no inflated monitor exists —
 * the return statements are on lines not visible here, confirm in full source.
 */
mono_monitor_get_object_monitor_weak_link (MonoObject *object)
	MonoThreadsSync *sync = NULL;

	lw.sync = object->synchronisation;
	if (lw.lock_word & LOCK_WORD_FAT_HASH) {
		/* fat hash: strip tag bits to recover the sync pointer */
		lw.lock_word &= ~LOCK_WORD_BITS_MASK;
	} else if (!(lw.lock_word & LOCK_WORD_THIN_HASH)) {
		/* untagged non-NULL word is a plain MonoThreadsSync pointer */

	if (sync && sync->data)
 * mono_monitor_threads_sync_member_offset:
 * @status_offset: returns size and offset of the "status" member
 * @nest_offset: returns size and offset of the "nest" member
 *
 * Returns the offsets and sizes of two members of the
 * MonoThreadsSync struct. The Monitor ASM fastpaths need this.
 */
mono_monitor_threads_sync_members_offset (int *status_offset, int *nest_offset)

/* pack the field offset into the high bits and its byte size into the low 8 */
#define ENCODE_OFF_SIZE(o,s)	(((o) << 8) | ((s) & 0xff))

	*status_offset = ENCODE_OFF_SIZE (MONO_STRUCT_OFFSET (MonoThreadsSync, status), sizeof (ts.status));
	*nest_offset = ENCODE_OFF_SIZE (MONO_STRUCT_OFFSET (MonoThreadsSync, nest), sizeof (ts.nest));
/*
 * Icall backing Monitor.TryEnter(obj, ms): enters with interruption allowed
 * and runs the interruption checkpoint when the wait was interrupted (-1).
 */
ves_icall_System_Threading_Monitor_Monitor_try_enter (MonoObject *obj, guint32 ms)
	res = mono_monitor_try_enter_internal (obj, ms, TRUE);
		mono_thread_interruption_checkpoint ();
/*
 * Icall backing Monitor.TryEnter(obj, ms, ref lockTaken): like try_enter,
 * but also reports acquisition through *lockTaken for the managed caller.
 */
ves_icall_System_Threading_Monitor_Monitor_try_enter_with_atomic_var (MonoObject *obj, guint32 ms, char *lockTaken)
	res = mono_monitor_try_enter_internal (obj, ms, TRUE);
	/* This means we got interrupted during the wait and didn't get the monitor. */
		mono_thread_interruption_checkpoint ();

	/* It's safe to do it from here since interruption would happen only on the wrapper. */
	*lockTaken = res == 1;
/*
 * Monitor.Enter(obj, ref lockTaken) (v4 overload): raises ArgumentException
 * when lockTaken is already true, then blocks until the lock is acquired,
 * publishing the result through *lock_taken.
 */
mono_monitor_enter_v4 (MonoObject *obj, char *lock_taken)
	if (*lock_taken == 1)
		mono_raise_exception (mono_get_exception_argument ("lockTaken", "lockTaken is already true"));

	ves_icall_System_Threading_Monitor_Monitor_try_enter_with_atomic_var (obj, INFINITE, lock_taken);
/*
 * Icall: does the calling thread currently own @obj's monitor?
 */
ves_icall_System_Threading_Monitor_Monitor_test_owner (MonoObject *obj)
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message ("%s: Testing if %p is owned by thread %d", __func__, obj, mono_thread_info_get_small_id()));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	/* thin-hash word: the object was never locked, so not owned by us */
	if (lw.lock_word & LOCK_WORD_THIN_HASH)
	lw.lock_word &= ~LOCK_WORD_BITS_MASK;

	/* compare the owner id in the status word with our small id */
	if (mon_status_get_owner (mon->status) == mono_thread_info_get_small_id ()) {
/*
 * Icall: is @obj's monitor currently held by any thread at all?
 */
ves_icall_System_Threading_Monitor_Monitor_test_synchronised (MonoObject *obj)
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message("%s: (%d) Testing if %p is owned by any thread", __func__, mono_thread_info_get_small_id (), obj));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	/* thin-hash word: the object was never locked */
	if (lw.lock_word & LOCK_WORD_THIN_HASH)
	lw.lock_word &= ~LOCK_WORD_BITS_MASK;

	/* any non-zero owner id means some thread holds the lock */
	if (mon_status_get_owner (mon->status) != 0) {
/* All wait list manipulation in the pulse, pulseall and wait
 * functions happens while the monitor lock is held, so we don't need
 * any extra struct locking
 */

/*
 * Icall backing Monitor.Pulse: wake the first waiter queued on @obj's
 * monitor. The calling thread must own the monitor.
 */
ves_icall_System_Threading_Monitor_Monitor_pulse (MonoObject *obj)
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message ("%s: (%d) Pulsing %p", __func__, mono_thread_info_get_small_id (), obj));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	/* thin-hash word: never locked, so Pulse is an error */
	if (lw.lock_word & LOCK_WORD_THIN_HASH) {
		mono_set_pending_exception (mono_get_exception_synchronization_lock ("Not locked"));
	lw.lock_word &= ~LOCK_WORD_BITS_MASK;

		mono_set_pending_exception (mono_get_exception_synchronization_lock ("Not locked"));

	if (mon_status_get_owner (mon->status) != mono_thread_info_get_small_id ()) {
		mono_set_pending_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));

	LOCK_DEBUG (g_message ("%s: (%d) %d threads waiting", __func__, mono_thread_info_get_small_id (), g_slist_length (mon->wait_list)));

	if (mon->wait_list != NULL) {
		LOCK_DEBUG (g_message ("%s: (%d) signalling and dequeuing handle %p", __func__, mono_thread_info_get_small_id (), mon->wait_list->data));

		/* wake exactly one waiter and pop its event from the queue */
		SetEvent (mon->wait_list->data);
		mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
/*
 * Icall backing Monitor.PulseAll: wake every waiter queued on @obj's
 * monitor. The calling thread must own the monitor.
 */
ves_icall_System_Threading_Monitor_Monitor_pulse_all (MonoObject *obj)
	MonoThreadsSync *mon;

	LOCK_DEBUG (g_message("%s: (%d) Pulsing all %p", __func__, mono_thread_info_get_small_id (), obj));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	/* thin-hash word: never locked, so PulseAll is an error */
	if (lw.lock_word & LOCK_WORD_THIN_HASH) {
		mono_set_pending_exception (mono_get_exception_synchronization_lock ("Not locked"));
	lw.lock_word &= ~LOCK_WORD_BITS_MASK;

		mono_set_pending_exception (mono_get_exception_synchronization_lock ("Not locked"));

	if (mon_status_get_owner (mon->status) != mono_thread_info_get_small_id ()) {
		mono_set_pending_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));

	LOCK_DEBUG (g_message ("%s: (%d) %d threads waiting", __func__, mono_thread_info_get_small_id (), g_slist_length (mon->wait_list)));

	/* drain the whole queue, signalling each waiter's event in turn */
	while (mon->wait_list != NULL) {
		LOCK_DEBUG (g_message ("%s: (%d) signalling and dequeuing handle %p", __func__, mono_thread_info_get_small_id (), mon->wait_list->data));

		SetEvent (mon->wait_list->data);
		mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
/*
 * Icall backing Monitor.Wait: enqueue a private event on @obj's monitor,
 * release the lock, wait up to @ms for a Pulse, then re-acquire the lock
 * with the previous nest count. Returns TRUE when pulsed, FALSE otherwise.
 */
ves_icall_System_Threading_Monitor_Monitor_wait (MonoObject *obj, guint32 ms)
	MonoThreadsSync *mon;
	gboolean success = FALSE;
	MonoInternalThread *thread = mono_thread_internal_current ();

	LOCK_DEBUG (g_message ("%s: (%d) Trying to wait for %p with timeout %dms", __func__, mono_thread_info_get_small_id (), obj, ms));

	mon = obj->synchronisation;
#ifdef HAVE_MOVING_COLLECTOR
	/* thin-hash word: the monitor was never entered */
	if (lw.lock_word & LOCK_WORD_THIN_HASH) {
		mono_set_pending_exception (mono_get_exception_synchronization_lock ("Not locked"));
	lw.lock_word &= ~LOCK_WORD_BITS_MASK;

		mono_set_pending_exception (mono_get_exception_synchronization_lock ("Not locked"));

	if (mon_status_get_owner (mon->status) != mono_thread_info_get_small_id ()) {
		mono_set_pending_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));

	/* Do this WaitSleepJoin check before creating the event handle */
	mono_thread_current_check_pending_interrupt ();

	/* auto-reset, initially unsignalled, private to this waiting thread */
	event = CreateEvent (NULL, FALSE, FALSE, NULL);
	if (event == NULL) {
		mono_set_pending_exception (mono_get_exception_synchronization_lock ("Failed to set up wait event"));

	LOCK_DEBUG (g_message ("%s: (%d) queuing handle %p", __func__, mono_thread_info_get_small_id (), event));

	mono_thread_current_check_pending_interrupt ();

	mono_thread_set_state (thread, ThreadState_WaitSleepJoin);

	mon->wait_list = g_slist_append (mon->wait_list, event);

	/* Save the nest count, and release the lock */
	mono_monitor_exit (obj);

	LOCK_DEBUG (g_message ("%s: (%d) Unlocked %p lock %p", __func__, mono_thread_info_get_small_id (), obj, mon));

	/* There's no race between unlocking mon and waiting for the
	 * event, because auto reset events are sticky, and this event
	 * is private to this thread. Therefore even if the event was
	 * signalled before we wait, we still succeed.
	 */
	MONO_PREPARE_BLOCKING
	ret = WaitForSingleObjectEx (event, ms, TRUE);
	MONO_FINISH_BLOCKING

	/* Reset the thread state fairly early, so we don't have to worry
	 * about the monitor error checking
	 */
	mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);

	if (mono_thread_interruption_requested ()) {
		/*
		 * Can't remove the event from wait_list, since the monitor is not locked by
		 * us. So leave it there, mon_new () will delete it when the mon structure
		 * is placed on the free list.
		 * FIXME: The caller expects to hold the lock after the wait returns, but it
		 * doesn't happen in this case:
		 * http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?FeedbackID=97268
		 */

	/* Regain the lock with the previous nest count */
		regain = mono_monitor_try_enter_internal (obj, INFINITE, TRUE);
			/* interrupted (-1): run the checkpoint and retry the re-entry */
			mono_thread_interruption_checkpoint ();
	} while (regain == -1);

		/* Something went wrong, so throw a
		 * SynchronizationLockException
		 */
		CloseHandle (event);
		mono_set_pending_exception (mono_get_exception_synchronization_lock ("Failed to regain lock"));

	LOCK_DEBUG (g_message ("%s: (%d) Regained %p lock %p", __func__, mono_thread_info_get_small_id (), obj, mon));

	if (ret == WAIT_TIMEOUT) {
		/* Poll the event again, just in case it was signalled
		 * while we were trying to regain the monitor lock
		 */
		MONO_PREPARE_BLOCKING
		ret = WaitForSingleObjectEx (event, 0, FALSE);
		MONO_FINISH_BLOCKING

	/* Pulse will have popped our event from the queue if it signalled
	 * us, so we only do it here if the wait timed out.
	 *
	 * This avoids a race condition where the thread holding the
	 * lock can Pulse several times before the WaitForSingleObject
	 * returns. If we popped the queue here then this event might
	 * be signalled more than once, thereby starving another
	 * thread.
	 */

	if (ret == WAIT_OBJECT_0) {
		LOCK_DEBUG (g_message ("%s: (%d) Success", __func__, mono_thread_info_get_small_id ()));
		LOCK_DEBUG (g_message ("%s: (%d) Wait failed, dequeuing handle %p", __func__, mono_thread_info_get_small_id (), event));
		/* No pulse, so we have to remove ourself from the
		 * wait queue (we hold the monitor again here)
		 */
		mon->wait_list = g_slist_remove (mon->wait_list, event);
	CloseHandle (event);