2 * monitor.c: Monitor locking functions
5 * Dick Porter (dick@ximian.com)
7 * Copyright 2003 Ximian, Inc (http://www.ximian.com)
8 * Copyright 2004-2009 Novell, Inc (http://www.novell.com)
15 #include <mono/metadata/monitor.h>
16 #include <mono/metadata/threads-types.h>
17 #include <mono/metadata/exception.h>
18 #include <mono/metadata/threads.h>
19 #include <mono/io-layer/io-layer.h>
20 #include <mono/metadata/object-internals.h>
21 #include <mono/metadata/class-internals.h>
22 #include <mono/metadata/gc-internal.h>
23 #include <mono/metadata/method-builder.h>
24 #include <mono/metadata/debug-helpers.h>
25 #include <mono/metadata/tabledefs.h>
26 #include <mono/metadata/marshal.h>
27 #include <mono/metadata/profiler-private.h>
28 #include <mono/utils/mono-time.h>
29 #include <mono/utils/mono-semaphore.h>
32 * Pull the list of opcodes
34 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
38 #include "mono/cil/opcode.def"
43 /*#define LOCK_DEBUG(a) do { a; } while (0)*/
47 * The monitor implementation here is based on
48 * http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf and
49 * http://www.research.ibm.com/people/d/dfb/papers/Bacon98Thin.ps
51 * The Dice paper describes a technique for saving lock record space
52 * by returning records to a free list when they become unused. That
53 * sounds like unnecessary complexity to me, though if it becomes
54 * clear that unused lock records are taking up lots of space or we
55 * need to shave more time off by avoiding a malloc then we can always
56 * implement the free list idea later. The timeout parameter to
57 * try_enter voids some of the assumptions about the reference count
58 * field in Dice's implementation too. In his version, the thread
59 * attempting to lock a contended object will block until it succeeds,
60 * so the reference count will never be decremented while an object is
63 * Bacon's thin locks have a fast path that doesn't need a lock record
64 * for the common case of locking an unlocked or shallow-nested
65 * object, but the technique relies on encoding the thread ID in 15
66 * bits (to avoid too much per-object space overhead.) Unfortunately
67 * I don't think it's possible to reliably encode a pthread_t into 15
68 * bits. (The JVM implementation used seems to have a 15-bit
69 * per-thread identifier available.)
71 * This implementation then combines Dice's basic lock model with
72 * Bacon's simplification of keeping a lock record for the lifetime of
/*
 * Inflated monitor record attached to a locked object, plus the arena
 * structure used to allocate these records in batches.
 * NOTE(review): several field declarations are elided in this excerpt
 * (e.g. nest/wait_list/data are referenced elsewhere in this file but
 * not visible here) -- confirm against the full source.
 */
76 struct _MonoThreadsSync
/* ID of the owning thread; 0 when the monitor is unowned (see the CAS in try_enter). */
78 gsize owner; /* thread ID */
80 #ifdef HAVE_MOVING_COLLECTOR
/* Number of threads blocked on entry_sem waiting to acquire this monitor. */
83 volatile gint32 entry_count;
/* Semaphore contended enterers sleep on; created lazily (see the slow path). */
84 MonoSemType *entry_sem;
89 typedef struct _MonitorArray MonitorArray;
/* One arena of monitor records; arenas are chained via a next pointer (elided here). */
91 struct _MonitorArray {
/* Trailing flexible array of monitor records (MONO_ZERO_LEN_ARRAY idiom). */
94 MonoThreadsSync monitors [MONO_ZERO_LEN_ARRAY];
/* Global allocator state: a critical section guards the freelist and the
 * chain of allocated arenas.  array_size is the size of the next arena
 * to allocate (presumably doubled on growth -- lines elided; confirm). */
97 #define mono_monitor_allocator_lock() EnterCriticalSection (&monitor_mutex)
98 #define mono_monitor_allocator_unlock() LeaveCriticalSection (&monitor_mutex)
99 static CRITICAL_SECTION monitor_mutex;
100 static MonoThreadsSync *monitor_freelist;
101 static MonitorArray *monitor_allocated;
102 static int array_size = 16;
/* Fast TLS cache of pthread_self() so GetCurrentThreadId() avoids a call. */
104 #ifdef HAVE_KW_THREAD
105 static __thread gsize tls_pthread_self MONO_TLS_FAST;
109 #ifdef HAVE_KW_THREAD
110 #define GetCurrentThreadId() tls_pthread_self
/* The usual problem: we can't replace GetCurrentThreadId () with a macro because
 * it is in a public header. */
113 * The usual problem: we can't replace GetCurrentThreadId () with a macro because
114 * it is in a public header.
116 #define GetCurrentThreadId() ((gsize)pthread_self ())
/* One-time runtime startup hook: initializes the mutex that protects the
 * monitor allocator's freelist and arena chain. */
121 mono_monitor_init (void)
123 InitializeCriticalSection (&monitor_mutex);
127 mono_monitor_cleanup (void)
129 MonoThreadsSync *mon;
130 /* MonitorArray *marray, *next = NULL; */
132 /*DeleteCriticalSection (&monitor_mutex);*/
134 /* The monitors on the freelist don't have weak links - mark them */
135 for (mon = monitor_freelist; mon; mon = mon->data)
136 mon->wait_list = (gpointer)-1;
138 /* FIXME: This still crashes with sgen (async_read.exe) */
140 for (marray = monitor_allocated; marray; marray = next) {
143 for (i = 0; i < marray->num_monitors; ++i) {
144 mon = &marray->monitors [i];
145 if (mon->wait_list != (gpointer)-1)
146 mono_gc_weak_link_remove (&mon->data);
156 * mono_monitor_init_tls:
158 * Setup TLS variables used by the monitor code for the current thread.
161 mono_monitor_init_tls (void)
/* Populate the per-thread thread-id cache used by the GetCurrentThreadId()
 * macro above; only needed off-Windows with __thread support. */
163 #if !defined(HOST_WIN32) && defined(HAVE_KW_THREAD)
164 tls_pthread_self = pthread_self ();
/* Debug helper for mono_locks_dump: reports whether @mon lives inside one
 * of the allocated arenas.  Return statements are elided in this excerpt. */
169 monitor_is_on_freelist (MonoThreadsSync *mon)
171 MonitorArray *marray;
172 for (marray = monitor_allocated; marray; marray = marray->next) {
/* Pointer-range test: is mon inside this arena's monitors[] array? */
173 if (mon >= marray->monitors && mon < &marray->monitors [marray->num_monitors])
183 * Print a report on stdout of the managed locks currently held by
184 * threads. If @include_untaken is specified, list also inflated locks
186 * This is supposed to be used in debuggers like gdb.
189 mono_locks_dump (gboolean include_untaken)
192 int used = 0, on_freelist = 0, to_recycle = 0, total = 0, num_arrays = 0;
193 MonoThreadsSync *mon;
194 MonitorArray *marray;
/* Count freelist entries (freelist is chained through the data field). */
195 for (mon = monitor_freelist; mon; mon = mon->data)
197 for (marray = monitor_allocated; marray; marray = marray->next) {
198 total += marray->num_monitors;
200 for (i = 0; i < marray->num_monitors; ++i) {
201 mon = &marray->monitors [i];
/* data == NULL marks either the last freelist element or a collected monitor. */
202 if (mon->data == NULL) {
203 if (i < marray->num_monitors - 1)
/* In-use monitor: recover the owning object through the GC weak link. */
206 if (!monitor_is_on_freelist (mon->data)) {
207 MonoObject *holder = mono_gc_weak_link_get (&mon->data);
209 g_print ("Lock %p in object %p held by thread %p, nest level: %d\n",
210 mon, holder, (void*)mon->owner, mon->nest);
212 g_print ("\tWaiting on semaphore %p: %d\n", mon->entry_sem, mon->entry_count);
213 } else if (include_untaken) {
214 g_print ("Lock %p in object %p untaken\n", mon, holder);
221 g_print ("Total locks (in %d array(s)): %d, used: %d, on freelist: %d, to recycle: %d\n",
222 num_arrays, total, used, on_freelist, to_recycle);
225 /* LOCKING: this is called with monitor_mutex held */
/* Returns a monitor record to the freelist when its owning object dies:
 * destroys the entry semaphore, asserts no thread is still waiting, and
 * pushes the record onto monitor_freelist (chained via the data field). */
227 mon_finalize (MonoThreadsSync *mon)
229 LOCK_DEBUG (g_message ("%s: Finalizing sync %p", __func__, mon));
231 if (mon->entry_sem != NULL) {
232 MONO_SEM_DESTROY (mon->entry_sem);
233 g_free (mon->entry_sem);
234 mon->entry_sem = NULL;
236 /* If this isn't empty then something is seriously broken - it
237 * means a thread is still waiting on the object that owned
238 * this lock, but the object has been finalized.
240 g_assert (mon->wait_list == NULL);
242 mon->entry_count = 0;
243 /* owner and nest are set in mon_new, no need to zero them out */
245 mon->data = monitor_freelist;
246 monitor_freelist = mon;
247 mono_perfcounters->gc_sync_blocks--;
250 /* LOCKING: this is called with monitor_mutex held */
/* Allocates a monitor record for an object being locked for the first time.
 * Strategy: (1) pop from the freelist if non-empty; (2) otherwise scan the
 * arenas for records whose weak link was cleared by the GC and recycle
 * them; (3) otherwise allocate a fresh arena and thread it into the
 * freelist.  Initialization of owner/nest happens in elided lines. */
251 static MonoThreadsSync *
254 MonoThreadsSync *new;
256 if (!monitor_freelist) {
257 MonitorArray *marray;
259 /* see if any sync block has been collected */
261 for (marray = monitor_allocated; marray; marray = marray->next) {
262 for (i = 0; i < marray->num_monitors; ++i) {
/* data == NULL here means the GC cleared the weak link: the object died. */
263 if (marray->monitors [i].data == NULL) {
264 new = &marray->monitors [i];
265 if (new->wait_list) {
266 /* Orphaned events left by aborted threads */
267 while (new->wait_list) {
268 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d): Closing orphaned event %d", GetCurrentThreadId (), new->wait_list->data));
269 CloseHandle (new->wait_list->data);
270 new->wait_list = g_slist_remove (new->wait_list, new->wait_list->data);
273 mono_gc_weak_link_remove (&new->data);
/* Push the recycled record onto the freelist. */
274 new->data = monitor_freelist;
275 monitor_freelist = new;
278 /* small perf tweak to avoid scanning all the blocks */
282 /* need to allocate a new array of monitors */
283 if (!monitor_freelist) {
285 LOCK_DEBUG (g_message ("%s: allocating more monitors: %d", __func__, array_size));
/* NOTE(review): sizeof (MonoArray) looks like it should be
 * sizeof (MonitorArray) -- the arena being allocated is a MonitorArray.
 * Harmless only if MonoArray happens to be at least as large; verify. */
286 marray = g_malloc0 (sizeof (MonoArray) + array_size * sizeof (MonoThreadsSync));
287 marray->num_monitors = array_size;
289 /* link into the freelist */
290 for (i = 0; i < marray->num_monitors - 1; ++i) {
291 marray->monitors [i].data = &marray->monitors [i + 1];
293 marray->monitors [i].data = NULL; /* the last one */
294 monitor_freelist = &marray->monitors [0];
295 /* we append the marray instead of prepending so that
296 * the collecting loop above will need to scan smaller arrays first
298 if (!monitor_allocated) {
299 monitor_allocated = marray;
301 last = monitor_allocated;
/* Pop the head of the freelist as the record to hand out. */
309 new = monitor_freelist;
310 monitor_freelist = new->data;
315 mono_perfcounters->gc_sync_blocks++;
320 * Format of the lock word:
321 * thinhash | fathash | data
323 * thinhash is the lower bit: if set data is the shifted hashcode of the object.
324 * fathash is another bit: if set the hash code is stored in the MonoThreadsSync
325 * struct pointed to by data
326 * if neither bit is set and data is non-NULL, data is a MonoThreadsSync
/* Union overlaying the synchronisation pointer with an integer lock word so
 * the two tag bits can be tested/masked (union declaration partly elided). */
330 MonoThreadsSync *sync;
/* Tag bits of the lock word; the hash occupies the bits above HASH_SHIFT. */
334 LOCK_WORD_THIN_HASH = 1,
335 LOCK_WORD_FAT_HASH = 1 << 1,
336 LOCK_WORD_BITS_MASK = 0x3,
337 LOCK_WORD_HASH_SHIFT = 2
/* Objects are at least 8-byte aligned, so the low 3 address bits carry no
 * information and are shifted out before hashing. */
340 #define MONO_OBJECT_ALIGNMENT_SHIFT 3
346 * Calculate a hash code for @obj that is constant while @obj is alive.
349 mono_object_hash (MonoObject* obj)
/* With a moving GC the address cannot serve as the hash, so the hash is
 * computed once and cached either in the lock word (thin) or in the
 * MonoThreadsSync record (fat).  Without a moving GC the address-derived
 * hash at the bottom is sufficient. */
351 #ifdef HAVE_MOVING_COLLECTOR
356 lw.sync = obj->synchronisation;
/* Fast path 1: hash already stored inline in the lock word. */
357 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
358 /*g_print ("fast thin hash %d for obj %p store\n", (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT, obj);*/
359 return (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT;
/* Fast path 2: hash cached in the inflated monitor record. */
361 if (lw.lock_word & LOCK_WORD_FAT_HASH) {
362 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
363 /*g_print ("fast fat hash %d for obj %p store\n", lw.sync->hash_code, obj);*/
364 return lw.sync->hash_code;
367 * while we are inside this function, the GC will keep this object pinned,
368 * since we are in the unmanaged stack. Thanks to this and to the hash
369 * function that depends only on the address, we can ignore the races if
370 * another thread computes the hash at the same time, because it'll end up
371 * with the same value.
/* Knuth-style multiplicative hash of the (pinned) address. */
373 hash = (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
374 /* clear the top bits as they can be discarded */
375 hash &= ~(LOCK_WORD_BITS_MASK << 30);
376 /* no hash flags were set, so it must be a MonoThreadsSync pointer if not NULL */
/* Object already inflated: stash the hash in the monitor and tag as fat. */
378 lw.sync->hash_code = hash;
379 /*g_print ("storing hash code %d for obj %p in sync %p\n", hash, obj, lw.sync);*/
380 lw.lock_word |= LOCK_WORD_FAT_HASH;
381 /* this is safe since we don't deflate locks */
382 obj->synchronisation = lw.sync;
384 /*g_print ("storing thin hash code %d for obj %p\n", hash, obj);*/
/* Not inflated: try to CAS the thin hash directly into the lock word. */
385 lw.lock_word = LOCK_WORD_THIN_HASH | (hash << LOCK_WORD_HASH_SHIFT);
386 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, NULL) == NULL)
388 /*g_print ("failed store\n");*/
389 /* someone set the hash flag or someone inflated the object */
390 lw.sync = obj->synchronisation;
391 if (lw.lock_word & LOCK_WORD_THIN_HASH)
/* Lost the race to an inflation: fall back to caching in the monitor. */
393 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
394 lw.sync->hash_code = hash;
395 lw.lock_word |= LOCK_WORD_FAT_HASH;
396 /* this is safe since we don't deflate locks */
397 obj->synchronisation = lw.sync;
402 * Wang's address-based hash function:
403 * http://www.concentric.net/~Ttwang/tech/addrhash.htm
/* Non-moving GC: the address itself is stable, hash it directly. */
405 return (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
409 /* If allow_interruption==TRUE, the method will be interrupted if abort or suspend
410 * is requested. In this case it returns -1.
/* Core monitor-acquire primitive.  Returns 1 on success, 0 on timeout,
 * -1 when interrupted (only if allow_interruption).  Fast paths: inflate
 * an unlocked object via CAS, CAS into an unowned monitor, or bump the
 * nest count on recursion.  Slow path: wait on the monitor's semaphore
 * with profiler callbacks around the contention.  Many connecting lines
 * are elided in this excerpt. */
413 mono_monitor_try_enter_internal (MonoObject *obj, guint32 ms, gboolean allow_interruption)
415 MonoThreadsSync *mon;
416 gsize id = GetCurrentThreadId ();
418 guint32 then = 0, now, delta;
421 MonoInternalThread *thread;
423 LOCK_DEBUG (g_message("%s: (%d) Trying to lock object %p (%d ms)", __func__, id, obj, ms));
425 if (G_UNLIKELY (!obj)) {
426 mono_raise_exception (mono_get_exception_argument_null ("obj"));
431 mon = obj->synchronisation;
433 /* If the object has never been locked... */
434 if (G_UNLIKELY (mon == NULL)) {
/* Allocate a monitor under the allocator lock, then try to install it. */
435 mono_monitor_allocator_lock ();
437 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, mon, NULL) == NULL) {
438 mono_gc_weak_link_add (&mon->data, obj, FALSE);
439 mono_monitor_allocator_unlock ();
440 /* Successfully locked */
443 #ifdef HAVE_MOVING_COLLECTOR
/* CAS failed: another thread stored a thin hash or inflated the object. */
445 lw.sync = obj->synchronisation;
446 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
447 MonoThreadsSync *oldlw = lw.sync;
448 /* move the already calculated hash */
449 mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
451 lw.lock_word |= LOCK_WORD_FAT_HASH;
452 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
453 mono_gc_weak_link_add (&mon->data, obj, FALSE);
454 mono_monitor_allocator_unlock ();
455 /* Successfully locked */
/* Lost again: give the record back and retry from the top. */
459 mono_monitor_allocator_unlock ();
462 } else if (lw.lock_word & LOCK_WORD_FAT_HASH) {
464 mono_monitor_allocator_unlock ();
465 /* get the old lock without the fat hash bit */
466 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
470 mono_monitor_allocator_unlock ();
471 mon = obj->synchronisation;
475 mono_monitor_allocator_unlock ();
476 mon = obj->synchronisation;
/* Object carries a thin hash but no monitor yet: inflate, preserving the hash. */
480 #ifdef HAVE_MOVING_COLLECTOR
483 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
484 MonoThreadsSync *oldlw = lw.sync;
485 mono_monitor_allocator_lock ();
487 /* move the already calculated hash */
488 mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
490 lw.lock_word |= LOCK_WORD_FAT_HASH;
491 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
492 mono_gc_weak_link_add (&mon->data, obj, TRUE);
493 mono_monitor_allocator_unlock ();
494 /* Successfully locked */
498 mono_monitor_allocator_unlock ();
/* Strip tag bits to recover the MonoThreadsSync pointer. */
505 #ifdef HAVE_MOVING_COLLECTOR
509 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
514 /* If the object has previously been locked but isn't now... */
516 /* This case differs from Dice's case 3 because we don't
517 * deflate locks or cache unused lock records
519 if (G_LIKELY (mon->owner == 0)) {
520 /* Try to install our ID in the owner field, nest
521 * should have been left at 1 by the previous unlock
524 if (G_LIKELY (InterlockedCompareExchangePointer ((gpointer *)&mon->owner, (gpointer)id, 0) == 0)) {
526 g_assert (mon->nest == 1);
534 /* If the object is currently locked by this thread... */
535 if (mon->owner == id) {
540 /* The object must be locked by someone else... */
541 mono_perfcounters->thread_contentions++;
543 /* If ms is 0 we don't block, but just fail straight away */
545 LOCK_DEBUG (g_message ("%s: (%d) timed out, returning FALSE", __func__, id));
549 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_CONTENTION);
551 /* The slow path begins here. */
553 /* a small amount of duplicated code, but it allows us to insert the profiler
554 * callbacks without impacting the fast path: from here on we don't need to go back to the
555 * retry label, but to retry_contended. At this point mon is already installed in the object
558 /* This case differs from Dice's case 3 because we don't
559 * deflate locks or cache unused lock records
561 if (G_LIKELY (mon->owner == 0)) {
562 /* Try to install our ID in the owner field, nest
563 * should have been left at 1 by the previous unlock
566 if (G_LIKELY (InterlockedCompareExchangePointer ((gpointer *)&mon->owner, (gpointer)id, 0) == 0)) {
568 g_assert (mon->nest == 1);
569 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_DONE);
574 /* If the object is currently locked by this thread... */
575 if (mon->owner == id) {
577 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_DONE);
581 /* We need to make sure there's a semaphore handle (creating it if
582 * necessary), and block on it
584 if (mon->entry_sem == NULL) {
585 /* Create the semaphore */
586 sem = g_new0 (MonoSemType, 1);
587 MONO_SEM_INIT (sem, 0);
588 g_assert (sem != NULL);
/* Publish it with a CAS; if another thread won, discard ours. */
589 if (InterlockedCompareExchangePointer ((gpointer*)&mon->entry_sem, sem, NULL) != NULL) {
590 /* Someone else just put a handle here */
591 MONO_SEM_DESTROY (sem);
596 /* If we need to time out, record a timestamp and adjust ms,
597 * because WaitForSingleObject doesn't tell us how long it
600 * Don't block forever here, because theres a chance the owner
601 * thread released the lock while we were creating the
602 * semaphore: we would not get the wakeup. Using the event
603 * handle technique from pulse/wait would involve locking the
604 * lock struct and therefore slowing down the fast path.
606 if (ms != INFINITE) {
607 then = mono_msec_ticks ();
/* Advertise ourselves as a waiter before sleeping on the semaphore. */
617 InterlockedIncrement (&mon->entry_count);
619 mono_perfcounters->thread_queue_len++;
620 mono_perfcounters->thread_queue_max++;
621 thread = mono_thread_internal_current ();
623 mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
626 * We pass TRUE instead of allow_interruption since we have to check for the
627 * StopRequested case below.
629 ret = MONO_SEM_TIMEDWAIT_ALERTABLE(mon->entry_sem, waitms, TRUE);
631 mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
633 InterlockedDecrement (&mon->entry_count);
634 mono_perfcounters->thread_queue_len--;
/* Recompute the remaining timeout, handling tick-counter wraparound. */
636 if (ms != INFINITE) {
637 now = mono_msec_ticks ();
640 /* The counter must have wrapped around */
641 LOCK_DEBUG (g_message ("%s: wrapped around! now=0x%x then=0x%x", __func__, now, then));
643 now += (0xffffffff - then);
646 LOCK_DEBUG (g_message ("%s: wrap rejig: now=0x%x then=0x%x delta=0x%x", __func__, now, then, now-then));
/* Time left and not (interruptibly) interrupted: contend again. */
656 if ((ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) && ms > 0) {
658 goto retry_contended;
661 if (ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) {
662 if (ret == WAIT_IO_COMPLETION && (mono_thread_test_state (mono_thread_internal_current (), (ThreadState_StopRequested|ThreadState_SuspendRequested)))) {
664 * We have to obey a stop/suspend request even if
665 * allow_interruption is FALSE to avoid hangs at shutdown.
667 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_FAIL);
670 /* Infinite wait, so just try again */
671 goto retry_contended;
675 if (ret == WAIT_OBJECT_0) {
676 /* retry from the top */
677 goto retry_contended;
680 /* We must have timed out */
681 LOCK_DEBUG (g_message ("%s: (%d) timed out waiting, returning FALSE", __func__, id));
683 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_FAIL);
685 if (ret == WAIT_IO_COMPLETION)
/* Blocking acquire: wait forever, non-interruptible; TRUE iff acquired. */
692 mono_monitor_enter (MonoObject *obj)
694 return mono_monitor_try_enter_internal (obj, INFINITE, FALSE) == 1;
/* Timed acquire: wait at most @ms milliseconds; TRUE iff acquired. */
698 mono_monitor_try_enter (MonoObject *obj, guint32 ms)
700 return mono_monitor_try_enter_internal (obj, ms, FALSE) == 1;
/* Release the monitor held on @obj by the current thread.  Decrements the
 * nest count; on the final release clears owner (leaving nest at 1 for the
 * next acquirer) and wakes one semaphore waiter if any are queued.
 * Ownership-check failure handling is partly elided in this excerpt. */
704 mono_monitor_exit (MonoObject *obj)
706 MonoThreadsSync *mon;
709 LOCK_DEBUG (g_message ("%s: (%d) Unlocking %p", __func__, GetCurrentThreadId (), obj));
711 if (G_UNLIKELY (!obj)) {
712 mono_raise_exception (mono_get_exception_argument_null ("obj"));
716 mon = obj->synchronisation;
718 #ifdef HAVE_MOVING_COLLECTOR
/* A thin-hash lock word means the object was never locked. */
722 if (lw.lock_word & LOCK_WORD_THIN_HASH)
724 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
728 if (G_UNLIKELY (mon == NULL)) {
729 /* No one ever used Enter. Just ignore the Exit request as MS does */
732 if (G_UNLIKELY (mon->owner != GetCurrentThreadId ())) {
736 nest = mon->nest - 1;
738 LOCK_DEBUG (g_message ("%s: (%d) Object %p is now unlocked", __func__, GetCurrentThreadId (), obj));
740 /* object is now unlocked, leave nest==1 so we don't
741 * need to set it when the lock is reacquired
745 /* Do the wakeup stuff. It's possible that the last
746 * blocking thread gave up waiting just before we
747 * release the semaphore resulting in a futile wakeup
748 * next time there's contention for this object, but
749 * it means we don't have to waste time locking the
752 if (mon->entry_count > 0) {
753 MONO_SEM_POST (mon->entry_sem);
756 LOCK_DEBUG (g_message ("%s: (%d) Object %p is now locked %d times", __func__, GetCurrentThreadId (), obj, nest));
/* Returns the address of the monitor's GC weak link for @object, or (per
 * elided lines, presumably NULL) when the object has no inflated monitor.
 * A thin-hash lock word carries no monitor, so it yields no link. */
762 mono_monitor_get_object_monitor_weak_link (MonoObject *object)
765 MonoThreadsSync *sync = NULL;
767 lw.sync = object->synchronisation;
768 if (lw.lock_word & LOCK_WORD_FAT_HASH) {
/* Fat hash: strip the tag bits to recover the monitor pointer. */
769 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
771 } else if (!(lw.lock_word & LOCK_WORD_THIN_HASH)) {
/* Only monitors with a live weak link (data set) qualify. */
775 if (sync && sync->data)
/* IL-emission helper shared by the fast Enter/Exit wrappers: emits a null
 * check on the object argument and loads obj->synchronisation into
 * @syncp_loc, branching on its truthiness per @branch_on_true.  The two
 * out-parameters receive branch patch tokens for the caller to resolve. */
781 emit_obj_syncp_check (MonoMethodBuilder *mb, int syncp_loc, int *obj_null_branch, int *syncp_true_false_branch,
782 gboolean branch_on_true)
789 mono_mb_emit_byte (mb, CEE_LDARG_0);
790 *obj_null_branch = mono_mb_emit_short_branch (mb, CEE_BRFALSE_S);
/* Pseudo-IL sketch of the sequence emitted below (stack on the right): */
795 ldc.i4 G_STRUCT_OFFSET(MonoObject, synchronisation) objp off
800 brtrue/false.s syncp_true_false
803 mono_mb_emit_byte (mb, CEE_LDARG_0);
804 mono_mb_emit_byte (mb, CEE_CONV_I);
805 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoObject, synchronisation));
806 mono_mb_emit_byte (mb, CEE_ADD);
807 mono_mb_emit_byte (mb, CEE_LDIND_I);
808 mono_mb_emit_stloc (mb, syncp_loc);
809 mono_mb_emit_ldloc (mb, syncp_loc);
810 *syncp_true_false_branch = mono_mb_emit_short_branch (mb, branch_on_true ? CEE_BRTRUE_S : CEE_BRFALSE_S);
/* Builds (once, cached) an IL wrapper implementing the Monitor.Enter fast
 * path: CAS the current tid into syncp->owner when unowned, bump nest on
 * recursion, and fall back to the real Monitor.Enter on any other case
 * (null obj, no syncp, contention).  Returns NULL-ish early when the
 * thread TLS offset is unavailable (exact elided lines not visible). */
814 mono_monitor_get_fast_enter_method (MonoMethod *monitor_enter_method)
816 static MonoMethod *fast_monitor_enter;
817 static MonoMethod *compare_exchange_method;
819 MonoMethodBuilder *mb;
820 int obj_null_branch, syncp_null_branch, has_owner_branch, other_owner_branch, tid_branch;
821 int tid_loc, syncp_loc, owner_loc;
822 int thread_tls_offset;
824 #ifdef HAVE_MOVING_COLLECTOR
828 thread_tls_offset = mono_thread_get_tls_offset ();
829 if (thread_tls_offset == -1)
/* Cached after the first successful build. */
832 if (fast_monitor_enter)
833 return fast_monitor_enter;
/* Resolve Interlocked.CompareExchange(intptr&,intptr,intptr) once. */
835 if (!compare_exchange_method) {
836 MonoMethodDesc *desc;
839 desc = mono_method_desc_new ("Interlocked:CompareExchange(intptr&,intptr,intptr)", FALSE);
840 class = mono_class_from_name (mono_defaults.corlib, "System.Threading", "Interlocked");
841 compare_exchange_method = mono_method_desc_search_in_class (desc, class);
842 mono_method_desc_free (desc);
844 if (!compare_exchange_method)
848 mb = mono_mb_new (mono_defaults.monitor_class, "FastMonitorEnter", MONO_WRAPPER_UNKNOWN);
850 mb->method->slot = -1;
851 mb->method->flags = METHOD_ATTRIBUTE_PUBLIC | METHOD_ATTRIBUTE_STATIC |
852 METHOD_ATTRIBUTE_HIDE_BY_SIG | METHOD_ATTRIBUTE_FINAL;
854 tid_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
855 syncp_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
856 owner_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
858 emit_obj_syncp_check (mb, syncp_loc, &obj_null_branch, &syncp_null_branch, FALSE);
/* Pseudo-IL sketch: load current thread's tid from TLS, then syncp->owner. */
861 mono. tls thread_tls_offset threadp
862 ldc.i4 G_STRUCT_OFFSET(MonoThread, tid) threadp off
867 ldc.i4 G_STRUCT_OFFSET(MonoThreadsSync, owner) syncp off
875 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
876 mono_mb_emit_byte (mb, CEE_MONO_TLS);
877 mono_mb_emit_i4 (mb, thread_tls_offset);
878 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoInternalThread, tid));
879 mono_mb_emit_byte (mb, CEE_ADD);
880 mono_mb_emit_byte (mb, CEE_LDIND_I);
881 mono_mb_emit_stloc (mb, tid_loc);
882 mono_mb_emit_ldloc (mb, syncp_loc);
883 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, owner));
884 mono_mb_emit_byte (mb, CEE_ADD);
885 mono_mb_emit_byte (mb, CEE_LDIND_I);
886 mono_mb_emit_stloc (mb, owner_loc);
887 mono_mb_emit_ldloc (mb, owner_loc);
888 tid_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);
/* Unowned: CompareExchange(&syncp->owner, tid, 0); non-zero result means
 * someone else grabbed it first. */
892 ldc.i4 G_STRUCT_OFFSET(MonoThreadsSync, owner) syncp off
895 ldc.i4 0 &owner tid 0
896 call System.Threading.Interlocked.CompareExchange oldowner
901 mono_mb_emit_ldloc (mb, syncp_loc);
902 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, owner));
903 mono_mb_emit_byte (mb, CEE_ADD);
904 mono_mb_emit_ldloc (mb, tid_loc);
905 mono_mb_emit_byte (mb, CEE_LDC_I4_0);
906 mono_mb_emit_managed_call (mb, compare_exchange_method, NULL);
907 has_owner_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);
908 mono_mb_emit_byte (mb, CEE_RET);
/* Recursive acquire by the same thread: nest++ and return. */
916 ldc.i4 G_STRUCT_OFFSET(MonoThreadsSync, nest) syncp off
920 ldc.i4 1 &nest nest 1
926 mono_mb_patch_short_branch (mb, tid_branch);
927 mono_mb_emit_ldloc (mb, owner_loc);
928 mono_mb_emit_ldloc (mb, tid_loc);
929 other_owner_branch = mono_mb_emit_short_branch (mb, CEE_BNE_UN_S);
930 mono_mb_emit_ldloc (mb, syncp_loc);
931 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, nest));
932 mono_mb_emit_byte (mb, CEE_ADD);
933 mono_mb_emit_byte (mb, CEE_DUP);
934 mono_mb_emit_byte (mb, CEE_LDIND_I4);
935 mono_mb_emit_byte (mb, CEE_LDC_I4_1);
936 mono_mb_emit_byte (mb, CEE_ADD);
937 mono_mb_emit_byte (mb, CEE_STIND_I4);
938 mono_mb_emit_byte (mb, CEE_RET);
/* All slow cases funnel into the real managed Monitor.Enter. */
941 obj_null, syncp_null, has_owner, other_owner:
943 call System.Threading.Monitor.Enter
947 mono_mb_patch_short_branch (mb, obj_null_branch);
948 mono_mb_patch_short_branch (mb, syncp_null_branch);
949 mono_mb_patch_short_branch (mb, has_owner_branch);
950 mono_mb_patch_short_branch (mb, other_owner_branch);
951 mono_mb_emit_byte (mb, CEE_LDARG_0);
952 mono_mb_emit_managed_call (mb, monitor_enter_method, NULL);
953 mono_mb_emit_byte (mb, CEE_RET);
955 fast_monitor_enter = mono_mb_create_method (mb, mono_signature_no_pinvoke (monitor_enter_method), 5);
958 return fast_monitor_enter;
/* Builds (once, cached) an IL wrapper implementing the Monitor.Exit fast
 * path: verify the caller owns the monitor, decrement nest or -- on the
 * final release with no waiters -- clear owner; otherwise fall back to
 * the real Monitor.Exit.  Mirrors the fast-enter builder above. */
962 mono_monitor_get_fast_exit_method (MonoMethod *monitor_exit_method)
964 static MonoMethod *fast_monitor_exit;
966 MonoMethodBuilder *mb;
967 int obj_null_branch, has_waiting_branch, has_syncp_branch, owned_branch, nested_branch;
968 int thread_tls_offset;
971 #ifdef HAVE_MOVING_COLLECTOR
975 thread_tls_offset = mono_thread_get_tls_offset ();
976 if (thread_tls_offset == -1)
979 if (fast_monitor_exit)
980 return fast_monitor_exit;
982 mb = mono_mb_new (mono_defaults.monitor_class, "FastMonitorExit", MONO_WRAPPER_UNKNOWN);
984 mb->method->slot = -1;
985 mb->method->flags = METHOD_ATTRIBUTE_PUBLIC | METHOD_ATTRIBUTE_STATIC |
986 METHOD_ATTRIBUTE_HIDE_BY_SIG | METHOD_ATTRIBUTE_FINAL;
988 syncp_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
990 emit_obj_syncp_check (mb, syncp_loc, &obj_null_branch, &has_syncp_branch, TRUE);
/* No syncp at all: Exit without Enter is a no-op (matches MS behavior). */
996 mono_mb_emit_byte (mb, CEE_RET);
/* Pseudo-IL sketch: compare syncp->owner against the current tid from TLS. */
1001 ldc.i4 G_STRUCT_OFFSET(MonoThreadsSync, owner) syncp off
1004 mono. tls thread_tls_offset owner threadp
1005 ldc.i4 G_STRUCT_OFFSET(MonoThread, tid) owner threadp off
1011 mono_mb_patch_short_branch (mb, has_syncp_branch);
1012 mono_mb_emit_ldloc (mb, syncp_loc);
1013 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, owner));
1014 mono_mb_emit_byte (mb, CEE_ADD);
1015 mono_mb_emit_byte (mb, CEE_LDIND_I);
1016 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
1017 mono_mb_emit_byte (mb, CEE_MONO_TLS);
1018 mono_mb_emit_i4 (mb, thread_tls_offset);
1019 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoInternalThread, tid));
1020 mono_mb_emit_byte (mb, CEE_ADD);
1021 mono_mb_emit_byte (mb, CEE_LDIND_I);
1022 owned_branch = mono_mb_emit_short_branch (mb, CEE_BEQ_S);
/* Not the owner: return (slow-path handling of the error is elsewhere). */
1028 mono_mb_emit_byte (mb, CEE_RET);
/* Owner path: inspect nest; > 1 means a nested release. */
1033 ldc.i4 G_STRUCT_OFFSET(MonoThreadsSync, nest) syncp off
1038 ldc.i4 1 &nest nest nest 1
1039 bgt.un.s nested &nest nest
1042 mono_mb_patch_short_branch (mb, owned_branch);
1043 mono_mb_emit_ldloc (mb, syncp_loc);
1044 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, nest));
1045 mono_mb_emit_byte (mb, CEE_ADD);
1046 mono_mb_emit_byte (mb, CEE_DUP);
1047 mono_mb_emit_byte (mb, CEE_LDIND_I4);
1048 mono_mb_emit_byte (mb, CEE_DUP);
1049 mono_mb_emit_byte (mb, CEE_LDC_I4_1);
1050 nested_branch = mono_mb_emit_short_branch (mb, CEE_BGT_UN_S);
/* Final release: check entry_count; any waiter forces the slow path. */
1056 ldc.i4 G_STRUCT_OFFSET(MonoThreadsSync, entry_count) syncp off
1059 brtrue.s has_waiting
1062 mono_mb_emit_byte (mb, CEE_POP);
1063 mono_mb_emit_byte (mb, CEE_POP);
1064 mono_mb_emit_ldloc (mb, syncp_loc);
1065 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, entry_count));
1066 mono_mb_emit_byte (mb, CEE_ADD);
1067 mono_mb_emit_byte (mb, CEE_LDIND_I4);
1068 has_waiting_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);
/* No waiters: store NULL into syncp->owner and return. */
1072 ldc.i4 G_STRUCT_OFFSET(MonoThreadsSync, owner) syncp off
1079 mono_mb_emit_ldloc (mb, syncp_loc);
1080 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, owner));
1081 mono_mb_emit_byte (mb, CEE_ADD);
1082 mono_mb_emit_byte (mb, CEE_LDNULL);
1083 mono_mb_emit_byte (mb, CEE_STIND_I);
1084 mono_mb_emit_byte (mb, CEE_RET);
/* Nested release: nest-- and return. */
1088 ldc.i4 1 &nest nest 1
1094 mono_mb_patch_short_branch (mb, nested_branch);
1095 mono_mb_emit_byte (mb, CEE_LDC_I4_1);
1096 mono_mb_emit_byte (mb, CEE_SUB);
1097 mono_mb_emit_byte (mb, CEE_STIND_I4);
1098 mono_mb_emit_byte (mb, CEE_RET);
/* Remaining cases funnel into the real managed Monitor.Exit. */
1101 obj_null, has_waiting:
1103 call System.Threading.Monitor.Exit
1107 mono_mb_patch_short_branch (mb, obj_null_branch);
1108 mono_mb_patch_short_branch (mb, has_waiting_branch);
1109 mono_mb_emit_byte (mb, CEE_LDARG_0);
1110 mono_mb_emit_managed_call (mb, monitor_exit_method, NULL);
1111 mono_mb_emit_byte (mb, CEE_RET);
1113 fast_monitor_exit = mono_mb_create_method (mb, mono_signature_no_pinvoke (monitor_exit_method), 5);
1116 return fast_monitor_exit;
/* Dispatch by method name: hand back the fast IL wrapper for
 * Monitor.Enter or Monitor.Exit; any other name is a caller bug. */
1120 mono_monitor_get_fast_path (MonoMethod *enter_or_exit)
1122 if (strcmp (enter_or_exit->name, "Enter") == 0)
1123 return mono_monitor_get_fast_enter_method (enter_or_exit);
1124 if (strcmp (enter_or_exit->name, "Exit") == 0)
1125 return mono_monitor_get_fast_exit_method (enter_or_exit);
1126 g_assert_not_reached ();
1131 * mono_monitor_threads_sync_member_offset:
1132 * @owner_offset: returns size and offset of the "owner" member
1133 * @nest_offset: returns size and offset of the "nest" member
1134 * @entry_count_offset: returns size and offset of the "entry_count" member
1136 * Returns the offsets and sizes of three members of the
1137 * MonoThreadsSync struct. The Monitor ASM fastpaths need this.
1140 mono_monitor_threads_sync_members_offset (int *owner_offset, int *nest_offset, int *entry_count_offset)
/* Pack (offset << 8) | size into a single int per member. */
1144 #define ENCODE_OFF_SIZE(o,s) (((o) << 8) | ((s) & 0xff))
1146 *owner_offset = ENCODE_OFF_SIZE (G_STRUCT_OFFSET (MonoThreadsSync, owner), sizeof (ts.owner));
1147 *nest_offset = ENCODE_OFF_SIZE (G_STRUCT_OFFSET (MonoThreadsSync, nest), sizeof (ts.nest));
1148 *entry_count_offset = ENCODE_OFF_SIZE (G_STRUCT_OFFSET (MonoThreadsSync, entry_count), sizeof (ts.entry_count));
/* Managed icall for Monitor.TryEnter: retries the interruptible acquire,
 * running the thread-interruption checkpoint after each -1 (interrupted)
 * result, until it either succeeds or times out. */
1152 ves_icall_System_Threading_Monitor_Monitor_try_enter (MonoObject *obj, guint32 ms)
1157 res = mono_monitor_try_enter_internal (obj, ms, TRUE);
1159 mono_thread_interruption_checkpoint ();
1160 } while (res == -1);
/* Managed icall for Monitor.TryEnter(obj, ms, ref lockTaken): same retry
 * loop as try_enter above, then records success through *lockTaken so the
 * caller observes the outcome atomically with respect to interruption. */
1166 ves_icall_System_Threading_Monitor_Monitor_try_enter_with_atomic_var (MonoObject *obj, guint32 ms, char *lockTaken)
1170 res = mono_monitor_try_enter_internal (obj, ms, TRUE);
1171 /*This means we got interrupted during the wait and didn't get the monitor.*/
1173 mono_thread_interruption_checkpoint ();
1174 } while (res == -1);
1175 /*It's safe to do it from here since interruption would happen only on the wrapper.*/
1176 *lockTaken = res == 1;
/* Managed icall: is @obj's monitor currently owned by the calling thread?
 * A thin-hash or NULL lock word means it was never locked (elided branches
 * presumably return FALSE -- confirm against the full source). */
1180 ves_icall_System_Threading_Monitor_Monitor_test_owner (MonoObject *obj)
1182 MonoThreadsSync *mon;
1184 LOCK_DEBUG (g_message ("%s: Testing if %p is owned by thread %d", __func__, obj, GetCurrentThreadId()));
1186 mon = obj->synchronisation;
1187 #ifdef HAVE_MOVING_COLLECTOR
1191 if (lw.lock_word & LOCK_WORD_THIN_HASH)
1193 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
1201 if(mon->owner==GetCurrentThreadId ()) {
/* Managed icall: is @obj's monitor currently owned by ANY thread?
 * Same lock-word decoding as test_owner above; owner != 0 means held. */
1209 ves_icall_System_Threading_Monitor_Monitor_test_synchronised (MonoObject *obj)
1211 MonoThreadsSync *mon;
1213 LOCK_DEBUG (g_message("%s: (%d) Testing if %p is owned by any thread", __func__, GetCurrentThreadId (), obj));
1215 mon = obj->synchronisation;
1216 #ifdef HAVE_MOVING_COLLECTOR
1220 if (lw.lock_word & LOCK_WORD_THIN_HASH)
1222 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
1230 if (mon->owner != 0) {
1237 /* All wait list manipulation in the pulse, pulseall and wait
1238 * functions happens while the monitor lock is held, so we don't need
1239 * any extra struct locking
/* Managed icall for Monitor.Pulse: validates that the caller holds the
 * monitor, then signals and dequeues the first waiter's event (if any). */
1243 ves_icall_System_Threading_Monitor_Monitor_pulse (MonoObject *obj)
1245 MonoThreadsSync *mon;
1247 LOCK_DEBUG (g_message ("%s: (%d) Pulsing %p", __func__, GetCurrentThreadId (), obj));
1249 mon = obj->synchronisation;
1250 #ifdef HAVE_MOVING_COLLECTOR
/* A thin-hash lock word means the object was never locked at all. */
1254 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
1255 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
1258 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
1263 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
1266 if (mon->owner != GetCurrentThreadId ()) {
1267 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
1271 LOCK_DEBUG (g_message ("%s: (%d) %d threads waiting", __func__, GetCurrentThreadId (), g_slist_length (mon->wait_list)));
1273 if (mon->wait_list != NULL) {
1274 LOCK_DEBUG (g_message ("%s: (%d) signalling and dequeuing handle %p", __func__, GetCurrentThreadId (), mon->wait_list->data));
1276 SetEvent (mon->wait_list->data);
1277 mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
/* Icall backing Monitor.PulseAll(): identical validation to pulse
 * above, but drains the ENTIRE wait list, signalling every queued
 * event so all waiters wake and contend for the lock.
 * Raises SynchronizationLockException when obj is not locked, or is
 * locked by a different thread. */
1282 ves_icall_System_Threading_Monitor_Monitor_pulse_all (MonoObject *obj)
1284 MonoThreadsSync *mon;
1286 LOCK_DEBUG (g_message("%s: (%d) Pulsing all %p", __func__, GetCurrentThreadId (), obj));
1288 mon = obj->synchronisation;
1289 #ifdef HAVE_MOVING_COLLECTOR
/* A THIN_HASH-tagged sync word holds only a hash code, so the object
 * cannot be locked: PulseAll is an error here. */
1293 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
1294 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
/* Strip the tag bits to recover the MonoThreadsSync pointer. */
1297 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
1302 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
/* Only the owning thread may pulse. */
1305 if (mon->owner != GetCurrentThreadId ()) {
1306 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
1310 LOCK_DEBUG (g_message ("%s: (%d) %d threads waiting", __func__, GetCurrentThreadId (), g_slist_length (mon->wait_list)));
/* Unlike pulse: loop until the list is empty, waking every waiter. */
1312 while (mon->wait_list != NULL) {
1313 LOCK_DEBUG (g_message ("%s: (%d) signalling and dequeuing handle %p", __func__, GetCurrentThreadId (), mon->wait_list->data));
1315 SetEvent (mon->wait_list->data);
1316 mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
/* Icall backing Monitor.Wait(obj, ms): validates ownership, creates a
 * private auto-reset event, appends it to the monitor's wait list,
 * releases the monitor, waits (alertably) on the event up to ms
 * milliseconds, then re-acquires the monitor and reports success.
 * Raises SynchronizationLockException when obj is not locked, is
 * locked by another thread, or when the event/lock machinery fails.
 * NOTE(review): this view elides some original lines, and the
 * function continues past the last line shown (the final return is
 * not visible). */
1321 ves_icall_System_Threading_Monitor_Monitor_wait (MonoObject *obj, guint32 ms)
1323 MonoThreadsSync *mon;
1327 gboolean success = FALSE;
1329 MonoInternalThread *thread = mono_thread_internal_current ();
1331 LOCK_DEBUG (g_message ("%s: (%d) Trying to wait for %p with timeout %dms", __func__, GetCurrentThreadId (), obj, ms));
1333 mon = obj->synchronisation;
1334 #ifdef HAVE_MOVING_COLLECTOR
/* A THIN_HASH-tagged sync word holds only a hash code, so the object
 * cannot be locked: Wait is an error here. */
1338 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
1339 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
/* Strip the tag bits to recover the MonoThreadsSync pointer. */
1342 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
1347 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
/* Only the owning thread may wait. */
1350 if (mon->owner != GetCurrentThreadId ()) {
1351 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
1355 /* Do this WaitSleepJoin check before creating the event handle */
1356 mono_thread_current_check_pending_interrupt ();
/* Per-call private auto-reset event (bManualReset=FALSE): exactly one
 * wait is released per SetEvent from pulse/pulse_all. */
1358 event = CreateEvent (NULL, FALSE, FALSE, NULL);
1359 if (event == NULL) {
1360 mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to set up wait event"));
1364 LOCK_DEBUG (g_message ("%s: (%d) queuing handle %p", __func__, GetCurrentThreadId (), event));
1366 mono_thread_current_check_pending_interrupt ();
1368 mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
/* Queue ourselves while we still hold the monitor (wait-list
 * manipulation is protected by the monitor itself). */
1370 mono_wait_list = g_slist_append (mon->wait_list, event);
1372 /* Save the nest count, and release the lock */
1375 mono_monitor_exit (obj);
1377 LOCK_DEBUG (g_message ("%s: (%d) Unlocked %p lock %p", __func__, GetCurrentThreadId (), obj, mon));
1379 /* There's no race between unlocking mon and waiting for the
1380 * event, because auto reset events are sticky, and this event
1381 * is private to this thread. Therefore even if the event was
1382 * signalled before we wait, we still succeed.
/* Alertable wait so APCs/thread interruption can break in. */
1384 ret = WaitForSingleObjectEx (event, ms, TRUE);
1386 /* Reset the thread state fairly early, so we don't have to worry
1387 * about the monitor error checking
1389 mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
1391 if (mono_thread_interruption_requested ()) {
1393 * Can't remove the event from wait_list, since the monitor is not locked by
1394 * us. So leave it there, mon_new () will delete it when the mon structure
1395 * is placed on the free list.
1396 * FIXME: The caller expects to hold the lock after the wait returns, but it
1397 * doesn't happen in this case:
1398 * http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?FeedbackID=97268
1403 /* Regain the lock with the previous nest count */
/* Retry indefinitely (-1 == interrupted), checkpointing each time. */
1405 regain = mono_monitor_try_enter_internal (obj, INFINITE, TRUE);
1407 mono_thread_interruption_checkpoint ();
1408 } while (regain == -1);
1411 /* Something went wrong, so throw a
1412 * SynchronizationLockException
1414 CloseHandle (event);
1415 mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to regain lock"));
1421 LOCK_DEBUG (g_message ("%s: (%d) Regained %p lock %p", __func__, GetCurrentThreadId (), obj, mon));
1423 if (ret == WAIT_TIMEOUT) {
1424 /* Poll the event again, just in case it was signalled
1425 * while we were trying to regain the monitor lock
/* Zero timeout, non-alertable: a pure poll. */
1427 ret = WaitForSingleObjectEx (event, 0, FALSE);
1430 /* Pulse will have popped our event from the queue if it signalled
1431 * us, so we only do it here if the wait timed out.
1433 * This avoids a race condition where the thread holding the
1434 * lock can Pulse several times before the WaitForSingleObject
1435 * returns. If we popped the queue here then this event might
1436 * be signalled more than once, thereby starving another
1440 if (ret == WAIT_OBJECT_0) {
1441 LOCK_DEBUG (g_message ("%s: (%d) Success", __func__, GetCurrentThreadId ()));
1444 LOCK_DEBUG (g_message ("%s: (%d) Wait failed, dequeuing handle %p", __func__, GetCurrentThreadId (), event));
1445 /* No pulse, so we have to remove ourself from the
/* Safe to touch wait_list again: the monitor was re-acquired above. */
1448 mon->wait_list = g_slist_remove (mon->wait_list, event);
/* The event is per-call; always release the handle before returning. */
1450 CloseHandle (event);