2 * monitor.c: Monitor locking functions
5 * Dick Porter (dick@ximian.com)
7 * Copyright 2003 Ximian, Inc (http://www.ximian.com)
8 * Copyright 2004-2009 Novell, Inc (http://www.novell.com)
15 #include <mono/metadata/monitor.h>
16 #include <mono/metadata/threads-types.h>
17 #include <mono/metadata/exception.h>
18 #include <mono/metadata/threads.h>
19 #include <mono/io-layer/io-layer.h>
20 #include <mono/metadata/object-internals.h>
21 #include <mono/metadata/class-internals.h>
22 #include <mono/metadata/gc-internal.h>
23 #include <mono/metadata/method-builder.h>
24 #include <mono/metadata/debug-helpers.h>
25 #include <mono/metadata/tabledefs.h>
26 #include <mono/metadata/marshal.h>
27 #include <mono/metadata/profiler-private.h>
28 #include <mono/utils/mono-time.h>
31 * Pull the list of opcodes
33 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
37 #include "mono/cil/opcode.def"
42 /*#define LOCK_DEBUG(a) do { a; } while (0)*/
46 * The monitor implementation here is based on
47 * http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf and
48 * http://www.research.ibm.com/people/d/dfb/papers/Bacon98Thin.ps
50 * The Dice paper describes a technique for saving lock record space
51 * by returning records to a free list when they become unused. That
52 * sounds like unnecessary complexity to me, though if it becomes
53 * clear that unused lock records are taking up lots of space or we
54 * need to shave more time off by avoiding a malloc then we can always
55 * implement the free list idea later. The timeout parameter to
56 * try_enter voids some of the assumptions about the reference count
57 * field in Dice's implementation too. In his version, the thread
58 * attempting to lock a contended object will block until it succeeds,
59 * so the reference count will never be decremented while an object is
62 * Bacon's thin locks have a fast path that doesn't need a lock record
63 * for the common case of locking an unlocked or shallow-nested
64 * object, but the technique relies on encoding the thread ID in 15
65 * bits (to avoid too much per-object space overhead.) Unfortunately
66 * I don't think it's possible to reliably encode a pthread_t into 15
67 * bits. (The JVM implementation used seems to have a 15-bit
68 * per-thread identifier available.)
70 * This implementation then combines Dice's basic lock model with
71 * Bacon's simplification of keeping a lock record for the lifetime of
/*
 * MonoThreadsSync: the inflated lock record ("sync block") attached to a
 * managed object via obj->synchronisation.
 * NOTE(review): this fragment shows only some fields; other members used
 * later in this file (hash_code, nest, entry_sem, wait_list, data) are
 * declared on lines not visible here — confirm against the full source.
 */
75 struct _MonoThreadsSync
77 gsize owner; /* thread ID */
79 #ifdef HAVE_MOVING_COLLECTOR
82 volatile gint32 entry_count; /* incremented/decremented around the entry-semaphore wait in the contended-enter path */
/*
 * MonitorArray: one chunk of MonoThreadsSync records handed out by the
 * allocator in mon_new(); chunks are chained through ->next.
 */
88 typedef struct _MonitorArray MonitorArray;
90 struct _MonitorArray {
93 MonoThreadsSync monitors [MONO_ZERO_LEN_ARRAY];
/* Serialize all access to the monitor allocator (freelist + arrays). */
96 #define mono_monitor_allocator_lock() EnterCriticalSection (&monitor_mutex)
97 #define mono_monitor_allocator_unlock() LeaveCriticalSection (&monitor_mutex)
98 static CRITICAL_SECTION monitor_mutex;
/* Singly-linked freelist of unused sync blocks (chained through ->data). */
99 static MonoThreadsSync *monitor_freelist;
/* Chain of all MonitorArray chunks ever allocated. */
100 static MonitorArray *monitor_allocated;
/* Size of the next MonitorArray chunk; grows as more chunks are needed. */
101 static int array_size = 16;
/* Cache pthread_self() in fast TLS where the compiler supports __thread. */
103 #ifdef HAVE_KW_THREAD
104 static __thread gsize tls_pthread_self MONO_TLS_FAST;
/* On non-Windows, emulate GetCurrentThreadId() with the pthread ID. */
107 #ifndef PLATFORM_WIN32
108 #ifdef HAVE_KW_THREAD
109 #define GetCurrentThreadId() tls_pthread_self
/*
 * The usual problem: we can't replace GetCurrentThreadId () with a macro because
 * it is in a public header.
 */
115 #define GetCurrentThreadId() ((gsize)pthread_self ())
/* One-time process init: create the mutex guarding the monitor allocator. */
120 mono_monitor_init (void)
122 InitializeCriticalSection (&monitor_mutex);
/*
 * Runtime shutdown hook. The deletion of monitor_mutex is deliberately
 * commented out (other threads may still touch monitors during shutdown —
 * NOTE(review): rationale inferred, confirm against full source history).
 */
126 mono_monitor_cleanup (void)
128 /*DeleteCriticalSection (&monitor_mutex);*/
/*
 * mono_monitor_init_tls:
 *
 * Setup TLS variables used by the monitor code for the current thread.
 * Must run on each thread before it uses monitors, so the fast
 * GetCurrentThreadId() macro (tls_pthread_self) returns the right ID.
 */
137 mono_monitor_init_tls (void)
139 #if !defined(PLATFORM_WIN32) && defined(HAVE_KW_THREAD)
140 tls_pthread_self = pthread_self ();
/*
 * Helper for mono_locks_dump: decide whether @mon is a live sync block.
 * Visible logic checks whether @mon points inside one of the allocated
 * MonitorArray chunks; the rest of the body (the actual freelist walk /
 * return) is on lines not visible in this fragment.
 */
145 monitor_is_on_freelist (MonoThreadsSync *mon)
147 MonitorArray *marray;
148 for (marray = monitor_allocated; marray; marray = marray->next) {
149 if (mon >= marray->monitors && mon < &marray->monitors [marray->num_monitors])
/*
 * Print a report on stdout of the managed locks currently held by
 * threads. If @include_untaken is specified, list also inflated locks
 * that are not currently owned.
 * This is supposed to be used in debuggers like gdb.
 */
165 mono_locks_dump (gboolean include_untaken)
168 int used = 0, on_freelist = 0, to_recycle = 0, total = 0, num_arrays = 0;
169 MonoThreadsSync *mon;
170 MonitorArray *marray;
/* Count entries chained on the freelist (linked through ->data). */
171 for (mon = monitor_freelist; mon; mon = mon->data)
173 for (marray = monitor_allocated; marray; marray = marray->next) {
174 total += marray->num_monitors;
176 for (i = 0; i < marray->num_monitors; ++i) {
177 mon = &marray->monitors [i];
178 if (mon->data == NULL) {
179 if (i < marray->num_monitors - 1)
182 if (!monitor_is_on_freelist (mon->data)) {
/* mon->data holds the GC weak link back to the owning object here. */
183 MonoObject *holder = mono_gc_weak_link_get (&mon->data);
185 g_print ("Lock %p in object %p held by thread %p, nest level: %d\n",
186 mon, holder, (void*)mon->owner, mon->nest);
188 g_print ("\tWaiting on semaphore %p: %d\n", mon->entry_sem, mon->entry_count);
189 } else if (include_untaken) {
190 g_print ("Lock %p in object %p untaken\n", mon, holder);
197 g_print ("Total locks (in %d array(s)): %d, used: %d, on freelist: %d, to recycle: %d\n",
198 num_arrays, total, used, on_freelist, to_recycle);
201 /* LOCKING: this is called with monitor_mutex held */
/*
 * Recycle a sync block whose owning object has been collected: release
 * the OS semaphore, sanity-check the wait list, and push the record back
 * onto the allocator freelist (chained through ->data).
 */
203 mon_finalize (MonoThreadsSync *mon)
205 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": Finalizing sync %p", mon));
207 if (mon->entry_sem != NULL) {
208 CloseHandle (mon->entry_sem);
209 mon->entry_sem = NULL;
/* If this isn't empty then something is seriously broken - it
 * means a thread is still waiting on the object that owned
 * this lock, but the object has been finalized.
 */
215 g_assert (mon->wait_list == NULL);
217 mon->entry_count = 0;
218 /* owner and nest are set in mon_new, no need to zero them out */
220 mon->data = monitor_freelist;
221 monitor_freelist = mon;
222 mono_perfcounters->gc_sync_blocks--;
225 /* LOCKING: this is called with monitor_mutex held */
/*
 * mon_new: return a fresh MonoThreadsSync record.  If the freelist is
 * empty, first sweep the existing MonitorArray chunks for records whose
 * owning object was collected (data == NULL), and failing that allocate a
 * brand-new chunk and thread all of its records onto the freelist.
 * NOTE(review): several lines of this function (the parameter list,
 * declarations of i/last, closing braces and the final return) are not
 * visible in this fragment.
 */
226 static MonoThreadsSync *
229 MonoThreadsSync *new;
231 if (!monitor_freelist) {
232 MonitorArray *marray;
234 /* see if any sync block has been collected */
236 for (marray = monitor_allocated; marray; marray = marray->next) {
237 for (i = 0; i < marray->num_monitors; ++i) {
238 if (marray->monitors [i].data == NULL) {
239 new = &marray->monitors [i];
240 new->data = monitor_freelist;
241 monitor_freelist = new;
244 /* small perf tweak to avoid scanning all the blocks */
248 /* need to allocate a new array of monitors */
249 if (!monitor_freelist) {
251 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": allocating more monitors: %d", array_size));
/* Fix: size the chunk from the MonitorArray header, not the unrelated
 * managed MonoArray struct — using sizeof (MonoArray) over/under-allocates
 * depending on the two structs' sizes. */
252 marray = g_malloc0 (sizeof (MonitorArray) + array_size * sizeof (MonoThreadsSync));
253 marray->num_monitors = array_size;
255 /* link into the freelist */
256 for (i = 0; i < marray->num_monitors - 1; ++i) {
257 marray->monitors [i].data = &marray->monitors [i + 1];
259 marray->monitors [i].data = NULL; /* the last one */
260 monitor_freelist = &marray->monitors [0];
/* we append the marray instead of prepending so that
 * the collecting loop above will need to scan smaller arrays first
 */
264 if (!monitor_allocated) {
265 monitor_allocated = marray;
267 last = monitor_allocated;
/* Pop the head of the freelist; ->data doubles as the freelist link. */
275 new = monitor_freelist;
276 monitor_freelist = new->data;
281 mono_perfcounters->gc_sync_blocks++;
/*
 * Format of the lock word:
 * thinhash | fathash | data
 *
 * thinhash is the lower bit: if set data is the shifted hashcode of the object.
 * fathash is another bit: if set the hash code is stored in the MonoThreadsSync
 * struct pointed to by data.
 * If neither bit is set and data is non-NULL, data is a MonoThreadsSync
 * pointer (the object has an inflated lock).
 */
296 MonoThreadsSync *sync;
/* Flag bits stored in the two low (alignment) bits of the lock word. */
300 LOCK_WORD_THIN_HASH = 1,
301 LOCK_WORD_FAT_HASH = 1 << 1,
302 LOCK_WORD_BITS_MASK = 0x3,
303 LOCK_WORD_HASH_SHIFT = 2
/* Objects are at least 8-byte aligned, so the low 3 address bits carry no
 * information and can be shifted out before hashing. */
306 #define MONO_OBJECT_ALIGNMENT_SHIFT 3
/*
 * Calculate a hash code for @obj that is constant while @obj is alive.
 * With a moving collector the address cannot serve as the hash directly,
 * so the hash is computed once from the (currently pinned) address and
 * then cached either inline in the lock word ("thin" hash) or in the
 * MonoThreadsSync record ("fat" hash) when the lock is inflated.
 */
315 mono_object_hash (MonoObject* obj)
317 #ifdef HAVE_MOVING_COLLECTOR
322 lw.sync = obj->synchronisation;
/* Fast path 1: hash already stored inline in the lock word. */
323 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
324 /*g_print ("fast thin hash %d for obj %p store\n", (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT, obj);*/
325 return (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT;
/* Fast path 2: hash already cached in the inflated lock record. */
327 if (lw.lock_word & LOCK_WORD_FAT_HASH) {
328 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
329 /*g_print ("fast fat hash %d for obj %p store\n", lw.sync->hash_code, obj);*/
330 return lw.sync->hash_code;
/*
 * while we are inside this function, the GC will keep this object pinned,
 * since we are in the unmanaged stack. Thanks to this and to the hash
 * function that depends only on the address, we can ignore the races if
 * another thread computes the hash at the same time, because it'll end up
 * with the same value.
 */
339 hash = (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
340 /* clear the top bits as they can be discarded */
341 hash &= ~(LOCK_WORD_BITS_MASK << 30);
342 /* no hash flags were set, so it must be a MonoThreadsSync pointer if not NULL */
344 lw.sync->hash_code = hash;
345 /*g_print ("storing hash code %d for obj %p in sync %p\n", hash, obj, lw.sync);*/
346 lw.lock_word |= LOCK_WORD_FAT_HASH;
347 /* this is safe since we don't deflate locks */
348 obj->synchronisation = lw.sync;
350 /*g_print ("storing thin hash code %d for obj %p\n", hash, obj);*/
351 lw.lock_word = LOCK_WORD_THIN_HASH | (hash << LOCK_WORD_HASH_SHIFT);
/* Publish the thin hash only if no one installed a sync block meanwhile. */
352 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, NULL) == NULL)
354 /*g_print ("failed store\n");*/
355 /* someone set the hash flag or someone inflated the object */
356 lw.sync = obj->synchronisation;
357 if (lw.lock_word & LOCK_WORD_THIN_HASH)
359 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
360 lw.sync->hash_code = hash;
361 lw.lock_word |= LOCK_WORD_FAT_HASH;
362 /* this is safe since we don't deflate locks */
363 obj->synchronisation = lw.sync;
/*
 * Non-moving collector: the address itself is stable, so hash it directly.
 * Wang's address-based hash function:
 * http://www.concentric.net/~Ttwang/tech/addrhash.htm
 */
371 return (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
/* If allow_interruption==TRUE, the method will be interrupted if abort or suspend
 * is requested. In this case it returns -1.
 */
/*
 * Core Monitor.Enter/TryEnter implementation.
 * @obj: object to lock (ArgumentNullException if NULL).
 * @ms: timeout in milliseconds, or INFINITE.
 * Returns 1 on success; visible failure paths return after the timeout /
 * interruption handling below (exact return statements fall on lines not
 * visible in this fragment).
 * Fast path: CAS our thread ID into mon->owner. Contended path: wait on
 * mon->entry_sem, re-checking the timeout and interruption status.
 */
379 mono_monitor_try_enter_internal (MonoObject *obj, guint32 ms, gboolean allow_interruption)
381 MonoThreadsSync *mon;
382 gsize id = GetCurrentThreadId ();
384 guint32 then = 0, now, delta;
389 LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION
390 ": (%d) Trying to lock object %p (%d ms)", id, obj, ms));
392 if (G_UNLIKELY (!obj)) {
393 mono_raise_exception (mono_get_exception_argument_null ("obj"));
398 mon = obj->synchronisation;
400 /* If the object has never been locked... */
401 if (G_UNLIKELY (mon == NULL)) {
402 mono_monitor_allocator_lock ();
/* Install a fresh sync block; winning the CAS means we hold the lock. */
404 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, mon, NULL) == NULL) {
405 mono_gc_weak_link_add (&mon->data, obj, FALSE);
406 mono_monitor_allocator_unlock ();
407 /* Successfully locked */
410 #ifdef HAVE_MOVING_COLLECTOR
412 lw.sync = obj->synchronisation;
413 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
414 MonoThreadsSync *oldlw = lw.sync;
415 /* move the already calculated hash */
416 mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
418 lw.lock_word |= LOCK_WORD_FAT_HASH;
419 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
420 mono_gc_weak_link_add (&mon->data, obj, FALSE);
421 mono_monitor_allocator_unlock ();
422 /* Successfully locked */
426 mono_monitor_allocator_unlock ();
429 } else if (lw.lock_word & LOCK_WORD_FAT_HASH) {
431 mono_monitor_allocator_unlock ();
432 /* get the old lock without the fat hash bit */
433 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
437 mono_monitor_allocator_unlock ();
438 mon = obj->synchronisation;
442 mono_monitor_allocator_unlock ();
443 mon = obj->synchronisation;
447 #ifdef HAVE_MOVING_COLLECTOR
/* Object carried only a thin hash: inflate it, migrating the hash into
 * the new sync block before publishing it. */
450 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
451 MonoThreadsSync *oldlw = lw.sync;
452 mono_monitor_allocator_lock ();
454 /* move the already calculated hash */
455 mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
457 lw.lock_word |= LOCK_WORD_FAT_HASH;
458 if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
459 mono_gc_weak_link_add (&mon->data, obj, TRUE);
460 mono_monitor_allocator_unlock ();
461 /* Successfully locked */
465 mono_monitor_allocator_unlock ();
472 #ifdef HAVE_MOVING_COLLECTOR
/* Strip the flag bits to recover the plain MonoThreadsSync pointer. */
476 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
481 /* If the object has previously been locked but isn't now... */
/* This case differs from Dice's case 3 because we don't
 * deflate locks or cache unused lock records
 */
486 if (G_LIKELY (mon->owner == 0)) {
/* Try to install our ID in the owner field, nest
 * should have been left at 1 by the previous unlock
 */
491 if (G_LIKELY (InterlockedCompareExchangePointer ((gpointer *)&mon->owner, (gpointer)id, 0) == 0)) {
493 g_assert (mon->nest == 1);
501 /* If the object is currently locked by this thread... */
502 if (mon->owner == id) {
507 /* The object must be locked by someone else... */
508 mono_perfcounters->thread_contentions++;
510 /* If ms is 0 we don't block, but just fail straight away */
512 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) timed out, returning FALSE", id));
516 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_CONTENTION);
518 /* The slow path begins here. */
/* a small amount of duplicated code, but it allows us to insert the profiler
 * callbacks without impacting the fast path: from here on we don't need to go back to the
 * retry label, but to retry_contended. At this point mon is already installed in the object
 */
/* This case differs from Dice's case 3 because we don't
 * deflate locks or cache unused lock records
 */
528 if (G_LIKELY (mon->owner == 0)) {
/* Try to install our ID in the owner field, nest
 * should have been left at 1 by the previous unlock
 */
533 if (G_LIKELY (InterlockedCompareExchangePointer ((gpointer *)&mon->owner, (gpointer)id, 0) == 0)) {
535 g_assert (mon->nest == 1);
536 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_DONE);
541 /* If the object is currently locked by this thread... */
542 if (mon->owner == id) {
544 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_DONE);
/* We need to make sure there's a semaphore handle (creating it if
 * necessary), and block on it
 */
551 if (mon->entry_sem == NULL) {
552 /* Create the semaphore */
553 sem = CreateSemaphore (NULL, 0, 0x7fffffff, NULL);
554 g_assert (sem != NULL);
/* Lost the race to publish the semaphore: the loser's handle is dealt
 * with on lines not visible here (presumably CloseHandle — confirm). */
555 if (InterlockedCompareExchangePointer ((gpointer*)&mon->entry_sem, sem, NULL) != NULL) {
556 /* Someone else just put a handle here */
/* If we need to time out, record a timestamp and adjust ms,
 * because WaitForSingleObject doesn't tell us how long it
 * waited.
 *
 * Don't block forever here, because there's a chance the owner
 * thread released the lock while we were creating the
 * semaphore: we would not get the wakeup. Using the event
 * handle technique from pulse/wait would involve locking the
 * lock struct and therefore slowing down the fast path.
 */
571 if (ms != INFINITE) {
572 then = mono_msec_ticks ();
582 InterlockedIncrement (&mon->entry_count);
584 mono_perfcounters->thread_queue_len++;
585 mono_perfcounters->thread_queue_max++;
586 thread = mono_thread_current ();
588 mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
/*
 * We pass TRUE instead of allow_interruption since we have to check for the
 * StopRequested case below.
 */
594 ret = WaitForSingleObjectEx (mon->entry_sem, waitms, TRUE);
596 mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
598 InterlockedDecrement (&mon->entry_count);
599 mono_perfcounters->thread_queue_len--;
/* Recompute the remaining timeout, handling tick-counter wraparound. */
601 if (ms != INFINITE) {
602 now = mono_msec_ticks ();
605 /* The counter must have wrapped around */
606 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
607 ": wrapped around! now=0x%x then=0x%x", now, then));
609 now += (0xffffffff - then);
612 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": wrap rejig: now=0x%x then=0x%x delta=0x%x", now, then, now-then));
622 if ((ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) && ms > 0) {
624 goto retry_contended;
627 if (ret == WAIT_TIMEOUT || (ret == WAIT_IO_COMPLETION && !allow_interruption)) {
628 if (ret == WAIT_IO_COMPLETION && (mono_thread_test_state (mono_thread_current (), (ThreadState_StopRequested|ThreadState_SuspendRequested)))) {
/*
 * We have to obey a stop/suspend request even if
 * allow_interruption is FALSE to avoid hangs at shutdown.
 */
633 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_FAIL);
636 /* Infinite wait, so just try again */
637 goto retry_contended;
641 if (ret == WAIT_OBJECT_0) {
642 /* retry from the top */
643 goto retry_contended;
646 /* We must have timed out */
647 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) timed out waiting, returning FALSE", id));
649 mono_profiler_monitor_event (obj, MONO_PROFILER_MONITOR_FAIL);
651 if (ret == WAIT_IO_COMPLETION)
/* Monitor.Enter: block indefinitely, no interruption; TRUE iff acquired. */
658 mono_monitor_enter (MonoObject *obj)
660 return mono_monitor_try_enter_internal (obj, INFINITE, FALSE) == 1;
/* Monitor.TryEnter with @ms timeout, no interruption; TRUE iff acquired. */
664 mono_monitor_try_enter (MonoObject *obj, guint32 ms)
666 return mono_monitor_try_enter_internal (obj, ms, FALSE) == 1;
/*
 * Monitor.Exit: release one nesting level of @obj's lock held by the
 * calling thread.  Exit on a never-locked object is silently ignored
 * (matching MS behavior); a NULL @obj raises ArgumentNullException.
 * NOTE(review): the owner-clearing store and the not-owner error path fall
 * on lines not visible in this fragment.
 */
670 mono_monitor_exit (MonoObject *obj)
672 MonoThreadsSync *mon;
675 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Unlocking %p", GetCurrentThreadId (), obj));
677 if (G_UNLIKELY (!obj)) {
678 mono_raise_exception (mono_get_exception_argument_null ("obj"));
682 mon = obj->synchronisation;
684 #ifdef HAVE_MOVING_COLLECTOR
/* Thin-hash-only lock word means the object was never locked. */
688 if (lw.lock_word & LOCK_WORD_THIN_HASH)
690 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
694 if (G_UNLIKELY (mon == NULL)) {
695 /* No one ever used Enter. Just ignore the Exit request as MS does */
698 if (G_UNLIKELY (mon->owner != GetCurrentThreadId ())) {
702 nest = mon->nest - 1;
704 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
705 ": (%d) Object %p is now unlocked", GetCurrentThreadId (), obj));
/* object is now unlocked, leave nest==1 so we don't
 * need to set it when the lock is reacquired
 */
/* Do the wakeup stuff. It's possible that the last
 * blocking thread gave up waiting just before we
 * release the semaphore resulting in a futile wakeup
 * next time there's contention for this object, but
 * it means we don't have to waste time locking the
 * struct.
 */
719 if (mon->entry_count > 0) {
720 ReleaseSemaphore (mon->entry_sem, 1, NULL);
723 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
724 ": (%d) Object %p is now locked %d times", GetCurrentThreadId (), obj, nest));
/*
 * Return the address of the GC weak link stored in @object's sync block
 * (sync->data), or implicitly nothing when the object has no inflated
 * lock / only carries a thin hash.  NOTE(review): the return statements
 * fall on lines not visible in this fragment.
 */
730 mono_monitor_get_object_monitor_weak_link (MonoObject *object)
733 MonoThreadsSync *sync = NULL;
735 lw.sync = object->synchronisation;
736 if (lw.lock_word & LOCK_WORD_FAT_HASH) {
737 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
739 } else if (!(lw.lock_word & LOCK_WORD_THIN_HASH)) {
743 if (sync && sync->data)
/*
 * IL-emission helper shared by the fast Enter/Exit wrapper builders:
 * emits "if (!obj) goto obj_null;" then loads obj->synchronisation into
 * local @syncp_loc and branches (true or false per @branch_on_true) to
 * the caller-patched target. The two out-params receive branch tokens to
 * patch later with mono_mb_patch_short_branch.
 */
749 emit_obj_syncp_check (MonoMethodBuilder *mb, int syncp_loc, int *obj_null_branch, int *syncp_true_false_branch,
750 gboolean branch_on_true)
757 mono_mb_emit_byte (mb, CEE_LDARG_0);
758 *obj_null_branch = mono_mb_emit_short_branch (mb, CEE_BRFALSE_S);
/*
 * ldarg.0; conv.i; add offsetof(MonoObject, synchronisation); ldind.i
 * stloc syncp; ldloc syncp; brtrue/false.s syncp_true_false
 */
771 mono_mb_emit_byte (mb, CEE_LDARG_0);
772 mono_mb_emit_byte (mb, CEE_CONV_I);
773 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoObject, synchronisation));
774 mono_mb_emit_byte (mb, CEE_ADD);
775 mono_mb_emit_byte (mb, CEE_LDIND_I);
776 mono_mb_emit_stloc (mb, syncp_loc);
777 mono_mb_emit_ldloc (mb, syncp_loc);
778 *syncp_true_false_branch = mono_mb_emit_short_branch (mb, branch_on_true ? CEE_BRTRUE_S : CEE_BRFALSE_S);
/*
 * Build (once, cached in a static) an IL wrapper that inlines the
 * uncontended Monitor.Enter fast path: load the current thread ID from
 * TLS, CAS it into syncp->owner via Interlocked.CompareExchange, bump
 * syncp->nest on recursive entry, and fall back to the real
 * @monitor_enter_method for every slow case (null obj, no sync block,
 * other owner, CAS failure).
 * NOTE(review): several guard returns (e.g. when thread_tls_offset == -1)
 * and closing braces fall on lines not visible in this fragment.
 */
782 mono_monitor_get_fast_enter_method (MonoMethod *monitor_enter_method)
784 static MonoMethod *fast_monitor_enter;
785 static MonoMethod *compare_exchange_method;
787 MonoMethodBuilder *mb;
788 int obj_null_branch, syncp_null_branch, has_owner_branch, other_owner_branch, tid_branch;
789 int tid_loc, syncp_loc, owner_loc;
790 int thread_tls_offset;
792 #ifdef HAVE_MOVING_COLLECTOR
796 thread_tls_offset = mono_thread_get_tls_offset ();
797 if (thread_tls_offset == -1)
800 if (fast_monitor_enter)
801 return fast_monitor_enter;
/* Lazily resolve System.Threading.Interlocked.CompareExchange(intptr&,...). */
803 if (!compare_exchange_method) {
804 MonoMethodDesc *desc;
807 desc = mono_method_desc_new ("Interlocked:CompareExchange(intptr&,intptr,intptr)", FALSE);
808 class = mono_class_from_name (mono_defaults.corlib, "System.Threading", "Interlocked");
809 compare_exchange_method = mono_method_desc_search_in_class (desc, class);
810 mono_method_desc_free (desc);
812 if (!compare_exchange_method)
816 mb = mono_mb_new (mono_defaults.monitor_class, "FastMonitorEnter", MONO_WRAPPER_UNKNOWN);
818 mb->method->slot = -1;
819 mb->method->flags = METHOD_ATTRIBUTE_PUBLIC | METHOD_ATTRIBUTE_STATIC |
820 METHOD_ATTRIBUTE_HIDE_BY_SIG | METHOD_ATTRIBUTE_FINAL;
822 tid_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
823 syncp_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
824 owner_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
826 emit_obj_syncp_check (mb, syncp_loc, &obj_null_branch, &syncp_null_branch, FALSE);
/*
 * IL sketch: tid = current MonoThread->tid (via TLS);
 * owner = syncp->owner; if (owner) goto tid_check;
 */
829 mono. tls thread_tls_offset threadp
830 ldc.i4 G_STRUCT_OFFSET(MonoThread, tid) threadp off
835 ldc.i4 G_STRUCT_OFFSET(MonoThreadsSync, owner) syncp off
843 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
844 mono_mb_emit_byte (mb, CEE_MONO_TLS);
845 mono_mb_emit_i4 (mb, thread_tls_offset);
846 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThread, tid));
847 mono_mb_emit_byte (mb, CEE_ADD);
848 mono_mb_emit_byte (mb, CEE_LDIND_I);
849 mono_mb_emit_stloc (mb, tid_loc);
850 mono_mb_emit_ldloc (mb, syncp_loc);
851 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, owner));
852 mono_mb_emit_byte (mb, CEE_ADD);
853 mono_mb_emit_byte (mb, CEE_LDIND_I);
854 mono_mb_emit_stloc (mb, owner_loc);
855 mono_mb_emit_ldloc (mb, owner_loc);
856 tid_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);
/*
 * Unowned: if (CompareExchange(&syncp->owner, tid, 0) != 0) goto has_owner;
 * else return (lock acquired).
 */
860 ldc.i4 G_STRUCT_OFFSET(MonoThreadsSync, owner) syncp off
863 ldc.i4 0 &owner tid 0
864 call System.Threading.Interlocked.CompareExchange oldowner
869 mono_mb_emit_ldloc (mb, syncp_loc);
870 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, owner));
871 mono_mb_emit_byte (mb, CEE_ADD);
872 mono_mb_emit_ldloc (mb, tid_loc);
873 mono_mb_emit_byte (mb, CEE_LDC_I4_0);
874 mono_mb_emit_managed_call (mb, compare_exchange_method, NULL);
875 has_owner_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);
876 mono_mb_emit_byte (mb, CEE_RET);
/*
 * Recursive entry: if (owner == tid) { syncp->nest++; return; }
 * else goto other_owner.
 */
884 ldc.i4 G_STRUCT_OFFSET(MonoThreadsSync, nest) syncp off
888 ldc.i4 1 &nest nest 1
894 mono_mb_patch_short_branch (mb, tid_branch);
895 mono_mb_emit_ldloc (mb, owner_loc);
896 mono_mb_emit_ldloc (mb, tid_loc);
897 other_owner_branch = mono_mb_emit_short_branch (mb, CEE_BNE_UN_S);
898 mono_mb_emit_ldloc (mb, syncp_loc);
899 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, nest));
900 mono_mb_emit_byte (mb, CEE_ADD);
901 mono_mb_emit_byte (mb, CEE_DUP);
902 mono_mb_emit_byte (mb, CEE_LDIND_I4);
903 mono_mb_emit_byte (mb, CEE_LDC_I4_1);
904 mono_mb_emit_byte (mb, CEE_ADD);
905 mono_mb_emit_byte (mb, CEE_STIND_I4);
906 mono_mb_emit_byte (mb, CEE_RET);
/*
 * obj_null, syncp_null, has_owner, other_owner:
 * tail-call the real System.Threading.Monitor.Enter.
 */
915 mono_mb_patch_short_branch (mb, obj_null_branch);
916 mono_mb_patch_short_branch (mb, syncp_null_branch);
917 mono_mb_patch_short_branch (mb, has_owner_branch);
918 mono_mb_patch_short_branch (mb, other_owner_branch);
919 mono_mb_emit_byte (mb, CEE_LDARG_0);
920 mono_mb_emit_managed_call (mb, monitor_enter_method, NULL);
921 mono_mb_emit_byte (mb, CEE_RET);
923 fast_monitor_enter = mono_mb_create_method (mb, mono_signature_no_pinvoke (monitor_enter_method), 5);
926 return fast_monitor_enter;
/*
 * Build (once, cached in a static) an IL wrapper inlining the uncontended
 * Monitor.Exit fast path: verify syncp->owner equals the current thread's
 * tid (from TLS), either decrement syncp->nest (nested exit) or clear
 * syncp->owner (final exit, provided entry_count == 0), and fall back to
 * the real @monitor_exit_method for null obj or when waiters exist.
 * NOTE(review): some guard returns and closing braces fall on lines not
 * visible in this fragment.
 */
930 mono_monitor_get_fast_exit_method (MonoMethod *monitor_exit_method)
932 static MonoMethod *fast_monitor_exit;
934 MonoMethodBuilder *mb;
935 int obj_null_branch, has_waiting_branch, has_syncp_branch, owned_branch, nested_branch;
936 int thread_tls_offset;
939 #ifdef HAVE_MOVING_COLLECTOR
943 thread_tls_offset = mono_thread_get_tls_offset ();
944 if (thread_tls_offset == -1)
947 if (fast_monitor_exit)
948 return fast_monitor_exit;
950 mb = mono_mb_new (mono_defaults.monitor_class, "FastMonitorExit", MONO_WRAPPER_UNKNOWN);
952 mb->method->slot = -1;
953 mb->method->flags = METHOD_ATTRIBUTE_PUBLIC | METHOD_ATTRIBUTE_STATIC |
954 METHOD_ATTRIBUTE_HIDE_BY_SIG | METHOD_ATTRIBUTE_FINAL;
956 syncp_loc = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
/* Exit on an object with no sync block is a no-op (branch_on_true=TRUE). */
958 emit_obj_syncp_check (mb, syncp_loc, &obj_null_branch, &has_syncp_branch, TRUE);
964 mono_mb_emit_byte (mb, CEE_RET);
/*
 * has_syncp: load syncp->owner and the current MonoThread->tid (via TLS);
 * beq.s owned, otherwise fall through (not our lock).
 */
969 ldc.i4 G_STRUCT_OFFSET(MonoThreadsSync, owner) syncp off
972 mono. tls thread_tls_offset owner threadp
973 ldc.i4 G_STRUCT_OFFSET(MonoThread, tid) owner threadp off
979 mono_mb_patch_short_branch (mb, has_syncp_branch);
980 mono_mb_emit_ldloc (mb, syncp_loc);
981 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, owner));
982 mono_mb_emit_byte (mb, CEE_ADD);
983 mono_mb_emit_byte (mb, CEE_LDIND_I);
984 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
985 mono_mb_emit_byte (mb, CEE_MONO_TLS);
986 mono_mb_emit_i4 (mb, thread_tls_offset);
987 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThread, tid));
988 mono_mb_emit_byte (mb, CEE_ADD);
989 mono_mb_emit_byte (mb, CEE_LDIND_I);
990 owned_branch = mono_mb_emit_short_branch (mb, CEE_BEQ_S);
996 mono_mb_emit_byte (mb, CEE_RET);
/*
 * owned: load &syncp->nest and *(&syncp->nest); if nest > 1 goto nested.
 */
1001 ldc.i4 G_STRUCT_OFFSET(MonoThreadsSync, nest) syncp off
1006 ldc.i4 1 &nest nest nest 1
1007 bgt.un.s nested &nest nest
1010 mono_mb_patch_short_branch (mb, owned_branch);
1011 mono_mb_emit_ldloc (mb, syncp_loc);
1012 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, nest));
1013 mono_mb_emit_byte (mb, CEE_ADD);
1014 mono_mb_emit_byte (mb, CEE_DUP);
1015 mono_mb_emit_byte (mb, CEE_LDIND_I4);
1016 mono_mb_emit_byte (mb, CEE_DUP);
1017 mono_mb_emit_byte (mb, CEE_LDC_I4_1);
1018 nested_branch = mono_mb_emit_short_branch (mb, CEE_BGT_UN_S);
/*
 * Final exit: if (syncp->entry_count) goto has_waiting — waiters need the
 * semaphore wakeup done by the full mono_monitor_exit.
 */
1024 ldc.i4 G_STRUCT_OFFSET(MonoThreadsSync, entry_count) syncp off
1027 brtrue.s has_waiting
1030 mono_mb_emit_byte (mb, CEE_POP);
1031 mono_mb_emit_byte (mb, CEE_POP);
1032 mono_mb_emit_ldloc (mb, syncp_loc);
1033 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, entry_count));
1034 mono_mb_emit_byte (mb, CEE_ADD);
1035 mono_mb_emit_byte (mb, CEE_LDIND_I4);
1036 has_waiting_branch = mono_mb_emit_short_branch (mb, CEE_BRTRUE_S);
/* No waiters: release the lock by storing NULL into syncp->owner. */
1040 ldc.i4 G_STRUCT_OFFSET(MonoThreadsSync, owner) syncp off
1047 mono_mb_emit_ldloc (mb, syncp_loc);
1048 mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoThreadsSync, owner));
1049 mono_mb_emit_byte (mb, CEE_ADD);
1050 mono_mb_emit_byte (mb, CEE_LDNULL);
1051 mono_mb_emit_byte (mb, CEE_STIND_I);
1052 mono_mb_emit_byte (mb, CEE_RET);
/* nested: *(&nest) = nest - 1; return. */
1056 ldc.i4 1 &nest nest 1
1062 mono_mb_patch_short_branch (mb, nested_branch);
1063 mono_mb_emit_byte (mb, CEE_LDC_I4_1);
1064 mono_mb_emit_byte (mb, CEE_SUB);
1065 mono_mb_emit_byte (mb, CEE_STIND_I4);
1066 mono_mb_emit_byte (mb, CEE_RET);
/*
 * obj_null, has_waiting: fall back to the real
 * System.Threading.Monitor.Exit.
 */
1075 mono_mb_patch_short_branch (mb, obj_null_branch);
1076 mono_mb_patch_short_branch (mb, has_waiting_branch);
1077 mono_mb_emit_byte (mb, CEE_LDARG_0);
1078 mono_mb_emit_managed_call (mb, monitor_exit_method, NULL);
1079 mono_mb_emit_byte (mb, CEE_RET);
1081 fast_monitor_exit = mono_mb_create_method (mb, mono_signature_no_pinvoke (monitor_exit_method), 5);
1084 return fast_monitor_exit;
/*
 * Dispatch on the Monitor method name and return the matching IL
 * fast-path wrapper; any other method name is a caller bug.
 */
1088 mono_monitor_get_fast_path (MonoMethod *enter_or_exit)
1090 if (strcmp (enter_or_exit->name, "Enter") == 0)
1091 return mono_monitor_get_fast_enter_method (enter_or_exit);
1092 if (strcmp (enter_or_exit->name, "Exit") == 0)
1093 return mono_monitor_get_fast_exit_method (enter_or_exit);
1094 g_assert_not_reached ();
/*
 * mono_monitor_threads_sync_members_offset:
 * @owner_offset: returns size and offset of the "owner" member
 * @nest_offset: returns size and offset of the "nest" member
 * @entry_count_offset: returns size and offset of the "entry_count" member
 *
 * Returns the offsets and sizes of three members of the
 * MonoThreadsSync struct. The Monitor ASM fastpaths need this.
 * Each out-value packs (offset << 8) | size via ENCODE_OFF_SIZE.
 */
1108 mono_monitor_threads_sync_members_offset (int *owner_offset, int *nest_offset, int *entry_count_offset)
1112 #define ENCODE_OFF_SIZE(o,s) (((o) << 8) | ((s) & 0xff))
1114 *owner_offset = ENCODE_OFF_SIZE (G_STRUCT_OFFSET (MonoThreadsSync, owner), sizeof (ts.owner));
1115 *nest_offset = ENCODE_OFF_SIZE (G_STRUCT_OFFSET (MonoThreadsSync, nest), sizeof (ts.nest));
1116 *entry_count_offset = ENCODE_OFF_SIZE (G_STRUCT_OFFSET (MonoThreadsSync, entry_count), sizeof (ts.entry_count));
/*
 * Icall backing Monitor.TryEnter: retry the interruptible enter (which
 * returns -1 on abort/suspend requests) until it yields a definite
 * result, running the thread-interruption checkpoint between attempts.
 */
1120 ves_icall_System_Threading_Monitor_Monitor_try_enter (MonoObject *obj, guint32 ms)
1125 res = mono_monitor_try_enter_internal (obj, ms, TRUE);
1127 mono_thread_interruption_checkpoint ();
1128 } while (res == -1);
/*
 * Icall: does the calling thread currently own @obj's lock?
 * NOTE(review): the return statements and the thin-hash early-out body
 * fall on lines not visible in this fragment.
 */
1134 ves_icall_System_Threading_Monitor_Monitor_test_owner (MonoObject *obj)
1136 MonoThreadsSync *mon;
1138 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
1139 ": Testing if %p is owned by thread %d", obj, GetCurrentThreadId()));
1141 mon = obj->synchronisation;
1142 #ifdef HAVE_MOVING_COLLECTOR
/* A thin-hash-only lock word means the object was never locked. */
1146 if (lw.lock_word & LOCK_WORD_THIN_HASH)
1148 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
1156 if(mon->owner==GetCurrentThreadId ()) {
/*
 * Icall: is @obj's lock currently held by any thread at all?
 * NOTE(review): the return statements fall on lines not visible in this
 * fragment.
 */
1164 ves_icall_System_Threading_Monitor_Monitor_test_synchronised (MonoObject *obj)
1166 MonoThreadsSync *mon;
1168 LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION
1169 ": (%d) Testing if %p is owned by any thread", GetCurrentThreadId (), obj));
1171 mon = obj->synchronisation;
1172 #ifdef HAVE_MOVING_COLLECTOR
/* A thin-hash-only lock word means the object was never locked. */
1176 if (lw.lock_word & LOCK_WORD_THIN_HASH)
1178 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
1186 if (mon->owner != 0) {
/* All wait list manipulation in the pulse, pulseall and wait
 * functions happens while the monitor lock is held, so we don't need
 * any extra struct locking
 */
/*
 * Icall backing Monitor.Pulse: the caller must own @obj's lock
 * (SynchronizationLockException otherwise); signal and dequeue the event
 * at the head of the sync block's wait list, waking one waiter.
 */
1199 ves_icall_System_Threading_Monitor_Monitor_pulse (MonoObject *obj)
1201 MonoThreadsSync *mon;
1203 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Pulsing %p",
1204 GetCurrentThreadId (), obj));
1206 mon = obj->synchronisation;
1207 #ifdef HAVE_MOVING_COLLECTOR
1211 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
1212 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
1215 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
1220 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
1223 if (mon->owner != GetCurrentThreadId ()) {
1224 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
1228 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) %d threads waiting",
1229 GetCurrentThreadId (), g_slist_length (mon->wait_list)));
1231 if (mon->wait_list != NULL) {
1232 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
1233 ": (%d) signalling and dequeuing handle %p",
1234 GetCurrentThreadId (), mon->wait_list->data));
1236 SetEvent (mon->wait_list->data);
1237 mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
/*
 * Icall backing Monitor.PulseAll: same ownership checks as Pulse, but
 * drains the whole wait list, signalling every queued event.
 */
1242 ves_icall_System_Threading_Monitor_Monitor_pulse_all (MonoObject *obj)
1244 MonoThreadsSync *mon;
1246 LOCK_DEBUG (g_message(G_GNUC_PRETTY_FUNCTION ": (%d) Pulsing all %p",
1247 GetCurrentThreadId (), obj));
1249 mon = obj->synchronisation;
1250 #ifdef HAVE_MOVING_COLLECTOR
1254 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
1255 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
1258 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
1263 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
1266 if (mon->owner != GetCurrentThreadId ()) {
1267 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
1271 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) %d threads waiting",
1272 GetCurrentThreadId (), g_slist_length (mon->wait_list)));
1274 while (mon->wait_list != NULL) {
1275 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
1276 ": (%d) signalling and dequeuing handle %p",
1277 GetCurrentThreadId (), mon->wait_list->data));
1279 SetEvent (mon->wait_list->data);
1280 mon->wait_list = g_slist_remove (mon->wait_list, mon->wait_list->data);
1285 ves_icall_System_Threading_Monitor_Monitor_wait (MonoObject *obj, guint32 ms)
1287 MonoThreadsSync *mon;
1291 gboolean success = FALSE;
1293 MonoThread *thread = mono_thread_current ();
1295 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION
1296 ": (%d) Trying to wait for %p with timeout %dms",
1297 GetCurrentThreadId (), obj, ms));
1299 mon = obj->synchronisation;
1300 #ifdef HAVE_MOVING_COLLECTOR
1304 if (lw.lock_word & LOCK_WORD_THIN_HASH) {
1305 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
1308 lw.lock_word &= ~LOCK_WORD_BITS_MASK;
1313 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
1316 if (mon->owner != GetCurrentThreadId ()) {
1317 mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
1321 /* Do this WaitSleepJoin check before creating the event handle */
1322 mono_thread_current_check_pending_interrupt ();
1324 event = CreateEvent (NULL, FALSE, FALSE, NULL);
1325 if (event == NULL) {
1326 mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to set up wait event"));
1330 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) queuing handle %p",
1331 GetCurrentThreadId (), event));
1333 mono_thread_current_check_pending_interrupt ();
1335 mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
1337 mon->wait_list = g_slist_append (mon->wait_list, event);
1339 /* Save the nest count, and release the lock */
1342 mono_monitor_exit (obj);
1344 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Unlocked %p lock %p",
1345 GetCurrentThreadId (), obj, mon));
1347 /* There's no race between unlocking mon and waiting for the
1348 * event, because auto reset events are sticky, and this event
1349 * is private to this thread. Therefore even if the event was
1350 * signalled before we wait, we still succeed.
1352 ret = WaitForSingleObjectEx (event, ms, TRUE);
1354 /* Reset the thread state fairly early, so we don't have to worry
1355 * about the monitor error checking
1357 mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
1359 if (mono_thread_interruption_requested ()) {
1360 CloseHandle (event);
1364 /* Regain the lock with the previous nest count */
1366 regain = mono_monitor_try_enter_internal (obj, INFINITE, TRUE);
1368 mono_thread_interruption_checkpoint ();
1369 } while (regain == -1);
1372 /* Something went wrong, so throw a
1373 * SynchronizationLockException
1375 CloseHandle (event);
1376 mono_raise_exception (mono_get_exception_synchronization_lock ("Failed to regain lock"));
1382 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Regained %p lock %p",
1383 GetCurrentThreadId (), obj, mon));
1385 if (ret == WAIT_TIMEOUT) {
1386 /* Poll the event again, just in case it was signalled
1387 * while we were trying to regain the monitor lock
1389 ret = WaitForSingleObjectEx (event, 0, FALSE);
1392 /* Pulse will have popped our event from the queue if it signalled
1393 * us, so we only do it here if the wait timed out.
1395 * This avoids a race condition where the thread holding the
1396 * lock can Pulse several times before the WaitForSingleObject
1397 * returns. If we popped the queue here then this event might
1398 * be signalled more than once, thereby starving another
1402 if (ret == WAIT_OBJECT_0) {
1403 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Success",
1404 GetCurrentThreadId ()));
1407 LOCK_DEBUG (g_message (G_GNUC_PRETTY_FUNCTION ": (%d) Wait failed, dequeuing handle %p",
1408 GetCurrentThreadId (), event));
1409 /* No pulse, so we have to remove ourself from the
1412 mon->wait_list = g_slist_remove (mon->wait_list, event);
1414 CloseHandle (event);