struct _MonoThreadsSync
{
- guint32 owner; /* thread ID */
+ gsize owner; /* thread ID */
guint32 nest;
+#ifdef HAVE_MOVING_COLLECTOR
+ gint32 hash_code;
+#endif
volatile guint32 entry_count;
HANDLE entry_sem;
GSList *wait_list;
MonoThreadsSync monitors [MONO_ZERO_LEN_ARRAY];
};
+#define mono_monitor_allocator_lock() EnterCriticalSection (&monitor_mutex)
+#define mono_monitor_allocator_unlock() LeaveCriticalSection (&monitor_mutex)
static CRITICAL_SECTION monitor_mutex;
static MonoThreadsSync *monitor_freelist;
static MonitorArray *monitor_allocated;
{
InitializeCriticalSection (&monitor_mutex);
}
+
+/*
+ * mono_monitor_cleanup:
+ *
+ * Runtime-shutdown counterpart of the monitor initialization above.
+ * NOTE(review): the critical section is deliberately NOT deleted --
+ * presumably other threads can still reach monitor code while the
+ * runtime shuts down, so destroying the mutex here would race with
+ * them.  Confirm before ever re-enabling the call below.
+ */
+void
+mono_monitor_cleanup (void)
+{
+ /*DeleteCriticalSection (&monitor_mutex);*/
+}
+
+/*
+ * monitor_is_on_freelist:
+ * @mon: a pointer value read from a MonoThreadsSync "data" field
+ *
+ * Returns TRUE if @mon points inside one of the allocated monitor
+ * arrays.  The caller (mono_locks_dump) passes a sync's data pointer:
+ * when a monitor sits on the freelist its data field links to the
+ * next free monitor (which lives inside an array), whereas an in-use
+ * monitor's data field is the GC weak link to the owning object --
+ * so an in-array data pointer identifies a freelist entry.
+ * NOTE(review): this relies on GC weak links never pointing into the
+ * monitor arrays; confirm against the allocator.
+ */
+static int
+monitor_is_on_freelist (MonoThreadsSync *mon)
+{
+ MonitorArray *marray;
+ /* walk every chunk of monitors ever allocated */
+ for (marray = monitor_allocated; marray; marray = marray->next) {
+ if (mon >= marray->monitors && mon < &marray->monitors [marray->num_monitors])
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/**
+ * mono_locks_dump:
+ * @include_untaken: also list inflated-but-unheld locks
+ *
+ * Print a report on stdout of the managed locks currently held by
+ * threads. If @include_untaken is specified, list also inflated locks
+ * which are unheld.
+ * This is supposed to be used in debuggers like gdb.
+ */
+void
+mono_locks_dump (gboolean include_untaken)
+{
+ int i;
+ int used = 0, on_freelist = 0, to_recycle = 0, total = 0, num_arrays = 0;
+ MonoThreadsSync *mon;
+ MonitorArray *marray;
+ /* on the freelist each monitor's data field is the link to the next
+  * free monitor, so the list can be walked through data */
+ for (mon = monitor_freelist; mon; mon = mon->data)
+ on_freelist++;
+ /* then scan every allocated array and classify each slot */
+ for (marray = monitor_allocated; marray; marray = marray->next) {
+ total += marray->num_monitors;
+ num_arrays++;
+ for (i = 0; i < marray->num_monitors; ++i) {
+ mon = &marray->monitors [i];
+ if (mon->data == NULL) {
+ /* NULL data: an unused slot; the last entry of each array is
+  * excluded from the recycle count (presumably it terminates
+  * the intra-array chain -- confirm against the allocator) */
+ if (i < marray->num_monitors - 1)
+ to_recycle++;
+ } else {
+ /* non-NULL data that does not point into an array is the GC
+  * weak link of a live, in-use monitor */
+ if (!monitor_is_on_freelist (mon->data)) {
+ MonoObject *holder = mono_gc_weak_link_get (&mon->data);
+ if (mon->owner) {
+ g_print ("Lock %p in object %p held by thread %p, nest level: %d\n",
+ mon, holder, (void*)mon->owner, mon->nest);
+ if (mon->entry_sem)
+ g_print ("\tWaiting on semaphore %p: %d\n", mon->entry_sem, mon->entry_count);
+ } else if (include_untaken) {
+ g_print ("Lock %p in object %p untaken\n", mon, holder);
+ }
+ used++;
+ }
+ }
+ }
+ }
+ g_print ("Total locks (in %d array(s)): %d, used: %d, on freelist: %d, to recycle: %d\n",
+ num_arrays, total, used, on_freelist, to_recycle);
+}
/* LOCKING: this is called with monitor_mutex held */
static void
/* LOCKING: this is called with monitor_mutex held */
static MonoThreadsSync *
-mon_new (guint32 id)
+mon_new (gsize id)
{
MonoThreadsSync *new;
return new;
}
+/*
+ * Format of the lock word:
+ * thinhash | fathash | data
+ *
+ * thinhash is the lower bit: if set data is the shifted hashcode of the object.
+ * fathash is another bit: if set the hash code is stored in the MonoThreadsSync
+ * struct pointed to by data
+ * if neither bit is set and data is non-NULL, data is a MonoThreadsSync
+ */
+/* Union view of the per-object synchronisation slot: the same word is
+ * read either as a tagged integer (lock_word) or as a pointer (sync). */
+typedef union {
+ gsize lock_word;
+ MonoThreadsSync *sync;
+} LockWord;
+
+enum {
+ LOCK_WORD_THIN_HASH = 1,      /* data is the hash, shifted left */
+ LOCK_WORD_FAT_HASH = 1 << 1,  /* data points to a MonoThreadsSync holding the hash */
+ LOCK_WORD_BITS_MASK = 0x3,    /* mask covering both tag bits */
+ LOCK_WORD_HASH_SHIFT = 2      /* thin hash is stored above the tag bits */
+};
+
+/* Objects are assumed to be at least 8-byte aligned, so the low 3 bits
+ * of an object address carry no information and are shifted out before
+ * hashing.  NOTE(review): alignment assumption -- confirm per target. */
+#define MONO_OBJECT_ALIGNMENT_SHIFT 3
+
+/*
+ * mono_object_hash:
+ * @obj: an object
+ *
+ * Calculate a hash code for @obj that is constant while @obj is alive.
+ * Returns 0 for a NULL object.  With a moving collector the hash is
+ * derived from the object's current address and then cached in the
+ * lock word (thin hash) or in the inflated MonoThreadsSync (fat hash)
+ * so later calls return the same value even if the object moves.
+ */
+int
+mono_object_hash (MonoObject* obj)
+{
+#ifdef HAVE_MOVING_COLLECTOR
+ LockWord lw;
+ unsigned int hash;
+ if (!obj)
+ return 0;
+ lw.sync = obj->synchronisation;
+ /* fast path 1: hash already stored inline in the lock word */
+ if (lw.lock_word & LOCK_WORD_THIN_HASH) {
+ /*g_print ("fast thin hash %d for obj %p store\n", (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT, obj);*/
+ return (unsigned int)lw.lock_word >> LOCK_WORD_HASH_SHIFT;
+ }
+ /* fast path 2: hash already stored in the inflated sync struct */
+ if (lw.lock_word & LOCK_WORD_FAT_HASH) {
+ lw.lock_word &= ~LOCK_WORD_BITS_MASK;
+ /*g_print ("fast fat hash %d for obj %p store\n", lw.sync->hash_code, obj);*/
+ return lw.sync->hash_code;
+ }
+ /*
+ * while we are inside this function, the GC will keep this object pinned,
+ * since we are in the unmanaged stack. Thanks to this and to the hash
+ * function that depends only on the address, we can ignore the races if
+ * another thread computes the hash at the same time, because it'll end up
+ * with the same value.
+ */
+ hash = (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
+ /* clear the top bits as they can be discarded */
+ hash &= ~(LOCK_WORD_BITS_MASK << 30);
+ /* no hash flags were set, so it must be a MonoThreadsSync pointer if not NULL */
+ if (lw.sync) {
+ /* object already inflated: cache the hash in the sync struct and
+  * tag the lock word as fat hash (plain store; safe per the comment
+  * below because locks are never deflated back) */
+ lw.sync->hash_code = hash;
+ /*g_print ("storing hash code %d for obj %p in sync %p\n", hash, obj, lw.sync);*/
+ lw.lock_word |= LOCK_WORD_FAT_HASH;
+ /* this is safe since we don't deflate locks */
+ obj->synchronisation = lw.sync;
+ } else {
+ /*g_print ("storing thin hash code %d for obj %p\n", hash, obj);*/
+ lw.lock_word = LOCK_WORD_THIN_HASH | (hash << LOCK_WORD_HASH_SHIFT);
+ /* CAS against NULL: only succeeds if no other thread hashed or
+  * inflated the object in the meantime */
+ if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, NULL) == NULL)
+ return hash;
+ /*g_print ("failed store\n");*/
+ /* someone set the hash flag or someone inflated the object */
+ lw.sync = obj->synchronisation;
+ /* a concurrent thin-hash store computed the same address-based
+  * value, so our local hash is still the right answer */
+ if (lw.lock_word & LOCK_WORD_THIN_HASH)
+ return hash;
+ /* otherwise the object was inflated: stash the hash in the sync */
+ lw.lock_word &= ~LOCK_WORD_BITS_MASK;
+ lw.sync->hash_code = hash;
+ lw.lock_word |= LOCK_WORD_FAT_HASH;
+ /* this is safe since we don't deflate locks */
+ obj->synchronisation = lw.sync;
+ }
+ return hash;
+#else
+/*
+ * Wang's address-based hash function:
+ * http://www.concentric.net/~Ttwang/tech/addrhash.htm
+ */
+ return (GPOINTER_TO_UINT (obj) >> MONO_OBJECT_ALIGNMENT_SHIFT) * 2654435761u;
+#endif
+}
+
/* If allow_interruption==TRUE, the method will be interrumped if abort or suspend
* is requested. In this case it returns -1.
*/
mono_monitor_try_enter_internal (MonoObject *obj, guint32 ms, gboolean allow_interruption)
{
MonoThreadsSync *mon;
- guint32 id = GetCurrentThreadId ();
+ gsize id = GetCurrentThreadId ();
HANDLE sem;
guint32 then = 0, now, delta;
guint32 waitms;
/* If the object has never been locked... */
if (mon == NULL) {
- EnterCriticalSection (&monitor_mutex);
+ mono_monitor_allocator_lock ();
mon = mon_new (id);
if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, mon, NULL) == NULL) {
mono_gc_weak_link_add (&mon->data, obj);
- LeaveCriticalSection (&monitor_mutex);
+ mono_monitor_allocator_unlock ();
/* Successfully locked */
return 1;
} else {
+#ifdef HAVE_MOVING_COLLECTOR
+ LockWord lw;
+ lw.sync = obj->synchronisation;
+ if (lw.lock_word & LOCK_WORD_THIN_HASH) {
+ MonoThreadsSync *oldlw = lw.sync;
+ /* move the already calculated hash */
+ mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
+ lw.sync = mon;
+ lw.lock_word |= LOCK_WORD_FAT_HASH;
+ if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
+ mono_gc_weak_link_add (&mon->data, obj);
+ mono_monitor_allocator_unlock ();
+ /* Successfully locked */
+ return 1;
+ } else {
+ mon_finalize (mon);
+ mono_monitor_allocator_unlock ();
+ goto retry;
+ }
+ } else if (lw.lock_word & LOCK_WORD_FAT_HASH) {
+ mon_finalize (mon);
+ mono_monitor_allocator_unlock ();
+ /* get the old lock without the fat hash bit */
+ lw.lock_word &= ~LOCK_WORD_BITS_MASK;
+ mon = lw.sync;
+ } else {
+ mon_finalize (mon);
+ mono_monitor_allocator_unlock ();
+ mon = obj->synchronisation;
+ }
+#else
mon_finalize (mon);
- LeaveCriticalSection (&monitor_mutex);
- goto retry;
+ mono_monitor_allocator_unlock ();
+ mon = obj->synchronisation;
+#endif
+ }
+ } else {
+#ifdef HAVE_MOVING_COLLECTOR
+ LockWord lw;
+ lw.sync = mon;
+ if (lw.lock_word & LOCK_WORD_THIN_HASH) {
+ MonoThreadsSync *oldlw = lw.sync;
+ mono_monitor_allocator_lock ();
+ mon = mon_new (id);
+ /* move the already calculated hash */
+ mon->hash_code = lw.lock_word >> LOCK_WORD_HASH_SHIFT;
+ lw.sync = mon;
+ lw.lock_word |= LOCK_WORD_FAT_HASH;
+ if (InterlockedCompareExchangePointer ((gpointer*)&obj->synchronisation, lw.sync, oldlw) == oldlw) {
+ mono_gc_weak_link_add (&mon->data, obj);
+ mono_monitor_allocator_unlock ();
+ /* Successfully locked */
+ return 1;
+ } else {
+ mon_finalize (mon);
+ mono_monitor_allocator_unlock ();
+ goto retry;
+ }
}
+#endif
+ }
+
+#ifdef HAVE_MOVING_COLLECTOR
+ {
+ LockWord lw;
+ lw.sync = mon;
+ lw.lock_word &= ~LOCK_WORD_BITS_MASK;
+ mon = lw.sync;
}
+#endif
/* If the object is currently locked by this thread... */
if (mon->owner == id) {
* should have been left at 1 by the previous unlock
* operation
*/
- if (InterlockedCompareExchange (&mon->owner, id, 0) == 0) {
+ if (InterlockedCompareExchangePointer ((gpointer *)&mon->owner, (gpointer)id, 0) == 0) {
/* Success */
g_assert (mon->nest == 1);
return 1;
if (mon->entry_sem == NULL) {
/* Create the semaphore */
sem = CreateSemaphore (NULL, 0, 0x7fffffff, NULL);
+ g_assert (sem != NULL);
if (InterlockedCompareExchangePointer ((gpointer*)&mon->entry_sem, sem, NULL) != NULL) {
/* Someone else just put a handle here */
CloseHandle (sem);
mon = obj->synchronisation;
+#ifdef HAVE_MOVING_COLLECTOR
+ {
+ LockWord lw;
+ lw.sync = mon;
+ if (lw.lock_word & LOCK_WORD_THIN_HASH)
+ return;
+ lw.lock_word &= ~LOCK_WORD_BITS_MASK;
+ mon = lw.sync;
+ }
+#endif
if (mon == NULL) {
/* No one ever used Enter. Just ignore the Exit request as MS does */
return;
": Testing if %p is owned by thread %d", obj, GetCurrentThreadId()));
mon = obj->synchronisation;
+#ifdef HAVE_MOVING_COLLECTOR
+ {
+ LockWord lw;
+ lw.sync = mon;
+ if (lw.lock_word & LOCK_WORD_THIN_HASH)
+ return FALSE;
+ lw.lock_word &= ~LOCK_WORD_BITS_MASK;
+ mon = lw.sync;
+ }
+#endif
if (mon == NULL) {
return FALSE;
}
": (%d) Testing if %p is owned by any thread", GetCurrentThreadId (), obj));
mon = obj->synchronisation;
+#ifdef HAVE_MOVING_COLLECTOR
+ {
+ LockWord lw;
+ lw.sync = mon;
+ if (lw.lock_word & LOCK_WORD_THIN_HASH)
+ return FALSE;
+ lw.lock_word &= ~LOCK_WORD_BITS_MASK;
+ mon = lw.sync;
+ }
+#endif
if (mon == NULL) {
return FALSE;
}
GetCurrentThreadId (), obj));
mon = obj->synchronisation;
+#ifdef HAVE_MOVING_COLLECTOR
+ {
+ LockWord lw;
+ lw.sync = mon;
+ if (lw.lock_word & LOCK_WORD_THIN_HASH) {
+ mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
+ return;
+ }
+ lw.lock_word &= ~LOCK_WORD_BITS_MASK;
+ mon = lw.sync;
+ }
+#endif
if (mon == NULL) {
mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
return;
GetCurrentThreadId (), obj));
mon = obj->synchronisation;
+#ifdef HAVE_MOVING_COLLECTOR
+ {
+ LockWord lw;
+ lw.sync = mon;
+ if (lw.lock_word & LOCK_WORD_THIN_HASH) {
+ mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
+ return;
+ }
+ lw.lock_word &= ~LOCK_WORD_BITS_MASK;
+ mon = lw.sync;
+ }
+#endif
if (mon == NULL) {
mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
return;
GetCurrentThreadId (), obj, ms));
mon = obj->synchronisation;
+#ifdef HAVE_MOVING_COLLECTOR
+ {
+ LockWord lw;
+ lw.sync = mon;
+ if (lw.lock_word & LOCK_WORD_THIN_HASH) {
+ mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
+ return FALSE;
+ }
+ lw.lock_word &= ~LOCK_WORD_BITS_MASK;
+ mon = lw.sync;
+ }
+#endif
if (mon == NULL) {
mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked"));
return FALSE;
mono_raise_exception (mono_get_exception_synchronization_lock ("Not locked by this thread"));
return FALSE;
}
+
+ /* Do this WaitSleepJoin check before creating the event handle */
+ mono_thread_current_check_pending_interrupt ();
event = CreateEvent (NULL, FALSE, FALSE, NULL);
if (event == NULL) {