* Removed all Id tags.
[cacao.git] / src / threads / native / lock.c
index 61c323844832c2df449eefb525f13d00dc8b892b..3adfb24dc3ff802a7b820aceb310abbab88de4e0 100644 (file)
@@ -1,6 +1,6 @@
 /* src/threads/native/lock.c - lock implementation
 
-   Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
+   Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
    C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
    E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
    J. Wenninger, Institut f. Computersprachen - TU Wien
    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
    02110-1301, USA.
 
-   Contact: cacao@cacaojvm.org
-
-   Authors: Stefan Ring
-
-   Changes: Christian Thalinger
-                       Edwin Steiner
-
-   $Id: threads.c 4903 2006-05-11 12:48:43Z edwin $
-
 */
 
 
 #include "config.h"
 
-/* XXX cleanup these includes */
-
 #include <stdlib.h>
-#include <string.h>
+#include <stdio.h>
 #include <assert.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <signal.h>
 #include <sys/time.h>
-#include <time.h>
-#include <errno.h>
-
 #include <pthread.h>
-#include <semaphore.h>
 
 #include "vm/types.h"
 
-#include "arch.h"
-
-#ifndef USE_MD_THREAD_STUFF
-#include "machine-instr.h"
-#else
-#include "threads/native/generic-primitives.h"
-#endif
-
-#include "mm/boehm.h"
 #include "mm/memory.h"
-#include "native/native.h"
-#include "native/include/java_lang_Object.h"
-#include "native/include/java_lang_Throwable.h"
-#include "native/include/java_lang_Thread.h"
-#include "native/include/java_lang_ThreadGroup.h"
-#include "native/include/java_lang_VMThread.h"
+
+#include "threads/native/lock.h"
 #include "threads/native/threads.h"
-#include "toolbox/avl.h"
-#include "toolbox/logging.h"
-#include "vm/builtin.h"
-#include "vm/exceptions.h"
+
 #include "vm/global.h"
-#include "vm/loader.h"
-#include "vm/options.h"
+#include "vm/exceptions.h"
+#include "vm/finalizer.h"
 #include "vm/stringlocal.h"
 #include "vm/vm.h"
-#include "vm/jit/asmpart.h"
 
-#if !defined(__DARWIN__)
-#if defined(__LINUX__)
-#define GC_LINUX_THREADS
-#elif defined(__MIPS__)
-#define GC_IRIX_THREADS
+#include "vmcore/options.h"
+
+#if defined(ENABLE_STATISTICS)
+# include "vmcore/statistics.h"
+#endif
+
+#if defined(ENABLE_VMLOG)
+#include <vmlog_cacao.h>
+#endif
+
+/* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
+
+#include "arch.h"
+
+/* includes for atomic instructions: */
+
+#if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
+#include "threads/native/generic-primitives.h"
+#else
+#include "machine-instr.h"
 #endif
-#include "boehm-gc/include/gc.h"
+
+#if defined(ENABLE_JVMTI)
+#include "native/jvmti/cacaodbg.h"
 #endif
 
-#ifdef USE_MD_THREAD_STUFF
-pthread_mutex_t _atomic_add_lock = PTHREAD_MUTEX_INITIALIZER;
-pthread_mutex_t _cas_lock = PTHREAD_MUTEX_INITIALIZER;
-pthread_mutex_t _mb_lock = PTHREAD_MUTEX_INITIALIZER;
+#if defined(ENABLE_GC_BOEHM)
+# include "mm/boehm-gc/include/gc.h"
+#endif
+
+
+/******************************************************************************/
+/* DEBUGGING MACROS                                                           */
+/******************************************************************************/
+
+/* #define LOCK_VERBOSE */
+
+#if defined(LOCK_VERBOSE)
+#define LOCK_LOG(args)  do { printf args; fflush(stdout); } while (0)
+#else
+#define LOCK_LOG(args)
 #endif
 
 
@@ -101,30 +94,112 @@ pthread_mutex_t _mb_lock = PTHREAD_MUTEX_INITIALIZER;
 /* MACROS                                                                     */
 /******************************************************************************/
 
-#define INITIALLOCKRECORDS 8
+/* number of lock records in the first pool allocated for a thread */
+#define LOCK_INITIAL_LOCK_RECORDS 8
 
-#define GRAB_LR(lr,t) \
-    if (lr->owner != t) { \
-               lr = lr->incharge; \
-       }
+#define LOCK_INITIAL_HASHTABLE_SIZE  1613  /* a prime roughly halfway between 1024 and 2048 */
 
-#define CHECK_MONITORSTATE(lr,t,mo,a) \
-    if (lr->o != mo || lr->owner != t) { \
-               *exceptionptr = new_illegalmonitorstateexception(); \
-               a; \
-       }
+#define LOCK_HASH(obj)  ((ptrint)(obj))
+
+#define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
+       ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
 
+/* CAUTION: oldvalue is evaluated twice! */
+#define COMPARE_AND_SWAP_SUCCEEDS(address, oldvalue, newvalue) \
+       (compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)) == (long)(oldvalue))
 
+
+/******************************************************************************/
+/* MACROS FOR THE FLAT LOCK CONTENTION BIT                                    */
 /******************************************************************************/
-/* GLOBAL VARIABLES                                                           */
+
+#define LOCK_SET_FLC_BIT(obj)    ((obj)->hdrflags |= HDRFLAG_FLC)
+#define LOCK_CLEAR_FLC_BIT(obj)  ((obj)->hdrflags &= ~ HDRFLAG_FLC)
+#define LOCK_TEST_FLC_BIT(obj)   ((obj)->hdrflags & HDRFLAG_FLC)
+
+
 /******************************************************************************/
+/* MACROS FOR THIN/FAT LOCKS                                                  */
+/******************************************************************************/
+
+/* We use a variant of the tasuki locks described in the paper
+ *     
+ *     Tamiya Onodera, Kiyokuni Kawachiya
+ *     A Study of Locking Objects with Bimodal Fields
+ *     Proceedings of the ACM OOPSLA '99, pp. 223-237
+ *     1999
+ *
+ * The underlying thin locks are a variant of the thin locks described in
+ * 
+ *     Bacon, Konuru, Murthy, Serrano
+ *     Thin Locks: Featherweight Synchronization for Java
+ *        Proceedings of the ACM Conference on Programming Language Design and 
+ *        Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
+ *        June 1998
+ *
+ * In thin lock mode the lockword (monitorPtr) looks like this:
+ *
+ *     ,----------------------,-----------,---,
+ *     |      thread ID       |   count   | 0 |
+ *     `----------------------'-----------'---´
+ *
+ *     thread ID......the 'index' of the owning thread, or 0
+ *     count..........number of times the lock has been entered minus 1
+ *     0..............the shape bit is 0 in thin lock mode
+ *
+ * In fat lock mode it is basically a lock_record_t *:
+ *
+ *     ,----------------------------------,---,
+ *     |    lock_record_t * (without LSB) | 1 |
+ *     `----------------------------------'---´
+ *
+ *     1..............the shape bit is 1 in fat lock mode
+ */
+
+#if SIZEOF_VOID_P == 8
+#define THIN_LOCK_WORD_SIZE    64
+#else
+#define THIN_LOCK_WORD_SIZE    32
+#endif
 
-/* unlocked dummy record - avoids NULL checks */
-static lock_record_t *dummyLR;
+#define THIN_LOCK_SHAPE_BIT    0x01
 
-pthread_mutex_t lock_global_pool_lock;
-lock_record_pool_t *lock_global_pool;
+#define THIN_UNLOCKED          0
 
+#define THIN_LOCK_COUNT_SHIFT  1
+#define THIN_LOCK_COUNT_SIZE   8
+#define THIN_LOCK_COUNT_INCR   (1 << THIN_LOCK_COUNT_SHIFT)
+#define THIN_LOCK_COUNT_MAX    ((1 << THIN_LOCK_COUNT_SIZE) - 1)
+#define THIN_LOCK_COUNT_MASK   (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
+
+#define THIN_LOCK_TID_SHIFT    (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
+#define THIN_LOCK_TID_SIZE     (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
+
+#define IS_THIN_LOCK(lockword)  (!((lockword) & THIN_LOCK_SHAPE_BIT))
+#define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)
+
+#define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
+#define MAKE_FAT_LOCK(ptr)      ((ptrint)(ptr) | THIN_LOCK_SHAPE_BIT)
+
+#define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
+
+
+/* global variables ***********************************************************/
+
+/* hashtable mapping objects to lock records */
+static lock_hashtable_t lock_hashtable;
+
+
+/******************************************************************************/
+/* PROTOTYPES                                                                 */
+/******************************************************************************/
+
+static void lock_hashtable_init(void);
+
+static void lock_record_enter(threadobject *t, lock_record_t *lr);
+static void lock_record_exit(threadobject *t, lock_record_t *lr);
+static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
+static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
 
 
 /*============================================================================*/
@@ -140,216 +215,373 @@ lock_record_pool_t *lock_global_pool;
 
 void lock_init(void)
 {
-       pthread_mutex_init(&lock_global_pool_lock, NULL);
+       /* initialize lock hashtable */
 
-       /* Every newly created object's monitorPtr points here so we save
-          a check against NULL */
+       lock_hashtable_init();
 
-       dummyLR = NEW(lock_record_t);
-       dummyLR->o = NULL;
-       dummyLR->owner = NULL;
-       dummyLR->waiting = NULL;
-       dummyLR->incharge = dummyLR;
+#if defined(ENABLE_VMLOG)
+       vmlog_cacao_init_lock();
+#endif
 }
 
 
-/* lock_record_init ************************************************************
+/* lock_pre_compute_thinlock ***************************************************
 
-   Initialize a lock record.
+   Pre-compute the thin lock value for a thread index.
 
    IN:
-      r............the lock record to initialize
-         t............will become the owner
+      index........the thread index (>= 1)
+
+   RETURN VALUE:
+      the thin lock value for this thread index
 
 *******************************************************************************/
 
-static void lock_record_init(lock_record_t *r, threadobject *t)
+ptrint lock_pre_compute_thinlock(s4 index)
 {
-       r->lockCount = 1;
-       r->owner = t;
-       r->queuers = 0;
-       r->o = NULL;
-       r->waiter = NULL;
-       r->incharge = (lock_record_t *) &dummyLR;
-       r->waiting = NULL;
-       threads_sem_init(&r->queueSem, 0, 0);
-       pthread_mutex_init(&r->resolveLock, NULL);
-       pthread_cond_init(&r->resolveWait, NULL);
+       return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
+}
+
+
+/* lock_record_new *************************************************************
+
+   Allocate a lock record.
+
+*******************************************************************************/
+
+static lock_record_t *lock_record_new(void)
+{
+       lock_record_t *lr;
+
+       /* allocate the data structure on the C heap */
+
+       lr = NEW(lock_record_t);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_record += sizeof(lock_record_t);
+#endif
+
+       /* initialize the members */
+
+       lr->object  = NULL;
+       lr->owner   = NULL;
+       lr->count   = 0;
+       lr->waiters = NULL;
+
+       /* initialize the mutex */
+
+       pthread_mutex_init(&(lr->mutex), NULL);
+
+       return lr;
 }
 
 
-/* lock_init_execution_env *****************************************************
+/* lock_record_free ************************************************************
 
-   Initialize the execution environment for a thread.
+   Free a lock record.
 
    IN:
-      thread.......the thread
+       lr....lock record to free
 
 *******************************************************************************/
 
-void lock_init_execution_env(threadobject *thread)
+static void lock_record_free(lock_record_t *lr)
 {
-       thread->ee.firstLR = NULL;
-       thread->ee.lrpool = NULL;
-       thread->ee.numlr = 0;
-}
+#if 0
+       /* check the members */
+
+       lr->object  = o;
+       lr->owner   = NULL;
+       lr->count   = 0;
+       lr->waiters = NULL;
+#endif
+
+       /* destroy the mutex */
+
+       pthread_mutex_destroy(&(lr->mutex));
 
+       /* free the data structure */
+
+       FREE(lr, lock_record_t);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_record -= sizeof(lock_record_t);
+#endif
+}
 
 
 /*============================================================================*/
-/* LOCK RECORD MANAGEMENT                                                     */
+/* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS                                  */
 /*============================================================================*/
 
+/* lock_hashtable_init *********************************************************
 
-/* lock_record_alloc_new_pool **************************************************
+   Initialize the global hashtable mapping objects to lock records.
 
-   Get a new lock record pool from the memory allocator.
+*******************************************************************************/
 
-   IN:
-      thread.......the thread that will own the lock records
-         size.........number of lock records in the pool to allocate
+static void lock_hashtable_init(void)
+{
+       pthread_mutex_init(&(lock_hashtable.mutex), NULL);
+
+       lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
+       lock_hashtable.entries = 0;
+       lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
+#endif
+
+       MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
+}
 
-   RETURN VALUE:
-      the new lock record pool, with initialized lock records
+
+/* lock_hashtable_grow *********************************************************
+
+   Grow the lock record hashtable to about twice its current size and
+   rehash the entries.
 
 *******************************************************************************/
 
-static lock_record_pool_t *lock_record_alloc_new_pool(threadobject *thread, int size)
+/* must be called with hashtable mutex locked */
+static void lock_hashtable_grow(void)
 {
-       lock_record_pool_t *p = mem_alloc(sizeof(lock_record_pool_header_t)
-                                                                               + sizeof(lock_record_t) * size);
-       int i;
-
-       p->header.size = size;
-       for (i=0; i<size; i++) {
-               lock_record_init(&p->lr[i], thread);
-               p->lr[i].nextFree = &p->lr[i+1];
+       u4 oldsize;
+       u4 newsize;
+       lock_record_t **oldtable;
+       lock_record_t **newtable;
+       lock_record_t *lr;
+       lock_record_t *next;
+       u4 i;
+       u4 h;
+       u4 newslot;
+
+       /* allocate a new table */
+
+       oldsize = lock_hashtable.size;
+       newsize = oldsize*2 + 1; /* XXX should use prime numbers */
+
+       LOCK_LOG(("growing lock hashtable to size %d\n", newsize));
+
+       oldtable = lock_hashtable.ptr;
+       newtable = MNEW(lock_record_t *, newsize);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_hashtable += sizeof(lock_record_t *) * newsize;
+#endif
+
+       MZERO(newtable, lock_record_t *, newsize);
+
+       /* rehash the entries */
+
+       for (i = 0; i < oldsize; i++) {
+               lr = oldtable[i];
+               while (lr) {
+                       next = lr->hashlink;
+
+                       h = LOCK_HASH(lr->object);
+                       newslot = h % newsize;
+
+                       lr->hashlink = newtable[newslot];
+                       newtable[newslot] = lr;
+
+                       lr = next;
+               }
        }
-       p->lr[i-1].nextFree = NULL;
-       return p;
+
+       /* replace the old table */
+
+       lock_hashtable.ptr  = newtable;
+       lock_hashtable.size = newsize;
+
+       MFREE(oldtable, lock_record_t *, oldsize);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
+#endif
 }
 
 
-/* lock_record_alloc_pool ******************************************************
+/* lock_hashtable_get **********************************************************
 
-   Allocate a lock record pool. The pool is either taken from the global free
-   list or requested from the memory allocator.
+   Find the lock record for the given object.  If it does not exist
+   yet, create it and enter it in the hashtable.
 
    IN:
-      thread.......the thread that will own the lock records
-         size.........number of lock records in the pool to allocate
+         o....the object to look up
 
    RETURN VALUE:
-      the new lock record pool, with initialized lock records
+      the lock record to use for this object
 
 *******************************************************************************/
 
-static lock_record_pool_t *lock_record_alloc_pool(threadobject *t, int size)
+#if defined(ENABLE_GC_BOEHM)
+static void lock_record_finalizer(void *object, void *p);
+#endif
+
+static lock_record_t *lock_hashtable_get(java_object_t *o)
 {
-       pthread_mutex_lock(&lock_global_pool_lock);
+       ptrint         lockword;
+       u4             slot;
+       lock_record_t *lr;
+
+       lockword = (ptrint) o->monitorPtr;
+
+       if (IS_FAT_LOCK(lockword))
+               return GET_FAT_LOCK(lockword);
 
-       if (lock_global_pool) {
-               int i;
-               lock_record_pool_t *pool = lock_global_pool;
-               lock_global_pool = pool->header.next;
+       /* lock the hashtable */
 
-               pthread_mutex_unlock(&lock_global_pool_lock);
+       pthread_mutex_lock(&(lock_hashtable.mutex));
 
-               for (i=0; i < pool->header.size; i++) {
-                       pool->lr[i].owner = t;
-                       pool->lr[i].nextFree = &pool->lr[i+1];
+       /* lookup the lock record in the hashtable */
+
+       slot = LOCK_HASH(o) % lock_hashtable.size;
+       lr   = lock_hashtable.ptr[slot];
+
+       for (; lr != NULL; lr = lr->hashlink) {
+               if (lr->object == o) {
+                       pthread_mutex_unlock(&(lock_hashtable.mutex));
+                       return lr;
                }
-               pool->lr[i-1].nextFree = NULL;
+       }
+
+       /* not found, we must create a new one */
+
+       lr = lock_record_new();
+
+       lr->object = o;
+
+#if defined(ENABLE_GC_BOEHM)
+       /* register new finalizer to clean up the lock record */
+
+       GC_REGISTER_FINALIZER(o, lock_record_finalizer, 0, 0, 0);
+#endif
+
+       LOCK_LOG(("thread %d allocated for %p new lr %p\n",
+                         t->index, (void*) o, (void*) lr));
+
+       /* enter it in the hashtable */
+
+       lr->hashlink             = lock_hashtable.ptr[slot];
+       lock_hashtable.ptr[slot] = lr;
+       lock_hashtable.entries++;
+
+       /* check whether the hash should grow */
 
-               return pool;
+       if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
+               lock_hashtable_grow();
        }
 
-       pthread_mutex_unlock(&lock_global_pool_lock);
+       /* unlock the hashtable */
 
-       return lock_record_alloc_new_pool(t, size);
+       pthread_mutex_unlock(&(lock_hashtable.mutex));
+
+       /* return the new lock record */
+
+       return lr;
 }
 
 
-/* lock_record_free_pools ******************************************************
+/* lock_hashtable_remove *******************************************************
 
-   Free the lock record pools in the given linked list.
+   Remove the lock record for the given object from the hashtable.
 
    IN:
-      pool.........list header
+       o....the object to look up
 
 *******************************************************************************/
 
-void lock_record_free_pools(lock_record_pool_t *pool)
+static void lock_hashtable_remove(java_object_t *o)
 {
-       lock_record_pool_header_t *last;
+       ptrint         lockword;
+       lock_record_t *lr;
+       u4             slot;
+       lock_record_t *tmplr;
 
-       pthread_mutex_lock(&lock_global_pool_lock);
+       /* lock the hashtable */
 
-       last = &pool->header;
-       while (last->next)
-               last = &last->next->header;
-       last->next = lock_global_pool;
-       lock_global_pool = pool;
+       pthread_mutex_lock(&(lock_hashtable.mutex));
 
-       pthread_mutex_unlock(&lock_global_pool_lock);
-}
+       /* get lock record */
 
+       lockword = (ptrint) o->monitorPtr;
 
-/* lock_record_alloc ***********************************************************
+       assert(IS_FAT_LOCK(lockword));
 
-   Allocate a lock record which is owned by the given thread.
+       lr = GET_FAT_LOCK(lockword);
 
-   IN:
-      t............the thread 
+       /* remove the lock-record from the hashtable */
 
-*******************************************************************************/
+       slot  = LOCK_HASH(o) % lock_hashtable.size;
+       tmplr = lock_hashtable.ptr[slot];
 
-static lock_record_t *lock_record_alloc(threadobject *t)
-{
-       lock_record_t *r;
-
-       assert(t);
-       r = t->ee.firstLR;
-
-       if (!r) {
-               int poolsize = t->ee.numlr ? t->ee.numlr * 2 : INITIALLOCKRECORDS;
-               lock_record_pool_t *pool = lock_record_alloc_pool(t, poolsize);
-               pool->header.next = t->ee.lrpool;
-               t->ee.lrpool = pool;
-               r = &pool->lr[0];
-               t->ee.numlr += pool->header.size;
+       if (tmplr == lr) {
+               /* special handling if it's the first in the chain */
+
+               lock_hashtable.ptr[slot] = lr->hashlink;
        }
+       else {
+               for (; tmplr != NULL; tmplr = tmplr->hashlink) {
+                       if (tmplr->hashlink == lr) {
+                               tmplr->hashlink = lr->hashlink;
+                               break;
+                       }
+               }
 
-       t->ee.firstLR = r->nextFree;
-#ifndef NDEBUG
-       r->nextFree = NULL; /* in order to find invalid uses of nextFree */
-#endif
-       return r;
-}
+               assert(tmplr != NULL);
+       }
 
+       /* decrease entry count */
 
-/* lock_record_recycle *********************************************************
+       lock_hashtable.entries--;
 
-   Recycle the given lock record. It will be inserted in the appropriate
-   free list.
+       /* unlock the hashtable */
+
+       pthread_mutex_unlock(&(lock_hashtable.mutex));
+}
 
-   IN:
-      t............the owner
-         r............lock record to recycle
+
+/* lock_record_finalizer *******************************************************
+
+   XXX Remove me for exact GC.
 
 *******************************************************************************/
 
-static inline void lock_record_recycle(threadobject *t, lock_record_t *r)
+static void lock_record_finalizer(void *object, void *p)
 {
-       assert(t);
-       assert(r);
-       assert(r->owner == t);
-       assert(r->nextFree == NULL);
+       java_object_t *o;
+       ptrint         lockword;
+       lock_record_t *lr;
 
-       r->nextFree = t->ee.firstLR;
-       t->ee.firstLR = r;
-}
+       o = (java_object_t *) object;
+
+       /* check for a finalizer function */
+
+       if (o->vftbl->class->finalizer != NULL)
+               finalizer_run(object, p);
+
+       /* remove the lock-record entry from the hashtable */
+
+       lock_hashtable_remove(o);
+
+       /* get lock record */
 
+       lockword = (ptrint) o->monitorPtr;
+
+       assert(IS_FAT_LOCK(lockword));
+
+       lr = GET_FAT_LOCK(lockword);
+
+       /* now release the lock record */
+
+       lock_record_free(lr);
+}
 
 
 /*============================================================================*/
@@ -364,17 +596,18 @@ static inline void lock_record_recycle(threadobject *t, lock_record_t *r)
 
 *******************************************************************************/
 
-void lock_init_object_lock(java_objectheader *o)
+void lock_init_object_lock(java_object_t *o)
 {
        assert(o);
 
-       o->monitorPtr = dummyLR;
+       o->monitorPtr = (lock_record_t *) THIN_UNLOCKED;
+       LOCK_CLEAR_FLC_BIT(o);
 }
 
 
 /* lock_get_initial_lock_word **************************************************
 
-   Returns the global dummy monitor lock record. The pointer is
+   Returns the initial (unlocked) lock word. The pointer is
    required in the code generator to set up a virtual
    java_objectheader for code patch locking.
 
@@ -382,7 +615,7 @@ void lock_init_object_lock(java_objectheader *o)
 
 lock_record_t *lock_get_initial_lock_word(void)
 {
-       return dummyLR;
+       return (lock_record_t *) THIN_UNLOCKED;
 }
 
 
@@ -392,55 +625,99 @@ lock_record_t *lock_get_initial_lock_word(void)
 /*============================================================================*/
 
 
-/* lock_queue_on_lock_record ***************************************************
+/* lock_record_enter ***********************************************************
+
+   Enter the lock represented by the given lock record.
 
-   Suspend the current thread and queue it on the given lock record.
+   IN:
+      t.................the current thread
+         lr................the lock record
 
 *******************************************************************************/
 
-static void lock_queue_on_lock_record(lock_record_t *lr, java_objectheader *o)
+static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
 {
-       atomic_add(&lr->queuers, 1);
-       MEMORY_BARRIER_AFTER_ATOMIC();
+       pthread_mutex_lock(&(lr->mutex));
 
-       if (lr->o == o)
-               threads_sem_wait(&lr->queueSem);
-
-       atomic_add(&lr->queuers, -1);
+       lr->owner = t;
 }
 
 
-/* lock_record_release *********************************************************
+/* lock_record_exit ************************************************************
+
+   Release the lock represented by the given lock record.
+
+   IN:
+      t.................the current thread
+         lr................the lock record
 
-   Release the lock held by the given lock record. Threads queueing on the
-   semaphore of the record will be woken up.
+   PRE-CONDITION:
+      The current thread must own the lock represented by this lock record.
+         This is NOT checked by this function!
 
 *******************************************************************************/
 
-static void lock_record_release(lock_record_t *lr)
+static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
 {
-       int q;
-       lr->o = NULL;
-       MEMORY_BARRIER();
-       q = lr->queuers;
-       while (q--)
-               threads_sem_post(&lr->queueSem);
+       lr->owner = NULL;
+       pthread_mutex_unlock(&(lr->mutex));
 }
 
 
-static inline void lock_handle_waiter(lock_record_t *newlr,
-                                                                         lock_record_t *curlr,
-                                                                         java_objectheader *o)
+/* lock_inflate ****************************************************************
+
+   Inflate the lock of the given object. This may only be called by the
+   owner of the monitor of the object.
+
+   IN:
+      t............the current thread
+         o............the object of which to inflate the lock
+         lr...........the lock record to install. The current thread must
+                      own the lock of this lock record!
+
+   PRE-CONDITION:
+      The current thread must be the owner of this object's monitor AND
+         of the lock record's lock!
+
+*******************************************************************************/
+
+static void lock_inflate(threadobject *t, java_object_t *o, lock_record_t *lr)
 {
-       /* if the current lock record is used for waiting on the object */
-       /* `o`, then record it as a waiter in the new lock record       */
+       ptrint lockword;
+
+       /* get the current lock count */
+
+       lockword = (ptrint) o->monitorPtr;
+
+       if (IS_FAT_LOCK(lockword)) {
+               assert(GET_FAT_LOCK(lockword) == lr);
+       }
+       else {
+               assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );
+
+               /* copy the count from the thin lock */
 
-       if (curlr->waiting == o)
-               newlr->waiter = curlr;
+               lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
+       }
+
+       LOCK_LOG(("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
+                       t->index, (void*) o, (long)o->monitorPtr, (int)lr->count));
+
+       /* clear flat-lock-contention bit */
+
+       LOCK_CLEAR_FLC_BIT(o);
+
+       /* notify waiting objects */
+
+       lock_record_notify(t, lr, false);
+
+       /* install it */
+
+       o->monitorPtr = (lock_record_t *) MAKE_FAT_LOCK(lr);
 }
 
 
-/* lock_monitor_enter ****************************************************************
+/* lock_monitor_enter **********************************************************
 
    Acquire the monitor of the given object. If the current thread already
    owns the monitor, the lock counter is simply increased.
@@ -452,116 +729,146 @@ static inline void lock_handle_waiter(lock_record_t *newlr,
          o............the object of which to enter the monitor
 
    RETURN VALUE:
-      the new lock record of the object when it has been entered
+      true.........the lock has been successfully acquired
+         false........an exception has been thrown
 
 *******************************************************************************/
 
-lock_record_t *lock_monitor_enter(threadobject *t, java_objectheader *o)
+bool lock_monitor_enter(java_object_t *o)
 {
-       for (;;) {
-               lock_record_t *lr = o->monitorPtr;
-               if (lr->o != o) {
-                       /* the lock record does not lock this object */
-                       lock_record_t *nlr;
-                       lock_record_t *mlr;
-
-                       /* allocate a new lock record for this object */
-                       mlr     = lock_record_alloc(t);
-                       mlr->o = o;
-
-                       /* check if it is the same record the object refered to earlier */
-                       if (mlr == lr) {
-                               MEMORY_BARRIER();
-                               nlr = o->monitorPtr;
-                               if (nlr == lr) {
-                                       /* the object still refers to the same lock record */
-                                       /* got it! */
-                                       lock_handle_waiter(mlr, lr, o);
-                                       return mlr;
-                               }
-                       }
-                       else {
-                               /* no, it's another lock record */
-                               /* if we don't own the old record, set incharge XXX */
-                               if (lr->owner != t)
-                                       mlr->incharge = lr;
-
-                               /* if the object still refers to lr, replace it by the new mlr */
-                               MEMORY_BARRIER_BEFORE_ATOMIC();
-                               nlr = (lock_record_t *) compare_and_swap((long*) &o->monitorPtr, (long) lr, (long) mlr);
-                       }
+       threadobject  *t;
+       /* CAUTION: This code assumes that ptrint is unsigned! */
+       ptrint         lockword;
+       ptrint         thinlock;
+       lock_record_t *lr;
 
-                       if (nlr == lr) {
-                               /* we swapped the new record in successfully */
-                               if (mlr == lr || lr->o != o) {
-                                       /* the old lock record is the same as the new one, or */
-                                       /* it locks another object.                           */
-                                       /* got it! */
-                                       lock_handle_waiter(mlr, lr, o);
-                                       return mlr;
-                               }
-                               /* lr locks the object, we have to wait */
-                               while (lr->o == o)
-                                       lock_queue_on_lock_record(lr, o);
-
-                               /* got it! */
-                               lock_handle_waiter(mlr, lr, o);
-                               return mlr;
-                       }
+       if (o == NULL) {
+               exceptions_throw_nullpointerexception();
+               return false;
+       }
+
+       t = THREADOBJECT;
+
+       thinlock = t->thinlock;
+
+       /* most common case: try to thin-lock an unlocked object */
+
+       if ((lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) == THIN_UNLOCKED) {
+               /* success. we locked it */
+               /* The Java Memory Model requires a memory barrier here: */
+               MEMORY_BARRIER();
+               return true;
+       }
+
+       /* next common case: recursive lock with small recursion count */
+       /* We don't have to worry about stale values here, as any stale value  */
+       /* will indicate another thread holding the lock (or an inflated lock) */
+
+       if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
+               /* we own this monitor               */
+               /* check the current recursion count */
+
+               if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
+               {
+                       /* the recursion count is low enough */
+
+                       o->monitorPtr = (lock_record_t *) (lockword + THIN_LOCK_COUNT_INCR);
 
-                       /* forget this mlr lock record, wait on nlr and try again */
-                       lock_record_release(mlr);
-                       lock_record_recycle(t, mlr);
-                       lock_queue_on_lock_record(nlr, o);
+                       /* success. we locked it */
+                       return true;
                }
                else {
-                       /* the lock record is for the object we want */
+                       /* recursion count overflow */
 
-                       if (lr->owner == t) {
-                               /* we own it already, just recurse */
-                               lr->lockCount++;
-                               return lr;
-                       }
+                       lr = lock_hashtable_get(o);
+                       lock_record_enter(t, lr);
+                       lock_inflate(t, o, lr);
+                       lr->count++;
 
-                       /* it's locked. we wait and then try again */
-                       lock_queue_on_lock_record(lr, o);
+                       return true;
                }
        }
-}
 
+       /* the lock is either contended or fat */
 
-/* lock_wake_waiters ********************************************************
+       if (IS_FAT_LOCK(lockword)) {
 
-   For each lock record in the given waiter list, post the queueSem
-   once for each queuer of the lock record.
+               lr = GET_FAT_LOCK(lockword);
 
-   IN:
-      lr...........the head of the waiter list
+               /* check for recursive entering */
+               if (lr->owner == t) {
+                       lr->count++;
+                       return true;
+               }
 
-*******************************************************************************/
+               /* acquire the mutex of the lock record */
 
-static void lock_wake_waiters(lock_record_t *lr)
-{
-       lock_record_t *tmplr;
-       s4 q;
+               lock_record_enter(t, lr);
+
+               assert(lr->count == 0);
+
+               return true;
+       }
+
+       /****** inflation path ******/
+
+       /* first obtain the lock record for this object */
 
-       /* move it to a local variable (Stefan commented this especially.
-        * Might be important somehow...) */
+       lr = lock_hashtable_get(o);
 
-       tmplr = lr;
+#if defined(ENABLE_JVMTI)
+       /* Monitor Contended Enter */
+       jvmti_MonitorContendedEntering(false, o);
+#endif
+
+       /* enter the monitor */
+
+       lock_record_enter(t, lr);
+
+#if defined(ENABLE_JVMTI)
+       /* Monitor Contended Entered */
+       jvmti_MonitorContendedEntering(true, o);
+#endif
+
+       /* inflation loop */
+
+       while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {
+               /* Set the flat lock contention bit to let the owning thread
+                  know that we want to be notified of unlocking. */
+
+               LOCK_SET_FLC_BIT(o);
+
+               LOCK_LOG(("thread %d set flc bit on %p lr %p\n",
+                                 t->index, (void*) o, (void*) lr));
+
+               /* try to lock the object */
+
+               if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
+                       /* we can inflate the lock ourselves */
+
+                       LOCK_LOG(("thread %d inflating lock of %p to lr %p\n",
+                                         t->index, (void*) o, (void*) lr));
+
+                       lock_inflate(t, o, lr);
+               }
+               else {
+                       /* wait until another thread sees the flc bit and notifies
+                          us of unlocking */
 
-       do {
-               q = tmplr->queuers;
+                       LOCK_LOG(("thread %d waiting for notification on %p lr %p\n",
+                                         t->index, (void*) o, (void*) lr));
 
-               while (q--)
-                       threads_sem_post(&tmplr->queueSem);
+                       lock_record_wait(t, lr, 0, 0);
+               }
+       }
 
-               tmplr = tmplr->waiter;
-       } while (tmplr != NULL && tmplr != lr); /* this breaks cycles to lr */
+       /* we own the inflated lock now */
+
+       return true;
 }
 
 
-/* lock_monitor_exit *****************************************************************
+/* lock_monitor_exit ***********************************************************
 
    Decrement the counter of a (currently owned) monitor. If the counter
    reaches zero, release the monitor.
@@ -579,177 +886,341 @@ static void lock_wake_waiters(lock_record_t *lr)
 
 *******************************************************************************/
 
-bool lock_monitor_exit(threadobject *t, java_objectheader *o)
+bool lock_monitor_exit(java_object_t *o)
 {
-       lock_record_t *lr;
+       threadobject *t;
+       ptrint        lockword;
+       ptrint        thinlock;
+
+       if (o == NULL) {
+               exceptions_throw_nullpointerexception();
+               return false;
+       }
+
+       t = THREADOBJECT;
+
+       /* We don't have to worry about stale values here, as any stale value */
+       /* will indicate that we don't own the lock.                          */
+
+       lockword = (ptrint) o->monitorPtr;
+       thinlock = t->thinlock;
+
+       /* most common case: we release a thin lock that we hold once */
 
-       lr = o->monitorPtr;
-       GRAB_LR(lr, t);
-       CHECK_MONITORSTATE(lr, t, o, return false);
+       if (lockword == thinlock) {
+               /* memory barrier for Java Memory Model */
+               MEMORY_BARRIER();
+               o->monitorPtr = THIN_UNLOCKED;
+               /* memory barrier for thin locking */
+               MEMORY_BARRIER();
 
-       /* { the current thread `t` owns the lock record `lr` on object `o` } */
+               /* check if there has been a flat lock contention on this object */
+
+               if (LOCK_TEST_FLC_BIT(o)) {
+                       lock_record_t *lr;
+
+                       LOCK_LOG(("thread %d saw flc bit on %p %s\n",
+                                       t->index, (void*) o, o->vftbl->class->name->text));
+
+                       /* there has been a contention on this thin lock */
+
+                       lr = lock_hashtable_get(o);
+
+                       LOCK_LOG(("thread %d for %p got lr %p\n",
+                                       t->index, (void*) o, (void*) lr));
+
+                       lock_record_enter(t, lr);
+
+                       if (LOCK_TEST_FLC_BIT(o)) {
+                               /* notify a thread that it can try to inflate the lock now */
+
+                               lock_record_notify(t, lr, true);
+                       }
+
+                       lock_record_exit(t, lr);
+               }
 
-       if (lr->lockCount > 1) {
-               /* we had locked this one recursively. just decrement, it will */
-               /* still be locked. */
-               lr->lockCount--;
                return true;
        }
 
-       /* we are going to unlock and recycle this lock record */
+       /* next common case: we release a recursive lock, count > 0 */
 
-       if (lr->waiter) {
-               lock_record_t *wlr = lr->waiter;
-               if (o->monitorPtr != lr ||
-                       (void*) compare_and_swap((long*) &o->monitorPtr, (long) lr, (long) wlr) != lr)
-               {
-                       lock_record_t *nlr = o->monitorPtr;
-                       assert(nlr->waiter == NULL);
-                       nlr->waiter = wlr; /* XXX is it ok to overwrite the nlr->waiter field like that? */
-                       STORE_ORDER_BARRIER();
+       if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
+               o->monitorPtr = (lock_record_t *) (lockword - THIN_LOCK_COUNT_INCR);
+               return true;
+       }
+
+       /* either the lock is fat, or we don't hold it at all */
+
+       if (IS_FAT_LOCK(lockword)) {
+
+               lock_record_t *lr;
+
+               lr = GET_FAT_LOCK(lockword);
+
+               /* check if we own this monitor */
+               /* We don't have to worry about stale values here, as any stale value */
+               /* will be != t and thus fail this check.                             */
+
+               if (lr->owner != t) {
+                       exceptions_throw_illegalmonitorstateexception();
+                       return false;
                }
-               else {
-                       lock_wake_waiters(wlr);
+
+               /* { the current thread `t` owns the lock record `lr` on object `o` } */
+
+               if (lr->count != 0) {
+                       /* we had locked this one recursively. just decrement, it will */
+                       /* still be locked. */
+                       lr->count--;
+                       return true;
                }
-               lr->waiter = NULL;
+
+               /* unlock this lock record */
+
+               lr->owner = NULL;
+               pthread_mutex_unlock(&(lr->mutex));
+
+               return true;
        }
 
-       /* unlock and throw away this lock record */
-       lock_record_release(lr);
-       lock_record_recycle(t, lr);
-       return true;
+       /* legal thin lock cases have been handled above, so this is an error */
+
+       exceptions_throw_illegalmonitorstateexception();
+
+       return false;
 }
 
 
-/* lock_record_remove_waiter *******************************************************
+/* lock_record_add_waiter ******************************************************
 
-   Remove a waiter lock record from the waiter list of the given lock record
+   Add a thread to the list of waiting threads of a lock record.
 
    IN:
-      lr...........the lock record holding the waiter list
-         toremove.....the record to remove from the list
+      lr...........the lock record
+      thread.......the thread to add
 
 *******************************************************************************/
 
-static void lock_record_remove_waiter(lock_record_t *lr,
-                                                                         lock_record_t *toremove)
+static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
 {
-       do {
-               if (lr->waiter == toremove) {
-                       lr->waiter = toremove->waiter;
-                       break;
+       lock_waiter_t *waiter;
+
+       /* allocate a waiter data structure */
+
+       waiter = NEW(lock_waiter_t);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_waiter += sizeof(lock_waiter_t);
+#endif
+
+       waiter->waiter = thread;
+       waiter->next   = lr->waiters;
+
+       lr->waiters = waiter;
+}
+
+
+/* lock_record_remove_waiter ***************************************************
+
+   Remove a thread from the list of waiting threads of a lock record.
+
+   IN:
+      lr...........the lock record
+      t............the current thread
+
+   PRE-CONDITION:
+      The current thread must be the owner of the lock record.
+   
+*******************************************************************************/
+
+static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
+{
+       lock_waiter_t **link;
+       lock_waiter_t  *w;
+
+       link = &(lr->waiters);
+
+       while ((w = *link)) {
+               if (w->waiter == thread) {
+                       *link = w->next;
+
+                       /* free the waiter data structure */
+
+                       FREE(w, lock_waiter_t);
+
+#if defined(ENABLE_STATISTICS)
+                       if (opt_stat)
+                               size_lock_waiter -= sizeof(lock_waiter_t);
+#endif
+
+                       return;
                }
-               lr = lr->waiter;
-       } while (lr); /* XXX need to break cycle? */
+
+               link = &(w->next);
+       }
+
+       /* this should never happen */
+
+       vm_abort("lock_record_remove_waiter: waiting thread not found in list of waiters\n");
 }
 
 
-/* lock_monitor_wait *****************************************************************
+/* lock_record_wait ************************************************************
 
-   Wait on an object for a given (maximum) amount of time.
+   Wait on a lock record for a given (maximum) amount of time.
 
    IN:
       t............the current thread
-         o............the object
+         lr...........the lock record
          millis.......milliseconds of timeout
          nanos........nanoseconds of timeout
 
    PRE-CONDITION:
-      The current thread must be the owner of the object's monitor.
+      The current thread must be the owner of the lock record.
+         This is NOT checked by this function!
    
 *******************************************************************************/
 
-void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nanos)
+static void lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
 {
+       s4   lockcount;
        bool wasinterrupted;
-       lock_record_t *newlr;
-       lock_record_t *lr;
-
-       lr = o->monitorPtr;
-       GRAB_LR(lr, t);
-       CHECK_MONITORSTATE(lr, t, o, return);
 
-       /* { the thread t owns the lock record lr on the object o } */
+       /* { the thread t owns the fat lock record lr on the object o } */
 
-       /* wake threads waiting on this record XXX why? */
+       /* register us as waiter for this object */
 
-       if (lr->waiter)
-               lock_wake_waiters(lr->waiter);
+       lock_record_add_waiter(lr, thread);
 
-       /* mark the lock record as "waiting on object o" */
+       /* remember the old lock count */
 
-       lr->waiting = o;
-       STORE_ORDER_BARRIER();
+       lockcount = lr->count;
 
        /* unlock this record */
 
-       lock_record_release(lr);
+       lr->count = 0;
+       lock_record_exit(thread, lr);
 
        /* wait until notified/interrupted/timed out */
 
-       wasinterrupted = threads_wait_with_timeout_relative(t, millis, nanos);
+       wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);
 
        /* re-enter the monitor */
 
-       newlr = lock_monitor_enter(t, o);
+       lock_record_enter(thread, lr);
 
-       /* we are no longer waiting */
+       /* remove us from the list of waiting threads */
 
-       lock_record_remove_waiter(newlr, lr);
-       newlr->lockCount = lr->lockCount;
+       lock_record_remove_waiter(lr, thread);
 
-       /* recylce the old lock record */
+       /* restore the old lock count */
 
-       lr->lockCount = 1;
-       lr->waiting = NULL;
-       lr->waiter = NULL;
-       lock_record_recycle(t, lr);
+       lr->count = lockcount;
 
        /* if we have been interrupted, throw the appropriate exception */
 
        if (wasinterrupted)
-               *exceptionptr = new_exception(string_java_lang_InterruptedException);
+               exceptions_throw_interruptedexception();
 }
 
 
-/* lock_monitor_notify **************************************************************
+/* lock_monitor_wait ***********************************************************
 
-   Notify one thread or all threads waiting on the given object.
+   Wait on an object for a given (maximum) amount of time.
 
    IN:
       t............the current thread
          o............the object
-         one..........if true, only notify one thread
+         millis.......milliseconds of timeout
+         nanos........nanoseconds of timeout
 
    PRE-CONDITION:
       The current thread must be the owner of the object's monitor.
    
 *******************************************************************************/
 
-static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
+static void lock_monitor_wait(threadobject *t, java_object_t *o, s8 millis, s4 nanos)
 {
+       ptrint         lockword;
        lock_record_t *lr;
-       lock_record_t *wlr;
-       threadobject *wthread;
 
-       lr = o->monitorPtr;
-       GRAB_LR(lr, t);
-       CHECK_MONITORSTATE(lr, t, o, return);
+       lockword = (ptrint) o->monitorPtr;
+
+       /* check if we own this monitor */
+       /* We don't have to worry about stale values here, as any stale value */
+       /* will fail this check.                                              */
+
+       if (IS_FAT_LOCK(lockword)) {
+
+               lr = GET_FAT_LOCK(lockword);
+
+               if (lr->owner != t) {
+                       exceptions_throw_illegalmonitorstateexception();
+                       return;
+               }
+       }
+       else {
+               /* it's a thin lock */
+
+               if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
+                       exceptions_throw_illegalmonitorstateexception();
+                       return;
+               }
+
+               /* inflate this lock */
+
+               lr = lock_hashtable_get(o);
+
+               lock_record_enter(t, lr);
+               lock_inflate(t, o, lr);
+       }
+
+       /* { the thread t owns the fat lock record lr on the object o } */
+
+       lock_record_wait(t, lr, millis, nanos);
+}
+
+
+/* lock_record_notify **********************************************************
+
+   Notify one thread or all threads waiting on the given lock record.
+
+   IN:
+      t............the current thread
+         lr...........the lock record
+         one..........if true, only notify one thread
 
-       /* { the thread t owns the lock record lr on the object o } */
+   PRE-CONDITION:
+      The current thread must be the owner of the lock record.
+         This is NOT checked by this function!
+   
+*******************************************************************************/
+
+static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
+{
+       lock_waiter_t *waiter;
+       threadobject *waitingthread;
+
+       /* { the thread t owns the fat lock record lr on the object o } */
 
        /* for each waiter: */
 
-       for (wlr = lr->waiter; wlr; wlr = wlr->waiter) {
+       for (waiter = lr->waiters; waiter != NULL; waiter = waiter->next) {
 
                /* signal the waiting thread */
 
-               wthread = wlr->owner;
-               pthread_mutex_lock(&wthread->waitLock);
-               if (wthread->isSleeping)
-                       pthread_cond_signal(&wthread->waitCond);
-               wthread->signaled = true;
-               pthread_mutex_unlock(&wthread->waitLock);
+               waitingthread = waiter->waiter;
+
+               pthread_mutex_lock(&waitingthread->waitmutex);
+
+               if (waitingthread->sleeping)
+                       pthread_cond_signal(&waitingthread->waitcond);
+
+               waitingthread->signaled = true;
+
+               pthread_mutex_unlock(&waitingthread->waitmutex);
 
                /* if we should only wake one, we are done */
 
@@ -759,33 +1230,106 @@ static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
 }
 
 
+/* lock_monitor_notify *********************************************************
+
+   Notify one thread or all threads waiting on the given object.
+
+   IN:
+      t............the current thread
+         o............the object
+         one..........if true, only notify one thread
+
+   PRE-CONDITION:
+      The current thread must be the owner of the object's monitor.
+   
+*******************************************************************************/
+
+static void lock_monitor_notify(threadobject *t, java_object_t *o, bool one)
+{
+       ptrint lockword;
+       lock_record_t *lr;
+
+       lockword = (ptrint) o->monitorPtr;
+
+       /* check if we own this monitor */
+       /* We don't have to worry about stale values here, as any stale value */
+       /* will fail this check.                                              */
+
+       if (IS_FAT_LOCK(lockword)) {
+
+               lr = GET_FAT_LOCK(lockword);
+
+               if (lr->owner != t) {
+                       exceptions_throw_illegalmonitorstateexception();
+                       return;
+               }
+       }
+       else {
+               /* it's a thin lock */
+
+               if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
+                       exceptions_throw_illegalmonitorstateexception();
+                       return;
+               }
+
+               /* inflate this lock */
+
+               lr = lock_hashtable_get(o);
+
+               lock_record_enter(t, lr);
+               lock_inflate(t, o, lr);
+       }
+
+       /* { the thread t owns the fat lock record lr on the object o } */
+
+       lock_record_notify(t, lr, one);
+}
+
+
 
 /*============================================================================*/
 /* INQUIRY FUNCIONS                                                           */
 /*============================================================================*/
 
 
-/* lock_does_thread_hold_lock **************************************************
+/* lock_is_held_by_current_thread **********************************************
 
-   Return true if the given thread owns the monitor of the given object.
+   Return true if the current thread owns the monitor of the given object.
 
    IN:
-      t............the thread
          o............the object
-   
-   RETURN VALUE:
-      true, if the thread is locking the object
 
+   RETURN VALUE:
+      true, if the current thread holds the lock of this object.
+   
 *******************************************************************************/
 
-bool lock_does_thread_hold_lock(threadobject *t, java_objectheader *o)
+bool lock_is_held_by_current_thread(java_object_t *o)
 {
+       threadobject  *t;
+       ptrint         lockword;
        lock_record_t *lr;
 
-       lr = o->monitorPtr;
-       GRAB_LR(lr, t);
+       t = THREADOBJECT;
+
+       /* check if we own this monitor */
+       /* We don't have to worry about stale values here, as any stale value */
+       /* will fail this check.                                              */
+
+       lockword = (ptrint) o->monitorPtr;
 
-       return (lr->o == o) && (lr->owner == t);
+       if (IS_FAT_LOCK(lockword)) {
+               /* it's a fat lock */
+
+               lr = GET_FAT_LOCK(lockword);
+
+               return (lr->owner == t);
+       }
+       else {
+               /* it's a thin lock */
+
+               return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
+       }
 }
 
 
@@ -806,10 +1350,13 @@ bool lock_does_thread_hold_lock(threadobject *t, java_objectheader *o)
    
 *******************************************************************************/
 
-void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
+void lock_wait_for_object(java_object_t *o, s8 millis, s4 nanos)
 {
-       threadobject *t = (threadobject*) THREADOBJECT;
-       lock_monitor_wait(t, o, millis, nanos);
+       threadobject *thread;
+
+       thread = THREADOBJECT;
+
+       lock_monitor_wait(thread, o, millis, nanos);
 }
 
 
@@ -822,10 +1369,13 @@ void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
    
 *******************************************************************************/
 
-void lock_notify_object(java_objectheader *o)
+void lock_notify_object(java_object_t *o)
 {
-       threadobject *t = (threadobject*) THREADOBJECT;
-       lock_monitor_notify(t, o, true);
+       threadobject *thread;
+
+       thread = THREADOBJECT;
+
+       lock_monitor_notify(thread, o, true);
 }
 
 
@@ -838,12 +1388,16 @@ void lock_notify_object(java_objectheader *o)
    
 *******************************************************************************/
 
-void lock_notify_all_object(java_objectheader *o)
+void lock_notify_all_object(java_object_t *o)
 {
-       threadobject *t = (threadobject*) THREADOBJECT;
-       lock_monitor_notify(t, o, false);
+       threadobject *thread;
+
+       thread = THREADOBJECT;
+
+       lock_monitor_notify(thread, o, false);
 }
 
+
 /*
  * These are local overrides for various environment variables in Emacs.
  * Please do not remove this and leave it at the end of the file, where