diff --git a/src/threads/native/lock.c b/src/threads/native/lock.c
index 0cf9c7d77c63c0697ad9db2335c515483f0efc7d..6ef203fc82bc2bf6d5820532f3e03fbdfd6e8983 100644
@@ -1,9 +1,7 @@
 /* src/threads/native/lock.c - lock implementation
 
-   Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
-   C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
-   E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
-   J. Wenninger, Institut f. Computersprachen - TU Wien
+   Copyright (C) 1996-2005, 2006, 2007, 2008
+   CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
 
    This file is part of CACAO.
 
    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
    02110-1301, USA.
 
-   Contact: cacao@cacaojvm.org
-
-   Authors: Stefan Ring
-                       Edwin Steiner
-
-   Changes: Christian Thalinger
-
-   $Id: threads.c 4903 2006-05-11 12:48:43Z edwin $
-
 */
 
 
 #include "config.h"
 
-#include <stdlib.h>
-#include <stdio.h>
 #include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
 #include <sys/time.h>
 #include <pthread.h>
 
-#include "mm/memory.h"
 #include "vm/types.h"
+
+#include "mm/memory.h"
+
+#include "native/llni.h"
+
+#include "threads/lock-common.h"
+#include "threads/threadlist.h"
+#include "threads/threads-common.h"
+
+#include "threads/native/lock.h"
+#include "threads/native/threads.h"
+
+#include "toolbox/list.h"
+
 #include "vm/global.h"
 #include "vm/exceptions.h"
+#include "vm/finalizer.h"
 #include "vm/stringlocal.h"
+#include "vm/vm.h"
+
+#include "vmcore/options.h"
+
+#if defined(ENABLE_STATISTICS)
+# include "vmcore/statistics.h"
+#endif
+
+#if defined(ENABLE_VMLOG)
+#include <vmlog_cacao.h>
+#endif
 
 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
 
 #include "machine-instr.h"
 #endif
 
-/* #define LOCK_VERBOSE */
+#if defined(ENABLE_JVMTI)
+#include "native/jvmti/cacaodbg.h"
+#endif
+
+#if defined(ENABLE_GC_BOEHM)
+# include "mm/boehm-gc/include/gc.h"
+#endif
+
+
+/* debug **********************************************************************/
+
+#if !defined(NDEBUG)
+# define DEBUGLOCKS(format) \
+    do { \
+        if (opt_DebugLocks) { \
+            log_println format; \
+        } \
+    } while (0)
+#else
+# define DEBUGLOCKS(format)
+#endif
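
The doubled parentheses at each call site let the macro forward a complete
printf-style argument list to log_println, for example:

    DEBUGLOCKS(("[lock_record_new   : lr=%p]", (void *) lr));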
 
 
 /******************************************************************************/
 /******************************************************************************/
 
 /* number of lock records in the first pool allocated for a thread */
-#define INITIALLOCKRECORDS 8
+#define LOCK_INITIAL_LOCK_RECORDS 8
+
+#define LOCK_INITIAL_HASHTABLE_SIZE  1613  /* a prime roughly halfway between 1024 and 2048 */
 
 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
        ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
 
-/* CAUTION: oldvalue is evaluated twice! */
-#define COMPARE_AND_SWAP_SUCCEEDS(address, oldvalue, newvalue) \
-       (compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)) == (long)(oldvalue))
-
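
compare_and_swap is CACAO's architecture-specific primitive from
machine-instr.h. A minimal sketch of its return-the-old-value semantics,
assuming a GCC toolchain (the builtin is a stand-in, not necessarily what
any port layer actually uses):

    #include <stdint.h>

    /* Sketch only: returns the value seen at *address before the attempted
       swap, mirroring COMPARE_AND_SWAP_OLD_VALUE above, so the caller can
       tell whether the swap took effect. */
    static intptr_t cas_old_value(volatile intptr_t *address,
                                  intptr_t oldvalue, intptr_t newvalue)
    {
        return __sync_val_compare_and_swap(address, oldvalue, newvalue);
    }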
 
 /******************************************************************************/
 /* MACROS FOR THIN/FAT LOCKS                                                  */
 /******************************************************************************/
 
-/* We use a variant of the thin locks described in the paper
+/* We use a variant of the tasuki locks described in the paper
+ *     
+ *     Tamiya Onodera, Kiyokuni Kawachiya
+ *     A Study of Locking Objects with Bimodal Fields
+ *     Proceedings of the ACM OOPSLA '99, pp. 223-237
+ *     1999
+ *
+ * The underlying thin locks are a variant of the thin locks described in
  * 
  *     Bacon, Konuru, Murthy, Serrano
  *     Thin Locks: Featherweight Synchronization for Java
  *        Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
  *        June 1998
  *
- * In thin lock mode the lockword (monitorPtr) looks like this:
+ * In thin lock mode the lockword looks like this:
  *
  *     ,----------------------,-----------,---,
  *     |      thread ID       |   count   | 0 |
 #define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)
 
 #define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
-#define MAKE_FAT_LOCK(ptr)      ((ptrint)(ptr) | THIN_LOCK_SHAPE_BIT)
+#define MAKE_FAT_LOCK(ptr)      ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)
 
 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
+#define GET_THREAD_INDEX(lockword) ((unsigned) (lockword) >> THIN_LOCK_TID_SHIFT)
+
+
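To make the bit layout concrete, here is a self-contained sketch of decoding
a thin-lock word the way the macros above do. The constant values are
illustrative assumptions; the real definitions live in threads/native/lock.h:

    #include <assert.h>
    #include <stdint.h>

    #define SHAPE_BIT   0x01   /* assumed: bit 0 is 0 for thin, 1 for fat */
    #define COUNT_SHIFT 1      /* assumed: recursion count in bits 1..7   */
    #define COUNT_MASK  0xfe
    #define TID_SHIFT   8      /* assumed: thread index in the upper bits */

    int main(void)
    {
        /* thread index 42, recursion count 0, thin shape */
        uintptr_t word = (uintptr_t) 42 << TID_SHIFT;

        assert((word & SHAPE_BIT) == 0);                    /* thin, not fat */
        assert((unsigned) (word >> TID_SHIFT) == 42);       /* owner index   */
        assert(((word & COUNT_MASK) >> COUNT_SHIFT) == 0);  /* first entry   */
        return 0;
    }
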
+/* global variables ***********************************************************/
+
+/* hashtable mapping objects to lock records */
+static lock_hashtable_t lock_hashtable;
 
 
 /******************************************************************************/
-/* GLOBAL VARIABLES                                                           */
+/* PROTOTYPES                                                                 */
 /******************************************************************************/
 
-/* global lock record pool list header */
-lock_record_pool_t *lock_global_pool;
-
-/* mutex for synchronizing access to the global pool */
-pthread_mutex_t lock_global_pool_lock;
+static void lock_hashtable_init(void);
 
+static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
+static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
+static void lock_record_enter(threadobject *t, lock_record_t *lr);
+static void lock_record_exit(threadobject *t, lock_record_t *lr);
+static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
+static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
 
 
 /*============================================================================*/
@@ -162,284 +211,467 @@ pthread_mutex_t lock_global_pool_lock;
 
 void lock_init(void)
 {
-       pthread_mutex_init(&lock_global_pool_lock, NULL);
+       /* initialize lock hashtable */
+
+       lock_hashtable_init();
+
+#if defined(ENABLE_VMLOG)
+       vmlog_cacao_init_lock();
+#endif
 }
 
 
-/* lock_record_init ************************************************************
+/* lock_pre_compute_thinlock ***************************************************
 
-   Initialize a lock record.
+   Pre-compute the thin lock value for a thread index.
 
    IN:
-      r............the lock record to initialize
-         t............will become the owner
+      index........the thread index (>= 1)
+
+   RETURN VALUE:
+      the thin lock value for this thread index
 
 *******************************************************************************/
 
-static void lock_record_init(lock_record_t *r, threadobject *t)
+ptrint lock_pre_compute_thinlock(s4 index)
 {
-       r->owner = t;
-       r->count = 0;
-       r->waiters = NULL;
+       return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
+}
 
-#if !defined(NDEBUG)
-       r->nextfree = NULL;
+
+/* lock_record_new *************************************************************
+
+   Allocate a lock record.
+
+*******************************************************************************/
+
+static lock_record_t *lock_record_new(void)
+{
+       int result;
+       lock_record_t *lr;
+
+       /* allocate the data structure on the C heap */
+
+       lr = NEW(lock_record_t);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_record += sizeof(lock_record_t);
 #endif
 
-       pthread_mutex_init(&(r->mutex), NULL);
+       /* initialize the members */
+
+       lr->object  = NULL;
+       lr->owner   = NULL;
+       lr->count   = 0;
+       lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));
+
+#if defined(ENABLE_GC_CACAO)
+       /* register the lock object as weak reference with the GC */
+
+       gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
+#endif
+
+       /* initialize the mutex */
+
+       result = pthread_mutex_init(&(lr->mutex), NULL);
+       if (result != 0)
+               vm_abort_errnum(result, "lock_record_new: pthread_mutex_init failed");
+
+       DEBUGLOCKS(("[lock_record_new   : lr=%p]", (void *) lr));
+
+       return lr;
 }
 
 
-/* lock_init_execution_env *****************************************************
+/* lock_record_free ************************************************************
 
-   Initialize the execution environment for a thread.
+   Free a lock record.
 
    IN:
-      thread.......the thread
+       lr....lock record to free
 
 *******************************************************************************/
 
-void lock_init_execution_env(threadobject *thread)
+static void lock_record_free(lock_record_t *lr)
 {
-       thread->ee.firstfree = NULL;
-       thread->ee.lockrecordpools = NULL;
-       thread->ee.lockrecordcount = 0;
-}
+       int result;
 
+       DEBUGLOCKS(("[lock_record_free  : lr=%p]", (void *) lr));
 
+       /* Destroy the mutex. */
 
-/* lock_pre_compute_thinlock ***************************************************
+       result = pthread_mutex_destroy(&(lr->mutex));
+       if (result != 0)
+               vm_abort_errnum(result, "lock_record_free: pthread_mutex_destroy failed");
 
-   Pre-compute the thin lock value for a thread index.
+#if defined(ENABLE_GC_CACAO)
+       /* unregister the lock object reference with the GC */
 
-   IN:
-      index........the thead index (>= 1)
+       gc_weakreference_unregister(&(lr->object));
+#endif
 
-   RETURN VALUE:
-      the thin lock value for this thread index
+       /* Free the waiters list. */
 
-*******************************************************************************/
+       list_free(lr->waiters);
 
-ptrint lock_pre_compute_thinlock(s4 index)
-{
-       return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
-}
+       /* Free the data structure. */
 
+       FREE(lr, lock_record_t);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_record -= sizeof(lock_record_t);
+#endif
+}
 
 
 /*============================================================================*/
-/* LOCK RECORD MANAGEMENT                                                     */
+/* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS                                  */
 /*============================================================================*/
 
+/* lock_hashtable_init *********************************************************
 
-/* lock_record_alloc_new_pool **************************************************
+   Initialize the global hashtable mapping objects to lock records.
 
-   Get a new lock record pool from the memory allocator.
+*******************************************************************************/
 
-   IN:
-      thread.......the thread that will own the lock records
-         size.........number of lock records in the pool to allocate
+static void lock_hashtable_init(void)
+{
+       pthread_mutex_init(&(lock_hashtable.mutex), NULL);
+
+       lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
+       lock_hashtable.entries = 0;
+       lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
+#endif
+
+       MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
+}
 
-   RETURN VALUE:
-      the new lock record pool, with initialized lock records
+
+/* lock_hashtable_grow *********************************************************
+
+   Grow the lock record hashtable to about twice its current size and
+   rehash the entries.
 
 *******************************************************************************/
 
-static lock_record_pool_t *lock_record_alloc_new_pool(threadobject *thread, int size)
+/* must be called with hashtable mutex locked */
+static void lock_hashtable_grow(void)
 {
-       int i;
-       lock_record_pool_t *pool;
+       u4 oldsize;
+       u4 newsize;
+       lock_record_t **oldtable;
+       lock_record_t **newtable;
+       lock_record_t *lr;
+       lock_record_t *next;
+       u4 i;
+       u4 h;
+       u4 newslot;
 
-       /* get the pool from the memory allocator */
+       /* allocate a new table */
 
-       pool = mem_alloc(sizeof(lock_record_pool_header_t)
-                                  + sizeof(lock_record_t) * size);
+       oldsize = lock_hashtable.size;
+       newsize = oldsize*2 + 1; /* XXX should use prime numbers */
 
-       /* initialize the pool header */
+       DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
 
-       pool->header.size = size;
+       oldtable = lock_hashtable.ptr;
+       newtable = MNEW(lock_record_t *, newsize);
 
-       /* initialize the individual lock records */
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_hashtable += sizeof(lock_record_t *) * newsize;
+#endif
 
-       for (i=0; i<size; i++) {
-               lock_record_init(&pool->lr[i], thread);
+       MZERO(newtable, lock_record_t *, newsize);
 
-               pool->lr[i].nextfree = &pool->lr[i+1];
-       }
+       /* rehash the entries */
 
-       /* terminate free list */
+       for (i = 0; i < oldsize; i++) {
+               lr = oldtable[i];
+               while (lr) {
+                       next = lr->hashlink;
 
-       pool->lr[i-1].nextfree = NULL;
+                       h = heap_hashcode(lr->object);
+                       newslot = h % newsize;
 
-       return pool;
-}
+                       lr->hashlink = newtable[newslot];
+                       newtable[newslot] = lr;
 
+                       lr = next;
+               }
+       }
 
-/* lock_record_alloc_pool ******************************************************
+       /* replace the old table */
 
-   Allocate a lock record pool. The pool is either taken from the global free
-   list or requested from the memory allocator.
+       lock_hashtable.ptr  = newtable;
+       lock_hashtable.size = newsize;
 
-   IN:
-      thread.......the thread that will own the lock records
-         size.........number of lock records in the pool to allocate
+       MFREE(oldtable, lock_record_t *, oldsize);
 
-   RETURN VALUE:
-      the new lock record pool, with initialized lock records
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
+#endif
+}
+
+
+/* lock_hashtable_cleanup ******************************************************
+
+   Removes (and frees) all lock records from the hashtable whose object
+   reference has been cleared, i.e. whose locked object was reclaimed by the GC.
 
 *******************************************************************************/
 
-static lock_record_pool_t *lock_record_alloc_pool(threadobject *t, int size)
+#if defined(ENABLE_GC_CACAO)
+void lock_hashtable_cleanup(void)
 {
-       pthread_mutex_lock(&lock_global_pool_lock);
+       threadobject  *t;
+       lock_record_t *lr;
+       lock_record_t *prev;
+       lock_record_t *next;
+       int i;
 
-       if (lock_global_pool) {
-               int i;
-               lock_record_pool_t *pool;
+       t = THREADOBJECT;
 
-               /* pop a pool from the global freelist */
+       /* lock the hashtable */
 
-               pool = lock_global_pool;
-               lock_global_pool = pool->header.next;
+       pthread_mutex_lock(&(lock_hashtable.mutex));
 
-               pthread_mutex_unlock(&lock_global_pool_lock);
+       /* search the hashtable for cleared references */
 
-               /* re-initialize owner and freelist chaining */
+       for (i = 0; i < lock_hashtable.size; i++) {
+               lr = lock_hashtable.ptr[i];
+               prev = NULL;
 
-               for (i=0; i < pool->header.size; i++) {
-                       pool->lr[i].owner = t;
-                       pool->lr[i].nextfree = &pool->lr[i+1];
-               }
-               pool->lr[i-1].nextfree = NULL;
+               while (lr) {
+                       next = lr->hashlink;
 
-               return pool;
-       }
+                       /* remove lock records with cleared references */
+
+                       if (lr->object == NULL) {
+
+                               /* unlink the lock record from the hashtable */
 
-       pthread_mutex_unlock(&lock_global_pool_lock);
+                               if (prev == NULL)
+                                       lock_hashtable.ptr[i] = next;
+                               else
+                                       prev->hashlink = next;
 
-       /* we have to get a new pool from the allocator */
+                               /* free the lock record */
+
+                               lock_record_free(lr);
+
+                       } else {
+                               prev = lr;
+                       }
 
-       return lock_record_alloc_new_pool(t, size);
+                       lr = next;
+               }
+       }
+
+       /* unlock the hashtable */
+
+       pthread_mutex_unlock(&(lock_hashtable.mutex));
 }
+#endif
 
 
-/* lock_record_free_pools ******************************************************
+/* lock_hashtable_get **********************************************************
 
-   Free the lock record pools in the given linked list. The pools are inserted
-   into the global freelist.
+   Find the lock record for the given object.  If it does not exist
+   yet, create it and enter it in the hashtable.
 
    IN:
-      pool.........list header
+      t....the current thread
+         o....the object to look up
+
+   RETURN VALUE:
+      the lock record to use for this object
 
 *******************************************************************************/
 
-void lock_record_free_pools(lock_record_pool_t *pool)
+#if defined(ENABLE_GC_BOEHM)
+static void lock_record_finalizer(void *object, void *p);
+#endif
+
+static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
 {
-       lock_record_pool_header_t *last;
+       uintptr_t      lockword;
+       u4             slot;
+       lock_record_t *lr;
 
-       assert(false); /* XXX this function does not match the new locking */
-                      /*     algorithm. We must find another way to free  */
-                      /*     unused lock records.                         */
+       lockword = lock_lockword_get(t, o);
 
-       if (!pool)
-               return;
+       if (IS_FAT_LOCK(lockword))
+               return GET_FAT_LOCK(lockword);
+
+       /* lock the hashtable */
+
+       pthread_mutex_lock(&(lock_hashtable.mutex));
+
+       /* lookup the lock record in the hashtable */
+
+       LLNI_CRITICAL_START_THREAD(t);
+       slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
+       lr   = lock_hashtable.ptr[slot];
 
-       pthread_mutex_lock(&lock_global_pool_lock);
+       for (; lr != NULL; lr = lr->hashlink) {
+               if (lr->object == LLNI_DIRECT(o))
+                       break;
+       }
+       LLNI_CRITICAL_END_THREAD(t);
+
+       if (lr == NULL) {
+               /* not found, we must create a new one */
+
+               lr = lock_record_new();
 
-       /* find the last pool in the list */
+               LLNI_CRITICAL_START_THREAD(t);
+               lr->object = LLNI_DIRECT(o);
+               LLNI_CRITICAL_END_THREAD(t);
 
-       last = &pool->header;
-       while (last->next)
-               last = &last->next->header;
+#if defined(ENABLE_GC_BOEHM)
+               /* register new finalizer to clean up the lock record */
 
-       /* chain it to the lock_global_pool freelist */
+               GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
+#endif
+
+               /* enter it in the hashtable */
 
-       last->next = lock_global_pool;
+               lr->hashlink             = lock_hashtable.ptr[slot];
+               lock_hashtable.ptr[slot] = lr;
+               lock_hashtable.entries++;
 
-       /* insert the freed pools into the freelist */
+               /* check whether the hash should grow */
 
-       lock_global_pool = pool;
+               if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
+                       lock_hashtable_grow();
+               }
+       }
 
-       pthread_mutex_unlock(&lock_global_pool_lock);
+       /* unlock the hashtable */
+
+       pthread_mutex_unlock(&(lock_hashtable.mutex));
+
+       /* return the new lock record */
+
+       return lr;
 }
 
 
-/* lock_record_alloc ***********************************************************
+/* lock_hashtable_remove *******************************************************
 
-   Allocate a lock record which is owned by the current thread.
+   Remove the lock record for the given object from the hashtable
+   and free it afterwards.
 
    IN:
-      t............the current thread 
-
-   POST-CONDITION:
-      The current thread holds the mutex of the returned lock record
-         and is recored as owner of the record.
+       t....the current thread
+       o....the object to look up
 
 *******************************************************************************/
 
-static lock_record_t *lock_record_alloc(threadobject *t)
+static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
 {
-       lock_record_t *r;
+       uintptr_t      lockword;
+       lock_record_t *lr;
+       u4             slot;
+       lock_record_t *tmplr;
+
+       /* lock the hashtable */
 
-       assert(t);
-       r = t->ee.firstfree;
+       pthread_mutex_lock(&(lock_hashtable.mutex));
 
-       if (!r) {
-               int poolsize;
-               lock_record_pool_t *pool;
+       /* get lock record */
 
-               /* get a new pool */
+       lockword = lock_lockword_get(t, o);
 
-               poolsize = t->ee.lockrecordcount ? t->ee.lockrecordcount * 2 : INITIALLOCKRECORDS;
-               pool = lock_record_alloc_pool(t, poolsize);
+       assert(IS_FAT_LOCK(lockword));
 
-               /* add it to our per-thread pool list */
+       lr = GET_FAT_LOCK(lockword);
 
-               pool->header.next = t->ee.lockrecordpools;
-               t->ee.lockrecordpools = pool;
-               t->ee.lockrecordcount += pool->header.size;
+       /* remove the lock-record from the hashtable */
 
-               /* take the first record from the pool */
-               r = &pool->lr[0];
+       LLNI_CRITICAL_START_THREAD(t);
+       slot  = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
+       tmplr = lock_hashtable.ptr[slot];
+       LLNI_CRITICAL_END_THREAD(t);
+
+       if (tmplr == lr) {
+               /* special handling if it's the first in the chain */
+
+               lock_hashtable.ptr[slot] = lr->hashlink;
        }
+       else {
+               for (; tmplr != NULL; tmplr = tmplr->hashlink) {
+                       if (tmplr->hashlink == lr) {
+                               tmplr->hashlink = lr->hashlink;
+                               break;
+                       }
+               }
 
-       /* pop the record from the freelist */
+               assert(tmplr != NULL);
+       }
 
-       t->ee.firstfree = r->nextfree;
-#ifndef NDEBUG
-       r->nextfree = NULL; /* in order to find invalid uses of nextfree */
-#endif
+       /* decrease entry count */
 
-       /* pre-acquire the mutex of the new lock record */
+       lock_hashtable.entries--;
 
-       pthread_mutex_lock(&(r->mutex));
+       /* unlock the hashtable */
 
-       return r;
-}
+       pthread_mutex_unlock(&(lock_hashtable.mutex));
 
+       /* free the lock record */
 
-/* lock_record_recycle *********************************************************
+       lock_record_free(lr);
+}
 
-   Recycle the given lock record. It will be inserted in the appropriate
-   free list.
 
-   IN:
-      t............the owner
-         r............lock record to recycle
+/* lock_record_finalizer *******************************************************
+
+   XXX Remove me for exact GC.
 
 *******************************************************************************/
 
-static inline void lock_record_recycle(threadobject *t, lock_record_t *r)
+static void lock_record_finalizer(void *object, void *p)
 {
-       assert(t);
-       assert(r);
-       assert(r->owner == t);
-       assert(r->nextfree == NULL);
+       java_handle_t *o;
+       classinfo     *c;
 
-       r->nextfree = t->ee.firstfree;
-       t->ee.firstfree = r;
-}
+       o = (java_handle_t *) object;
+
+#if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
+       /* XXX this is only a dirty hack to make Boehm work with handles */
+
+       o = LLNI_WRAP((java_object_t *) o);
+#endif
 
+       LLNI_class_get(o, c);
+
+#if !defined(NDEBUG)
+       if (opt_DebugFinalizer) {
+               log_start();
+               log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
+               class_print(c);
+               log_print("]");
+               log_finish();
+       }
+#endif
+
+       /* check for a finalizer function */
+
+       if (c->finalizer != NULL)
+               finalizer_run(object, p);
+
+       /* remove the lock-record entry from the hashtable and free it */
+
+       lock_hashtable_remove(THREADOBJECT, o);
+}
 
 
 /*============================================================================*/
@@ -454,82 +686,261 @@ static inline void lock_record_recycle(threadobject *t, lock_record_t *r)
 
 *******************************************************************************/
 
-void lock_init_object_lock(java_objectheader *o)
+void lock_init_object_lock(java_object_t *o)
 {
        assert(o);
 
-       o->monitorPtr = (lock_record_t *) THIN_UNLOCKED;
+       o->lockword = THIN_UNLOCKED;
 }
 
 
-/* lock_get_initial_lock_word **************************************************
+/*============================================================================*/
+/* LOCKING ALGORITHM                                                          */
+/*============================================================================*/
 
-   Returns the initial (unlocked) lock word. The pointer is
-   required in the code generator to set up a virtual
-   java_objectheader for code patch locking.
+
+/* lock_lockword_get ***********************************************************
+
+   Get the lockword for the given object.
+
+   IN:
+      t............the current thread
+      o............the object
 
 *******************************************************************************/
 
-lock_record_t *lock_get_initial_lock_word(void)
+static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
 {
-       return (lock_record_t *) THIN_UNLOCKED;
+       uintptr_t lockword;
+
+       LLNI_CRITICAL_START_THREAD(t);
+       lockword = LLNI_DIRECT(o)->lockword;
+       LLNI_CRITICAL_END_THREAD(t);
+
+       return lockword;
 }
 
 
+/* lock_lockword_set ***********************************************************
 
-/*============================================================================*/
-/* LOCKING ALGORITHM                                                          */
-/*============================================================================*/
+   Set the lockword for the given object.
+
+   IN:
+      t............the current thread
+      o............the object
+         lockword.....the new lockword value
+
+*******************************************************************************/
+
+static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
+{
+       LLNI_CRITICAL_START_THREAD(t);
+       LLNI_DIRECT(o)->lockword = lockword;
+       LLNI_CRITICAL_END_THREAD(t);
+}
+
+
+/* lock_record_enter ***********************************************************
+
+   Enter the lock represented by the given lock record.
+
+   IN:
+      t.................the current thread
+         lr................the lock record
+
+*******************************************************************************/
+
+static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
+{
+       pthread_mutex_lock(&(lr->mutex));
+       lr->owner = t;
+}
+
+
+/* lock_record_exit ************************************************************
+
+   Release the lock represented by the given lock record.
+
+   IN:
+      t.................the current thread
+         lr................the lock record
+
+   PRE-CONDITION:
+      The current thread must own the lock represented by this lock record.
+         This is NOT checked by this function!
+
+*******************************************************************************/
+
+static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
+{
+       lr->owner = NULL;
+       pthread_mutex_unlock(&(lr->mutex));
+}
 
 
 /* lock_inflate ****************************************************************
 
    Inflate the lock of the given object. This may only be called by the
-   owner of the monitor.
+   owner of the monitor of the object.
 
    IN:
       t............the current thread
          o............the object of which to inflate the lock
-
-   RETURN VALUE:
-      the new lock record of the object
+         lr...........the lock record to install. The current thread must
+                      own the lock of this lock record!
 
    PRE-CONDITION:
-      The current thread must be the owner of this object's monitor!
+      The current thread must be the owner of this object's monitor AND
+         of the lock record's lock!
 
 *******************************************************************************/
 
-static lock_record_t *lock_inflate(threadobject *t, java_objectheader *o)
+static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
 {
-       lock_record_t *lr;
-       ptrint lockword;
-       ptrint count;
+       uintptr_t lockword;
 
        /* get the current lock count */
 
-       lockword = (ptrint) o->monitorPtr;
+       lockword = lock_lockword_get(t, o);
 
-       assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );
-
-       count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
+       if (IS_FAT_LOCK(lockword)) {
+               assert(GET_FAT_LOCK(lockword) == lr);
+               return;
+       }
+       else {
+               assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
 
-       /* allocate a fat lock */
+               /* copy the count from the thin lock */
 
-       lr = lock_record_alloc(t);
-       lr->count = count;
+               lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
+       }
 
-#if defined(LOCK_VERBOSE)
-       printf("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
-                       t->index, (void*) o, (long)o->monitorPtr, (int)count);
-#endif
+       DEBUGLOCKS(("[lock_inflate      : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
+                               lr, t, o, lockword, lr->count));
 
        /* install it */
 
-       o->monitorPtr = (lock_record_t *) MAKE_FAT_LOCK(lr);
+       lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
+}
 
-       return lr;
+
+/* TODO Move this function into threadlist.[ch]. */
+
+static threadobject *threads_lookup_thread_id(int index)
+{
+       threadobject *t;
+
+       threadlist_lock();
+
+       for (t = threadlist_first(); t != NULL; t = threadlist_next(t)) {
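+               /* skip threads which have not been started yet */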
+               if (t->state == THREAD_STATE_NEW)
+                       continue;
+               if (t->index == index)
+                       break;
+       }
+
+       threadlist_unlock();
+       return t;
+}
+
+static void sable_flc_waiting(ptrint lockword, threadobject *t, java_handle_t *o)
+{
+       int index;
+       threadobject *t_other;
+       int old_flc;
+
+       index = GET_THREAD_INDEX(lockword);
+       t_other = threads_lookup_thread_id(index);
+       if (!t_other)
+               /* failure, TODO: add statistics */
+               return;
+
+       pthread_mutex_lock(&t_other->flc_lock);
+       old_flc = t_other->flc_bit;
+       t_other->flc_bit = true;
+
+       DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d",
+                               t->index, t_other->index));
+
+       /* Set FLC bit first, then read the lockword again */
+       MEMORY_BARRIER();
+
+       lockword = lock_lockword_get(t, o);
+
+       /* Lockword is still the way it was seen before */
+       if (IS_THIN_LOCK(lockword) && (GET_THREAD_INDEX(lockword) == index))
+       {
+               /* Add tuple (t, o) to the other thread's FLC list */
+               t->flc_object = o;
+               t->flc_next = t_other->flc_list;
+               t_other->flc_list = t;
+
+               for (;;)
+               {
+                       threadobject *current;
+
+                       /* Wait until another thread sees the flc bit and notifies
+                          us of unlocking. */
+                       pthread_cond_wait(&t->flc_cond, &t_other->flc_lock);
+
+                       /* Traverse FLC list looking if we're still there */
+                       current = t_other->flc_list;
+                       while (current && current != t)
+                               current = current->flc_next;
+                       if (!current)
+                               /* not in list anymore, can stop waiting */
+                               break;
+
+                       /* We are still in the list -- the other thread cannot have seen
+                          the FLC bit yet */
+                       assert(t_other->flc_bit);
+               }
+
+               t->flc_object = NULL;   /* for garbage collector? */
+               t->flc_next = NULL;
+       }
+       else
+               t_other->flc_bit = old_flc;
+
+       pthread_mutex_unlock(&t_other->flc_lock);
 }
 
+static void notify_flc_waiters(threadobject *t, java_handle_t *o)
+{
+       threadobject *current;
+
+       pthread_mutex_lock(&t->flc_lock);
+
+       current = t->flc_list;
+       while (current)
+       {
+               if (current->flc_object != o)
+               {
+                       /* The object has to be inflated so the other threads can properly
+                          block on it. */
+
+                       /* Only if not already inflated */
+                       ptrint lockword = lock_lockword_get(t, current->flc_object);
+                       if (IS_THIN_LOCK(lockword)) {
+                               lock_record_t *lr = lock_hashtable_get(t, current->flc_object);
+                               lock_record_enter(t, lr);
+
+                               DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
+                                                       t->index, (void*) current->flc_object, (void*) lr));
+
+                               lock_inflate(t, current->flc_object, lr);
+                       }
+               }
+               /* Wake the waiting thread */
+               pthread_cond_broadcast(&current->flc_cond);
+
+               current = current->flc_next;
+       }
+
+       t->flc_list = NULL;
+       t->flc_bit = false;
+       pthread_mutex_unlock(&t->flc_lock);
+}
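
The flat-lock-contention (FLC) handshake above rests on a store/load ordering
argument: the waiter stores flc_bit and then re-reads the lockword, while the
unlocking thread stores THIN_UNLOCKED and then reads flc_bit (see
lock_monitor_exit below), so at least one of the two sides must observe the
other. A self-contained sketch of that pattern, with GCC's __sync_synchronize
standing in for CACAO's per-architecture MEMORY_BARRIER (an assumption):

    #include <stdbool.h>

    static volatile bool flc_bit;
    static volatile long lockword;     /* nonzero = thin-locked by the owner */

    /* Waiter side: publish the flag, then re-check the lock. */
    static bool waiter_sees_lock_held(long expected)
    {
        flc_bit = true;
        __sync_synchronize();          /* store flc_bit before loading lockword */
        return lockword == expected;   /* still held -> safe to wait on flc_cond */
    }

    /* Owner side (unlock path): publish the unlock, then check the flag. */
    static bool owner_sees_contention(void)
    {
        lockword = 0;                  /* THIN_UNLOCKED */
        __sync_synchronize();          /* store lockword before loading flc_bit */
        return flc_bit;                /* contention -> call notify_flc_waiters() */
    }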
 
 /* lock_monitor_enter **********************************************************
 
@@ -542,23 +953,42 @@ static lock_record_t *lock_inflate(threadobject *t, java_objectheader *o)
       t............the current thread
          o............the object of which to enter the monitor
 
+   RETURN VALUE:
+      true.........the lock has been successfully acquired
+         false........an exception has been thrown
+
 *******************************************************************************/
 
-void lock_monitor_enter(threadobject *t, java_objectheader *o)
+bool lock_monitor_enter(java_handle_t *o)
 {
+       threadobject  *t;
        /* CAUTION: This code assumes that ptrint is unsigned! */
-       ptrint lockword;
-       ptrint thinlock;
+       ptrint         lockword;
+       ptrint         thinlock;
+       lock_record_t *lr;
+
+       if (o == NULL) {
+               exceptions_throw_nullpointerexception();
+               return false;
+       }
+
+       t = THREADOBJECT;
 
        thinlock = t->thinlock;
 
+retry:
        /* most common case: try to thin-lock an unlocked object */
 
-       if ((lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) == THIN_UNLOCKED) {
+       LLNI_CRITICAL_START_THREAD(t);
+       lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
+       LLNI_CRITICAL_END_THREAD(t);
+
+       if (lockword == THIN_UNLOCKED) {
                /* success. we locked it */
                /* The Java Memory Model requires a memory barrier here: */
-               MEMORY_BARRIER();
-               return;
+               /* Because of the CAS above, this barrier is a nop on x86 / x86_64 */
+               MEMORY_BARRIER_AFTER_ATOMIC();
+               return true;
        }
 
        /* next common case: recursive lock with small recursion count */
@@ -573,88 +1003,60 @@ void lock_monitor_enter(threadobject *t, java_objectheader *o)
                {
                        /* the recursion count is low enough */
 
-                       o->monitorPtr = (lock_record_t *) (lockword + THIN_LOCK_COUNT_INCR);
+                       lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);
 
                        /* success. we locked it */
-                       return;
+                       return true;
                }
                else {
-                       lock_record_t *lr;
-
                        /* recursion count overflow */
 
-                       lr = lock_inflate(t, o);
+                       lr = lock_hashtable_get(t, o);
+                       lock_record_enter(t, lr);
+                       lock_inflate(t, o, lr);
                        lr->count++;
 
-                       return;
+                       notify_flc_waiters(t, o);
+
+                       return true;
                }
        }
 
        /* the lock is either contended or fat */
 
-       {
-               lock_record_t *lr;
-               ptrint fatlock;
-
-               if (IS_FAT_LOCK(lockword)) {
-
-                       lr = GET_FAT_LOCK(lockword);
-
-                       /* check for recursive entering */
-                       if (lr->owner == t) {
-                               lr->count++;
-                               return;
-                       }
-               }
-               else {
-                       /* alloc a lock record owned by us */
-                       lr = lock_record_alloc(t);
-                       fatlock = MAKE_FAT_LOCK(lr);
-
-#if defined(LOCK_VERBOSE)
-                       printf("thread %3d: SPINNING for inflating lock of %p, current lockword = %lx\n",
-                                       t->index, (void*)o, (long)lockword);
-#endif
+       if (IS_FAT_LOCK(lockword)) {
 
-                       /* SPIN LOOP */
-                       while (true) {
-                               lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, fatlock);
-                               if (lockword == THIN_UNLOCKED) {
-#if defined(LOCK_VERBOSE)
-                                       printf("thread %3d: successfully inflated lock of %p\n",
-                                                       t->index, (void*)o);
-#endif
-                                       /* we managed to install our lock record */
-                                       /* The Java Memory Model requires a memory barrier here: */
-                                       MEMORY_BARRIER();
-                                       return;
-                               }
-
-                               if (IS_FAT_LOCK(lockword)) {
-#if defined(LOCK_VERBOSE)
-                                       printf("thread %3d: lock of %p was inflated by other thread, lockword = %lx\n",
-                                                       t->index, (void*)o, (long)lockword);
-#endif
-                                       /* another thread inflated the lock */
-                                       pthread_mutex_unlock(&(lr->mutex));
-                                       lock_record_recycle(t, lr);
+               lr = GET_FAT_LOCK(lockword);
 
-                                       lr = GET_FAT_LOCK(lockword);
-                                       break;
-                               }
-                       }
+               /* check for recursive entering */
+               if (lr->owner == t) {
+                       lr->count++;
+                       return true;
                }
 
                /* acquire the mutex of the lock record */
-               pthread_mutex_lock(&(lr->mutex));
 
-               /* enter us as the owner */
-               lr->owner = t;
+               lock_record_enter(t, lr);
 
                assert(lr->count == 0);
 
-               return;
+               return true;
        }
+
+       /****** inflation path ******/
+
+#if defined(ENABLE_JVMTI)
+       /* Monitor Contended Enter */
+       jvmti_MonitorContendedEntering(false, o);
+#endif
+
+       sable_flc_waiting(lockword, t, o);
+
+#if defined(ENABLE_JVMTI)
+       /* Monitor Contended Entered */
+       jvmti_MonitorContendedEntering(true, o);
+#endif
+       goto retry;
 }
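
The uncontended transitions that this function and lock_monitor_exit perform
on the lockword reduce to: enter by CAS, recursive re-enter and exit by count
increment/decrement, final exit by a plain store. A compact, runnable model,
again with assumed constants rather than the real lock.h values:

    #include <assert.h>
    #include <stdint.h>

    #define UNLOCKED   0
    #define COUNT_INCR 2   /* assumed: count in bits 1..7, shape bit is bit 0 */

    static volatile intptr_t word = UNLOCKED;

    int main(void)
    {
        intptr_t thinlock = (intptr_t) 1 << 8;  /* pre-computed, thread index 1 */

        /* enter: CAS from UNLOCKED to our thin-lock value */
        assert(__sync_val_compare_and_swap(&word, UNLOCKED, thinlock) == UNLOCKED);

        word += COUNT_INCR;   /* recursive enter: plain increment, we own it */
        word -= COUNT_INCR;   /* recursive exit                              */
        word  = UNLOCKED;     /* final exit: plain store (plus barriers)     */

        return 0;
    }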
 
 
@@ -676,32 +1078,51 @@ void lock_monitor_enter(threadobject *t, java_objectheader *o)
 
 *******************************************************************************/
 
-bool lock_monitor_exit(threadobject *t, java_objectheader *o)
+bool lock_monitor_exit(java_handle_t *o)
 {
-       ptrint lockword;
-       ptrint thinlock;
+       threadobject *t;
+       uintptr_t     lockword;
+       ptrint        thinlock;
+
+       if (o == NULL) {
+               exceptions_throw_nullpointerexception();
+               return false;
+       }
+
+       t = THREADOBJECT;
+
+       thinlock = t->thinlock;
 
        /* We don't have to worry about stale values here, as any stale value */
        /* will indicate that we don't own the lock.                          */
 
-       lockword = (ptrint) o->monitorPtr;
-       thinlock = t->thinlock;
+       lockword = lock_lockword_get(t, o);
 
        /* most common case: we release a thin lock that we hold once */
 
        if (lockword == thinlock) {
                /* memory barrier for Java Memory Model */
-               MEMORY_BARRIER();
-               o->monitorPtr = THIN_UNLOCKED;
+               STORE_ORDER_BARRIER();
+               lock_lockword_set(t, o, THIN_UNLOCKED);
                /* memory barrier for thin locking */
                MEMORY_BARRIER();
+
+               /* check whether there has been flat lock contention on this object */
+
+               if (t->flc_bit) {
+                       DEBUGLOCKS(("thread %d saw flc bit", t->index));
+
+                       /* there has been a contention on this thin lock */
+                       notify_flc_waiters(t, o);
+               }
+
                return true;
        }
 
        /* next common case: we release a recursive lock, count > 0 */
 
        if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
-               o->monitorPtr = (lock_record_t *) (lockword - THIN_LOCK_COUNT_INCR);
+               lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
                return true;
        }
 
@@ -718,7 +1139,7 @@ bool lock_monitor_exit(threadobject *t, java_objectheader *o)
                /* will be != t and thus fail this check.                             */
 
                if (lr->owner != t) {
-                       *exceptionptr = new_illegalmonitorstateexception();
+                       exceptions_throw_illegalmonitorstateexception();
                        return false;
                }
 
@@ -741,11 +1162,45 @@ bool lock_monitor_exit(threadobject *t, java_objectheader *o)
 
        /* legal thin lock cases have been handled above, so this is an error */
 
-       *exceptionptr = new_illegalmonitorstateexception();
+       exceptions_throw_illegalmonitorstateexception();
+
        return false;
 }
 
 
+/* lock_record_add_waiter ******************************************************
+
+   Add a thread to the list of waiting threads of a lock record.
+
+   IN:
+      lr...........the lock record
+      thread.......the thread to add
+
+*******************************************************************************/
+
+static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
+{
+       lock_waiter_t *w;
+
+       /* Allocate a waiter data structure. */
+
+       w = NEW(lock_waiter_t);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_waiter += sizeof(lock_waiter_t);
+#endif
+
+       /* Store the thread in the waiter structure. */
+
+       w->thread = thread;
+
+       /* Add the waiter as last entry to waiters list. */
+
+       list_add_last(lr->waiters, w);
+}
+
+
 /* lock_record_remove_waiter ***************************************************
 
    Remove a thread from the list of waiting threads of a lock record.
@@ -759,25 +1214,113 @@ bool lock_monitor_exit(threadobject *t, java_objectheader *o)
    
 *******************************************************************************/
 
-static void lock_record_remove_waiter(lock_record_t *lr, threadobject *t)
+static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
 {
-       lock_waiter_t **link;
+       list_t        *l;
        lock_waiter_t *w;
 
-       link = &(lr->waiters);
-       while ((w = *link)) {
-               if (w->waiter == t) {
-                       *link = w->next;
+       /* Get the waiters list. */
+
+       l = lr->waiters;
+
+       for (w = list_first(l); w != NULL; w = list_next(l, w)) {
+               if (w->thread == thread) {
+                       /* Remove the waiter entry from the list. */
+
+                       list_remove(l, w);
+
+                       /* Free the waiter data structure. */
+
+                       FREE(w, lock_waiter_t);
+
+#if defined(ENABLE_STATISTICS)
+                       if (opt_stat)
+                               size_lock_waiter -= sizeof(lock_waiter_t);
+#endif
+
                        return;
                }
+       }
+
+       /* This should never happen. */
 
-               link = &(w->next);
+       vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
+}
+
+
+/* lock_record_wait ************************************************************
+
+   Wait on a lock record for a given (maximum) amount of time.
+
+   IN:
+      t............the current thread
+         lr...........the lock record
+         millis.......milliseconds of timeout
+         nanos........nanoseconds of timeout
+
+   RETURN VALUE:
+      true.........we have been interrupted,
+      false........everything ok
+
+   PRE-CONDITION:
+      The current thread must be the owner of the lock record.
+         This is NOT checked by this function!
+   
+*******************************************************************************/
+
+static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
+{
+       s4   lockcount;
+       bool wasinterrupted = false;
+
+       DEBUGLOCKS(("[lock_record_wait  : lr=%p, t=%p, millis=%lld, nanos=%d]",
+                               lr, thread, millis, nanos));
+
+       /* { the thread t owns the fat lock record lr on the object o } */
+
+       /* register us as waiter for this object */
+
+       lock_record_add_waiter(lr, thread);
+
+       /* remember the old lock count */
+
+       lockcount = lr->count;
+
+       /* unlock this record */
+
+       lr->count = 0;
+       lock_record_exit(thread, lr);
+
+       /* wait until notified/interrupted/timed out */
+
+       threads_wait_with_timeout_relative(thread, millis, nanos);
+
+       /* re-enter the monitor */
+
+       lock_record_enter(thread, lr);
+
+       /* remove us from the list of waiting threads */
+
+       lock_record_remove_waiter(lr, thread);
+
+       /* restore the old lock count */
+
+       lr->count = lockcount;
+
+       /* We can only be signaled OR interrupted, not both. If both flags
+          are set, reset only signaled and leave the thread in
+          interrupted state. Otherwise, clear both. */
+
+       if (!thread->signaled) {
+               wasinterrupted = thread->interrupted;
+               thread->interrupted = false;
        }
 
-       /* this should never happen */
-       fprintf(stderr,"error: waiting thread not found in list of waiters\n");
-       fflush(stderr);
-       abort();
+       thread->signaled = false;
+
+       /* return if we have been interrupted */
+
+       return wasinterrupted;
 }
 
 
@@ -796,15 +1339,12 @@ static void lock_record_remove_waiter(lock_record_t *lr, threadobject *t)
    
 *******************************************************************************/
 
-static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nanos)
+static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
 {
-       ptrint         lockword;
+       uintptr_t      lockword;
        lock_record_t *lr;
-       lock_waiter_t *waiter;
-       s4             lockcount;
-       bool           wasinterrupted;
 
-       lockword = (ptrint) o->monitorPtr;
+       lockword = lock_lockword_get(t, o);
 
        /* check if we own this monitor */
        /* We don't have to worry about stale values here, as any stale value */
@@ -815,7 +1355,7 @@ static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis,
                lr = GET_FAT_LOCK(lockword);
 
                if (lr->owner != t) {
-                       *exceptionptr = new_illegalmonitorstateexception();
+                       exceptions_throw_illegalmonitorstateexception();
                        return;
                }
        }
@@ -823,57 +1363,93 @@ static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis,
                /* it's a thin lock */
 
                if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
-                       *exceptionptr = new_illegalmonitorstateexception();
+                       exceptions_throw_illegalmonitorstateexception();
                        return;
                }
 
                /* inflate this lock */
-               lr = lock_inflate(t, o);
+
+               lr = lock_hashtable_get(t, o);
+               lock_record_enter(t, lr);
+               lock_inflate(t, o, lr);
+
+               notify_flc_waiters(t, o);
        }
 
        /* { the thread t owns the fat lock record lr on the object o } */
 
-       /* register us as waiter for this object */
+       if (lock_record_wait(t, lr, millis, nanos))
+               exceptions_throw_interruptedexception();
+}
 
-       waiter = NEW(lock_waiter_t);
-       waiter->waiter = t;
-       waiter->next = lr->waiters;
-       lr->waiters = waiter;
 
-       /* remember the old lock count */
+/* lock_record_notify **********************************************************
 
-       lockcount = lr->count;
+   Notify one thread or all threads waiting on the given lock record.
 
-       /* unlock this record */
+   IN:
+      t............the current thread
+         lr...........the lock record
+         one..........if true, only notify one thread
 
-       lr->count = 0;
-       lr->owner = NULL;
-       pthread_mutex_unlock(&(lr->mutex));
+   PRE-CONDITION:
+      The current thread must be the owner of the lock record.
+         This is NOT checked by this function!
+   
+*******************************************************************************/
 
-       /* wait until notified/interrupted/timed out */
+static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
+{
+       list_t        *l;
+       lock_waiter_t *w;
+       threadobject  *waitingthread;
 
-       wasinterrupted = threads_wait_with_timeout_relative(t, millis, nanos);
+       /* { the thread t owns the fat lock record lr on the object o } */
 
-       /* re-enter the monitor */
+       /* Get the waiters list. */
 
-       lock_monitor_enter(t, o);
+       l = lr->waiters;
 
-       /* assert that the lock record is still the same */
+       for (w = list_first(l); w != NULL; w = list_next(l, w)) {
+               /* signal the waiting thread */
 
-       assert( GET_FAT_LOCK((ptrint) o->monitorPtr) == lr );
+               waitingthread = w->thread;
 
-       /* remove us from the list of waiting threads */
+               /* We must skip threads which have already been notified or
+                  interrupted. They will remove themselves from the list. */
 
-       lock_record_remove_waiter(lr, t);
+               if (waitingthread->signaled || waitingthread->interrupted)
+                       continue;
 
-       /* restore the old lock count */
+               /* Enter the wait-mutex. */
 
-       lr->count = lockcount;
+               pthread_mutex_lock(&(waitingthread->waitmutex));
+
+               DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
+                                       lr, t, waitingthread, waitingthread->sleeping, one));
+
+               /* Signal the thread if it's sleeping. sleeping can be false
+                  when the waiting thread is blocked between giving up the
+                  monitor and entering the waitmutex. It will eventually
+                  observe that it's signaled and refrain from going to
+                  sleep. */
+
+               if (waitingthread->sleeping)
+                       pthread_cond_signal(&(waitingthread->waitcond));
+
+               /* Mark the thread as signaled. */
+
+               waitingthread->signaled = true;
 
-       /* if we have been interrupted, throw the appropriate exception */
+               /* Leave the wait-mutex. */
 
-       if (wasinterrupted)
-               *exceptionptr = new_exception(string_java_lang_InterruptedException);
+               pthread_mutex_unlock(&(waitingthread->waitmutex));
+
+               /* if we should only wake one, we are done */
+
+               if (one)
+                       break;
+       }
 }
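
The sleeping/signaled pair used above guards against both lost wakeups (a
notify arriving before the waiter reaches pthread_cond_wait) and spurious
wakeups. A self-contained sketch of the same discipline, with hypothetical
names:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t waitmutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  waitcond  = PTHREAD_COND_INITIALIZER;
    static bool sleeping;
    static bool signaled;

    static void waiter(void)
    {
        pthread_mutex_lock(&waitmutex);
        sleeping = true;
        while (!signaled)                /* tolerates spurious wakeups */
            pthread_cond_wait(&waitcond, &waitmutex);
        sleeping = false;
        signaled = false;
        pthread_mutex_unlock(&waitmutex);
    }

    static void notifier(void)
    {
        pthread_mutex_lock(&waitmutex);
        if (sleeping)                    /* only poke a thread that really sleeps */
            pthread_cond_signal(&waitcond);
        signaled = true;                 /* a late waiter sees this and skips sleeping */
        pthread_mutex_unlock(&waitmutex);
    }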
 
 
@@ -891,14 +1467,12 @@ static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis,
    
 *******************************************************************************/
 
-static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
+static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
 {
-       ptrint lockword;
+       uintptr_t      lockword;
        lock_record_t *lr;
-       lock_waiter_t *waiter;
-       threadobject *waitingthread;
 
-       lockword = (ptrint) o->monitorPtr;
+       lockword = lock_lockword_get(t, o);
 
        /* check if we own this monitor */
        /* We don't have to worry about stale values here, as any stale value */
@@ -909,7 +1483,7 @@ static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
                lr = GET_FAT_LOCK(lockword);
 
                if (lr->owner != t) {
-                       *exceptionptr = new_illegalmonitorstateexception();
+                       exceptions_throw_illegalmonitorstateexception();
                        return;
                }
        }
@@ -917,35 +1491,17 @@ static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
                /* it's a thin lock */
 
                if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
-                       *exceptionptr = new_illegalmonitorstateexception();
+                       exceptions_throw_illegalmonitorstateexception();
                        return;
                }
 
-               /* inflate this lock */
-               lr = lock_inflate(t, o);
+               /* no thread can wait on a thin lock, so there's nothing to do. */
+               return;
        }
 
        /* { the thread t owns the fat lock record lr on the object o } */
 
-       /* for each waiter: */
-
-       for (waiter = lr->waiters; waiter; waiter = waiter->next) {
-
-               /* signal the waiting thread */
-
-               waitingthread = waiter->waiter;
-
-               pthread_mutex_lock(&waitingthread->waitmutex);
-               if (waitingthread->sleeping)
-                       pthread_cond_signal(&waitingthread->waitcond);
-               waitingthread->signaled = true;
-               pthread_mutex_unlock(&waitingthread->waitmutex);
-
-               /* if we should only wake one, we are done */
-
-               if (one)
-                       break;
-       }
+       lock_record_notify(t, lr, one);
 }
 
 
@@ -967,22 +1523,23 @@ static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
    
 *******************************************************************************/
 
-bool lock_is_held_by_current_thread(java_objectheader *o)
+bool lock_is_held_by_current_thread(java_handle_t *o)
 {
-       ptrint        lockword;
-       threadobject *t;
+       threadobject  *t;
+       uintptr_t      lockword;
+       lock_record_t *lr;
+
+       t = THREADOBJECT;
 
        /* check if we own this monitor */
        /* We don't have to worry about stale values here, as any stale value */
        /* will fail this check.                                              */
 
-       lockword = (ptrint) o->monitorPtr;
-       t = THREADOBJECT;
+       lockword = lock_lockword_get(t, o);
 
        if (IS_FAT_LOCK(lockword)) {
-               lock_record_t *lr;
-
                /* it's a fat lock */
+
                lr = GET_FAT_LOCK(lockword);
 
                return (lr->owner == t);
@@ -1012,10 +1569,13 @@ bool lock_is_held_by_current_thread(java_objectheader *o)
    
 *******************************************************************************/
 
-void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
+void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
 {
-       threadobject *t = (threadobject*) THREADOBJECT;
-       lock_monitor_wait(t, o, millis, nanos);
+       threadobject *thread;
+
+       thread = THREADOBJECT;
+
+       lock_monitor_wait(thread, o, millis, nanos);
 }
 
 
@@ -1028,10 +1588,13 @@ void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
    
 *******************************************************************************/
 
-void lock_notify_object(java_objectheader *o)
+void lock_notify_object(java_handle_t *o)
 {
-       threadobject *t = (threadobject*) THREADOBJECT;
-       lock_monitor_notify(t, o, true);
+       threadobject *thread;
+
+       thread = THREADOBJECT;
+
+       lock_monitor_notify(thread, o, true);
 }
 
 
@@ -1044,12 +1607,16 @@ void lock_notify_object(java_objectheader *o)
    
 *******************************************************************************/
 
-void lock_notify_all_object(java_objectheader *o)
+void lock_notify_all_object(java_handle_t *o)
 {
-       threadobject *t = (threadobject*) THREADOBJECT;
-       lock_monitor_notify(t, o, false);
+       threadobject *thread;
+
+       thread = THREADOBJECT;
+
+       lock_monitor_notify(thread, o, false);
 }
 
+
 /*
  * These are local overrides for various environment variables in Emacs.
  * Please do not remove this and leave it at the end of the file, where