* Removed all Id tags.
[cacao.git] src/threads/native/lock.c
index ed7d795f85a7e2f720140eb5c6f80e743c4eecf4..3adfb24dc3ff802a7b820aceb310abbab88de4e0 100644 (file)
@@ -1,6 +1,6 @@
 /* src/threads/native/lock.c - lock implementation
 
-   Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
+   Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
    C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
    E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
    J. Wenninger, Institut f. Computersprachen - TU Wien
    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
    02110-1301, USA.
 
-   Contact: cacao@cacaojvm.org
-
-   Authors: Stefan Ring
-                       Edwin Steiner
-
-   Changes: Christian Thalinger
-
-   $Id: threads.c 4903 2006-05-11 12:48:43Z edwin $
-
 */
 
 
 #include <sys/time.h>
 #include <pthread.h>
 
-#include "mm/memory.h"
 #include "vm/types.h"
+
+#include "mm/memory.h"
+
+#include "threads/native/lock.h"
+#include "threads/native/threads.h"
+
 #include "vm/global.h"
 #include "vm/exceptions.h"
+#include "vm/finalizer.h"
 #include "vm/stringlocal.h"
+#include "vm/vm.h"
+
+#include "vmcore/options.h"
+
+#if defined(ENABLE_STATISTICS)
+# include "vmcore/statistics.h"
+#endif
+
+#if defined(ENABLE_VMLOG)
+#include <vmlog_cacao.h>
+#endif
 
 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
 
 #include "native/jvmti/cacaodbg.h"
 #endif
 
+#if defined(ENABLE_GC_BOEHM)
+# include "mm/boehm-gc/include/gc.h"
+#endif
+
 
 /******************************************************************************/
 /* DEBUGGING MACROS                                                           */
 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
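
The macros above manipulate the per-object lock word stored in o->monitorPtr: a thin lock packs the owning thread's pre-computed thinlock value together with a small recursion count, while a fat lock is a tagged pointer to a lock_record_t. A minimal sketch of the kind of encoding these macros assume follows; the constants, shift amounts and macro bodies are illustrative assumptions only, the authoritative definitions live in threads/native/lock.h.

    /* Illustrative lock word layout -- NOT the authoritative definitions. */

    #define THIN_LOCK_SHAPE_BIT    0x01      /* assumed: 1 => fat (inflated) lock       */
    #define THIN_LOCK_COUNT_SHIFT  1         /* assumed position of the count field     */
    #define THIN_LOCK_COUNT_SIZE   8         /* assumed width of the count field        */
    #define THIN_LOCK_COUNT_MASK   (((1 << THIN_LOCK_COUNT_SIZE) - 1) \
                                    << THIN_LOCK_COUNT_SHIFT)
    #define THIN_UNLOCKED          0         /* assumed word of an unlocked object      */

    #define IS_FAT_LOCK(lockword)   ((lockword) & THIN_LOCK_SHAPE_BIT)
    #define IS_THIN_LOCK(lockword)  (!IS_FAT_LOCK(lockword))
    #define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
    #define MAKE_FAT_LOCK(lr)       (((ptrint) (lr)) | THIN_LOCK_SHAPE_BIT)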
 
 
-/******************************************************************************/
-/* GLOBAL VARIABLES                                                           */
-/******************************************************************************/
-
-/* global lock record pool list header */
-lock_record_pool_t *lock_global_pool;
-
-/* mutex for synchronizing access to the global pool */
-pthread_mutex_t lock_global_pool_lock;
+/* global variables ***********************************************************/
 
 /* hashtable mapping objects to lock records */
 static lock_hashtable_t lock_hashtable;
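
With the per-thread lock record pools removed, every inflated monitor now lives in this one global table, keyed by object. For orientation, the approximate shapes of the structures used throughout this file are sketched below; the field names match the code in this patch, but the exact definitions (and any additional fields) are in threads/native/lock.h and threads/native/threads.h, so treat this as a reading aid rather than the real layout.

    /* Approximate shapes of the locking data structures (see lock.h). */

    typedef struct lock_waiter_t lock_waiter_t;
    typedef struct lock_record_t lock_record_t;

    struct lock_waiter_t {
            threadobject  *waiter;        /* the waiting thread                  */
            lock_waiter_t *next;          /* next entry in the waiter list       */
    };

    struct lock_record_t {
            java_object_t  *object;       /* the object this record belongs to   */
            threadobject   *owner;        /* current owner, NULL if unowned      */
            s4              count;        /* monitor recursion count             */
            lock_waiter_t  *waiters;      /* threads blocked in wait()           */
            lock_record_t  *hashlink;     /* next record in the same hash bucket */
            pthread_mutex_t mutex;        /* the heavy-weight lock itself        */
    };

    typedef struct {
            pthread_mutex_t  mutex;       /* protects the table                  */
            u4               size;        /* number of buckets                   */
            u4               entries;     /* number of records currently stored  */
            lock_record_t  **ptr;         /* bucket array                        */
    } lock_hashtable_t;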
@@ -191,9 +195,6 @@ static lock_hashtable_t lock_hashtable;
 /******************************************************************************/
 
 static void lock_hashtable_init(void);
-static lock_record_t * lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o);
-
-static lock_record_t * lock_record_alloc(threadobject *t);
 
 static void lock_record_enter(threadobject *t, lock_record_t *lr);
 static void lock_record_exit(threadobject *t, lock_record_t *lr);
@@ -214,54 +215,16 @@ static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
 
 void lock_init(void)
 {
-       pthread_mutex_init(&lock_global_pool_lock, NULL);
+       /* initialize lock hashtable */
 
        lock_hashtable_init();
-}
-
-
-/* lock_record_init ************************************************************
-
-   Initialize a lock record.
-
-   IN:
-      r............the lock record to initialize
-         t............will become the owner
 
-*******************************************************************************/
-
-static void lock_record_init(lock_record_t *r, threadobject *t)
-{
-       r->owner = NULL;
-       r->count = 0;
-       r->waiters = NULL;
-
-#if !defined(NDEBUG)
-       r->nextfree = NULL;
+#if defined(ENABLE_VMLOG)
+       vmlog_cacao_init_lock();
 #endif
-
-       pthread_mutex_init(&(r->mutex), NULL);
-}
-
-
-/* lock_init_execution_env *****************************************************
-
-   Initialize the execution environment for a thread.
-
-   IN:
-      thread.......the thread
-
-*******************************************************************************/
-
-void lock_init_execution_env(threadobject *thread)
-{
-       thread->ee.firstfree = NULL;
-       thread->ee.lockrecordpools = NULL;
-       thread->ee.lockrecordcount = 0;
 }
 
 
-
 /* lock_pre_compute_thinlock ***************************************************
 
    Pre-compute the thin lock value for a thread index.
@@ -280,220 +243,79 @@ ptrint lock_pre_compute_thinlock(s4 index)
 }
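
The body of lock_pre_compute_thinlock() is untouched by this patch and therefore not shown. With the illustrative lock word layout sketched near the top of this file, the per-thread value it caches would look roughly as follows; the shift amounts are assumptions, the real formula is in the unchanged code.

    /* Sketch only: thin lock word for an uncontended, count-zero lock. */

    static ptrint pre_compute_thinlock_sketch(s4 index)
    {
            /* the thread index sits above the recursion count field and the
               shape bit; count == 0 and shape bit == 0 mean "thin lock" */
            return ((ptrint) index) << (THIN_LOCK_COUNT_SHIFT + THIN_LOCK_COUNT_SIZE);
    }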
 
 
+/* lock_record_new *************************************************************
 
-/*============================================================================*/
-/* LOCK RECORD MANAGEMENT                                                     */
-/*============================================================================*/
-
-
-/* lock_record_alloc_new_pool **************************************************
-
-   Get a new lock record pool from the memory allocator.
-
-   IN:
-      thread.......the thread that will own the lock records
-         size.........number of lock records in the pool to allocate
-
-   RETURN VALUE:
-      the new lock record pool, with initialized lock records
+   Allocate a lock record.
 
 *******************************************************************************/
 
-static lock_record_pool_t *lock_record_alloc_new_pool(threadobject *thread, int size)
+static lock_record_t *lock_record_new(void)
 {
-       int i;
-       lock_record_pool_t *pool;
-
-       /* get the pool from the memory allocator */
-
-       pool = mem_alloc(sizeof(lock_record_pool_header_t)
-                                  + sizeof(lock_record_t) * size);
-
-       /* initialize the pool header */
-
-       pool->header.size = size;
-
-       /* initialize the individual lock records */
-
-       for (i=0; i<size; i++) {
-               lock_record_init(&pool->lr[i], thread);
-
-               pool->lr[i].nextfree = &pool->lr[i+1];
-       }
-
-       /* terminate free list */
-
-       pool->lr[i-1].nextfree = NULL;
-
-       return pool;
-}
-
-
-/* lock_record_alloc_pool ******************************************************
-
-   Allocate a lock record pool. The pool is either taken from the global free
-   list or requested from the memory allocator.
-
-   IN:
-      thread.......the thread that will own the lock records
-         size.........number of lock records in the pool to allocate
-
-   RETURN VALUE:
-      the new lock record pool, with initialized lock records
-
-*******************************************************************************/
-
-static lock_record_pool_t *lock_record_alloc_pool(threadobject *t, int size)
-{
-       pthread_mutex_lock(&lock_global_pool_lock);
-
-       if (lock_global_pool) {
-               int i;
-               lock_record_pool_t *pool;
-
-               /* pop a pool from the global freelist */
-
-               pool = lock_global_pool;
-               lock_global_pool = pool->header.next;
-
-               pthread_mutex_unlock(&lock_global_pool_lock);
-
-               /* re-initialize owner and freelist chaining */
-
-               for (i=0; i < pool->header.size; i++) {
-                       pool->lr[i].owner = NULL;
-                       pool->lr[i].nextfree = &pool->lr[i+1];
-               }
-               pool->lr[i-1].nextfree = NULL;
-
-               return pool;
-       }
-
-       pthread_mutex_unlock(&lock_global_pool_lock);
-
-       /* we have to get a new pool from the allocator */
-
-       return lock_record_alloc_new_pool(t, size);
-}
-
-
-/* lock_record_free_pools ******************************************************
-
-   Free the lock record pools in the given linked list. The pools are inserted
-   into the global freelist.
-
-   IN:
-      pool.........list header
-
-*******************************************************************************/
-
-void lock_record_free_pools(lock_record_pool_t *pool)
-{
-       lock_record_pool_header_t *last;
-
-       assert(false); /* XXX this function does not match the new locking */
-                      /*     algorithm. We must find another way to free  */
-                      /*     unused lock records.                         */
-
-       if (!pool)
-               return;
+       lock_record_t *lr;
 
-       pthread_mutex_lock(&lock_global_pool_lock);
+       /* allocate the data structure on the C heap */
 
-       /* find the last pool in the list */
+       lr = NEW(lock_record_t);
 
-       last = &pool->header;
-       while (last->next)
-               last = &last->next->header;
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_record += sizeof(lock_record_t);
+#endif
 
-       /* chain it to the lock_global_pool freelist */
+       /* initialize the members */
 
-       last->next = lock_global_pool;
+       lr->object  = NULL;
+       lr->owner   = NULL;
+       lr->count   = 0;
+       lr->waiters = NULL;
 
-       /* insert the freed pools into the freelist */
+       /* initialize the mutex */
 
-       lock_global_pool = pool;
+       pthread_mutex_init(&(lr->mutex), NULL);
 
-       pthread_mutex_unlock(&lock_global_pool_lock);
+       return lr;
 }
 
 
-/* lock_record_alloc ***********************************************************
+/* lock_record_free ************************************************************
 
-   Allocate a lock record which is owned by the current thread.
+   Free a lock record.
 
    IN:
-      t............the current thread 
+       lr....lock record to free
 
 *******************************************************************************/
 
-static lock_record_t *lock_record_alloc(threadobject *t)
+static void lock_record_free(lock_record_t *lr)
 {
-       lock_record_t *r;
-
-       assert(t);
-       r = t->ee.firstfree;
-
-       if (!r) {
-               int poolsize;
-               lock_record_pool_t *pool;
-
-               /* get a new pool */
-
-               poolsize = t->ee.lockrecordcount ? t->ee.lockrecordcount * 2
-                                                                                : LOCK_INITIAL_LOCK_RECORDS;
-               pool = lock_record_alloc_pool(t, poolsize);
-
-               /* add it to our per-thread pool list */
-
-               pool->header.next = t->ee.lockrecordpools;
-               t->ee.lockrecordpools = pool;
-               t->ee.lockrecordcount += pool->header.size;
+#if 0
+       /* reset the members */
 
-               /* take the first record from the pool */
-               r = &pool->lr[0];
-       }
-
-       /* pop the record from the freelist */
-
-       t->ee.firstfree = r->nextfree;
-#ifndef NDEBUG
-       r->nextfree = NULL; /* in order to find invalid uses of nextfree */
+       lr->object  = NULL;
+       lr->owner   = NULL;
+       lr->count   = 0;
+       lr->waiters = NULL;
 #endif
 
-       return r;
-}
-
+       /* destroy the mutex */
 
-/* lock_record_recycle *********************************************************
+       pthread_mutex_destroy(&(lr->mutex));
 
-   Recycle the given lock record. It will be inserted in the appropriate
-   free list.
-
-   IN:
-      t............the owner
-         r............lock record to recycle
-
-*******************************************************************************/
+       /* free the data structure */
 
-static inline void lock_record_recycle(threadobject *t, lock_record_t *r)
-{
-       assert(t);
-       assert(r);
-       assert(r->owner == NULL);
-       assert(r->nextfree == NULL);
+       FREE(lr, lock_record_t);
 
-       r->nextfree = t->ee.firstfree;
-       t->ee.firstfree = r;
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_record -= sizeof(lock_record_t);
+#endif
 }
 
 
-
 /*============================================================================*/
 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS                                  */
 /*============================================================================*/
 
-
 /* lock_hashtable_init *********************************************************
 
    Initialize the global hashtable mapping objects to lock records.
@@ -504,9 +326,15 @@ static void lock_hashtable_init(void)
 {
        pthread_mutex_init(&(lock_hashtable.mutex), NULL);
 
-       lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
+       lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
        lock_hashtable.entries = 0;
-       lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
+       lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
+#endif
+
        MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
 }
 
@@ -540,16 +368,22 @@ static void lock_hashtable_grow(void)
 
        oldtable = lock_hashtable.ptr;
        newtable = MNEW(lock_record_t *, newsize);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_hashtable += sizeof(lock_record_t *) * newsize;
+#endif
+
        MZERO(newtable, lock_record_t *, newsize);
 
        /* rehash the entries */
 
-       for (i=0; i<oldsize; ++i) {
+       for (i = 0; i < oldsize; i++) {
                lr = oldtable[i];
                while (lr) {
                        next = lr->hashlink;
 
-                       h = LOCK_HASH(lr->obj);
+                       h = LOCK_HASH(lr->object);
                        newslot = h % newsize;
 
                        lr->hashlink = newtable[newslot];
@@ -561,38 +395,45 @@ static void lock_hashtable_grow(void)
 
        /* replace the old table */
 
-       lock_hashtable.ptr = newtable;
+       lock_hashtable.ptr  = newtable;
        lock_hashtable.size = newsize;
 
        MFREE(oldtable, lock_record_t *, oldsize);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
+#endif
 }
 
 
-/* lock_hashtable_get_lock_record **********************************************
+/* lock_hashtable_get **********************************************************
 
-   Find the lock record for the given object. If it does not exists, yet,
-   create it and enter it in the hashtable.
+   Find the lock record for the given object.  If it does not exist
+   yet, create it and enter it in the hashtable.
 
    IN:
-      t.................the current thread
-         o.................the object to look up
+         o....the object to look up
 
    RETURN VALUE:
       the lock record to use for this object
 
 *******************************************************************************/
 
-static lock_record_t *lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o)
+#if defined(ENABLE_GC_BOEHM)
+static void lock_record_finalizer(void *object, void *p);
+#endif
+
+static lock_record_t *lock_hashtable_get(java_object_t *o)
 {
-       ptrint lockword;
-       u4 slot;
+       ptrint         lockword;
+       u4             slot;
        lock_record_t *lr;
 
        lockword = (ptrint) o->monitorPtr;
 
-       if (IS_FAT_LOCK(lockword)) {
+       if (IS_FAT_LOCK(lockword))
                return GET_FAT_LOCK(lockword);
-       }
 
        /* lock the hashtable */
 
@@ -601,26 +442,33 @@ static lock_record_t *lock_hashtable_get_lock_record(threadobject *t, java_objec
        /* lookup the lock record in the hashtable */
 
        slot = LOCK_HASH(o) % lock_hashtable.size;
-       lr = lock_hashtable.ptr[slot];
-       while (lr) {
-               if (lr->obj == o) {
+       lr   = lock_hashtable.ptr[slot];
+
+       for (; lr != NULL; lr = lr->hashlink) {
+               if (lr->object == o) {
                        pthread_mutex_unlock(&(lock_hashtable.mutex));
                        return lr;
                }
-
-               lr = lr->hashlink;
        }
 
        /* not found, we must create a new one */
 
-       lr = lock_record_alloc(t);
-       lr->obj = o;
+       lr = lock_record_new();
+
+       lr->object = o;
+
+#if defined(ENABLE_GC_BOEHM)
+       /* register new finalizer to clean up the lock record */
+
+       GC_REGISTER_FINALIZER(o, lock_record_finalizer, 0, 0, 0);
+#endif
+
        LOCK_LOG(("thread %d allocated for %p new lr %p\n",
-                       t->index, (void*) o, (void*) lr));
+                         t->index, (void*) o, (void*) lr));
 
        /* enter it in the hashtable */
 
-       lr->hashlink = lock_hashtable.ptr[slot];
+       lr->hashlink             = lock_hashtable.ptr[slot];
        lock_hashtable.ptr[slot] = lr;
        lock_hashtable.entries++;
 
@@ -640,6 +488,102 @@ static lock_record_t *lock_hashtable_get_lock_record(threadobject *t, java_objec
 }
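
Between the entries++ above and the closing brace, the tail of lock_hashtable_get() (unchanged and therefore elided here) checks the load factor, grows the table while still holding its mutex, and only then unlocks and returns the new record. Roughly like this, with the threshold being an assumption:

    /* grow the table once it holds more records than buckets
       (illustrative threshold; the real test is in the elided code) */

    if (lock_hashtable.entries > lock_hashtable.size)
            lock_hashtable_grow();

    /* unlock the hashtable and hand out the new record */

    pthread_mutex_unlock(&(lock_hashtable.mutex));

    return lr;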
 
 
+/* lock_hashtable_remove *******************************************************
+
+   Remove the lock record for the given object from the hashtable.
+
+   IN:
+       o....the object to look up
+
+*******************************************************************************/
+
+static void lock_hashtable_remove(java_object_t *o)
+{
+       ptrint         lockword;
+       lock_record_t *lr;
+       u4             slot;
+       lock_record_t *tmplr;
+
+       /* lock the hashtable */
+
+       pthread_mutex_lock(&(lock_hashtable.mutex));
+
+       /* get lock record */
+
+       lockword = (ptrint) o->monitorPtr;
+
+       assert(IS_FAT_LOCK(lockword));
+
+       lr = GET_FAT_LOCK(lockword);
+
+       /* remove the lock-record from the hashtable */
+
+       slot  = LOCK_HASH(o) % lock_hashtable.size;
+       tmplr = lock_hashtable.ptr[slot];
+
+       if (tmplr == lr) {
+               /* special handling if it's the first in the chain */
+
+               lock_hashtable.ptr[slot] = lr->hashlink;
+       }
+       else {
+               for (; tmplr != NULL; tmplr = tmplr->hashlink) {
+                       if (tmplr->hashlink == lr) {
+                               tmplr->hashlink = lr->hashlink;
+                               break;
+                       }
+               }
+
+               assert(tmplr != NULL);
+       }
+
+       /* decrease entry count */
+
+       lock_hashtable.entries--;
+
+       /* unlock the hashtable */
+
+       pthread_mutex_unlock(&(lock_hashtable.mutex));
+}
+
+
+/* lock_record_finalizer *******************************************************
+
+   XXX Remove me for exact GC.
+
+*******************************************************************************/
+
+static void lock_record_finalizer(void *object, void *p)
+{
+       java_object_t *o;
+       ptrint         lockword;
+       lock_record_t *lr;
+
+       o = (java_object_t *) object;
+
+       /* check for a finalizer function */
+
+       if (o->vftbl->class->finalizer != NULL)
+               finalizer_run(object, p);
+
+       /* remove the lock-record entry from the hashtable */
+
+       lock_hashtable_remove(o);
+
+       /* get lock record */
+
+       lockword = (ptrint) o->monitorPtr;
+
+       assert(IS_FAT_LOCK(lockword));
+
+       lr = GET_FAT_LOCK(lockword);
+
+       /* now release the lock record */
+
+       lock_record_free(lr);
+}
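
This finalizer is how fat-lock records get reclaimed under the Boehm collector: any object that ever had its monitor inflated gets lock_record_finalizer registered on it (see lock_hashtable_get above), and when the object becomes unreachable the collector runs it, which first chains to a Java-level finalizer if the class has one and then drops the hashtable entry and frees the record. A minimal standalone sketch of the Boehm registration pattern relied on here; the include path and the toy main() are assumptions about a typical libgc installation, and whether the object is really collected in one cycle depends on the collector's conservative scanning.

    #include <gc/gc.h>        /* Boehm GC; the include path varies per installation */
    #include <stdio.h>

    static void report_collected(void *obj, void *client_data)
    {
            /* called by the collector when obj is about to be reclaimed */
            printf("object %p collected, client data %p\n", obj, client_data);
    }

    int main(void)
    {
            void *o;

            GC_INIT();

            o = GC_MALLOC(64);

            /* same call shape as in lock_hashtable_get() above */
            GC_REGISTER_FINALIZER(o, report_collected, NULL, NULL, NULL);

            o = NULL;                 /* drop the only reference          */
            GC_gcollect();            /* trigger a collection             */
            GC_invoke_finalizers();   /* run any queued finalizers now    */

            return 0;
    }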
+
+
 /*============================================================================*/
 /* OBJECT LOCK INITIALIZATION                                                 */
 /*============================================================================*/
@@ -652,7 +596,7 @@ static lock_record_t *lock_hashtable_get_lock_record(threadobject *t, java_objec
 
 *******************************************************************************/
 
-void lock_init_object_lock(java_objectheader *o)
+void lock_init_object_lock(java_object_t *o)
 {
        assert(o);
 
@@ -694,6 +638,7 @@ lock_record_t *lock_get_initial_lock_word(void)
 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
 {
        pthread_mutex_lock(&(lr->mutex));
+
        lr->owner = t;
 }
 
@@ -736,7 +681,7 @@ static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
 
 *******************************************************************************/
 
-static void lock_inflate(threadobject *t, java_objectheader *o, lock_record_t *lr)
+static void lock_inflate(threadobject *t, java_object_t *o, lock_record_t *lr)
 {
        ptrint lockword;
 
@@ -789,12 +734,13 @@ static void lock_inflate(threadobject *t, java_objectheader *o, lock_record_t *l
 
 *******************************************************************************/
 
-bool lock_monitor_enter(java_objectheader *o)
+bool lock_monitor_enter(java_object_t *o)
 {
-       threadobject *t;
+       threadobject  *t;
        /* CAUTION: This code assumes that ptrint is unsigned! */
-       ptrint        lockword;
-       ptrint        thinlock;
+       ptrint         lockword;
+       ptrint         thinlock;
+       lock_record_t *lr;
 
        if (o == NULL) {
                exceptions_throw_nullpointerexception();
@@ -832,11 +778,9 @@ bool lock_monitor_enter(java_objectheader *o)
                        return true;
                }
                else {
-                       lock_record_t *lr;
-
                        /* recursion count overflow */
 
-                       lr = lock_hashtable_get_lock_record(t, o);
+                       lr = lock_hashtable_get(o);
                        lock_record_enter(t, lr);
                        lock_inflate(t, o, lr);
                        lr->count++;
@@ -847,79 +791,80 @@ bool lock_monitor_enter(java_objectheader *o)
 
 /* the lock is either contended or fat */
 
-       {
-               lock_record_t *lr;
-
-               if (IS_FAT_LOCK(lockword)) {
+       if (IS_FAT_LOCK(lockword)) {
 
-                       lr = GET_FAT_LOCK(lockword);
+               lr = GET_FAT_LOCK(lockword);
 
-                       /* check for recursive entering */
-                       if (lr->owner == t) {
-                               lr->count++;
-                               return true;
-                       }
+               /* check for recursive entering */
+               if (lr->owner == t) {
+                       lr->count++;
+                       return true;
+               }
 
-                       /* acquire the mutex of the lock record */
+               /* acquire the mutex of the lock record */
 
-                       lock_record_enter(t, lr);
+               lock_record_enter(t, lr);
 
-                       assert(lr->count == 0);
+               assert(lr->count == 0);
 
-                       return true;
-               }
+               return true;
+       }
 
-               /****** inflation path ******/
+       /****** inflation path ******/
 
-               /* first obtain the lock record for this object */
+       /* first obtain the lock record for this object */
 
-               lr = lock_hashtable_get_lock_record(t, o);
+       lr = lock_hashtable_get(o);
 
 #if defined(ENABLE_JVMTI)
-        /* Monitor Contended Enter */
-               jvmti_MonitorContendedEntering(false, o);
+       /* Monitor Contended Enter */
+       jvmti_MonitorContendedEntering(false, o);
 #endif
-               /* enter the monitor */
 
-               lock_record_enter(t, lr);
+       /* enter the monitor */
 
+       lock_record_enter(t, lr);
 
 #if defined(ENABLE_JVMTI)
-               /* Monitor Contended Entered */
-               jvmti_MonitorContendedEntering(true, o);
+       /* Monitor Contended Entered */
+       jvmti_MonitorContendedEntering(true, o);
 #endif
 
-               /* inflation loop */
+       /* inflation loop */
 
-               while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {
-                       /* Set the flat lock contention bit to let the owning thread */
-                       /* know that we want to be notified of unlocking.            */
+       while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {
+               /* Set the flat lock contention bit to let the owning thread
+                  know that we want to be notified of unlocking. */
 
-                       LOCK_SET_FLC_BIT(o);
+               LOCK_SET_FLC_BIT(o);
 
-                       LOCK_LOG(("thread %d set flc bit on %p lr %p\n",
-                                       t->index, (void*) o, (void*) lr));
+               LOCK_LOG(("thread %d set flc bit on %p lr %p\n",
+                                 t->index, (void*) o, (void*) lr));
 
-                       /* try to lock the object */
+               /* try to lock the object */
 
-                       if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
-                               /* we can inflate the lock ourselves */
-                               LOCK_LOG(("thread %d inflating lock of %p to lr %p\n",
-                                               t->index, (void*) o, (void*) lr));
-                               lock_inflate(t, o, lr);
-                       }
-                       else {
-                               /* wait until another thread sees the flc bit and notifies us of unlocking */
-                               LOCK_LOG(("thread %d waiting for notification on %p lr %p\n",
-                                               t->index, (void*) o, (void*) lr));
-                               lock_record_wait(t, lr, 0, 0);
-                       }
+               if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
+                       /* we can inflate the lock ourselves */
+
+                       LOCK_LOG(("thread %d inflating lock of %p to lr %p\n",
+                                         t->index, (void*) o, (void*) lr));
+
+                       lock_inflate(t, o, lr);
                }
+               else {
+                       /* wait until another thread sees the flc bit and notifies
+                          us of unlocking */
 
-               /* we own the inflated lock now */
+                       LOCK_LOG(("thread %d waiting for notification on %p lr %p\n",
+                                         t->index, (void*) o, (void*) lr));
 
-               return true;
+                       lock_record_wait(t, lr, 0, 0);
+               }
        }
+
+       /* we own the inflated lock now */
+
+       return true;
 }
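
For orientation, the uncontended and recursive thin-lock paths of lock_monitor_enter() are unchanged by this patch and therefore not shown above. Reconstructed from the macros used in this file, they look roughly like the sketch below; the thinlock field on the thread (filled from lock_pre_compute_thinlock()) and the exact count handling are assumptions.

    t        = THREADOBJECT;
    thinlock = t->thinlock;     /* per-thread value, assumed field name */

    /* fast path: object is unlocked, try to install our thin lock word */

    if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock))
            return true;

    lockword = (ptrint) o->monitorPtr;

    /* thin lock already held by this very thread: bump the recursion
       count embedded in the lock word; if the count field would overflow,
       fall through to the inflation code shown in the hunk above */

    if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
            /* ... increment the THIN_LOCK_COUNT bits or inflate ... */
    }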
 
 
@@ -941,7 +886,7 @@ bool lock_monitor_enter(java_objectheader *o)
 
 *******************************************************************************/
 
-bool lock_monitor_exit(java_objectheader *o)
+bool lock_monitor_exit(java_object_t *o)
 {
        threadobject *t;
        ptrint        lockword;
@@ -979,7 +924,7 @@ bool lock_monitor_exit(java_objectheader *o)
 
                        /* there has been a contention on this thin lock */
 
-                       lr = lock_hashtable_get_lock_record(t, o);
+                       lr = lock_hashtable_get(o);
 
                        LOCK_LOG(("thread %d for %p got lr %p\n",
                                        t->index, (void*) o, (void*) lr));
@@ -1047,6 +992,36 @@ bool lock_monitor_exit(java_objectheader *o)
 }
 
 
+/* lock_record_add_waiter ******************************************************
+
+   Add a thread to the list of waiting threads of a lock record.
+
+   IN:
+      lr...........the lock record
+      thread.......the thread to add
+
+*******************************************************************************/
+
+static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
+{
+       lock_waiter_t *waiter;
+
+       /* allocate a waiter data structure */
+
+       waiter = NEW(lock_waiter_t);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_waiter += sizeof(lock_waiter_t);
+#endif
+
+       waiter->waiter = thread;
+       waiter->next   = lr->waiters;
+
+       lr->waiters = waiter;
+}
+
+
 /* lock_record_remove_waiter ***************************************************
 
    Remove a thread from the list of waiting threads of a lock record.
@@ -1060,15 +1035,26 @@ bool lock_monitor_exit(java_objectheader *o)
    
 *******************************************************************************/
 
-static void lock_record_remove_waiter(lock_record_t *lr, threadobject *t)
+static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
 {
        lock_waiter_t **link;
-       lock_waiter_t *w;
+       lock_waiter_t  *w;
 
        link = &(lr->waiters);
+
        while ((w = *link)) {
-               if (w->waiter == t) {
+               if (w->waiter == thread) {
                        *link = w->next;
+
+                       /* free the waiter data structure */
+
+                       FREE(w, lock_waiter_t);
+
+#if defined(ENABLE_STATISTICS)
+                       if (opt_stat)
+                               size_lock_waiter -= sizeof(lock_waiter_t);
+#endif
+
                        return;
                }
 
@@ -1076,9 +1062,8 @@ static void lock_record_remove_waiter(lock_record_t *lr, threadobject *t)
        }
 
        /* this should never happen */
-       fprintf(stderr,"error: waiting thread not found in list of waiters\n");
-       fflush(stderr);
-       abort();
+
+       vm_abort("lock_record_remove_waiter: waiting thread not found in list of waiters\n");
 }
 
 
@@ -1098,20 +1083,16 @@ static void lock_record_remove_waiter(lock_record_t *lr, threadobject *t)
    
 *******************************************************************************/
 
-static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos)
+static void lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
 {
-       lock_waiter_t *waiter;
-       s4             lockcount;
-       bool           wasinterrupted;
+       s4   lockcount;
+       bool wasinterrupted;
 
        /* { the thread t owns the fat lock record lr on the object o } */
 
        /* register us as waiter for this object */
 
-       waiter = NEW(lock_waiter_t);
-       waiter->waiter = t;
-       waiter->next = lr->waiters;
-       lr->waiters = waiter;
+       lock_record_add_waiter(lr, thread);
 
        /* remember the old lock count */
 
@@ -1120,19 +1101,19 @@ static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 n
        /* unlock this record */
 
        lr->count = 0;
-       lock_record_exit(t, lr);
+       lock_record_exit(thread, lr);
 
        /* wait until notified/interrupted/timed out */
 
-       wasinterrupted = threads_wait_with_timeout_relative(t, millis, nanos);
+       wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);
 
        /* re-enter the monitor */
 
-       lock_record_enter(t, lr);
+       lock_record_enter(thread, lr);
 
        /* remove us from the list of waiting threads */
 
-       lock_record_remove_waiter(lr, t);
+       lock_record_remove_waiter(lr, thread);
 
        /* restore the old lock count */
 
@@ -1141,7 +1122,7 @@ static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 n
        /* if we have been interrupted, throw the appropriate exception */
 
        if (wasinterrupted)
-               *exceptionptr = new_exception(string_java_lang_InterruptedException);
+               exceptions_throw_interruptedexception();
 }
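
threads_wait_with_timeout_relative() (implemented in threads.c) is the blocking primitive this wait path relies on. It is the counterpart of lock_record_notify() below, which locks the target thread's waitmutex, signals waitcond only if the thread is marked sleeping, and sets signaled. A sketch of the waiter side of that handshake, using only the threadobject fields visible in the notify code; the real function also handles interruption and reports it to the caller, which is omitted here, and the sketch needs <time.h> and <errno.h>.

    static void wait_with_timeout_sketch(threadobject *t, s8 millis, s4 nanos)
    {
            struct timespec ts;

            pthread_mutex_lock(&t->waitmutex);

            t->sleeping = true;

            if (millis || nanos) {
                    /* build an absolute deadline for pthread_cond_timedwait */
                    clock_gettime(CLOCK_REALTIME, &ts);
                    ts.tv_sec  += millis / 1000;
                    ts.tv_nsec += (millis % 1000) * 1000000 + nanos;
                    if (ts.tv_nsec >= 1000000000) {
                            ts.tv_sec  += 1;
                            ts.tv_nsec -= 1000000000;
                    }

                    while (!t->signaled)
                            if (pthread_cond_timedwait(&t->waitcond, &t->waitmutex,
                                                       &ts) == ETIMEDOUT)
                                    break;
            }
            else {
                    while (!t->signaled)
                            pthread_cond_wait(&t->waitcond, &t->waitmutex);
            }

            t->signaled = false;
            t->sleeping = false;

            pthread_mutex_unlock(&t->waitmutex);
    }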
 
 
@@ -1160,7 +1141,7 @@ static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 n
    
 *******************************************************************************/
 
-static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nanos)
+static void lock_monitor_wait(threadobject *t, java_object_t *o, s8 millis, s4 nanos)
 {
        ptrint         lockword;
        lock_record_t *lr;
@@ -1189,7 +1170,9 @@ static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis,
                }
 
                /* inflate this lock */
-               lr = lock_hashtable_get_lock_record(t, o);
+
+               lr = lock_hashtable_get(o);
+
                lock_record_enter(t, lr);
                lock_inflate(t, o, lr);
        }
@@ -1224,16 +1207,19 @@ static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
 
        /* for each waiter: */
 
-       for (waiter = lr->waiters; waiter; waiter = waiter->next) {
+       for (waiter = lr->waiters; waiter != NULL; waiter = waiter->next) {
 
                /* signal the waiting thread */
 
                waitingthread = waiter->waiter;
 
                pthread_mutex_lock(&waitingthread->waitmutex);
+
                if (waitingthread->sleeping)
                        pthread_cond_signal(&waitingthread->waitcond);
+
                waitingthread->signaled = true;
+
                pthread_mutex_unlock(&waitingthread->waitmutex);
 
                /* if we should only wake one, we are done */
@@ -1258,7 +1244,7 @@ static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
    
 *******************************************************************************/
 
-static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
+static void lock_monitor_notify(threadobject *t, java_object_t *o, bool one)
 {
        ptrint lockword;
        lock_record_t *lr;
@@ -1287,7 +1273,9 @@ static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
                }
 
                /* inflate this lock */
-               lr = lock_hashtable_get_lock_record(t, o);
+
+               lr = lock_hashtable_get(o);
+
                lock_record_enter(t, lr);
                lock_inflate(t, o, lr);
        }
@@ -1316,22 +1304,23 @@ static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
    
 *******************************************************************************/
 
-bool lock_is_held_by_current_thread(java_objectheader *o)
+bool lock_is_held_by_current_thread(java_object_t *o)
 {
-       ptrint        lockword;
-       threadobject *t;
+       threadobject  *t;
+       ptrint         lockword;
+       lock_record_t *lr;
+
+       t = THREADOBJECT;
 
        /* check if we own this monitor */
        /* We don't have to worry about stale values here, as any stale value */
        /* will fail this check.                                              */
 
        lockword = (ptrint) o->monitorPtr;
-       t = THREADOBJECT;
 
        if (IS_FAT_LOCK(lockword)) {
-               lock_record_t *lr;
-
                /* it's a fat lock */
+
                lr = GET_FAT_LOCK(lockword);
 
                return (lr->owner == t);
@@ -1361,7 +1350,7 @@ bool lock_is_held_by_current_thread(java_objectheader *o)
    
 *******************************************************************************/
 
-void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
+void lock_wait_for_object(java_object_t *o, s8 millis, s4 nanos)
 {
        threadobject *thread;
 
@@ -1380,7 +1369,7 @@ void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
    
 *******************************************************************************/
 
-void lock_notify_object(java_objectheader *o)
+void lock_notify_object(java_object_t *o)
 {
        threadobject *thread;
 
@@ -1399,7 +1388,7 @@ void lock_notify_object(java_objectheader *o)
    
 *******************************************************************************/
 
-void lock_notify_all_object(java_objectheader *o)
+void lock_notify_all_object(java_object_t *o)
 {
        threadobject *thread;
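
Seen from the rest of the VM, the exported entry points keep their existing contract; only the java_objectheader to java_object_t rename is visible to callers. A hedged sketch of how calling code drives this API for a monitor with a wait/notify pair; the surrounding function and its purpose are purely illustrative.

    /* o is some java_object_t the VM wants to use as a monitor */

    static void consume_entry_sketch(java_object_t *o)
    {
            if (!lock_monitor_enter(o))     /* throws NullPointerException for o == NULL */
                    return;

            /* equivalent of Java-level o.wait(100): releases the monitor,
               blocks, and re-acquires it before returning */

            lock_wait_for_object(o, 100, 0);

            /* ... consume whatever we waited for ... */

            lock_notify_all_object(o);      /* wake every thread waiting on o */

            lock_monitor_exit(o);
    }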