* Removed all Id tags.
diff --git a/src/threads/native/lock.c b/src/threads/native/lock.c
index 7aaa6a03183b3706b3b8a817ea62aac6b8a6e48e..3adfb24dc3ff802a7b820aceb310abbab88de4e0 100644
--- a/src/threads/native/lock.c
+++ b/src/threads/native/lock.c
@@ -1,6 +1,6 @@
 /* src/threads/native/lock.c - lock implementation
 
-   Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
+   Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
    C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
    E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
    J. Wenninger, Institut f. Computersprachen - TU Wien
    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
    02110-1301, USA.
 
-   Contact: cacao@cacaojvm.org
-
-   Authors: Stefan Ring
-                       Edwin Steiner
-
-   Changes: Christian Thalinger
-
-   $Id: threads.c 4903 2006-05-11 12:48:43Z edwin $
-
 */
 
 
 #include <sys/time.h>
 #include <pthread.h>
 
-#include "mm/memory.h"
 #include "vm/types.h"
+
+#include "mm/memory.h"
+
+#include "threads/native/lock.h"
+#include "threads/native/threads.h"
+
 #include "vm/global.h"
 #include "vm/exceptions.h"
+#include "vm/finalizer.h"
 #include "vm/stringlocal.h"
+#include "vm/vm.h"
+
+#include "vmcore/options.h"
+
+#if defined(ENABLE_STATISTICS)
+# include "vmcore/statistics.h"
+#endif
+
+#if defined(ENABLE_VMLOG)
+#include <vmlog_cacao.h>
+#endif
 
 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
 
 #include "machine-instr.h"
 #endif
 
+#if defined(ENABLE_JVMTI)
+#include "native/jvmti/cacaodbg.h"
+#endif
+
+#if defined(ENABLE_GC_BOEHM)
+# include "mm/boehm-gc/include/gc.h"
+#endif
+
+
+/******************************************************************************/
+/* DEBUGGING MACROS                                                           */
+/******************************************************************************/
 
 /* #define LOCK_VERBOSE */
 
+#if defined(LOCK_VERBOSE)
+#define LOCK_LOG(args)  do { printf args; fflush(stdout); } while (0)
+#else
+#define LOCK_LOG(args)
+#endif
+
 
 /******************************************************************************/
 /* MACROS                                                                     */
 /******************************************************************************/
 
 /* number of lock records in the first pool allocated for a thread */
-#define INITIALLOCKRECORDS 8
+#define LOCK_INITIAL_LOCK_RECORDS 8
+
+#define LOCK_INITIAL_HASHTABLE_SIZE  1613  /* a prime roughly halfway between 1024 and 2048 */
+
+#define LOCK_HASH(obj)  ((ptrint)(obj))
 
 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
        ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
 #define COMPARE_AND_SWAP_SUCCEEDS(address, oldvalue, newvalue) \
        (compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)) == (long)(oldvalue))
 
 
+/******************************************************************************/
+/* MACROS FOR THE FLAT LOCK CONTENTION BIT                                    */
+/******************************************************************************/
+
+#define LOCK_SET_FLC_BIT(obj)    ((obj)->hdrflags |= HDRFLAG_FLC)
+#define LOCK_CLEAR_FLC_BIT(obj)  ((obj)->hdrflags &= ~ HDRFLAG_FLC)
+#define LOCK_TEST_FLC_BIT(obj)   ((obj)->hdrflags & HDRFLAG_FLC)
+
+
 /******************************************************************************/
 /* MACROS FOR THIN/FAT LOCKS                                                  */
 /******************************************************************************/
 
-/* We use a variant of the thin locks described in the paper
+/* We use a variant of the tasuki locks described in the paper
+ *     
+ *     Tamiya Onodera, Kiyokuni Kawachiya
+ *     A Study of Locking Objects with Bimodal Fields
+ *     Proceedings of the ACM OOPSLA '99, pp. 223-237
+ *     1999
+ *
+ * The underlying thin locks are a variant of the thin locks described in
  * 
  *     Bacon, Konuru, Murthy, Serrano
  *     Thin Locks: Featherweight Synchronization for Java
 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
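
(Aside: the THIN_LOCK_*, IS_FAT_LOCK, GET_FAT_LOCK and THIN_UNLOCKED macros used throughout this file come from threads/native/lock.h. As an illustration only -- not part of this patch, and assuming <stdio.h> for the output -- a lock word can be decoded with them roughly like this:)

    /* Illustration only, not part of this patch: decode a lock word using the
       macros referenced above.  A fat lock word is a tagged pointer to a
       lock_record_t; a thin lock word packs the owner's thread index and a
       small recursion count. */
    static void lock_word_describe(ptrint lockword)
    {
            if (IS_FAT_LOCK(lockword)) {
                    lock_record_t *lr = GET_FAT_LOCK(lockword);
                    printf("fat lock: record %p, count %d\n", (void *) lr, (int) lr->count);
            }
            else if (lockword == THIN_UNLOCKED) {
                    printf("unlocked\n");
            }
            else {
                    printf("thin lock: owner index %d, count %d\n",
                           (int) (lockword >> THIN_LOCK_TID_SHIFT),
                           (int) ((lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT));
            }
    }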
 
 
+/* global variables ***********************************************************/
+
+/* hashtable mapping objects to lock records */
+static lock_hashtable_t lock_hashtable;
+
+
 /******************************************************************************/
-/* GLOBAL VARIABLES                                                           */
+/* PROTOTYPES                                                                 */
 /******************************************************************************/
 
-/* global lock record pool list header */
-lock_record_pool_t *lock_global_pool;
-
-/* mutex for synchronizing access to the global pool */
-pthread_mutex_t lock_global_pool_lock;
+static void lock_hashtable_init(void);
 
+static void lock_record_enter(threadobject *t, lock_record_t *lr);
+static void lock_record_exit(threadobject *t, lock_record_t *lr);
+static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
+static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
 
 
 /*============================================================================*/
@@ -163,284 +215,373 @@ pthread_mutex_t lock_global_pool_lock;
 
 void lock_init(void)
 {
-       pthread_mutex_init(&lock_global_pool_lock, NULL);
+       /* initialize lock hashtable */
+
+       lock_hashtable_init();
+
+#if defined(ENABLE_VMLOG)
+       vmlog_cacao_init_lock();
+#endif
 }
 
 
-/* lock_record_init ************************************************************
+/* lock_pre_compute_thinlock ***************************************************
 
-   Initialize a lock record.
+   Pre-compute the thin lock value for a thread index.
 
    IN:
-      r............the lock record to initialize
-         t............will become the owner
+      index........the thread index (>= 1)
+
+   RETURN VALUE:
+      the thin lock value for this thread index
+
+*******************************************************************************/
+
+ptrint lock_pre_compute_thinlock(s4 index)
+{
+       return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
+}
+
+
+/* lock_record_new *************************************************************
+
+   Allocate a lock record.
 
 *******************************************************************************/
 
-static void lock_record_init(lock_record_t *r, threadobject *t)
+static lock_record_t *lock_record_new(void)
 {
-       r->owner = t;
-       r->count = 0;
-       r->waiters = NULL;
+       lock_record_t *lr;
+
+       /* allocate the data structure on the C heap */
+
+       lr = NEW(lock_record_t);
 
-#if !defined(NDEBUG)
-       r->nextfree = NULL;
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_record += sizeof(lock_record_t);
 #endif
 
-       pthread_mutex_init(&(r->mutex), NULL);
+       /* initialize the members */
+
+       lr->object  = NULL;
+       lr->owner   = NULL;
+       lr->count   = 0;
+       lr->waiters = NULL;
+
+       /* initialize the mutex */
+
+       pthread_mutex_init(&(lr->mutex), NULL);
+
+       return lr;
 }
 
 
-/* lock_init_execution_env *****************************************************
+/* lock_record_free ************************************************************
 
-   Initialize the execution environment for a thread.
+   Free a lock record.
 
    IN:
-      thread.......the thread
+       lr....lock record to free
 
 *******************************************************************************/
 
-void lock_init_execution_env(threadobject *thread)
+static void lock_record_free(lock_record_t *lr)
 {
-       thread->ee.firstfree = NULL;
-       thread->ee.lockrecordpools = NULL;
-       thread->ee.lockrecordcount = 0;
-}
+#if 0
+       /* check the members */
 
+       lr->object  = o;
+       lr->owner   = NULL;
+       lr->count   = 0;
+       lr->waiters = NULL;
+#endif
 
+       /* destroy the mutex */
 
-/* lock_pre_compute_thinlock ***************************************************
+       pthread_mutex_destroy(&(lr->mutex));
 
-   Pre-compute the thin lock value for a thread index.
+       /* free the data structure */
 
-   IN:
-      index........the thead index (>= 1)
+       FREE(lr, lock_record_t);
 
-   RETURN VALUE:
-      the thin lock value for this thread index
-
-*******************************************************************************/
-
-ptrint lock_pre_compute_thinlock(s4 index)
-{
-       return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_record -= sizeof(lock_record_t);
+#endif
 }
 
 
-
 /*============================================================================*/
-/* LOCK RECORD MANAGEMENT                                                     */
+/* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS                                  */
 /*============================================================================*/
 
+/* lock_hashtable_init *********************************************************
 
-/* lock_record_alloc_new_pool **************************************************
+   Initialize the global hashtable mapping objects to lock records.
 
-   Get a new lock record pool from the memory allocator.
+*******************************************************************************/
 
-   IN:
-      thread.......the thread that will own the lock records
-         size.........number of lock records in the pool to allocate
+static void lock_hashtable_init(void)
+{
+       pthread_mutex_init(&(lock_hashtable.mutex), NULL);
 
-   RETURN VALUE:
-      the new lock record pool, with initialized lock records
+       lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
+       lock_hashtable.entries = 0;
+       lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
+#endif
+
+       MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
+}
+
+
+/* lock_hashtable_grow *********************************************************
+
+   Grow the lock record hashtable to about twice its current size and
+   rehash the entries.
 
 *******************************************************************************/
 
-static lock_record_pool_t *lock_record_alloc_new_pool(threadobject *thread, int size)
+/* must be called with hashtable mutex locked */
+static void lock_hashtable_grow(void)
 {
-       int i;
-       lock_record_pool_t *pool;
+       u4 oldsize;
+       u4 newsize;
+       lock_record_t **oldtable;
+       lock_record_t **newtable;
+       lock_record_t *lr;
+       lock_record_t *next;
+       u4 i;
+       u4 h;
+       u4 newslot;
+
+       /* allocate a new table */
 
-       /* get the pool from the memory allocator */
+       oldsize = lock_hashtable.size;
+       newsize = oldsize*2 + 1; /* XXX should use prime numbers */
 
-       pool = mem_alloc(sizeof(lock_record_pool_header_t)
-                                  + sizeof(lock_record_t) * size);
+       LOCK_LOG(("growing lock hashtable to size %d\n", newsize));
 
-       /* initialize the pool header */
+       oldtable = lock_hashtable.ptr;
+       newtable = MNEW(lock_record_t *, newsize);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_hashtable += sizeof(lock_record_t *) * newsize;
+#endif
 
-       pool->header.size = size;
+       MZERO(newtable, lock_record_t *, newsize);
 
-       /* initialize the individual lock records */
+       /* rehash the entries */
 
-       for (i=0; i<size; i++) {
-               lock_record_init(&pool->lr[i], thread);
+       for (i = 0; i < oldsize; i++) {
+               lr = oldtable[i];
+               while (lr) {
+                       next = lr->hashlink;
 
-               pool->lr[i].nextfree = &pool->lr[i+1];
+                       h = LOCK_HASH(lr->object);
+                       newslot = h % newsize;
+
+                       lr->hashlink = newtable[newslot];
+                       newtable[newslot] = lr;
+
+                       lr = next;
+               }
        }
 
-       /* terminate free list */
+       /* replace the old table */
 
-       pool->lr[i-1].nextfree = NULL;
+       lock_hashtable.ptr  = newtable;
+       lock_hashtable.size = newsize;
 
-       return pool;
+       MFREE(oldtable, lock_record_t *, oldsize);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
+#endif
 }
 
 
-/* lock_record_alloc_pool ******************************************************
+/* lock_hashtable_get **********************************************************
 
-   Allocate a lock record pool. The pool is either taken from the global free
-   list or requested from the memory allocator.
+   Find the lock record for the given object.  If it does not exist
+   yet, create it and enter it in the hashtable.
 
    IN:
-      thread.......the thread that will own the lock records
-         size.........number of lock records in the pool to allocate
+         o....the object to look up
 
    RETURN VALUE:
-      the new lock record pool, with initialized lock records
+      the lock record to use for this object
 
 *******************************************************************************/
 
-static lock_record_pool_t *lock_record_alloc_pool(threadobject *t, int size)
+#if defined(ENABLE_GC_BOEHM)
+static void lock_record_finalizer(void *object, void *p);
+#endif
+
+static lock_record_t *lock_hashtable_get(java_object_t *o)
 {
-       pthread_mutex_lock(&lock_global_pool_lock);
+       ptrint         lockword;
+       u4             slot;
+       lock_record_t *lr;
 
-       if (lock_global_pool) {
-               int i;
-               lock_record_pool_t *pool;
+       lockword = (ptrint) o->monitorPtr;
 
-               /* pop a pool from the global freelist */
+       if (IS_FAT_LOCK(lockword))
+               return GET_FAT_LOCK(lockword);
 
-               pool = lock_global_pool;
-               lock_global_pool = pool->header.next;
+       /* lock the hashtable */
 
-               pthread_mutex_unlock(&lock_global_pool_lock);
+       pthread_mutex_lock(&(lock_hashtable.mutex));
 
-               /* re-initialize owner and freelist chaining */
+       /* lookup the lock record in the hashtable */
 
-               for (i=0; i < pool->header.size; i++) {
-                       pool->lr[i].owner = t;
-                       pool->lr[i].nextfree = &pool->lr[i+1];
-               }
-               pool->lr[i-1].nextfree = NULL;
+       slot = LOCK_HASH(o) % lock_hashtable.size;
+       lr   = lock_hashtable.ptr[slot];
 
-               return pool;
+       for (; lr != NULL; lr = lr->hashlink) {
+               if (lr->object == o) {
+                       pthread_mutex_unlock(&(lock_hashtable.mutex));
+                       return lr;
+               }
        }
 
-       pthread_mutex_unlock(&lock_global_pool_lock);
+       /* not found, we must create a new one */
 
-       /* we have to get a new pool from the allocator */
+       lr = lock_record_new();
 
-       return lock_record_alloc_new_pool(t, size);
-}
+       lr->object = o;
 
+#if defined(ENABLE_GC_BOEHM)
+       /* register new finalizer to clean up the lock record */
 
-/* lock_record_free_pools ******************************************************
-
-   Free the lock record pools in the given linked list. The pools are inserted
-   into the global freelist.
-
-   IN:
-      pool.........list header
-
-*******************************************************************************/
-
-void lock_record_free_pools(lock_record_pool_t *pool)
-{
-       lock_record_pool_header_t *last;
-
-       assert(false); /* XXX this function does not match the new locking */
-                      /*     algorithm. We must find another way to free  */
-                      /*     unused lock records.                         */
+       GC_REGISTER_FINALIZER(o, lock_record_finalizer, 0, 0, 0);
+#endif
 
-       if (!pool)
-               return;
+       LOCK_LOG(("allocated new lr %p for object %p\n",
+                         (void*) lr, (void*) o));
 
-       pthread_mutex_lock(&lock_global_pool_lock);
+       /* enter it in the hashtable */
 
-       /* find the last pool in the list */
+       lr->hashlink             = lock_hashtable.ptr[slot];
+       lock_hashtable.ptr[slot] = lr;
+       lock_hashtable.entries++;
 
-       last = &pool->header;
-       while (last->next)
-               last = &last->next->header;
+       /* check whether the hashtable should grow */
 
-       /* chain it to the lock_global_pool freelist */
+       if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
+               lock_hashtable_grow();
+       }
 
-       last->next = lock_global_pool;
+       /* unlock the hashtable */
 
-       /* insert the freed pools into the freelist */
+       pthread_mutex_unlock(&(lock_hashtable.mutex));
 
-       lock_global_pool = pool;
+       /* return the new lock record */
 
-       pthread_mutex_unlock(&lock_global_pool_lock);
+       return lr;
 }
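
(The growth check above, "entries * 3 > size * 4", triggers once the average chain length exceeds 4/3; starting from the initial size of 1613 buckets that first happens at 2151 entries. Restated as a standalone helper -- hypothetical, the patch simply inlines the comparison:)

    /* Hypothetical helper, not part of this patch: the hashtable grows when
       entries / size > 4/3, i.e. entries * 3 > size * 4.  With
       LOCK_INITIAL_HASHTABLE_SIZE (1613) this first triggers at 2151 entries. */
    static bool lock_hashtable_should_grow(u4 entries, u4 size)
    {
            return entries * 3 > size * 4;
    }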
 
 
-/* lock_record_alloc ***********************************************************
+/* lock_hashtable_remove *******************************************************
 
-   Allocate a lock record which is owned by the current thread.
+   Remove the lock record for the given object from the hashtable.
 
    IN:
-      t............the current thread 
-
-   POST-CONDITION:
-      The current thread holds the mutex of the returned lock record
-         and is recored as owner of the record.
+       o....the object to look up
 
 *******************************************************************************/
 
-static lock_record_t *lock_record_alloc(threadobject *t)
+static void lock_hashtable_remove(java_object_t *o)
 {
-       lock_record_t *r;
+       ptrint         lockword;
+       lock_record_t *lr;
+       u4             slot;
+       lock_record_t *tmplr;
+
+       /* lock the hashtable */
 
-       assert(t);
-       r = t->ee.firstfree;
+       pthread_mutex_lock(&(lock_hashtable.mutex));
 
-       if (!r) {
-               int poolsize;
-               lock_record_pool_t *pool;
+       /* get lock record */
 
-               /* get a new pool */
+       lockword = (ptrint) o->monitorPtr;
 
-               poolsize = t->ee.lockrecordcount ? t->ee.lockrecordcount * 2 : INITIALLOCKRECORDS;
-               pool = lock_record_alloc_pool(t, poolsize);
+       assert(IS_FAT_LOCK(lockword));
 
-               /* add it to our per-thread pool list */
+       lr = GET_FAT_LOCK(lockword);
 
-               pool->header.next = t->ee.lockrecordpools;
-               t->ee.lockrecordpools = pool;
-               t->ee.lockrecordcount += pool->header.size;
+       /* remove the lock-record from the hashtable */
 
-               /* take the first record from the pool */
-               r = &pool->lr[0];
+       slot  = LOCK_HASH(o) % lock_hashtable.size;
+       tmplr = lock_hashtable.ptr[slot];
+
+       if (tmplr == lr) {
+               /* special handling if it's the first in the chain */
+
+               lock_hashtable.ptr[slot] = lr->hashlink;
        }
+       else {
+               for (; tmplr != NULL; tmplr = tmplr->hashlink) {
+                       if (tmplr->hashlink == lr) {
+                               tmplr->hashlink = lr->hashlink;
+                               break;
+                       }
+               }
 
-       /* pop the record from the freelist */
+               assert(tmplr != NULL);
+       }
 
-       t->ee.firstfree = r->nextfree;
-#ifndef NDEBUG
-       r->nextfree = NULL; /* in order to find invalid uses of nextfree */
-#endif
+       /* decrease entry count */
 
-       /* pre-acquire the mutex of the new lock record */
+       lock_hashtable.entries--;
 
-       pthread_mutex_lock(&(r->mutex));
+       /* unlock the hashtable */
 
-       return r;
+       pthread_mutex_unlock(&(lock_hashtable.mutex));
 }
 
 
-/* lock_record_recycle *********************************************************
-
-   Recycle the given lock record. It will be inserted in the appropriate
-   free list.
+/* lock_record_finalizer *******************************************************
 
-   IN:
-      t............the owner
-         r............lock record to recycle
+   XXX Remove me for exact GC.
 
 *******************************************************************************/
 
-static inline void lock_record_recycle(threadobject *t, lock_record_t *r)
+static void lock_record_finalizer(void *object, void *p)
 {
-       assert(t);
-       assert(r);
-       assert(r->owner == t);
-       assert(r->nextfree == NULL);
+       java_object_t *o;
+       ptrint         lockword;
+       lock_record_t *lr;
 
-       r->nextfree = t->ee.firstfree;
-       t->ee.firstfree = r;
-}
+       o = (java_object_t *) object;
+
+       /* check for a finalizer function */
+
+       if (o->vftbl->class->finalizer != NULL)
+               finalizer_run(object, p);
+
+       /* remove the lock-record entry from the hashtable */
+
+       lock_hashtable_remove(o);
+
+       /* get lock record */
+
+       lockword = (ptrint) o->monitorPtr;
 
+       assert(IS_FAT_LOCK(lockword));
+
+       lr = GET_FAT_LOCK(lockword);
+
+       /* now release the lock record */
+
+       lock_record_free(lr);
+}
 
 
 /*============================================================================*/
@@ -455,11 +596,12 @@ static inline void lock_record_recycle(threadobject *t, lock_record_t *r)
 
 *******************************************************************************/
 
-void lock_init_object_lock(java_objectheader *o)
+void lock_init_object_lock(java_object_t *o)
 {
        assert(o);
 
        o->monitorPtr = (lock_record_t *) THIN_UNLOCKED;
+       LOCK_CLEAR_FLC_BIT(o);
 }
 
 
@@ -483,52 +625,95 @@ lock_record_t *lock_get_initial_lock_word(void)
 /*============================================================================*/
 
 
+/* lock_record_enter ***********************************************************
+
+   Enter the lock represented by the given lock record.
+
+   IN:
+      t.................the current thread
+         lr................the lock record
+
+*******************************************************************************/
+
+static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
+{
+       pthread_mutex_lock(&(lr->mutex));
+
+       lr->owner = t;
+}
+
+
+/* lock_record_exit ************************************************************
+
+   Release the lock represented by the given lock record.
+
+   IN:
+      t.................the current thread
+         lr................the lock record
+
+   PRE-CONDITION:
+      The current thread must own the lock represented by this lock record.
+         This is NOT checked by this function!
+
+*******************************************************************************/
+
+static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
+{
+       lr->owner = NULL;
+       pthread_mutex_unlock(&(lr->mutex));
+}
+
+
 /* lock_inflate ****************************************************************
 
    Inflate the lock of the given object. This may only be called by the
-   owner of the monitor.
+   owner of the monitor of the object.
 
    IN:
       t............the current thread
          o............the object of which to inflate the lock
-
-   RETURN VALUE:
-      the new lock record of the object
+         lr...........the lock record to install. The current thread must
+                      own the lock of this lock record!
 
    PRE-CONDITION:
-      The current thread must be the owner of this object's monitor!
+      The current thread must be the owner of this object's monitor AND
+         of the lock record's lock!
 
 *******************************************************************************/
 
-static lock_record_t *lock_inflate(threadobject *t, java_objectheader *o)
+static void lock_inflate(threadobject *t, java_object_t *o, lock_record_t *lr)
 {
-       lock_record_t *lr;
        ptrint lockword;
-       ptrint count;
 
        /* get the current lock count */
 
        lockword = (ptrint) o->monitorPtr;
 
-       assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );
+       if (IS_FAT_LOCK(lockword)) {
+               assert(GET_FAT_LOCK(lockword) == lr);
+       }
+       else {
+               assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );
+
+               /* copy the count from the thin lock */
 
-       count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
+               lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
+       }
 
-       /* allocate a fat lock */
+       LOCK_LOG(("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
+                       t->index, (void*) o, (long)o->monitorPtr, (int)lr->count));
 
-       lr = lock_record_alloc(t);
-       lr->count = count;
+       /* clear flat-lock-contention bit */
 
-#if defined(LOCK_VERBOSE)
-       printf("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
-                       t->index, (void*) o, (long)o->monitorPtr, (int)count);
-#endif
+       LOCK_CLEAR_FLC_BIT(o);
+
+       /* notify waiting objects */
+
+       lock_record_notify(t, lr, false);
 
        /* install it */
 
        o->monitorPtr = (lock_record_t *) MAKE_FAT_LOCK(lr);
-
-       return lr;
 }
 
 
@@ -543,13 +728,26 @@ static lock_record_t *lock_inflate(threadobject *t, java_objectheader *o)
       t............the current thread
          o............the object of which to enter the monitor
 
+   RETURN VALUE:
+      true.........the lock has been successfully acquired
+         false........an exception has been thrown
+
 *******************************************************************************/
 
-void lock_monitor_enter(threadobject *t, java_objectheader *o)
+bool lock_monitor_enter(java_object_t *o)
 {
+       threadobject  *t;
        /* CAUTION: This code assumes that ptrint is unsigned! */
-       ptrint lockword;
-       ptrint thinlock;
+       ptrint         lockword;
+       ptrint         thinlock;
+       lock_record_t *lr;
+
+       if (o == NULL) {
+               exceptions_throw_nullpointerexception();
+               return false;
+       }
+
+       t = THREADOBJECT;
 
        thinlock = t->thinlock;
 
@@ -559,7 +757,7 @@ void lock_monitor_enter(threadobject *t, java_objectheader *o)
                /* success. we locked it */
                /* The Java Memory Model requires a memory barrier here: */
                MEMORY_BARRIER();
-               return;
+               return true;
        }
 
        /* next common case: recursive lock with small recursion count */
@@ -577,89 +775,100 @@ void lock_monitor_enter(threadobject *t, java_objectheader *o)
                        o->monitorPtr = (lock_record_t *) (lockword + THIN_LOCK_COUNT_INCR);
 
                        /* success. we locked it */
-                       return;
+                       return true;
                }
                else {
-                       lock_record_t *lr;
-
                        /* recursion count overflow */
 
-                       lr = lock_inflate(t, o);
+                       lr = lock_hashtable_get(o);
+                       lock_record_enter(t, lr);
+                       lock_inflate(t, o, lr);
                        lr->count++;
 
-                       return;
+                       return true;
                }
        }
 
        /* the lock is either contended or fat */
 
-       {
-               lock_record_t *lr;
-               ptrint fatlock;
-
-               if (IS_FAT_LOCK(lockword)) {
+       if (IS_FAT_LOCK(lockword)) {
 
-                       lr = GET_FAT_LOCK(lockword);
+               lr = GET_FAT_LOCK(lockword);
 
-                       /* check for recursive entering */
-                       if (lr->owner == t) {
-                               lr->count++;
-                               return;
-                       }
+               /* check for recursive entering */
+               if (lr->owner == t) {
+                       lr->count++;
+                       return true;
                }
-               else {
-                       /* alloc a lock record owned by us */
-                       lr = lock_record_alloc(t);
-                       fatlock = MAKE_FAT_LOCK(lr);
 
-#if defined(LOCK_VERBOSE)
-                       printf("thread %3d: SPINNING for inflating lock of %p, current lockword = %lx\n",
-                                       t->index, (void*)o, (long)lockword);
-#endif
+               /* acquire the mutex of the lock record */
 
-                       /* SPIN LOOP */
-                       while (true) {
-                               lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, fatlock);
-                               if (lockword == THIN_UNLOCKED) {
-#if defined(LOCK_VERBOSE)
-                                       printf("thread %3d: successfully inflated lock of %p\n",
-                                                       t->index, (void*)o);
+               lock_record_enter(t, lr);
+
+               assert(lr->count == 0);
+
+               return true;
+       }
+
+       /****** inflation path ******/
+
+       /* first obtain the lock record for this object */
+
+       lr = lock_hashtable_get(o);
+
+#if defined(ENABLE_JVMTI)
+       /* Monitor Contended Enter */
+       jvmti_MonitorContendedEntering(false, o);
 #endif
-                                       /* we managed to install our lock record */
-                                       /* The Java Memory Model requires a memory barrier here: */
-                                       MEMORY_BARRIER();
-                                       return;
-                               }
 
-                               if (IS_FAT_LOCK(lockword)) {
-#if defined(LOCK_VERBOSE)
-                                       printf("thread %3d: lock of %p was inflated by other thread, lockword = %lx\n",
-                                                       t->index, (void*)o, (long)lockword);
+       /* enter the monitor */
+
+       lock_record_enter(t, lr);
+
+#if defined(ENABLE_JVMTI)
+       /* Monitor Contended Entered */
+       jvmti_MonitorContendedEntering(true, o);
 #endif
-                                       /* another thread inflated the lock */
-                                       pthread_mutex_unlock(&(lr->mutex));
-                                       lock_record_recycle(t, lr);
 
-                                       lr = GET_FAT_LOCK(lockword);
-                                       break;
-                               }
-                       }
-               }
+       /* inflation loop */
 
-               /* acquire the mutex of the lock record */
-               pthread_mutex_lock(&(lr->mutex));
+       while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {
+               /* Set the flat lock contention bit to let the owning thread
+                  know that we want to be notified of unlocking. */
 
-               /* enter us as the owner */
-               lr->owner = t;
+               LOCK_SET_FLC_BIT(o);
 
-               assert(lr->count == 0);
+               LOCK_LOG(("thread %d set flc bit on %p lr %p\n",
+                                 t->index, (void*) o, (void*) lr));
+
+               /* try to lock the object */
+
+               if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
+                       /* we can inflate the lock ourselves */
+
+                       LOCK_LOG(("thread %d inflating lock of %p to lr %p\n",
+                                         t->index, (void*) o, (void*) lr));
+
+                       lock_inflate(t, o, lr);
+               }
+               else {
+                       /* wait until another thread sees the flc bit and notifies
+                          us of unlocking */
 
-               return;
+                       LOCK_LOG(("thread %d waiting for notification on %p lr %p\n",
+                                         t->index, (void*) o, (void*) lr));
+
+                       lock_record_wait(t, lr, 0, 0);
+               }
        }
+
+       /* we own the inflated lock now */
+
+       return true;
 }
 
 
-/* lock_monitor_exit *****************************************************************
+/* lock_monitor_exit ***********************************************************
 
    Decrement the counter of a (currently owned) monitor. If the counter
    reaches zero, release the monitor.
@@ -677,10 +886,18 @@ void lock_monitor_enter(threadobject *t, java_objectheader *o)
 
 *******************************************************************************/
 
-bool lock_monitor_exit(threadobject *t, java_objectheader *o)
+bool lock_monitor_exit(java_object_t *o)
 {
-       ptrint lockword;
-       ptrint thinlock;
+       threadobject *t;
+       ptrint        lockword;
+       ptrint        thinlock;
+
+       if (o == NULL) {
+               exceptions_throw_nullpointerexception();
+               return false;
+       }
+
+       t = THREADOBJECT;
 
        /* We don't have to worry about stale values here, as any stale value */
        /* will indicate that we don't own the lock.                          */
@@ -696,6 +913,33 @@ bool lock_monitor_exit(threadobject *t, java_objectheader *o)
                o->monitorPtr = THIN_UNLOCKED;
                /* memory barrier for thin locking */
                MEMORY_BARRIER();
+
+               /* check if there has been a flat lock contention on this object */
+
+               if (LOCK_TEST_FLC_BIT(o)) {
+                       lock_record_t *lr;
+
+                       LOCK_LOG(("thread %d saw flc bit on %p %s\n",
+                                       t->index, (void*) o, o->vftbl->class->name->text));
+
+                       /* there has been a contention on this thin lock */
+
+                       lr = lock_hashtable_get(o);
+
+                       LOCK_LOG(("thread %d for %p got lr %p\n",
+                                       t->index, (void*) o, (void*) lr));
+
+                       lock_record_enter(t, lr);
+
+                       if (LOCK_TEST_FLC_BIT(o)) {
+                               /* notify a thread that it can try to inflate the lock now */
+
+                               lock_record_notify(t, lr, true);
+                       }
+
+                       lock_record_exit(t, lr);
+               }
+
                return true;
        }
 
@@ -719,7 +963,7 @@ bool lock_monitor_exit(threadobject *t, java_objectheader *o)
                /* will be != t and thus fail this check.                             */
 
                if (lr->owner != t) {
-                       *exceptionptr = new_illegalmonitorstateexception();
+                       exceptions_throw_illegalmonitorstateexception();
                        return false;
                }
 
@@ -742,12 +986,43 @@ bool lock_monitor_exit(threadobject *t, java_objectheader *o)
 
        /* legal thin lock cases have been handled above, so this is an error */
 
-       *exceptionptr = new_illegalmonitorstateexception();
+       exceptions_throw_illegalmonitorstateexception();
+
        return false;
 }
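
(lock_monitor_enter and lock_monitor_exit, together with the wait/notify wrappers further below, form the monitor API the rest of the VM builds on. A minimal usage sketch -- hypothetical caller, not part of this patch, with error handling simplified:)

    /* Hypothetical VM-internal caller: enter the monitor of o, wait on it for
       up to 'millis' milliseconds, then leave the monitor again. */
    static void example_monitor_wait(java_object_t *o, s8 millis)
    {
            if (!lock_monitor_enter(o))          /* throws NullPointerException on o == NULL */
                    return;

            /* ... critical section ... */

            lock_wait_for_object(o, millis, 0);  /* releases and re-acquires the monitor */

            lock_monitor_exit(o);                /* throws IllegalMonitorStateException if not owner */
    }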
 
 
-/* lock_record_remove_waiter *********************************************************
+/* lock_record_add_waiter ******************************************************
+
+   Add a thread to the list of waiting threads of a lock record.
+
+   IN:
+      lr...........the lock record
+      thread.......the thread to add
+
+*******************************************************************************/
+
+static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
+{
+       lock_waiter_t *waiter;
+
+       /* allocate a waiter data structure */
+
+       waiter = NEW(lock_waiter_t);
+
+#if defined(ENABLE_STATISTICS)
+       if (opt_stat)
+               size_lock_waiter += sizeof(lock_waiter_t);
+#endif
+
+       waiter->waiter = thread;
+       waiter->next   = lr->waiters;
+
+       lr->waiters = waiter;
+}
+
+
+/* lock_record_remove_waiter ***************************************************
 
    Remove a thread from the list of waiting threads of a lock record.
 
@@ -760,15 +1035,26 @@ bool lock_monitor_exit(threadobject *t, java_objectheader *o)
    
 *******************************************************************************/
 
-void lock_record_remove_waiter(lock_record_t *lr, threadobject *t)
+static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
 {
        lock_waiter_t **link;
-       lock_waiter_t *w;
+       lock_waiter_t  *w;
 
        link = &(lr->waiters);
+
        while ((w = *link)) {
-               if (w->waiter == t) {
+               if (w->waiter == thread) {
                        *link = w->next;
+
+                       /* free the waiter data structure */
+
+                       FREE(w, lock_waiter_t);
+
+#if defined(ENABLE_STATISTICS)
+                       if (opt_stat)
+                               size_lock_waiter -= sizeof(lock_waiter_t);
+#endif
+
                        return;
                }
 
@@ -776,13 +1062,71 @@ void lock_record_remove_waiter(lock_record_t *lr, threadobject *t)
        }
 
        /* this should never happen */
-       fprintf(stderr,"error: waiting thread not found in list of waiters\n");
-       fflush(stderr);
-       abort();
+
+       vm_abort("lock_record_remove_waiter: waiting thread not found in list of waiters\n");
 }
 
 
-/* lock_monitor_wait *****************************************************************
+/* lock_record_wait ************************************************************
+
+   Wait on a lock record for a given (maximum) amount of time.
+
+   IN:
+      t............the current thread
+         lr...........the lock record
+         millis.......milliseconds of timeout
+         nanos........nanoseconds of timeout
+
+   PRE-CONDITION:
+      The current thread must be the owner of the lock record.
+         This is NOT checked by this function!
+   
+*******************************************************************************/
+
+static void lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
+{
+       s4   lockcount;
+       bool wasinterrupted;
+
+       /* { the thread t owns the fat lock record lr on the object o } */
+
+       /* register us as waiter for this object */
+
+       lock_record_add_waiter(lr, thread);
+
+       /* remember the old lock count */
+
+       lockcount = lr->count;
+
+       /* unlock this record */
+
+       lr->count = 0;
+       lock_record_exit(thread, lr);
+
+       /* wait until notified/interrupted/timed out */
+
+       wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);
+
+       /* re-enter the monitor */
+
+       lock_record_enter(thread, lr);
+
+       /* remove us from the list of waiting threads */
+
+       lock_record_remove_waiter(lr, thread);
+
+       /* restore the old lock count */
+
+       lr->count = lockcount;
+
+       /* if we have been interrupted, throw the appropriate exception */
+
+       if (wasinterrupted)
+               exceptions_throw_interruptedexception();
+}
+
+
+/* lock_monitor_wait ***********************************************************
 
    Wait on an object for a given (maximum) amount of time.
 
@@ -797,13 +1141,10 @@ void lock_record_remove_waiter(lock_record_t *lr, threadobject *t)
    
 *******************************************************************************/
 
-void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nanos)
+static void lock_monitor_wait(threadobject *t, java_object_t *o, s8 millis, s4 nanos)
 {
        ptrint         lockword;
        lock_record_t *lr;
-       lock_waiter_t *waiter;
-       s4             lockcount;
-       bool           wasinterrupted;
 
        lockword = (ptrint) o->monitorPtr;
 
@@ -816,7 +1157,7 @@ void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nano
                lr = GET_FAT_LOCK(lockword);
 
                if (lr->owner != t) {
-                       *exceptionptr = new_illegalmonitorstateexception();
+                       exceptions_throw_illegalmonitorstateexception();
                        return;
                }
        }
@@ -824,61 +1165,72 @@ void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nano
                /* it's a thin lock */
 
                if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
-                       *exceptionptr = new_illegalmonitorstateexception();
+                       exceptions_throw_illegalmonitorstateexception();
                        return;
                }
 
                /* inflate this lock */
-               lr = lock_inflate(t, o);
+
+               lr = lock_hashtable_get(o);
+
+               lock_record_enter(t, lr);
+               lock_inflate(t, o, lr);
        }
 
        /* { the thread t owns the fat lock record lr on the object o } */
 
-       /* register us as waiter for this object */
+       lock_record_wait(t, lr, millis, nanos);
+}
 
-       waiter = NEW(lock_waiter_t);
-       waiter->waiter = t;
-       waiter->next = lr->waiters;
-       lr->waiters = waiter;
 
-       /* remember the old lock count */
+/* lock_record_notify **********************************************************
 
-       lockcount = lr->count;
+   Notify one thread or all threads waiting on the given lock record.
 
-       /* unlock this record */
+   IN:
+      t............the current thread
+         lr...........the lock record
+         one..........if true, only notify one thread
 
-       lr->count = 0;
-       lr->owner = NULL;
-       pthread_mutex_unlock(&(lr->mutex));
+   PRE-CONDITION:
+      The current thread must be the owner of the lock record.
+         This is NOT checked by this function!
+   
+*******************************************************************************/
 
-       /* wait until notified/interrupted/timed out */
+static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
+{
+       lock_waiter_t *waiter;
+       threadobject *waitingthread;
 
-       wasinterrupted = threads_wait_with_timeout_relative(t, millis, nanos);
+       /* { the thread t owns the fat lock record lr on the object o } */
 
-       /* re-enter the monitor */
+       /* for each waiter: */
 
-       lock_monitor_enter(t, o);
+       for (waiter = lr->waiters; waiter != NULL; waiter = waiter->next) {
 
-       /* assert that the lock record is still the same */
+               /* signal the waiting thread */
 
-       assert( GET_FAT_LOCK((ptrint) o->monitorPtr) == lr );
+               waitingthread = waiter->waiter;
 
-       /* remove us from the list of waiting threads */
+               pthread_mutex_lock(&waitingthread->waitmutex);
 
-       lock_record_remove_waiter(lr, t);
+               if (waitingthread->sleeping)
+                       pthread_cond_signal(&waitingthread->waitcond);
 
-       /* restore the old lock count */
+               waitingthread->signaled = true;
 
-       lr->count = lockcount;
+               pthread_mutex_unlock(&waitingthread->waitmutex);
 
-       /* if we have been interrupted, throw the appropriate exception */
+               /* if we should only wake one, we are done */
 
-       if (wasinterrupted)
-               *exceptionptr = new_exception(string_java_lang_InterruptedException);
+               if (one)
+                       break;
+       }
 }
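
(lock_record_notify signals waitcond only when the waiter is already sleeping, but it always sets the signaled flag; this relies on the waiting side -- threads_wait_with_timeout_relative in threads.c, not shown in this patch -- re-checking that flag under waitmutex before it blocks. A plausible sketch of that counterpart, assuming it uses exactly the waitmutex/waitcond/sleeping/signaled members seen above, with timeout and interruption handling omitted:)

    /* Assumed shape of the waiting side -- not part of this patch. */
    static bool example_wait_for_signal(threadobject *t)
    {
            pthread_mutex_lock(&t->waitmutex);

            t->sleeping = true;

            /* if a notifier already ran, 'signaled' is set and we never block */
            while (!t->signaled)
                    pthread_cond_wait(&t->waitcond, &t->waitmutex);

            t->signaled = false;
            t->sleeping = false;

            pthread_mutex_unlock(&t->waitmutex);

            return false;   /* "wasinterrupted": interruption handling omitted here */
    }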
 
 
-/* lock_monitor_notify **************************************************************
+/* lock_monitor_notify *********************************************************
 
    Notify one thread or all threads waiting on the given object.
 
@@ -892,12 +1244,10 @@ void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nano
    
 *******************************************************************************/
 
-static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
+static void lock_monitor_notify(threadobject *t, java_object_t *o, bool one)
 {
        ptrint lockword;
        lock_record_t *lr;
-       lock_waiter_t *waiter;
-       threadobject *waitingthread;
 
        lockword = (ptrint) o->monitorPtr;
 
@@ -910,7 +1260,7 @@ static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
                lr = GET_FAT_LOCK(lockword);
 
                if (lr->owner != t) {
-                       *exceptionptr = new_illegalmonitorstateexception();
+                       exceptions_throw_illegalmonitorstateexception();
                        return;
                }
        }
@@ -918,35 +1268,21 @@ static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
                /* it's a thin lock */
 
                if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
-                       *exceptionptr = new_illegalmonitorstateexception();
+                       exceptions_throw_illegalmonitorstateexception();
                        return;
                }
 
                /* inflate this lock */
-               lr = lock_inflate(t, o);
-       }
-
-       /* { the thread t owns the fat lock record lr on the object o } */
 
-       /* for each waiter: */
-
-       for (waiter = lr->waiters; waiter; waiter = waiter->next) {
-
-               /* signal the waiting thread */
+               lr = lock_hashtable_get(o);
 
-               waitingthread = waiter->waiter;
-
-               pthread_mutex_lock(&waitingthread->waitmutex);
-               if (waitingthread->sleeping)
-                       pthread_cond_signal(&waitingthread->waitcond);
-               waitingthread->signaled = true;
-               pthread_mutex_unlock(&waitingthread->waitmutex);
+               lock_record_enter(t, lr);
+               lock_inflate(t, o, lr);
+       }
 
-               /* if we should only wake one, we are done */
+       /* { the thread t owns the fat lock record lr on the object o } */
 
-               if (one)
-                       break;
-       }
+       lock_record_notify(t, lr, one);
 }
 
 
@@ -956,22 +1292,25 @@ static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
 /*============================================================================*/
 
 
-/* lock_does_thread_hold_lock **************************************************
+/* lock_is_held_by_current_thread **********************************************
 
-   Return true if the given thread owns the monitor of the given object.
+   Return true if the current thread owns the monitor of the given object.
 
    IN:
-      t............the thread
          o............the object
-   
-   RETURN VALUE:
-      true, if the thread is locking the object
 
+   RETURN VALUE:
+      true, if the current thread holds the lock of this object.
+   
 *******************************************************************************/
 
-bool lock_does_thread_hold_lock(threadobject *t, java_objectheader *o)
+bool lock_is_held_by_current_thread(java_object_t *o)
 {
-       ptrint lockword;
+       threadobject  *t;
+       ptrint         lockword;
+       lock_record_t *lr;
+
+       t = THREADOBJECT;
 
        /* check if we own this monitor */
        /* We don't have to worry about stale values here, as any stale value */
@@ -980,9 +1319,8 @@ bool lock_does_thread_hold_lock(threadobject *t, java_objectheader *o)
        lockword = (ptrint) o->monitorPtr;
 
        if (IS_FAT_LOCK(lockword)) {
-               lock_record_t *lr;
-
                /* it's a fat lock */
+
                lr = GET_FAT_LOCK(lockword);
 
                return (lr->owner == t);
@@ -1012,10 +1350,13 @@ bool lock_does_thread_hold_lock(threadobject *t, java_objectheader *o)
    
 *******************************************************************************/
 
-void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
+void lock_wait_for_object(java_object_t *o, s8 millis, s4 nanos)
 {
-       threadobject *t = (threadobject*) THREADOBJECT;
-       lock_monitor_wait(t, o, millis, nanos);
+       threadobject *thread;
+
+       thread = THREADOBJECT;
+
+       lock_monitor_wait(thread, o, millis, nanos);
 }
 
 
@@ -1028,10 +1369,13 @@ void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
    
 *******************************************************************************/
 
-void lock_notify_object(java_objectheader *o)
+void lock_notify_object(java_object_t *o)
 {
-       threadobject *t = (threadobject*) THREADOBJECT;
-       lock_monitor_notify(t, o, true);
+       threadobject *thread;
+
+       thread = THREADOBJECT;
+
+       lock_monitor_notify(thread, o, true);
 }
 
 
@@ -1044,12 +1388,16 @@ void lock_notify_object(java_objectheader *o)
    
 *******************************************************************************/
 
-void lock_notify_all_object(java_objectheader *o)
+void lock_notify_all_object(java_object_t *o)
 {
-       threadobject *t = (threadobject*) THREADOBJECT;
-       lock_monitor_notify(t, o, false);
+       threadobject *thread;
+
+       thread = THREADOBJECT;
+
+       lock_monitor_notify(thread, o, false);
 }
 
+
 /*
  * These are local overrides for various environment variables in Emacs.
  * Please do not remove this and leave it at the end of the file, where