#include "machine-instr.h"
#endif
+
+/******************************************************************************/
+/* DEBUGGING MACROS */
+/******************************************************************************/
+
/* #define LOCK_VERBOSE */
+#if defined(LOCK_VERBOSE)
+#define LOCK_LOG(args) do { printf args; fflush(stdout); } while (0)
+#else
+#define LOCK_LOG(args)
+#endif
+
/******************************************************************************/
/* MACROS */
/******************************************************************************/
/* number of lock records in the first pool allocated for a thread */
-#define INITIALLOCKRECORDS 8
+#define LOCK_INITIAL_LOCK_RECORDS 8
+
+#define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
+
+#define LOCK_HASH(obj) ((ptrint)(obj))
#define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
(compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)) == (long)(oldvalue))
+/******************************************************************************/
+/* MACROS FOR THE FLAT LOCK CONTENTION BIT */
+/******************************************************************************/
+
+#define LOCK_SET_FLC_BIT(obj) ((obj)->flcword = 1)
+#define LOCK_CLEAR_FLC_BIT(obj) ((obj)->flcword = 0)
+#define LOCK_TEST_FLC_BIT(obj) ((obj)->flcword != 0)
+
+
/******************************************************************************/
/* MACROS FOR THIN/FAT LOCKS */
/******************************************************************************/
-/* We use a variant of the thin locks described in the paper
+/* We use a variant of the tasuki locks described in the paper
+ *
+ * Tamiya Onodera, Kiyokuni Kawachiya
+ * A Study of Locking Objects with Bimodal Fields
+ * Proceedings of the ACM OOPSLA '99, pp. 223-237
+ * 1999
+ *
+ * The underlying thin locks are a variant of the thin locks described in
*
* Bacon, Konuru, Murthy, Serrano
* Thin Locks: Featherweight Synchronization for Java
/* mutex for synchronizing access to the global pool */
pthread_mutex_t lock_global_pool_lock;
+/* hashtable mapping objects to lock records */
+static lock_hashtable_t lock_hashtable;
+
+
+/******************************************************************************/
+/* PROTOTYPES */
+/******************************************************************************/
+
+static void lock_hashtable_init(void);
+static lock_record_t * lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o);
+
+static lock_record_t * lock_record_alloc(threadobject *t);
+
+static void lock_record_enter(threadobject *t, lock_record_t *lr);
+static void lock_record_exit(threadobject *t, lock_record_t *lr);
+static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
+static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
/*============================================================================*/
void lock_init(void)
{
pthread_mutex_init(&lock_global_pool_lock, NULL);
+
+ /* NEW: also set up the global hashtable mapping objects to lock */
+ /* records (required by the tasuki-lock inflation protocol). */
+ lock_hashtable_init();
}
static void lock_record_init(lock_record_t *r, threadobject *t)
{
- r->owner = t;
+ r->owner = NULL;
r->count = 0;
r->waiters = NULL;
/* re-initialize owner and freelist chaining */
for (i=0; i < pool->header.size; i++) {
- pool->lr[i].owner = t;
+ pool->lr[i].owner = NULL;
pool->lr[i].nextfree = &pool->lr[i+1];
}
pool->lr[i-1].nextfree = NULL;
IN:
t............the current thread
- POST-CONDITION:
- The current thread holds the mutex of the returned lock record
- and is recored as owner of the record.
-
*******************************************************************************/
static lock_record_t *lock_record_alloc(threadobject *t)
/* get a new pool */
- poolsize = t->ee.lockrecordcount ? t->ee.lockrecordcount * 2 : INITIALLOCKRECORDS;
+ poolsize = t->ee.lockrecordcount ? t->ee.lockrecordcount * 2
+ : LOCK_INITIAL_LOCK_RECORDS;
pool = lock_record_alloc_pool(t, poolsize);
/* add it to our per-thread pool list */
r->nextfree = NULL; /* in order to find invalid uses of nextfree */
#endif
- /* pre-acquire the mutex of the new lock record */
-
- pthread_mutex_lock(&(r->mutex));
-
return r;
}
{
assert(t);
assert(r);
- assert(r->owner == t);
+ assert(r->owner == NULL);
assert(r->nextfree == NULL);
r->nextfree = t->ee.firstfree;
+/*============================================================================*/
+/* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
+/*============================================================================*/
+
+
+/* lock_hashtable_init *********************************************************
+
+ Initialize the global hashtable mapping objects to lock records.
+
+*******************************************************************************/
+
+static void lock_hashtable_init(void)
+{
+ pthread_mutex_init(&(lock_hashtable.mutex), NULL);
+
+ /* start with a prime-sized table (see LOCK_INITIAL_HASHTABLE_SIZE) */
+ /* of NULL bucket heads; `entries` counts the chained lock records */
+ lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
+ lock_hashtable.entries = 0;
+ lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
+ MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
+}
+
+
+/* lock_hashtable_grow *********************************************************
+
+ Grow the lock record hashtable to about twice its current size and
+ rehash the entries.
+
+*******************************************************************************/
+
+/* must be called with hashtable mutex locked */
+static void lock_hashtable_grow(void)
+{
+ u4 oldsize;
+ u4 newsize;
+ lock_record_t **oldtable;
+ lock_record_t **newtable;
+ lock_record_t *lr;
+ lock_record_t *next;
+ u4 i;
+ u4 h;
+ u4 newslot;
+
+ /* allocate a new table */
+
+ oldsize = lock_hashtable.size;
+ newsize = oldsize*2 + 1; /* XXX should use prime numbers */
+
+ LOCK_LOG(("growing lock hashtable to size %d\n", newsize));
+
+ oldtable = lock_hashtable.ptr;
+ newtable = MNEW(lock_record_t *, newsize);
+ MZERO(newtable, lock_record_t *, newsize);
+
+ /* rehash the entries */
+
+ for (i=0; i<oldsize; ++i) {
+ lr = oldtable[i];
+ while (lr) {
+ /* save the chain link before we relink this record */
+ next = lr->hashlink;
+
+ h = LOCK_HASH(lr->obj);
+ newslot = h % newsize;
+
+ /* push onto the head of the new bucket's chain */
+ /* (chain order is not significant for lookups) */
+ lr->hashlink = newtable[newslot];
+ newtable[newslot] = lr;
+
+ lr = next;
+ }
+ }
+
+ /* replace the old table */
+
+ lock_hashtable.ptr = newtable;
+ lock_hashtable.size = newsize;
+
+ MFREE(oldtable, lock_record_t *, oldsize);
+}
+
+
+/* lock_hashtable_get_lock_record **********************************************
+
+ Find the lock record for the given object. If it does not exist yet,
+ create it and enter it in the hashtable.
+
+ IN:
+ t.................the current thread
+ o.................the object to look up
+
+ RETURN VALUE:
+ the lock record to use for this object
+
+*******************************************************************************/
+
+static lock_record_t *lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o)
+{
+ ptrint lockword;
+ u4 slot;
+ lock_record_t *lr;
+
+ lockword = (ptrint) o->monitorPtr;
+
+ /* fast path: if the lock is already inflated, the lockword points */
+ /* directly at the lock record -- no hashtable access needed */
+
+ if (IS_FAT_LOCK(lockword)) {
+ return GET_FAT_LOCK(lockword);
+ }
+
+ /* lock the hashtable */
+
+ pthread_mutex_lock(&(lock_hashtable.mutex));
+
+ /* lookup the lock record in the hashtable */
+
+ slot = LOCK_HASH(o) % lock_hashtable.size;
+ lr = lock_hashtable.ptr[slot];
+ while (lr) {
+ if (lr->obj == o) {
+ pthread_mutex_unlock(&(lock_hashtable.mutex));
+ return lr;
+ }
+
+ lr = lr->hashlink;
+ }
+
+ /* not found, we must create a new one */
+
+ lr = lock_record_alloc(t);
+ lr->obj = o;
+ LOCK_LOG(("thread %d allocated for %p new lr %p\n",
+ t->index, (void*) o, (void*) lr));
+
+ /* enter it in the hashtable */
+
+ lr->hashlink = lock_hashtable.ptr[slot];
+ lock_hashtable.ptr[slot] = lr;
+ lock_hashtable.entries++;
+
+ /* check whether the hash should grow */
+ /* grow when the average chain length exceeds 4/3 (entries/size > 4/3); */
+ /* a load factor above 1 is acceptable because buckets are chained */
+
+ if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
+ lock_hashtable_grow();
+ }
+
+ /* unlock the hashtable */
+
+ pthread_mutex_unlock(&(lock_hashtable.mutex));
+
+ /* return the new lock record */
+
+ return lr;
+}
+
+
/*============================================================================*/
/* OBJECT LOCK INITIALIZATION */
/*============================================================================*/
assert(o);
o->monitorPtr = (lock_record_t *) THIN_UNLOCKED;
+ o->flcword = 0;
}
/*============================================================================*/
+/* lock_record_enter ***********************************************************
+
+ Enter the lock represented by the given lock record.
+
+ IN:
+ t.................the current thread
+ lr................the lock record
+
+*******************************************************************************/
+
+static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
+{
+ /* blocks until the record's mutex is acquired, then records the */
+ /* current thread as owner (owner is written only while holding */
+ /* the mutex) */
+ pthread_mutex_lock(&(lr->mutex));
+ lr->owner = t;
+}
+
+
+/* lock_record_exit ************************************************************
+
+ Release the lock represented by the given lock record.
+
+ IN:
+ t.................the current thread
+ lr................the lock record
+
+ PRE-CONDITION:
+ The current thread must own the lock represented by this lock record.
+ This is NOT checked by this function!
+
+*******************************************************************************/
+
+static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
+{
+ /* clear the owner BEFORE releasing the mutex, so no thread ever */
+ /* observes a stale owner while the record is unlocked */
+ lr->owner = NULL;
+ pthread_mutex_unlock(&(lr->mutex));
+}
+
+
/* lock_inflate ****************************************************************
Inflate the lock of the given object. This may only be called by the
- owner of the monitor.
+ owner of the monitor of the object.
IN:
t............the current thread
o............the object of which to inflate the lock
-
- RETURN VALUE:
- the new lock record of the object
+ lr...........the lock record to install. The current thread must
+ own the lock of this lock record!
PRE-CONDITION:
- The current thread must be the owner of this object's monitor!
+ The current thread must be the owner of this object's monitor AND
+ of the lock record's lock!
*******************************************************************************/
-static lock_record_t *lock_inflate(threadobject *t, java_objectheader *o)
+static void lock_inflate(threadobject *t, java_objectheader *o, lock_record_t *lr)
{
- lock_record_t *lr;
ptrint lockword;
- ptrint count;
/* get the current lock count */
lockword = (ptrint) o->monitorPtr;
- assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );
+ if (IS_FAT_LOCK(lockword)) {
+ assert(GET_FAT_LOCK(lockword) == lr);
+ }
+ else {
+ assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );
- count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
+ /* copy the count from the thin lock */
- /* allocate a fat lock */
+ lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
+ }
- lr = lock_record_alloc(t);
- lr->count = count;
+ LOCK_LOG(("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
+ t->index, (void*) o, (long)o->monitorPtr, (int)lr->count));
-#if defined(LOCK_VERBOSE)
- printf("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
- t->index, (void*) o, (long)o->monitorPtr, (int)count);
-#endif
+ /* clear flat-lock-contention bit */
+
+ LOCK_CLEAR_FLC_BIT(o);
+
+ /* notify waiting objects */
+
+ lock_record_notify(t, lr, false);
/* install it */
o->monitorPtr = (lock_record_t *) MAKE_FAT_LOCK(lr);
-
- return lr;
}
/* recursion count overflow */
- lr = lock_inflate(t, o);
+ lr = lock_hashtable_get_lock_record(t, o);
+ lock_record_enter(t, lr);
+ lock_inflate(t, o, lr);
lr->count++;
return;
{
lock_record_t *lr;
- ptrint fatlock;
if (IS_FAT_LOCK(lockword)) {
lr->count++;
return;
}
- }
- else {
- /* alloc a lock record owned by us */
- lr = lock_record_alloc(t);
- fatlock = MAKE_FAT_LOCK(lr);
-#if defined(LOCK_VERBOSE)
- printf("thread %3d: SPINNING for inflating lock of %p, current lockword = %lx\n",
- t->index, (void*)o, (long)lockword);
-#endif
+ /* acquire the mutex of the lock record */
- /* SPIN LOOP */
- while (true) {
- lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, fatlock);
- if (lockword == THIN_UNLOCKED) {
-#if defined(LOCK_VERBOSE)
- printf("thread %3d: successfully inflated lock of %p\n",
- t->index, (void*)o);
-#endif
- /* we managed to install our lock record */
- /* The Java Memory Model requires a memory barrier here: */
- MEMORY_BARRIER();
- return;
- }
+ lock_record_enter(t, lr);
- if (IS_FAT_LOCK(lockword)) {
-#if defined(LOCK_VERBOSE)
- printf("thread %3d: lock of %p was inflated by other thread, lockword = %lx\n",
- t->index, (void*)o, (long)lockword);
-#endif
- /* another thread inflated the lock */
- pthread_mutex_unlock(&(lr->mutex));
- lock_record_recycle(t, lr);
+ assert(lr->count == 0);
- lr = GET_FAT_LOCK(lockword);
- break;
- }
- }
+ return;
}
- /* acquire the mutex of the lock record */
- pthread_mutex_lock(&(lr->mutex));
+ /****** inflation path ******/
+
+ /* first obtain the lock record for this object */
+
+ lr = lock_hashtable_get_lock_record(t, o);
+
+ /* enter the monitor */
- /* enter us as the owner */
- lr->owner = t;
+ lock_record_enter(t, lr);
- assert(lr->count == 0);
+ /* inflation loop */
+
+ while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {
+ /* Set the flat lock contention bit to let the owning thread */
+ /* know that we want to be notified of unlocking. */
+
+ LOCK_SET_FLC_BIT(o);
+
+ LOCK_LOG(("thread %d set flc bit on %p lr %p\n",
+ t->index, (void*) o, (void*) lr));
+
+ /* try to lock the object */
+
+ if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
+ /* we can inflate the lock ourselves */
+ LOCK_LOG(("thread %d inflating lock of %p to lr %p\n",
+ t->index, (void*) o, (void*) lr));
+ lock_inflate(t, o, lr);
+ }
+ else {
+ /* wait until another thread sees the flc bit and notifies us of unlocking */
+ LOCK_LOG(("thread %d waiting for notification on %p lr %p\n",
+ t->index, (void*) o, (void*) lr));
+ lock_record_wait(t, lr, 0, 0);
+ }
+ }
+
+ /* we own the inflated lock now */
return;
}
o->monitorPtr = THIN_UNLOCKED;
/* memory barrier for thin locking */
MEMORY_BARRIER();
+
+ /* check if there has been a flat lock contention on this object */
+
+ if (LOCK_TEST_FLC_BIT(o)) {
+ lock_record_t *lr;
+
+ LOCK_LOG(("thread %d saw flc bit on %p %s\n",
+ t->index, (void*) o, o->vftbl->class->name->text));
+
+ /* there has been a contention on this thin lock */
+
+ lr = lock_hashtable_get_lock_record(t, o);
+
+ LOCK_LOG(("thread %d for %p got lr %p\n",
+ t->index, (void*) o, (void*) lr));
+
+ lock_record_enter(t, lr);
+
+ if (LOCK_TEST_FLC_BIT(o)) {
+ /* notify a thread that it can try to inflate the lock now */
+
+ lock_record_notify(t, lr, true);
+ }
+
+ lock_record_exit(t, lr);
+ }
+
return true;
}
}
-/* lock_monitor_wait ***********************************************************
+/* lock_record_wait ************************************************************
- Wait on an object for a given (maximum) amount of time.
+ Wait on a lock record for a given (maximum) amount of time.
IN:
t............the current thread
- o............the object
+ lr...........the lock record
millis.......milliseconds of timeout
nanos........nanoseconds of timeout
PRE-CONDITION:
- The current thread must be the owner of the object's monitor.
+ The current thread must be the owner of the lock record.
+ This is NOT checked by this function!
*******************************************************************************/
-static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nanos)
+static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos)
{
- ptrint lockword;
- lock_record_t *lr;
lock_waiter_t *waiter;
s4 lockcount;
bool wasinterrupted;
- lockword = (ptrint) o->monitorPtr;
-
- /* check if we own this monitor */
- /* We don't have to worry about stale values here, as any stale value */
- /* will fail this check. */
-
- if (IS_FAT_LOCK(lockword)) {
-
- lr = GET_FAT_LOCK(lockword);
-
- if (lr->owner != t) {
- *exceptionptr = new_illegalmonitorstateexception();
- return;
- }
- }
- else {
- /* it's a thin lock */
-
- if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
- *exceptionptr = new_illegalmonitorstateexception();
- return;
- }
-
- /* inflate this lock */
- lr = lock_inflate(t, o);
- }
-
/* { the thread t owns the fat lock record lr on the object o } */
/* register us as waiter for this object */
/* unlock this record */
lr->count = 0;
- lr->owner = NULL;
- pthread_mutex_unlock(&(lr->mutex));
+ lock_record_exit(t, lr);
/* wait until notified/interrupted/timed out */
/* re-enter the monitor */
- lock_monitor_enter(t, o);
-
- /* assert that the lock record is still the same */
-
- assert( GET_FAT_LOCK((ptrint) o->monitorPtr) == lr );
+ lock_record_enter(t, lr);
/* remove us from the list of waiting threads */
}
-/* lock_monitor_notify *********************************************************
+/* lock_monitor_wait ***********************************************************
- Notify one thread or all threads waiting on the given object.
+ Wait on an object for a given (maximum) amount of time.
IN:
t............the current thread
o............the object
- one..........if true, only notify one thread
+ millis.......milliseconds of timeout
+ nanos........nanoseconds of timeout
PRE-CONDITION:
The current thread must be the owner of the object's monitor.
*******************************************************************************/
-static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
+static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nanos)
{
- ptrint lockword;
+ ptrint lockword;
lock_record_t *lr;
- lock_waiter_t *waiter;
- threadobject *waitingthread;
lockword = (ptrint) o->monitorPtr;
}
/* inflate this lock */
- lr = lock_inflate(t, o);
+ lr = lock_hashtable_get_lock_record(t, o);
+ lock_record_enter(t, lr);
+ lock_inflate(t, o, lr);
}
/* { the thread t owns the fat lock record lr on the object o } */
+ lock_record_wait(t, lr, millis, nanos);
+}
+
+
+/* lock_record_notify **********************************************************
+
+ Notify one thread or all threads waiting on the given lock record.
+
+ IN:
+ t............the current thread
+ lr...........the lock record
+ one..........if true, only notify one thread
+
+ PRE-CONDITION:
+ The current thread must be the owner of the lock record.
+ This is NOT checked by this function!
+
+*******************************************************************************/
+
+static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
+{
+ lock_waiter_t *waiter;
+ threadobject *waitingthread;
+
+ /* { the thread t owns the fat lock record lr on the object o } */
+
/* for each waiter: */
for (waiter = lr->waiters; waiter; waiter = waiter->next) {
}
+/* lock_monitor_notify *********************************************************
+
+ Notify one thread or all threads waiting on the given object.
+
+ IN:
+ t............the current thread
+ o............the object
+ one..........if true, only notify one thread
+
+ PRE-CONDITION:
+ The current thread must be the owner of the object's monitor.
+
+*******************************************************************************/
+
+static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
+{
+ ptrint lockword;
+ lock_record_t *lr;
+
+ lockword = (ptrint) o->monitorPtr;
+
+ /* check if we own this monitor */
+ /* We don't have to worry about stale values here, as any stale value */
+ /* will fail this check. */
+
+ if (IS_FAT_LOCK(lockword)) {
+
+ lr = GET_FAT_LOCK(lockword);
+
+ if (lr->owner != t) {
+ *exceptionptr = new_illegalmonitorstateexception();
+ return;
+ }
+ }
+ else {
+ /* it's a thin lock */
+
+ if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
+ *exceptionptr = new_illegalmonitorstateexception();
+ return;
+ }
+
+ /* inflate this lock */
+ /* The wait/notify state lives in the lock record, so a thin */
+ /* lock must be inflated before waiters can be notified. We */
+ /* own the thin lock here, so inflation is permitted. */
+ lr = lock_hashtable_get_lock_record(t, o);
+ lock_record_enter(t, lr);
+ lock_inflate(t, o, lr);
+ }
+
+ /* { the thread t owns the fat lock record lr on the object o } */
+
+ lock_record_notify(t, lr, one);
+}
+
+
/*============================================================================*/
/* INQUIRY FUNCIONS */