/* src/threads/lock.cpp - lock implementation
- Copyright (C) 1996-2005, 2006, 2007, 2008
+ Copyright (C) 1996-2005, 2006, 2007, 2008, 2010
CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
This file is part of CACAO.
#include "vm/types.h"
-#include "mm/memory.h"
+#include "mm/memory.hpp"
#include "native/llni.h"
static void lock_hashtable_init(void);
-static inline Lockword* lock_lockword_get(java_handle_t* o);
+static inline uintptr_t* lock_lockword_get(java_handle_t* o);
static void lock_record_enter(threadobject *t, lock_record_t *lr);
static void lock_record_exit(threadobject *t, lock_record_t *lr);
static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
u4 slot;
lock_record_t *lr;
- Lockword* lockword = lock_lockword_get(o);
+ // lw_cache is used throughout this file because the lockword can change at
+ // any time, unless it is absolutely certain that we are holding the lock.
+ // We don't do deflation, so we would also not expect a fat lockword to
+ // change, but for the sake of uniformity, lw_cache is used even in this
+ // case.
+ uintptr_t lw_cache = *lock_lockword_get(o);
+ Lockword lockword(lw_cache);
- if (lockword->is_fat_lock())
- return lockword->get_fat_lock();
+ if (lockword.is_fat_lock())
+ return lockword.get_fat_lock();
// Lock the hashtable.
lock_hashtable.mutex->lock();
#if defined(ENABLE_GC_BOEHM)
/* register new finalizer to clean up the lock record */
- GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
+ GC_finalization_proc ofinal = 0;
+ GC_REGISTER_FINALIZER_UNREACHABLE(LLNI_DIRECT(o), lock_record_finalizer, 0, &ofinal, 0);
+
+ /* There was a finalizer -- reinstall it. We do not want to disrupt the
+ normal finalizer operation. We hold the monitor on this object, so
+ this is thread-safe. */
+ if (ofinal)
+ GC_REGISTER_FINALIZER_NO_ORDER(LLNI_DIRECT(o), ofinal, 0, 0, 0);
#endif
/* enter it in the hashtable */
return lr;
}
+/* lock_schedule_lockrecord_removal ********************************************
+
+ Gives the locking system a chance to schedule the removal of an unused lock
+ record. This function is called after an object's finalizer has run.
+
+ IN:
+ o....the object which has been finalized
+
+*******************************************************************************/
+
+#if defined(ENABLE_GC_BOEHM)
+void lock_schedule_lockrecord_removal(java_handle_t *o)
+{
+ Lockword lockword(*lock_lockword_get(o));
+ if (!lockword.is_fat_lock())
+ /* there is no lock record */
+ return;
+
+ /* register new finalizer to clean up the lock record */
+ GC_REGISTER_FINALIZER_UNREACHABLE(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
+}
+#endif
+
/* lock_hashtable_remove *******************************************************
/* get lock record */
- Lockword* lockword = lock_lockword_get(o);
+ uintptr_t lw_cache = *lock_lockword_get(o);
+ Lockword lockword(lw_cache);
// Sanity check.
- assert(lockword->is_fat_lock());
+ assert(lockword.is_fat_lock());
- lr = lockword->get_fat_lock();
+ lr = lockword.get_fat_lock();
/* remove the lock-record from the hashtable */
}
#endif
- /* check for a finalizer function */
-
- if (c->finalizer != NULL)
- finalizer_run(object, p);
-
/* remove the lock-record entry from the hashtable and free it */
lock_hashtable_remove(THREADOBJECT, o);
*******************************************************************************/
-static inline Lockword* lock_lockword_get(java_handle_t* o)
+static inline uintptr_t* lock_lockword_get(java_handle_t* o)
{
#if defined(ENABLE_GC_CACAO)
// Sanity check.
static void lock_inflate(java_handle_t *o, lock_record_t *lr)
{
- Lockword* lockword = lock_lockword_get(o);
- lockword->inflate(lr);
+ Lockword lockword(*lock_lockword_get(o));
+ lockword.inflate(lr);
}
-static void sable_flc_waiting(Lockword *lockword, threadobject *t, java_handle_t *o)
+/* sable_flc_waiting ***********************************************************
+
+ Enqueue the current thread on another thread's FLC list. The function
+ blocks until the lock has been inflated by the owning thread.
+
+   The algorithm was originally an almost literal copy from SableVM, but
+   the superfluous list traversal in the waiting loop has since been
+   removed.
+
+ IN:
+ lockword.....the object's lockword as seen at the first locking attempt
+ t............the current thread
+ o............the object of which to enter the monitor
+
+*******************************************************************************/
+
+static void sable_flc_waiting(uintptr_t lw_cache, threadobject *t, java_handle_t *o)
{
int32_t index;
threadobject *t_other;
int old_flc;
- index = lockword->get_thin_lock_thread_index();
+ Lockword lockword(lw_cache);
+ index = lockword.get_thin_lock_thread_index();
t_other = ThreadList::get_thread_by_index(index);
// The lockword could have changed during our way here. If the
// Set FLC bit first, then read the lockword again.
Atomic::memory_barrier();
- lockword = lock_lockword_get(o);
+ lw_cache = *lock_lockword_get(o);
/* Lockword is still the way it was seen before */
- if (lockword->is_thin_lock() && (lockword->get_thin_lock_thread_index() == index))
+ if (lockword.is_thin_lock() && (lockword.get_thin_lock_thread_index() == index))
{
threadobject *f;
/* Add tuple (t, o) to the other thread's FLC list */
t_other->flc_tail = t;
f = t_other->flc_tail;
- for (;;)
+ // The other thread will clear flc_object.
+ while (t->flc_object)
{
- threadobject *current;
+ // We are not cleared yet -- the other thread cannot have seen
+ // the FLC bit yet.
+ assert(t_other->flc_bit);
// Wait until another thread sees the flc bit and notifies
// us of unlocking.
t->flc_cond->wait(t_other->flc_lock);
-
- if (t_other->flc_tail != f)
- break;
- /* Traverse FLC list looking if we're still there */
- current = t_other->flc_list;
- while (current && current != t)
- current = current->flc_next;
- if (!current)
- /* not in list anymore, can stop waiting */
- break;
-
- /* We are still in the list -- the other thread cannot have seen
- the FLC bit yet */
- assert(t_other->flc_bit);
}
- t->flc_object = NULL; /* for garbage collector? */
t->flc_next = NULL;
}
else
t_other->flc_lock->unlock();
}
+/* notify_flc_waiters **********************************************************
+
+ Traverse the thread's FLC list and inflate all corresponding locks. Notify
+ the associated threads as well.
+
+ IN:
+ t............the current thread
+ o............the object currently being unlocked
+
+*******************************************************************************/
+
static void notify_flc_waiters(threadobject *t, java_handle_t *o)
{
threadobject *current;
block on it. */
// Only if not already inflated.
- Lockword* lockword = lock_lockword_get(current->flc_object);
- if (lockword->is_thin_lock()) {
+ Lockword lockword(*lock_lockword_get(current->flc_object));
+ if (lockword.is_thin_lock()) {
lock_record_t *lr = lock_hashtable_get(current->flc_object);
lock_record_enter(t, lr);
// Wake the waiting threads.
current->flc_cond->broadcast();
+ current->flc_object = NULL;
current = current->flc_next;
}
retry:
// Most common case: try to thin-lock an unlocked object.
- Lockword* lockword = lock_lockword_get(o);
- bool result = lockword->lock(thinlock);
+ uintptr_t *lw_ptr = lock_lockword_get(o);
+ uintptr_t lw_cache = *lw_ptr;
+ Lockword lockword(lw_cache);
+ bool result = Lockword(*lw_ptr).lock(thinlock);
if (result == true) {
// Success, we locked it.
- // NOTE: The Java Memory Model requires an instruction barrier
- // here (because of the CAS above).
+ // NOTE: The Java Memory Model requires a memory barrier here.
+#if defined(CAS_PROVIDES_FULL_BARRIER) && CAS_PROVIDES_FULL_BARRIER
+ // On some architectures, the CAS (hidden in the
+ // lockword.lock call above), already provides this barrier,
+ // so we only need to inform the compiler.
Atomic::instruction_barrier();
+#else
+ Atomic::memory_barrier();
+#endif
return true;
}
// NOTE: We don't have to worry about stale values here, as any
// stale value will indicate another thread holding the lock (or
// an inflated lock).
- if (lockword->get_thin_lock_without_count() == thinlock) {
+ if (lockword.get_thin_lock_without_count() == thinlock) {
// We own this monitor. Check the current recursion count.
- if (lockword->is_max_thin_lock_count() == false) {
+ if (lockword.is_max_thin_lock_count() == false) {
// The recursion count is low enough.
- lockword->increase_thin_lock_count();
+ Lockword(*lw_ptr).increase_thin_lock_count();
// Success, we locked it.
return true;
}
	// The lock is either contended or fat.
- if (lockword->is_fat_lock()) {
- lock_record_t* lr = lockword->get_fat_lock();
+ if (lockword.is_fat_lock()) {
+ lock_record_t* lr = lockword.get_fat_lock();
// Check for recursive entering.
if (lr->owner == t) {
jvmti_MonitorContendedEntering(false, o);
#endif
- sable_flc_waiting(lockword, t, o);
+ sable_flc_waiting(lw_cache, t, o);
#if defined(ENABLE_JVMTI)
/* Monitor Contended Entered */
// We don't have to worry about stale values here, as any stale
// value will indicate that we don't own the lock.
- Lockword* lockword = lock_lockword_get(o);
+ uintptr_t *lw_ptr = lock_lockword_get(o);
+ uintptr_t lw_cache = *lw_ptr;
+ Lockword lockword(lw_cache);
// Most common case: we release a thin lock that we hold once.
- if (lockword->get_thin_lock() == thinlock) {
+ if (lockword.get_thin_lock() == thinlock) {
// Memory barrier for Java Memory Model.
Atomic::write_memory_barrier();
- lockword->unlock();
- // Memory barrier for thin locking.
+ Lockword(*lw_ptr).unlock();
+ // Memory barrier for FLC bit testing.
Atomic::memory_barrier();
/* check if there has been a flat lock contention on this object */
}
// Next common case: we release a recursive lock, count > 0.
- if (lockword->get_thin_lock_without_count() == thinlock) {
- lockword->decrease_thin_lock_count();
+ if (lockword.get_thin_lock_without_count() == thinlock) {
+ Lockword(*lw_ptr).decrease_thin_lock_count();
return true;
}
// Either the lock is fat, or we don't hold it at all.
- if (lockword->is_fat_lock()) {
- lock_record_t* lr = lockword->get_fat_lock();
+ if (lockword.is_fat_lock()) {
+ lock_record_t* lr = lockword.get_fat_lock();
// Check if we own this monitor.
// NOTE: We don't have to worry about stale values here, as
{
lock_record_t *lr;
- Lockword* lockword = lock_lockword_get(o);
+ uintptr_t *lw_ptr = lock_lockword_get(o);
+ uintptr_t lw_cache = *lw_ptr;
+ Lockword lockword(lw_cache);
// Check if we own this monitor.
// NOTE: We don't have to worry about stale values here, as any
// stale value will fail this check.
- if (lockword->is_fat_lock()) {
- lr = lockword->get_fat_lock();
+ if (lockword.is_fat_lock()) {
+ lr = lockword.get_fat_lock();
if (lr->owner != t) {
exceptions_throw_illegalmonitorstateexception();
}
else {
// It's a thin lock.
- if (lockword->get_thin_lock_without_count() != t->thinlock) {
+ if (lockword.get_thin_lock_without_count() != t->thinlock) {
exceptions_throw_illegalmonitorstateexception();
return;
}
lock_record_enter(t, lr);
// Inflate this lock.
- lockword->inflate(lr);
+ Lockword(*lw_ptr).inflate(lr);
notify_flc_waiters(t, o);
}
// This scope is inside a critical section.
GCCriticalSection cs;
- Lockword* lockword = lock_lockword_get(o);
+ uintptr_t lw_cache = *lock_lockword_get(o);
+ Lockword lockword(lw_cache);
// Check if we own this monitor.
// NOTE: We don't have to worry about stale values here, as any
// stale value will fail this check.
- if (lockword->is_fat_lock()) {
- lr = lockword->get_fat_lock();
+ if (lockword.is_fat_lock()) {
+ lr = lockword.get_fat_lock();
if (lr->owner != t) {
exceptions_throw_illegalmonitorstateexception();
}
else {
// It's a thin lock.
- if (lockword->get_thin_lock_without_count() != t->thinlock) {
+ if (lockword.get_thin_lock_without_count() != t->thinlock) {
exceptions_throw_illegalmonitorstateexception();
return;
}
// NOTE: We don't have to worry about stale values here, as any
// stale value will fail this check.
threadobject* t = thread_get_current();
- Lockword* lockword = lock_lockword_get(o);
+ uintptr_t lw_cache = *lock_lockword_get(o);
+ Lockword lockword(lw_cache);
- if (lockword->is_fat_lock()) {
+ if (lockword.is_fat_lock()) {
// It's a fat lock.
- lock_record_t* lr = lockword->get_fat_lock();
+ lock_record_t* lr = lockword.get_fat_lock();
return (lr->owner == t);
}
else {
// It's a thin lock.
- return (lockword->get_thin_lock_without_count() == t->thinlock);
+ return (lockword.get_thin_lock_without_count() == t->thinlock);
}
}