#include "native/llni.h"
#include "threads/lock-common.h"
+#include "threads/mutex.hpp"
#include "threads/threadlist.h"
-#include "threads/thread.h"
+#include "threads/thread.hpp"
#include "threads/posix/lock.h"
#include "toolbox/list.h"
#include "vm/global.h"
-#include "vm/exceptions.h"
+#include "vm/exceptions.hpp"
#include "vm/finalizer.h"
-#include "vm/stringlocal.h"
-#include "vm/vm.h"
+#include "vm/string.hpp"
+#include "vm/vm.hpp"
#include "vmcore/options.h"
#if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
#include "threads/posix/generic-primitives.h"
#else
-#include "machine-instr.h"
+#include "threads/atomic.hpp"
#endif
#if defined(ENABLE_JVMTI)
#define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
-#define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
- ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
-
/******************************************************************************/
/* MACROS FOR THIN/FAT LOCKS */
*
* ,----------------------,-----------,---,
* | thread ID | count | 0 |
- * `----------------------'-----------'---´
+ * `----------------------'-----------'---'
*
* thread ID......the 'index' of the owning thread, or 0
* count..........number of times the lock has been entered minus 1
*
* ,----------------------------------,---,
* | lock_record_t * (without LSB) | 1 |
- * `----------------------------------'---´
+ * `----------------------------------'---'
*
* 1..............the shape bit is 1 in fat lock mode
*/
static lock_record_t *lock_record_new(void)
{
- int result;
lock_record_t *lr;
/* allocate the data structure on the C heap */
/* initialize the mutex */
- result = pthread_mutex_init(&(lr->mutex), NULL);
- if (result != 0)
- vm_abort_errnum(result, "lock_record_new: pthread_mutex_init failed");
+ lr->mutex = Mutex_new();
DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
static void lock_record_free(lock_record_t *lr)
{
- int result;
-
DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
/* Destroy the mutex. */
- result = pthread_mutex_destroy(&(lr->mutex));
- if (result != 0)
- vm_abort_errnum(result, "lock_record_free: pthread_mutex_destroy failed");
+ Mutex_delete(lr->mutex);
#if defined(ENABLE_GC_CACAO)
/* unregister the lock object reference with the GC */
static void lock_hashtable_init(void)
{
- pthread_mutex_init(&(lock_hashtable.mutex), NULL);
+ lock_hashtable.mutex = Mutex_new();
lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
lock_hashtable.entries = 0;
/* lock the hashtable */
- pthread_mutex_lock(&(lock_hashtable.mutex));
+ Mutex_lock(lock_hashtable.mutex);
/* search the hashtable for cleared references */
/* unlock the hashtable */
- pthread_mutex_unlock(&(lock_hashtable.mutex));
+ Mutex_unlock(lock_hashtable.mutex);
}
#endif
/* lock the hashtable */
- pthread_mutex_lock(&(lock_hashtable.mutex));
+ Mutex_lock(lock_hashtable.mutex);
/* lookup the lock record in the hashtable */
/* unlock the hashtable */
- pthread_mutex_unlock(&(lock_hashtable.mutex));
+ Mutex_unlock(lock_hashtable.mutex);
/* return the new lock record */
/* lock the hashtable */
- pthread_mutex_lock(&(lock_hashtable.mutex));
+ Mutex_lock(lock_hashtable.mutex);
/* get lock record */
/* unlock the hashtable */
- pthread_mutex_unlock(&(lock_hashtable.mutex));
+ Mutex_unlock(lock_hashtable.mutex);
/* free the lock record */
static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
{
- pthread_mutex_lock(&(lr->mutex));
+ Mutex_lock(lr->mutex);
lr->owner = t;
}
static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
{
lr->owner = NULL;
- pthread_mutex_unlock(&(lr->mutex));
+ Mutex_unlock(lr->mutex);
}
/* failure, TODO: add statistics */
return;
- pthread_mutex_lock(&t_other->flc_lock);
+ Mutex_lock(t_other->flc_lock);
old_flc = t_other->flc_bit;
t_other->flc_bit = true;
t->index, t_other->index));
/* Set FLC bit first, then read the lockword again */
- MEMORY_BARRIER();
+ Atomic_memory_barrier();
lockword = lock_lockword_get(t, o);
/* Wait until another thread sees the flc bit and notifies
us of unlocking. */
- pthread_cond_wait(&t->flc_cond, &t_other->flc_lock);
+ Condition_wait(t->flc_cond, t_other->flc_lock);
/* Traverse FLC list looking if we're still there */
current = t_other->flc_list;
else
t_other->flc_bit = old_flc;
- pthread_mutex_unlock(&t_other->flc_lock);
+ Mutex_unlock(t_other->flc_lock);
}
static void notify_flc_waiters(threadobject *t, java_handle_t *o)
{
threadobject *current;
- pthread_mutex_lock(&t->flc_lock);
+ Mutex_lock(t->flc_lock);
current = t->flc_list;
while (current)
}
}
/* Wake the waiting thread */
- pthread_cond_broadcast(&current->flc_cond);
+ Condition_broadcast(current->flc_cond);
current = current->flc_next;
}
t->flc_list = NULL;
t->flc_bit = false;
- pthread_mutex_unlock(&t->flc_lock);
+ Mutex_unlock(t->flc_lock);
}
/* lock_monitor_enter **********************************************************
/* most common case: try to thin-lock an unlocked object */
LLNI_CRITICAL_START_THREAD(t);
- lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
+ lockword = Atomic_compare_and_swap_ptr(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
LLNI_CRITICAL_END_THREAD(t);
if (lockword == THIN_UNLOCKED) {
/* success. we locked it */
/* The Java Memory Model requires a memory barrier here: */
/* Because of the CAS above, this barrier is a nop on x86 / x86_64 */
- MEMORY_BARRIER_AFTER_ATOMIC();
+ Atomic_instruction_barrier();
return true;
}
if (lockword == thinlock) {
/* memory barrier for Java Memory Model */
- STORE_ORDER_BARRIER();
+ Atomic_write_memory_barrier();
lock_lockword_set(t, o, THIN_UNLOCKED);
- /* memory barrier for thin locking */
- MEMORY_BARRIER();
+ /* Memory barrier for thin locking. */
+ Atomic_memory_barrier();
/* check if there has been a flat lock contention on this object */
/* unlock this lock record */
lr->owner = NULL;
- pthread_mutex_unlock(&(lr->mutex));
+ Mutex_unlock(lr->mutex);
return true;
}
waitingthread = w->thread;
- /* We must skip threads which have already been notified or
- interrupted. They will remove themselves from the list. */
+ /* We must skip threads which have already been notified. They will
+ remove themselves from the list. */
- if (waitingthread->signaled || waitingthread->interrupted)
+ if (waitingthread->signaled)
continue;
/* Enter the wait-mutex. */
- pthread_mutex_lock(&(waitingthread->waitmutex));
-
- DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
- lr, t, waitingthread, waitingthread->sleeping, one));
+ Mutex_lock(waitingthread->waitmutex);
- /* Signal the thread if it's sleeping. sleeping can be false
- when the waiting thread is blocked between giving up the
- monitor and entering the waitmutex. It will eventually
- observe that it's signaled and refrain from going to
- sleep. */
+ DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, one=%d]",
+ lr, t, waitingthread, one));
- if (waitingthread->sleeping)
- pthread_cond_signal(&(waitingthread->waitcond));
+ Condition_signal(waitingthread->waitcond);
/* Mark the thread as signaled. */
/* Leave the wait-mutex. */
- pthread_mutex_unlock(&(waitingthread->waitmutex));
+ Mutex_unlock(waitingthread->waitmutex);
/* if we should only wake one, we are done */