* This commit adds C++ wrapper classes for OpenJDK.
[cacao.git] / src / threads / posix / lock.c
index 9dfd4571219445fc463051661755a6e3a4196912..79f42c14b9e4dbec7189501be4a78ac4c342d696 100644 (file)
 #include "native/llni.h"
 
 #include "threads/lock-common.h"
+#include "threads/mutex.hpp"
 #include "threads/threadlist.h"
-#include "threads/thread.h"
+#include "threads/thread.hpp"
 
 #include "threads/posix/lock.h"
 
 #include "toolbox/list.h"
 
 #include "vm/global.h"
-#include "vm/exceptions.h"
+#include "vm/exceptions.hpp"
 #include "vm/finalizer.h"
-#include "vm/stringlocal.h"
-#include "vm/vm.h"
+#include "vm/string.hpp"
+#include "vm/vm.hpp"
 
 #include "vmcore/options.h"
 
@@ -71,7 +72,7 @@
 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
 #include "threads/posix/generic-primitives.h"
 #else
-#include "machine-instr.h"
+#include "threads/atomic.hpp"
 #endif
 
 #if defined(ENABLE_JVMTI)
 
 #define LOCK_INITIAL_HASHTABLE_SIZE  1613  /* a prime in the middle between 1024 and 2048 */
 
-#define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
-       ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
-
 
 /******************************************************************************/
 /* MACROS FOR THIN/FAT LOCKS                                                  */
  *
  *     ,----------------------,-----------,---,
  *     |      thread ID       |   count   | 0 |
- *     `----------------------'-----------'---´
+ *     `----------------------'-----------'---'
  *
  *     thread ID......the 'index' of the owning thread, or 0
 *     count..........number of times the lock has been entered minus 1
  *
  *     ,----------------------------------,---,
  *     |    lock_record_t * (without LSB) | 1 |
- *     `----------------------------------'---´
+ *     `----------------------------------'---'
  *
  *     1..............the shape bit is 1 in fat lock mode
  */
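
To make the layout above concrete, here is a minimal sketch of how a thin-lock word could be composed from a thread index and an entry count. The field widths and helper name are hypothetical illustrations, not the constants CACAO actually defines for lock_pre_compute_thinlock.

/* Hypothetical sketch only: the real field widths live in the lock
   headers; the SKETCH_* constants below are made up for illustration. */

#include <stdint.h>

#define SKETCH_SHAPE_BITS  1   /* LSB: 0 = thin lock, 1 = fat lock  */
#define SKETCH_COUNT_BITS  8   /* assumed width of the count field  */

static inline uintptr_t sketch_thinlock_word(uintptr_t index, uintptr_t count)
{
        /* thread ID in the upper bits, count in the middle, shape bit 0 */
        return (index << (SKETCH_COUNT_BITS + SKETCH_SHAPE_BITS))
             | (count << SKETCH_SHAPE_BITS);
}

In this sketch an unlocked word would simply be all zeroes; in the code, the distinguished THIN_UNLOCKED constant plays that role (see the CAS fast path in lock_monitor_enter below).
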
@@ -246,7 +244,6 @@ ptrint lock_pre_compute_thinlock(s4 index)
 
 static lock_record_t *lock_record_new(void)
 {
-       int result;
        lock_record_t *lr;
 
        /* allocate the data structure on the C heap */
@@ -273,9 +270,7 @@ static lock_record_t *lock_record_new(void)
 
        /* initialize the mutex */
 
-       result = pthread_mutex_init(&(lr->mutex), NULL);
-       if (result != 0)
-               vm_abort_errnum(result, "lock_record_new: pthread_mutex_init failed");
+       lr->mutex = Mutex_new();
 
        DEBUGLOCKS(("[lock_record_new   : lr=%p]", (void *) lr));
 
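
Throughout this diff the raw pthread_mutex_* calls are replaced by Mutex_new/Mutex_lock/Mutex_unlock/Mutex_delete. As a reading aid, here is a minimal sketch of what such a C-callable wrapper could look like, assuming it is a thin layer over pthread_mutex_t; this is an assumption for illustration, not the actual mutex.hpp implementation.

/* Hypothetical wrapper sketch; error handling and the C++ class behind
   mutex.hpp are omitted, only the call shape used in this file is shown. */

#include <pthread.h>
#include <stdlib.h>

typedef struct Mutex { pthread_mutex_t m; } Mutex;

Mutex *Mutex_new(void)
{
        Mutex *mutex = malloc(sizeof(Mutex));
        pthread_mutex_init(&mutex->m, NULL);
        return mutex;
}

void Mutex_lock(Mutex *mutex)   { pthread_mutex_lock(&mutex->m); }
void Mutex_unlock(Mutex *mutex) { pthread_mutex_unlock(&mutex->m); }

void Mutex_delete(Mutex *mutex)
{
        pthread_mutex_destroy(&mutex->m);
        free(mutex);
}
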
@@ -294,15 +289,11 @@ static lock_record_t *lock_record_new(void)
 
 static void lock_record_free(lock_record_t *lr)
 {
-       int result;
-
        DEBUGLOCKS(("[lock_record_free  : lr=%p]", (void *) lr));
 
        /* Destroy the mutex. */
 
-       result = pthread_mutex_destroy(&(lr->mutex));
-       if (result != 0)
-               vm_abort_errnum(result, "lock_record_free: pthread_mutex_destroy failed");
+       Mutex_delete(lr->mutex);
 
 #if defined(ENABLE_GC_CACAO)
        /* unregister the lock object reference with the GC */
@@ -337,7 +328,7 @@ static void lock_record_free(lock_record_t *lr)
 
 static void lock_hashtable_init(void)
 {
-       pthread_mutex_init(&(lock_hashtable.mutex), NULL);
+       lock_hashtable.mutex = Mutex_new();
 
        lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
        lock_hashtable.entries = 0;
@@ -440,7 +431,7 @@ void lock_hashtable_cleanup(void)
 
        /* lock the hashtable */
 
-       pthread_mutex_lock(&(lock_hashtable.mutex));
+       Mutex_lock(lock_hashtable.mutex);
 
        /* search the hashtable for cleared references */
 
@@ -476,7 +467,7 @@ void lock_hashtable_cleanup(void)
 
        /* unlock the hashtable */
 
-       pthread_mutex_unlock(&(lock_hashtable.mutex));
+       Mutex_unlock(lock_hashtable.mutex);
 }
 #endif
 
@@ -512,7 +503,7 @@ static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
 
        /* lock the hashtable */
 
-       pthread_mutex_lock(&(lock_hashtable.mutex));
+       Mutex_lock(lock_hashtable.mutex);
 
        /* lookup the lock record in the hashtable */
 
@@ -556,7 +547,7 @@ static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
 
        /* unlock the hashtable */
 
-       pthread_mutex_unlock(&(lock_hashtable.mutex));
+       Mutex_unlock(lock_hashtable.mutex);
 
        /* return the new lock record */
 
@@ -584,7 +575,7 @@ static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
 
        /* lock the hashtable */
 
-       pthread_mutex_lock(&(lock_hashtable.mutex));
+       Mutex_lock(lock_hashtable.mutex);
 
        /* get lock record */
 
@@ -623,7 +614,7 @@ static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
 
        /* unlock the hashtable */
 
-       pthread_mutex_unlock(&(lock_hashtable.mutex));
+       Mutex_unlock(lock_hashtable.mutex);
 
        /* free the lock record */
 
@@ -751,7 +742,7 @@ static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_
 
 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
 {
-       pthread_mutex_lock(&(lr->mutex));
+       Mutex_lock(lr->mutex);
        lr->owner = t;
 }
 
@@ -773,7 +764,7 @@ static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
 {
        lr->owner = NULL;
-       pthread_mutex_unlock(&(lr->mutex));
+       Mutex_unlock(lr->mutex);
 }
 
 
@@ -854,7 +845,7 @@ static void sable_flc_waiting(ptrint lockword, threadobject *t, java_handle_t *o
 /*             failure, TODO: add statistics */
                return;
 
-       pthread_mutex_lock(&t_other->flc_lock);
+       Mutex_lock(t_other->flc_lock);
        old_flc = t_other->flc_bit;
        t_other->flc_bit = true;
 
@@ -862,7 +853,7 @@ static void sable_flc_waiting(ptrint lockword, threadobject *t, java_handle_t *o
                                t->index, t_other->index));
 
        /* Set FLC bit first, then read the lockword again */
-       MEMORY_BARRIER();
+       Atomic_memory_barrier();
 
        lockword = lock_lockword_get(t, o);
 
@@ -880,7 +871,7 @@ static void sable_flc_waiting(ptrint lockword, threadobject *t, java_handle_t *o
 
                        /* Wait until another thread sees the flc bit and notifies
                           us of unlocking. */
-                       pthread_cond_wait(&t->flc_cond, &t_other->flc_lock);
+                       Condition_wait(t->flc_cond, t_other->flc_lock);
 
                        /* Traverse FLC list looking if we're still there */
                        current = t_other->flc_list;
@@ -901,14 +892,14 @@ static void sable_flc_waiting(ptrint lockword, threadobject *t, java_handle_t *o
        else
                t_other->flc_bit = old_flc;
 
-       pthread_mutex_unlock(&t_other->flc_lock);
+       Mutex_unlock(t_other->flc_lock);
 }
 
 static void notify_flc_waiters(threadobject *t, java_handle_t *o)
 {
        threadobject *current;
 
-       pthread_mutex_lock(&t->flc_lock);
+       Mutex_lock(t->flc_lock);
 
        current = t->flc_list;
        while (current)
@@ -931,14 +922,14 @@ static void notify_flc_waiters(threadobject *t, java_handle_t *o)
                        }
                }
                /* Wake the waiting thread */
-               pthread_cond_broadcast(&current->flc_cond);
+               Condition_broadcast(current->flc_cond);
 
                current = current->flc_next;
        }
 
        t->flc_list = NULL;
        t->flc_bit = false;
-       pthread_mutex_unlock(&t->flc_lock);
+       Mutex_unlock(t->flc_lock);
 }
 
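The two functions above follow the standard condition-variable protocol: sable_flc_waiting re-checks its predicate in a loop while holding the other thread's flc_lock, and notify_flc_waiters changes the shared state and broadcasts under that same lock. A minimal self-contained sketch of that pattern, written with plain pthreads on the assumption that the Mutex_*/Condition_* wrappers keep pthread semantics:

/* Generic wait/notify pattern; illustration only, not CACAO code. */

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t flc_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  flc_cond = PTHREAD_COND_INITIALIZER;
static bool            released = false;

static void waiter(void)
{
        pthread_mutex_lock(&flc_lock);
        while (!released)                   /* re-check after every wakeup */
                pthread_cond_wait(&flc_cond, &flc_lock);
        pthread_mutex_unlock(&flc_lock);
}

static void notifier(void)
{
        pthread_mutex_lock(&flc_lock);
        released = true;                    /* update state under the lock */
        pthread_cond_broadcast(&flc_cond);  /* wake all waiters            */
        pthread_mutex_unlock(&flc_lock);
}
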
 /* lock_monitor_enter **********************************************************
@@ -979,14 +970,14 @@ retry:
        /* most common case: try to thin-lock an unlocked object */
 
        LLNI_CRITICAL_START_THREAD(t);
-       lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
+       lockword = Atomic_compare_and_swap_ptr(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
        LLNI_CRITICAL_END_THREAD(t);
 
        if (lockword == THIN_UNLOCKED) {
                /* success. we locked it */
                /* The Java Memory Model requires a memory barrier here: */
                /* Because of the CAS above, this barrier is a nop on x86 / x86_64 */
-               MEMORY_BARRIER_AFTER_ATOMIC();
+               Atomic_instruction_barrier();
                return true;
        }
 
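
The fast path above detects success by comparing the value returned from Atomic_compare_and_swap_ptr with THIN_UNLOCKED, i.e. the CAS is assumed to return the previous contents of the lock word, the same convention the removed COMPARE_AND_SWAP_OLD_VALUE macro used. A hedged sketch of those assumed semantics in terms of GCC's legacy __sync builtin:

/* Assumed semantics only: atomically replace *address with newval if it
   still equals oldval, and return whatever value was actually observed. */

#include <stdint.h>

static inline uintptr_t sketch_cas_old_value(volatile uintptr_t *address,
                                             uintptr_t oldval, uintptr_t newval)
{
        return __sync_val_compare_and_swap(address, oldval, newval);
}

If the returned value equals oldval, the swap took place and the caller now owns the thin lock; any other value means another thread got there first and the slow path below is taken.
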
@@ -1101,10 +1092,10 @@ bool lock_monitor_exit(java_handle_t *o)
 
        if (lockword == thinlock) {
                /* memory barrier for Java Memory Model */
-               STORE_ORDER_BARRIER();
+               Atomic_write_memory_barrier();
                lock_lockword_set(t, o, THIN_UNLOCKED);
-               /* memory barrier for thin locking */
-               MEMORY_BARRIER();
+               /* Memory barrier for thin locking. */
+               Atomic_memory_barrier();
 
                /* check if there has been a flat lock contention on this object */
 
@@ -1154,7 +1145,7 @@ bool lock_monitor_exit(java_handle_t *o)
                /* unlock this lock record */
 
                lr->owner = NULL;
-               pthread_mutex_unlock(&(lr->mutex));
+               Mutex_unlock(lr->mutex);
 
                return true;
        }
@@ -1414,27 +1405,20 @@ static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
 
                waitingthread = w->thread;
 
-               /* We must skip threads which have already been notified or
-                  interrupted. They will remove themselves from the list. */
+               /* We must skip threads which have already been notified. They will
+                  remove themselves from the list. */
 
-               if (waitingthread->signaled || waitingthread->interrupted)
+               if (waitingthread->signaled)
                        continue;
 
                /* Enter the wait-mutex. */
 
-               pthread_mutex_lock(&(waitingthread->waitmutex));
-
-               DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
-                                       lr, t, waitingthread, waitingthread->sleeping, one));
+               Mutex_lock(waitingthread->waitmutex);
 
-               /* Signal the thread if it's sleeping. sleeping can be false
-                  when the waiting thread is blocked between giving up the
-                  monitor and entering the waitmutex. It will eventually
-                  observe that it's signaled and refrain from going to
-                  sleep. */
+               DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, one=%d]",
+                                       lr, t, waitingthread, one));
 
-               if (waitingthread->sleeping)
-                       pthread_cond_signal(&(waitingthread->waitcond));
+               Condition_signal(waitingthread->waitcond);
 
                /* Mark the thread as signaled. */
 
@@ -1442,7 +1426,7 @@ static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
 
                /* Leave the wait-mutex. */
 
-               pthread_mutex_unlock(&(waitingthread->waitmutex));
+               Mutex_unlock(waitingthread->waitmutex);
 
                /* if we should only wake one, we are done */