* configure.ac (AC_CONFIG_FILES): Moved src/threads/native/Makefile to
diff --git a/src/threads/native/lock.c b/src/threads/native/lock.c
index 7042862a8bf4d068510529a0a840796e12c4cd62..6ef203fc82bc2bf6d5820532f3e03fbdfd6e8983 100644
--- a/src/threads/native/lock.c
+++ b/src/threads/native/lock.c
@@ -1,9 +1,7 @@
 /* src/threads/native/lock.c - lock implementation
 
-   Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
-   C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
-   E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
-   J. Wenninger, Institut f. Computersprachen - TU Wien
+   Copyright (C) 1996-2005, 2006, 2007, 2008
+   CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
 
    This file is part of CACAO.
 
@@ -41,6 +39,7 @@
 #include "native/llni.h"
 
 #include "threads/lock-common.h"
+#include "threads/threadlist.h"
 #include "threads/threads-common.h"
 
 #include "threads/native/lock.h"
@@ -248,6 +247,7 @@ ptrint lock_pre_compute_thinlock(s4 index)
 
 static lock_record_t *lock_record_new(void)
 {
+       int result;
        lock_record_t *lr;
 
        /* allocate the data structure on the C heap */
@@ -274,7 +274,9 @@ static lock_record_t *lock_record_new(void)
 
        /* initialize the mutex */
 
-       pthread_mutex_init(&(lr->mutex), NULL);
+       result = pthread_mutex_init(&(lr->mutex), NULL);
+       if (result != 0)
+               vm_abort_errnum(result, "lock_record_new: pthread_mutex_init failed");
 
        DEBUGLOCKS(("[lock_record_new   : lr=%p]", (void *) lr));
 
@@ -293,11 +295,15 @@ static lock_record_t *lock_record_new(void)
 
 static void lock_record_free(lock_record_t *lr)
 {
+       int result;
+
        DEBUGLOCKS(("[lock_record_free  : lr=%p]", (void *) lr));
 
        /* Destroy the mutex. */
 
-       pthread_mutex_destroy(&(lr->mutex));
+       result = pthread_mutex_destroy(&(lr->mutex));
+       if (result != 0)
+               vm_abort_errnum(result, "lock_record_free: pthread_mutex_destroy failed");
 
 #if defined(ENABLE_GC_CACAO)
        /* unregister the lock object reference with the GC */
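Both hunks above replace a fire-and-forget pthread call with an explicit check of its return value. Unlike most libc functions, the pthread_* family reports failures through the returned errno-style code rather than through errno itself, which is why that code is handed on to vm_abort_errnum. A minimal stand-alone sketch of the same check-and-abort pattern, with a hypothetical abort_errnum helper standing in for CACAO's vm_abort_errnum:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for vm_abort_errnum: pthread functions return an
   errno-style code instead of setting errno, so it is passed explicitly. */
static void abort_errnum(int errnum, const char *message)
{
	fprintf(stderr, "%s: %s\n", message, strerror(errnum));
	abort();
}

int main(void)
{
	pthread_mutex_t mutex;
	int result;

	/* Same check-and-abort pattern as lock_record_new/lock_record_free. */
	result = pthread_mutex_init(&mutex, NULL);
	if (result != 0)
		abort_errnum(result, "pthread_mutex_init failed");

	result = pthread_mutex_destroy(&mutex);
	if (result != 0)
		abort_errnum(result, "pthread_mutex_destroy failed");

	return 0;
}
```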
@@ -818,20 +824,22 @@ static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
 }
 
 
+/* TODO Move this function into threadlist.[ch]. */
+
 static threadobject *threads_lookup_thread_id(int index)
 {
        threadobject *t;
 
-       threads_list_lock();
+       threadlist_lock();
 
-       for (t = threads_list_first(); t != NULL; t = threads_list_next(t)) {
+       for (t = threadlist_first(); t != NULL; t = threadlist_next(t)) {
                if (t->state == THREAD_STATE_NEW)
                        continue;
                if (t->index == index)
                        break;
        }
 
-       threads_list_unlock();
+       threadlist_unlock();
        return t;
 }
 
@@ -908,15 +916,20 @@ static void notify_flc_waiters(threadobject *t, java_handle_t *o)
        {
                if (current->flc_object != o)
                {
-                       /* This entry is for another object that we are holding as
-                               well -- inflate it */
-                       lock_record_t *lr = lock_hashtable_get(t, current->flc_object);
-                       lock_record_enter(t, lr);
+                       /* The object has to be inflated so the other threads can properly
+                          block on it. */
 
-                       DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
-                                               t->index, (void*) current->flc_object, (void*) lr));
+                       /* Only if not already inflated */
+                       ptrint lockword = lock_lockword_get(t, current->flc_object);
+                       if (IS_THIN_LOCK(lockword)) {
+                               lock_record_t *lr = lock_hashtable_get(t, current->flc_object);
+                               lock_record_enter(t, lr);
 
-                       lock_inflate(t, current->flc_object, lr);
+                               DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
+                                                       t->index, (void*) current->flc_object, (void*) lr));
+
+                               lock_inflate(t, current->flc_object, lr);
+                       }
                }
                /* Wake the waiting thread */
                pthread_cond_broadcast(&current->flc_cond);
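The rewritten branch only inflates current->flc_object while its lockword still encodes a thin lock; an already fat lock is left alone, skipping the redundant lock_hashtable_get/lock_record_enter round trip. For readers unfamiliar with the thin/fat split, the sketch below shows a tagged-lockword scheme in miniature; the bit layout is purely illustrative and is not CACAO's actual encoding (the real code goes through lock_lockword_get and IS_THIN_LOCK):

```c
#include <stdbool.h>
#include <stdint.h>

typedef uintptr_t lockword_t;

/* Illustrative encoding only: the lowest bit tags a fat lock whose other
   bits carry a pointer to the lock record; a thin lock instead packs the
   owning thread's index and a recursion count into the word. */
#define FAT_LOCK_BIT ((lockword_t) 0x01)

static bool is_thin_lock(lockword_t lw)
{
	return (lw & FAT_LOCK_BIT) == 0;
}

static lockword_t make_fat_lock(const void *lock_record)
{
	/* Assumes lock records are at least 2-byte aligned, so the tag bit
	   never clashes with the pointer value. */
	return (lockword_t) lock_record | FAT_LOCK_BIT;
}
```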
@@ -1210,11 +1223,11 @@ static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
 
        l = lr->waiters;
 
-       for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
+       for (w = list_first(l); w != NULL; w = list_next(l, w)) {
                if (w->thread == thread) {
                        /* Remove the waiter entry from the list. */
 
-                       list_remove_unsynced(l, w);
+                       list_remove(l, w);
 
                        /* Free the waiter data structure. */
 
@@ -1258,7 +1271,7 @@ static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
 {
        s4   lockcount;
-       bool wasinterrupted;
+       bool wasinterrupted = false;
 
        DEBUGLOCKS(("[lock_record_wait  : lr=%p, t=%p, millis=%lld, nanos=%d]",
                                lr, thread, millis, nanos));
@@ -1280,7 +1293,7 @@ static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis,
 
        /* wait until notified/interrupted/timed out */
 
-       wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);
+       threads_wait_with_timeout_relative(thread, millis, nanos);
 
        /* re-enter the monitor */
 
@@ -1294,6 +1307,17 @@ static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis,
 
        lr->count = lockcount;
 
+	/* A wait must be reported either as signaled or as interrupted,
+	   never as both. If both flags happen to be set, reset only the
+	   signaled one and leave the thread interrupted; otherwise clear both. */
+
+       if (!thread->signaled) {
+               wasinterrupted = thread->interrupted;
+               thread->interrupted = false;
+       }
+
+       thread->signaled = false;
+
        /* return if we have been interrupted */
 
        return wasinterrupted;
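The interrupted status is now sampled here, after the monitor has been re-acquired, instead of being taken from threads_wait_with_timeout_relative. The effect is that a notification wins over a racing interrupt: this wait reports the notification, and the interrupt stays pending on the thread. A stand-alone model of that flag handling, with a hypothetical struct replacing the relevant threadobject fields:

```c
#include <assert.h>
#include <stdbool.h>

/* Hypothetical stand-in for the signaled/interrupted fields of threadobject. */
struct waiter {
	bool signaled;
	bool interrupted;
};

/* Mirrors the flag handling added to lock_record_wait: a pending
   notification wins over a concurrent interrupt, and the interrupt
   remains pending for a later wait or interruption check. */
static bool resolve_wait(struct waiter *t)
{
	bool wasinterrupted = false;

	if (!t->signaled) {
		wasinterrupted = t->interrupted;
		t->interrupted = false;
	}

	t->signaled = false;
	return wasinterrupted;
}

int main(void)
{
	struct waiter a = { true,  true  };   /* notified and interrupted */
	struct waiter b = { false, true  };   /* only interrupted */

	assert(resolve_wait(&a) == false && a.interrupted == true);
	assert(resolve_wait(&b) == true  && b.interrupted == false);
	return 0;
}
```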
@@ -1386,15 +1410,15 @@ static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
 
        l = lr->waiters;
 
-       for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
+       for (w = list_first(l); w != NULL; w = list_next(l, w)) {
                /* signal the waiting thread */
 
                waitingthread = w->thread;
 
-               /* If the thread was already signaled but hasn't removed
-                  itself from the list yet, just ignore it. */
+               /* We must skip threads which have already been notified or
+                  interrupted. They will remove themselves from the list. */
 
-               if (waitingthread->signaled == true)
+               if (waitingthread->signaled || waitingthread->interrupted)
                        continue;
 
                /* Enter the wait-mutex. */
@@ -1404,7 +1428,11 @@ static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
                DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
                                        lr, t, waitingthread, waitingthread->sleeping, one));
 
-               /* Signal the thread if it's sleeping. */
+               /* Signal the thread if it's sleeping. sleeping can be false
+                  when the waiting thread is blocked between giving up the
+                  monitor and entering the waitmutex. It will eventually
+                  observe that it's signaled and refrain from going to
+                  sleep. */
 
                if (waitingthread->sleeping)
                        pthread_cond_signal(&(waitingthread->waitcond));