/* src/threads/native/lock.c - lock implementation
- Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
+ Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
J. Wenninger, Institut f. Computersprachen - TU Wien
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
- Contact: cacao@cacaojvm.org
-
- Authors: Stefan Ring
- Edwin Steiner
-
- Changes: Christian Thalinger
-
$Id: threads.c 4903 2006-05-11 12:48:43Z edwin $
*/
#include <sys/time.h>
#include <pthread.h>
-#include "mm/memory.h"
#include "vm/types.h"
+
+#include "mm/memory.h"
+
+#include "threads/native/lock.h"
+#include "threads/native/threads.h"
+
#include "vm/global.h"
#include "vm/exceptions.h"
#include "vm/stringlocal.h"
+#include "vm/vm.h"
+
+#include "vmcore/options.h"
+
+#if defined(ENABLE_STATISTICS)
+# include "vmcore/statistics.h"
+#endif
+
+#if defined(ENABLE_VMLOG)
+#include <vmlog_cacao.h>
+#endif
/* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
pthread_mutex_init(&lock_global_pool_lock, NULL);
lock_hashtable_init();
+
+#if defined(ENABLE_VMLOG)
+ vmlog_cacao_init_lock();
+#endif
}
/* lock_record_alloc_new_pool **************************************************

   Allocate a new lock record pool from the memory allocator and
   initialize it.

   IN:
      thread.......the thread that will own the lock records
      size.........number of lock records in the pool
                    (assumes size >= 1 -- TODO confirm callers never pass 0,
                    otherwise the free-list termination below underflows)

   RETURN VALUE:
      the new lock record pool, with all records initialized and
      chained into the pool's free list

*******************************************************************************/

static lock_record_pool_t *lock_record_alloc_new_pool(threadobject *thread, int size)
{
	lock_record_pool_t *pool;
	s4                  i;

	/* get the pool from the memory allocator */

	pool = mem_alloc(sizeof(lock_record_pool_header_t)
				   + sizeof(lock_record_t) * size);

#if defined(ENABLE_STATISTICS)
	if (opt_stat)
		size_lock_record_pool += sizeof(lock_record_pool_header_t) +
			sizeof(lock_record_t) * size;
#endif

	/* initialize the pool header */

	pool->header.size = size;

	/* initialize the individual lock records and chain each one to
	   its successor to form the free list */

	for (i = 0; i < size; i++) {
		lock_record_init(&pool->lr[i], thread);

		pool->lr[i].nextfree = &pool->lr[i + 1];
	}

	/* terminate free list */

	pool->lr[i - 1].nextfree = NULL;

	return pool;
}
{
pthread_mutex_lock(&lock_global_pool_lock);
- if (lock_global_pool) {
+ if (lock_global_pool != NULL) {
int i;
lock_record_pool_t *pool;
/* pop a pool from the global freelist */
- pool = lock_global_pool;
+ pool = lock_global_pool;
lock_global_pool = pool->header.next;
pthread_mutex_unlock(&lock_global_pool_lock);
/* re-initialize owner and freelist chaining */
- for (i=0; i < pool->header.size; i++) {
- pool->lr[i].owner = NULL;
- pool->lr[i].nextfree = &pool->lr[i+1];
+ for (i = 0; i < pool->header.size; i++) {
+ pool->lr[i].owner = NULL;
+ pool->lr[i].nextfree = &pool->lr[i + 1];
}
- pool->lr[i-1].nextfree = NULL;
+ pool->lr[i - 1].nextfree = NULL;
return pool;
}
/* algorithm. We must find another way to free */
/* unused lock records. */
- if (!pool)
+ if (pool == NULL)
return;
pthread_mutex_lock(&lock_global_pool_lock);
/* find the last pool in the list */
last = &pool->header;
+
while (last->next)
last = &last->next->header;
{
pthread_mutex_init(&(lock_hashtable.mutex), NULL);
- lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
+ lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
lock_hashtable.entries = 0;
- lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
+ lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
+
+#if defined(ENABLE_STATISTICS)
+ if (opt_stat)
+ size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
+#endif
+
MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
}
oldtable = lock_hashtable.ptr;
newtable = MNEW(lock_record_t *, newsize);
+
+#if defined(ENABLE_STATISTICS)
+ if (opt_stat)
+ size_lock_hashtable += sizeof(lock_record_t *) * newsize;
+#endif
+
MZERO(newtable, lock_record_t *, newsize);
/* rehash the entries */
- for (i=0; i<oldsize; ++i) {
+ for (i = 0; i < oldsize; i++) {
lr = oldtable[i];
while (lr) {
next = lr->hashlink;
/* replace the old table */
- lock_hashtable.ptr = newtable;
+ lock_hashtable.ptr = newtable;
lock_hashtable.size = newsize;
MFREE(oldtable, lock_record_t *, oldsize);
+
+#if defined(ENABLE_STATISTICS)
+ if (opt_stat)
+ size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
+#endif
}
}
+/* lock_record_add_waiter ******************************************************
+
+ Add a thread to the list of waiting threads of a lock record.
+
+ IN:
+ lr...........the lock record
+ thread.......the thread to add
+
+*******************************************************************************/
+
+static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
+{
+ lock_waiter_t *waiter;
+
+ /* allocate a waiter data structure */
+
+ waiter = NEW(lock_waiter_t);
+
+#if defined(ENABLE_STATISTICS)
+ if (opt_stat)
+ size_lock_waiter += sizeof(lock_waiter_t);
+#endif
+
+ waiter->waiter = thread;
+ waiter->next = lr->waiters;
+
+ lr->waiters = waiter;
+}
+
+
/* lock_record_remove_waiter ***************************************************
Remove a thread from the list of waiting threads of a lock record.
*******************************************************************************/
-static void lock_record_remove_waiter(lock_record_t *lr, threadobject *t)
+static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
{
lock_waiter_t **link;
- lock_waiter_t *w;
+ lock_waiter_t *w;
link = &(lr->waiters);
+
while ((w = *link)) {
- if (w->waiter == t) {
+ if (w->waiter == thread) {
*link = w->next;
+
+ /* free the waiter data structure */
+
+ FREE(w, lock_waiter_t);
+
+#if defined(ENABLE_STATISTICS)
+ if (opt_stat)
+ size_lock_waiter -= sizeof(lock_waiter_t);
+#endif
+
return;
}
}
/* this should never happen */
- fprintf(stderr,"error: waiting thread not found in list of waiters\n");
- fflush(stderr);
- abort();
+
+ vm_abort("lock_record_remove_waiter: waiting thread not found in list of waiters\n");
}
*******************************************************************************/
-static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos)
+static void lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
{
- lock_waiter_t *waiter;
- s4 lockcount;
- bool wasinterrupted;
+ s4 lockcount;
+ bool wasinterrupted;
/* { the thread t owns the fat lock record lr on the object o } */
/* register us as waiter for this object */
- waiter = NEW(lock_waiter_t);
- waiter->waiter = t;
- waiter->next = lr->waiters;
- lr->waiters = waiter;
+ lock_record_add_waiter(lr, thread);
/* remember the old lock count */
/* unlock this record */
lr->count = 0;
- lock_record_exit(t, lr);
+ lock_record_exit(thread, lr);
/* wait until notified/interrupted/timed out */
- wasinterrupted = threads_wait_with_timeout_relative(t, millis, nanos);
+ wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);
/* re-enter the monitor */
- lock_record_enter(t, lr);
+ lock_record_enter(thread, lr);
/* remove us from the list of waiting threads */
- lock_record_remove_waiter(lr, t);
+ lock_record_remove_waiter(lr, thread);
/* restore the old lock count */
/* if we have been interrupted, throw the appropriate exception */
if (wasinterrupted)
- *exceptionptr = new_exception(string_java_lang_InterruptedException);
+ exceptions_throw_interruptedexception();
}
/* lock_wait_for_object ********************************************************

   Wait on an object's monitor for a given (maximum) amount of time,
   on behalf of the current thread.

   IN:
      o............the object on which to wait
      millis.......milliseconds of timeout
      nanos........nanoseconds of timeout

*******************************************************************************/

void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
{
	threadobject *thread;

	thread = THREADOBJECT;

	lock_monitor_wait(thread, o, millis, nanos);
}
/* lock_notify_object **********************************************************

   Notify a thread waiting on the given object's monitor, on behalf of
   the current thread.

   IN:
      o............the object whose monitor is notified

*******************************************************************************/

void lock_notify_object(java_objectheader *o)
{
	threadobject *thread;

	thread = THREADOBJECT;

	/* third argument true here vs. false in lock_notify_all_object --
	   presumably selects notify-one semantics; verify against
	   lock_monitor_notify */

	lock_monitor_notify(thread, o, true);
}
/* lock_notify_all_object ******************************************************

   Notify all threads waiting on the given object's monitor, on behalf
   of the current thread.

   IN:
      o............the object whose monitor is notified

*******************************************************************************/

void lock_notify_all_object(java_objectheader *o)
{
	threadobject *thread;

	thread = THREADOBJECT;

	/* third argument false here vs. true in lock_notify_object --
	   presumably selects notify-all semantics; verify against
	   lock_monitor_notify */

	lock_monitor_notify(thread, o, false);
}
+
/*
* These are local overrides for various environment variables in Emacs.
* Please do not remove this and leave it at the end of the file, where