1 /* src/threads/native/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
4 C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5 E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6 J. Wenninger, Institut f. Computersprachen - TU Wien
8 This file is part of CACAO.
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2, or (at
13 your option) any later version.
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 Contact: cacao@cacaojvm.org
30 Changes: Christian Thalinger
32 $Id: threads.c 4903 2006-05-11 12:48:43Z edwin $
45 #include "mm/memory.h"
47 #include "vm/global.h"
48 #include "vm/exceptions.h"
49 #include "vm/stringlocal.h"
51 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
55 /* includes for atomic instructions: */
57 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
58 #include "threads/native/generic-primitives.h"
60 #include "machine-instr.h"
64 /******************************************************************************/
65 /* DEBUGGING MACROS */
66 /******************************************************************************/
/* Uncomment to enable verbose lock tracing via LOCK_LOG. */
/* #define LOCK_VERBOSE */

#if defined(LOCK_VERBOSE)
/* printf-style tracing; args must be a parenthesized argument list */
#define LOCK_LOG(args)  do { printf args; fflush(stdout); } while (0)
/* NOTE(review): the #else/#endif of this conditional are elided in this view */
#define LOCK_LOG(args)
77 /******************************************************************************/
79 /******************************************************************************/
81 /* number of lock records in the first pool allocated for a thread */
/* number of lock records in the first pool allocated for a thread */
#define LOCK_INITIAL_LOCK_RECORDS 8

#define LOCK_INITIAL_HASHTABLE_SIZE  1613      /* a prime in the middle between 1024 and 2048 */

/* Hash an object by its address. The low (alignment) bits are not mixed  */
/* out, so distribution relies entirely on the modulo by the table size.  */
#define LOCK_HASH(obj)  ((ptrint)(obj))

/* Atomic CAS on *address; returns the value found there BEFORE the       */
/* operation (equal to oldvalue iff the swap succeeded).                  */
#define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
	((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))

/* CAUTION: oldvalue is evaluated twice! */
#define COMPARE_AND_SWAP_SUCCEEDS(address, oldvalue, newvalue) \
	(compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)) == (long)(oldvalue))
96 /******************************************************************************/
97 /* MACROS FOR THE FLAT LOCK CONTENTION BIT */
98 /******************************************************************************/
/* The flat-lock-contention (FLC) bit lives in a separate word of the      */
/* object header (flcword). A contending thread sets it so the thin-lock  */
/* owner knows to notify waiters when it unlocks.                          */
#define LOCK_SET_FLC_BIT(obj)    ((obj)->flcword = 1)
#define LOCK_CLEAR_FLC_BIT(obj)  ((obj)->flcword = 0)
#define LOCK_TEST_FLC_BIT(obj)   ((obj)->flcword != 0)
105 /******************************************************************************/
106 /* MACROS FOR THIN/FAT LOCKS */
107 /******************************************************************************/
109 /* We use a variant of the tasuki locks described in the paper
111 * Tamiya Onodera, Kiyokuni Kawachiya
112 * A Study of Locking Objects with Bimodal Fields
113 * Proceedings of the ACM OOPSLA '99, pp. 223-237
116 * The underlying thin locks are a variant of the thin locks described in
118 * Bacon, Konuru, Murthy, Serrano
119 * Thin Locks: Featherweight Synchronization for Java
120 * Proceedings of the ACM Conference on Programming Language Design and
121 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
124 * In thin lock mode the lockword (monitorPtr) looks like this:
126 * ,----------------------,-----------,---,
127 * | thread ID | count | 0 |
128 * `----------------------'-----------'---´
130 * thread ID......the 'index' of the owning thread, or 0
131 * count..........number of times the lock has been entered minus 1
132 * 0..............the shape bit is 0 in thin lock mode
134 * In fat lock mode it is basically a lock_record_t *:
136 * ,----------------------------------,---,
137 * | lock_record_t * (without LSB) | 1 |
138 * `----------------------------------'---´
140 * 1..............the shape bit is 1 in fat lock mode
#if SIZEOF_VOID_P == 8
#define THIN_LOCK_WORD_SIZE 64   /* lockword is pointer-sized */
/* NOTE(review): the #else/#endif of this conditional are elided in this view */
#define THIN_LOCK_WORD_SIZE 32

#define THIN_LOCK_SHAPE_BIT 0x01   /* LSB: 0 = thin lock, 1 = fat lock */

#define THIN_UNLOCKED 0            /* lockword value of an unlocked object */

/* thin lockword layout: [ thread ID | count | shape bit ] (see above) */
#define THIN_LOCK_COUNT_SHIFT 1
#define THIN_LOCK_COUNT_SIZE 8
#define THIN_LOCK_COUNT_INCR (1 << THIN_LOCK_COUNT_SHIFT)
#define THIN_LOCK_COUNT_MAX ((1 << THIN_LOCK_COUNT_SIZE) - 1)
#define THIN_LOCK_COUNT_MASK (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)

#define THIN_LOCK_TID_SHIFT (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
#define THIN_LOCK_TID_SIZE (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)

#define IS_THIN_LOCK(lockword) (!((lockword) & THIN_LOCK_SHAPE_BIT))
#define IS_FAT_LOCK(lockword) ((lockword) & THIN_LOCK_SHAPE_BIT)

/* fat lockword: lock_record_t pointer with the shape bit masked in/out */
#define GET_FAT_LOCK(lockword) ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
#define MAKE_FAT_LOCK(ptr) ((ptrint)(ptr) | THIN_LOCK_SHAPE_BIT)

/* thin lockword with the recursion count cleared (thread ID + shape bit) */
#define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
171 /******************************************************************************/
172 /* GLOBAL VARIABLES */
173 /******************************************************************************/
/* global lock record pool list header; shared by all threads and          */
/* protected by lock_global_pool_lock below                                */
lock_record_pool_t *lock_global_pool;

/* mutex for synchronizing access to the global pool */
pthread_mutex_t lock_global_pool_lock;

/* hashtable mapping objects to lock records (protected by its own mutex) */
static lock_hashtable_t lock_hashtable;
185 /******************************************************************************/
187 /******************************************************************************/
/* forward declarations of file-local helpers */
static void lock_hashtable_init(void);
static lock_record_t * lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o);

static lock_record_t * lock_record_alloc(threadobject *t);

static void lock_record_enter(threadobject *t, lock_record_t *lr);
static void lock_record_exit(threadobject *t, lock_record_t *lr);
static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
200 /*============================================================================*/
201 /* INITIALIZATION OF DATA STRUCTURES */
202 /*============================================================================*/
205 /* lock_init *******************************************************************
207 Initialize global data for locking.
209 *******************************************************************************/
	/* initialize the mutex guarding the global lock record pool list */
	pthread_mutex_init(&lock_global_pool_lock, NULL);

	/* set up the object -> lock record hashtable */
	lock_hashtable_init();
219 /* lock_record_init ************************************************************
221 Initialize a lock record.
224 r............the lock record to initialize
225 t............will become the owner
227 *******************************************************************************/
static void lock_record_init(lock_record_t *r, threadobject *t)
	/* NOTE(review): the lines initializing the other fields of *r        */
	/* (owner/count/waiter list, presumably) are elided in this view --   */
	/* confirm against the full source.                                   */
	pthread_mutex_init(&(r->mutex), NULL);
243 /* lock_init_execution_env *****************************************************
245 Initialize the execution environment for a thread.
248 thread.......the thread
250 *******************************************************************************/
void lock_init_execution_env(threadobject *thread)
	/* no free lock records available yet */
	thread->ee.firstfree = NULL;
	/* no per-thread lock record pools allocated yet */
	thread->ee.lockrecordpools = NULL;
	thread->ee.lockrecordcount = 0;
261 /* lock_pre_compute_thinlock ***************************************************
263 Pre-compute the thin lock value for a thread index.
266 index........the thead index (>= 1)
269 the thin lock value for this thread index
271 *******************************************************************************/
ptrint lock_pre_compute_thinlock(s4 index)
	/* thread ID in the high bits, recursion count 0, shape bit 0 */
	/* (thin, held-once form of the lockword for this thread)     */
	return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
280 /*============================================================================*/
281 /* LOCK RECORD MANAGEMENT */
282 /*============================================================================*/
285 /* lock_record_alloc_new_pool **************************************************
287 Get a new lock record pool from the memory allocator.
290 thread.......the thread that will own the lock records
291 size.........number of lock records in the pool to allocate
294 the new lock record pool, with initialized lock records
296 *******************************************************************************/
static lock_record_pool_t *lock_record_alloc_new_pool(threadobject *thread, int size)
	lock_record_pool_t *pool;

	/* get the pool from the memory allocator */
	/* NOTE(review): the result is used unchecked -- presumably mem_alloc */
	/* aborts on out-of-memory rather than returning NULL; confirm.       */
	pool = mem_alloc(sizeof(lock_record_pool_header_t)
				   + sizeof(lock_record_t) * size);

	/* initialize the pool header */
	pool->header.size = size;

	/* initialize the individual lock records and chain them into the     */
	/* pool-local free list                                               */
	for (i=0; i<size; i++) {
		lock_record_init(&pool->lr[i], thread);
		pool->lr[i].nextfree = &pool->lr[i+1];

	/* terminate free list (i == size here; assumes size >= 1) */
	pool->lr[i-1].nextfree = NULL;
328 /* lock_record_alloc_pool ******************************************************
330 Allocate a lock record pool. The pool is either taken from the global free
331 list or requested from the memory allocator.
334 thread.......the thread that will own the lock records
335 size.........number of lock records in the pool to allocate
338 the new lock record pool, with initialized lock records
340 *******************************************************************************/
static lock_record_pool_t *lock_record_alloc_pool(threadobject *t, int size)
	pthread_mutex_lock(&lock_global_pool_lock);

	if (lock_global_pool) {
		lock_record_pool_t *pool;

		/* pop a pool from the global freelist */
		/* NOTE(review): a recycled pool keeps its own header.size, which */
		/* may differ from the requested `size`                           */
		pool = lock_global_pool;
		lock_global_pool = pool->header.next;

		pthread_mutex_unlock(&lock_global_pool_lock);

		/* re-initialize owner and freelist chaining */
		for (i=0; i < pool->header.size; i++) {
			pool->lr[i].owner = NULL;
			pool->lr[i].nextfree = &pool->lr[i+1];

		/* terminate the freelist (i == header.size after the loop) */
		pool->lr[i-1].nextfree = NULL;

	/* global freelist empty: release the lock and fall back to the allocator */
	pthread_mutex_unlock(&lock_global_pool_lock);

	/* we have to get a new pool from the allocator */
	return lock_record_alloc_new_pool(t, size);
376 /* lock_record_free_pools ******************************************************
378 Free the lock record pools in the given linked list. The pools are inserted
379 into the global freelist.
382 pool.........list header
384 *******************************************************************************/
void lock_record_free_pools(lock_record_pool_t *pool)
	lock_record_pool_header_t *last;

	/* deliberately disabled: see comment below */
	assert(false); /* XXX this function does not match the new locking */
	/* algorithm. We must find another way to free */
	/* unused lock records. */

	pthread_mutex_lock(&lock_global_pool_lock);

	/* find the last pool in the list */
	/* NOTE(review): this advance is presumably inside a loop walking     */
	/* header.next until NULL -- the loop header is elided in this view   */
	last = &pool->header;
		last = &last->next->header;

	/* chain it to the lock_global_pool freelist */
	last->next = lock_global_pool;

	/* insert the freed pools into the freelist */
	lock_global_pool = pool;

	pthread_mutex_unlock(&lock_global_pool_lock);
417 /* lock_record_alloc ***********************************************************
419 Allocate a lock record which is owned by the current thread.
422 t............the current thread
424 *******************************************************************************/
static lock_record_t *lock_record_alloc(threadobject *t)
	lock_record_pool_t *pool;

	/* grow geometrically: double the thread's current record count, or   */
	/* start with the initial pool size                                   */
	poolsize = t->ee.lockrecordcount ? t->ee.lockrecordcount * 2
				: LOCK_INITIAL_LOCK_RECORDS;
	pool = lock_record_alloc_pool(t, poolsize);

	/* add it to our per-thread pool list */
	pool->header.next = t->ee.lockrecordpools;
	t->ee.lockrecordpools = pool;
	t->ee.lockrecordcount += pool->header.size;

	/* take the first record from the pool */

	/* pop the record from the freelist */
	t->ee.firstfree = r->nextfree;
	r->nextfree = NULL; /* in order to find invalid uses of nextfree */
464 /* lock_record_recycle *********************************************************
466 Recycle the given lock record. It will be inserted in the appropriate
470 t............the owner
471 r............lock record to recycle
473 *******************************************************************************/
static inline void lock_record_recycle(threadobject *t, lock_record_t *r)
	/* the record must be unowned and already unlinked from any freelist */
	assert(r->owner == NULL);
	assert(r->nextfree == NULL);

	/* push it onto the owning thread's free list */
	r->nextfree = t->ee.firstfree;
488 /*============================================================================*/
489 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
490 /*============================================================================*/
493 /* lock_hashtable_init *********************************************************
495 Initialize the global hashtable mapping objects to lock records.
497 *******************************************************************************/
static void lock_hashtable_init(void)
	pthread_mutex_init(&(lock_hashtable.mutex), NULL);

	/* allocate and zero the initial bucket array */
	lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
	lock_hashtable.entries = 0;
	lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
	MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
510 /* lock_hashtable_grow *********************************************************
512 Grow the lock record hashtable to about twice its current size and
515 *******************************************************************************/
/* must be called with hashtable mutex locked */
static void lock_hashtable_grow(void)
	lock_record_t **oldtable;
	lock_record_t **newtable;

	/* allocate a new table */
	oldsize = lock_hashtable.size;
	newsize = oldsize*2 + 1; /* XXX should use prime numbers */

	LOCK_LOG(("growing lock hashtable to size %d\n", newsize));

	oldtable = lock_hashtable.ptr;
	newtable = MNEW(lock_record_t *, newsize);
	MZERO(newtable, lock_record_t *, newsize);

	/* rehash the entries */
	/* NOTE(review): the inner loop walking each bucket's hashlink chain  */
	/* is partly elided in this view                                      */
	for (i=0; i<oldsize; ++i) {
			h = LOCK_HASH(lr->obj);
			newslot = h % newsize;

			/* prepend to the new bucket's chain */
			lr->hashlink = newtable[newslot];
			newtable[newslot] = lr;

	/* replace the old table */
	lock_hashtable.ptr = newtable;
	lock_hashtable.size = newsize;

	MFREE(oldtable, lock_record_t *, oldsize);
567 /* lock_hashtable_get_lock_record **********************************************
569 Find the lock record for the given object. If it does not exists, yet,
570 create it and enter it in the hashtable.
573 t.................the current thread
574 o.................the object to look up
577 the lock record to use for this object
579 *******************************************************************************/
static lock_record_t *lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o)
	lockword = (ptrint) o->monitorPtr;

	/* fast path: the lock is already fat, its record is in the lockword */
	if (IS_FAT_LOCK(lockword)) {
		return GET_FAT_LOCK(lockword);

	/* lock the hashtable */
	pthread_mutex_lock(&(lock_hashtable.mutex));

	/* lookup the lock record in the hashtable */
	slot = LOCK_HASH(o) % lock_hashtable.size;
	lr = lock_hashtable.ptr[slot];
			/* NOTE(review): the chain walk / hit path is elided in this  */
			/* view; on a hit the table is unlocked and lr returned       */
			pthread_mutex_unlock(&(lock_hashtable.mutex));

	/* not found, we must create a new one */
	lr = lock_record_alloc(t);

	LOCK_LOG(("thread %d allocated for %p new lr %p\n",
			  t->index, (void*) o, (void*) lr));

	/* enter it in the hashtable (prepend to the bucket chain) */
	lr->hashlink = lock_hashtable.ptr[slot];
	lock_hashtable.ptr[slot] = lr;
	lock_hashtable.entries++;

	/* check whether the hash should grow (load factor 4/3) */
	if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
		lock_hashtable_grow();

	/* unlock the hashtable */
	pthread_mutex_unlock(&(lock_hashtable.mutex));

	/* return the new lock record */
639 /*============================================================================*/
640 /* OBJECT LOCK INITIALIZATION */
641 /*============================================================================*/
644 /* lock_init_object_lock *******************************************************
646 Initialize the monitor pointer of the given object. The monitor gets
647 initialized to an unlocked state.
649 *******************************************************************************/
void lock_init_object_lock(java_objectheader *o)
	/* thin, unlocked lockword (all zero) */
	o->monitorPtr = (lock_record_t *) THIN_UNLOCKED;
660 /* lock_get_initial_lock_word **************************************************
662 Returns the initial (unlocked) lock word. The pointer is
663 required in the code generator to set up a virtual
664 java_objectheader for code patch locking.
666 *******************************************************************************/
lock_record_t *lock_get_initial_lock_word(void)
	/* same value lock_init_object_lock stores: thin, unlocked */
	return (lock_record_t *) THIN_UNLOCKED;
675 /*============================================================================*/
676 /* LOCKING ALGORITHM */
677 /*============================================================================*/
680 /* lock_record_enter ***********************************************************
682 Enter the lock represented by the given lock record.
685 t.................the current thread
686 lr................the lock record
688 *******************************************************************************/
static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
	/* blocks until the record's mutex is acquired */
	pthread_mutex_lock(&(lr->mutex));
697 /* lock_record_exit ************************************************************
699 Release the lock represented by the given lock record.
702 t.................the current thread
703 lr................the lock record
706 The current thread must own the lock represented by this lock record.
707 This is NOT checked by this function!
709 *******************************************************************************/
static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
	/* caller must own lr->mutex (NOT checked here) */
	pthread_mutex_unlock(&(lr->mutex));
718 /* lock_inflate ****************************************************************
720 Inflate the lock of the given object. This may only be called by the
721 owner of the monitor of the object.
724 t............the current thread
725 o............the object of which to inflate the lock
726 lr...........the lock record to install. The current thread must
727 own the lock of this lock record!
730 The current thread must be the owner of this object's monitor AND
731 of the lock record's lock!
733 *******************************************************************************/
static void lock_inflate(threadobject *t, java_objectheader *o, lock_record_t *lr)
	/* get the current lock count */
	lockword = (ptrint) o->monitorPtr;

	if (IS_FAT_LOCK(lockword)) {
		/* already inflated -- must be with the same record */
		assert(GET_FAT_LOCK(lockword) == lr);

		/* still thin: we must hold it (count ignored by the comparison) */
		assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );

		/* copy the count from the thin lock */
		lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;

	LOCK_LOG(("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
			  t->index, (void*) o, (long)o->monitorPtr, (int)lr->count));

	/* clear flat-lock-contention bit */
	LOCK_CLEAR_FLC_BIT(o);

	/* notify waiting objects */
	lock_record_notify(t, lr, false);

	/* publish the fat lockword (record pointer with shape bit set) */
	o->monitorPtr = (lock_record_t *) MAKE_FAT_LOCK(lr);
771 /* lock_monitor_enter **********************************************************
773 Acquire the monitor of the given object. If the current thread already
774 owns the monitor, the lock counter is simply increased.
776 This function blocks until it can acquire the monitor.
779 t............the current thread
780 o............the object of which to enter the monitor
782 *******************************************************************************/
void lock_monitor_enter(threadobject *t, java_objectheader *o)
	/* CAUTION: This code assumes that ptrint is unsigned! */

	/* NOTE(review): several lines of this function (returns, loop/branch */
	/* closers, memory barrier macros) are elided in this view.           */

	thinlock = t->thinlock;

	/* most common case: try to thin-lock an unlocked object */
	if ((lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) == THIN_UNLOCKED) {
		/* success. we locked it */
		/* The Java Memory Model requires a memory barrier here: */

	/* next common case: recursive lock with small recursion count */
	/* We don't have to worry about stale values here, as any stale value */
	/* will indicate another thread holding the lock (or an inflated lock) */
	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		/* we own this monitor */
		/* check the current recursion count */
		/* (XOR leaves only the count bits, since the TID/shape bits match) */
		if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
			/* the recursion count is low enough */
			o->monitorPtr = (lock_record_t *) (lockword + THIN_LOCK_COUNT_INCR);
			/* success. we locked it */

		/* recursion count overflow: inflate to a fat lock */
		lr = lock_hashtable_get_lock_record(t, o);
		lock_record_enter(t, lr);
		lock_inflate(t, o, lr);

	/* the lock is either contented or fat */
	if (IS_FAT_LOCK(lockword)) {
		lr = GET_FAT_LOCK(lockword);

		/* check for recursive entering */
		if (lr->owner == t) {

		/* acquire the mutex of the lock record */
		lock_record_enter(t, lr);

		/* a freshly acquired fat lock must have no recursion yet */
		assert(lr->count == 0);

	/****** inflation path ******/

	/* first obtain the lock record for this object */
	lr = lock_hashtable_get_lock_record(t, o);

	/* enter the monitor */
	lock_record_enter(t, lr);

	/* spin: retry until the lockword is no longer thin (someone inflated it) */
	while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {
		/* Set the flat lock contention bit to let the owning thread */
		/* know that we want to be notified of unlocking. */

		LOCK_LOG(("thread %d set flc bit on %p lr %p\n",
				  t->index, (void*) o, (void*) lr));

		/* try to lock the object */
		if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
			/* we can inflate the lock ourselves */
			LOCK_LOG(("thread %d inflating lock of %p to lr %p\n",
					  t->index, (void*) o, (void*) lr));
			lock_inflate(t, o, lr);

		/* wait until another thread sees the flc bit and notifies us of unlocking */
		LOCK_LOG(("thread %d waiting for notification on %p lr %p\n",
				  t->index, (void*) o, (void*) lr));
		lock_record_wait(t, lr, 0, 0);

	/* we own the inflated lock now */
900 /* lock_monitor_exit ***********************************************************
902 Decrement the counter of a (currently owned) monitor. If the counter
903 reaches zero, release the monitor.
905 If the current thread is not the owner of the monitor, an
906 IllegalMonitorState exception is thrown.
909 t............the current thread
910 o............the object of which to exit the monitor
913 true.........everything ok,
914 false........an exception has been thrown
916 *******************************************************************************/
bool lock_monitor_exit(threadobject *t, java_objectheader *o)
	/* We don't have to worry about stale values here, as any stale value */
	/* will indicate that we don't own the lock. */

	/* NOTE(review): return statements, barrier macros and some branch    */
	/* closers of this function are elided in this view.                  */

	lockword = (ptrint) o->monitorPtr;
	thinlock = t->thinlock;

	/* most common case: we release a thin lock that we hold once */
	if (lockword == thinlock) {
		/* memory barrier for Java Memory Model */
		o->monitorPtr = THIN_UNLOCKED;
		/* memory barrier for thin locking */

		/* check if there has been a flat lock contention on this object */
		if (LOCK_TEST_FLC_BIT(o)) {
			LOCK_LOG(("thread %d saw flc bit on %p %s\n",
					  t->index, (void*) o, o->vftbl->class->name->text));

			/* there has been a contention on this thin lock */
			lr = lock_hashtable_get_lock_record(t, o);

			LOCK_LOG(("thread %d for %p got lr %p\n",
					  t->index, (void*) o, (void*) lr));

			lock_record_enter(t, lr);

			/* re-test under the record's mutex to avoid a lost wakeup */
			if (LOCK_TEST_FLC_BIT(o)) {
				/* notify a thread that it can try to inflate the lock now */
				lock_record_notify(t, lr, true);

			lock_record_exit(t, lr);

	/* next common case: we release a recursive lock, count > 0 */
	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		o->monitorPtr = (lock_record_t *) (lockword - THIN_LOCK_COUNT_INCR);

	/* either the lock is fat, or we don't hold it at all */
	if (IS_FAT_LOCK(lockword)) {
		lr = GET_FAT_LOCK(lockword);

		/* check if we own this monitor */
		/* We don't have to worry about stale values here, as any stale value */
		/* will be != t and thus fail this check. */
		if (lr->owner != t) {
			*exceptionptr = new_illegalmonitorstateexception();

		/* { the current thread `t` owns the lock record `lr` on object `o` } */

		if (lr->count != 0) {
			/* we had locked this one recursively. just decrement, it will */
			/* still be locked. */

		/* unlock this lock record */
		pthread_mutex_unlock(&(lr->mutex));

	/* legal thin lock cases have been handled above, so this is an error */
	*exceptionptr = new_illegalmonitorstateexception();
1015 /* lock_record_remove_waiter ***************************************************
1017 Remove a thread from the list of waiting threads of a lock record.
1020 lr...........the lock record
1021 t............the current thread
1024 The current thread must be the owner of the lock record.
1026 *******************************************************************************/
static void lock_record_remove_waiter(lock_record_t *lr, threadobject *t)
	lock_waiter_t **link;

	/* walk the singly-linked waiter list via a pointer-to-pointer so the */
	/* matching node can be unlinked in place (unlink code elided here)   */
	link = &(lr->waiters);
	while ((w = *link)) {
		if (w->waiter == t) {

	/* this should never happen */
	fprintf(stderr,"error: waiting thread not found in list of waiters\n");
1050 /* lock_record_wait ************************************************************
1052 Wait on a lock record for a given (maximum) amount of time.
1055 t............the current thread
1056 lr...........the lock record
1057 millis.......milliseconds of timeout
1058 nanos........nanoseconds of timeout
1061 The current thread must be the owner of the lock record.
1062 This is NOT checked by this function!
1064 *******************************************************************************/
static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos)
	lock_waiter_t *waiter;
	bool wasinterrupted;

	/* { the thread t owns the fat lock record lr on the object o } */

	/* register us as waiter for this object */
	waiter = NEW(lock_waiter_t);
	waiter->next = lr->waiters;
	lr->waiters = waiter;

	/* remember the old lock count */
	lockcount = lr->count;

	/* unlock this record */
	lock_record_exit(t, lr);

	/* wait until notified/interrupted/timed out */
	wasinterrupted = threads_wait_with_timeout_relative(t, millis, nanos);

	/* re-enter the monitor */
	lock_record_enter(t, lr);

	/* remove us from the list of waiting threads */
	lock_record_remove_waiter(lr, t);

	/* restore the old lock count */
	lr->count = lockcount;

	/* if we have been interrupted, throw the appropriate exception */
	/* NOTE(review): presumably guarded by if (wasinterrupted) -- the     */
	/* guard line is elided in this view                                  */
	*exceptionptr = new_exception(string_java_lang_InterruptedException);
1113 /* lock_monitor_wait ***********************************************************
1115 Wait on an object for a given (maximum) amount of time.
1118 t............the current thread
1119 o............the object
1120 millis.......milliseconds of timeout
1121 nanos........nanoseconds of timeout
1124 The current thread must be the owner of the object's monitor.
1126 *******************************************************************************/
static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nanos)
	lockword = (ptrint) o->monitorPtr;

	/* check if we own this monitor */
	/* We don't have to worry about stale values here, as any stale value */
	/* will fail this check. */
	if (IS_FAT_LOCK(lockword)) {
		lr = GET_FAT_LOCK(lockword);

		if (lr->owner != t) {
			*exceptionptr = new_illegalmonitorstateexception();

	/* it's a thin lock */
	if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
		*exceptionptr = new_illegalmonitorstateexception();

	/* inflate this lock: waiting always requires a fat lock */
	lr = lock_hashtable_get_lock_record(t, o);
	lock_record_enter(t, lr);
	lock_inflate(t, o, lr);

	/* { the thread t owns the fat lock record lr on the object o } */

	lock_record_wait(t, lr, millis, nanos);
1168 /* lock_record_notify **********************************************************
1170 Notify one thread or all threads waiting on the given lock record.
1173 t............the current thread
1174 lr...........the lock record
1175 one..........if true, only notify one thread
1178 The current thread must be the owner of the lock record.
1179 This is NOT checked by this function!
1181 *******************************************************************************/
static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
	lock_waiter_t *waiter;
	threadobject *waitingthread;

	/* { the thread t owns the fat lock record lr on the object o } */

	/* for each waiter: */
	for (waiter = lr->waiters; waiter; waiter = waiter->next) {

		/* signal the waiting thread */
		waitingthread = waiter->waiter;

		pthread_mutex_lock(&waitingthread->waitmutex);
		if (waitingthread->sleeping)
			pthread_cond_signal(&waitingthread->waitcond);
		/* set `signaled` even if the waiter is not yet sleeping, so a    */
		/* waiter that checks the flag before blocking won't miss wakeup  */
		waitingthread->signaled = true;
		pthread_mutex_unlock(&waitingthread->waitmutex);

		/* if we should only wake one, we are done */
1212 /* lock_monitor_notify *********************************************************
1214 Notify one thread or all threads waiting on the given object.
1217 t............the current thread
1218 o............the object
1219 one..........if true, only notify one thread
1222 The current thread must be the owner of the object's monitor.
1224 *******************************************************************************/
static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
	lockword = (ptrint) o->monitorPtr;

	/* check if we own this monitor */
	/* We don't have to worry about stale values here, as any stale value */
	/* will fail this check. */
	if (IS_FAT_LOCK(lockword)) {
		lr = GET_FAT_LOCK(lockword);

		if (lr->owner != t) {
			*exceptionptr = new_illegalmonitorstateexception();

	/* it's a thin lock */
	if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
		*exceptionptr = new_illegalmonitorstateexception();

	/* inflate this lock: the waiter list lives in the fat lock record */
	lr = lock_hashtable_get_lock_record(t, o);
	lock_record_enter(t, lr);
	lock_inflate(t, o, lr);

	/* { the thread t owns the fat lock record lr on the object o } */

	lock_record_notify(t, lr, one);
1267 /*============================================================================*/
/* INQUIRY FUNCTIONS                                                          */
1269 /*============================================================================*/
1272 /* lock_is_held_by_current_thread **********************************************
1274 Return true if the current thread owns the monitor of the given object.
1277 o............the object
1280 true, if the current thread holds the lock of this object.
1282 *******************************************************************************/
bool lock_is_held_by_current_thread(java_objectheader *o)
	/* NOTE(review): the declaration/initialization of `t` (the current   */
	/* thread) is elided in this view                                     */

	/* check if we own this monitor */
	/* We don't have to worry about stale values here, as any stale value */
	/* will fail this check. */
	lockword = (ptrint) o->monitorPtr;

	if (IS_FAT_LOCK(lockword)) {
		/* it's a fat lock */
		lr = GET_FAT_LOCK(lockword);

		return (lr->owner == t);

	/* it's a thin lock: compare ignoring the recursion count */
	return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1313 /*============================================================================*/
1314 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1315 /*============================================================================*/
1318 /* lock_wait_for_object ********************************************************
1320 Wait for the given object.
1323 o............the object
1324 millis.......milliseconds to wait
1325 nanos........nanoseconds to wait
1327 *******************************************************************************/
void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
	/* forward to lock_monitor_wait with the current thread */
	threadobject *t = (threadobject*) THREADOBJECT;
	lock_monitor_wait(t, o, millis, nanos);
1336 /* lock_notify_object **********************************************************
1338 Notify one thread waiting on the given object.
1341 o............the object
1343 *******************************************************************************/
void lock_notify_object(java_objectheader *o)
	/* notify exactly one waiter (one == true) */
	threadobject *t = (threadobject*) THREADOBJECT;
	lock_monitor_notify(t, o, true);
1352 /* lock_notify_all_object ******************************************************
1354 Notify all threads waiting on the given object.
1357 o............the object
1359 *******************************************************************************/
void lock_notify_all_object(java_objectheader *o)
	/* notify every waiter (one == false) */
	threadobject *t = (threadobject*) THREADOBJECT;
	lock_monitor_notify(t, o, false);
1368 * These are local overrides for various environment variables in Emacs.
1369 * Please do not remove this and leave it at the end of the file, where
1370 * Emacs will automagically detect them.
1371 * ---------------------------------------------------------------------
1374 * indent-tabs-mode: t
1378 * vim:noexpandtab:sw=4:ts=4: