1 /* src/threads/native/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
4 C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5 E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6 J. Wenninger, Institut f. Computersprachen - TU Wien
8 This file is part of CACAO.
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2, or (at
13 your option) any later version.
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 $Id: threads.c 4903 2006-05-11 12:48:43Z edwin $
40 #include "mm/memory.h"
42 #include "threads/native/lock.h"
43 #include "threads/native/threads.h"
45 #include "vm/global.h"
46 #include "vm/exceptions.h"
47 #include "vm/stringlocal.h"
49 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
53 /* includes for atomic instructions: */
55 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
56 #include "threads/native/generic-primitives.h"
58 #include "machine-instr.h"
61 #if defined(ENABLE_JVMTI)
62 #include "native/jvmti/cacaodbg.h"
66 /******************************************************************************/
67 /* DEBUGGING MACROS */
68 /******************************************************************************/
70 /* #define LOCK_VERBOSE */
72 #if defined(LOCK_VERBOSE)
73 #define LOCK_LOG(args) do { printf args; fflush(stdout); } while (0)
75 #define LOCK_LOG(args)
79 /******************************************************************************/
81 /******************************************************************************/
83 /* number of lock records in the first pool allocated for a thread */
84 #define LOCK_INITIAL_LOCK_RECORDS 8
86 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
88 #define LOCK_HASH(obj) ((ptrint)(obj))
90 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
91 ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
93 /* CAUTION: oldvalue is evaluated twice! */
94 #define COMPARE_AND_SWAP_SUCCEEDS(address, oldvalue, newvalue) \
95 (compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)) == (long)(oldvalue))
98 /******************************************************************************/
99 /* MACROS FOR THE FLAT LOCK CONTENTION BIT */
100 /******************************************************************************/
102 #define LOCK_SET_FLC_BIT(obj) ((obj)->hdrflags |= HDRFLAG_FLC)
103 #define LOCK_CLEAR_FLC_BIT(obj) ((obj)->hdrflags &= ~ HDRFLAG_FLC)
104 #define LOCK_TEST_FLC_BIT(obj) ((obj)->hdrflags & HDRFLAG_FLC)
107 /******************************************************************************/
108 /* MACROS FOR THIN/FAT LOCKS */
109 /******************************************************************************/
111 /* We use a variant of the tasuki locks described in the paper
113 * Tamiya Onodera, Kiyokuni Kawachiya
114 * A Study of Locking Objects with Bimodal Fields
115 * Proceedings of the ACM OOPSLA '99, pp. 223-237
118 * The underlying thin locks are a variant of the thin locks described in
120 * Bacon, Konuru, Murthy, Serrano
121 * Thin Locks: Featherweight Synchronization for Java
122 * Proceedings of the ACM Conference on Programming Language Design and
123 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
126 * In thin lock mode the lockword (monitorPtr) looks like this:
128 * ,----------------------,-----------,---,
129 * | thread ID | count | 0 |
130 * `----------------------'-----------'---´
132 * thread ID......the 'index' of the owning thread, or 0
133 * count..........number of times the lock has been entered minus 1
134 * 0..............the shape bit is 0 in thin lock mode
136 * In fat lock mode it is basically a lock_record_t *:
138 * ,----------------------------------,---,
139 * | lock_record_t * (without LSB) | 1 |
140 * `----------------------------------'---´
142 * 1..............the shape bit is 1 in fat lock mode
145 #if SIZEOF_VOID_P == 8
146 #define THIN_LOCK_WORD_SIZE 64
148 #define THIN_LOCK_WORD_SIZE 32
151 #define THIN_LOCK_SHAPE_BIT 0x01
153 #define THIN_UNLOCKED 0
155 #define THIN_LOCK_COUNT_SHIFT 1
156 #define THIN_LOCK_COUNT_SIZE 8
157 #define THIN_LOCK_COUNT_INCR (1 << THIN_LOCK_COUNT_SHIFT)
158 #define THIN_LOCK_COUNT_MAX ((1 << THIN_LOCK_COUNT_SIZE) - 1)
159 #define THIN_LOCK_COUNT_MASK (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
161 #define THIN_LOCK_TID_SHIFT (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
162 #define THIN_LOCK_TID_SIZE (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
164 #define IS_THIN_LOCK(lockword) (!((lockword) & THIN_LOCK_SHAPE_BIT))
165 #define IS_FAT_LOCK(lockword) ((lockword) & THIN_LOCK_SHAPE_BIT)
167 #define GET_FAT_LOCK(lockword) ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
168 #define MAKE_FAT_LOCK(ptr) ((ptrint)(ptr) | THIN_LOCK_SHAPE_BIT)
170 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
173 /******************************************************************************/
174 /* GLOBAL VARIABLES */
175 /******************************************************************************/
177 /* global lock record pool list header */
178 lock_record_pool_t *lock_global_pool;
180 /* mutex for synchronizing access to the global pool */
181 pthread_mutex_t lock_global_pool_lock;
183 /* hashtable mapping objects to lock records */
184 static lock_hashtable_t lock_hashtable;
187 /******************************************************************************/
189 /******************************************************************************/
191 static void lock_hashtable_init(void);
192 static lock_record_t * lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o);
194 static lock_record_t * lock_record_alloc(threadobject *t);
196 static void lock_record_enter(threadobject *t, lock_record_t *lr);
197 static void lock_record_exit(threadobject *t, lock_record_t *lr);
198 static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
199 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
202 /*============================================================================*/
203 /* INITIALIZATION OF DATA STRUCTURES */
204 /*============================================================================*/
207 /* lock_init *******************************************************************
209 Initialize global data for locking.
211 *******************************************************************************/
215 pthread_mutex_init(&lock_global_pool_lock, NULL);
217 lock_hashtable_init();
221 /* lock_record_init ************************************************************
223 Initialize a lock record.
226 r............the lock record to initialize
227 t............will become the owner
229 *******************************************************************************/
231 static void lock_record_init(lock_record_t *r, threadobject *t)
/* NOTE(review): intermediate lines are elided in this view -- presumably */
/* the remaining fields of *r (owner, count, waiters, nextfree) are set   */
/* here as well; confirm against the full file.                           */
241 pthread_mutex_init(&(r->mutex), NULL);
245 /* lock_init_execution_env *****************************************************
247 Initialize the execution environment for a thread.
250 thread.......the thread
252 *******************************************************************************/
254 void lock_init_execution_env(threadobject *thread)
/* start the thread with no free lock records and no pools; the first */
/* call to lock_record_alloc() will allocate an initial pool          */
256 thread->ee.firstfree = NULL;
257 thread->ee.lockrecordpools = NULL;
258 thread->ee.lockrecordcount = 0;
263 /* lock_pre_compute_thinlock ***************************************************
265 Pre-compute the thin lock value for a thread index.
268 index........the thread index (>= 1)
271 the thin lock value for this thread index
273 *******************************************************************************/
275 ptrint lock_pre_compute_thinlock(s4 index)
/* place the thread index in the TID field; count field and shape bit  */
/* are zero, so this is exactly the lock word stored when this thread  */
/* thin-locks an object once (THIN_UNLOCKED is 0, the OR is a no-op)   */
277 return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
282 /*============================================================================*/
283 /* LOCK RECORD MANAGEMENT */
284 /*============================================================================*/
287 /* lock_record_alloc_new_pool **************************************************
289 Get a new lock record pool from the memory allocator.
292 thread.......the thread that will own the lock records
293 size.........number of lock records in the pool to allocate
296 the new lock record pool, with initialized lock records
298 *******************************************************************************/
300 static lock_record_pool_t *lock_record_alloc_new_pool(threadobject *thread, int size)
303 lock_record_pool_t *pool;
305 /* get the pool from the memory allocator */
/* one header plus `size` lock records in a single contiguous allocation */
307 pool = mem_alloc(sizeof(lock_record_pool_header_t)
308 + sizeof(lock_record_t) * size);
310 /* initialize the pool header */
312 pool->header.size = size;
314 /* initialize the individual lock records */
316 for (i=0; i<size; i++) {
317 lock_record_init(&pool->lr[i], thread);
/* chain into the pool-local freelist; the dangling last link is */
/* fixed up after the loop                                        */
319 pool->lr[i].nextfree = &pool->lr[i+1];
322 /* terminate free list */
/* NOTE(review): assumes size >= 1 -- with size == 0 the lr[i-1] access */
/* below would be out of bounds; callers appear to pass positive sizes  */
324 pool->lr[i-1].nextfree = NULL;
330 /* lock_record_alloc_pool ******************************************************
332 Allocate a lock record pool. The pool is either taken from the global free
333 list or requested from the memory allocator.
336 thread.......the thread that will own the lock records
337 size.........number of lock records in the pool to allocate
340 the new lock record pool, with initialized lock records
342 *******************************************************************************/
344 static lock_record_pool_t *lock_record_alloc_pool(threadobject *t, int size)
346 pthread_mutex_lock(&lock_global_pool_lock);
348 if (lock_global_pool) {
350 lock_record_pool_t *pool;
352 /* pop a pool from the global freelist */
354 pool = lock_global_pool;
355 lock_global_pool = pool->header.next;
/* global list is no longer touched; drop the lock before the */
/* (potentially longer) re-initialization loop                */
357 pthread_mutex_unlock(&lock_global_pool_lock);
359 /* re-initialize owner and freelist chaining */
361 for (i=0; i < pool->header.size; i++) {
362 pool->lr[i].owner = NULL;
363 pool->lr[i].nextfree = &pool->lr[i+1];
/* NOTE(review): a recycled pool keeps its own header.size -- the     */
/* requested `size` is only honored on the fresh-allocation path below */
365 pool->lr[i-1].nextfree = NULL;
370 pthread_mutex_unlock(&lock_global_pool_lock);
372 /* we have to get a new pool from the allocator */
374 return lock_record_alloc_new_pool(t, size);
378 /* lock_record_free_pools ******************************************************
380 Free the lock record pools in the given linked list. The pools are inserted
381 into the global freelist.
384 pool.........list header
386 *******************************************************************************/
388 void lock_record_free_pools(lock_record_pool_t *pool)
390 lock_record_pool_header_t *last;
/* DEAD CODE: the assert(false) below makes this function abort if it */
/* is ever reached -- kept only until pool freeing is redesigned      */
392 assert(false); /* XXX this function does not match the new locking */
393 /* algorithm. We must find another way to free */
394 /* unused lock records. */
399 pthread_mutex_lock(&lock_global_pool_lock);
401 /* find the last pool in the list */
403 last = &pool->header;
405 last = &last->next->header;
407 /* chain it to the lock_global_pool freelist */
409 last->next = lock_global_pool;
411 /* insert the freed pools into the freelist */
413 lock_global_pool = pool;
415 pthread_mutex_unlock(&lock_global_pool_lock);
419 /* lock_record_alloc ***********************************************************
421 Allocate a lock record which is owned by the current thread.
424 t............the current thread
426 *******************************************************************************/
428 static lock_record_t *lock_record_alloc(threadobject *t)
437 lock_record_pool_t *pool;
/* grow exponentially: each new pool doubles the thread's total   */
/* number of lock records (or uses the initial size on first use) */
441 poolsize = t->ee.lockrecordcount ? t->ee.lockrecordcount * 2
442 : LOCK_INITIAL_LOCK_RECORDS;
443 pool = lock_record_alloc_pool(t, poolsize);
445 /* add it to our per-thread pool list */
447 pool->header.next = t->ee.lockrecordpools;
448 t->ee.lockrecordpools = pool;
449 t->ee.lockrecordcount += pool->header.size;
451 /* take the first record from the pool */
455 /* pop the record from the freelist */
457 t->ee.firstfree = r->nextfree;
459 r->nextfree = NULL; /* in order to find invalid uses of nextfree */
466 /* lock_record_recycle *********************************************************
468 Recycle the given lock record. It will be inserted in the appropriate
472 t............the owner
473 r............lock record to recycle
475 *******************************************************************************/
477 static inline void lock_record_recycle(threadobject *t, lock_record_t *r)
/* a record may only be recycled when unowned and unchained */
481 assert(r->owner == NULL);
482 assert(r->nextfree == NULL);
/* push onto the thread-local freelist head */
/* NOTE(review): the matching `t->ee.firstfree = r;` store appears to be */
/* elided from this view -- confirm against the full file                */
484 r->nextfree = t->ee.firstfree;
490 /*============================================================================*/
491 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
492 /*============================================================================*/
495 /* lock_hashtable_init *********************************************************
497 Initialize the global hashtable mapping objects to lock records.
499 *******************************************************************************/
501 static void lock_hashtable_init(void)
503 pthread_mutex_init(&(lock_hashtable.mutex), NULL);
/* start with the initial (prime) size and an all-NULL bucket array */
505 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
506 lock_hashtable.entries = 0;
507 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
508 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
512 /* lock_hashtable_grow *********************************************************
514 Grow the lock record hashtable to about twice its current size and
517 *******************************************************************************/
519 /* must be called with hashtable mutex locked */
520 static void lock_hashtable_grow(void)
524 lock_record_t **oldtable;
525 lock_record_t **newtable;
532 /* allocate a new table */
534 oldsize = lock_hashtable.size;
535 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
537 LOCK_LOG(("growing lock hashtable to size %d\n", newsize));
539 oldtable = lock_hashtable.ptr;
540 newtable = MNEW(lock_record_t *, newsize);
541 MZERO(newtable, lock_record_t *, newsize);
543 /* rehash the entries */
545 for (i=0; i<oldsize; ++i) {
550 h = LOCK_HASH(lr->obj);
551 newslot = h % newsize;
553 lr->hashlink = newtable[newslot];
554 newtable[newslot] = lr;
560 /* replace the old table */
562 lock_hashtable.ptr = newtable;
563 lock_hashtable.size = newsize;
565 MFREE(oldtable, lock_record_t *, oldsize);
569 /* lock_hashtable_get_lock_record **********************************************
571 Find the lock record for the given object. If it does not exist yet,
572 create it and enter it in the hashtable.
575 t.................the current thread
576 o.................the object to look up
579 the lock record to use for this object
581 *******************************************************************************/
583 static lock_record_t *lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o)
589 lockword = (ptrint) o->monitorPtr;
/* fast path: an already-fat lock word directly encodes the record */
591 if (IS_FAT_LOCK(lockword)) {
592 return GET_FAT_LOCK(lockword);
595 /* lock the hashtable */
597 pthread_mutex_lock(&(lock_hashtable.mutex));
599 /* lookup the lock record in the hashtable */
601 slot = LOCK_HASH(o) % lock_hashtable.size;
602 lr = lock_hashtable.ptr[slot];
/* NOTE(review): the chain-walk loop comparing lr->obj against o appears */
/* to be elided from this view -- confirm against the full file          */
605 pthread_mutex_unlock(&(lock_hashtable.mutex));
612 /* not found, we must create a new one */
614 lr = lock_record_alloc(t);
616 LOCK_LOG(("thread %d allocated for %p new lr %p\n",
617 t->index, (void*) o, (void*) lr));
619 /* enter it in the hashtable */
621 lr->hashlink = lock_hashtable.ptr[slot];
622 lock_hashtable.ptr[slot] = lr;
623 lock_hashtable.entries++;
625 /* check whether the hash should grow */
/* grow when the load factor exceeds 4/3 (entries/size > 4/3) */
627 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
628 lock_hashtable_grow();
631 /* unlock the hashtable */
633 pthread_mutex_unlock(&(lock_hashtable.mutex));
635 /* return the new lock record */
641 /*============================================================================*/
642 /* OBJECT LOCK INITIALIZATION */
643 /*============================================================================*/
646 /* lock_init_object_lock *******************************************************
648 Initialize the monitor pointer of the given object. The monitor gets
649 initialized to an unlocked state.
651 *******************************************************************************/
653 void lock_init_object_lock(java_objectheader *o)
/* thin-lock word 0 == unlocked, shape bit clear; also reset the */
/* flat-lock-contention bit in the header flags                  */
657 o->monitorPtr = (lock_record_t *) THIN_UNLOCKED;
658 LOCK_CLEAR_FLC_BIT(o);
662 /* lock_get_initial_lock_word **************************************************
664 Returns the initial (unlocked) lock word. The pointer is
665 required in the code generator to set up a virtual
666 java_objectheader for code patch locking.
668 *******************************************************************************/
670 lock_record_t *lock_get_initial_lock_word(void)
/* the unlocked thin-lock word (0), cast to the pointer type used */
/* for monitorPtr                                                 */
672 return (lock_record_t *) THIN_UNLOCKED;
677 /*============================================================================*/
678 /* LOCKING ALGORITHM */
679 /*============================================================================*/
682 /* lock_record_enter ***********************************************************
684 Enter the lock represented by the given lock record.
687 t.................the current thread
688 lr................the lock record
690 *******************************************************************************/
692 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
/* blocks until the record's mutex is acquired */
694 pthread_mutex_lock(&(lr->mutex));
699 /* lock_record_exit ************************************************************
701 Release the lock represented by the given lock record.
704 t.................the current thread
705 lr................the lock record
708 The current thread must own the lock represented by this lock record.
709 This is NOT checked by this function!
711 *******************************************************************************/
713 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
/* ownership is NOT checked here (see header comment above) */
716 pthread_mutex_unlock(&(lr->mutex));
720 /* lock_inflate ****************************************************************
722 Inflate the lock of the given object. This may only be called by the
723 owner of the monitor of the object.
726 t............the current thread
727 o............the object of which to inflate the lock
728 lr...........the lock record to install. The current thread must
729 own the lock of this lock record!
732 The current thread must be the owner of this object's monitor AND
733 of the lock record's lock!
735 *******************************************************************************/
737 static void lock_inflate(threadobject *t, java_objectheader *o, lock_record_t *lr)
741 /* get the current lock count */
743 lockword = (ptrint) o->monitorPtr;
/* if already fat, it must already point at this very record */
745 if (IS_FAT_LOCK(lockword)) {
746 assert(GET_FAT_LOCK(lockword) == lr);
/* otherwise the thin lock must be held by the current thread */
749 assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );
751 /* copy the count from the thin lock */
753 lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
756 LOCK_LOG(("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
757 t->index, (void*) o, (long)o->monitorPtr, (int)lr->count));
759 /* clear flat-lock-contention bit */
761 LOCK_CLEAR_FLC_BIT(o);
763 /* notify waiting objects */
/* wake ALL waiters so contending threads retry against the fat lock */
765 lock_record_notify(t, lr, false);
/* publish the fat lock word (record pointer with shape bit set) */
769 o->monitorPtr = (lock_record_t *) MAKE_FAT_LOCK(lr);
773 /* lock_monitor_enter **********************************************************
775 Acquire the monitor of the given object. If the current thread already
776 owns the monitor, the lock counter is simply increased.
778 This function blocks until it can acquire the monitor.
781 t............the current thread
782 o............the object of which to enter the monitor
785 true.........the lock has been successfully acquired
786 false........an exception has been thrown
788 *******************************************************************************/
790 bool lock_monitor_enter(java_objectheader *o)
793 /* CAUTION: This code assumes that ptrint is unsigned! */
/* null monitors raise NullPointerException (return value signals it) */
798 exceptions_throw_nullpointerexception();
804 thinlock = t->thinlock;
806 /* most common case: try to thin-lock an unlocked object */
808 if ((lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) == THIN_UNLOCKED) {
809 /* success. we locked it */
810 /* The Java Memory Model requires a memory barrier here: */
815 /* next common case: recursive lock with small recursion count */
816 /* We don't have to worry about stale values here, as any stale value */
817 /* will indicate another thread holding the lock (or an inflated lock) */
819 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
820 /* we own this monitor */
821 /* check the current recursion count */
/* (lockword ^ thinlock) isolates the count bits, since the TID and */
/* shape bits are identical in both words                           */
823 if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
825 /* the recursion count is low enough */
827 o->monitorPtr = (lock_record_t *) (lockword + THIN_LOCK_COUNT_INCR);
829 /* success. we locked it */
835 /* recursion count overflow */
/* the count no longer fits in the thin word: inflate to a fat lock */
837 lr = lock_hashtable_get_lock_record(t, o);
838 lock_record_enter(t, lr);
839 lock_inflate(t, o, lr);
846 /* the lock is either contended or fat */
851 if (IS_FAT_LOCK(lockword)) {
853 lr = GET_FAT_LOCK(lockword);
855 /* check for recursive entering */
856 if (lr->owner == t) {
861 /* acquire the mutex of the lock record */
863 lock_record_enter(t, lr);
/* a freshly acquired fat lock must have a zero recursion count */
865 assert(lr->count == 0);
870 /****** inflation path ******/
872 /* first obtain the lock record for this object */
874 lr = lock_hashtable_get_lock_record(t, o);
876 #if defined(ENABLE_JVMTI)
877 /* Monitor Contended Enter */
878 jvmti_MonitorContendedEntering(false, o);
880 /* enter the monitor */
882 lock_record_enter(t, lr);
885 #if defined(ENABLE_JVMTI)
886 /* Monitor Contended Entered */
887 jvmti_MonitorContendedEntering(true, o);
/* loop until the word stops being a thin lock: either the owner      */
/* inflates it, or we win the CAS below and inflate it ourselves      */
892 while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {
893 /* Set the flat lock contention bit to let the owning thread */
894 /* know that we want to be notified of unlocking. */
898 LOCK_LOG(("thread %d set flc bit on %p lr %p\n",
899 t->index, (void*) o, (void*) lr));
901 /* try to lock the object */
903 if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
904 /* we can inflate the lock ourselves */
905 LOCK_LOG(("thread %d inflating lock of %p to lr %p\n",
906 t->index, (void*) o, (void*) lr));
907 lock_inflate(t, o, lr);
910 /* wait until another thread sees the flc bit and notifies us of unlocking */
911 LOCK_LOG(("thread %d waiting for notification on %p lr %p\n",
912 t->index, (void*) o, (void*) lr));
913 lock_record_wait(t, lr, 0, 0);
917 /* we own the inflated lock now */
924 /* lock_monitor_exit ***********************************************************
926 Decrement the counter of a (currently owned) monitor. If the counter
927 reaches zero, release the monitor.
929 If the current thread is not the owner of the monitor, an
930 IllegalMonitorState exception is thrown.
933 t............the current thread
934 o............the object of which to exit the monitor
937 true.........everything ok,
938 false........an exception has been thrown
940 *******************************************************************************/
942 bool lock_monitor_exit(java_objectheader *o)
/* null monitors raise NullPointerException (return value signals it) */
949 exceptions_throw_nullpointerexception();
955 /* We don't have to worry about stale values here, as any stale value */
956 /* will indicate that we don't own the lock. */
958 lockword = (ptrint) o->monitorPtr;
959 thinlock = t->thinlock;
961 /* most common case: we release a thin lock that we hold once */
963 if (lockword == thinlock) {
964 /* memory barrier for Java Memory Model */
966 o->monitorPtr = THIN_UNLOCKED;
967 /* memory barrier for thin locking */
970 /* check if there has been a flat lock contention on this object */
972 if (LOCK_TEST_FLC_BIT(o)) {
975 LOCK_LOG(("thread %d saw flc bit on %p %s\n",
976 t->index, (void*) o, o->vftbl->class->name->text));
978 /* there has been a contention on this thin lock */
980 lr = lock_hashtable_get_lock_record(t, o);
982 LOCK_LOG(("thread %d for %p got lr %p\n",
983 t->index, (void*) o, (void*) lr));
985 lock_record_enter(t, lr);
/* re-test under the record mutex: the bit may have been cleared by */
/* an inflation that happened since the unprotected test above      */
987 if (LOCK_TEST_FLC_BIT(o)) {
988 /* notify a thread that it can try to inflate the lock now */
990 lock_record_notify(t, lr, true);
993 lock_record_exit(t, lr);
999 /* next common case: we release a recursive lock, count > 0 */
1001 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
1002 o->monitorPtr = (lock_record_t *) (lockword - THIN_LOCK_COUNT_INCR);
1006 /* either the lock is fat, or we don't hold it at all */
1008 if (IS_FAT_LOCK(lockword)) {
1012 lr = GET_FAT_LOCK(lockword);
1014 /* check if we own this monitor */
1015 /* We don't have to worry about stale values here, as any stale value */
1016 /* will be != t and thus fail this check. */
1018 if (lr->owner != t) {
1019 exceptions_throw_illegalmonitorstateexception();
1023 /* { the current thread `t` owns the lock record `lr` on object `o` } */
1025 if (lr->count != 0) {
1026 /* we had locked this one recursively. just decrement, it will */
1027 /* still be locked. */
1032 /* unlock this lock record */
1035 pthread_mutex_unlock(&(lr->mutex));
1040 /* legal thin lock cases have been handled above, so this is an error */
1042 exceptions_throw_illegalmonitorstateexception();
1048 /* lock_record_remove_waiter ***************************************************
1050 Remove a thread from the list of waiting threads of a lock record.
1053 lr...........the lock record
1054 t............the current thread
1057 The current thread must be the owner of the lock record.
1059 *******************************************************************************/
1061 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *t)
1063 lock_waiter_t **link;
/* walk the waiter list via a pointer-to-link so the matching node can */
/* be unlinked in place                                                */
1066 link = &(lr->waiters);
1067 while ((w = *link)) {
1068 if (w->waiter == t) {
/* falling out of the loop means t was not registered as a waiter */
1076 /* this should never happen */
1077 fprintf(stderr,"error: waiting thread not found in list of waiters\n");
1083 /* lock_record_wait ************************************************************
1085 Wait on a lock record for a given (maximum) amount of time.
1088 t............the current thread
1089 lr...........the lock record
1090 millis.......milliseconds of timeout
1091 nanos........nanoseconds of timeout
1094 The current thread must be the owner of the lock record.
1095 This is NOT checked by this function!
1097 *******************************************************************************/
1099 static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos)
1101 lock_waiter_t *waiter;
1103 bool wasinterrupted;
1105 /* { the thread t owns the fat lock record lr on the object o } */
1107 /* register us as waiter for this object */
1109 waiter = NEW(lock_waiter_t);
1111 waiter->next = lr->waiters;
1112 lr->waiters = waiter;
1114 /* remember the old lock count */
/* the count is restored below so nested monitorenter depth survives */
/* the wait                                                          */
1116 lockcount = lr->count;
1118 /* unlock this record */
1121 lock_record_exit(t, lr);
1123 /* wait until notified/interrupted/timed out */
1125 wasinterrupted = threads_wait_with_timeout_relative(t, millis, nanos);
1127 /* re-enter the monitor */
1129 lock_record_enter(t, lr);
1131 /* remove us from the list of waiting threads */
1133 lock_record_remove_waiter(lr, t);
1135 /* restore the old lock count */
1137 lr->count = lockcount;
1139 /* if we have been interrupted, throw the appropriate exception */
1142 exceptions_throw_interruptedexception();
1146 /* lock_monitor_wait ***********************************************************
1148 Wait on an object for a given (maximum) amount of time.
1151 t............the current thread
1152 o............the object
1153 millis.......milliseconds of timeout
1154 nanos........nanoseconds of timeout
1157 The current thread must be the owner of the object's monitor.
1159 *******************************************************************************/
1161 static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nanos)
1166 lockword = (ptrint) o->monitorPtr;
1168 /* check if we own this monitor */
1169 /* We don't have to worry about stale values here, as any stale value */
1170 /* will fail this check. */
1172 if (IS_FAT_LOCK(lockword)) {
1174 lr = GET_FAT_LOCK(lockword);
1176 if (lr->owner != t) {
1177 exceptions_throw_illegalmonitorstateexception();
1182 /* it's a thin lock */
/* a thin lock held by anyone else (or unlocked) means we don't own it */
1184 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1185 exceptions_throw_illegalmonitorstateexception();
1189 /* inflate this lock */
/* wait requires a fat lock (a condition-wait needs the record mutex) */
1190 lr = lock_hashtable_get_lock_record(t, o);
1191 lock_record_enter(t, lr);
1192 lock_inflate(t, o, lr);
1195 /* { the thread t owns the fat lock record lr on the object o } */
1197 lock_record_wait(t, lr, millis, nanos);
1201 /* lock_record_notify **********************************************************
1203 Notify one thread or all threads waiting on the given lock record.
1206 t............the current thread
1207 lr...........the lock record
1208 one..........if true, only notify one thread
1211 The current thread must be the owner of the lock record.
1212 This is NOT checked by this function!
1214 *******************************************************************************/
1216 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1218 lock_waiter_t *waiter;
1219 threadobject *waitingthread;
1221 /* { the thread t owns the fat lock record lr on the object o } */
1223 /* for each waiter: */
1225 for (waiter = lr->waiters; waiter; waiter = waiter->next) {
1227 /* signal the waiting thread */
1229 waitingthread = waiter->waiter;
/* waitmutex protects sleeping/signaled; only signal the condvar if  */
/* the thread is actually blocked, but always record the signal so a */
/* thread that has not yet gone to sleep will not miss it            */
1231 pthread_mutex_lock(&waitingthread->waitmutex);
1232 if (waitingthread->sleeping)
1233 pthread_cond_signal(&waitingthread->waitcond);
1234 waitingthread->signaled = true;
1235 pthread_mutex_unlock(&waitingthread->waitmutex);
1237 /* if we should only wake one, we are done */
1245 /* lock_monitor_notify *********************************************************
1247 Notify one thread or all threads waiting on the given object.
1250 t............the current thread
1251 o............the object
1252 one..........if true, only notify one thread
1255 The current thread must be the owner of the object's monitor.
1257 *******************************************************************************/
1259 static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
1264 lockword = (ptrint) o->monitorPtr;
1266 /* check if we own this monitor */
1267 /* We don't have to worry about stale values here, as any stale value */
1268 /* will fail this check. */
1270 if (IS_FAT_LOCK(lockword)) {
1272 lr = GET_FAT_LOCK(lockword);
1274 if (lr->owner != t) {
1275 exceptions_throw_illegalmonitorstateexception();
1280 /* it's a thin lock */
/* a thin lock held by anyone else (or unlocked) means we don't own it */
1282 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1283 exceptions_throw_illegalmonitorstateexception();
1287 /* inflate this lock */
/* notify requires a fat lock so there is a waiter list to signal */
1288 lr = lock_hashtable_get_lock_record(t, o);
1289 lock_record_enter(t, lr);
1290 lock_inflate(t, o, lr);
1293 /* { the thread t owns the fat lock record lr on the object o } */
1295 lock_record_notify(t, lr, one);
1300 /*============================================================================*/
1301 /* INQUIRY FUNCTIONS */
1302 /*============================================================================*/
1305 /* lock_is_held_by_current_thread **********************************************
1307 Return true if the current thread owns the monitor of the given object.
1310 o............the object
1313 true, if the current thread holds the lock of this object.
1315 *******************************************************************************/
1317 bool lock_is_held_by_current_thread(java_objectheader *o)
1322 /* check if we own this monitor */
1323 /* We don't have to worry about stale values here, as any stale value */
1324 /* will fail this check. */
1326 lockword = (ptrint) o->monitorPtr;
1329 if (IS_FAT_LOCK(lockword)) {
1332 /* it's a fat lock */
/* fat case: ownership is recorded explicitly in the lock record */
1333 lr = GET_FAT_LOCK(lockword);
1335 return (lr->owner == t);
1338 /* it's a thin lock */
/* thin case: compare the TID field (count bits masked out) against */
/* this thread's precomputed thin-lock word                         */
1340 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1346 /*============================================================================*/
1347 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1348 /*============================================================================*/
1351 /* lock_wait_for_object ********************************************************
1353 Wait for the given object.
1356 o............the object
1357 millis.......milliseconds to wait
1358 nanos........nanoseconds to wait
1360 *******************************************************************************/
1362 void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
1364 threadobject *thread;
/* convenience wrapper: resolve the current thread and delegate */
1366 thread = THREADOBJECT;
1368 lock_monitor_wait(thread, o, millis, nanos);
1372 /* lock_notify_object **********************************************************
1374 Notify one thread waiting on the given object.
1377 o............the object
1379 *******************************************************************************/
1381 void lock_notify_object(java_objectheader *o)
1383 threadobject *thread;
/* convenience wrapper: notify exactly one waiter (one == true) */
1385 thread = THREADOBJECT;
1387 lock_monitor_notify(thread, o, true);
1391 /* lock_notify_all_object ******************************************************
1393 Notify all threads waiting on the given object.
1396 o............the object
1398 *******************************************************************************/
1400 void lock_notify_all_object(java_objectheader *o)
1402 threadobject *thread;
/* convenience wrapper: notify all waiters (one == false) */
1404 thread = THREADOBJECT;
1406 lock_monitor_notify(thread, o, false);
1411 * These are local overrides for various environment variables in Emacs.
1412 * Please do not remove this and leave it at the end of the file, where
1413 * Emacs will automagically detect them.
1414 * ---------------------------------------------------------------------
1417 * indent-tabs-mode: t
1421 * vim:noexpandtab:sw=4:ts=4: