1 /* src/threads/native/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
4 C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5 E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6 J. Wenninger, Institut f. Computersprachen - TU Wien
8 This file is part of CACAO.
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2, or (at
13 your option) any later version.
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 Contact: cacao@cacaojvm.org
30 Changes: Christian Thalinger
32 $Id: threads.c 4903 2006-05-11 12:48:43Z edwin $
45 #include "mm/memory.h"
47 #include "vm/global.h"
48 #include "vm/exceptions.h"
49 #include "vm/stringlocal.h"
51 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
55 /* includes for atomic instructions: */
57 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
58 #include "threads/native/generic-primitives.h"
60 #include "machine-instr.h"
63 #if defined(ENABLE_JVMTI)
64 #include "native/jvmti/cacaodbg.h"
68 /******************************************************************************/
69 /* DEBUGGING MACROS */
70 /******************************************************************************/
72 /* #define LOCK_VERBOSE */
74 #if defined(LOCK_VERBOSE)
75 #define LOCK_LOG(args) do { printf args; fflush(stdout); } while (0)
77 #define LOCK_LOG(args)
81 /******************************************************************************/
83 /******************************************************************************/
85 /* number of lock records in the first pool allocated for a thread */
86 #define LOCK_INITIAL_LOCK_RECORDS 8
88 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
/* hash an object reference by its address; low alignment bits make the  */
/* distribution uneven, hence the prime-sized table below                 */
90 #define LOCK_HASH(obj) ((ptrint)(obj))
/* CAS wrapper returning the value found at *address before the swap      */
92 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
93 ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
95 /* CAUTION: oldvalue is evaluated twice! */
96 #define COMPARE_AND_SWAP_SUCCEEDS(address, oldvalue, newvalue) \
97 (compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)) == (long)(oldvalue))
100 /******************************************************************************/
101 /* MACROS FOR THE FLAT LOCK CONTENTION BIT */
102 /******************************************************************************/
/* flcword flags a thin lock on which another thread is waiting to be     */
/* notified at unlock time (see lock_monitor_enter/exit)                  */
104 #define LOCK_SET_FLC_BIT(obj) ((obj)->flcword = 1)
105 #define LOCK_CLEAR_FLC_BIT(obj) ((obj)->flcword = 0)
106 #define LOCK_TEST_FLC_BIT(obj) ((obj)->flcword != 0)
109 /******************************************************************************/
110 /* MACROS FOR THIN/FAT LOCKS */
111 /******************************************************************************/
113 /* We use a variant of the tasuki locks described in the paper
115 * Tamiya Onodera, Kiyokuni Kawachiya
116 * A Study of Locking Objects with Bimodal Fields
117 * Proceedings of the ACM OOPSLA '99, pp. 223-237
120 * The underlying thin locks are a variant of the thin locks described in
122 * Bacon, Konuru, Murthy, Serrano
123 * Thin Locks: Featherweight Synchronization for Java
124 * Proceedings of the ACM Conference on Programming Language Design and
125 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
128 * In thin lock mode the lockword (monitorPtr) looks like this:
130 * ,----------------------,-----------,---,
131 * | thread ID | count | 0 |
132 * `----------------------'-----------'---´
134 * thread ID......the 'index' of the owning thread, or 0
135 * count..........number of times the lock has been entered minus 1
136 * 0..............the shape bit is 0 in thin lock mode
138 * In fat lock mode it is basically a lock_record_t *:
140 * ,----------------------------------,---,
141 * | lock_record_t * (without LSB) | 1 |
142 * `----------------------------------'---´
144 * 1..............the shape bit is 1 in fat lock mode
/* lockword width follows the pointer size of the platform               */
/* NOTE(review): the #else/#endif of this conditional were lost in this  */
/* extraction -- restore them when editing                               */
147 #if SIZEOF_VOID_P == 8
148 #define THIN_LOCK_WORD_SIZE 64
150 #define THIN_LOCK_WORD_SIZE 32
/* lowest bit of the lockword: 0 = thin lock, 1 = fat lock               */
153 #define THIN_LOCK_SHAPE_BIT 0x01
155 #define THIN_UNLOCKED 0
/* recursion count occupies bits [1..8] of a thin lockword               */
157 #define THIN_LOCK_COUNT_SHIFT 1
158 #define THIN_LOCK_COUNT_SIZE 8
159 #define THIN_LOCK_COUNT_INCR (1 << THIN_LOCK_COUNT_SHIFT)
160 #define THIN_LOCK_COUNT_MAX ((1 << THIN_LOCK_COUNT_SIZE) - 1)
161 #define THIN_LOCK_COUNT_MASK (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
/* the owning thread's index fills the remaining high bits               */
163 #define THIN_LOCK_TID_SHIFT (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
164 #define THIN_LOCK_TID_SIZE (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
166 #define IS_THIN_LOCK(lockword) (!((lockword) & THIN_LOCK_SHAPE_BIT))
167 #define IS_FAT_LOCK(lockword) ((lockword) & THIN_LOCK_SHAPE_BIT)
/* a fat lockword is a lock_record_t* with the shape bit ORed in; the    */
/* record must therefore be at least 2-byte aligned                      */
169 #define GET_FAT_LOCK(lockword) ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
170 #define MAKE_FAT_LOCK(ptr) ((ptrint)(ptr) | THIN_LOCK_SHAPE_BIT)
/* strip the recursion count: yields (tid | shape) for ownership checks  */
172 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
175 /******************************************************************************/
176 /* GLOBAL VARIABLES */
177 /******************************************************************************/
179 /* global lock record pool list header */
/* NOTE(review): not static -- presumably referenced from another        */
/* translation unit; confirm before narrowing linkage                    */
180 lock_record_pool_t *lock_global_pool;
182 /* mutex for synchronizing access to the global pool */
183 pthread_mutex_t lock_global_pool_lock;
185 /* hashtable mapping objects to lock records */
186 static lock_hashtable_t lock_hashtable;
189 /******************************************************************************/
191 /******************************************************************************/
193 static void lock_hashtable_init(void);
194 static lock_record_t * lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o);
196 static lock_record_t * lock_record_alloc(threadobject *t);
198 static void lock_record_enter(threadobject *t, lock_record_t *lr);
199 static void lock_record_exit(threadobject *t, lock_record_t *lr);
200 static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
201 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
204 /*============================================================================*/
205 /* INITIALIZATION OF DATA STRUCTURES */
206 /*============================================================================*/
209 /* lock_init *******************************************************************
211 Initialize global data for locking.
213 *******************************************************************************/
/* body of lock_init(): set up the global pool mutex and the object ->   */
/* lock record hashtable                                                 */
/* NOTE(review): the `void lock_init(void)` signature and braces were    */
/* lost in this extraction -- restore them when editing                  */
217 pthread_mutex_init(&lock_global_pool_lock, NULL);
219 lock_hashtable_init();
223 /* lock_record_init ************************************************************
225 Initialize a lock record.
228 r............the lock record to initialize
229 t............will become the owner
231 *******************************************************************************/
/* initialize lock record r for future owner t                           */
/* NOTE(review): original lines 234-242 are missing here -- presumably   */
/* the initialization of r's fields (owner/count/waiters/nextfree, cf.   */
/* the asserts in lock_record_recycle); confirm against the original     */
233 static void lock_record_init(lock_record_t *r, threadobject *t)
243 pthread_mutex_init(&(r->mutex), NULL);
247 /* lock_init_execution_env *****************************************************
249 Initialize the execution environment for a thread.
252 thread.......the thread
254 *******************************************************************************/
256 void lock_init_execution_env(threadobject *thread)
258 thread->ee.firstfree = NULL;
259 thread->ee.lockrecordpools = NULL;
260 thread->ee.lockrecordcount = 0;
265 /* lock_pre_compute_thinlock ***************************************************
267 Pre-compute the thin lock value for a thread index.
270 index........the thread index (>= 1)
273 the thin lock value for this thread index
275 *******************************************************************************/
277 ptrint lock_pre_compute_thinlock(s4 index)
279 return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
284 /*============================================================================*/
285 /* LOCK RECORD MANAGEMENT */
286 /*============================================================================*/
289 /* lock_record_alloc_new_pool **************************************************
291 Get a new lock record pool from the memory allocator.
294 thread.......the thread that will own the lock records
295 size.........number of lock records in the pool to allocate
298 the new lock record pool, with initialized lock records
300 *******************************************************************************/
/* allocate and initialize a fresh pool of `size` lock records for       */
/* `thread`; records are chained into a NULL-terminated free list        */
/* NOTE(review): this extraction is missing the `int i;` declaration,    */
/* closing braces, and the final `return pool;` -- restore when editing  */
302 static lock_record_pool_t *lock_record_alloc_new_pool(threadobject *thread, int size)
305 lock_record_pool_t *pool;
307 /* get the pool from the memory allocator */
309 pool = mem_alloc(sizeof(lock_record_pool_header_t)
310 + sizeof(lock_record_t) * size)
312 /* initialize the pool header */
314 pool->header.size = size;
316 /* initialize the individual lock records */
318 for (i=0; i<size; i++) {
319 lock_record_init(&pool->lr[i], thread);
/* chain each record to its successor; the last link is fixed up below */
321 pool->lr[i].nextfree = &pool->lr[i+1];
324 /* terminate free list */
326 pool->lr[i-1].nextfree = NULL;
332 /* lock_record_alloc_pool ******************************************************
334 Allocate a lock record pool. The pool is either taken from the global free
335 list or requested from the memory allocator.
338 thread.......the thread that will own the lock records
339 size.........number of lock records in the pool to allocate
342 the new lock record pool, with initialized lock records
344 *******************************************************************************/
/* get a pool of `size` records: reuse one from the global freelist if   */
/* available, otherwise allocate a new one                               */
/* NOTE(review): missing in this extraction: the `int i;` declaration,   */
/* the `return pool;` of the freelist branch, and closing braces         */
346 static lock_record_pool_t *lock_record_alloc_pool(threadobject *t, int size)
348 pthread_mutex_lock(&lock_global_pool_lock);
350 if (lock_global_pool) {
352 lock_record_pool_t *pool;
354 /* pop a pool from the global freelist */
356 pool = lock_global_pool;
357 lock_global_pool = pool->header.next;
/* drop the global lock before touching the now-private pool */
359 pthread_mutex_unlock(&lock_global_pool_lock);
361 /* re-initialize owner and freelist chaining */
363 for (i=0; i < pool->header.size; i++) {
364 pool->lr[i].owner = NULL;
365 pool->lr[i].nextfree = &pool->lr[i+1];
367 pool->lr[i-1].nextfree = NULL;
372 pthread_mutex_unlock(&lock_global_pool_lock);
374 /* we have to get a new pool from the allocator */
376 return lock_record_alloc_new_pool(t, size);
380 /* lock_record_free_pools ******************************************************
382 Free the lock record pools in the given linked list. The pools are inserted
383 into the global freelist.
386 pool.........list header
388 *******************************************************************************/
/* return a linked list of pools to the global freelist                  */
/* (currently disabled -- see the assert below)                          */
390 void lock_record_free_pools(lock_record_pool_t *pool)
392 lock_record_pool_header_t *last;
394 assert(false); /* XXX this function does not match the new locking */
395 /* algorithm. We must find another way to free */
396 /* unused lock records. */
401 pthread_mutex_lock(&lock_global_pool_lock);
403 /* find the last pool in the list */
405 last = &pool->header;
/* NOTE(review): the loop header for this walk (while last->next) was    */
/* lost in this extraction                                               */
407 last = &last->next->header;
409 /* chain it to the lock_global_pool freelist */
411 last->next = lock_global_pool;
413 /* insert the freed pools into the freelist */
415 lock_global_pool = pool;
417 pthread_mutex_unlock(&lock_global_pool_lock);
421 /* lock_record_alloc ***********************************************************
423 Allocate a lock record which is owned by the current thread.
426 t............the current thread
428 *******************************************************************************/
/* pop a lock record from t's per-thread freelist, refilling it with a   */
/* doubling-size pool when empty                                         */
/* NOTE(review): missing in this extraction: declarations of `r` and     */
/* `poolsize`, the empty-freelist check, the freelist-refill assignment, */
/* `r = t->ee.firstfree;`, and the final `return r;`                     */
430 static lock_record_t *lock_record_alloc(threadobject *t)
439 lock_record_pool_t *pool;
/* grow geometrically so allocation stays amortized-cheap */
443 poolsize = t->ee.lockrecordcount ? t->ee.lockrecordcount * 2
444 : LOCK_INITIAL_LOCK_RECORDS;
445 pool = lock_record_alloc_pool(t, poolsize);
447 /* add it to our per-thread pool list */
449 pool->header.next = t->ee.lockrecordpools;
450 t->ee.lockrecordpools = pool;
451 t->ee.lockrecordcount += pool->header.size;
453 /* take the first record from the pool */
457 /* pop the record from the freelist */
459 t->ee.firstfree = r->nextfree;
461 r->nextfree = NULL; /* in order to find invalid uses of nextfree */
468 /* lock_record_recycle *********************************************************
470 Recycle the given lock record. It will be inserted in the appropriate
474 t............the owner
475 r............lock record to recycle
477 *******************************************************************************/
/* push an unowned record back onto t's freelist for reuse               */
/* NOTE(review): the final `t->ee.firstfree = r;` and closing brace were */
/* lost in this extraction                                               */
479 static inline void lock_record_recycle(threadobject *t, lock_record_t *r)
/* only free, unchained records may be recycled */
483 assert(r->owner == NULL);
484 assert(r->nextfree == NULL);
486 r->nextfree = t->ee.firstfree;
492 /*============================================================================*/
493 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
494 /*============================================================================*/
497 /* lock_hashtable_init *********************************************************
499 Initialize the global hashtable mapping objects to lock records.
501 *******************************************************************************/
503 static void lock_hashtable_init(void)
505 pthread_mutex_init(&(lock_hashtable.mutex), NULL);
507 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
508 lock_hashtable.entries = 0;
509 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
510 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
514 /* lock_hashtable_grow *********************************************************
516 Grow the lock record hashtable to about twice its current size and
519 *******************************************************************************/
521 /* must be called with hashtable mutex locked */
/* double (approximately) the hashtable and rehash all chains; caller    */
/* holds lock_hashtable.mutex                                            */
/* NOTE(review): missing in this extraction: declarations of i, h,       */
/* newslot, lr/next, and the per-bucket chain-walk loop header that      */
/* saves lr->hashlink before relinking -- restore before editing         */
522 static void lock_hashtable_grow(void)
526 lock_record_t **oldtable;
527 lock_record_t **newtable;
534 /* allocate a new table */
536 oldsize = lock_hashtable.size;
537 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
539 LOCK_LOG(("growing lock hashtable to size %d\n", newsize));
541 oldtable = lock_hashtable.ptr;
542 newtable = MNEW(lock_record_t *, newsize);
543 MZERO(newtable, lock_record_t *, newsize);
545 /* rehash the entries */
547 for (i=0; i<oldsize; ++i) {
548 h = LOCK_HASH(lr->obj);
553 newslot = h % newsize;
/* push-front into the new bucket */
555 lr->hashlink = newtable[newslot];
556 newtable[newslot] = lr;
562 /* replace the old table */
564 lock_hashtable.ptr = newtable;
565 lock_hashtable.size = newsize;
567 MFREE(oldtable, lock_record_t *, oldsize);
571 /* lock_hashtable_get_lock_record **********************************************
573 Find the lock record for the given object. If it does not exists, yet,
574 create it and enter it in the hashtable.
577 t.................the current thread
578 o.................the object to look up
581 the lock record to use for this object
583 *******************************************************************************/
/* return the (possibly newly created) lock record for object o          */
/* NOTE(review): missing in this extraction: local declarations, the     */
/* hash-chain search loop with its obj comparison, the early unlock +    */
/* return on a hit, setting lr->obj for the new record, and the final    */
/* `return lr;` -- restore before editing                                */
585 static lock_record_t *lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o)
/* fast path: an inflated lock already names its record */
591 lockword = (ptrint) o->monitorPtr;
593 if (IS_FAT_LOCK(lockword)) {
594 return GET_FAT_LOCK(lockword);
597 /* lock the hashtable */
599 pthread_mutex_lock(&(lock_hashtable.mutex));
601 /* lookup the lock record in the hashtable */
603 slot = LOCK_HASH(o) % lock_hashtable.size;
604 lr = lock_hashtable.ptr[slot];
607 pthread_mutex_unlock(&(lock_hashtable.mutex));
614 /* not found, we must create a new one */
616 lr = lock_record_alloc(t);
618 LOCK_LOG(("thread %d allocated for %p new lr %p\n",
619 t->index, (void*) o, (void*) lr));
621 /* enter it in the hashtable */
623 lr->hashlink = lock_hashtable.ptr[slot];
624 lock_hashtable.ptr[slot] = lr;
625 lock_hashtable.entries++;
627 /* check whether the hash should grow */
/* grow at 4/3 load factor to keep chains short */
629 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
630 lock_hashtable_grow();
633 /* unlock the hashtable */
635 pthread_mutex_unlock(&(lock_hashtable.mutex));
637 /* return the new lock record */
643 /*============================================================================*/
644 /* OBJECT LOCK INITIALIZATION */
645 /*============================================================================*/
648 /* lock_init_object_lock *******************************************************
650 Initialize the monitor pointer of the given object. The monitor gets
651 initialized to an unlocked state.
653 *******************************************************************************/
/* set a freshly allocated object's monitor word to the unlocked thin    */
/* lock                                                                  */
/* NOTE(review): original lines 656-658 and the tail of this function    */
/* are missing here -- they may contain an assert and/or clearing of the */
/* FLC bit; confirm against the original                                 */
655 void lock_init_object_lock(java_objectheader *o)
659 o->monitorPtr = (lock_record_t *) THIN_UNLOCKED;
664 /* lock_get_initial_lock_word **************************************************
666 Returns the initial (unlocked) lock word. The pointer is
667 required in the code generator to set up a virtual
668 java_objectheader for code patch locking.
670 *******************************************************************************/
672 lock_record_t *lock_get_initial_lock_word(void)
674 return (lock_record_t *) THIN_UNLOCKED;
679 /*============================================================================*/
680 /* LOCKING ALGORITHM */
681 /*============================================================================*/
684 /* lock_record_enter ***********************************************************
686 Enter the lock represented by the given lock record.
689 t.................the current thread
690 lr................the lock record
692 *******************************************************************************/
/* acquire the fat lock record's mutex (blocking)                        */
/* NOTE(review): original lines 697-700 are missing -- they may set      */
/* lr->owner after the mutex is taken; confirm against the original      */
694 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
696 pthread_mutex_lock(&(lr->mutex));
701 /* lock_record_exit ************************************************************
703 Release the lock represented by the given lock record.
706 t.................the current thread
707 lr................the lock record
710 The current thread must own the lock represented by this lock record.
711 This is NOT checked by this function!
713 *******************************************************************************/
/* release the fat lock record's mutex; caller must own it (unchecked)   */
/* NOTE(review): original lines 716-717 are missing -- one of them may   */
/* reset lr->owner to NULL before unlocking; confirm against original    */
715 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
718 pthread_mutex_unlock(&(lr->mutex));
722 /* lock_inflate ****************************************************************
724 Inflate the lock of the given object. This may only be called by the
725 owner of the monitor of the object.
728 t............the current thread
729 o............the object of which to inflate the lock
730 lr...........the lock record to install. The current thread must
731 own the lock of this lock record!
734 The current thread must be the owner of this object's monitor AND
735 of the lock record's lock!
737 *******************************************************************************/
/* turn o's thin lock into a fat lock backed by lr; caller owns both the */
/* object's monitor and lr's mutex                                       */
/* NOTE(review): missing in this extraction: the `ptrint lockword;`      */
/* declaration, the early return of the already-fat branch, and closing  */
/* braces -- restore before editing                                      */
739 static void lock_inflate(threadobject *t, java_objectheader *o, lock_record_t *lr)
743 /* get the current lock count */
745 lockword = (ptrint) o->monitorPtr;
/* idempotent: if someone already inflated to this record, nothing to do */
747 if (IS_FAT_LOCK(lockword)) {
748 assert(GET_FAT_LOCK(lockword) == lr);
/* otherwise we must hold the thin lock ourselves */
751 assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );
753 /* copy the count from the thin lock */
755 lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
758 LOCK_LOG(("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
759 t->index, (void*) o, (long)o->monitorPtr, (int)lr->count));
761 /* clear flat-lock-contention bit */
763 LOCK_CLEAR_FLC_BIT(o);
765 /* notify waiting objects */
767 lock_record_notify(t, lr, false);
/* publish the fat lock word last, so waiters see a consistent record */
771 o->monitorPtr = (lock_record_t *) MAKE_FAT_LOCK(lr);
775 /* lock_monitor_enter **********************************************************
777 Acquire the monitor of the given object. If the current thread already
778 owns the monitor, the lock counter is simply increased.
780 This function blocks until it can acquire the monitor.
783 t............the current thread
784 o............the object of which to enter the monitor
787 true.........the lock has been successfully acquired
788 false........an exception has been thrown
790 *******************************************************************************/
/* acquire o's monitor for the current thread, blocking if contended;    */
/* returns false only after throwing (e.g. NullPointerException)         */
/* NOTE(review): this extraction is missing many lines: declarations of  */
/* t, lr, lockword, the null-check `if`, several `return true;` paths,   */
/* memory barriers, the owner/count bookkeeping after the fat-lock       */
/* mutex is taken, the LOCK_SET_FLC_BIT call in the spin loop, #endif    */
/* lines, and closing braces -- compare against the original before any  */
/* change                                                                */
792 bool lock_monitor_enter(java_objectheader *o)
795 /* CAUTION: This code assumes that ptrint is unsigned! */
800 exceptions_throw_nullpointerexception();
806 thinlock = t->thinlock;
808 /* most common case: try to thin-lock an unlocked object */
810 if ((lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) == THIN_UNLOCKED) {
811 /* success. we locked it */
812 /* The Java Memory Model requires a memory barrier here: */
817 /* next common case: recursive lock with small recursion count */
818 /* We don't have to worry about stale values here, as any stale value */
819 /* will indicate another thread holding the lock (or an inflated lock) */
821 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
822 /* we own this monitor */
823 /* check the current recursion count */
/* (lockword ^ thinlock) isolates the count bits, since tid/shape match */
825 if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
827 /* the recursion count is low enough */
829 o->monitorPtr = (lock_record_t *) (lockword + THIN_LOCK_COUNT_INCR);
831 /* success. we locked it */
837 /* recursion count overflow */
/* fall back to a fat lock that carries the full count */
839 lr = lock_hashtable_get_lock_record(t, o);
840 lock_record_enter(t, lr);
841 lock_inflate(t, o, lr);
848 /* the lock is either contended or fat */
853 if (IS_FAT_LOCK(lockword)) {
855 lr = GET_FAT_LOCK(lockword);
857 /* check for recursive entering */
858 if (lr->owner == t) {
863 /* acquire the mutex of the lock record */
865 lock_record_enter(t, lr);
867 assert(lr->count == 0);
872 /****** inflation path ******/
874 /* first obtain the lock record for this object */
876 lr = lock_hashtable_get_lock_record(t, o);
878 #if defined(ENABLE_JVMTI)
879 /* Monitor Contended Enter */
880 jvmti_MonitorContendedEntering(false, o);
882 /* enter the monitor */
884 lock_record_enter(t, lr);
887 #if defined(ENABLE_JVMTI)
888 /* Monitor Contended Entered */
889 jvmti_MonitorContendedEntering(true, o);
/* spin: while the lockword is still thin, either grab it (then inflate) */
/* or flag contention and wait for the owner's unlock notification       */
894 while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {
895 /* Set the flat lock contention bit to let the owning thread */
896 /* know that we want to be notified of unlocking. */
900 LOCK_LOG(("thread %d set flc bit on %p lr %p\n",
901 t->index, (void*) o, (void*) lr));
903 /* try to lock the object */
905 if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
906 /* we can inflate the lock ourselves */
907 LOCK_LOG(("thread %d inflating lock of %p to lr %p\n",
908 t->index, (void*) o, (void*) lr));
909 lock_inflate(t, o, lr);
912 /* wait until another thread sees the flc bit and notifies us of unlocking */
913 LOCK_LOG(("thread %d waiting for notification on %p lr %p\n",
914 t->index, (void*) o, (void*) lr));
915 lock_record_wait(t, lr, 0, 0);
919 /* we own the inflated lock now */
926 /* lock_monitor_exit ***********************************************************
928 Decrement the counter of a (currently owned) monitor. If the counter
929 reaches zero, release the monitor.
931 If the current thread is not the owner of the monitor, an
932 IllegalMonitorState exception is thrown.
935 t............the current thread
936 o............the object of which to exit the monitor
939 true.........everything ok,
940 false........an exception has been thrown
942 *******************************************************************************/
/* release (or decrement) o's monitor; throws IllegalMonitorState-       */
/* Exception and returns false if the caller does not own it             */
/* NOTE(review): this extraction is missing: declarations of t/lockword/ */
/* thinlock/lr, the null check, memory barriers, several `return true;`/ */
/* `return false;` statements, the count decrement in the recursive fat  */
/* branch, the owner reset before the final mutex unlock, and closing    */
/* braces -- compare against the original before any change              */
944 bool lock_monitor_exit(java_objectheader *o)
952 /* We don't have to worry about stale values here, as any stale value */
953 /* will indicate that we don't own the lock. */
955 lockword = (ptrint) o->monitorPtr;
956 thinlock = t->thinlock;
958 /* most common case: we release a thin lock that we hold once */
960 if (lockword == thinlock) {
961 /* memory barrier for Java Memory Model */
963 o->monitorPtr = THIN_UNLOCKED;
964 /* memory barrier for thin locking */
967 /* check if there has been a flat lock contention on this object */
969 if (LOCK_TEST_FLC_BIT(o)) {
972 LOCK_LOG(("thread %d saw flc bit on %p %s\n",
973 t->index, (void*) o, o->vftbl->class->name->text));
975 /* there has been a contention on this thin lock */
977 lr = lock_hashtable_get_lock_record(t, o);
979 LOCK_LOG(("thread %d for %p got lr %p\n",
980 t->index, (void*) o, (void*) lr));
982 lock_record_enter(t, lr);
/* re-test under the record mutex: the bit may have been cleared by an  */
/* inflating thread in the meantime                                     */
984 if (LOCK_TEST_FLC_BIT(o)) {
985 /* notify a thread that it can try to inflate the lock now */
987 lock_record_notify(t, lr, true);
990 lock_record_exit(t, lr);
996 /* next common case: we release a recursive lock, count > 0 */
998 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
999 o->monitorPtr = (lock_record_t *) (lockword - THIN_LOCK_COUNT_INCR);
1003 /* either the lock is fat, or we don't hold it at all */
1005 if (IS_FAT_LOCK(lockword)) {
1009 lr = GET_FAT_LOCK(lockword);
1011 /* check if we own this monitor */
1012 /* We don't have to worry about stale values here, as any stale value */
1013 /* will be != t and thus fail this check. */
1015 if (lr->owner != t) {
1016 exceptions_throw_illegalmonitorstateexception();
1020 /* { the current thread `t` owns the lock record `lr` on object `o` } */
1022 if (lr->count != 0) {
1023 /* we had locked this one recursively. just decrement, it will */
1024 /* still be locked. */
1029 /* unlock this lock record */
1032 pthread_mutex_unlock(&(lr->mutex));
1037 /* legal thin lock cases have been handled above, so this is an error */
1039 exceptions_throw_illegalmonitorstateexception();
1045 /* lock_record_remove_waiter ***************************************************
1047 Remove a thread from the list of waiting threads of a lock record.
1050 lr...........the lock record
1051 t............the current thread
1054 The current thread must be the owner of the lock record.
1056 *******************************************************************************/
/* unlink the waiter entry of thread t from lr's waiter list; caller     */
/* owns lr                                                               */
/* NOTE(review): missing in this extraction: the `lock_waiter_t *w;`     */
/* declaration, the unlink + FREE + return inside the match branch, the  */
/* `link = &(w->next);` advance, and closing braces                      */
1058 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *t)
1060 lock_waiter_t **link;
/* walk via a pointer-to-pointer so unlinking needs no prev pointer */
1063 link = &(lr->waiters);
1064 while ((w = *link)) {
1065 if (w->waiter == t) {
1073 /* this should never happen */
1074 fprintf(stderr,"error: waiting thread not found in list of waiters\n");
1080 /* lock_record_wait ************************************************************
1082 Wait on a lock record for a given (maximum) amount of time.
1085 t............the current thread
1086 lr...........the lock record
1087 millis.......milliseconds of timeout
1088 nanos........nanoseconds of timeout
1091 The current thread must be the owner of the lock record.
1092 This is NOT checked by this function!
1094 *******************************************************************************/
/* block the owner t on fat lock lr for at most millis+nanos (0 = wait   */
/* forever), releasing and re-acquiring the record around the sleep      */
/* NOTE(review): missing in this extraction: the `s4 lockcount;`         */
/* declaration, waiter->waiter assignment, lr->count reset before the    */
/* wait, freeing of `waiter`, the `if (wasinterrupted)` guard around the */
/* exception, and closing braces                                         */
1096 static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos)
1098 lock_waiter_t *waiter;
1100 bool wasinterrupted;
1102 /* { the thread t owns the fat lock record lr on the object o } */
1104 /* register us as waiter for this object */
1106 waiter = NEW(lock_waiter_t);
1108 waiter->next = lr->waiters;
1109 lr->waiters = waiter;
1111 /* remember the old lock count */
1113 lockcount = lr->count;
1115 /* unlock this record */
1118 lock_record_exit(t, lr);
1120 /* wait until notified/interrupted/timed out */
1122 wasinterrupted = threads_wait_with_timeout_relative(t, millis, nanos);
1124 /* re-enter the monitor */
1126 lock_record_enter(t, lr);
1128 /* remove us from the list of waiting threads */
1130 lock_record_remove_waiter(lr, t);
1132 /* restore the old lock count */
1134 lr->count = lockcount;
1136 /* if we have been interrupted, throw the appropriate exception */
1139 *exceptionptr = new_exception(string_java_lang_InterruptedException);
1143 /* lock_monitor_wait ***********************************************************
1145 Wait on an object for a given (maximum) amount of time.
1148 t............the current thread
1149 o............the object
1150 millis.......milliseconds of timeout
1151 nanos........nanoseconds of timeout
1154 The current thread must be the owner of the object's monitor.
1156 *******************************************************************************/
/* Object.wait(): verify ownership, inflate a thin lock if needed, then  */
/* delegate to lock_record_wait                                          */
/* NOTE(review): missing in this extraction: declarations of lockword/   */
/* lr, the `return;` after each throw, the else of the fat/thin split,   */
/* and closing braces                                                    */
1158 static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nanos)
1163 lockword = (ptrint) o->monitorPtr;
1165 /* check if we own this monitor */
1166 /* We don't have to worry about stale values here, as any stale value */
1167 /* will fail this check. */
1169 if (IS_FAT_LOCK(lockword)) {
1171 lr = GET_FAT_LOCK(lockword);
1173 if (lr->owner != t) {
1174 exceptions_throw_illegalmonitorstateexception();
1179 /* it's a thin lock */
1181 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1182 exceptions_throw_illegalmonitorstateexception();
1186 /* inflate this lock */
/* waiting requires a fat lock (condition-variable semantics) */
1187 lr = lock_hashtable_get_lock_record(t, o);
1188 lock_record_enter(t, lr);
1189 lock_inflate(t, o, lr);
1192 /* { the thread t owns the fat lock record lr on the object o } */
1194 lock_record_wait(t, lr, millis, nanos);
1198 /* lock_record_notify **********************************************************
1200 Notify one thread or all threads waiting on the given lock record.
1203 t............the current thread
1204 lr...........the lock record
1205 one..........if true, only notify one thread
1208 The current thread must be the owner of the lock record.
1209 This is NOT checked by this function!
1211 *******************************************************************************/
/* wake one (one==true) or all threads waiting on lr; caller owns lr     */
/* NOTE(review): missing in this extraction: the `if (one) break;` (or   */
/* equivalent) at the loop end and closing braces                        */
1213 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1215 lock_waiter_t *waiter;
1216 threadobject *waitingthread;
1218 /* { the thread t owns the fat lock record lr on the object o } */
1220 /* for each waiter: */
1222 for (waiter = lr->waiters; waiter; waiter = waiter->next) {
1224 /* signal the waiting thread */
1226 waitingthread = waiter->waiter;
/* signal under the waiter's own mutex so the flag and wakeup are atomic */
1228 pthread_mutex_lock(&waitingthread->waitmutex);
1229 if (waitingthread->sleeping)
1230 pthread_cond_signal(&waitingthread->waitcond);
/* set unconditionally: a not-yet-sleeping waiter will see the flag */
1231 waitingthread->signaled = true;
1232 pthread_mutex_unlock(&waitingthread->waitmutex);
1234 /* if we should only wake one, we are done */
1242 /* lock_monitor_notify *********************************************************
1244 Notify one thread or all threads waiting on the given object.
1247 t............the current thread
1248 o............the object
1249 one..........if true, only notify one thread
1252 The current thread must be the owner of the object's monitor.
1254 *******************************************************************************/
/* Object.notify()/notifyAll(): verify ownership, inflate a thin lock if */
/* needed, then delegate to lock_record_notify                           */
/* NOTE(review): missing in this extraction: declarations of lockword/   */
/* lr, the `return;` after each throw, the else of the fat/thin split,   */
/* and closing braces                                                    */
1256 static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
1261 lockword = (ptrint) o->monitorPtr;
1263 /* check if we own this monitor */
1264 /* We don't have to worry about stale values here, as any stale value */
1265 /* will fail this check. */
1267 if (IS_FAT_LOCK(lockword)) {
1269 lr = GET_FAT_LOCK(lockword);
1271 if (lr->owner != t) {
1272 exceptions_throw_illegalmonitorstateexception();
1277 /* it's a thin lock */
1279 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1280 exceptions_throw_illegalmonitorstateexception();
1284 /* inflate this lock */
/* notification needs the fat lock's waiter list */
1285 lr = lock_hashtable_get_lock_record(t, o);
1286 lock_record_enter(t, lr);
1287 lock_inflate(t, o, lr);
1290 /* { the thread t owns the fat lock record lr on the object o } */
1292 lock_record_notify(t, lr, one);
1297 /*============================================================================*/
1298 /* INQUIRY FUNCTIONS */
1299 /*============================================================================*/
1302 /* lock_is_held_by_current_thread **********************************************
1304 Return true if the current thread owns the monitor of the given object.
1307 o............the object
1310 true, if the current thread holds the lock of this object.
1312 *******************************************************************************/
/* true iff the current thread owns o's monitor (thin or fat)            */
/* NOTE(review): missing in this extraction: declarations of t/lockword/ */
/* lr and closing braces                                                 */
1314 bool lock_is_held_by_current_thread(java_objectheader *o)
1319 /* check if we own this monitor */
1320 /* We don't have to worry about stale values here, as any stale value */
1321 /* will fail this check. */
1323 lockword = (ptrint) o->monitorPtr;
1326 if (IS_FAT_LOCK(lockword)) {
1329 /* it's a fat lock */
1330 lr = GET_FAT_LOCK(lockword);
1332 return (lr->owner == t);
1335 /* it's a thin lock */
/* ignore the recursion count; only tid + shape must match our thinlock */
1337 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1343 /*============================================================================*/
1344 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1345 /*============================================================================*/
1348 /* lock_wait_for_object ********************************************************
1350 Wait for the given object.
1353 o............the object
1354 millis.......milliseconds to wait
1355 nanos........nanoseconds to wait
1357 *******************************************************************************/
1359 void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
1361 threadobject *t = (threadobject*) THREADOBJECT;
1362 lock_monitor_wait(t, o, millis, nanos);
1366 /* lock_notify_object **********************************************************
1368 Notify one thread waiting on the given object.
1371 o............the object
1373 *******************************************************************************/
1375 void lock_notify_object(java_objectheader *o)
1377 threadobject *t = (threadobject*) THREADOBJECT;
1378 lock_monitor_notify(t, o, true);
1382 /* lock_notify_all_object ******************************************************
1384 Notify all threads waiting on the given object.
1387 o............the object
1389 *******************************************************************************/
1391 void lock_notify_all_object(java_objectheader *o)
1393 threadobject *t = (threadobject*) THREADOBJECT;
1394 lock_monitor_notify(t, o, false);
1398 * These are local overrides for various environment variables in Emacs.
1399 * Please do not remove this and leave it at the end of the file, where
1400 * Emacs will automagically detect them.
1401 * ---------------------------------------------------------------------
1404 * indent-tabs-mode: t
1408 * vim:noexpandtab:sw=4:ts=4: