1 /* src/threads/native/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
4 C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5 E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6 J. Wenninger, Institut f. Computersprachen - TU Wien
8 This file is part of CACAO.
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2, or (at
13 your option) any later version.
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 $Id: threads.c 4903 2006-05-11 12:48:43Z edwin $
40 #include "mm/memory.h"
42 #include "threads/native/lock.h"
43 #include "threads/native/threads.h"
45 #include "vm/global.h"
46 #include "vm/exceptions.h"
47 #include "vm/stringlocal.h"
50 #include "vmcore/options.h"
52 #if defined(ENABLE_STATISTICS)
53 # include "vmcore/statistics.h"
56 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
60 /* includes for atomic instructions: */
62 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
63 #include "threads/native/generic-primitives.h"
65 #include "machine-instr.h"
68 #if defined(ENABLE_JVMTI)
69 #include "native/jvmti/cacaodbg.h"
73 /******************************************************************************/
74 /* DEBUGGING MACROS */
75 /******************************************************************************/
77 /* #define LOCK_VERBOSE */
79 #if defined(LOCK_VERBOSE)
80 #define LOCK_LOG(args) do { printf args; fflush(stdout); } while (0)
82 #define LOCK_LOG(args)
86 /******************************************************************************/
88 /******************************************************************************/
90 /* number of lock records in the first pool allocated for a thread */
91 #define LOCK_INITIAL_LOCK_RECORDS 8
93 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
95 #define LOCK_HASH(obj) ((ptrint)(obj))
97 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
98 ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
100 /* CAUTION: oldvalue is evaluated twice! */
101 #define COMPARE_AND_SWAP_SUCCEEDS(address, oldvalue, newvalue) \
102 (compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)) == (long)(oldvalue))
105 /******************************************************************************/
106 /* MACROS FOR THE FLAT LOCK CONTENTION BIT */
107 /******************************************************************************/
109 #define LOCK_SET_FLC_BIT(obj) ((obj)->hdrflags |= HDRFLAG_FLC)
110 #define LOCK_CLEAR_FLC_BIT(obj) ((obj)->hdrflags &= ~ HDRFLAG_FLC)
111 #define LOCK_TEST_FLC_BIT(obj) ((obj)->hdrflags & HDRFLAG_FLC)
114 /******************************************************************************/
115 /* MACROS FOR THIN/FAT LOCKS */
116 /******************************************************************************/
118 /* We use a variant of the tasuki locks described in the paper
120 * Tamiya Onodera, Kiyokuni Kawachiya
121 * A Study of Locking Objects with Bimodal Fields
122 * Proceedings of the ACM OOPSLA '99, pp. 223-237
125 * The underlying thin locks are a variant of the thin locks described in
127 * Bacon, Konuru, Murthy, Serrano
128 * Thin Locks: Featherweight Synchronization for Java
129 * Proceedings of the ACM Conference on Programming Language Design and
130 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
133 * In thin lock mode the lockword (monitorPtr) looks like this:
135 * ,----------------------,-----------,---,
136 * | thread ID | count | 0 |
137 * `----------------------'-----------'---´
139 * thread ID......the 'index' of the owning thread, or 0
140 * count..........number of times the lock has been entered minus 1
141 * 0..............the shape bit is 0 in thin lock mode
143 * In fat lock mode it is basically a lock_record_t *:
145 * ,----------------------------------,---,
146 * | lock_record_t * (without LSB) | 1 |
147 * `----------------------------------'---´
149 * 1..............the shape bit is 1 in fat lock mode
152 #if SIZEOF_VOID_P == 8
153 #define THIN_LOCK_WORD_SIZE 64
155 #define THIN_LOCK_WORD_SIZE 32
158 #define THIN_LOCK_SHAPE_BIT 0x01
160 #define THIN_UNLOCKED 0
162 #define THIN_LOCK_COUNT_SHIFT 1
163 #define THIN_LOCK_COUNT_SIZE 8
164 #define THIN_LOCK_COUNT_INCR (1 << THIN_LOCK_COUNT_SHIFT)
165 #define THIN_LOCK_COUNT_MAX ((1 << THIN_LOCK_COUNT_SIZE) - 1)
166 #define THIN_LOCK_COUNT_MASK (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
168 #define THIN_LOCK_TID_SHIFT (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
169 #define THIN_LOCK_TID_SIZE (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
171 #define IS_THIN_LOCK(lockword) (!((lockword) & THIN_LOCK_SHAPE_BIT))
172 #define IS_FAT_LOCK(lockword) ((lockword) & THIN_LOCK_SHAPE_BIT)
174 #define GET_FAT_LOCK(lockword) ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
175 #define MAKE_FAT_LOCK(ptr) ((ptrint)(ptr) | THIN_LOCK_SHAPE_BIT)
177 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
180 /******************************************************************************/
181 /* GLOBAL VARIABLES */
182 /******************************************************************************/
184 /* global lock record pool list header */
185 lock_record_pool_t *lock_global_pool;
187 /* mutex for synchronizing access to the global pool */
188 pthread_mutex_t lock_global_pool_lock;
190 /* hashtable mapping objects to lock records */
191 static lock_hashtable_t lock_hashtable;
194 /******************************************************************************/
196 /******************************************************************************/
198 static void lock_hashtable_init(void);
199 static lock_record_t * lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o);
201 static lock_record_t * lock_record_alloc(threadobject *t);
203 static void lock_record_enter(threadobject *t, lock_record_t *lr);
204 static void lock_record_exit(threadobject *t, lock_record_t *lr);
205 static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
206 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
209 /*============================================================================*/
210 /* INITIALIZATION OF DATA STRUCTURES */
211 /*============================================================================*/
214 /* lock_init *******************************************************************
216 Initialize global data for locking.
218 *******************************************************************************/
222 pthread_mutex_init(&lock_global_pool_lock, NULL);
224 lock_hashtable_init();
/* lock_record_init ************************************************************

   Initialize a lock record.

   IN:
      r............the lock record to initialize
      t............will become the owner

*******************************************************************************/

static void lock_record_init(lock_record_t *r, threadobject *t)
	/* NOTE(review): initialization of r's fields (owner, count, waiters,
	   nextfree) is not visible in this excerpt, although sibling code
	   (lock_record_alloc_pool, lock_record_recycle) relies on those fields
	   being set -- confirm against the full source. */
	pthread_mutex_init(&(r->mutex), NULL);
252 /* lock_init_execution_env *****************************************************
254 Initialize the execution environment for a thread.
257 thread.......the thread
259 *******************************************************************************/
261 void lock_init_execution_env(threadobject *thread)
263 thread->ee.firstfree = NULL;
264 thread->ee.lockrecordpools = NULL;
265 thread->ee.lockrecordcount = 0;
270 /* lock_pre_compute_thinlock ***************************************************
272 Pre-compute the thin lock value for a thread index.
275 index........the thead index (>= 1)
278 the thin lock value for this thread index
280 *******************************************************************************/
282 ptrint lock_pre_compute_thinlock(s4 index)
284 return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
289 /*============================================================================*/
290 /* LOCK RECORD MANAGEMENT */
291 /*============================================================================*/
/* lock_record_alloc_new_pool **************************************************

   Get a new lock record pool from the memory allocator.

   IN:
      thread.......the thread that will own the lock records
      size.........number of lock records in the pool to allocate (>= 1)

   RETURN VALUE:
      the new lock record pool, with initialized lock records

*******************************************************************************/

static lock_record_pool_t *lock_record_alloc_new_pool(threadobject *thread, int size)
	lock_record_pool_t *pool;

	/* get the pool from the memory allocator */
	/* layout: one pool header immediately followed by `size` inline records */

	pool = mem_alloc(sizeof(lock_record_pool_header_t)
					 + sizeof(lock_record_t) * size);

	/* initialize the pool header */

	pool->header.size = size;

	/* initialize the individual lock records and chain them into a
	   singly-linked free list via their nextfree fields */

	for (i=0; i<size; i++) {
		lock_record_init(&pool->lr[i], thread);

		pool->lr[i].nextfree = &pool->lr[i+1];

	/* terminate free list */
	/* after the loop i == size, so this fixes the dangling nextfree of the
	   last record; assumes size >= 1 -- callers always pass at least
	   LOCK_INITIAL_LOCK_RECORDS */

	pool->lr[i-1].nextfree = NULL;

	/* NOTE(review): the declaration of `i` and the final `return pool;`
	   are not visible in this excerpt. */
/* lock_record_alloc_pool ******************************************************

   Allocate a lock record pool. The pool is either taken from the global free
   list or requested from the memory allocator.

   IN:
      t............the thread that will own the lock records
      size.........number of lock records in the pool to allocate
                    (only used when a new pool must be allocated; a recycled
                    pool keeps its original size)

   RETURN VALUE:
      the new lock record pool, with initialized lock records

*******************************************************************************/

static lock_record_pool_t *lock_record_alloc_pool(threadobject *t, int size)
	pthread_mutex_lock(&lock_global_pool_lock);

	if (lock_global_pool) {

		lock_record_pool_t *pool;

		/* pop a pool from the global freelist */

		pool = lock_global_pool;
		lock_global_pool = pool->header.next;

		/* drop the global lock early; the popped pool is now private */

		pthread_mutex_unlock(&lock_global_pool_lock);

		/* re-initialize owner and freelist chaining */

		for (i=0; i < pool->header.size; i++) {
			pool->lr[i].owner = NULL;
			pool->lr[i].nextfree = &pool->lr[i+1];

		/* terminate the free list (i == pool->header.size here) */

		pool->lr[i-1].nextfree = NULL;

	/* no pool available on the global freelist */

	pthread_mutex_unlock(&lock_global_pool_lock);

	/* we have to get a new pool from the allocator */

	return lock_record_alloc_new_pool(t, size);

	/* NOTE(review): the declaration of `i` and the `return pool;` of the
	   recycled-pool branch are not visible in this excerpt. */
/* lock_record_free_pools ******************************************************

   Free the lock record pools in the given linked list. The pools are inserted
   into the global freelist.

   IN:
      pool.........list header

*******************************************************************************/

void lock_record_free_pools(lock_record_pool_t *pool)
	lock_record_pool_header_t *last;

	/* deliberately disabled: see comment below */

	assert(false); /* XXX this function does not match the new locking */
	               /* algorithm. We must find another way to free */
	               /* unused lock records. */

	pthread_mutex_lock(&lock_global_pool_lock);

	/* find the last pool in the list */
	/* NOTE(review): the loop condition (presumably `while (last->next)`)
	   is not visible in this excerpt */

	last = &pool->header;
		last = &last->next->header;

	/* chain it to the lock_global_pool freelist */

	last->next = lock_global_pool;

	/* insert the freed pools into the freelist */

	lock_global_pool = pool;

	pthread_mutex_unlock(&lock_global_pool_lock);
/* lock_record_alloc ***********************************************************

   Allocate a lock record which is owned by the current thread.

   IN:
      t............the current thread

*******************************************************************************/

static lock_record_t *lock_record_alloc(threadobject *t)
	lock_record_pool_t *pool;

	/* When the per-thread free list is empty, grow capacity by doubling the
	   total number of records owned so far (geometric growth). */

	poolsize = t->ee.lockrecordcount ? t->ee.lockrecordcount * 2
									 : LOCK_INITIAL_LOCK_RECORDS;
	pool = lock_record_alloc_pool(t, poolsize);

	/* add it to our per-thread pool list */

	pool->header.next = t->ee.lockrecordpools;
	t->ee.lockrecordpools = pool;
	t->ee.lockrecordcount += pool->header.size;

	/* take the first record from the pool */

	/* pop the record from the freelist */

	t->ee.firstfree = r->nextfree;
	r->nextfree = NULL; /* in order to find invalid uses of nextfree */

	/* NOTE(review): the declarations of `r`/`poolsize`, the empty-freelist
	   check, the assignment of `r` from t->ee.firstfree, and the final
	   `return r;` are not visible in this excerpt. */
/* lock_record_recycle *********************************************************

   Recycle the given lock record. It will be inserted in the appropriate
   per-thread free list.

   IN:
      t............the owner
      r............lock record to recycle (must be unowned and unlinked)

*******************************************************************************/

static inline void lock_record_recycle(threadobject *t, lock_record_t *r)
	/* a record being recycled must not be owned and must not already be
	   on a free list */

	assert(r->owner == NULL);
	assert(r->nextfree == NULL);

	/* push it on the thread's free list */

	r->nextfree = t->ee.firstfree;
	/* NOTE(review): the matching `t->ee.firstfree = r;` is not visible in
	   this excerpt -- confirm against the full source. */
497 /*============================================================================*/
498 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
499 /*============================================================================*/
/* lock_hashtable_init *********************************************************

   Initialize the global hashtable mapping objects to lock records.

*******************************************************************************/

static void lock_hashtable_init(void)
	pthread_mutex_init(&(lock_hashtable.mutex), NULL);

	/* start with a prime-sized, empty table */

	lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
	lock_hashtable.entries = 0;
	lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);

#if defined(ENABLE_STATISTICS)
	/* account for the table memory in the VM statistics */
	size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;

	/* all buckets start out empty */

	MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);

	/* NOTE(review): the #endif terminating the statistics section (and a
	   probable `if (opt_stat)` guard) are not visible in this excerpt. */
/* lock_hashtable_grow *********************************************************

   Grow the lock record hashtable to about twice its current size and
   rehash all entries.

*******************************************************************************/

/* must be called with hashtable mutex locked */
static void lock_hashtable_grow(void)
	lock_record_t **oldtable;
	lock_record_t **newtable;

	/* allocate a new table */

	oldsize = lock_hashtable.size;
	newsize = oldsize*2 + 1; /* XXX should use prime numbers */

	LOCK_LOG(("growing lock hashtable to size %d\n", newsize));

	oldtable = lock_hashtable.ptr;
	newtable = MNEW(lock_record_t *, newsize);

#if defined(ENABLE_STATISTICS)
	size_lock_hashtable += sizeof(lock_record_t *) * newsize;

	MZERO(newtable, lock_record_t *, newsize);

	/* rehash the entries */
	/* each record is re-bucketed by its object's hash modulo the new size
	   and pushed on the front of the new bucket chain */

	for (i = 0; i < oldsize; i++) {

			h = LOCK_HASH(lr->obj);
			newslot = h % newsize;

			lr->hashlink = newtable[newslot];
			newtable[newslot] = lr;

	/* replace the old table */

	lock_hashtable.ptr  = newtable;
	lock_hashtable.size = newsize;

	MFREE(oldtable, lock_record_t *, oldsize);

#if defined(ENABLE_STATISTICS)
	size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;

	/* NOTE(review): declarations of i/h/newslot/lr, the inner loop that
	   walks each bucket chain (saving lr->hashlink before relinking), and
	   the #endif lines are not visible in this excerpt. */
/* lock_hashtable_get_lock_record **********************************************

   Find the lock record for the given object. If it does not exist, yet,
   create it and enter it in the hashtable.

   IN:
      t.................the current thread
      o.................the object to look up

   RETURN VALUE:
      the lock record to use for this object

*******************************************************************************/

static lock_record_t *lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o)
	/* fast path: if the lock word is already fat it points (minus the shape
	   bit) directly at the lock record -- no hashtable access needed */

	lockword = (ptrint) o->monitorPtr;

	if (IS_FAT_LOCK(lockword)) {
		return GET_FAT_LOCK(lockword);

	/* lock the hashtable */

	pthread_mutex_lock(&(lock_hashtable.mutex));

	/* lookup the lock record in the hashtable */

	slot = LOCK_HASH(o) % lock_hashtable.size;
	lr = lock_hashtable.ptr[slot];
	/* NOTE(review): the loop walking the bucket chain comparing lr->obj
	   against o (and its early return after unlocking) is not visible in
	   this excerpt. */

			pthread_mutex_unlock(&(lock_hashtable.mutex));

	/* not found, we must create a new one */

	lr = lock_record_alloc(t);

	LOCK_LOG(("thread %d allocated for %p new lr %p\n",
			  t->index, (void*) o, (void*) lr));

	/* enter it in the hashtable (push on the front of the bucket chain) */

	lr->hashlink = lock_hashtable.ptr[slot];
	lock_hashtable.ptr[slot] = lr;
	lock_hashtable.entries++;

	/* check whether the hash should grow */
	/* grow when the load factor exceeds 4/3 (entries/size > 4/3) */

	if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
		lock_hashtable_grow();

	/* unlock the hashtable */

	pthread_mutex_unlock(&(lock_hashtable.mutex));

	/* return the new lock record */
665 /*============================================================================*/
666 /* OBJECT LOCK INITIALIZATION */
667 /*============================================================================*/
670 /* lock_init_object_lock *******************************************************
672 Initialize the monitor pointer of the given object. The monitor gets
673 initialized to an unlocked state.
675 *******************************************************************************/
677 void lock_init_object_lock(java_objectheader *o)
681 o->monitorPtr = (lock_record_t *) THIN_UNLOCKED;
682 LOCK_CLEAR_FLC_BIT(o);
686 /* lock_get_initial_lock_word **************************************************
688 Returns the initial (unlocked) lock word. The pointer is
689 required in the code generator to set up a virtual
690 java_objectheader for code patch locking.
692 *******************************************************************************/
694 lock_record_t *lock_get_initial_lock_word(void)
696 return (lock_record_t *) THIN_UNLOCKED;
701 /*============================================================================*/
702 /* LOCKING ALGORITHM */
703 /*============================================================================*/
/* lock_record_enter ***********************************************************

   Enter the lock represented by the given lock record. Blocks until the
   record's mutex has been acquired.

   IN:
      t.................the current thread
      lr................the lock record

*******************************************************************************/

static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
	/* NOTE(review): recording t as lr->owner is not visible in this
	   excerpt, although callers (lock_monitor_enter/exit) test
	   lr->owner == t for recursive entry and ownership -- confirm
	   against the full source. */
	pthread_mutex_lock(&(lr->mutex));
/* lock_record_exit ************************************************************

   Release the lock represented by the given lock record.

   IN:
      t.................the current thread
      lr................the lock record

   PRE-CONDITION:
      The current thread must own the lock represented by this lock record.
      This is NOT checked by this function!

*******************************************************************************/

static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
	/* NOTE(review): clearing lr->owner before the unlock is not visible in
	   this excerpt, although lock_record_recycle asserts owner == NULL --
	   confirm against the full source. */
	pthread_mutex_unlock(&(lr->mutex));
/* lock_inflate ****************************************************************

   Inflate the lock of the given object. This may only be called by the
   owner of the monitor of the object.

   IN:
      t............the current thread
      o............the object of which to inflate the lock
      lr...........the lock record to install. The current thread must
                    own the lock of this lock record!

   PRE-CONDITION:
      The current thread must be the owner of this object's monitor AND
      of the lock record's lock!

*******************************************************************************/

static void lock_inflate(threadobject *t, java_objectheader *o, lock_record_t *lr)
	/* get the current lock count */

	lockword = (ptrint) o->monitorPtr;

	if (IS_FAT_LOCK(lockword)) {
		/* already inflated -- it must have been inflated to this record */
		assert(GET_FAT_LOCK(lockword) == lr);

		/* thin lock: only the owner may inflate, so the word minus its
		   count field must equal our pre-computed thin lock value */
		assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );

		/* copy the count from the thin lock */

		lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;

	LOCK_LOG(("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
			  t->index, (void*) o, (long)o->monitorPtr, (int)lr->count));

	/* clear flat-lock-contention bit */

	LOCK_CLEAR_FLC_BIT(o);

	/* notify waiting objects */

	lock_record_notify(t, lr, false);

	/* publish the fat lock: store the record pointer with the shape bit set */

	o->monitorPtr = (lock_record_t *) MAKE_FAT_LOCK(lr);

	/* NOTE(review): the early return of the already-fat branch and the
	   declaration of `lockword` are not visible in this excerpt. */
/* lock_monitor_enter **********************************************************

   Acquire the monitor of the given object. If the current thread already
   owns the monitor, the lock counter is simply increased.

   This function blocks until it can acquire the monitor.

   IN:
      o............the object of which to enter the monitor

   RETURN VALUE:
      true.........the lock has been successfully acquired
      false........an exception has been thrown

*******************************************************************************/

bool lock_monitor_enter(java_objectheader *o)
	/* CAUTION: This code assumes that ptrint is unsigned! */

	/* entering a monitor on a null reference throws NPE */

		exceptions_throw_nullpointerexception();

	thinlock = t->thinlock;

	/* most common case: try to thin-lock an unlocked object */

	if ((lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) == THIN_UNLOCKED) {
		/* success. we locked it */
		/* The Java Memory Model requires a memory barrier here: */

	/* next common case: recursive lock with small recursion count */
	/* We don't have to worry about stale values here, as any stale value */
	/* will indicate another thread holding the lock (or an inflated lock) */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		/* we own this monitor */
		/* check the current recursion count */
		/* XOR with our thin lock leaves only the count field, so the
		   comparison checks count < THIN_LOCK_COUNT_MAX */

		if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
			/* the recursion count is low enough */

			o->monitorPtr = (lock_record_t *) (lockword + THIN_LOCK_COUNT_INCR);

			/* success. we locked it */

		/* recursion count overflow: we must inflate to a fat lock */

		lr = lock_hashtable_get_lock_record(t, o);
		lock_record_enter(t, lr);
		lock_inflate(t, o, lr);

	/* the lock is either contended or fat */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		/* check for recursive entering */
		if (lr->owner == t) {

		/* acquire the mutex of the lock record */

		lock_record_enter(t, lr);

		/* a freshly acquired fat lock must have no recursion */

		assert(lr->count == 0);

	/****** inflation path ******/

	/* first obtain the lock record for this object */

	lr = lock_hashtable_get_lock_record(t, o);

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Enter */
	jvmti_MonitorContendedEntering(false, o);

	/* enter the monitor */

	lock_record_enter(t, lr);

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Entered */
	jvmti_MonitorContendedEntering(true, o);

	/* keep trying while the object is still thin-locked by someone else */

	while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {

		/* Set the flat lock contention bit to let the owning thread */
		/* know that we want to be notified of unlocking. */

		LOCK_LOG(("thread %d set flc bit on %p lr %p\n",
				  t->index, (void*) o, (void*) lr));

		/* try to lock the object */

		if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
			/* we can inflate the lock ourselves */
			LOCK_LOG(("thread %d inflating lock of %p to lr %p\n",
					  t->index, (void*) o, (void*) lr));
			lock_inflate(t, o, lr);

			/* wait until another thread sees the flc bit and notifies us of unlocking */
			LOCK_LOG(("thread %d waiting for notification on %p lr %p\n",
					  t->index, (void*) o, (void*) lr));

			lock_record_wait(t, lr, 0, 0);

	/* we own the inflated lock now */

	/* NOTE(review): this excerpt is missing several lines of this function:
	   declarations (t, thinlock, lockword, lr), memory barrier calls, the
	   `return true;`/`return false;` statements, the recursive-entry count
	   increment, the LOCK_SET_FLC_BIT call, the #endif lines, and various
	   closing braces -- confirm against the full source. */
/* lock_monitor_exit ***********************************************************

   Decrement the counter of a (currently owned) monitor. If the counter
   reaches zero, release the monitor.

   If the current thread is not the owner of the monitor, an
   IllegalMonitorState exception is thrown.

   IN:
      o............the object of which to exit the monitor

   RETURN VALUE:
      true.........everything ok,
      false........an exception has been thrown

*******************************************************************************/

bool lock_monitor_exit(java_objectheader *o)
	/* exiting a monitor on a null reference throws NPE */

		exceptions_throw_nullpointerexception();

	/* We don't have to worry about stale values here, as any stale value */
	/* will indicate that we don't own the lock. */

	lockword = (ptrint) o->monitorPtr;
	thinlock = t->thinlock;

	/* most common case: we release a thin lock that we hold once */

	if (lockword == thinlock) {
		/* memory barrier for Java Memory Model */

		o->monitorPtr = THIN_UNLOCKED;
		/* memory barrier for thin locking */

		/* check if there has been a flat lock contention on this object */

		if (LOCK_TEST_FLC_BIT(o)) {

			LOCK_LOG(("thread %d saw flc bit on %p %s\n",
					  t->index, (void*) o, o->vftbl->class->name->text));

			/* there has been a contention on this thin lock */

			lr = lock_hashtable_get_lock_record(t, o);

			LOCK_LOG(("thread %d for %p got lr %p\n",
					  t->index, (void*) o, (void*) lr));

			lock_record_enter(t, lr);

			/* re-check under the record's mutex: the bit may have been
			   cleared by an inflation in the meantime */

			if (LOCK_TEST_FLC_BIT(o)) {
				/* notify a thread that it can try to inflate the lock now */

				lock_record_notify(t, lr, true);

			lock_record_exit(t, lr);

	/* next common case: we release a recursive lock, count > 0 */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		/* just decrement the embedded count field; lock stays held */

		o->monitorPtr = (lock_record_t *) (lockword - THIN_LOCK_COUNT_INCR);

	/* either the lock is fat, or we don't hold it at all */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		/* check if we own this monitor */
		/* We don't have to worry about stale values here, as any stale value */
		/* will be != t and thus fail this check. */

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();

		/* { the current thread `t` owns the lock record `lr` on object `o` } */

		if (lr->count != 0) {
			/* we had locked this one recursively. just decrement, it will */
			/* still be locked. */

		/* unlock this lock record */

		pthread_mutex_unlock(&(lr->mutex));

	/* legal thin lock cases have been handled above, so this is an error */

	exceptions_throw_illegalmonitorstateexception();

	/* NOTE(review): this excerpt is missing declarations (t, lockword,
	   thinlock, lr), memory barrier calls, the various `return true;` /
	   `return false;` statements, the count decrement, the owner clearing
	   before the final unlock, and closing braces -- confirm against the
	   full source. */
/* lock_record_add_waiter ******************************************************

   Add a thread to the list of waiting threads of a lock record.

   IN:
      lr...........the lock record
      thread.......the thread to add

*******************************************************************************/

static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
	lock_waiter_t *waiter;

	/* allocate a waiter data structure */

	waiter = NEW(lock_waiter_t);

#if defined(ENABLE_STATISTICS)
	size_lock_waiter += sizeof(lock_waiter_t);

	/* push the new waiter on the front of the record's waiter list */

	waiter->waiter = thread;
	waiter->next   = lr->waiters;

	lr->waiters = waiter;

	/* NOTE(review): the #endif of the statistics section is not visible in
	   this excerpt. */
/* lock_record_remove_waiter ***************************************************

   Remove a thread from the list of waiting threads of a lock record.

   IN:
      lr...........the lock record
      thread.......the current thread

   PRE-CONDITION:
      The current thread must be the owner of the lock record.

*******************************************************************************/

static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
	lock_waiter_t **link;

	/* walk the singly-linked waiter list via a pointer-to-pointer so the
	   matching node can be unlinked in place */

	link = &(lr->waiters);
	while ((w = *link)) {
		if (w->waiter == thread) {
			/* NOTE(review): the unlink (`*link = w->next;`) is not visible
			   in this excerpt */

			/* free the waiter data structure */

			FREE(w, lock_waiter_t);

#if defined(ENABLE_STATISTICS)
			size_lock_waiter -= sizeof(lock_waiter_t);

	/* this should never happen */

	vm_abort("lock_record_remove_waiter: waiting thread not found in list of waiters\n");

	/* NOTE(review): the declaration of `w`, the early return after freeing,
	   the list-advance (`link = &w->next;`), and the #endif are not visible
	   in this excerpt. */
/* lock_record_wait ************************************************************

   Wait on a lock record for a given (maximum) amount of time.

   IN:
      thread.......the current thread
      lr...........the lock record
      millis.......milliseconds of timeout
      nanos........nanoseconds of timeout

   PRE-CONDITION:
      The current thread must be the owner of the lock record.
      This is NOT checked by this function!

*******************************************************************************/

static void lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
	bool wasinterrupted;

	/* { the thread t owns the fat lock record lr on the object o } */

	/* register us as waiter for this object */

	lock_record_add_waiter(lr, thread);

	/* remember the old lock count */
	/* Object.wait() releases the monitor completely even when it was
	   entered recursively; the count is restored after re-entry. */

	lockcount = lr->count;

	/* unlock this record */
	/* NOTE(review): resetting lr->count to 0 before the exit is not visible
	   in this excerpt -- confirm against the full source */

	lock_record_exit(thread, lr);

	/* wait until notified/interrupted/timed out */

	wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);

	/* re-enter the monitor */

	lock_record_enter(thread, lr);

	/* remove us from the list of waiting threads */

	lock_record_remove_waiter(lr, thread);

	/* restore the old lock count */

	lr->count = lockcount;

	/* if we have been interrupted, throw the appropriate exception */
	/* NOTE(review): the `if (wasinterrupted)` guard and the declaration of
	   `lockcount` are not visible in this excerpt */

		exceptions_throw_interruptedexception();
/* lock_monitor_wait ***********************************************************

   Wait on an object for a given (maximum) amount of time.

   IN:
      t............the current thread
      o............the object
      millis.......milliseconds of timeout
      nanos........nanoseconds of timeout

   PRE-CONDITION:
      The current thread must be the owner of the object's monitor.

*******************************************************************************/

static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nanos)
	lockword = (ptrint) o->monitorPtr;

	/* check if we own this monitor */
	/* We don't have to worry about stale values here, as any stale value */
	/* will fail this check. */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();

		/* it's a thin lock */
		/* (this branch is taken when the lock word is thin) */

		if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
			exceptions_throw_illegalmonitorstateexception();

		/* inflate this lock: waiting is only possible on a fat lock */

		lr = lock_hashtable_get_lock_record(t, o);
		lock_record_enter(t, lr);
		lock_inflate(t, o, lr);

	/* { the thread t owns the fat lock record lr on the object o } */

	lock_record_wait(t, lr, millis, nanos);

	/* NOTE(review): declarations of `lockword`/`lr`, the early returns
	   after throwing, and the else/brace structure are not visible in this
	   excerpt. */
/* lock_record_notify **********************************************************

   Notify one thread or all threads waiting on the given lock record.

   IN:
      t............the current thread
      lr...........the lock record
      one..........if true, only notify one thread

   PRE-CONDITION:
      The current thread must be the owner of the lock record.
      This is NOT checked by this function!

*******************************************************************************/

static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
	lock_waiter_t *waiter;
	threadobject *waitingthread;

	/* { the thread t owns the fat lock record lr on the object o } */

	/* for each waiter: */

	for (waiter = lr->waiters; waiter; waiter = waiter->next) {

		/* signal the waiting thread */

		waitingthread = waiter->waiter;

		/* the waiter's waitmutex serializes against its wait loop; the
		   condition is only signalled when the thread is actually asleep,
		   while the `signaled` flag is set unconditionally so a thread not
		   yet sleeping will observe the wakeup */

		pthread_mutex_lock(&waitingthread->waitmutex);
		if (waitingthread->sleeping)
			pthread_cond_signal(&waitingthread->waitcond);
		waitingthread->signaled = true;
		pthread_mutex_unlock(&waitingthread->waitmutex);

		/* if we should only wake one, we are done */
		/* NOTE(review): the `if (one) break;` implied by this comment is
		   not visible in this excerpt */
/* lock_monitor_notify *********************************************************

   Notify one thread or all threads waiting on the given object.

   IN:
      t............the current thread
      o............the object
      one..........if true, only notify one thread

   PRE-CONDITION:
      The current thread must be the owner of the object's monitor.

*******************************************************************************/

static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
	lockword = (ptrint) o->monitorPtr;

	/* check if we own this monitor */
	/* We don't have to worry about stale values here, as any stale value */
	/* will fail this check. */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();

		/* it's a thin lock */
		/* (this branch is taken when the lock word is thin) */

		if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
			exceptions_throw_illegalmonitorstateexception();

		/* inflate this lock: notification is only possible on a fat lock */

		lr = lock_hashtable_get_lock_record(t, o);
		lock_record_enter(t, lr);
		lock_inflate(t, o, lr);

	/* { the thread t owns the fat lock record lr on the object o } */

	lock_record_notify(t, lr, one);

	/* NOTE(review): declarations of `lockword`/`lr`, the early returns
	   after throwing, and the else/brace structure are not visible in this
	   excerpt. */
1360 /*============================================================================*/
1361 /* INQUIRY FUNCTIONS                                                       */
1362 /*============================================================================*/
/* lock_is_held_by_current_thread **********************************************

   Return true if the current thread owns the monitor of the given object.

   IN:
      o............the object

   RETURN VALUE:
      true, if the current thread holds the lock of this object.

*******************************************************************************/

bool lock_is_held_by_current_thread(java_objectheader *o)
	/* check if we own this monitor */
	/* We don't have to worry about stale values here, as any stale value */
	/* will fail this check. */

	lockword = (ptrint) o->monitorPtr;

	if (IS_FAT_LOCK(lockword)) {

		/* it's a fat lock */

		lr = GET_FAT_LOCK(lockword);

		return (lr->owner == t);

		/* it's a thin lock: we hold it iff the word minus its count field
		   equals our pre-computed thin lock value */

		return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);

	/* NOTE(review): declarations of `lockword`/`lr`/`t` (and the assignment
	   of t from THREADOBJECT) are not visible in this excerpt. */
1406 /*============================================================================*/
1407 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1408 /*============================================================================*/
1411 /* lock_wait_for_object ********************************************************
1413 Wait for the given object.
1416 o............the object
1417 millis.......milliseconds to wait
1418 nanos........nanoseconds to wait
1420 *******************************************************************************/
1422 void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
1424 threadobject *thread;
1426 thread = THREADOBJECT;
1428 lock_monitor_wait(thread, o, millis, nanos);
1432 /* lock_notify_object **********************************************************
1434 Notify one thread waiting on the given object.
1437 o............the object
1439 *******************************************************************************/
1441 void lock_notify_object(java_objectheader *o)
1443 threadobject *thread;
1445 thread = THREADOBJECT;
1447 lock_monitor_notify(thread, o, true);
1451 /* lock_notify_all_object ******************************************************
1453 Notify all threads waiting on the given object.
1456 o............the object
1458 *******************************************************************************/
1460 void lock_notify_all_object(java_objectheader *o)
1462 threadobject *thread;
1464 thread = THREADOBJECT;
1466 lock_monitor_notify(thread, o, false);
1471 * These are local overrides for various environment variables in Emacs.
1472 * Please do not remove this and leave it at the end of the file, where
1473 * Emacs will automagically detect them.
1474 * ---------------------------------------------------------------------
1477 * indent-tabs-mode: t
1481 * vim:noexpandtab:sw=4:ts=4: