1 /* src/threads/native/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
4 C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5 E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6 J. Wenninger, Institut f. Computersprachen - TU Wien
8 This file is part of CACAO.
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2, or (at
13 your option) any later version.
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 $Id: threads.c 4903 2006-05-11 12:48:43Z edwin $
40 #include "mm/memory.h"
42 #include "threads/native/lock.h"
43 #include "threads/native/threads.h"
45 #include "vm/global.h"
46 #include "vm/exceptions.h"
47 #include "vm/stringlocal.h"
50 #include "vmcore/options.h"
52 #if defined(ENABLE_STATISTICS)
53 # include "vmcore/statistics.h"
56 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
60 /* includes for atomic instructions: */
62 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
63 #include "threads/native/generic-primitives.h"
65 #include "machine-instr.h"
68 #if defined(ENABLE_JVMTI)
69 #include "native/jvmti/cacaodbg.h"
73 /******************************************************************************/
74 /* DEBUGGING MACROS */
75 /******************************************************************************/
77 /* #define LOCK_VERBOSE */
79 #if defined(LOCK_VERBOSE)
80 #define LOCK_LOG(args) do { printf args; fflush(stdout); } while (0)
82 #define LOCK_LOG(args)
86 /******************************************************************************/
88 /******************************************************************************/
90 /* number of lock records in the first pool allocated for a thread */
91 #define LOCK_INITIAL_LOCK_RECORDS 8
93 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
95 #define LOCK_HASH(obj) ((ptrint)(obj))
/* NOTE(review): the hash is the raw object address; low bits are not
   mixed, so object alignment concentrates entries in a subset of
   buckets. The prime table size partially compensates -- confirm
   distribution is acceptable. */
/* CAS returning the value found at *address before the attempt. */
/* NOTE(review): casting to (long *) assumes sizeof(long) ==
   sizeof(void *); this does not hold on LLP64 targets -- confirm the
   supported platforms. */
97 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
98 ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
100 /* CAUTION: oldvalue is evaluated twice! */
101 #define COMPARE_AND_SWAP_SUCCEEDS(address, oldvalue, newvalue) \
102 (compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)) == (long)(oldvalue))
105 /******************************************************************************/
106 /* MACROS FOR THE FLAT LOCK CONTENTION BIT */
107 /******************************************************************************/
109 #define LOCK_SET_FLC_BIT(obj) ((obj)->hdrflags |= HDRFLAG_FLC)
110 #define LOCK_CLEAR_FLC_BIT(obj) ((obj)->hdrflags &= ~ HDRFLAG_FLC)
111 #define LOCK_TEST_FLC_BIT(obj) ((obj)->hdrflags & HDRFLAG_FLC)
114 /******************************************************************************/
115 /* MACROS FOR THIN/FAT LOCKS */
116 /******************************************************************************/
118 /* We use a variant of the tasuki locks described in the paper
120 * Tamiya Onodera, Kiyokuni Kawachiya
121 * A Study of Locking Objects with Bimodal Fields
122 * Proceedings of the ACM OOPSLA '99, pp. 223-237
125 * The underlying thin locks are a variant of the thin locks described in
127 * Bacon, Konuru, Murthy, Serrano
128 * Thin Locks: Featherweight Synchronization for Java
129 * Proceedings of the ACM Conference on Programming Language Design and
130 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
133 * In thin lock mode the lockword (monitorPtr) looks like this:
135 * ,----------------------,-----------,---,
136 * | thread ID | count | 0 |
137 * `----------------------'-----------'---´
139 * thread ID......the 'index' of the owning thread, or 0
140 * count..........number of times the lock has been entered minus 1
141 * 0..............the shape bit is 0 in thin lock mode
143 * In fat lock mode it is basically a lock_record_t *:
145 * ,----------------------------------,---,
146 * | lock_record_t * (without LSB) | 1 |
147 * `----------------------------------'---´
149 * 1..............the shape bit is 1 in fat lock mode
152 #if SIZEOF_VOID_P == 8
153 #define THIN_LOCK_WORD_SIZE 64
155 #define THIN_LOCK_WORD_SIZE 32
/* LSB of the lock word: 0 = thin lock, 1 = fat lock (lock_record_t *). */
158 #define THIN_LOCK_SHAPE_BIT 0x01
/* the all-zero word: no owner, count 0, thin shape */
160 #define THIN_UNLOCKED 0
/* recursion count occupies bits [1..8] of the thin lock word */
162 #define THIN_LOCK_COUNT_SHIFT 1
163 #define THIN_LOCK_COUNT_SIZE 8
/* adding INCR bumps the count field by one without touching TID/shape */
164 #define THIN_LOCK_COUNT_INCR (1 << THIN_LOCK_COUNT_SHIFT)
165 #define THIN_LOCK_COUNT_MAX ((1 << THIN_LOCK_COUNT_SIZE) - 1)
166 #define THIN_LOCK_COUNT_MASK (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
/* the thread-index field fills the remaining high bits */
168 #define THIN_LOCK_TID_SHIFT (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
169 #define THIN_LOCK_TID_SIZE (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
171 #define IS_THIN_LOCK(lockword) (!((lockword) & THIN_LOCK_SHAPE_BIT))
172 #define IS_FAT_LOCK(lockword) ((lockword) & THIN_LOCK_SHAPE_BIT)
/* fat lock: strip the shape bit to recover the lock_record_t pointer
   (valid because lock records are at least 2-byte aligned) */
174 #define GET_FAT_LOCK(lockword) ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
175 #define MAKE_FAT_LOCK(ptr) ((ptrint)(ptr) | THIN_LOCK_SHAPE_BIT)
/* TID + shape bits only; used to test "owned by thread X, any count" */
177 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
180 /******************************************************************************/
181 /* GLOBAL VARIABLES */
182 /******************************************************************************/
184 /* global lock record pool list header */
185 lock_record_pool_t *lock_global_pool;
187 /* mutex for synchronizing access to the global pool */
188 pthread_mutex_t lock_global_pool_lock;
190 /* hashtable mapping objects to lock records */
191 static lock_hashtable_t lock_hashtable;
194 /******************************************************************************/
196 /******************************************************************************/
198 static void lock_hashtable_init(void);
199 static lock_record_t * lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o);
201 static lock_record_t * lock_record_alloc(threadobject *t);
203 static void lock_record_enter(threadobject *t, lock_record_t *lr);
204 static void lock_record_exit(threadobject *t, lock_record_t *lr);
205 static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
206 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
209 /*============================================================================*/
210 /* INITIALIZATION OF DATA STRUCTURES */
211 /*============================================================================*/
214 /* lock_init *******************************************************************
216 Initialize global data for locking.
218 *******************************************************************************/
/* one-time startup: create the mutex guarding the global free pool list */
222 pthread_mutex_init(&lock_global_pool_lock, NULL);
/* and the object -> lock-record hashtable */
224 lock_hashtable_init();
228 /* lock_record_init ************************************************************
230 Initialize a lock record.
233 r............the lock record to initialize
234 t............will become the owner
236 *******************************************************************************/
238 static void lock_record_init(lock_record_t *r, threadobject *t)
/* NOTE(review): this listing shows only the mutex initialization;
   owner/count/waiters/freelist setup is presumably in lines not shown
   here -- confirm against the full source before relying on it. */
248 pthread_mutex_init(&(r->mutex), NULL);
252 /* lock_init_execution_env *****************************************************
254 Initialize the execution environment for a thread.
257 thread.......the thread
259 *******************************************************************************/
261 void lock_init_execution_env(threadobject *thread)
/* Reset the thread's lock-record bookkeeping: empty private freelist,
   no pools owned yet, zero records allocated. */
263 thread->ee.firstfree = NULL;
264 thread->ee.lockrecordpools = NULL;
265 thread->ee.lockrecordcount = 0;
270 /* lock_pre_compute_thinlock ***************************************************
272 Pre-compute the thin lock value for a thread index.
275 index........the thead index (>= 1)
278 the thin lock value for this thread index
280 *******************************************************************************/
282 ptrint lock_pre_compute_thinlock(s4 index)
284 return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
289 /*============================================================================*/
290 /* LOCK RECORD MANAGEMENT */
291 /*============================================================================*/
294 /* lock_record_alloc_new_pool **************************************************
296 Get a new lock record pool from the memory allocator.
299 thread.......the thread that will own the lock records
300 size.........number of lock records in the pool to allocate
303 the new lock record pool, with initialized lock records
305 *******************************************************************************/
307 static lock_record_pool_t *lock_record_alloc_new_pool(threadobject *thread, int size)
/* Allocate a fresh pool of `size` lock records from the VM allocator
   and chain them into an intra-pool freelist.
   PRECONDITION: size >= 1 -- the lr[i - 1] freelist termination below
   indexes out of bounds for size == 0.
   NOTE(review): mem_alloc's result is used unchecked; presumably
   mem_alloc aborts on OOM -- confirm. */
309 lock_record_pool_t *pool;
312 /* get the pool from the memory allocator */
314 pool = mem_alloc(sizeof(lock_record_pool_header_t)
315 + sizeof(lock_record_t) * size);
317 #if defined(ENABLE_STATISTICS)
319 size_lock_record_pool += sizeof(lock_record_pool_header_t) +
320 sizeof(lock_record_t) * size;
323 /* initialize the pool header */
325 pool->header.size = size;
327 /* initialize the individual lock records */
329 for (i = 0; i < size; i++) {
330 lock_record_init(&pool->lr[i], thread);
/* each record points to its successor; the last link is fixed below */
332 pool->lr[i].nextfree = &pool->lr[i + 1];
335 /* terminate free list */
337 pool->lr[i - 1].nextfree = NULL;
343 /* lock_record_alloc_pool ******************************************************
345 Allocate a lock record pool. The pool is either taken from the global free
346 list or requested from the memory allocator.
349 thread.......the thread that will own the lock records
350 size.........number of lock records in the pool to allocate
353 the new lock record pool, with initialized lock records
355 *******************************************************************************/
357 static lock_record_pool_t *lock_record_alloc_pool(threadobject *t, int size)
/* Hand out a pool of lock records: reuse the head of the global
   freelist if available, otherwise allocate a new pool.
   NOTE(review): when a pool is popped from the global list its actual
   header.size is used, NOT the requested `size` -- callers must read
   pool->header.size rather than assume `size` records. */
359 pthread_mutex_lock(&lock_global_pool_lock);
361 if (lock_global_pool != NULL) {
363 lock_record_pool_t *pool;
365 /* pop a pool from the global freelist */
367 pool = lock_global_pool;
368 lock_global_pool = pool->header.next;
/* drop the global lock early; the popped pool is now thread-private */
370 pthread_mutex_unlock(&lock_global_pool_lock);
372 /* re-initialize owner and freelist chaining */
374 for (i = 0; i < pool->header.size; i++) {
375 pool->lr[i].owner = NULL;
376 pool->lr[i].nextfree = &pool->lr[i + 1];
378 pool->lr[i - 1].nextfree = NULL;
383 pthread_mutex_unlock(&lock_global_pool_lock);
385 /* we have to get a new pool from the allocator */
387 return lock_record_alloc_new_pool(t, size);
391 /* lock_record_free_pools ******************************************************
393 Free the lock record pools in the given linked list. The pools are inserted
394 into the global freelist.
397 pool.........list header
399 *******************************************************************************/
401 void lock_record_free_pools(lock_record_pool_t *pool)
/* Return a linked list of pools to the global freelist.
   Currently DISABLED (assert(false) below): records in these pools may
   still be referenced from the lock hashtable, so recycling them this
   way is unsafe under the current algorithm. */
403 lock_record_pool_header_t *last;
405 assert(false); /* XXX this function does not match the new locking */
406 /* algorithm. We must find another way to free */
407 /* unused lock records. */
412 pthread_mutex_lock(&lock_global_pool_lock);
414 /* find the last pool in the list */
416 last = &pool->header;
419 last = &last->next->header;
421 /* chain it to the lock_global_pool freelist */
423 last->next = lock_global_pool;
425 /* insert the freed pools into the freelist */
427 lock_global_pool = pool;
429 pthread_mutex_unlock(&lock_global_pool_lock);
433 /* lock_record_alloc ***********************************************************
435 Allocate a lock record which is owned by the current thread.
438 t............the current thread
440 *******************************************************************************/
442 static lock_record_t *lock_record_alloc(threadobject *t)
/* Pop a lock record from the current thread's private freelist,
   refilling it from a pool when empty. Pool sizes grow geometrically
   (double the running count) to amortize allocation.
   NOTE(review): the freelist-empty check and the declaration/return of
   the record `r` are in lines not shown in this listing -- confirm. */
451 lock_record_pool_t *pool;
455 poolsize = t->ee.lockrecordcount ? t->ee.lockrecordcount * 2
456 : LOCK_INITIAL_LOCK_RECORDS;
457 pool = lock_record_alloc_pool(t, poolsize);
459 /* add it to our per-thread pool list */
461 pool->header.next = t->ee.lockrecordpools;
462 t->ee.lockrecordpools = pool;
/* use the pool's actual size: lock_record_alloc_pool may return a
   recycled pool with a different size than requested */
463 t->ee.lockrecordcount += pool->header.size;
465 /* take the first record from the pool */
469 /* pop the record from the freelist */
471 t->ee.firstfree = r->nextfree;
473 r->nextfree = NULL; /* in order to find invalid uses of nextfree */
480 /* lock_record_recycle *********************************************************
482 Recycle the given lock record. It will be inserted in the appropriate
486 t............the owner
487 r............lock record to recycle
489 *******************************************************************************/
491 static inline void lock_record_recycle(threadobject *t, lock_record_t *r)
/* Push an unowned, unchained record back onto the thread's freelist.
   NOTE(review): the final `t->ee.firstfree = r;` is presumably in a
   line not shown in this listing -- confirm. */
495 assert(r->owner == NULL);
496 assert(r->nextfree == NULL);
498 r->nextfree = t->ee.firstfree;
504 /*============================================================================*/
505 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
506 /*============================================================================*/
509 /* lock_hashtable_init *********************************************************
511 Initialize the global hashtable mapping objects to lock records.
513 *******************************************************************************/
515 static void lock_hashtable_init(void)
/* Create the global object -> lock-record table: its mutex, an
   initial prime-sized bucket array, and zeroed chains. */
517 pthread_mutex_init(&(lock_hashtable.mutex), NULL);
519 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
520 lock_hashtable.entries = 0;
521 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
523 #if defined(ENABLE_STATISTICS)
525 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
/* all buckets start as empty chains */
528 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
532 /* lock_hashtable_grow *********************************************************
534 Grow the lock record hashtable to about twice its current size and
537 *******************************************************************************/
539 /* must be called with hashtable mutex locked */
540 static void lock_hashtable_grow(void)
/* Roughly double the bucket array and rehash every chained record.
   Safe only under lock_hashtable.mutex (see note above). */
544 lock_record_t **oldtable;
545 lock_record_t **newtable;
552 /* allocate a new table */
554 oldsize = lock_hashtable.size;
555 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
557 LOCK_LOG(("growing lock hashtable to size %d\n", newsize));
559 oldtable = lock_hashtable.ptr;
560 newtable = MNEW(lock_record_t *, newsize);
562 #if defined(ENABLE_STATISTICS)
564 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
567 MZERO(newtable, lock_record_t *, newsize);
569 /* rehash the entries */
/* NOTE(review): the inner chain-walk (saving lr->hashlink before
   relinking) is in lines not shown in this listing -- confirm. */
571 for (i = 0; i < oldsize; i++) {
576 h = LOCK_HASH(lr->obj);
577 newslot = h % newsize;
/* head-insert into the new bucket; per-bucket chain order reverses */
579 lr->hashlink = newtable[newslot];
580 newtable[newslot] = lr;
586 /* replace the old table */
588 lock_hashtable.ptr = newtable;
589 lock_hashtable.size = newsize;
591 MFREE(oldtable, lock_record_t *, oldsize);
593 #if defined(ENABLE_STATISTICS)
595 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
600 /* lock_hashtable_get_lock_record **********************************************
602 Find the lock record for the given object. If it does not exists, yet,
603 create it and enter it in the hashtable.
606 t.................the current thread
607 o.................the object to look up
610 the lock record to use for this object
612 *******************************************************************************/
614 static lock_record_t *lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o)
/* Map object -> lock record, creating and registering a record on
   first use. Fast path: if the lock word is already fat, the record
   pointer is embedded in it and no hashtable access is needed. */
620 lockword = (ptrint) o->monitorPtr;
622 if (IS_FAT_LOCK(lockword)) {
623 return GET_FAT_LOCK(lockword);
626 /* lock the hashtable */
628 pthread_mutex_lock(&(lock_hashtable.mutex));
630 /* lookup the lock record in the hashtable */
632 slot = LOCK_HASH(o) % lock_hashtable.size;
633 lr = lock_hashtable.ptr[slot];
/* NOTE(review): the chain walk comparing lr->obj against o (returning
   an existing record after the unlock below) is in lines not shown in
   this listing -- confirm. */
636 pthread_mutex_unlock(&(lock_hashtable.mutex));
643 /* not found, we must create a new one */
645 lr = lock_record_alloc(t);
647 LOCK_LOG(("thread %d allocated for %p new lr %p\n",
648 t->index, (void*) o, (void*) lr));
650 /* enter it in the hashtable */
652 lr->hashlink = lock_hashtable.ptr[slot];
653 lock_hashtable.ptr[slot] = lr;
654 lock_hashtable.entries++;
656 /* check whether the hash should grow */
/* grow when the load factor exceeds 4/3 */
658 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
659 lock_hashtable_grow();
662 /* unlock the hashtable */
664 pthread_mutex_unlock(&(lock_hashtable.mutex));
666 /* return the new lock record */
672 /*============================================================================*/
673 /* OBJECT LOCK INITIALIZATION */
674 /*============================================================================*/
677 /* lock_init_object_lock *******************************************************
679 Initialize the monitor pointer of the given object. The monitor gets
680 initialized to an unlocked state.
682 *******************************************************************************/
684 void lock_init_object_lock(java_objectheader *o)
/* Reset a (newly created) object's monitor to the unlocked thin-lock
   state and clear any stale flat-lock-contention flag. */
688 o->monitorPtr = (lock_record_t *) THIN_UNLOCKED;
689 LOCK_CLEAR_FLC_BIT(o);
693 /* lock_get_initial_lock_word **************************************************
695 Returns the initial (unlocked) lock word. The pointer is
696 required in the code generator to set up a virtual
697 java_objectheader for code patch locking.
699 *******************************************************************************/
701 lock_record_t *lock_get_initial_lock_word(void)
703 return (lock_record_t *) THIN_UNLOCKED;
708 /*============================================================================*/
709 /* LOCKING ALGORITHM */
710 /*============================================================================*/
713 /* lock_record_enter ***********************************************************
715 Enter the lock represented by the given lock record.
718 t.................the current thread
719 lr................the lock record
721 *******************************************************************************/
723 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
/* Blocking acquire of the fat lock's mutex.
   NOTE(review): recording ownership (lr->owner = t) is presumably done
   in a line not shown in this listing -- confirm; only the mutex
   acquisition is visible here. */
725 pthread_mutex_lock(&(lr->mutex));
730 /* lock_record_exit ************************************************************
732 Release the lock represented by the given lock record.
735 t.................the current thread
736 lr................the lock record
739 The current thread must own the lock represented by this lock record.
740 This is NOT checked by this function!
742 *******************************************************************************/
744 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
/* Release the fat lock's mutex. Caller must own it (not checked).
   NOTE(review): clearing lr->owner is presumably done in a line not
   shown in this listing -- confirm; only the unlock is visible. */
747 pthread_mutex_unlock(&(lr->mutex));
751 /* lock_inflate ****************************************************************
753 Inflate the lock of the given object. This may only be called by the
754 owner of the monitor of the object.
757 t............the current thread
758 o............the object of which to inflate the lock
759 lr...........the lock record to install. The current thread must
760 own the lock of this lock record!
763 The current thread must be the owner of this object's monitor AND
764 of the lock record's lock!
766 *******************************************************************************/
768 static void lock_inflate(threadobject *t, java_objectheader *o, lock_record_t *lr)
/* Convert o's thin lock into the fat lock `lr`. Caller must own both
   o's monitor and lr's mutex. If already inflated to lr, this is a
   no-op (the early-return after the assert is in a line not shown). */
772 /* get the current lock count */
774 lockword = (ptrint) o->monitorPtr;
776 if (IS_FAT_LOCK(lockword)) {
777 assert(GET_FAT_LOCK(lockword) == lr);
/* must be a thin lock held by this very thread */
780 assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );
782 /* copy the count from the thin lock */
784 lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
787 LOCK_LOG(("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
788 t->index, (void*) o, (long)o->monitorPtr, (int)lr->count));
790 /* clear flat-lock-contention bit */
792 LOCK_CLEAR_FLC_BIT(o);
794 /* notify waiting objects */
/* wake ALL contenders blocked in the flc protocol so they re-check */
796 lock_record_notify(t, lr, false);
/* publish the fat lock word; this is the inflation point */
800 o->monitorPtr = (lock_record_t *) MAKE_FAT_LOCK(lr);
804 /* lock_monitor_enter **********************************************************
806 Acquire the monitor of the given object. If the current thread already
807 owns the monitor, the lock counter is simply increased.
809 This function blocks until it can acquire the monitor.
812 t............the current thread
813 o............the object of which to enter the monitor
816 true.........the lock has been successfully acquired
817 false........an exception has been thrown
819 *******************************************************************************/
821 bool lock_monitor_enter(java_objectheader *o)
/* Acquire o's monitor, blocking if necessary. Returns true on
   success, false after throwing (e.g. NullPointerException).
   NOTE(review): this listing omits several lines (declarations of t,
   lockword, thinlock, lr; various return statements; memory-barrier
   calls) -- comments below describe only what is visible. */
824 /* CAUTION: This code assumes that ptrint is unsigned! */
829 exceptions_throw_nullpointerexception();
835 thinlock = t->thinlock;
837 /* most common case: try to thin-lock an unlocked object */
839 if ((lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) == THIN_UNLOCKED) {
840 /* success. we locked it */
841 /* The Java Memory Model requires a memory barrier here: */
846 /* next common case: recursive lock with small recursion count */
847 /* We don't have to worry about stale values here, as any stale value */
848 /* will indicate another thread holding the lock (or an inflated lock) */
850 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
851 /* we own this monitor */
852 /* check the current recursion count */
/* XOR isolates the count bits (TID and shape bits are equal),
   so this compares the count field against its maximum */
854 if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
856 /* the recursion count is low enough */
858 o->monitorPtr = (lock_record_t *) (lockword + THIN_LOCK_COUNT_INCR);
860 /* success. we locked it */
866 /* recursion count overflow */
/* count field saturated: inflate to a fat lock to keep counting */
868 lr = lock_hashtable_get_lock_record(t, o);
869 lock_record_enter(t, lr);
870 lock_inflate(t, o, lr);
877 /* the lock is either contented or fat */
882 if (IS_FAT_LOCK(lockword)) {
884 lr = GET_FAT_LOCK(lockword);
886 /* check for recursive entering */
887 if (lr->owner == t) {
892 /* acquire the mutex of the lock record */
894 lock_record_enter(t, lr);
/* a freshly acquired fat lock must have count 0 */
896 assert(lr->count == 0);
901 /****** inflation path ******/
/* thin lock held by ANOTHER thread: use the flc-bit protocol to
   wait until we can either grab the lock or inflate it ourselves */
903 /* first obtain the lock record for this object */
905 lr = lock_hashtable_get_lock_record(t, o);
907 #if defined(ENABLE_JVMTI)
908 /* Monitor Contended Enter */
909 jvmti_MonitorContendedEntering(false, o);
911 /* enter the monitor */
913 lock_record_enter(t, lr);
916 #if defined(ENABLE_JVMTI)
917 /* Monitor Contended Entered */
918 jvmti_MonitorContendedEntering(true, o);
/* loop until the lock word turns fat (someone inflated it) */
923 while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {
924 /* Set the flat lock contention bit to let the owning thread */
925 /* know that we want to be notified of unlocking. */
929 LOCK_LOG(("thread %d set flc bit on %p lr %p\n",
930 t->index, (void*) o, (void*) lr));
932 /* try to lock the object */
934 if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
935 /* we can inflate the lock ourselves */
936 LOCK_LOG(("thread %d inflating lock of %p to lr %p\n",
937 t->index, (void*) o, (void*) lr));
938 lock_inflate(t, o, lr);
941 /* wait until another thread sees the flc bit and notifies us of unlocking */
942 LOCK_LOG(("thread %d waiting for notification on %p lr %p\n",
943 t->index, (void*) o, (void*) lr));
944 lock_record_wait(t, lr, 0, 0);
948 /* we own the inflated lock now */
955 /* lock_monitor_exit ***********************************************************
957 Decrement the counter of a (currently owned) monitor. If the counter
958 reaches zero, release the monitor.
960 If the current thread is not the owner of the monitor, an
961 IllegalMonitorState exception is thrown.
964 t............the current thread
965 o............the object of which to exit the monitor
968 true.........everything ok,
969 false........an exception has been thrown
971 *******************************************************************************/
973 bool lock_monitor_exit(java_objectheader *o)
/* Release one level of o's monitor; unlock fully when the count hits
   zero. Throws IllegalMonitorStateException if the caller does not
   own the monitor. Returns true on success, false after throwing.
   NOTE(review): this listing omits several lines (declarations of t,
   lockword, thinlock, lr; memory barriers; return statements; the
   count decrement at line ~1060) -- comments describe what is visible. */
980 exceptions_throw_nullpointerexception();
986 /* We don't have to worry about stale values here, as any stale value */
987 /* will indicate that we don't own the lock. */
989 lockword = (ptrint) o->monitorPtr;
990 thinlock = t->thinlock;
992 /* most common case: we release a thin lock that we hold once */
994 if (lockword == thinlock) {
995 /* memory barrier for Java Memory Model */
997 o->monitorPtr = THIN_UNLOCKED;
998 /* memory barrier for thin locking */
1001 /* check if there has been a flat lock contention on this object */
1003 if (LOCK_TEST_FLC_BIT(o)) {
1006 LOCK_LOG(("thread %d saw flc bit on %p %s\n",
1007 t->index, (void*) o, o->vftbl->class->name->text));
1009 /* there has been a contention on this thin lock */
1011 lr = lock_hashtable_get_lock_record(t, o);
1013 LOCK_LOG(("thread %d for %p got lr %p\n",
1014 t->index, (void*) o, (void*) lr));
1016 lock_record_enter(t, lr);
/* re-check under the record mutex: the bit may have been cleared
   by a concurrent inflation in the meantime */
1018 if (LOCK_TEST_FLC_BIT(o)) {
1019 /* notify a thread that it can try to inflate the lock now */
1021 lock_record_notify(t, lr, true);
1024 lock_record_exit(t, lr);
1030 /* next common case: we release a recursive lock, count > 0 */
1032 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
1033 o->monitorPtr = (lock_record_t *) (lockword - THIN_LOCK_COUNT_INCR);
1037 /* either the lock is fat, or we don't hold it at all */
1039 if (IS_FAT_LOCK(lockword)) {
1043 lr = GET_FAT_LOCK(lockword);
1045 /* check if we own this monitor */
1046 /* We don't have to worry about stale values here, as any stale value */
1047 /* will be != t and thus fail this check. */
1049 if (lr->owner != t) {
1050 exceptions_throw_illegalmonitorstateexception();
1054 /* { the current thread `t` owns the lock record `lr` on object `o` } */
1056 if (lr->count != 0) {
1057 /* we had locked this one recursively. just decrement, it will */
1058 /* still be locked. */
1063 /* unlock this lock record */
1066 pthread_mutex_unlock(&(lr->mutex));
1071 /* legal thin lock cases have been handled above, so this is an error */
1073 exceptions_throw_illegalmonitorstateexception();
1079 /* lock_record_add_waiter ******************************************************
1081 Add a thread to the list of waiting threads of a lock record.
1084 lr...........the lock record
1085 thread.......the thread to add
1087 *******************************************************************************/
1089 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
/* Head-insert `thread` into lr's waiter list. Caller must hold lr's
   mutex (the list is only touched under it). */
1091 lock_waiter_t *waiter;
1093 /* allocate a waiter data structure */
1095 waiter = NEW(lock_waiter_t);
1097 #if defined(ENABLE_STATISTICS)
1099 size_lock_waiter += sizeof(lock_waiter_t);
1102 waiter->waiter = thread;
1103 waiter->next = lr->waiters;
1105 lr->waiters = waiter;
1109 /* lock_record_remove_waiter ***************************************************
1111 Remove a thread from the list of waiting threads of a lock record.
1114 lr...........the lock record
1115 t............the current thread
1118 The current thread must be the owner of the lock record.
1120 *******************************************************************************/
1122 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
/* Unlink and free the waiter node for `thread`. Caller must own lr.
   Aborts the VM if the thread is not on the list -- that would mean
   the add/remove pairing in lock_record_wait was broken.
   NOTE(review): the actual unlink (*link = w->next) and the return
   after freeing are in lines not shown in this listing -- confirm. */
1124 lock_waiter_t **link;
1127 link = &(lr->waiters);
1129 while ((w = *link)) {
1130 if (w->waiter == thread) {
1133 /* free the waiter data structure */
1135 FREE(w, lock_waiter_t);
1137 #if defined(ENABLE_STATISTICS)
1139 size_lock_waiter -= sizeof(lock_waiter_t);
1148 /* this should never happen */
1150 vm_abort("lock_record_remove_waiter: waiting thread not found in list of waiters\n");
1154 /* lock_record_wait ************************************************************
1156 Wait on a lock record for a given (maximum) amount of time.
1159 t............the current thread
1160 lr...........the lock record
1161 millis.......milliseconds of timeout
1162 nanos........nanoseconds of timeout
1165 The current thread must be the owner of the lock record.
1166 This is NOT checked by this function!
1168 *******************************************************************************/
1170 static void lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
/* Object.wait() on a fat lock: register as waiter, fully release the
   record (saving the recursion count), sleep until notified /
   interrupted / timed out, then re-acquire and restore the count.
   Caller must own lr (not checked).
   NOTE(review): zeroing lr->count before the release is presumably in
   a line not shown in this listing -- confirm. */
1173 bool wasinterrupted;
1175 /* { the thread t owns the fat lock record lr on the object o } */
1177 /* register us as waiter for this object */
1179 lock_record_add_waiter(lr, thread);
1181 /* remember the old lock count */
1183 lockcount = lr->count;
1185 /* unlock this record */
1188 lock_record_exit(thread, lr);
1190 /* wait until notified/interrupted/timed out */
1192 wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);
1194 /* re-enter the monitor */
1196 lock_record_enter(thread, lr);
1198 /* remove us from the list of waiting threads */
1200 lock_record_remove_waiter(lr, thread);
1202 /* restore the old lock count */
1204 lr->count = lockcount;
1206 /* if we have been interrupted, throw the appropriate exception */
1209 exceptions_throw_interruptedexception();
1213 /* lock_monitor_wait ***********************************************************
1215 Wait on an object for a given (maximum) amount of time.
1218 t............the current thread
1219 o............the object
1220 millis.......milliseconds of timeout
1221 nanos........nanoseconds of timeout
1224 The current thread must be the owner of the object's monitor.
1226 *******************************************************************************/
1228 static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nanos)
/* Object.wait() entry point: verify ownership, inflate a thin lock if
   needed (waiting requires a fat lock's condition mechanism), then
   delegate to lock_record_wait. Throws IllegalMonitorStateException
   and returns early (returns not shown) if t does not own o. */
1233 lockword = (ptrint) o->monitorPtr;
1235 /* check if we own this monitor */
1236 /* We don't have to worry about stale values here, as any stale value */
1237 /* will fail this check. */
1239 if (IS_FAT_LOCK(lockword)) {
1241 lr = GET_FAT_LOCK(lockword);
1243 if (lr->owner != t) {
1244 exceptions_throw_illegalmonitorstateexception();
1249 /* it's a thin lock */
1251 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1252 exceptions_throw_illegalmonitorstateexception();
1256 /* inflate this lock */
1257 lr = lock_hashtable_get_lock_record(t, o);
1258 lock_record_enter(t, lr);
1259 lock_inflate(t, o, lr);
1262 /* { the thread t owns the fat lock record lr on the object o } */
1264 lock_record_wait(t, lr, millis, nanos);
1268 /* lock_record_notify **********************************************************
1270 Notify one thread or all threads waiting on the given lock record.
1273 t............the current thread
1274 lr...........the lock record
1275 one..........if true, only notify one thread
1278 The current thread must be the owner of the lock record.
1279 This is NOT checked by this function!
1281 *******************************************************************************/
1283 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
/* Wake one (one==true) or all waiters of lr. Caller must own lr (not
   checked). NOTE(review): the early break for the one==true case is
   in lines not shown in this listing -- confirm. */
1285 lock_waiter_t *waiter;
1286 threadobject *waitingthread;
1288 /* { the thread t owns the fat lock record lr on the object o } */
1290 /* for each waiter: */
1292 for (waiter = lr->waiters; waiter; waiter = waiter->next) {
1294 /* signal the waiting thread */
1296 waitingthread = waiter->waiter;
1298 pthread_mutex_lock(&waitingthread->waitmutex);
1299 if (waitingthread->sleeping)
1300 pthread_cond_signal(&waitingthread->waitcond);
/* set unconditionally: a waiter that has registered but not yet
   gone to sleep will see the flag and skip the sleep entirely */
1301 waitingthread->signaled = true;
1302 pthread_mutex_unlock(&waitingthread->waitmutex);
1304 /* if we should only wake one, we are done */
1312 /* lock_monitor_notify *********************************************************
1314 Notify one thread or all threads waiting on the given object.
1317 t............the current thread
1318 o............the object
1319 one..........if true, only notify one thread
1322 The current thread must be the owner of the object's monitor.
1324 *******************************************************************************/
1326 static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
/* Object.notify()/notifyAll() entry point: verify ownership, inflate
   a thin lock if needed, then delegate to lock_record_notify. Throws
   IllegalMonitorStateException (early returns not shown) if t does
   not own o's monitor. */
1331 lockword = (ptrint) o->monitorPtr;
1333 /* check if we own this monitor */
1334 /* We don't have to worry about stale values here, as any stale value */
1335 /* will fail this check. */
1337 if (IS_FAT_LOCK(lockword)) {
1339 lr = GET_FAT_LOCK(lockword);
1341 if (lr->owner != t) {
1342 exceptions_throw_illegalmonitorstateexception();
1347 /* it's a thin lock */
1349 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1350 exceptions_throw_illegalmonitorstateexception();
1354 /* inflate this lock */
/* a thin lock has no waiter list; inflation is needed even though
   there can be no waiters yet, so future waits/notifies line up */
1355 lr = lock_hashtable_get_lock_record(t, o);
1356 lock_record_enter(t, lr);
1357 lock_inflate(t, o, lr);
1360 /* { the thread t owns the fat lock record lr on the object o } */
1362 lock_record_notify(t, lr, one);
1367 /*============================================================================*/
1368 /* INQUIRY FUNCIONS */
1369 /*============================================================================*/
1372 /* lock_is_held_by_current_thread **********************************************
1374 Return true if the current thread owns the monitor of the given object.
1377 o............the object
1380 true, if the current thread holds the lock of this object.
1382 *******************************************************************************/
1384 bool lock_is_held_by_current_thread(java_objectheader *o)
/* True iff the calling thread owns o's monitor. Works for both lock
   shapes: fat compares lr->owner, thin compares the TID field.
   NOTE(review): the declarations of t (presumably THREADOBJECT),
   lockword and lr are in lines not shown in this listing -- confirm. */
1389 /* check if we own this monitor */
1390 /* We don't have to worry about stale values here, as any stale value */
1391 /* will fail this check. */
1393 lockword = (ptrint) o->monitorPtr;
1396 if (IS_FAT_LOCK(lockword)) {
1399 /* it's a fat lock */
1400 lr = GET_FAT_LOCK(lockword);
1402 return (lr->owner == t);
1405 /* it's a thin lock */
1407 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1413 /*============================================================================*/
1414 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1415 /*============================================================================*/
1418 /* lock_wait_for_object ********************************************************
1420 Wait for the given object.
1423 o............the object
1424 millis.......milliseconds to wait
1425 nanos........nanoseconds to wait
1427 *******************************************************************************/
1429 void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
1431 threadobject *thread;
1433 thread = THREADOBJECT;
1435 lock_monitor_wait(thread, o, millis, nanos);
1439 /* lock_notify_object **********************************************************
1441 Notify one thread waiting on the given object.
1444 o............the object
1446 *******************************************************************************/
1448 void lock_notify_object(java_objectheader *o)
1450 threadobject *thread;
1452 thread = THREADOBJECT;
1454 lock_monitor_notify(thread, o, true);
1458 /* lock_notify_all_object ******************************************************
1460 Notify all threads waiting on the given object.
1463 o............the object
1465 *******************************************************************************/
1467 void lock_notify_all_object(java_objectheader *o)
1469 threadobject *thread;
1471 thread = THREADOBJECT;
1473 lock_monitor_notify(thread, o, false);
1478 * These are local overrides for various environment variables in Emacs.
1479 * Please do not remove this and leave it at the end of the file, where
1480 * Emacs will automagically detect them.
1481 * ---------------------------------------------------------------------
1484 * indent-tabs-mode: t
1488 * vim:noexpandtab:sw=4:ts=4: