1 /* src/threads/native/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
4 C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5 E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6 J. Wenninger, Institut f. Computersprachen - TU Wien
8 This file is part of CACAO.
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2, or (at
13 your option) any later version.
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 $Id: threads.c 4903 2006-05-11 12:48:43Z edwin $
40 #include "mm/memory.h"
42 #include "threads/native/lock.h"
43 #include "threads/native/threads.h"
45 #include "vm/global.h"
46 #include "vm/exceptions.h"
47 #include "vm/stringlocal.h"
50 #include "vmcore/options.h"
52 #if defined(ENABLE_STATISTICS)
53 # include "vmcore/statistics.h"
56 #if defined(ENABLE_VMLOG)
57 #include <vmlog_cacao.h>
60 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
64 /* includes for atomic instructions: */
66 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
67 #include "threads/native/generic-primitives.h"
69 #include "machine-instr.h"
72 #if defined(ENABLE_JVMTI)
73 #include "native/jvmti/cacaodbg.h"
77 /******************************************************************************/
78 /* DEBUGGING MACROS */
79 /******************************************************************************/
81 /* #define LOCK_VERBOSE */
83 #if defined(LOCK_VERBOSE)
84 #define LOCK_LOG(args) do { printf args; fflush(stdout); } while (0)
86 #define LOCK_LOG(args)
90 /******************************************************************************/
92 /******************************************************************************/
/* number of lock records in the first pool allocated for a thread */
#define LOCK_INITIAL_LOCK_RECORDS 8
#define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
/* hash of an object is simply its address; buckets are hash % table size */
#define LOCK_HASH(obj) ((ptrint)(obj))
/* atomic CAS on *address; evaluates to the value previously stored there */
/* (equal to oldvalue if and only if the swap took place)                 */
#define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
/* CAUTION: oldvalue is evaluated twice! */
/* (do not pass expressions with side effects as oldvalue) */
#define COMPARE_AND_SWAP_SUCCEEDS(address, oldvalue, newvalue) \
(compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)) == (long)(oldvalue))
109 /******************************************************************************/
110 /* MACROS FOR THE FLAT LOCK CONTENTION BIT */
111 /******************************************************************************/
/* FLC = flat lock contention: a blocked thread sets this header flag on a */
/* thin-locked object so the owner knows to notify waiters when it unlocks */
/* (see lock_monitor_enter / lock_monitor_exit)                            */
#define LOCK_SET_FLC_BIT(obj) ((obj)->hdrflags |= HDRFLAG_FLC)
#define LOCK_CLEAR_FLC_BIT(obj) ((obj)->hdrflags &= ~ HDRFLAG_FLC)
#define LOCK_TEST_FLC_BIT(obj) ((obj)->hdrflags & HDRFLAG_FLC)
118 /******************************************************************************/
119 /* MACROS FOR THIN/FAT LOCKS */
120 /******************************************************************************/
122 /* We use a variant of the tasuki locks described in the paper
124 * Tamiya Onodera, Kiyokuni Kawachiya
125 * A Study of Locking Objects with Bimodal Fields
126 * Proceedings of the ACM OOPSLA '99, pp. 223-237
129 * The underlying thin locks are a variant of the thin locks described in
131 * Bacon, Konuru, Murthy, Serrano
132 * Thin Locks: Featherweight Synchronization for Java
133 * Proceedings of the ACM Conference on Programming Language Design and
134 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
137 * In thin lock mode the lockword (monitorPtr) looks like this:
139 * ,----------------------,-----------,---,
140 * | thread ID | count | 0 |
141 * `----------------------'-----------'---´
143 * thread ID......the 'index' of the owning thread, or 0
144 * count..........number of times the lock has been entered minus 1
145 * 0..............the shape bit is 0 in thin lock mode
147 * In fat lock mode it is basically a lock_record_t *:
149 * ,----------------------------------,---,
150 * | lock_record_t * (without LSB) | 1 |
151 * `----------------------------------'---´
153 * 1..............the shape bit is 1 in fat lock mode
#if SIZEOF_VOID_P == 8
#define THIN_LOCK_WORD_SIZE 64
/* 32-bit platforms */
#define THIN_LOCK_WORD_SIZE 32
/* LSB of the lockword: 0 = thin lock, 1 = fat lock (lock_record_t *) */
#define THIN_LOCK_SHAPE_BIT 0x01
/* an all-zero lockword means: unlocked, thin shape */
#define THIN_UNLOCKED 0
/* recursion count occupies bits [1..8] of the thin lockword */
#define THIN_LOCK_COUNT_SHIFT 1
#define THIN_LOCK_COUNT_SIZE 8
#define THIN_LOCK_COUNT_INCR (1 << THIN_LOCK_COUNT_SHIFT)
#define THIN_LOCK_COUNT_MAX ((1 << THIN_LOCK_COUNT_SIZE) - 1)
#define THIN_LOCK_COUNT_MASK (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
/* the owning thread's index occupies the remaining high bits */
#define THIN_LOCK_TID_SHIFT (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
#define THIN_LOCK_TID_SIZE (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
#define IS_THIN_LOCK(lockword) (!((lockword) & THIN_LOCK_SHAPE_BIT))
#define IS_FAT_LOCK(lockword) ((lockword) & THIN_LOCK_SHAPE_BIT)
/* strip the shape bit to recover the lock record pointer */
#define GET_FAT_LOCK(lockword) ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
#define MAKE_FAT_LOCK(ptr) ((ptrint)(ptr) | THIN_LOCK_SHAPE_BIT)
/* mask out the recursion count: result compares equal to t->thinlock */
/* iff thread t holds the thin lock (at any recursion depth)          */
#define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
184 /******************************************************************************/
185 /* GLOBAL VARIABLES */
186 /******************************************************************************/
/* global lock record pool list header */
/* NOTE(review): only used from this file as far as visible here -- could  */
/* likely be made static; TODO confirm no extern declaration in lock.h     */
lock_record_pool_t *lock_global_pool;
/* mutex for synchronizing access to the global pool */
pthread_mutex_t lock_global_pool_lock;
/* hashtable mapping objects to lock records; guarded by its own mutex */
static lock_hashtable_t lock_hashtable;
198 /******************************************************************************/
200 /******************************************************************************/
/* forward declarations of file-local helpers (definitions below) */
static void lock_hashtable_init(void);
static lock_record_t * lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o);
static lock_record_t * lock_record_alloc(threadobject *t);
static void lock_record_enter(threadobject *t, lock_record_t *lr);
static void lock_record_exit(threadobject *t, lock_record_t *lr);
static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
213 /*============================================================================*/
214 /* INITIALIZATION OF DATA STRUCTURES */
215 /*============================================================================*/
218 /* lock_init *******************************************************************
220 Initialize global data for locking.
222 *******************************************************************************/
/* set up the mutex guarding the global lock record pool freelist */
pthread_mutex_init(&lock_global_pool_lock, NULL);
/* create the (empty) object -> lock record hashtable */
lock_hashtable_init();
#if defined(ENABLE_VMLOG)
vmlog_cacao_init_lock();
236 /* lock_record_init ************************************************************
238 Initialize a lock record.
241 r............the lock record to initialize
242 t............will become the owner
244 *******************************************************************************/
/* initialize one lock record; t will become its owner (see header above) */
static void lock_record_init(lock_record_t *r, threadobject *t)
/* the record's mutex implements the fat-lock monitor itself */
pthread_mutex_init(&(r->mutex), NULL);
260 /* lock_init_execution_env *****************************************************
262 Initialize the execution environment for a thread.
265 thread.......the thread
267 *******************************************************************************/
269 void lock_init_execution_env(threadobject *thread)
271 thread->ee.firstfree = NULL;
272 thread->ee.lockrecordpools = NULL;
273 thread->ee.lockrecordcount = 0;
278 /* lock_pre_compute_thinlock ***************************************************
280 Pre-compute the thin lock value for a thread index.
283 index........the thead index (>= 1)
286 the thin lock value for this thread index
288 *******************************************************************************/
290 ptrint lock_pre_compute_thinlock(s4 index)
292 return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
297 /*============================================================================*/
298 /* LOCK RECORD MANAGEMENT */
299 /*============================================================================*/
302 /* lock_record_alloc_new_pool **************************************************
304 Get a new lock record pool from the memory allocator.
307 thread.......the thread that will own the lock records
308 size.........number of lock records in the pool to allocate
311 the new lock record pool, with initialized lock records
313 *******************************************************************************/
static lock_record_pool_t *lock_record_alloc_new_pool(threadobject *thread, int size)
lock_record_pool_t *pool;
/* get the pool from the memory allocator */
/* header plus `size` lock records in one contiguous allocation */
pool = mem_alloc(sizeof(lock_record_pool_header_t)
+ sizeof(lock_record_t) * size);
#if defined(ENABLE_STATISTICS)
size_lock_record_pool += sizeof(lock_record_pool_header_t) +
sizeof(lock_record_t) * size;
/* initialize the pool header */
pool->header.size = size;
/* initialize the individual lock records */
/* chain each record to its successor, forming the thread's freelist */
for (i = 0; i < size; i++) {
lock_record_init(&pool->lr[i], thread);
pool->lr[i].nextfree = &pool->lr[i + 1];
/* terminate free list */
/* (the loop above linked the last record one past the end) */
pool->lr[i - 1].nextfree = NULL;
351 /* lock_record_alloc_pool ******************************************************
353 Allocate a lock record pool. The pool is either taken from the global free
354 list or requested from the memory allocator.
357 thread.......the thread that will own the lock records
358 size.........number of lock records in the pool to allocate
361 the new lock record pool, with initialized lock records
363 *******************************************************************************/
static lock_record_pool_t *lock_record_alloc_pool(threadobject *t, int size)
pthread_mutex_lock(&lock_global_pool_lock);
/* NOTE: a recycled pool keeps its original size; the requested `size` */
/* only takes effect when a fresh pool must be allocated below         */
if (lock_global_pool != NULL) {
lock_record_pool_t *pool;
/* pop a pool from the global freelist */
pool = lock_global_pool;
lock_global_pool = pool->header.next;
pthread_mutex_unlock(&lock_global_pool_lock);
/* re-initialize owner and freelist chaining */
for (i = 0; i < pool->header.size; i++) {
pool->lr[i].owner = NULL;
pool->lr[i].nextfree = &pool->lr[i + 1];
pool->lr[i - 1].nextfree = NULL;
/* freelist was empty: release the mutex before hitting the allocator */
pthread_mutex_unlock(&lock_global_pool_lock);
/* we have to get a new pool from the allocator */
return lock_record_alloc_new_pool(t, size);
399 /* lock_record_free_pools ******************************************************
401 Free the lock record pools in the given linked list. The pools are inserted
402 into the global freelist.
405 pool.........list header
407 *******************************************************************************/
void lock_record_free_pools(lock_record_pool_t *pool)
lock_record_pool_header_t *last;
/* intentionally disabled: see comment below */
assert(false); /* XXX this function does not match the new locking */
/* algorithm. We must find another way to free */
/* unused lock records. */
pthread_mutex_lock(&lock_global_pool_lock);
/* find the last pool in the list */
last = &pool->header;
last = &last->next->header;
/* chain it to the lock_global_pool freelist */
last->next = lock_global_pool;
/* insert the freed pools into the freelist */
lock_global_pool = pool;
pthread_mutex_unlock(&lock_global_pool_lock);
441 /* lock_record_alloc ***********************************************************
443 Allocate a lock record which is owned by the current thread.
446 t............the current thread
448 *******************************************************************************/
static lock_record_t *lock_record_alloc(threadobject *t)
lock_record_pool_t *pool;
/* per-thread freelist is empty: grow by doubling the number of records */
/* owned so far (or start with the initial pool size)                   */
poolsize = t->ee.lockrecordcount ? t->ee.lockrecordcount * 2
: LOCK_INITIAL_LOCK_RECORDS;
pool = lock_record_alloc_pool(t, poolsize);
/* add it to our per-thread pool list */
pool->header.next = t->ee.lockrecordpools;
t->ee.lockrecordpools = pool;
t->ee.lockrecordcount += pool->header.size;
/* take the first record from the pool */
/* pop the record from the freelist */
t->ee.firstfree = r->nextfree;
r->nextfree = NULL; /* in order to find invalid uses of nextfree */
488 /* lock_record_recycle *********************************************************
490 Recycle the given lock record. It will be inserted in the appropriate
494 t............the owner
495 r............lock record to recycle
497 *******************************************************************************/
/* push an unowned lock record back onto the thread's freelist */
static inline void lock_record_recycle(threadobject *t, lock_record_t *r)
/* the record must be released and unlinked before recycling */
assert(r->owner == NULL);
assert(r->nextfree == NULL);
r->nextfree = t->ee.firstfree;
512 /*============================================================================*/
513 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
514 /*============================================================================*/
517 /* lock_hashtable_init *********************************************************
519 Initialize the global hashtable mapping objects to lock records.
521 *******************************************************************************/
static void lock_hashtable_init(void)
pthread_mutex_init(&(lock_hashtable.mutex), NULL);
lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
lock_hashtable.entries = 0;
lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
#if defined(ENABLE_STATISTICS)
size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
/* all buckets start out empty */
MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
540 /* lock_hashtable_grow *********************************************************
542 Grow the lock record hashtable to about twice its current size and
545 *******************************************************************************/
/* must be called with hashtable mutex locked */
static void lock_hashtable_grow(void)
lock_record_t **oldtable;
lock_record_t **newtable;
/* allocate a new table */
oldsize = lock_hashtable.size;
newsize = oldsize*2 + 1; /* XXX should use prime numbers */
LOCK_LOG(("growing lock hashtable to size %d\n", newsize));
oldtable = lock_hashtable.ptr;
newtable = MNEW(lock_record_t *, newsize);
#if defined(ENABLE_STATISTICS)
size_lock_hashtable += sizeof(lock_record_t *) * newsize;
MZERO(newtable, lock_record_t *, newsize);
/* rehash the entries */
/* each record is pushed onto the head of its new bucket's chain */
for (i = 0; i < oldsize; i++) {
h = LOCK_HASH(lr->obj);
newslot = h % newsize;
lr->hashlink = newtable[newslot];
newtable[newslot] = lr;
/* replace the old table */
lock_hashtable.ptr = newtable;
lock_hashtable.size = newsize;
MFREE(oldtable, lock_record_t *, oldsize);
#if defined(ENABLE_STATISTICS)
size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
608 /* lock_hashtable_get_lock_record **********************************************
610 Find the lock record for the given object. If it does not exists, yet,
611 create it and enter it in the hashtable.
614 t.................the current thread
615 o.................the object to look up
618 the lock record to use for this object
620 *******************************************************************************/
static lock_record_t *lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o)
lockword = (ptrint) o->monitorPtr;
/* fast path: the object is already inflated -- its lockword points */
/* directly at the lock record, no table lookup needed              */
if (IS_FAT_LOCK(lockword)) {
return GET_FAT_LOCK(lockword);
/* lock the hashtable */
pthread_mutex_lock(&(lock_hashtable.mutex));
/* lookup the lock record in the hashtable */
slot = LOCK_HASH(o) % lock_hashtable.size;
lr = lock_hashtable.ptr[slot];
pthread_mutex_unlock(&(lock_hashtable.mutex));
/* not found, we must create a new one */
lr = lock_record_alloc(t);
LOCK_LOG(("thread %d allocated for %p new lr %p\n",
t->index, (void*) o, (void*) lr));
/* enter it in the hashtable */
lr->hashlink = lock_hashtable.ptr[slot];
lock_hashtable.ptr[slot] = lr;
lock_hashtable.entries++;
/* check whether the hash should grow */
/* grow when the load factor exceeds 4/3 (entries/size > 4/3) */
if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
lock_hashtable_grow();
/* unlock the hashtable */
pthread_mutex_unlock(&(lock_hashtable.mutex));
/* return the new lock record */
680 /*============================================================================*/
681 /* OBJECT LOCK INITIALIZATION */
682 /*============================================================================*/
685 /* lock_init_object_lock *******************************************************
687 Initialize the monitor pointer of the given object. The monitor gets
688 initialized to an unlocked state.
690 *******************************************************************************/
void lock_init_object_lock(java_objectheader *o)
/* thin-lock shape, thread ID 0, count 0: the unlocked state */
o->monitorPtr = (lock_record_t *) THIN_UNLOCKED;
LOCK_CLEAR_FLC_BIT(o);
701 /* lock_get_initial_lock_word **************************************************
703 Returns the initial (unlocked) lock word. The pointer is
704 required in the code generator to set up a virtual
705 java_objectheader for code patch locking.
707 *******************************************************************************/
709 lock_record_t *lock_get_initial_lock_word(void)
711 return (lock_record_t *) THIN_UNLOCKED;
716 /*============================================================================*/
717 /* LOCKING ALGORITHM */
718 /*============================================================================*/
721 /* lock_record_enter ***********************************************************
723 Enter the lock represented by the given lock record.
726 t.................the current thread
727 lr................the lock record
729 *******************************************************************************/
/* block until the fat-lock monitor (the record's mutex) is acquired */
/* NOTE(review): the owner field is presumably updated in lines not    */
/* shown here -- verify against the full source before relying on it   */
static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
pthread_mutex_lock(&(lr->mutex));
738 /* lock_record_exit ************************************************************
740 Release the lock represented by the given lock record.
743 t.................the current thread
744 lr................the lock record
747 The current thread must own the lock represented by this lock record.
748 This is NOT checked by this function!
750 *******************************************************************************/
/* release the fat-lock monitor; caller must own it (NOT checked here) */
static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
pthread_mutex_unlock(&(lr->mutex));
759 /* lock_inflate ****************************************************************
761 Inflate the lock of the given object. This may only be called by the
762 owner of the monitor of the object.
765 t............the current thread
766 o............the object of which to inflate the lock
767 lr...........the lock record to install. The current thread must
768 own the lock of this lock record!
771 The current thread must be the owner of this object's monitor AND
772 of the lock record's lock!
774 *******************************************************************************/
static void lock_inflate(threadobject *t, java_objectheader *o, lock_record_t *lr)
/* get the current lock count */
lockword = (ptrint) o->monitorPtr;
/* already fat? then it must already be our record -- nothing to do */
if (IS_FAT_LOCK(lockword)) {
assert(GET_FAT_LOCK(lockword) == lr);
/* still thin: we must be the thin-lock owner to inflate */
assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );
/* copy the count from the thin lock */
lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
LOCK_LOG(("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
t->index, (void*) o, (long)o->monitorPtr, (int)lr->count));
/* clear flat-lock-contention bit */
LOCK_CLEAR_FLC_BIT(o);
/* notify waiting objects */
/* wake all threads blocked on the FLC protocol so they can re-try */
lock_record_notify(t, lr, false);
/* publish the fat lockword: record pointer with the shape bit set */
o->monitorPtr = (lock_record_t *) MAKE_FAT_LOCK(lr);
812 /* lock_monitor_enter **********************************************************
814 Acquire the monitor of the given object. If the current thread already
815 owns the monitor, the lock counter is simply increased.
817 This function blocks until it can acquire the monitor.
820 t............the current thread
821 o............the object of which to enter the monitor
824 true.........the lock has been successfully acquired
825 false........an exception has been thrown
827 *******************************************************************************/
bool lock_monitor_enter(java_objectheader *o)
/* CAUTION: This code assumes that ptrint is unsigned! */
exceptions_throw_nullpointerexception();
thinlock = t->thinlock;
/* most common case: try to thin-lock an unlocked object */
if ((lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) == THIN_UNLOCKED) {
/* success. we locked it */
/* The Java Memory Model requires a memory barrier here: */
/* next common case: recursive lock with small recursion count */
/* We don't have to worry about stale values here, as any stale value */
/* will indicate another thread holding the lock (or an inflated lock) */
if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
/* we own this monitor */
/* check the current recursion count */
/* XOR leaves only the count bits; comparing against the max count */
/* detects whether one more increment would overflow the field     */
if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
/* the recursion count is low enough */
/* plain store is safe: we already own the thin lock */
o->monitorPtr = (lock_record_t *) (lockword + THIN_LOCK_COUNT_INCR);
/* success. we locked it */
/* recursion count overflow */
/* fall back to a fat lock, which has an unbounded count */
lr = lock_hashtable_get_lock_record(t, o);
lock_record_enter(t, lr);
lock_inflate(t, o, lr);
/* the lock is either contented or fat */
if (IS_FAT_LOCK(lockword)) {
lr = GET_FAT_LOCK(lockword);
/* check for recursive entering */
if (lr->owner == t) {
/* acquire the mutex of the lock record */
lock_record_enter(t, lr);
assert(lr->count == 0);
/****** inflation path ******/
/* another thread holds a thin lock on o: we must wait for a chance */
/* to inflate it ourselves via the FLC protocol below               */
/* first obtain the lock record for this object */
lr = lock_hashtable_get_lock_record(t, o);
#if defined(ENABLE_JVMTI)
/* Monitor Contended Enter */
jvmti_MonitorContendedEntering(false, o);
/* enter the monitor */
lock_record_enter(t, lr);
#if defined(ENABLE_JVMTI)
/* Monitor Contended Entered */
jvmti_MonitorContendedEntering(true, o);
while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {
/* Set the flat lock contention bit to let the owning thread */
/* know that we want to be notified of unlocking. */
LOCK_LOG(("thread %d set flc bit on %p lr %p\n",
t->index, (void*) o, (void*) lr));
/* try to lock the object */
if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
/* we can inflate the lock ourselves */
LOCK_LOG(("thread %d inflating lock of %p to lr %p\n",
t->index, (void*) o, (void*) lr));
lock_inflate(t, o, lr);
/* wait until another thread sees the flc bit and notifies us of unlocking */
LOCK_LOG(("thread %d waiting for notification on %p lr %p\n",
t->index, (void*) o, (void*) lr));
lock_record_wait(t, lr, 0, 0);
/* we own the inflated lock now */
963 /* lock_monitor_exit ***********************************************************
965 Decrement the counter of a (currently owned) monitor. If the counter
966 reaches zero, release the monitor.
968 If the current thread is not the owner of the monitor, an
969 IllegalMonitorState exception is thrown.
972 t............the current thread
973 o............the object of which to exit the monitor
976 true.........everything ok,
977 false........an exception has been thrown
979 *******************************************************************************/
bool lock_monitor_exit(java_objectheader *o)
exceptions_throw_nullpointerexception();
/* We don't have to worry about stale values here, as any stale value */
/* will indicate that we don't own the lock. */
lockword = (ptrint) o->monitorPtr;
thinlock = t->thinlock;
/* most common case: we release a thin lock that we hold once */
if (lockword == thinlock) {
/* memory barrier for Java Memory Model */
o->monitorPtr = THIN_UNLOCKED;
/* memory barrier for thin locking */
/* check if there has been a flat lock contention on this object */
if (LOCK_TEST_FLC_BIT(o)) {
LOCK_LOG(("thread %d saw flc bit on %p %s\n",
t->index, (void*) o, o->vftbl->class->name->text));
/* there has been a contention on this thin lock */
lr = lock_hashtable_get_lock_record(t, o);
LOCK_LOG(("thread %d for %p got lr %p\n",
t->index, (void*) o, (void*) lr));
lock_record_enter(t, lr);
/* re-test under the record's mutex: the bit may have been cleared */
/* by a concurrent inflation in the meantime                       */
if (LOCK_TEST_FLC_BIT(o)) {
/* notify a thread that it can try to inflate the lock now */
lock_record_notify(t, lr, true);
lock_record_exit(t, lr);
/* next common case: we release a recursive lock, count > 0 */
if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
o->monitorPtr = (lock_record_t *) (lockword - THIN_LOCK_COUNT_INCR);
/* either the lock is fat, or we don't hold it at all */
if (IS_FAT_LOCK(lockword)) {
lr = GET_FAT_LOCK(lockword);
/* check if we own this monitor */
/* We don't have to worry about stale values here, as any stale value */
/* will be != t and thus fail this check. */
if (lr->owner != t) {
exceptions_throw_illegalmonitorstateexception();
/* { the current thread `t` owns the lock record `lr` on object `o` } */
if (lr->count != 0) {
/* we had locked this one recursively. just decrement, it will */
/* still be locked. */
/* unlock this lock record */
pthread_mutex_unlock(&(lr->mutex));
/* legal thin lock cases have been handled above, so this is an error */
exceptions_throw_illegalmonitorstateexception();
1087 /* lock_record_add_waiter ******************************************************
1089 Add a thread to the list of waiting threads of a lock record.
1092 lr...........the lock record
1093 thread.......the thread to add
1095 *******************************************************************************/
static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
lock_waiter_t *waiter;
/* allocate a waiter data structure */
waiter = NEW(lock_waiter_t);
#if defined(ENABLE_STATISTICS)
size_lock_waiter += sizeof(lock_waiter_t);
/* prepend to the record's singly-linked waiter list */
waiter->waiter = thread;
waiter->next = lr->waiters;
lr->waiters = waiter;
1117 /* lock_record_remove_waiter ***************************************************
1119 Remove a thread from the list of waiting threads of a lock record.
1122 lr...........the lock record
1123 t............the current thread
1126 The current thread must be the owner of the lock record.
1128 *******************************************************************************/
static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
lock_waiter_t **link;
/* walk the waiter list via a pointer-to-link so the matching node */
/* can be unlinked without special-casing the list head            */
link = &(lr->waiters);
while ((w = *link)) {
if (w->waiter == thread) {
/* free the waiter data structure */
FREE(w, lock_waiter_t);
#if defined(ENABLE_STATISTICS)
size_lock_waiter -= sizeof(lock_waiter_t);
/* this should never happen */
vm_abort("lock_record_remove_waiter: waiting thread not found in list of waiters\n");
1162 /* lock_record_wait ************************************************************
1164 Wait on a lock record for a given (maximum) amount of time.
1167 t............the current thread
1168 lr...........the lock record
1169 millis.......milliseconds of timeout
1170 nanos........nanoseconds of timeout
1173 The current thread must be the owner of the lock record.
1174 This is NOT checked by this function!
1176 *******************************************************************************/
static void lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
bool wasinterrupted;
/* { the thread t owns the fat lock record lr on the object o } */
/* register us as waiter for this object */
lock_record_add_waiter(lr, thread);
/* remember the old lock count */
/* the recursion count must survive the release/re-acquire below */
lockcount = lr->count;
/* unlock this record */
lock_record_exit(thread, lr);
/* wait until notified/interrupted/timed out */
wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);
/* re-enter the monitor */
lock_record_enter(thread, lr);
/* remove us from the list of waiting threads */
lock_record_remove_waiter(lr, thread);
/* restore the old lock count */
lr->count = lockcount;
/* if we have been interrupted, throw the appropriate exception */
exceptions_throw_interruptedexception();
1221 /* lock_monitor_wait ***********************************************************
1223 Wait on an object for a given (maximum) amount of time.
1226 t............the current thread
1227 o............the object
1228 millis.......milliseconds of timeout
1229 nanos........nanoseconds of timeout
1232 The current thread must be the owner of the object's monitor.
1234 *******************************************************************************/
static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nanos)
lockword = (ptrint) o->monitorPtr;
/* check if we own this monitor */
/* We don't have to worry about stale values here, as any stale value */
/* will fail this check. */
if (IS_FAT_LOCK(lockword)) {
lr = GET_FAT_LOCK(lockword);
if (lr->owner != t) {
exceptions_throw_illegalmonitorstateexception();
/* it's a thin lock */
if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
exceptions_throw_illegalmonitorstateexception();
/* inflate this lock */
/* waiting requires the fat-lock machinery (waiter list, condvar) */
lr = lock_hashtable_get_lock_record(t, o);
lock_record_enter(t, lr);
lock_inflate(t, o, lr);
/* { the thread t owns the fat lock record lr on the object o } */
lock_record_wait(t, lr, millis, nanos);
1276 /* lock_record_notify **********************************************************
1278 Notify one thread or all threads waiting on the given lock record.
1281 t............the current thread
1282 lr...........the lock record
1283 one..........if true, only notify one thread
1286 The current thread must be the owner of the lock record.
1287 This is NOT checked by this function!
1289 *******************************************************************************/
static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
lock_waiter_t *waiter;
threadobject *waitingthread;
/* { the thread t owns the fat lock record lr on the object o } */
/* for each waiter: */
for (waiter = lr->waiters; waiter; waiter = waiter->next) {
/* signal the waiting thread */
waitingthread = waiter->waiter;
/* the waiter's own mutex serializes against its sleep/wake logic; */
/* `signaled` is set even if it is not yet sleeping, so the wakeup */
/* is not lost                                                     */
pthread_mutex_lock(&waitingthread->waitmutex);
if (waitingthread->sleeping)
pthread_cond_signal(&waitingthread->waitcond);
waitingthread->signaled = true;
pthread_mutex_unlock(&waitingthread->waitmutex);
/* if we should only wake one, we are done */
1320 /* lock_monitor_notify *********************************************************
1322 Notify one thread or all threads waiting on the given object.
1325 t............the current thread
1326 o............the object
1327 one..........if true, only notify one thread
1330 The current thread must be the owner of the object's monitor.
1332 *******************************************************************************/
static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
lockword = (ptrint) o->monitorPtr;
/* check if we own this monitor */
/* We don't have to worry about stale values here, as any stale value */
/* will fail this check. */
if (IS_FAT_LOCK(lockword)) {
lr = GET_FAT_LOCK(lockword);
if (lr->owner != t) {
exceptions_throw_illegalmonitorstateexception();
/* it's a thin lock */
if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
exceptions_throw_illegalmonitorstateexception();
/* inflate this lock */
/* notification requires the fat-lock waiter list */
lr = lock_hashtable_get_lock_record(t, o);
lock_record_enter(t, lr);
lock_inflate(t, o, lr);
/* { the thread t owns the fat lock record lr on the object o } */
lock_record_notify(t, lr, one);
1375 /*============================================================================*/
1376 /* INQUIRY FUNCIONS */
1377 /*============================================================================*/
1380 /* lock_is_held_by_current_thread **********************************************
1382 Return true if the current thread owns the monitor of the given object.
1385 o............the object
1388 true, if the current thread holds the lock of this object.
1390 *******************************************************************************/
bool lock_is_held_by_current_thread(java_objectheader *o)
/* check if we own this monitor */
/* We don't have to worry about stale values here, as any stale value */
/* will fail this check. */
lockword = (ptrint) o->monitorPtr;
if (IS_FAT_LOCK(lockword)) {
/* it's a fat lock */
lr = GET_FAT_LOCK(lockword);
return (lr->owner == t);
/* it's a thin lock */
/* owner iff the TID field matches, regardless of recursion count */
return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1421 /*============================================================================*/
1422 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1423 /*============================================================================*/
1426 /* lock_wait_for_object ********************************************************
1428 Wait for the given object.
1431 o............the object
1432 millis.......milliseconds to wait
1433 nanos........nanoseconds to wait
1435 *******************************************************************************/
1437 void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
1439 threadobject *thread;
1441 thread = THREADOBJECT;
1443 lock_monitor_wait(thread, o, millis, nanos);
1447 /* lock_notify_object **********************************************************
1449 Notify one thread waiting on the given object.
1452 o............the object
1454 *******************************************************************************/
1456 void lock_notify_object(java_objectheader *o)
1458 threadobject *thread;
1460 thread = THREADOBJECT;
1462 lock_monitor_notify(thread, o, true);
1466 /* lock_notify_all_object ******************************************************
1468 Notify all threads waiting on the given object.
1471 o............the object
1473 *******************************************************************************/
1475 void lock_notify_all_object(java_objectheader *o)
1477 threadobject *thread;
1479 thread = THREADOBJECT;
1481 lock_monitor_notify(thread, o, false);
1486 * These are local overrides for various environment variables in Emacs.
1487 * Please do not remove this and leave it at the end of the file, where
1488 * Emacs will automagically detect them.
1489 * ---------------------------------------------------------------------
1492 * indent-tabs-mode: t
1496 * vim:noexpandtab:sw=4:ts=4: