1 /* src/threads/native/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
4 C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5 E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6 J. Wenninger, Institut f. Computersprachen - TU Wien
8 This file is part of CACAO.
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2, or (at
13 your option) any later version.
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 $Id: threads.c 4903 2006-05-11 12:48:43Z edwin $
40 #include "mm/memory.h"
42 #include "threads/native/lock.h"
43 #include "threads/native/threads.h"
45 #include "vm/global.h"
46 #include "vm/exceptions.h"
47 #include "vm/stringlocal.h"
50 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
54 /* includes for atomic instructions: */
56 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
57 #include "threads/native/generic-primitives.h"
59 #include "machine-instr.h"
62 #if defined(ENABLE_JVMTI)
63 #include "native/jvmti/cacaodbg.h"
/* NOTE(review): this listing appears truncated — the #else/#endif lines of
   the LOCK_VERBOSE conditional (orig. lines 75/77) are missing. Code kept
   byte-identical; comments only added. */
67 /******************************************************************************/
68 /* DEBUGGING MACROS */
69 /******************************************************************************/
71 /* #define LOCK_VERBOSE */
73 #if defined(LOCK_VERBOSE)
74 #define LOCK_LOG(args) do { printf args; fflush(stdout); } while (0)
76 #define LOCK_LOG(args)
80 /******************************************************************************/
82 /******************************************************************************/
84 /* number of lock records in the first pool allocated for a thread */
85 #define LOCK_INITIAL_LOCK_RECORDS 8
87 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
/* hash an object by casting its reference (address) to an integer */
89 #define LOCK_HASH(obj) ((ptrint)(obj))
/* CAS wrapper that returns the value found at *address before the swap */
91 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
92 ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
94 /* CAUTION: oldvalue is evaluated twice! */
95 #define COMPARE_AND_SWAP_SUCCEEDS(address, oldvalue, newvalue) \
96 (compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)) == (long)(oldvalue))
99 /******************************************************************************/
100 /* MACROS FOR THE FLAT LOCK CONTENTION BIT */
101 /******************************************************************************/
/* The FLC bit lives in the object header's hdrflags and marks that some
   thread is contending on this object's thin lock. */
103 #define LOCK_SET_FLC_BIT(obj) ((obj)->hdrflags |= HDRFLAG_FLC)
104 #define LOCK_CLEAR_FLC_BIT(obj) ((obj)->hdrflags &= ~ HDRFLAG_FLC)
105 #define LOCK_TEST_FLC_BIT(obj) ((obj)->hdrflags & HDRFLAG_FLC)
/* NOTE(review): in this listing the closing `*&#47;` of the explanatory comment
   below (orig. line ~144) and the #else/#endif of the SIZEOF_VOID_P
   conditional (orig. lines 148/150) are missing. Code kept byte-identical;
   comments only added. */
108 /******************************************************************************/
109 /* MACROS FOR THIN/FAT LOCKS */
110 /******************************************************************************/
112 /* We use a variant of the tasuki locks described in the paper
114 * Tamiya Onodera, Kiyokuni Kawachiya
115 * A Study of Locking Objects with Bimodal Fields
116 * Proceedings of the ACM OOPSLA '99, pp. 223-237
119 * The underlying thin locks are a variant of the thin locks described in
121 * Bacon, Konuru, Murthy, Serrano
122 * Thin Locks: Featherweight Synchronization for Java
123 * Proceedings of the ACM Conference on Programming Language Design and
124 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
127 * In thin lock mode the lockword (monitorPtr) looks like this:
129 * ,----------------------,-----------,---,
130 * | thread ID | count | 0 |
131 * `----------------------'-----------'---´
133 * thread ID......the 'index' of the owning thread, or 0
134 * count..........number of times the lock has been entered minus 1
135 * 0..............the shape bit is 0 in thin lock mode
137 * In fat lock mode it is basically a lock_record_t *:
139 * ,----------------------------------,---,
140 * | lock_record_t * (without LSB) | 1 |
141 * `----------------------------------'---´
143 * 1..............the shape bit is 1 in fat lock mode
146 #if SIZEOF_VOID_P == 8
147 #define THIN_LOCK_WORD_SIZE 64
149 #define THIN_LOCK_WORD_SIZE 32
152 #define THIN_LOCK_SHAPE_BIT 0x01
154 #define THIN_UNLOCKED 0
156 #define THIN_LOCK_COUNT_SHIFT 1
157 #define THIN_LOCK_COUNT_SIZE 8
158 #define THIN_LOCK_COUNT_INCR (1 << THIN_LOCK_COUNT_SHIFT)
159 #define THIN_LOCK_COUNT_MAX ((1 << THIN_LOCK_COUNT_SIZE) - 1)
160 #define THIN_LOCK_COUNT_MASK (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
/* thread ID occupies the bits above the count field */
162 #define THIN_LOCK_TID_SHIFT (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
163 #define THIN_LOCK_TID_SIZE (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
/* the LSB (shape bit) distinguishes thin (0) from fat (1) lockwords */
165 #define IS_THIN_LOCK(lockword) (!((lockword) & THIN_LOCK_SHAPE_BIT))
166 #define IS_FAT_LOCK(lockword) ((lockword) & THIN_LOCK_SHAPE_BIT)
168 #define GET_FAT_LOCK(lockword) ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
169 #define MAKE_FAT_LOCK(ptr) ((ptrint)(ptr) | THIN_LOCK_SHAPE_BIT)
/* strip the recursion-count bits, leaving TID and shape bit */
171 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
174 /******************************************************************************/
175 /* GLOBAL VARIABLES */
176 /******************************************************************************/
178 /* global lock record pool list header */
179 lock_record_pool_t *lock_global_pool;
181 /* mutex for synchronizing access to the global pool */
182 pthread_mutex_t lock_global_pool_lock;
184 /* hashtable mapping objects to lock records */
185 static lock_hashtable_t lock_hashtable;
188 /******************************************************************************/
190 /******************************************************************************/
/* forward declarations of internal (file-local) functions */
192 static void lock_hashtable_init(void);
193 static lock_record_t * lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o);
195 static lock_record_t * lock_record_alloc(threadobject *t);
197 static void lock_record_enter(threadobject *t, lock_record_t *lr);
198 static void lock_record_exit(threadobject *t, lock_record_t *lr);
199 static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
200 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
203 /*============================================================================*/
204 /* INITIALIZATION OF DATA STRUCTURES */
205 /*============================================================================*/
208 /* lock_init *******************************************************************
210 Initialize global data for locking.
212 *******************************************************************************/
216 pthread_mutex_init(&lock_global_pool_lock, NULL);
218 lock_hashtable_init();
222 /* lock_record_init ************************************************************
224 Initialize a lock record.
227 r............the lock record to initialize
228 t............will become the owner
230 *******************************************************************************/
/* NOTE(review): original lines 233-241 (presumably the opening brace and
   initialization of the record's other fields) are missing from this
   listing; only the mutex initialization is visible. Code kept
   byte-identical, comments only. */
232 static void lock_record_init(lock_record_t *r, threadobject *t)
242 pthread_mutex_init(&(r->mutex), NULL);
246 /* lock_init_execution_env *****************************************************
248 Initialize the execution environment for a thread.
251 thread.......the thread
253 *******************************************************************************/
255 void lock_init_execution_env(threadobject *thread)
257 thread->ee.firstfree = NULL;
258 thread->ee.lockrecordpools = NULL;
259 thread->ee.lockrecordcount = 0;
264 /* lock_pre_compute_thinlock ***************************************************
266 Pre-compute the thin lock value for a thread index.
269 index........the thead index (>= 1)
272 the thin lock value for this thread index
274 *******************************************************************************/
276 ptrint lock_pre_compute_thinlock(s4 index)
278 return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
283 /*============================================================================*/
284 /* LOCK RECORD MANAGEMENT */
285 /*============================================================================*/
288 /* lock_record_alloc_new_pool **************************************************
290 Get a new lock record pool from the memory allocator.
293 thread.......the thread that will own the lock records
294 size.........number of lock records in the pool to allocate
297 the new lock record pool, with initialized lock records
299 *******************************************************************************/
/* NOTE(review): several original lines are missing from this listing (the
   loop-variable declaration, closing braces, and the final return of the
   pool). Code kept byte-identical, comments only. */
301 static lock_record_pool_t *lock_record_alloc_new_pool(threadobject *thread, int size)
304 lock_record_pool_t *pool;
306 /* get the pool from the memory allocator */
308 pool = mem_alloc(sizeof(lock_record_pool_header_t)
309 + sizeof(lock_record_t) * size);
311 /* initialize the pool header */
313 pool->header.size = size;
315 /* initialize the individual lock records */
/* chain each record to the next so the pool forms a free list */
317 for (i=0; i<size; i++) {
318 lock_record_init(&pool->lr[i], thread);
320 pool->lr[i].nextfree = &pool->lr[i+1];
323 /* terminate free list */
325 pool->lr[i-1].nextfree = NULL;
331 /* lock_record_alloc_pool ******************************************************
333 Allocate a lock record pool. The pool is either taken from the global free
334 list or requested from the memory allocator.
337 thread.......the thread that will own the lock records
338 size.........number of lock records in the pool to allocate
341 the new lock record pool, with initialized lock records
343 *******************************************************************************/
/* NOTE(review): this listing is missing several original lines (the loop
   variable declaration, the return of the recycled pool, closing braces).
   Code kept byte-identical, comments only. */
345 static lock_record_pool_t *lock_record_alloc_pool(threadobject *t, int size)
347 pthread_mutex_lock(&lock_global_pool_lock);
349 if (lock_global_pool) {
351 lock_record_pool_t *pool;
353 /* pop a pool from the global freelist */
355 pool = lock_global_pool;
356 lock_global_pool = pool->header.next;
358 pthread_mutex_unlock(&lock_global_pool_lock);
360 /* re-initialize owner and freelist chaining */
362 for (i=0; i < pool->header.size; i++) {
363 pool->lr[i].owner = NULL;
364 pool->lr[i].nextfree = &pool->lr[i+1];
366 pool->lr[i-1].nextfree = NULL;
/* global freelist was empty: release the mutex and fall back to the
   memory allocator */
371 pthread_mutex_unlock(&lock_global_pool_lock);
373 /* we have to get a new pool from the allocator */
375 return lock_record_alloc_new_pool(t, size);
379 /* lock_record_free_pools ******************************************************
381 Free the lock record pools in the given linked list. The pools are inserted
382 into the global freelist.
385 pool.........list header
387 *******************************************************************************/
389 void lock_record_free_pools(lock_record_pool_t *pool)
391 lock_record_pool_header_t *last;
/* deliberately disabled: see the XXX comment on the next line */
393 assert(false); /* XXX this function does not match the new locking */
394 /* algorithm. We must find another way to free */
395 /* unused lock records. */
400 pthread_mutex_lock(&lock_global_pool_lock);
402 /* find the last pool in the list */
/* NOTE(review): the loop header for this traversal (orig. line ~405) is
   missing from this listing; only the initializer and step are visible */
404 last = &pool->header;
406 last = &last->next->header;
408 /* chain it to the lock_global_pool freelist */
410 last->next = lock_global_pool;
412 /* insert the freed pools into the freelist */
414 lock_global_pool = pool;
416 pthread_mutex_unlock(&lock_global_pool_lock);
420 /* lock_record_alloc ***********************************************************
422 Allocate a lock record which is owned by the current thread.
425 t............the current thread
427 *******************************************************************************/
/* NOTE(review): missing original lines here include the record variable
   declaration, the fast-path check of the per-thread freelist, and the
   final return. Code kept byte-identical, comments only. */
429 static lock_record_t *lock_record_alloc(threadobject *t)
438 lock_record_pool_t *pool;
/* freelist empty: grow geometrically — double the record count, or
   start with LOCK_INITIAL_LOCK_RECORDS on the first allocation */
442 poolsize = t->ee.lockrecordcount ? t->ee.lockrecordcount * 2
443 : LOCK_INITIAL_LOCK_RECORDS;
444 pool = lock_record_alloc_pool(t, poolsize);
446 /* add it to our per-thread pool list */
448 pool->header.next = t->ee.lockrecordpools;
449 t->ee.lockrecordpools = pool;
450 t->ee.lockrecordcount += pool->header.size;
452 /* take the first record from the pool */
456 /* pop the record from the freelist */
458 t->ee.firstfree = r->nextfree;
460 r->nextfree = NULL; /* in order to find invalid uses of nextfree */
467 /* lock_record_recycle *********************************************************
469 Recycle the given lock record. It will be inserted in the appropriate
473 t............the owner
474 r............lock record to recycle
476 *******************************************************************************/
478 static inline void lock_record_recycle(threadobject *t, lock_record_t *r)
/* sanity: a recycled record must be unowned and unchained */
482 assert(r->owner == NULL);
483 assert(r->nextfree == NULL);
/* NOTE(review): the line pushing r onto t->ee.firstfree (orig. ~486)
   is missing from this listing */
485 r->nextfree = t->ee.firstfree;
491 /*============================================================================*/
492 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
493 /*============================================================================*/
496 /* lock_hashtable_init *********************************************************
498 Initialize the global hashtable mapping objects to lock records.
500 *******************************************************************************/
502 static void lock_hashtable_init(void)
504 pthread_mutex_init(&(lock_hashtable.mutex), NULL);
506 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
507 lock_hashtable.entries = 0;
508 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
509 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
513 /* lock_hashtable_grow *********************************************************
515 Grow the lock record hashtable to about twice its current size and
518 *******************************************************************************/
520 /* must be called with hashtable mutex locked */
/* NOTE(review): this listing is missing the declarations of the index/hash
   locals and the inner loop walking each bucket's hash chain. Code kept
   byte-identical, comments only. */
521 static void lock_hashtable_grow(void)
525 lock_record_t **oldtable;
526 lock_record_t **newtable;
533 /* allocate a new table */
535 oldsize = lock_hashtable.size;
536 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
538 LOCK_LOG(("growing lock hashtable to size %d\n", newsize));
540 oldtable = lock_hashtable.ptr;
541 newtable = MNEW(lock_record_t *, newsize);
542 MZERO(newtable, lock_record_t *, newsize);
544 /* rehash the entries */
546 for (i=0; i<oldsize; ++i) {
/* re-insert each record at the head of its new bucket */
551 h = LOCK_HASH(lr->obj);
552 newslot = h % newsize;
554 lr->hashlink = newtable[newslot];
555 newtable[newslot] = lr;
561 /* replace the old table */
563 lock_hashtable.ptr = newtable;
564 lock_hashtable.size = newsize;
566 MFREE(oldtable, lock_record_t *, oldsize);
570 /* lock_hashtable_get_lock_record **********************************************
572 Find the lock record for the given object. If it does not exists, yet,
573 create it and enter it in the hashtable.
576 t.................the current thread
577 o.................the object to look up
580 the lock record to use for this object
582 *******************************************************************************/
/* NOTE(review): missing original lines include local declarations, the
   hash-chain search loop, and the returns of the found/new record. Code
   kept byte-identical, comments only. */
584 static lock_record_t *lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o)
590 lockword = (ptrint) o->monitorPtr;
/* fast path: lock is already inflated, the lockword holds the record */
592 if (IS_FAT_LOCK(lockword)) {
593 return GET_FAT_LOCK(lockword);
596 /* lock the hashtable */
598 pthread_mutex_lock(&(lock_hashtable.mutex));
600 /* lookup the lock record in the hashtable */
602 slot = LOCK_HASH(o) % lock_hashtable.size;
603 lr = lock_hashtable.ptr[slot];
606 pthread_mutex_unlock(&(lock_hashtable.mutex));
613 /* not found, we must create a new one */
615 lr = lock_record_alloc(t);
617 LOCK_LOG(("thread %d allocated for %p new lr %p\n",
618 t->index, (void*) o, (void*) lr));
620 /* enter it in the hashtable */
622 lr->hashlink = lock_hashtable.ptr[slot];
623 lock_hashtable.ptr[slot] = lr;
624 lock_hashtable.entries++;
626 /* check whether the hash should grow */
/* grow when entries exceed 4/3 of the bucket count, keeping chains short */
628 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
629 lock_hashtable_grow();
632 /* unlock the hashtable */
634 pthread_mutex_unlock(&(lock_hashtable.mutex));
636 /* return the new lock record */
642 /*============================================================================*/
643 /* OBJECT LOCK INITIALIZATION */
644 /*============================================================================*/
647 /* lock_init_object_lock *******************************************************
649 Initialize the monitor pointer of the given object. The monitor gets
650 initialized to an unlocked state.
652 *******************************************************************************/
654 void lock_init_object_lock(java_objectheader *o)
658 o->monitorPtr = (lock_record_t *) THIN_UNLOCKED;
659 LOCK_CLEAR_FLC_BIT(o);
663 /* lock_get_initial_lock_word **************************************************
665 Returns the initial (unlocked) lock word. The pointer is
666 required in the code generator to set up a virtual
667 java_objectheader for code patch locking.
669 *******************************************************************************/
671 lock_record_t *lock_get_initial_lock_word(void)
673 return (lock_record_t *) THIN_UNLOCKED;
678 /*============================================================================*/
679 /* LOCKING ALGORITHM */
680 /*============================================================================*/
683 /* lock_record_enter ***********************************************************
685 Enter the lock represented by the given lock record.
688 t.................the current thread
689 lr................the lock record
691 *******************************************************************************/
693 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
695 pthread_mutex_lock(&(lr->mutex));
700 /* lock_record_exit ************************************************************
702 Release the lock represented by the given lock record.
705 t.................the current thread
706 lr................the lock record
709 The current thread must own the lock represented by this lock record.
710 This is NOT checked by this function!
712 *******************************************************************************/
714 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
717 pthread_mutex_unlock(&(lr->mutex));
721 /* lock_inflate ****************************************************************
723 Inflate the lock of the given object. This may only be called by the
724 owner of the monitor of the object.
727 t............the current thread
728 o............the object of which to inflate the lock
729 lr...........the lock record to install. The current thread must
730 own the lock of this lock record!
733 The current thread must be the owner of this object's monitor AND
734 of the lock record's lock!
736 *******************************************************************************/
/* NOTE(review): missing original lines include local declarations, the
   early return of the already-fat branch and closing braces. Code kept
   byte-identical, comments only. */
738 static void lock_inflate(threadobject *t, java_objectheader *o, lock_record_t *lr)
742 /* get the current lock count */
744 lockword = (ptrint) o->monitorPtr;
/* already fat: it must be the very record we were asked to install */
746 if (IS_FAT_LOCK(lockword)) {
747 assert(GET_FAT_LOCK(lockword) == lr);
/* still thin: it must be our own thin lock (count bits ignored) */
750 assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );
752 /* copy the count from the thin lock */
754 lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
757 LOCK_LOG(("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
758 t->index, (void*) o, (long)o->monitorPtr, (int)lr->count));
760 /* clear flat-lock-contention bit */
762 LOCK_CLEAR_FLC_BIT(o);
764 /* notify waiting objects */
766 lock_record_notify(t, lr, false);
/* publish the fat lock: store the record pointer with the shape bit set */
770 o->monitorPtr = (lock_record_t *) MAKE_FAT_LOCK(lr);
774 /* lock_monitor_enter **********************************************************
776 Acquire the monitor of the given object. If the current thread already
777 owns the monitor, the lock counter is simply increased.
779 This function blocks until it can acquire the monitor.
782 t............the current thread
783 o............the object of which to enter the monitor
786 true.........the lock has been successfully acquired
787 false........an exception has been thrown
789 *******************************************************************************/
/* NOTE(review): this listing is heavily truncated — local declarations,
   several returns, memory barriers and brace structure are missing. Code
   kept byte-identical, comments only. */
791 bool lock_monitor_enter(java_objectheader *o)
794 /* CAUTION: This code assumes that ptrint is unsigned! */
/* entering a monitor on a null reference throws NPE */
799 exceptions_throw_nullpointerexception();
805 thinlock = t->thinlock;
807 /* most common case: try to thin-lock an unlocked object */
809 if ((lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) == THIN_UNLOCKED) {
810 /* success. we locked it */
811 /* The Java Memory Model requires a memory barrier here: */
816 /* next common case: recursive lock with small recursion count */
817 /* We don't have to worry about stale values here, as any stale value */
818 /* will indicate another thread holding the lock (or an inflated lock) */
820 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
821 /* we own this monitor */
822 /* check the current recursion count */
/* XOR leaves only the count bits; compare against the maximum */
824 if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
826 /* the recursion count is low enough */
828 o->monitorPtr = (lock_record_t *) (lockword + THIN_LOCK_COUNT_INCR);
830 /* success. we locked it */
836 /* recursion count overflow */
/* the count no longer fits in the thin word: inflate to a fat lock */
838 lr = lock_hashtable_get_lock_record(t, o);
839 lock_record_enter(t, lr);
840 lock_inflate(t, o, lr);
847 /* the lock is either contented or fat */
852 if (IS_FAT_LOCK(lockword)) {
854 lr = GET_FAT_LOCK(lockword);
856 /* check for recursive entering */
857 if (lr->owner == t) {
862 /* acquire the mutex of the lock record */
864 lock_record_enter(t, lr);
/* a freshly acquired fat lock must have a zero recursion count */
866 assert(lr->count == 0);
871 /****** inflation path ******/
873 /* first obtain the lock record for this object */
875 lr = lock_hashtable_get_lock_record(t, o);
877 #if defined(ENABLE_JVMTI)
878 /* Monitor Contended Enter */
879 jvmti_MonitorContendedEntering(false, o);
881 /* enter the monitor */
883 lock_record_enter(t, lr);
886 #if defined(ENABLE_JVMTI)
887 /* Monitor Contended Entered */
888 jvmti_MonitorContendedEntering(true, o);
/* loop until the object is no longer thin-locked by another thread */
893 while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {
894 /* Set the flat lock contention bit to let the owning thread */
895 /* know that we want to be notified of unlocking. */
899 LOCK_LOG(("thread %d set flc bit on %p lr %p\n",
900 t->index, (void*) o, (void*) lr));
902 /* try to lock the object */
904 if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
905 /* we can inflate the lock ourselves */
906 LOCK_LOG(("thread %d inflating lock of %p to lr %p\n",
907 t->index, (void*) o, (void*) lr));
908 lock_inflate(t, o, lr);
911 /* wait until another thread sees the flc bit and notifies us of unlocking */
912 LOCK_LOG(("thread %d waiting for notification on %p lr %p\n",
913 t->index, (void*) o, (void*) lr));
914 lock_record_wait(t, lr, 0, 0);
918 /* we own the inflated lock now */
925 /* lock_monitor_exit ***********************************************************
927 Decrement the counter of a (currently owned) monitor. If the counter
928 reaches zero, release the monitor.
930 If the current thread is not the owner of the monitor, an
931 IllegalMonitorState exception is thrown.
934 t............the current thread
935 o............the object of which to exit the monitor
938 true.........everything ok,
939 false........an exception has been thrown
941 *******************************************************************************/
/* NOTE(review): this listing is heavily truncated — local declarations,
   the return statements, memory barriers and brace structure are missing.
   Code kept byte-identical, comments only. */
943 bool lock_monitor_exit(java_objectheader *o)
/* exiting a monitor on a null reference throws NPE */
950 exceptions_throw_nullpointerexception();
956 /* We don't have to worry about stale values here, as any stale value */
957 /* will indicate that we don't own the lock. */
959 lockword = (ptrint) o->monitorPtr;
960 thinlock = t->thinlock;
962 /* most common case: we release a thin lock that we hold once */
964 if (lockword == thinlock) {
965 /* memory barrier for Java Memory Model */
967 o->monitorPtr = THIN_UNLOCKED;
968 /* memory barrier for thin locking */
971 /* check if there has been a flat lock contention on this object */
973 if (LOCK_TEST_FLC_BIT(o)) {
976 LOCK_LOG(("thread %d saw flc bit on %p %s\n",
977 t->index, (void*) o, o->vftbl->class->name->text));
979 /* there has been a contention on this thin lock */
981 lr = lock_hashtable_get_lock_record(t, o);
983 LOCK_LOG(("thread %d for %p got lr %p\n",
984 t->index, (void*) o, (void*) lr));
/* re-check the FLC bit under the record's mutex before notifying */
986 lock_record_enter(t, lr);
988 if (LOCK_TEST_FLC_BIT(o)) {
989 /* notify a thread that it can try to inflate the lock now */
991 lock_record_notify(t, lr, true);
994 lock_record_exit(t, lr);
1000 /* next common case: we release a recursive lock, count > 0 */
1002 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
1003 o->monitorPtr = (lock_record_t *) (lockword - THIN_LOCK_COUNT_INCR);
1007 /* either the lock is fat, or we don't hold it at all */
1009 if (IS_FAT_LOCK(lockword)) {
1013 lr = GET_FAT_LOCK(lockword);
1015 /* check if we own this monitor */
1016 /* We don't have to worry about stale values here, as any stale value */
1017 /* will be != t and thus fail this check. */
1019 if (lr->owner != t) {
1020 exceptions_throw_illegalmonitorstateexception();
1024 /* { the current thread `t` owns the lock record `lr` on object `o` } */
1026 if (lr->count != 0) {
1027 /* we had locked this one recursively. just decrement, it will */
1028 /* still be locked. */
1033 /* unlock this lock record */
1036 pthread_mutex_unlock(&(lr->mutex));
1041 /* legal thin lock cases have been handled above, so this is an error */
1043 exceptions_throw_illegalmonitorstateexception();
1049 /* lock_record_add_waiter ******************************************************
1051 Add a thread to the list of waiting threads of a lock record.
1054 lr...........the lock record
1055 thread.......the thread to add
1057 *******************************************************************************/
1059 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
1061 lock_waiter_t *waiter;
1063 /* allocate a waiter data structure */
1065 waiter = NEW(lock_waiter_t);
1067 waiter->waiter = thread;
1068 waiter->next = lr->waiters;
1070 lr->waiters = waiter;
1074 /* lock_record_remove_waiter ***************************************************
1076 Remove a thread from the list of waiting threads of a lock record.
1079 lr...........the lock record
1080 t............the current thread
1083 The current thread must be the owner of the lock record.
1085 *******************************************************************************/
/* NOTE(review): missing original lines include the declaration of w, the
   unlink assignment (*link = w->next), the successful-return path and the
   link-advancing step. Code kept byte-identical, comments only. */
1087 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
1089 lock_waiter_t **link;
/* walk the singly linked waiter list via a pointer-to-link so the
   matching cell can be unlinked in place */
1092 link = &(lr->waiters);
1094 while ((w = *link)) {
1095 if (w->waiter == thread) {
1098 /* free the waiter data structure */
1100 FREE(w, lock_waiter_t);
1108 /* this should never happen */
1110 vm_abort("lock_record_remove_waiter: waiting thread not found in list of waiters\n");
1114 /* lock_record_wait ************************************************************
1116 Wait on a lock record for a given (maximum) amount of time.
1119 t............the current thread
1120 lr...........the lock record
1121 millis.......milliseconds of timeout
1122 nanos........nanoseconds of timeout
1125 The current thread must be the owner of the lock record.
1126 This is NOT checked by this function!
1128 *******************************************************************************/
/* NOTE(review): missing original lines include the declaration of
   lockcount, a line before lock_record_exit, and the `if` guarding the
   interrupted-exception throw. Code kept byte-identical, comments only. */
1130 static void lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1133 bool wasinterrupted;
1135 /* { the thread t owns the fat lock record lr on the object o } */
1137 /* register us as waiter for this object */
1139 lock_record_add_waiter(lr, thread);
1141 /* remember the old lock count */
1143 lockcount = lr->count;
1145 /* unlock this record */
1148 lock_record_exit(thread, lr);
1150 /* wait until notified/interrupted/timed out */
1152 wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);
1154 /* re-enter the monitor */
1156 lock_record_enter(thread, lr);
1158 /* remove us from the list of waiting threads */
1160 lock_record_remove_waiter(lr, thread);
1162 /* restore the old lock count */
1164 lr->count = lockcount;
1166 /* if we have been interrupted, throw the appropriate exception */
1169 exceptions_throw_interruptedexception();
1173 /* lock_monitor_wait ***********************************************************
1175 Wait on an object for a given (maximum) amount of time.
1178 t............the current thread
1179 o............the object
1180 millis.......milliseconds of timeout
1181 nanos........nanoseconds of timeout
1184 The current thread must be the owner of the object's monitor.
1186 *******************************************************************************/
/* NOTE(review): missing original lines include local declarations, the
   early returns after throwing IllegalMonitorStateException, and brace
   structure. Code kept byte-identical, comments only. */
1188 static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nanos)
1193 lockword = (ptrint) o->monitorPtr;
1195 /* check if we own this monitor */
1196 /* We don't have to worry about stale values here, as any stale value */
1197 /* will fail this check. */
1199 if (IS_FAT_LOCK(lockword)) {
1201 lr = GET_FAT_LOCK(lockword);
1203 if (lr->owner != t) {
1204 exceptions_throw_illegalmonitorstateexception();
1209 /* it's a thin lock */
1211 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1212 exceptions_throw_illegalmonitorstateexception();
1216 /* inflate this lock */
/* waiting requires a fat lock, since waiters hang off the lock record */
1217 lr = lock_hashtable_get_lock_record(t, o);
1218 lock_record_enter(t, lr);
1219 lock_inflate(t, o, lr);
1222 /* { the thread t owns the fat lock record lr on the object o } */
1224 lock_record_wait(t, lr, millis, nanos);
1228 /* lock_record_notify **********************************************************
1230 Notify one thread or all threads waiting on the given lock record.
1233 t............the current thread
1234 lr...........the lock record
1235 one..........if true, only notify one thread
1238 The current thread must be the owner of the lock record.
1239 This is NOT checked by this function!
1241 *******************************************************************************/
/* NOTE(review): missing original lines include the brace structure and the
   early exit taken when `one` is true after the first notification. Code
   kept byte-identical, comments only. */
1243 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1245 lock_waiter_t *waiter;
1246 threadobject *waitingthread;
1248 /* { the thread t owns the fat lock record lr on the object o } */
1250 /* for each waiter: */
1252 for (waiter = lr->waiters; waiter; waiter = waiter->next) {
1254 /* signal the waiting thread */
1256 waitingthread = waiter->waiter;
/* signal under the waiter's own mutex; only cond-signal if it is
   actually sleeping, but always record the notification */
1258 pthread_mutex_lock(&waitingthread->waitmutex);
1259 if (waitingthread->sleeping)
1260 pthread_cond_signal(&waitingthread->waitcond);
1261 waitingthread->signaled = true;
1262 pthread_mutex_unlock(&waitingthread->waitmutex);
1264 /* if we should only wake one, we are done */
1272 /* lock_monitor_notify *********************************************************
1274 Notify one thread or all threads waiting on the given object.
1277 t............the current thread
1278 o............the object
1279 one..........if true, only notify one thread
1282 The current thread must be the owner of the object's monitor.
1284 *******************************************************************************/
/* NOTE(review): missing original lines include local declarations, the
   early returns after throwing IllegalMonitorStateException, and brace
   structure. Code kept byte-identical, comments only. */
1286 static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
1291 lockword = (ptrint) o->monitorPtr;
1293 /* check if we own this monitor */
1294 /* We don't have to worry about stale values here, as any stale value */
1295 /* will fail this check. */
1297 if (IS_FAT_LOCK(lockword)) {
1299 lr = GET_FAT_LOCK(lockword);
1301 if (lr->owner != t) {
1302 exceptions_throw_illegalmonitorstateexception();
1307 /* it's a thin lock */
1309 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1310 exceptions_throw_illegalmonitorstateexception();
1314 /* inflate this lock */
/* notification requires a fat lock, since waiters hang off the record */
1315 lr = lock_hashtable_get_lock_record(t, o);
1316 lock_record_enter(t, lr);
1317 lock_inflate(t, o, lr);
1320 /* { the thread t owns the fat lock record lr on the object o } */
1322 lock_record_notify(t, lr, one);
1327 /*============================================================================*/
1328 /* INQUIRY FUNCTIONS */
1329 /*============================================================================*/
1332 /* lock_is_held_by_current_thread **********************************************
1334 Return true if the current thread owns the monitor of the given object.
1337 o............the object
1340 true, if the current thread holds the lock of this object.
1342 *******************************************************************************/
/* NOTE(review): missing original lines include local declarations (t, lr,
   lockword) and the assignment of the current thread to t. Code kept
   byte-identical, comments only. */
1344 bool lock_is_held_by_current_thread(java_objectheader *o)
1349 /* check if we own this monitor */
1350 /* We don't have to worry about stale values here, as any stale value */
1351 /* will fail this check. */
1353 lockword = (ptrint) o->monitorPtr;
1356 if (IS_FAT_LOCK(lockword)) {
1359 /* it's a fat lock */
1360 lr = GET_FAT_LOCK(lockword);
1362 return (lr->owner == t);
1365 /* it's a thin lock */
1367 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1373 /*============================================================================*/
1374 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1375 /*============================================================================*/
1378 /* lock_wait_for_object ********************************************************
1380 Wait for the given object.
1383 o............the object
1384 millis.......milliseconds to wait
1385 nanos........nanoseconds to wait
1387 *******************************************************************************/
1389 void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
1391 threadobject *thread;
1393 thread = THREADOBJECT;
1395 lock_monitor_wait(thread, o, millis, nanos);
1399 /* lock_notify_object **********************************************************
1401 Notify one thread waiting on the given object.
1404 o............the object
1406 *******************************************************************************/
1408 void lock_notify_object(java_objectheader *o)
1410 threadobject *thread;
1412 thread = THREADOBJECT;
1414 lock_monitor_notify(thread, o, true);
1418 /* lock_notify_all_object ******************************************************
1420 Notify all threads waiting on the given object.
1423 o............the object
1425 *******************************************************************************/
1427 void lock_notify_all_object(java_objectheader *o)
1429 threadobject *thread;
1431 thread = THREADOBJECT;
1433 lock_monitor_notify(thread, o, false);
1438 * These are local overrides for various environment variables in Emacs.
1439 * Please do not remove this and leave it at the end of the file, where
1440 * Emacs will automagically detect them.
1441 * ---------------------------------------------------------------------
1444 * indent-tabs-mode: t
1448 * vim:noexpandtab:sw=4:ts=4: