1 /* src/threads/native/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
4 C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5 E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6 J. Wenninger, Institut f. Computersprachen - TU Wien
8 This file is part of CACAO.
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2, or (at
13 your option) any later version.
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
38 #include "mm/memory.h"
40 #include "threads/native/lock.h"
41 #include "threads/native/threads.h"
43 #include "vm/global.h"
44 #include "vm/exceptions.h"
45 #include "vm/finalizer.h"
46 #include "vm/stringlocal.h"
49 #include "vmcore/options.h"
51 #if defined(ENABLE_STATISTICS)
52 # include "vmcore/statistics.h"
55 #if defined(ENABLE_VMLOG)
56 #include <vmlog_cacao.h>
59 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
63 /* includes for atomic instructions: */
65 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
66 #include "threads/native/generic-primitives.h"
68 #include "machine-instr.h"
71 #if defined(ENABLE_JVMTI)
72 #include "native/jvmti/cacaodbg.h"
75 #if defined(ENABLE_GC_BOEHM)
76 # include "mm/boehm-gc/include/gc.h"
80 /******************************************************************************/
81 /* DEBUGGING MACROS */
82 /******************************************************************************/
84 /* #define LOCK_VERBOSE */
86 #if defined(LOCK_VERBOSE)
87 #define LOCK_LOG(args) do { printf args; fflush(stdout); } while (0)
89 #define LOCK_LOG(args)
93 /******************************************************************************/
95 /******************************************************************************/
97 /* number of lock records in the first pool allocated for a thread */
98 #define LOCK_INITIAL_LOCK_RECORDS 8
100 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
102 #define LOCK_HASH(obj) ((ptrint)(obj))
104 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
105 ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
107 /* CAUTION: oldvalue is evaluated twice! */
108 #define COMPARE_AND_SWAP_SUCCEEDS(address, oldvalue, newvalue) \
109 (compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)) == (long)(oldvalue))
112 /******************************************************************************/
113 /* MACROS FOR THE FLAT LOCK CONTENTION BIT */
114 /******************************************************************************/
116 #define LOCK_SET_FLC_BIT(obj) ((obj)->hdrflags |= HDRFLAG_FLC)
117 #define LOCK_CLEAR_FLC_BIT(obj) ((obj)->hdrflags &= ~ HDRFLAG_FLC)
118 #define LOCK_TEST_FLC_BIT(obj) ((obj)->hdrflags & HDRFLAG_FLC)
121 /******************************************************************************/
122 /* MACROS FOR THIN/FAT LOCKS */
123 /******************************************************************************/
125 /* We use a variant of the tasuki locks described in the paper
127 * Tamiya Onodera, Kiyokuni Kawachiya
128 * A Study of Locking Objects with Bimodal Fields
129 * Proceedings of the ACM OOPSLA '99, pp. 223-237
132 * The underlying thin locks are a variant of the thin locks described in
134 * Bacon, Konuru, Murthy, Serrano
135 * Thin Locks: Featherweight Synchronization for Java
136 * Proceedings of the ACM Conference on Programming Language Design and
137 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
140 * In thin lock mode the lockword (monitorPtr) looks like this:
142 * ,----------------------,-----------,---,
143 * | thread ID | count | 0 |
144 * `----------------------'-----------'---´
146 * thread ID......the 'index' of the owning thread, or 0
147 * count..........number of times the lock has been entered minus 1
148 * 0..............the shape bit is 0 in thin lock mode
150 * In fat lock mode it is basically a lock_record_t *:
152 * ,----------------------------------,---,
153 * | lock_record_t * (without LSB) | 1 |
154 * `----------------------------------'---´
156 * 1..............the shape bit is 1 in fat lock mode
159 #if SIZEOF_VOID_P == 8
160 #define THIN_LOCK_WORD_SIZE 64
162 #define THIN_LOCK_WORD_SIZE 32
165 #define THIN_LOCK_SHAPE_BIT 0x01
167 #define THIN_UNLOCKED 0
169 #define THIN_LOCK_COUNT_SHIFT 1
170 #define THIN_LOCK_COUNT_SIZE 8
171 #define THIN_LOCK_COUNT_INCR (1 << THIN_LOCK_COUNT_SHIFT)
172 #define THIN_LOCK_COUNT_MAX ((1 << THIN_LOCK_COUNT_SIZE) - 1)
173 #define THIN_LOCK_COUNT_MASK (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
175 #define THIN_LOCK_TID_SHIFT (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
176 #define THIN_LOCK_TID_SIZE (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
178 #define IS_THIN_LOCK(lockword) (!((lockword) & THIN_LOCK_SHAPE_BIT))
179 #define IS_FAT_LOCK(lockword) ((lockword) & THIN_LOCK_SHAPE_BIT)
181 #define GET_FAT_LOCK(lockword) ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
182 #define MAKE_FAT_LOCK(ptr) ((ptrint)(ptr) | THIN_LOCK_SHAPE_BIT)
184 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
187 /* global variables ***********************************************************/
189 /* hashtable mapping objects to lock records */
190 static lock_hashtable_t lock_hashtable;
193 /******************************************************************************/
195 /******************************************************************************/
197 static void lock_hashtable_init(void);
199 static void lock_record_enter(threadobject *t, lock_record_t *lr);
200 static void lock_record_exit(threadobject *t, lock_record_t *lr);
201 static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
202 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
205 /*============================================================================*/
206 /* INITIALIZATION OF DATA STRUCTURES */
207 /*============================================================================*/
210 /* lock_init *******************************************************************
212 Initialize global data for locking.
214 *******************************************************************************/
/* NOTE(review): body of lock_init() — the signature line, braces and the
   closing #endif are elided from this line-numbered listing. */
218 /* initialize lock hashtable */
220 lock_hashtable_init();
222 #if defined(ENABLE_VMLOG)
/* let the vmlog tracing facility initialize its own locking state */
223 vmlog_cacao_init_lock();
228 /* lock_pre_compute_thinlock ***************************************************
230 Pre-compute the thin lock value for a thread index.
233 index........the thread index (>= 1)
236 the thin lock value for this thread index
238 *******************************************************************************/
/* Build the thin-lock word for a thread index: the index goes into the
   high (TID) bits, recursion count is 0, and the shape bit is 0 (thin).
   NOTE(review): enclosing braces are elided from this listing. */
240 ptrint lock_pre_compute_thinlock(s4 index)
242 return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
246 /* lock_record_new *************************************************************
248 Allocate a lock record.
250 *******************************************************************************/
/* Allocate and initialize a fresh lock record on the C heap.
   NOTE(review): the declaration of `lr`, the member-initialization lines
   (object/owner/count/waiters — presumably) and the `return lr;` are
   elided from this listing — confirm against the full source. */
252 static lock_record_t *lock_record_new(void)
256 /* allocate the data structure on the C heap */
258 lr = NEW(lock_record_t);
260 #if defined(ENABLE_STATISTICS)
/* book-keeping: track total memory held by lock records */
262 size_lock_record += sizeof(lock_record_t);
265 /* initialize the members */
272 /* initialize the mutex */
274 pthread_mutex_init(&(lr->mutex), NULL);
280 /* lock_record_free ************************************************************
285 lr....lock record to free
287 *******************************************************************************/
/* Destroy a lock record's mutex and release its memory.
   Caller must guarantee the record is no longer reachable (it is removed
   from the hashtable first — see lock_record_finalizer). */
289 static void lock_record_free(lock_record_t *lr)
292 /* check the members */
300 /* destroy the mutex */
302 pthread_mutex_destroy(&(lr->mutex));
304 /* free the data structure */
306 FREE(lr, lock_record_t);
308 #if defined(ENABLE_STATISTICS)
/* book-keeping: mirror the += done in lock_record_new */
310 size_lock_record -= sizeof(lock_record_t);
315 /*============================================================================*/
316 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
317 /*============================================================================*/
319 /* lock_hashtable_init *********************************************************
321 Initialize the global hashtable mapping objects to lock records.
323 *******************************************************************************/
/* Set up the global object -> lock_record hashtable: create its mutex,
   allocate the bucket array at the initial (prime) size and zero it. */
325 static void lock_hashtable_init(void)
327 pthread_mutex_init(&(lock_hashtable.mutex), NULL);
329 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
330 lock_hashtable.entries = 0;
331 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
333 #if defined(ENABLE_STATISTICS)
335 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
/* all buckets start out as empty chains */
338 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
342 /* lock_hashtable_grow *********************************************************
344 Grow the lock record hashtable to about twice its current size and
347 *******************************************************************************/
349 /* must be called with hashtable mutex locked */
350 static void lock_hashtable_grow(void)
354 lock_record_t **oldtable;
355 lock_record_t **newtable;
362 /* allocate a new table */
364 oldsize = lock_hashtable.size;
365 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
367 LOCK_LOG(("growing lock hashtable to size %d\n", newsize));
369 oldtable = lock_hashtable.ptr;
370 newtable = MNEW(lock_record_t *, newsize);
372 #if defined(ENABLE_STATISTICS)
374 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
377 MZERO(newtable, lock_record_t *, newsize);
379 /* rehash the entries */
381 for (i = 0; i < oldsize; i++) {
386 h = LOCK_HASH(lr->object);
387 newslot = h % newsize;
389 lr->hashlink = newtable[newslot];
390 newtable[newslot] = lr;
396 /* replace the old table */
398 lock_hashtable.ptr = newtable;
399 lock_hashtable.size = newsize;
401 MFREE(oldtable, lock_record_t *, oldsize);
403 #if defined(ENABLE_STATISTICS)
405 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
410 /* lock_hashtable_get **********************************************************
412 Find the lock record for the given object. If it does not exist
413 yet, create it and enter it in the hashtable.
416 o....the object to look up
419 the lock record to use for this object
421 *******************************************************************************/
423 #if defined(ENABLE_GC_BOEHM)
424 static void lock_record_finalizer(void *object, void *p);
/* Return the lock record for object `o`, creating and registering a new
   one under the hashtable mutex if none exists yet.
   Fast path: if the lockword is already fat, the record pointer is stored
   directly in the object header and no locking is needed.
   NOTE(review): local declarations, the `return lr;` statements and the
   line linking the new record to `o` (presumably `lr->object = o;`) are
   elided from this listing. */
427 static lock_record_t *lock_hashtable_get(java_object_t *o)
433 lockword = (ptrint) o->monitorPtr;
435 if (IS_FAT_LOCK(lockword))
436 return GET_FAT_LOCK(lockword);
438 /* lock the hashtable */
440 pthread_mutex_lock(&(lock_hashtable.mutex));
442 /* lookup the lock record in the hashtable */
444 slot = LOCK_HASH(o) % lock_hashtable.size;
445 lr = lock_hashtable.ptr[slot];
/* walk the collision chain looking for an existing record */
447 for (; lr != NULL; lr = lr->hashlink) {
448 if (lr->object == o) {
449 pthread_mutex_unlock(&(lock_hashtable.mutex));
454 /* not found, we must create a new one */
456 lr = lock_record_new();
460 #if defined(ENABLE_GC_BOEHM)
461 /* register new finalizer to clean up the lock record */
463 GC_REGISTER_FINALIZER(o, lock_record_finalizer, 0, 0, 0);
466 LOCK_LOG(("thread %d allocated for %p new lr %p\n",
467 t->index, (void*) o, (void*) lr));
469 /* enter it in the hashtable */
471 lr->hashlink = lock_hashtable.ptr[slot];
472 lock_hashtable.ptr[slot] = lr;
473 lock_hashtable.entries++;
475 /* check whether the hash should grow */
/* grow when the load factor exceeds 4/3 (entries/size > 4/3) */
477 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
478 lock_hashtable_grow();
481 /* unlock the hashtable */
483 pthread_mutex_unlock(&(lock_hashtable.mutex));
485 /* return the new lock record */
491 /* lock_hashtable_remove *******************************************************
493 Remove the lock record for the given object from the hashtable.
496 o....the object to look up
498 *******************************************************************************/
/* Unlink the (fat) lock record of object `o` from its hashtable chain.
   Precondition: the object's lockword must already be fat (asserted).
   NOTE(review): local declarations, the head-of-chain `if` condition and
   loop `break` are elided from this listing. */
500 static void lock_hashtable_remove(java_object_t *o)
505 lock_record_t *tmplr;
507 /* lock the hashtable */
509 pthread_mutex_lock(&(lock_hashtable.mutex));
511 /* get lock record */
513 lockword = (ptrint) o->monitorPtr;
515 assert(IS_FAT_LOCK(lockword));
517 lr = GET_FAT_LOCK(lockword);
519 /* remove the lock-record from the hashtable */
521 slot = LOCK_HASH(o) % lock_hashtable.size;
522 tmplr = lock_hashtable.ptr[slot];
525 /* special handling if it's the first in the chain */
527 lock_hashtable.ptr[slot] = lr->hashlink;
/* otherwise scan the chain for the predecessor of lr and splice it out */
530 for (; tmplr != NULL; tmplr = tmplr->hashlink) {
531 if (tmplr->hashlink == lr) {
532 tmplr->hashlink = lr->hashlink;
/* lr must have been found in its own chain */
537 assert(tmplr != NULL);
540 /* decrease entry count */
542 lock_hashtable.entries--;
544 /* unlock the hashtable */
546 pthread_mutex_unlock(&(lock_hashtable.mutex));
550 /* lock_record_finalizer *******************************************************
552 XXX Remove me for exact GC.
554 *******************************************************************************/
/* Boehm-GC finalizer installed by lock_hashtable_get: when an object with
   a (fat) lock record dies, first run the class's own Java finalizer if
   present, then remove and free the lock record.
   XXX per the header comment above: remove for exact GC. */
556 static void lock_record_finalizer(void *object, void *p)
562 o = (java_object_t *) object;
564 /* check for a finalizer function */
566 if (o->vftbl->class->finalizer != NULL)
567 finalizer_run(object, p);
569 /* remove the lock-record entry from the hashtable */
571 lock_hashtable_remove(o);
573 /* get lock record */
575 lockword = (ptrint) o->monitorPtr;
/* only objects whose lock was inflated get this finalizer */
577 assert(IS_FAT_LOCK(lockword));
579 lr = GET_FAT_LOCK(lockword);
581 /* now release the lock record */
583 lock_record_free(lr);
587 /*============================================================================*/
588 /* OBJECT LOCK INITIALIZATION */
589 /*============================================================================*/
592 /* lock_init_object_lock *******************************************************
594 Initialize the monitor pointer of the given object. The monitor gets
595 initialized to an unlocked state.
597 *******************************************************************************/
/* Reset an object's header to the unlocked thin-lock state and clear the
   flat-lock-contention bit. */
599 void lock_init_object_lock(java_object_t *o)
603 o->monitorPtr = (lock_record_t *) THIN_UNLOCKED;
604 LOCK_CLEAR_FLC_BIT(o);
608 /* lock_get_initial_lock_word **************************************************
610 Returns the initial (unlocked) lock word. The pointer is
611 required in the code generator to set up a virtual
612 java_objectheader for code patch locking.
614 *******************************************************************************/
/* Return the unlocked lockword value (THIN_UNLOCKED cast to a pointer),
   used by the code generator for a virtual object header. */
616 lock_record_t *lock_get_initial_lock_word(void)
618 return (lock_record_t *) THIN_UNLOCKED;
623 /*============================================================================*/
624 /* LOCKING ALGORITHM */
625 /*============================================================================*/
628 /* lock_record_enter ***********************************************************
630 Enter the lock represented by the given lock record.
633 t.................the current thread
634 lr................the lock record
636 *******************************************************************************/
/* Block until the lock record's mutex is acquired. `t` is unused here;
   ownership bookkeeping is presumably done on elided lines or by callers. */
638 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
640 pthread_mutex_lock(&(lr->mutex));
646 /* lock_record_exit ************************************************************
648 Release the lock represented by the given lock record.
651 t.................the current thread
652 lr................the lock record
655 The current thread must own the lock represented by this lock record.
656 This is NOT checked by this function!
658 *******************************************************************************/
/* Release the lock record's mutex. Ownership is NOT verified here (see
   the header comment above); callers must hold the lock. */
660 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
663 pthread_mutex_unlock(&(lr->mutex));
667 /* lock_inflate ****************************************************************
669 Inflate the lock of the given object. This may only be called by the
670 owner of the monitor of the object.
673 t............the current thread
674 o............the object of which to inflate the lock
675 lr...........the lock record to install. The current thread must
676 own the lock of this lock record!
679 The current thread must be the owner of this object's monitor AND
680 of the lock record's lock!
682 *******************************************************************************/
/* Convert the object's thin lock into a fat lock backed by `lr`.
   Must only be called by the monitor owner, who must also hold lr->mutex.
   If the lock is already fat it must already point at `lr` (asserted).
   NOTE(review): the declaration of `lockword`, a `return`, and the brace
   lines are elided from this listing. */
684 static void lock_inflate(threadobject *t, java_object_t *o, lock_record_t *lr)
688 /* get the current lock count */
690 lockword = (ptrint) o->monitorPtr;
692 if (IS_FAT_LOCK(lockword)) {
693 assert(GET_FAT_LOCK(lockword) == lr);
/* thin case: the word minus its count field must be our own thinlock */
696 assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );
698 /* copy the count from the thin lock */
700 lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
703 LOCK_LOG(("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
704 t->index, (void*) o, (long)o->monitorPtr, (int)lr->count));
706 /* clear flat-lock-contention bit */
708 LOCK_CLEAR_FLC_BIT(o);
710 /* notify waiting objects */
712 lock_record_notify(t, lr, false);
/* publish the fat lock: record pointer with the shape bit set */
716 o->monitorPtr = (lock_record_t *) MAKE_FAT_LOCK(lr);
720 /* lock_monitor_enter **********************************************************
722 Acquire the monitor of the given object. If the current thread already
723 owns the monitor, the lock counter is simply increased.
725 This function blocks until it can acquire the monitor.
728 t............the current thread
729 o............the object of which to enter the monitor
732 true.........the lock has been successfully acquired
733 false........an exception has been thrown
735 *******************************************************************************/
/* Acquire the monitor of `o`, blocking if necessary.
   Returns true on success, false if an exception was thrown (null object).
   Paths, fastest first: (1) CAS an unlocked word to our thinlock;
   (2) recursive thin lock — bump the count, inflating on count overflow;
   (3) already fat — recursive fast path or pthread lock; (4) contended
   thin lock held by another thread — inflate via the FLC-bit protocol.
   NOTE(review): local declarations (t, lr, lockword, thinlock), several
   `return true;` lines, the null check's `if`, memory-barrier macros and
   assignments like `lr->owner = t` / count updates are elided from this
   line-numbered listing — consult the full source before editing. */
737 bool lock_monitor_enter(java_object_t *o)
740 /* CAUTION: This code assumes that ptrint is unsigned! */
/* elided: if (o == NULL) — throw NPE and return false */
746 exceptions_throw_nullpointerexception();
752 thinlock = t->thinlock;
754 /* most common case: try to thin-lock an unlocked object */
756 if ((lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) == THIN_UNLOCKED) {
757 /* success. we locked it */
758 /* The Java Memory Model requires a memory barrier here: */
763 /* next common case: recursive lock with small recursion count */
764 /* We don't have to worry about stale values here, as any stale value */
765 /* will indicate another thread holding the lock (or an inflated lock) */
767 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
768 /* we own this monitor */
769 /* check the current recursion count */
/* XOR isolates the count bits, since the TID/shape bits are equal */
771 if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
773 /* the recursion count is low enough */
775 o->monitorPtr = (lock_record_t *) (lockword + THIN_LOCK_COUNT_INCR);
777 /* success. we locked it */
781 /* recursion count overflow */
/* fall back to a fat lock so the count can live in lr->count */
783 lr = lock_hashtable_get(o);
784 lock_record_enter(t, lr);
785 lock_inflate(t, o, lr);
792 /* the lock is either contented or fat */
794 if (IS_FAT_LOCK(lockword)) {
796 lr = GET_FAT_LOCK(lockword);
798 /* check for recursive entering */
799 if (lr->owner == t) {
804 /* acquire the mutex of the lock record */
806 lock_record_enter(t, lr);
/* a freshly acquired fat lock must not carry a recursion count */
808 assert(lr->count == 0);
813 /****** inflation path ******/
815 /* first obtain the lock record for this object */
817 lr = lock_hashtable_get(o);
819 #if defined(ENABLE_JVMTI)
820 /* Monitor Contended Enter */
821 jvmti_MonitorContendedEntering(false, o);
824 /* enter the monitor */
826 lock_record_enter(t, lr);
828 #if defined(ENABLE_JVMTI)
829 /* Monitor Contended Entered */
830 jvmti_MonitorContendedEntering(true, o);
/* loop until the lockword becomes fat (we or the owner inflate it) */
835 while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {
836 /* Set the flat lock contention bit to let the owning thread
837 know that we want to be notified of unlocking. */
841 LOCK_LOG(("thread %d set flc bit on %p lr %p\n",
842 t->index, (void*) o, (void*) lr));
844 /* try to lock the object */
846 if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
847 /* we can inflate the lock ourselves */
849 LOCK_LOG(("thread %d inflating lock of %p to lr %p\n",
850 t->index, (void*) o, (void*) lr));
852 lock_inflate(t, o, lr);
855 /* wait until another thread sees the flc bit and notifies
858 LOCK_LOG(("thread %d waiting for notification on %p lr %p\n",
859 t->index, (void*) o, (void*) lr));
861 lock_record_wait(t, lr, 0, 0);
865 /* we own the inflated lock now */
871 /* lock_monitor_exit ***********************************************************
873 Decrement the counter of a (currently owned) monitor. If the counter
874 reaches zero, release the monitor.
876 If the current thread is not the owner of the monitor, an
877 IllegalMonitorState exception is thrown.
880 t............the current thread
881 o............the object of which to exit the monitor
884 true.........everything ok,
885 false........an exception has been thrown
887 *******************************************************************************/
/* Release one level of the monitor of `o`.
   Returns true on success, false if an exception was thrown (NPE or
   IllegalMonitorStateException when the caller does not own the lock).
   Paths: (1) thin lock held exactly once — store THIN_UNLOCKED, then honor
   the flat-lock-contention bit by waking an inflating waiter; (2) recursive
   thin lock — decrement the count field; (3) fat lock — verify ownership,
   decrement lr->count or drop ownership and unlock the mutex.
   NOTE(review): local declarations, the null check's `if`, memory-barrier
   macros, `return` statements, FLC-bit clearing and `lr->owner = NULL` /
   `lr->count--` lines are elided from this listing. */
889 bool lock_monitor_exit(java_object_t *o)
/* elided: if (o == NULL) — throw NPE and return false */
896 exceptions_throw_nullpointerexception();
902 /* We don't have to worry about stale values here, as any stale value */
903 /* will indicate that we don't own the lock. */
905 lockword = (ptrint) o->monitorPtr;
906 thinlock = t->thinlock;
908 /* most common case: we release a thin lock that we hold once */
910 if (lockword == thinlock) {
911 /* memory barrier for Java Memory Model */
913 o->monitorPtr = THIN_UNLOCKED;
914 /* memory barrier for thin locking */
917 /* check if there has been a flat lock contention on this object */
919 if (LOCK_TEST_FLC_BIT(o)) {
922 LOCK_LOG(("thread %d saw flc bit on %p %s\n",
923 t->index, (void*) o, o->vftbl->class->name->text));
925 /* there has been a contention on this thin lock */
927 lr = lock_hashtable_get(o);
929 LOCK_LOG(("thread %d for %p got lr %p\n",
930 t->index, (void*) o, (void*) lr));
932 lock_record_enter(t, lr);
/* re-test under the record mutex: the bit may have been cleared by
   an inflation that happened in the meantime */
934 if (LOCK_TEST_FLC_BIT(o)) {
935 /* notify a thread that it can try to inflate the lock now */
937 lock_record_notify(t, lr, true);
940 lock_record_exit(t, lr);
946 /* next common case: we release a recursive lock, count > 0 */
948 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
949 o->monitorPtr = (lock_record_t *) (lockword - THIN_LOCK_COUNT_INCR);
953 /* either the lock is fat, or we don't hold it at all */
955 if (IS_FAT_LOCK(lockword)) {
959 lr = GET_FAT_LOCK(lockword);
961 /* check if we own this monitor */
962 /* We don't have to worry about stale values here, as any stale value */
963 /* will be != t and thus fail this check. */
965 if (lr->owner != t) {
966 exceptions_throw_illegalmonitorstateexception();
970 /* { the current thread `t` owns the lock record `lr` on object `o` } */
972 if (lr->count != 0) {
973 /* we had locked this one recursively. just decrement, it will */
974 /* still be locked. */
979 /* unlock this lock record */
982 pthread_mutex_unlock(&(lr->mutex));
987 /* legal thin lock cases have been handled above, so this is an error */
989 exceptions_throw_illegalmonitorstateexception();
995 /* lock_record_add_waiter ******************************************************
997 Add a thread to the list of waiting threads of a lock record.
1000 lr...........the lock record
1001 thread.......the thread to add
1003 *******************************************************************************/
/* Prepend `thread` to the lock record's singly-linked waiter list.
   The waiter node is heap-allocated and freed in lock_record_remove_waiter. */
1005 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
1007 lock_waiter_t *waiter;
1009 /* allocate a waiter data structure */
1011 waiter = NEW(lock_waiter_t);
1013 #if defined(ENABLE_STATISTICS)
1015 size_lock_waiter += sizeof(lock_waiter_t);
/* link the new node at the head of the list */
1018 waiter->waiter = thread;
1019 waiter->next = lr->waiters;
1021 lr->waiters = waiter;
1025 /* lock_record_remove_waiter ***************************************************
1027 Remove a thread from the list of waiting threads of a lock record.
1030 lr...........the lock record
1031 t............the current thread
1034 The current thread must be the owner of the lock record.
1036 *******************************************************************************/
/* Unlink and free the waiter node of `thread` from the lock record's list.
   Aborts the VM if the thread is not found (invariant violation).
   NOTE(review): the declaration of `w`, the unlink assignment
   (presumably `*link = w->next;`), a `return` and the loop's
   `link = &w->next;` advance are elided from this listing. */
1038 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
/* pointer-to-pointer walk so the head case needs no special handling */
1040 lock_waiter_t **link;
1043 link = &(lr->waiters);
1045 while ((w = *link)) {
1046 if (w->waiter == thread) {
1049 /* free the waiter data structure */
1051 FREE(w, lock_waiter_t);
1053 #if defined(ENABLE_STATISTICS)
1055 size_lock_waiter -= sizeof(lock_waiter_t);
1064 /* this should never happen */
1066 vm_abort("lock_record_remove_waiter: waiting thread not found in list of waiters\n");
1070 /* lock_record_wait ************************************************************
1072 Wait on a lock record for a given (maximum) amount of time.
1075 t............the current thread
1076 lr...........the lock record
1077 millis.......milliseconds of timeout
1078 nanos........nanoseconds of timeout
1081 The current thread must be the owner of the lock record.
1082 This is NOT checked by this function!
1084 *******************************************************************************/
/* Object.wait() on a fat lock: register as waiter, fully release the
   record (saving the recursion count), sleep with an optional timeout,
   then re-acquire and restore the count.
   Caller must own `lr` (not checked, per the header comment above).
   NOTE(review): the declaration of `lockcount`, the `lr->count = 0` /
   owner-clearing before the exit and the `if (wasinterrupted)` test are
   elided from this listing. */
1086 static void lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1089 bool wasinterrupted;
1091 /* { the thread t owns the fat lock record lr on the object o } */
1093 /* register us as waiter for this object */
1095 lock_record_add_waiter(lr, thread);
1097 /* remember the old lock count */
1099 lockcount = lr->count;
1101 /* unlock this record */
1104 lock_record_exit(thread, lr);
1106 /* wait until notified/interrupted/timed out */
1108 wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);
1110 /* re-enter the monitor */
1112 lock_record_enter(thread, lr);
1114 /* remove us from the list of waiting threads */
1116 lock_record_remove_waiter(lr, thread);
1118 /* restore the old lock count */
1120 lr->count = lockcount;
1122 /* if we have been interrupted, throw the appropriate exception */
1125 exceptions_throw_interruptedexception();
1129 /* lock_monitor_wait ***********************************************************
1131 Wait on an object for a given (maximum) amount of time.
1134 t............the current thread
1135 o............the object
1136 millis.......milliseconds of timeout
1137 nanos........nanoseconds of timeout
1140 The current thread must be the owner of the object's monitor.
1142 *******************************************************************************/
/* Object.wait() entry point: verify the current thread owns the monitor
   (throwing IllegalMonitorStateException otherwise), inflate a thin lock
   if necessary, then delegate to lock_record_wait.
   NOTE(review): local declarations, `return` statements after the throws
   and brace/else lines are elided from this listing. */
1144 static void lock_monitor_wait(threadobject *t, java_object_t *o, s8 millis, s4 nanos)
1149 lockword = (ptrint) o->monitorPtr;
1151 /* check if we own this monitor */
1152 /* We don't have to worry about stale values here, as any stale value */
1153 /* will fail this check. */
1155 if (IS_FAT_LOCK(lockword)) {
1157 lr = GET_FAT_LOCK(lockword);
1159 if (lr->owner != t) {
1160 exceptions_throw_illegalmonitorstateexception();
1165 /* it's a thin lock */
1167 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1168 exceptions_throw_illegalmonitorstateexception();
1172 /* inflate this lock */
/* waiting requires a condition-variable-capable fat lock */
1174 lr = lock_hashtable_get(o);
1176 lock_record_enter(t, lr);
1177 lock_inflate(t, o, lr);
1180 /* { the thread t owns the fat lock record lr on the object o } */
1182 lock_record_wait(t, lr, millis, nanos);
1186 /* lock_record_notify **********************************************************
1188 Notify one thread or all threads waiting on the given lock record.
1191 t............the current thread
1192 lr...........the lock record
1193 one..........if true, only notify one thread
1196 The current thread must be the owner of the lock record.
1197 This is NOT checked by this function!
1199 *******************************************************************************/
/* Wake one (one == true) or all threads on the lock record's waiter list
   by signaling each waiter's per-thread condition variable under its
   waitmutex. The `signaled` flag covers waiters not yet sleeping.
   Caller must own `lr` (not checked, per the header comment above).
   NOTE(review): the `if (one) break;` after the log comment and brace
   lines are elided from this listing. */
1201 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1203 lock_waiter_t *waiter;
1204 threadobject *waitingthread;
1206 /* { the thread t owns the fat lock record lr on the object o } */
1208 /* for each waiter: */
1210 for (waiter = lr->waiters; waiter != NULL; waiter = waiter->next) {
1212 /* signal the waiting thread */
1214 waitingthread = waiter->waiter;
1216 pthread_mutex_lock(&waitingthread->waitmutex);
/* only signal if the thread is actually blocked on its condvar */
1218 if (waitingthread->sleeping)
1219 pthread_cond_signal(&waitingthread->waitcond);
/* set unconditionally so a thread about to sleep sees the wakeup */
1221 waitingthread->signaled = true;
1223 pthread_mutex_unlock(&waitingthread->waitmutex);
1225 /* if we should only wake one, we are done */
1233 /* lock_monitor_notify *********************************************************
1235 Notify one thread or all threads waiting on the given object.
1238 t............the current thread
1239 o............the object
1240 one..........if true, only notify one thread
1243 The current thread must be the owner of the object's monitor.
1245 *******************************************************************************/
/* Object.notify()/notifyAll() entry point: verify monitor ownership
   (throwing IllegalMonitorStateException otherwise), inflate a thin lock
   if necessary, then delegate to lock_record_notify.
   Structure mirrors lock_monitor_wait.
   NOTE(review): local declarations, `return` statements after the throws
   and brace/else lines are elided from this listing. */
1247 static void lock_monitor_notify(threadobject *t, java_object_t *o, bool one)
1252 lockword = (ptrint) o->monitorPtr;
1254 /* check if we own this monitor */
1255 /* We don't have to worry about stale values here, as any stale value */
1256 /* will fail this check. */
1258 if (IS_FAT_LOCK(lockword)) {
1260 lr = GET_FAT_LOCK(lockword);
1262 if (lr->owner != t) {
1263 exceptions_throw_illegalmonitorstateexception();
1268 /* it's a thin lock */
1270 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1271 exceptions_throw_illegalmonitorstateexception();
1275 /* inflate this lock */
/* notifying requires the fat-lock waiter list */
1277 lr = lock_hashtable_get(o);
1279 lock_record_enter(t, lr);
1280 lock_inflate(t, o, lr);
1283 /* { the thread t owns the fat lock record lr on the object o } */
1285 lock_record_notify(t, lr, one);
1290 /*============================================================================*/
1291 /* INQUIRY FUNCTIONS */
1292 /*============================================================================*/
1295 /* lock_is_held_by_current_thread **********************************************
1297 Return true if the current thread owns the monitor of the given object.
1300 o............the object
1303 true, if the current thread holds the lock of this object.
1305 *******************************************************************************/
/* Return true iff the current thread owns the monitor of `o`:
   fat lock — compare lr->owner; thin lock — compare the lockword minus
   its count field against the current thread's thinlock value.
   NOTE(review): the declarations of t/lockword/lr (and the assignment of
   `t`, presumably THREADOBJECT) are elided from this listing. */
1307 bool lock_is_held_by_current_thread(java_object_t *o)
1315 /* check if we own this monitor */
1316 /* We don't have to worry about stale values here, as any stale value */
1317 /* will fail this check. */
1319 lockword = (ptrint) o->monitorPtr;
1321 if (IS_FAT_LOCK(lockword)) {
1322 /* it's a fat lock */
1324 lr = GET_FAT_LOCK(lockword);
1326 return (lr->owner == t);
1329 /* it's a thin lock */
1331 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1337 /*============================================================================*/
1338 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1339 /*============================================================================*/
1342 /* lock_wait_for_object ********************************************************
1344 Wait for the given object.
1347 o............the object
1348 millis.......milliseconds to wait
1349 nanos........nanoseconds to wait
1351 *******************************************************************************/
/* Current-thread wrapper: wait on `o` for up to millis/nanos. */
1353 void lock_wait_for_object(java_object_t *o, s8 millis, s4 nanos)
1355 threadobject *thread;
1357 thread = THREADOBJECT;
1359 lock_monitor_wait(thread, o, millis, nanos);
1363 /* lock_notify_object **********************************************************
1365 Notify one thread waiting on the given object.
1368 o............the object
1370 *******************************************************************************/
/* Current-thread wrapper: notify one thread waiting on `o`. */
1372 void lock_notify_object(java_object_t *o)
1374 threadobject *thread;
1376 thread = THREADOBJECT;
/* one == true: wake a single waiter */
1378 lock_monitor_notify(thread, o, true);
1382 /* lock_notify_all_object ******************************************************
1384 Notify all threads waiting on the given object.
1387 o............the object
1389 *******************************************************************************/
/* Current-thread wrapper: notify all threads waiting on `o`. */
1391 void lock_notify_all_object(java_object_t *o)
1393 threadobject *thread;
1395 thread = THREADOBJECT;
/* one == false: wake every waiter */
1397 lock_monitor_notify(thread, o, false);
1402 * These are local overrides for various environment variables in Emacs.
1403 * Please do not remove this and leave it at the end of the file, where
1404 * Emacs will automagically detect them.
1405 * ---------------------------------------------------------------------
1408 * indent-tabs-mode: t
1412 * vim:noexpandtab:sw=4:ts=4: