1 /* src/threads/native/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
4 C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5 E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6 J. Wenninger, Institut f. Computersprachen - TU Wien
8 This file is part of CACAO.
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2, or (at
13 your option) any later version.
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
38 #include "mm/memory.h"
40 #include "threads/native/lock.h"
41 #include "threads/native/threads.h"
43 #include "toolbox/list.h"
45 #include "vm/global.h"
46 #include "vm/exceptions.h"
47 #include "vm/finalizer.h"
48 #include "vm/stringlocal.h"
51 #include "vmcore/options.h"
53 #if defined(ENABLE_STATISTICS)
54 # include "vmcore/statistics.h"
57 #if defined(ENABLE_VMLOG)
58 #include <vmlog_cacao.h>
61 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
65 /* includes for atomic instructions: */
67 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
68 #include "threads/native/generic-primitives.h"
70 #include "machine-instr.h"
73 #if defined(ENABLE_JVMTI)
74 #include "native/jvmti/cacaodbg.h"
77 #if defined(ENABLE_GC_BOEHM)
78 # include "mm/boehm-gc/include/gc.h"
/* debug **********************************************************************/

/* DEBUGLOCKS(format): print a lock-debugging message when -XX:+DebugLocks
   is enabled; compiles to nothing in release (NDEBUG) builds.  `format` is
   a parenthesized printf-style argument list, e.g. (("lr=%p", lr)). */

#if !defined(NDEBUG)
# define DEBUGLOCKS(format) \
	do { \
		if (opt_DebugLocks) { \
			log_println format; \
		} \
	} while (0)
#else
# define DEBUGLOCKS(format)
#endif
/******************************************************************************/
/* MACROS                                                                     */
/******************************************************************************/

/* number of lock records in the first pool allocated for a thread */
#define LOCK_INITIAL_LOCK_RECORDS 8

#define LOCK_INITIAL_HASHTABLE_SIZE  1613 /* a prime in the middle between 1024 and 2048 */

/* hash an object reference by its address */
#define LOCK_HASH(obj)  ((ptrint)(obj))

/* atomically CAS *address from oldvalue to newvalue; returns the value that
   was found at *address (== oldvalue on success) */
#define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
	((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))

/* CAUTION: oldvalue is evaluated twice! */
#define COMPARE_AND_SWAP_SUCCEEDS(address, oldvalue, newvalue) \
	(compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)) == (long)(oldvalue))
/******************************************************************************/
/* MACROS FOR THE FLAT LOCK CONTENTION BIT                                    */
/******************************************************************************/

/* The FLC bit in the object header signals that another thread is waiting
   to inflate this object's thin lock and must be notified on unlock. */

#define LOCK_SET_FLC_BIT(obj)    ((obj)->hdrflags |= HDRFLAG_FLC)
#define LOCK_CLEAR_FLC_BIT(obj)  ((obj)->hdrflags &= ~ HDRFLAG_FLC)
#define LOCK_TEST_FLC_BIT(obj)   ((obj)->hdrflags & HDRFLAG_FLC)
/******************************************************************************/
/* MACROS FOR THIN/FAT LOCKS                                                  */
/******************************************************************************/

/* We use a variant of the tasuki locks described in the paper
 *
 *     Tamiya Onodera, Kiyokuni Kawachiya
 *     A Study of Locking Objects with Bimodal Fields
 *     Proceedings of the ACM OOPSLA '99, pp. 223-237
 *
 * The underlying thin locks are a variant of the thin locks described in
 *
 *     Bacon, Konuru, Murthy, Serrano
 *     Thin Locks: Featherweight Synchronization for Java
 *     Proceedings of the ACM Conference on Programming Language Design and
 *     Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
 *     June 1998
 *
 * In thin lock mode the lockword (monitorPtr) looks like this:
 *
 *     ,----------------------,-----------,---,
 *     |      thread ID       |   count   | 0 |
 *     `----------------------'-----------'---´
 *
 *     thread ID......the 'index' of the owning thread, or 0
 *     count..........number of times the lock has been entered	minus 1
 *     0..............the shape bit is 0 in thin lock mode
 *
 * In fat lock mode it is basically a lock_record_t *:
 *
 *     ,----------------------------------,---,
 *     |    lock_record_t * (without LSB) | 1 |
 *     `----------------------------------'---´
 *
 *     1..............the shape bit is 1 in fat lock mode
 */

#if SIZEOF_VOID_P == 8
#define THIN_LOCK_WORD_SIZE    64
#else
#define THIN_LOCK_WORD_SIZE    32
#endif

#define THIN_LOCK_SHAPE_BIT    0x01

#define THIN_UNLOCKED          0

#define THIN_LOCK_COUNT_SHIFT  1
#define THIN_LOCK_COUNT_SIZE   8
#define THIN_LOCK_COUNT_INCR   (1 << THIN_LOCK_COUNT_SHIFT)
#define THIN_LOCK_COUNT_MAX    ((1 << THIN_LOCK_COUNT_SIZE) - 1)
#define THIN_LOCK_COUNT_MASK   (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)

#define THIN_LOCK_TID_SHIFT    (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
#define THIN_LOCK_TID_SIZE     (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)

#define IS_THIN_LOCK(lockword)  (!((lockword) & THIN_LOCK_SHAPE_BIT))
#define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)

#define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
#define MAKE_FAT_LOCK(ptr)      ((ptrint)(ptr) | THIN_LOCK_SHAPE_BIT)

/* strip the recursion count, leaving only thread ID and shape bit */
#define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
190 /* global variables ***********************************************************/
192 /* hashtable mapping objects to lock records */
193 static lock_hashtable_t lock_hashtable;
196 /******************************************************************************/
198 /******************************************************************************/
200 static void lock_hashtable_init(void);
202 static void lock_record_enter(threadobject *t, lock_record_t *lr);
203 static void lock_record_exit(threadobject *t, lock_record_t *lr);
204 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
205 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
/*============================================================================*/
/* INITIALIZATION OF DATA STRUCTURES                                          */
/*============================================================================*/


/* lock_init *******************************************************************

   Initialize global data for locking.

*******************************************************************************/

void lock_init(void)
{
	/* initialize lock hashtable */

	lock_hashtable_init();

#if defined(ENABLE_VMLOG)
	vmlog_cacao_init_lock();
#endif
}
231 /* lock_pre_compute_thinlock ***************************************************
233 Pre-compute the thin lock value for a thread index.
236 index........the thead index (>= 1)
239 the thin lock value for this thread index
241 *******************************************************************************/
243 ptrint lock_pre_compute_thinlock(s4 index)
245 return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
249 /* lock_record_new *************************************************************
251 Allocate a lock record.
253 *******************************************************************************/
255 static lock_record_t *lock_record_new(void)
259 /* allocate the data structure on the C heap */
261 lr = NEW(lock_record_t);
263 #if defined(ENABLE_STATISTICS)
265 size_lock_record += sizeof(lock_record_t);
268 /* initialize the members */
273 lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));
275 /* initialize the mutex */
277 pthread_mutex_init(&(lr->mutex), NULL);
279 DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
285 /* lock_record_free ************************************************************
290 lr....lock record to free
292 *******************************************************************************/
294 static void lock_record_free(lock_record_t *lr)
296 DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
298 /* Destroy the mutex. */
300 pthread_mutex_destroy(&(lr->mutex));
302 /* Free the waiters list. */
304 list_free(lr->waiters);
306 /* Free the data structure. */
308 FREE(lr, lock_record_t);
310 #if defined(ENABLE_STATISTICS)
312 size_lock_record -= sizeof(lock_record_t);
317 /*============================================================================*/
318 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
319 /*============================================================================*/
321 /* lock_hashtable_init *********************************************************
323 Initialize the global hashtable mapping objects to lock records.
325 *******************************************************************************/
327 static void lock_hashtable_init(void)
329 pthread_mutex_init(&(lock_hashtable.mutex), NULL);
331 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
332 lock_hashtable.entries = 0;
333 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
335 #if defined(ENABLE_STATISTICS)
337 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
340 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
344 /* lock_hashtable_grow *********************************************************
346 Grow the lock record hashtable to about twice its current size and
349 *******************************************************************************/
351 /* must be called with hashtable mutex locked */
352 static void lock_hashtable_grow(void)
356 lock_record_t **oldtable;
357 lock_record_t **newtable;
364 /* allocate a new table */
366 oldsize = lock_hashtable.size;
367 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
369 DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
371 oldtable = lock_hashtable.ptr;
372 newtable = MNEW(lock_record_t *, newsize);
374 #if defined(ENABLE_STATISTICS)
376 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
379 MZERO(newtable, lock_record_t *, newsize);
381 /* rehash the entries */
383 for (i = 0; i < oldsize; i++) {
388 h = LOCK_HASH(lr->object);
389 newslot = h % newsize;
391 lr->hashlink = newtable[newslot];
392 newtable[newslot] = lr;
398 /* replace the old table */
400 lock_hashtable.ptr = newtable;
401 lock_hashtable.size = newsize;
403 MFREE(oldtable, lock_record_t *, oldsize);
405 #if defined(ENABLE_STATISTICS)
407 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
412 /* lock_hashtable_get **********************************************************
414 Find the lock record for the given object. If it does not exists,
415 yet, create it and enter it in the hashtable.
418 o....the object to look up
421 the lock record to use for this object
423 *******************************************************************************/
425 #if defined(ENABLE_GC_BOEHM)
426 static void lock_record_finalizer(void *object, void *p);
429 static lock_record_t *lock_hashtable_get(java_object_t *o)
435 lockword = (ptrint) o->monitorPtr;
437 if (IS_FAT_LOCK(lockword))
438 return GET_FAT_LOCK(lockword);
440 /* lock the hashtable */
442 pthread_mutex_lock(&(lock_hashtable.mutex));
444 /* lookup the lock record in the hashtable */
446 slot = LOCK_HASH(o) % lock_hashtable.size;
447 lr = lock_hashtable.ptr[slot];
449 for (; lr != NULL; lr = lr->hashlink) {
450 if (lr->object == o) {
451 pthread_mutex_unlock(&(lock_hashtable.mutex));
456 /* not found, we must create a new one */
458 lr = lock_record_new();
462 #if defined(ENABLE_GC_BOEHM)
463 /* register new finalizer to clean up the lock record */
465 GC_REGISTER_FINALIZER(o, lock_record_finalizer, 0, 0, 0);
468 /* enter it in the hashtable */
470 lr->hashlink = lock_hashtable.ptr[slot];
471 lock_hashtable.ptr[slot] = lr;
472 lock_hashtable.entries++;
474 /* check whether the hash should grow */
476 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
477 lock_hashtable_grow();
480 /* unlock the hashtable */
482 pthread_mutex_unlock(&(lock_hashtable.mutex));
484 /* return the new lock record */
490 /* lock_hashtable_remove *******************************************************
492 Remove the lock record for the given object from the hashtable.
495 o....the object to look up
497 *******************************************************************************/
499 static void lock_hashtable_remove(java_object_t *o)
504 lock_record_t *tmplr;
506 /* lock the hashtable */
508 pthread_mutex_lock(&(lock_hashtable.mutex));
510 /* get lock record */
512 lockword = (ptrint) o->monitorPtr;
514 assert(IS_FAT_LOCK(lockword));
516 lr = GET_FAT_LOCK(lockword);
518 /* remove the lock-record from the hashtable */
520 slot = LOCK_HASH(o) % lock_hashtable.size;
521 tmplr = lock_hashtable.ptr[slot];
524 /* special handling if it's the first in the chain */
526 lock_hashtable.ptr[slot] = lr->hashlink;
529 for (; tmplr != NULL; tmplr = tmplr->hashlink) {
530 if (tmplr->hashlink == lr) {
531 tmplr->hashlink = lr->hashlink;
536 assert(tmplr != NULL);
539 /* decrease entry count */
541 lock_hashtable.entries--;
543 /* unlock the hashtable */
545 pthread_mutex_unlock(&(lock_hashtable.mutex));
549 /* lock_record_finalizer *******************************************************
551 XXX Remove me for exact GC.
553 *******************************************************************************/
555 static void lock_record_finalizer(void *object, void *p)
561 o = (java_object_t *) object;
563 /* check for a finalizer function */
565 if (o->vftbl->class->finalizer != NULL)
566 finalizer_run(object, p);
568 /* remove the lock-record entry from the hashtable */
570 lock_hashtable_remove(o);
572 /* get lock record */
574 lockword = (ptrint) o->monitorPtr;
576 assert(IS_FAT_LOCK(lockword));
578 lr = GET_FAT_LOCK(lockword);
580 /* now release the lock record */
582 lock_record_free(lr);
586 /*============================================================================*/
587 /* OBJECT LOCK INITIALIZATION */
588 /*============================================================================*/
591 /* lock_init_object_lock *******************************************************
593 Initialize the monitor pointer of the given object. The monitor gets
594 initialized to an unlocked state.
596 *******************************************************************************/
598 void lock_init_object_lock(java_object_t *o)
602 o->monitorPtr = (lock_record_t *) THIN_UNLOCKED;
603 LOCK_CLEAR_FLC_BIT(o);
607 /* lock_get_initial_lock_word **************************************************
609 Returns the initial (unlocked) lock word. The pointer is
610 required in the code generator to set up a virtual
611 java_objectheader for code patch locking.
613 *******************************************************************************/
615 lock_record_t *lock_get_initial_lock_word(void)
617 return (lock_record_t *) THIN_UNLOCKED;
622 /*============================================================================*/
623 /* LOCKING ALGORITHM */
624 /*============================================================================*/
627 /* lock_record_enter ***********************************************************
629 Enter the lock represented by the given lock record.
632 t.................the current thread
633 lr................the lock record
635 *******************************************************************************/
637 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
639 pthread_mutex_lock(&(lr->mutex));
645 /* lock_record_exit ************************************************************
647 Release the lock represented by the given lock record.
650 t.................the current thread
651 lr................the lock record
654 The current thread must own the lock represented by this lock record.
655 This is NOT checked by this function!
657 *******************************************************************************/
659 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
662 pthread_mutex_unlock(&(lr->mutex));
666 /* lock_inflate ****************************************************************
668 Inflate the lock of the given object. This may only be called by the
669 owner of the monitor of the object.
672 t............the current thread
673 o............the object of which to inflate the lock
674 lr...........the lock record to install. The current thread must
675 own the lock of this lock record!
678 The current thread must be the owner of this object's monitor AND
679 of the lock record's lock!
681 *******************************************************************************/
683 static void lock_inflate(threadobject *t, java_object_t *o, lock_record_t *lr)
687 /* get the current lock count */
689 lockword = (ptrint) o->monitorPtr;
691 if (IS_FAT_LOCK(lockword)) {
692 assert(GET_FAT_LOCK(lockword) == lr);
695 assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );
697 /* copy the count from the thin lock */
699 lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
702 DEBUGLOCKS(("[lock_inflate : lr=%p, t=%p, o=%p, o->monitorPtr=%lx, count=%d]",
703 lr, t, o, o->monitorPtr, lr->count));
705 /* clear flat-lock-contention bit */
707 LOCK_CLEAR_FLC_BIT(o);
709 /* notify waiting objects */
711 lock_record_notify(t, lr, false);
715 o->monitorPtr = (lock_record_t *) MAKE_FAT_LOCK(lr);
719 /* lock_monitor_enter **********************************************************
721 Acquire the monitor of the given object. If the current thread already
722 owns the monitor, the lock counter is simply increased.
724 This function blocks until it can acquire the monitor.
727 t............the current thread
728 o............the object of which to enter the monitor
731 true.........the lock has been successfully acquired
732 false........an exception has been thrown
734 *******************************************************************************/
736 bool lock_monitor_enter(java_object_t *o)
739 /* CAUTION: This code assumes that ptrint is unsigned! */
745 exceptions_throw_nullpointerexception();
751 thinlock = t->thinlock;
753 /* most common case: try to thin-lock an unlocked object */
755 if ((lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) == THIN_UNLOCKED) {
756 /* success. we locked it */
757 /* The Java Memory Model requires a memory barrier here: */
762 /* next common case: recursive lock with small recursion count */
763 /* We don't have to worry about stale values here, as any stale value */
764 /* will indicate another thread holding the lock (or an inflated lock) */
766 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
767 /* we own this monitor */
768 /* check the current recursion count */
770 if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
772 /* the recursion count is low enough */
774 o->monitorPtr = (lock_record_t *) (lockword + THIN_LOCK_COUNT_INCR);
776 /* success. we locked it */
780 /* recursion count overflow */
782 lr = lock_hashtable_get(o);
783 lock_record_enter(t, lr);
784 lock_inflate(t, o, lr);
791 /* the lock is either contented or fat */
793 if (IS_FAT_LOCK(lockword)) {
795 lr = GET_FAT_LOCK(lockword);
797 /* check for recursive entering */
798 if (lr->owner == t) {
803 /* acquire the mutex of the lock record */
805 lock_record_enter(t, lr);
807 assert(lr->count == 0);
812 /****** inflation path ******/
814 /* first obtain the lock record for this object */
816 lr = lock_hashtable_get(o);
818 #if defined(ENABLE_JVMTI)
819 /* Monitor Contended Enter */
820 jvmti_MonitorContendedEntering(false, o);
823 /* enter the monitor */
825 lock_record_enter(t, lr);
827 #if defined(ENABLE_JVMTI)
828 /* Monitor Contended Entered */
829 jvmti_MonitorContendedEntering(true, o);
834 while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {
835 /* Set the flat lock contention bit to let the owning thread
836 know that we want to be notified of unlocking. */
840 DEBUGLOCKS(("thread %d set flc bit on %p lr %p",
841 t->index, (void*) o, (void*) lr));
843 /* try to lock the object */
845 if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
846 /* we can inflate the lock ourselves */
848 DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
849 t->index, (void*) o, (void*) lr));
851 lock_inflate(t, o, lr);
854 /* Wait until another thread sees the flc bit and notifies
857 (void) lock_record_wait(t, lr, 0, 0);
861 /* we own the inflated lock now */
867 /* lock_monitor_exit ***********************************************************
869 Decrement the counter of a (currently owned) monitor. If the counter
870 reaches zero, release the monitor.
872 If the current thread is not the owner of the monitor, an
873 IllegalMonitorState exception is thrown.
876 t............the current thread
877 o............the object of which to exit the monitor
880 true.........everything ok,
881 false........an exception has been thrown
883 *******************************************************************************/
885 bool lock_monitor_exit(java_object_t *o)
892 exceptions_throw_nullpointerexception();
898 /* We don't have to worry about stale values here, as any stale value */
899 /* will indicate that we don't own the lock. */
901 lockword = (ptrint) o->monitorPtr;
902 thinlock = t->thinlock;
904 /* most common case: we release a thin lock that we hold once */
906 if (lockword == thinlock) {
907 /* memory barrier for Java Memory Model */
909 o->monitorPtr = THIN_UNLOCKED;
910 /* memory barrier for thin locking */
913 /* check if there has been a flat lock contention on this object */
915 if (LOCK_TEST_FLC_BIT(o)) {
918 DEBUGLOCKS(("thread %d saw flc bit on %p",
919 t->index, (void*) o));
921 /* there has been a contention on this thin lock */
923 lr = lock_hashtable_get(o);
925 DEBUGLOCKS(("thread %d for %p got lr %p",
926 t->index, (void*) o, (void*) lr));
928 lock_record_enter(t, lr);
930 if (LOCK_TEST_FLC_BIT(o)) {
931 /* notify a thread that it can try to inflate the lock now */
933 lock_record_notify(t, lr, true);
936 lock_record_exit(t, lr);
942 /* next common case: we release a recursive lock, count > 0 */
944 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
945 o->monitorPtr = (lock_record_t *) (lockword - THIN_LOCK_COUNT_INCR);
949 /* either the lock is fat, or we don't hold it at all */
951 if (IS_FAT_LOCK(lockword)) {
955 lr = GET_FAT_LOCK(lockword);
957 /* check if we own this monitor */
958 /* We don't have to worry about stale values here, as any stale value */
959 /* will be != t and thus fail this check. */
961 if (lr->owner != t) {
962 exceptions_throw_illegalmonitorstateexception();
966 /* { the current thread `t` owns the lock record `lr` on object `o` } */
968 if (lr->count != 0) {
969 /* we had locked this one recursively. just decrement, it will */
970 /* still be locked. */
975 /* unlock this lock record */
978 pthread_mutex_unlock(&(lr->mutex));
983 /* legal thin lock cases have been handled above, so this is an error */
985 exceptions_throw_illegalmonitorstateexception();
991 /* lock_record_add_waiter ******************************************************
993 Add a thread to the list of waiting threads of a lock record.
996 lr...........the lock record
997 thread.......the thread to add
999 *******************************************************************************/
1001 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
1005 /* Allocate a waiter data structure. */
1007 w = NEW(lock_waiter_t);
1009 #if defined(ENABLE_STATISTICS)
1011 size_lock_waiter += sizeof(lock_waiter_t);
1014 /* Store the thread in the waiter structure. */
1018 /* Add the waiter as last entry to waiters list. */
1020 list_add_last(lr->waiters, w);
1024 /* lock_record_remove_waiter ***************************************************
1026 Remove a thread from the list of waiting threads of a lock record.
1029 lr...........the lock record
1030 t............the current thread
1033 The current thread must be the owner of the lock record.
1035 *******************************************************************************/
1037 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
1042 /* Get the waiters list. */
1046 for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
1047 if (w->thread == thread) {
1048 /* Remove the waiter entry from the list. */
1050 list_remove_unsynced(l, w);
1052 /* Free the waiter data structure. */
1054 FREE(w, lock_waiter_t);
1056 #if defined(ENABLE_STATISTICS)
1058 size_lock_waiter -= sizeof(lock_waiter_t);
1065 /* This should never happen. */
1067 vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
1071 /* lock_record_wait ************************************************************
1073 Wait on a lock record for a given (maximum) amount of time.
1076 t............the current thread
1077 lr...........the lock record
1078 millis.......milliseconds of timeout
1079 nanos........nanoseconds of timeout
1082 true.........we have been interrupted,
1083 false........everything ok
1086 The current thread must be the owner of the lock record.
1087 This is NOT checked by this function!
1089 *******************************************************************************/
1091 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1094 bool wasinterrupted;
1096 DEBUGLOCKS(("[lock_record_wait : lr=%p, t=%p, millis=%lld, nanos=%d]",
1097 lr, thread, millis, nanos));
1099 /* { the thread t owns the fat lock record lr on the object o } */
1101 /* register us as waiter for this object */
1103 lock_record_add_waiter(lr, thread);
1105 /* remember the old lock count */
1107 lockcount = lr->count;
1109 /* unlock this record */
1112 lock_record_exit(thread, lr);
1114 /* wait until notified/interrupted/timed out */
1116 wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);
1118 /* re-enter the monitor */
1120 lock_record_enter(thread, lr);
1122 /* remove us from the list of waiting threads */
1124 lock_record_remove_waiter(lr, thread);
1126 /* restore the old lock count */
1128 lr->count = lockcount;
1130 /* return if we have been interrupted */
1132 return wasinterrupted;
1136 /* lock_monitor_wait ***********************************************************
1138 Wait on an object for a given (maximum) amount of time.
1141 t............the current thread
1142 o............the object
1143 millis.......milliseconds of timeout
1144 nanos........nanoseconds of timeout
1147 The current thread must be the owner of the object's monitor.
1149 *******************************************************************************/
1151 static void lock_monitor_wait(threadobject *t, java_object_t *o, s8 millis, s4 nanos)
1156 lockword = (ptrint) o->monitorPtr;
1158 /* check if we own this monitor */
1159 /* We don't have to worry about stale values here, as any stale value */
1160 /* will fail this check. */
1162 if (IS_FAT_LOCK(lockword)) {
1164 lr = GET_FAT_LOCK(lockword);
1166 if (lr->owner != t) {
1167 exceptions_throw_illegalmonitorstateexception();
1172 /* it's a thin lock */
1174 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1175 exceptions_throw_illegalmonitorstateexception();
1179 /* inflate this lock */
1181 lr = lock_hashtable_get(o);
1183 lock_record_enter(t, lr);
1184 lock_inflate(t, o, lr);
1187 /* { the thread t owns the fat lock record lr on the object o } */
1189 if (lock_record_wait(t, lr, millis, nanos))
1190 exceptions_throw_interruptedexception();
1194 /* lock_record_notify **********************************************************
1196 Notify one thread or all threads waiting on the given lock record.
1199 t............the current thread
1200 lr...........the lock record
1201 one..........if true, only notify one thread
1204 The current thread must be the owner of the lock record.
1205 This is NOT checked by this function!
1207 *******************************************************************************/
1209 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1213 threadobject *waitingthread;
1215 /* { the thread t owns the fat lock record lr on the object o } */
1217 /* Get the waiters list. */
1221 for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
1222 /* signal the waiting thread */
1224 waitingthread = w->thread;
1226 /* If the thread was already signaled but hasn't removed
1227 itself from the list yet, just ignore it. */
1229 if (waitingthread->signaled == true)
1232 /* Enter the wait-mutex. */
1234 pthread_mutex_lock(&(waitingthread->waitmutex));
1236 DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
1237 lr, t, waitingthread, waitingthread->sleeping, one));
1239 /* Signal the thread if it's sleeping. */
1241 if (waitingthread->sleeping)
1242 pthread_cond_signal(&(waitingthread->waitcond));
1244 /* Mark the thread as signaled. */
1246 waitingthread->signaled = true;
1248 /* Leave the wait-mutex. */
1250 pthread_mutex_unlock(&(waitingthread->waitmutex));
1252 /* if we should only wake one, we are done */
1260 /* lock_monitor_notify *********************************************************
1262 Notify one thread or all threads waiting on the given object.
1265 t............the current thread
1266 o............the object
1267 one..........if true, only notify one thread
1270 The current thread must be the owner of the object's monitor.
1272 *******************************************************************************/
1274 static void lock_monitor_notify(threadobject *t, java_object_t *o, bool one)
1279 lockword = (ptrint) o->monitorPtr;
1281 /* check if we own this monitor */
1282 /* We don't have to worry about stale values here, as any stale value */
1283 /* will fail this check. */
1285 if (IS_FAT_LOCK(lockword)) {
1287 lr = GET_FAT_LOCK(lockword);
1289 if (lr->owner != t) {
1290 exceptions_throw_illegalmonitorstateexception();
1295 /* it's a thin lock */
1297 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1298 exceptions_throw_illegalmonitorstateexception();
1302 /* inflate this lock */
1304 lr = lock_hashtable_get(o);
1306 lock_record_enter(t, lr);
1307 lock_inflate(t, o, lr);
1310 /* { the thread t owns the fat lock record lr on the object o } */
1312 lock_record_notify(t, lr, one);
1317 /*============================================================================*/
1318 /* INQUIRY FUNCIONS */
1319 /*============================================================================*/
1322 /* lock_is_held_by_current_thread **********************************************
1324 Return true if the current thread owns the monitor of the given object.
1327 o............the object
1330 true, if the current thread holds the lock of this object.
1332 *******************************************************************************/
1334 bool lock_is_held_by_current_thread(java_object_t *o)
1342 /* check if we own this monitor */
1343 /* We don't have to worry about stale values here, as any stale value */
1344 /* will fail this check. */
1346 lockword = (ptrint) o->monitorPtr;
1348 if (IS_FAT_LOCK(lockword)) {
1349 /* it's a fat lock */
1351 lr = GET_FAT_LOCK(lockword);
1353 return (lr->owner == t);
1356 /* it's a thin lock */
1358 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1364 /*============================================================================*/
1365 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1366 /*============================================================================*/
1369 /* lock_wait_for_object ********************************************************
1371 Wait for the given object.
1374 o............the object
1375 millis.......milliseconds to wait
1376 nanos........nanoseconds to wait
1378 *******************************************************************************/
1380 void lock_wait_for_object(java_object_t *o, s8 millis, s4 nanos)
1382 threadobject *thread;
1384 thread = THREADOBJECT;
1386 lock_monitor_wait(thread, o, millis, nanos);
1390 /* lock_notify_object **********************************************************
1392 Notify one thread waiting on the given object.
1395 o............the object
1397 *******************************************************************************/
1399 void lock_notify_object(java_object_t *o)
1401 threadobject *thread;
1403 thread = THREADOBJECT;
1405 lock_monitor_notify(thread, o, true);
1409 /* lock_notify_all_object ******************************************************
1411 Notify all threads waiting on the given object.
1414 o............the object
1416 *******************************************************************************/
1418 void lock_notify_all_object(java_object_t *o)
1420 threadobject *thread;
1422 thread = THREADOBJECT;
1424 lock_monitor_notify(thread, o, false);
/*
 * These are local overrides for various environment variables in Emacs.
 * Please do not remove this and leave it at the end of the file, where
 * Emacs will automagically detect them.
 * ---------------------------------------------------------------------
 * Local variables:
 * mode: c
 * indent-tabs-mode: t
 * c-basic-offset: 4
 * tab-width: 4
 * End:
 * vim:noexpandtab:sw=4:ts=4:
 */