1 /* src/threads/native/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
4 C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5 E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6 J. Wenninger, Institut f. Computersprachen - TU Wien
8 This file is part of CACAO.
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2, or (at
13 your option) any later version.
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
39 #include "mm/memory.h"
41 #include "native/llni.h"
43 #include "threads/lock-common.h"
45 #include "threads/native/lock.h"
46 #include "threads/native/threads.h"
48 #include "toolbox/list.h"
50 #include "vm/global.h"
51 #include "vm/exceptions.h"
52 #include "vm/finalizer.h"
53 #include "vm/stringlocal.h"
56 #include "vmcore/options.h"
58 #if defined(ENABLE_STATISTICS)
59 # include "vmcore/statistics.h"
62 #if defined(ENABLE_VMLOG)
63 #include <vmlog_cacao.h>
66 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
70 /* includes for atomic instructions: */
72 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
73 #include "threads/native/generic-primitives.h"
75 #include "machine-instr.h"
78 #if defined(ENABLE_JVMTI)
79 #include "native/jvmti/cacaodbg.h"
82 #if defined(ENABLE_GC_BOEHM)
83 # include "mm/boehm-gc/include/gc.h"
87 /* debug **********************************************************************/
90 # define DEBUGLOCKS(format) \
92 if (opt_DebugLocks) { \
97 # define DEBUGLOCKS(format)
101 /******************************************************************************/
103 /******************************************************************************/
105 /* number of lock records in the first pool allocated for a thread */
106 #define LOCK_INITIAL_LOCK_RECORDS 8
108 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
110 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
111 ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
114 /******************************************************************************/
115 /* MACROS FOR THE FLAT LOCK CONTENTION BIT */
116 /******************************************************************************/
118 #define LOCK_SET_FLC_BIT(obj) ((obj)->hdrflags |= HDRFLAG_FLC)
119 #define LOCK_CLEAR_FLC_BIT(obj) ((obj)->hdrflags &= ~ HDRFLAG_FLC)
120 #define LOCK_TEST_FLC_BIT(obj) ((obj)->hdrflags & HDRFLAG_FLC)
123 /******************************************************************************/
124 /* MACROS FOR THIN/FAT LOCKS */
125 /******************************************************************************/
127 /* We use a variant of the tasuki locks described in the paper
129 * Tamiya Onodera, Kiyokuni Kawachiya
130 * A Study of Locking Objects with Bimodal Fields
131 * Proceedings of the ACM OOPSLA '99, pp. 223-237
134 * The underlying thin locks are a variant of the thin locks described in
136 * Bacon, Konuru, Murthy, Serrano
137 * Thin Locks: Featherweight Synchronization for Java
138 * Proceedings of the ACM Conference on Programming Language Design and
139 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
142 * In thin lock mode the lockword looks like this:
144 * ,----------------------,-----------,---,
145 * | thread ID | count | 0 |
146 * `----------------------'-----------'---´
148 * thread ID......the 'index' of the owning thread, or 0
149 * count..........number of times the lock has been entered minus 1
150 * 0..............the shape bit is 0 in thin lock mode
152 * In fat lock mode it is basically a lock_record_t *:
154 * ,----------------------------------,---,
155 * | lock_record_t * (without LSB) | 1 |
156 * `----------------------------------'---´
158 * 1..............the shape bit is 1 in fat lock mode
161 #if SIZEOF_VOID_P == 8
162 #define THIN_LOCK_WORD_SIZE 64
164 #define THIN_LOCK_WORD_SIZE 32
167 #define THIN_LOCK_SHAPE_BIT 0x01
169 #define THIN_UNLOCKED 0
171 #define THIN_LOCK_COUNT_SHIFT 1
172 #define THIN_LOCK_COUNT_SIZE 8
173 #define THIN_LOCK_COUNT_INCR (1 << THIN_LOCK_COUNT_SHIFT)
174 #define THIN_LOCK_COUNT_MAX ((1 << THIN_LOCK_COUNT_SIZE) - 1)
175 #define THIN_LOCK_COUNT_MASK (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
177 #define THIN_LOCK_TID_SHIFT (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
178 #define THIN_LOCK_TID_SIZE (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
180 #define IS_THIN_LOCK(lockword) (!((lockword) & THIN_LOCK_SHAPE_BIT))
181 #define IS_FAT_LOCK(lockword) ((lockword) & THIN_LOCK_SHAPE_BIT)
183 #define GET_FAT_LOCK(lockword) ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
184 #define MAKE_FAT_LOCK(ptr) ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)
186 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
189 /* global variables ***********************************************************/
191 /* hashtable mapping objects to lock records */
192 static lock_hashtable_t lock_hashtable;
195 /******************************************************************************/
197 /******************************************************************************/
199 static void lock_hashtable_init(void);
201 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
202 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
203 static void lock_record_enter(threadobject *t, lock_record_t *lr);
204 static void lock_record_exit(threadobject *t, lock_record_t *lr);
205 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
206 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
209 /*============================================================================*/
210 /* INITIALIZATION OF DATA STRUCTURES */
211 /*============================================================================*/
214 /* lock_init *******************************************************************
216 Initialize global data for locking.
218 *******************************************************************************/
/* NOTE(review): the head and braces of lock_init are on lines elided
   from this excerpt; only the body statements are visible here. */
222 /* initialize lock hashtable */
224 lock_hashtable_init();
226 #if defined(ENABLE_VMLOG)
/* vmlog lock tracing needs its own one-time initialization */
227 vmlog_cacao_init_lock();
232 /* lock_pre_compute_thinlock ***************************************************
234 Pre-compute the thin lock value for a thread index.
237 index........the thread index (>= 1)
240 the thin lock value for this thread index
242 *******************************************************************************/
244 ptrint lock_pre_compute_thinlock(s4 index)
/* Place the thread index in the TID field; count and shape bits are 0,
   so (per the lockword layout above) the word reads "entered once by
   this thread" when it is CAS-installed into an object's lockword. */
246 return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
250 /* lock_record_new *************************************************************
252 Allocate a lock record.
254 *******************************************************************************/
256 static lock_record_t *lock_record_new(void)
/* Returns a freshly allocated, unowned lock record with an empty
   waiters list and an initialized (unlocked) pthread mutex.
   (Declaration of `lr` and initialization of further members are on
   elided lines.) */
260 /* allocate the data structure on the C heap */
262 lr = NEW(lock_record_t);
264 #if defined(ENABLE_STATISTICS)
/* bookkeeping only: track total memory used by lock records */
266 size_lock_record += sizeof(lock_record_t);
269 /* initialize the members */
274 lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));
276 /* initialize the mutex */
278 pthread_mutex_init(&(lr->mutex), NULL);
280 DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
286 /* lock_record_free ************************************************************
291 lr....lock record to free
293 *******************************************************************************/
295 static void lock_record_free(lock_record_t *lr)
/* Counterpart of lock_record_new: destroys the mutex, frees the
   waiters list, then releases the record itself.  The caller must
   guarantee the record is no longer reachable (see
   lock_hashtable_remove, the only visible caller). */
297 DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
299 /* Destroy the mutex. */
301 pthread_mutex_destroy(&(lr->mutex));
303 /* Free the waiters list. */
305 list_free(lr->waiters);
307 /* Free the data structure. */
309 FREE(lr, lock_record_t);
311 #if defined(ENABLE_STATISTICS)
313 size_lock_record -= sizeof(lock_record_t);
318 /*============================================================================*/
319 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
320 /*============================================================================*/
322 /* lock_hashtable_init *********************************************************
324 Initialize the global hashtable mapping objects to lock records.
326 *******************************************************************************/
328 static void lock_hashtable_init(void)
/* Create the global object->lock-record table: its guarding mutex,
   an initial prime-sized bucket array, and zeroed chains. */
330 pthread_mutex_init(&(lock_hashtable.mutex), NULL);
332 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
333 lock_hashtable.entries = 0;
334 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
336 #if defined(ENABLE_STATISTICS)
338 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
/* all buckets start out as empty chains */
341 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
345 /* lock_hashtable_grow *********************************************************
347 Grow the lock record hashtable to about twice its current size and
350 *******************************************************************************/
352 /* must be called with hashtable mutex locked */
353 static void lock_hashtable_grow(void)
357 lock_record_t **oldtable;
358 lock_record_t **newtable;
365 /* allocate a new table */
367 oldsize = lock_hashtable.size;
/* NOTE: 2n+1 is not guaranteed prime; modulo distribution may degrade
   (acknowledged by the XXX below) */
368 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
370 DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
372 oldtable = lock_hashtable.ptr;
373 newtable = MNEW(lock_record_t *, newsize);
375 #if defined(ENABLE_STATISTICS)
377 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
380 MZERO(newtable, lock_record_t *, newsize);
382 /* rehash the entries */
/* walk every old chain and push each record onto the head of its new
   bucket (chain order is not preserved, which is fine for a hash) */
384 for (i = 0; i < oldsize; i++) {
389 h = heap_hashcode(lr->object);
390 newslot = h % newsize;
392 lr->hashlink = newtable[newslot];
393 newtable[newslot] = lr;
399 /* replace the old table */
401 lock_hashtable.ptr = newtable;
402 lock_hashtable.size = newsize;
404 MFREE(oldtable, lock_record_t *, oldsize);
406 #if defined(ENABLE_STATISTICS)
408 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
413 /* lock_hashtable_get **********************************************************
415 Find the lock record for the given object. If it does not exists,
416 yet, create it and enter it in the hashtable.
419 t....the current thread
420 o....the object to look up
423 the lock record to use for this object
425 *******************************************************************************/
427 #if defined(ENABLE_GC_BOEHM)
428 static void lock_record_finalizer(void *object, void *p);
431 static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
/* Fast path: a fat lockword already encodes the record pointer, so no
   hashtable access (and no mutex) is needed. */
437 lockword = lock_lockword_get(t, o);
439 if (IS_FAT_LOCK(lockword))
440 return GET_FAT_LOCK(lockword);
442 /* lock the hashtable */
444 pthread_mutex_lock(&(lock_hashtable.mutex));
446 /* lookup the lock record in the hashtable */
/* LLNI critical section: the raw object pointer must not move while we
   hash it and compare chain entries against it */
448 LLNI_CRITICAL_START_THREAD(t);
449 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
450 lr = lock_hashtable.ptr[slot];
452 for (; lr != NULL; lr = lr->hashlink) {
453 if (lr->object == LLNI_DIRECT(o))
456 LLNI_CRITICAL_END_THREAD(t);
459 /* not found, we must create a new one */
461 lr = lock_record_new();
463 LLNI_CRITICAL_START_THREAD(t);
464 lr->object = LLNI_DIRECT(o);
465 LLNI_CRITICAL_END_THREAD(t);
467 #if defined(ENABLE_GC_BOEHM)
468 /* register new finalizer to clean up the lock record */
470 GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
473 /* enter it in the hashtable */
475 lr->hashlink = lock_hashtable.ptr[slot];
476 lock_hashtable.ptr[slot] = lr;
477 lock_hashtable.entries++;
479 /* check whether the hash should grow */
/* grow once the load factor exceeds 4/3 (entries/size > 4/3) */
481 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
482 lock_hashtable_grow();
486 /* unlock the hashtable */
488 pthread_mutex_unlock(&(lock_hashtable.mutex));
490 /* return the new lock record */
496 /* lock_hashtable_remove *******************************************************
498 Remove the lock record for the given object from the hashtable
499 and free it afterwards.
502 t....the current thread
503 o....the object to look up
505 *******************************************************************************/
507 static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
512 lock_record_t *tmplr;
514 /* lock the hashtable */
516 pthread_mutex_lock(&(lock_hashtable.mutex));
518 /* get lock record */
/* the caller (lock_record_finalizer) guarantees the object's lock was
   inflated, hence the assert below */
520 lockword = lock_lockword_get(t, o);
522 assert(IS_FAT_LOCK(lockword));
524 lr = GET_FAT_LOCK(lockword);
526 /* remove the lock-record from the hashtable */
528 LLNI_CRITICAL_START_THREAD(t);
529 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
530 tmplr = lock_hashtable.ptr[slot];
531 LLNI_CRITICAL_END_THREAD(t);
534 /* special handling if it's the first in the chain */
536 lock_hashtable.ptr[slot] = lr->hashlink;
/* otherwise, unlink lr from the middle/end of the bucket chain */
539 for (; tmplr != NULL; tmplr = tmplr->hashlink) {
540 if (tmplr->hashlink == lr) {
541 tmplr->hashlink = lr->hashlink;
/* lr must have been found in the chain, or the table is corrupt */
546 assert(tmplr != NULL);
549 /* decrease entry count */
551 lock_hashtable.entries--;
553 /* unlock the hashtable */
555 pthread_mutex_unlock(&(lock_hashtable.mutex));
557 /* free the lock record */
559 lock_record_free(lr);
563 /* lock_record_finalizer *******************************************************
565 XXX Remove me for exact GC.
567 *******************************************************************************/
569 static void lock_record_finalizer(void *object, void *p)
/* Boehm-GC finalizer installed by lock_hashtable_get: runs the class's
   Java finalizer (if any), then tears down the object's lock record. */
574 o = (java_handle_t *) object;
576 #if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
577 /* XXX this is only a dirty hack to make Boehm work with handles */
579 o = LLNI_WRAP((java_object_t *) o);
582 LLNI_class_get(o, c);
585 if (opt_DebugFinalizer) {
587 log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
594 /* check for a finalizer function */
596 if (c->finalizer != NULL)
597 finalizer_run(object, p);
599 /* remove the lock-record entry from the hashtable and free it */
601 lock_hashtable_remove(THREADOBJECT, o);
605 /*============================================================================*/
606 /* OBJECT LOCK INITIALIZATION */
607 /*============================================================================*/
610 /* lock_init_object_lock *******************************************************
612 Initialize the monitor pointer of the given object. The monitor gets
613 initialized to an unlocked state.
615 *******************************************************************************/
617 void lock_init_object_lock(java_object_t *o)
/* Start in thin-lock mode, unowned, with no recorded flat-lock
   contention. */
621 o->lockword = THIN_UNLOCKED;
622 LOCK_CLEAR_FLC_BIT(o);
626 /* lock_get_initial_lock_word **************************************************
628 Returns the initial (unlocked) lock word. The pointer is
629 required in the code generator to set up a virtual
630 java_objectheader for code patch locking.
632 *******************************************************************************/
634 lock_record_t *lock_get_initial_lock_word(void)
/* THIN_UNLOCKED (0) reinterpreted as a pointer -- the code generator
   uses this as the lockword of a fake object header (see the function
   header comment above). */
636 return (lock_record_t *) THIN_UNLOCKED;
641 /*============================================================================*/
642 /* LOCKING ALGORITHM */
643 /*============================================================================*/
646 /* lock_lockword_get ***********************************************************
648 Get the lockword for the given object.
651 t............the current thread
652 o............the object
654 *******************************************************************************/
656 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
/* Read the raw lockword inside an LLNI critical section so the GC
   cannot move the object between dereference and read. */
660 LLNI_CRITICAL_START_THREAD(t);
661 lockword = LLNI_DIRECT(o)->lockword;
662 LLNI_CRITICAL_END_THREAD(t);
668 /* lock_lockword_set ***********************************************************
670 Set the lockword for the given object.
673 t............the current thread
674 o............the object
675 lockword.....the new lockword value
677 *******************************************************************************/
679 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
/* Plain (non-atomic) store of the lockword, guarded by an LLNI
   critical section; callers rely on already owning the lock or on a
   separate CAS for contended transitions. */
681 LLNI_CRITICAL_START_THREAD(t);
682 LLNI_DIRECT(o)->lockword = lockword;
683 LLNI_CRITICAL_END_THREAD(t);
687 /* lock_record_enter ***********************************************************
689 Enter the lock represented by the given lock record.
692 t.................the current thread
693 lr................the lock record
695 *******************************************************************************/
697 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
/* Blocks until the record's mutex is acquired; owner/count updates are
   on elided lines or handled by the callers. */
699 pthread_mutex_lock(&(lr->mutex));
704 /* lock_record_exit ************************************************************
706 Release the lock represented by the given lock record.
709 t.................the current thread
710 lr................the lock record
713 The current thread must own the lock represented by this lock record.
714 This is NOT checked by this function!
716 *******************************************************************************/
718 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
/* Ownership is NOT verified here (see header comment above); the
   caller must hold lr->mutex. */
721 pthread_mutex_unlock(&(lr->mutex));
725 /* lock_inflate ****************************************************************
727 Inflate the lock of the given object. This may only be called by the
728 owner of the monitor of the object.
731 t............the current thread
732 o............the object of which to inflate the lock
733 lr...........the lock record to install. The current thread must
734 own the lock of this lock record!
737 The current thread must be the owner of this object's monitor AND
738 of the lock record's lock!
740 *******************************************************************************/
742 static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
746 /* get the current lock count */
748 lockword = lock_lockword_get(t, o);
/* already fat: nothing to do, but it must be the same record */
750 if (IS_FAT_LOCK(lockword)) {
751 assert(GET_FAT_LOCK(lockword) == lr);
/* still thin: we must be the thin-lock owner (TID field matches) */
754 assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
756 /* copy the count from the thin lock */
758 lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
761 DEBUGLOCKS(("[lock_inflate : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
762 lr, t, o, lockword, lr->count));
764 /* clear flat-lock-contention bit */
766 LLNI_CRITICAL_START_THREAD(t);
767 LOCK_CLEAR_FLC_BIT(LLNI_DIRECT(o));
768 LLNI_CRITICAL_END_THREAD(t);
770 /* notify waiting objects */
/* wake ALL threads parked on the record so they re-check the (now fat)
   lockword instead of waiting for an flc notification */
772 lock_record_notify(t, lr, false);
/* publish the record pointer with the shape bit set: the lock is fat
   from this point on and never deflates */
776 lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
780 /* lock_monitor_enter **********************************************************
782 Acquire the monitor of the given object. If the current thread already
783 owns the monitor, the lock counter is simply increased.
785 This function blocks until it can acquire the monitor.
788 t............the current thread
789 o............the object of which to enter the monitor
792 true.........the lock has been successfully acquired
793 false........an exception has been thrown
795 *******************************************************************************/
797 bool lock_monitor_enter(java_handle_t *o)
800 /* CAUTION: This code assumes that ptrint is unsigned! */
/* entering the monitor of a null reference throws NPE */
806 exceptions_throw_nullpointerexception();
812 thinlock = t->thinlock;
814 /* most common case: try to thin-lock an unlocked object */
816 LLNI_CRITICAL_START_THREAD(t);
817 lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
818 LLNI_CRITICAL_END_THREAD(t);
820 if (lockword == THIN_UNLOCKED) {
821 /* success. we locked it */
822 /* The Java Memory Model requires a memory barrier here: */
827 /* next common case: recursive lock with small recursion count */
828 /* We don't have to worry about stale values here, as any stale value */
829 /* will indicate another thread holding the lock (or an inflated lock) */
831 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
832 /* we own this monitor */
833 /* check the current recursion count */
/* XOR cancels the matching TID bits, leaving only the count field, so
   this compares count against its maximum without masking */
835 if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
837 /* the recursion count is low enough */
839 lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);
841 /* success. we locked it */
845 /* recursion count overflow */
/* count field exhausted: inflate to a fat lock, which counts in
   lr->count instead */
847 lr = lock_hashtable_get(t, o);
848 lock_record_enter(t, lr);
849 lock_inflate(t, o, lr);
856 /* the lock is either contended or fat */
858 if (IS_FAT_LOCK(lockword)) {
860 lr = GET_FAT_LOCK(lockword);
862 /* check for recursive entering */
863 if (lr->owner == t) {
868 /* acquire the mutex of the lock record */
870 lock_record_enter(t, lr);
/* freshly acquired fat lock: recursion count must start at zero */
872 assert(lr->count == 0);
877 /****** inflation path ******/
879 /* first obtain the lock record for this object */
881 lr = lock_hashtable_get(t, o);
883 #if defined(ENABLE_JVMTI)
884 /* Monitor Contended Enter */
885 jvmti_MonitorContendedEntering(false, o);
888 /* enter the monitor */
890 lock_record_enter(t, lr);
892 #if defined(ENABLE_JVMTI)
893 /* Monitor Contended Entered */
894 jvmti_MonitorContendedEntering(true, o);
/* spin: while the lockword is still thin (held by another thread),
   request notification via the flc bit, retry the CAS, and otherwise
   wait on the record until the owner exits or someone inflates */
899 while (IS_THIN_LOCK(lockword = lock_lockword_get(t, o))) {
900 /* Set the flat lock contention bit to let the owning thread
901 know that we want to be notified of unlocking. */
903 LLNI_CRITICAL_START_THREAD(t);
904 LOCK_SET_FLC_BIT(LLNI_DIRECT(o));
905 LLNI_CRITICAL_END_THREAD(t);
907 DEBUGLOCKS(("thread %d set flc bit on %p lr %p",
908 t->index, (void*) o, (void*) lr));
910 /* try to lock the object */
912 LLNI_CRITICAL_START_THREAD(t);
913 lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
914 LLNI_CRITICAL_END_THREAD(t);
916 if (lockword == THIN_UNLOCKED) {
917 /* we can inflate the lock ourselves */
919 DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
920 t->index, (void*) o, (void*) lr));
922 lock_inflate(t, o, lr);
925 /* Wait until another thread sees the flc bit and notifies
928 (void) lock_record_wait(t, lr, 0, 0);
932 /* we own the inflated lock now */
938 /* lock_monitor_exit ***********************************************************
940 Decrement the counter of a (currently owned) monitor. If the counter
941 reaches zero, release the monitor.
943 If the current thread is not the owner of the monitor, an
944 IllegalMonitorStateException is thrown.
947 t............the current thread
948 o............the object of which to exit the monitor
951 true.........everything ok,
952 false........an exception has been thrown
954 *******************************************************************************/
956 bool lock_monitor_exit(java_handle_t *o)
/* exiting the monitor of a null reference throws NPE */
963 exceptions_throw_nullpointerexception();
969 thinlock = t->thinlock;
971 /* We don't have to worry about stale values here, as any stale value */
972 /* will indicate that we don't own the lock. */
974 lockword = lock_lockword_get(t, o);
976 /* most common case: we release a thin lock that we hold once */
978 if (lockword == thinlock) {
979 /* memory barrier for Java Memory Model */
981 lock_lockword_set(t, o, THIN_UNLOCKED);
982 /* memory barrier for thin locking */
985 /* check if there has been a flat lock contention on this object */
987 if (LOCK_TEST_FLC_BIT(LLNI_DIRECT(o))) {
990 DEBUGLOCKS(("thread %d saw flc bit on %p",
991 t->index, (void*) o));
993 /* there has been a contention on this thin lock */
995 lr = lock_hashtable_get(t, o);
997 DEBUGLOCKS(("thread %d for %p got lr %p",
998 t->index, (void*) o, (void*) lr));
1000 lock_record_enter(t, lr);
/* re-test under lr->mutex: the bit may have been cleared by an
   inflation that happened since the unlocked read above */
1002 if (LOCK_TEST_FLC_BIT(LLNI_DIRECT(o))) {
1003 /* notify a thread that it can try to inflate the lock now */
1005 lock_record_notify(t, lr, true);
1008 lock_record_exit(t, lr);
1014 /* next common case: we release a recursive lock, count > 0 */
1016 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
1017 lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
1021 /* either the lock is fat, or we don't hold it at all */
1023 if (IS_FAT_LOCK(lockword)) {
1027 lr = GET_FAT_LOCK(lockword);
1029 /* check if we own this monitor */
1030 /* We don't have to worry about stale values here, as any stale value */
1031 /* will be != t and thus fail this check. */
1033 if (lr->owner != t) {
1034 exceptions_throw_illegalmonitorstateexception();
1038 /* { the current thread `t` owns the lock record `lr` on object `o` } */
1040 if (lr->count != 0) {
1041 /* we had locked this one recursively. just decrement, it will */
1042 /* still be locked. */
1047 /* unlock this lock record */
1050 pthread_mutex_unlock(&(lr->mutex));
1055 /* legal thin lock cases have been handled above, so this is an error */
1057 exceptions_throw_illegalmonitorstateexception();
1063 /* lock_record_add_waiter ******************************************************
1065 Add a thread to the list of waiting threads of a lock record.
1068 lr...........the lock record
1069 thread.......the thread to add
1071 *******************************************************************************/
1073 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
/* Append `thread` to lr's waiters list; the matching removal is
   lock_record_remove_waiter.  The caller must own the lock record. */
1077 /* Allocate a waiter data structure. */
1079 w = NEW(lock_waiter_t);
1081 #if defined(ENABLE_STATISTICS)
1083 size_lock_waiter += sizeof(lock_waiter_t);
1086 /* Store the thread in the waiter structure. */
1090 /* Add the waiter as last entry to waiters list. */
1092 list_add_last(lr->waiters, w);
1096 /* lock_record_remove_waiter ***************************************************
1098 Remove a thread from the list of waiting threads of a lock record.
1101 lr...........the lock record
1102 t............the current thread
1105 The current thread must be the owner of the lock record.
1107 *******************************************************************************/
1109 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
1114 /* Get the waiters list. */
/* unsynced list traversal is safe: the caller owns the lock record,
   which guards the waiters list */
1118 for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
1119 if (w->thread == thread) {
1120 /* Remove the waiter entry from the list. */
1122 list_remove_unsynced(l, w);
1124 /* Free the waiter data structure. */
1126 FREE(w, lock_waiter_t);
1128 #if defined(ENABLE_STATISTICS)
1130 size_lock_waiter -= sizeof(lock_waiter_t);
1137 /* This should never happen. */
/* reaching here means the thread was not registered as a waiter --
   a VM invariant violation, so abort */
1139 vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
1143 /* lock_record_wait ************************************************************
1145 Wait on a lock record for a given (maximum) amount of time.
1148 t............the current thread
1149 lr...........the lock record
1150 millis.......milliseconds of timeout
1151 nanos........nanoseconds of timeout
1154 true.........we have been interrupted,
1155 false........everything ok
1158 The current thread must be the owner of the lock record.
1159 This is NOT checked by this function!
1161 *******************************************************************************/
1163 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1166 bool wasinterrupted;
1168 DEBUGLOCKS(("[lock_record_wait : lr=%p, t=%p, millis=%lld, nanos=%d]",
1169 lr, thread, millis, nanos));
1171 /* { the thread t owns the fat lock record lr on the object o } */
1173 /* register us as waiter for this object */
1175 lock_record_add_waiter(lr, thread);
1177 /* remember the old lock count */
/* the recursion count must be saved/restored because the monitor is
   fully released while waiting (Object.wait semantics) */
1179 lockcount = lr->count;
1181 /* unlock this record */
1184 lock_record_exit(thread, lr);
1186 /* wait until notified/interrupted/timed out */
1188 wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);
1190 /* re-enter the monitor */
1192 lock_record_enter(thread, lr);
1194 /* remove us from the list of waiting threads */
1196 lock_record_remove_waiter(lr, thread);
1198 /* restore the old lock count */
1200 lr->count = lockcount;
1202 /* return if we have been interrupted */
1204 return wasinterrupted;
1208 /* lock_monitor_wait ***********************************************************
1210 Wait on an object for a given (maximum) amount of time.
1213 t............the current thread
1214 o............the object
1215 millis.......milliseconds of timeout
1216 nanos........nanoseconds of timeout
1219 The current thread must be the owner of the object's monitor.
1221 *******************************************************************************/
1223 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
1228 lockword = lock_lockword_get(t, o);
1230 /* check if we own this monitor */
1231 /* We don't have to worry about stale values here, as any stale value */
1232 /* will fail this check. */
1234 if (IS_FAT_LOCK(lockword)) {
1236 lr = GET_FAT_LOCK(lockword);
1238 if (lr->owner != t) {
1239 exceptions_throw_illegalmonitorstateexception();
1244 /* it's a thin lock */
1246 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1247 exceptions_throw_illegalmonitorstateexception();
1251 /* inflate this lock */
/* waiting requires a fat lock (we need the waiters list and mutex),
   so a held thin lock is inflated first */
1253 lr = lock_hashtable_get(t, o);
1254 lock_record_enter(t, lr);
1255 lock_inflate(t, o, lr);
1258 /* { the thread t owns the fat lock record lr on the object o } */
/* map interruption onto the Java-level InterruptedException */
1260 if (lock_record_wait(t, lr, millis, nanos))
1261 exceptions_throw_interruptedexception();
1265 /* lock_record_notify **********************************************************
1267 Notify one thread or all threads waiting on the given lock record.
1270 t............the current thread
1271 lr...........the lock record
1272 one..........if true, only notify one thread
1275 The current thread must be the owner of the lock record.
1276 This is NOT checked by this function!
1278 *******************************************************************************/
1280 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1284 threadobject *waitingthread;
1286 /* { the thread t owns the fat lock record lr on the object o } */
1288 /* Get the waiters list. */
1292 for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
1293 /* signal the waiting thread */
1295 waitingthread = w->thread;
1297 /* If the thread was already signaled but hasn't removed
1298 itself from the list yet, just ignore it. */
1300 if (waitingthread->signaled == true)
1303 /* Enter the wait-mutex. */
/* waitmutex serializes against the waiter's own sleep/wake sequence
   so the signal cannot be lost between its check and its wait */
1305 pthread_mutex_lock(&(waitingthread->waitmutex));
1307 DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
1308 lr, t, waitingthread, waitingthread->sleeping, one));
1310 /* Signal the thread if it's sleeping. */
1312 if (waitingthread->sleeping)
1313 pthread_cond_signal(&(waitingthread->waitcond));
1315 /* Mark the thread as signaled. */
/* set even when not sleeping: a thread about to sleep will observe
   the flag instead of blocking */
1317 waitingthread->signaled = true;
1319 /* Leave the wait-mutex. */
1321 pthread_mutex_unlock(&(waitingthread->waitmutex));
1323 /* if we should only wake one, we are done */
1331 /* lock_monitor_notify *********************************************************
1333 Notify one thread or all threads waiting on the given object.
1336 t............the current thread
1337 o............the object
1338 one..........if true, only notify one thread
1341 The current thread must be the owner of the object's monitor.
1343 *******************************************************************************/
1345 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1350 lockword = lock_lockword_get(t, o);
1352 /* check if we own this monitor */
1353 /* We don't have to worry about stale values here, as any stale value */
1354 /* will fail this check. */
1356 if (IS_FAT_LOCK(lockword)) {
1358 lr = GET_FAT_LOCK(lockword);
1360 if (lr->owner != t) {
1361 exceptions_throw_illegalmonitorstateexception();
1366 /* it's a thin lock */
1368 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1369 exceptions_throw_illegalmonitorstateexception();
1373 /* inflate this lock */
/* NOTE: a held thin lock cannot have waiters yet, but it is inflated
   anyway so the notify goes through the uniform fat-lock path */
1375 lr = lock_hashtable_get(t, o);
1376 lock_record_enter(t, lr);
1377 lock_inflate(t, o, lr);
1380 /* { the thread t owns the fat lock record lr on the object o } */
1382 lock_record_notify(t, lr, one);
1387 /*============================================================================*/
1388 /* INQUIRY FUNCTIONS */
1389 /*============================================================================*/
1392 /* lock_is_held_by_current_thread **********************************************
1394 Return true if the current thread owns the monitor of the given object.
1397 o............the object
1400 true, if the current thread holds the lock of this object.
1402 *******************************************************************************/
1404 bool lock_is_held_by_current_thread(java_handle_t *o)
1412 /* check if we own this monitor */
1413 /* We don't have to worry about stale values here, as any stale value */
1414 /* will fail this check. */
1416 lockword = lock_lockword_get(t, o);
1418 if (IS_FAT_LOCK(lockword)) {
1419 /* it's a fat lock */
/* fat case: ownership is recorded in the lock record */
1421 lr = GET_FAT_LOCK(lockword);
1423 return (lr->owner == t);
1426 /* it's a thin lock */
/* thin case: we own it iff the TID field matches our precomputed
   thinlock (count bits masked out) */
1428 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1434 /*============================================================================*/
1435 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1436 /*============================================================================*/
1439 /* lock_wait_for_object ********************************************************
1441 Wait for the given object.
1444 o............the object
1445 millis.......milliseconds to wait
1446 nanos........nanoseconds to wait
1448 *******************************************************************************/
1450 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1452 threadobject *thread;
/* thin wrapper: resolve the current thread and delegate */
1454 thread = THREADOBJECT;
1456 lock_monitor_wait(thread, o, millis, nanos);
1460 /* lock_notify_object **********************************************************
1462 Notify one thread waiting on the given object.
1465 o............the object
1467 *******************************************************************************/
1469 void lock_notify_object(java_handle_t *o)
1471 threadobject *thread;
/* thin wrapper: notify exactly one waiter (one == true) */
1473 thread = THREADOBJECT;
1475 lock_monitor_notify(thread, o, true);
1479 /* lock_notify_all_object ******************************************************
1481 Notify all threads waiting on the given object.
1484 o............the object
1486 *******************************************************************************/
1488 void lock_notify_all_object(java_handle_t *o)
1490 threadobject *thread;
/* thin wrapper: notify every waiter (one == false) */
1492 thread = THREADOBJECT;
1494 lock_monitor_notify(thread, o, false);
1499 * These are local overrides for various environment variables in Emacs.
1500 * Please do not remove this and leave it at the end of the file, where
1501 * Emacs will automagically detect them.
1502 * ---------------------------------------------------------------------
1505 * indent-tabs-mode: t
1509 * vim:noexpandtab:sw=4:ts=4: