1 /* src/threads/native/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
4 C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5 E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6 J. Wenninger, Institut f. Computersprachen - TU Wien
8 This file is part of CACAO.
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2, or (at
13 your option) any later version.
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
39 #include "mm/memory.h"
41 #include "native/llni.h"
43 #include "threads/lock-common.h"
45 #include "threads/native/lock.h"
46 #include "threads/native/threads.h"
48 #include "toolbox/list.h"
50 #include "vm/global.h"
51 #include "vm/exceptions.h"
52 #include "vm/finalizer.h"
53 #include "vm/stringlocal.h"
56 #include "vmcore/options.h"
58 #if defined(ENABLE_STATISTICS)
59 # include "vmcore/statistics.h"
62 #if defined(ENABLE_VMLOG)
63 #include <vmlog_cacao.h>
66 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
70 /* includes for atomic instructions: */
72 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
73 #include "threads/native/generic-primitives.h"
75 #include "machine-instr.h"
78 #if defined(ENABLE_JVMTI)
79 #include "native/jvmti/cacaodbg.h"
82 #if defined(ENABLE_GC_BOEHM)
83 # include "mm/boehm-gc/include/gc.h"
/* debug **********************************************************************/

#if !defined(NDEBUG)
/* Print a lock-debugging message iff -XX:+DebugLocks was given.
   `format` is a parenthesized printf-style argument list. */
# define DEBUGLOCKS(format) \
	do { \
		if (opt_DebugLocks) { \
			log_println format; \
		} \
	} while (0)
#else
# define DEBUGLOCKS(format)
#endif
/******************************************************************************/
/* MACROS                                                                     */
/******************************************************************************/

/* number of lock records in the first pool allocated for a thread */
#define LOCK_INITIAL_LOCK_RECORDS 8

#define LOCK_INITIAL_HASHTABLE_SIZE  1613	/* a prime in the middle between 1024 and 2048 */

/* CAS wrapper that returns the *old* value found at `address`, so callers
   can tell whether the swap succeeded by comparing against `oldvalue`. */
#define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
	((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))


/******************************************************************************/
/* MACROS FOR THE FLAT LOCK CONTENTION BIT                                    */
/******************************************************************************/

/* The FLC bit in the object header signals the lock-holding thread that
   another thread is contending for the (thin) lock and wants a notify. */
#define LOCK_SET_FLC_BIT(obj)    ((obj)->hdrflags |= HDRFLAG_FLC)
#define LOCK_CLEAR_FLC_BIT(obj)  ((obj)->hdrflags &= ~ HDRFLAG_FLC)
#define LOCK_TEST_FLC_BIT(obj)   ((obj)->hdrflags & HDRFLAG_FLC)
/******************************************************************************/
/* MACROS FOR THIN/FAT LOCKS                                                  */
/******************************************************************************/

/* We use a variant of the tasuki locks described in the paper
 *
 *     Tamiya Onodera, Kiyokuni Kawachiya
 *     A Study of Locking Objects with Bimodal Fields
 *     Proceedings of the ACM OOPSLA '99, pp. 223-237
 *     1999
 *
 * The underlying thin locks are a variant of the thin locks described in
 *
 *     Bacon, Konuru, Murthy, Serrano
 *     Thin Locks: Featherweight Synchronization for Java
 *     Proceedings of the ACM Conference on Programming Language Design and
 *     Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
 *     June 1998
 *
 * In thin lock mode the lockword looks like this:
 *
 *     ,----------------------,-----------,---,
 *     |      thread ID       |   count   | 0 |
 *     `----------------------'-----------'---´
 *
 *     thread ID......the 'index' of the owning thread, or 0
 *     count..........number of times the lock has been entered minus 1
 *     0..............the shape bit is 0 in thin lock mode
 *
 * In fat lock mode it is basically a lock_record_t *:
 *
 *     ,----------------------------------,---,
 *     |    lock_record_t * (without LSB) | 1 |
 *     `----------------------------------'---´
 *
 *     1..............the shape bit is 1 in fat lock mode
 */

#if SIZEOF_VOID_P == 8
#define THIN_LOCK_WORD_SIZE    64
#else
#define THIN_LOCK_WORD_SIZE    32
#endif

#define THIN_LOCK_SHAPE_BIT    0x01

#define THIN_UNLOCKED          0

#define THIN_LOCK_COUNT_SHIFT  1
#define THIN_LOCK_COUNT_SIZE   8
#define THIN_LOCK_COUNT_INCR   (1 << THIN_LOCK_COUNT_SHIFT)
#define THIN_LOCK_COUNT_MAX    ((1 << THIN_LOCK_COUNT_SIZE) - 1)
#define THIN_LOCK_COUNT_MASK   (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)

#define THIN_LOCK_TID_SHIFT    (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
#define THIN_LOCK_TID_SIZE     (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)

#define IS_THIN_LOCK(lockword)  (!((lockword) & THIN_LOCK_SHAPE_BIT))
#define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)

#define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
#define MAKE_FAT_LOCK(ptr)      ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)

#define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
189 /* global variables ***********************************************************/
191 /* hashtable mapping objects to lock records */
192 static lock_hashtable_t lock_hashtable;
195 /******************************************************************************/
197 /******************************************************************************/
199 static void lock_hashtable_init(void);
201 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
202 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
203 static void lock_record_enter(threadobject *t, lock_record_t *lr);
204 static void lock_record_exit(threadobject *t, lock_record_t *lr);
205 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
206 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
/*============================================================================*/
/* INITIALIZATION OF DATA STRUCTURES                                          */
/*============================================================================*/


/* lock_init *******************************************************************

   Initialize global data for locking.

*******************************************************************************/

void lock_init(void)
{
	/* initialize lock hashtable */

	lock_hashtable_init();

#if defined(ENABLE_VMLOG)
	vmlog_cacao_init_lock();
#endif
}
232 /* lock_pre_compute_thinlock ***************************************************
234 Pre-compute the thin lock value for a thread index.
237 index........the thead index (>= 1)
240 the thin lock value for this thread index
242 *******************************************************************************/
244 ptrint lock_pre_compute_thinlock(s4 index)
246 return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
250 /* lock_record_new *************************************************************
252 Allocate a lock record.
254 *******************************************************************************/
256 static lock_record_t *lock_record_new(void)
260 /* allocate the data structure on the C heap */
262 lr = NEW(lock_record_t);
264 #if defined(ENABLE_STATISTICS)
266 size_lock_record += sizeof(lock_record_t);
269 /* initialize the members */
274 lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));
276 #if defined(ENABLE_GC_CACAO)
277 /* register the lock object as weak reference with the GC */
279 gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
282 /* initialize the mutex */
284 pthread_mutex_init(&(lr->mutex), NULL);
286 DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
292 /* lock_record_free ************************************************************
297 lr....lock record to free
299 *******************************************************************************/
301 static void lock_record_free(lock_record_t *lr)
303 DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
305 /* Destroy the mutex. */
307 pthread_mutex_destroy(&(lr->mutex));
309 #if defined(ENABLE_GC_CACAO)
310 /* unregister the lock object reference with the GC */
312 gc_weakreference_unregister(&(lr->object));
315 /* Free the waiters list. */
317 list_free(lr->waiters);
319 /* Free the data structure. */
321 FREE(lr, lock_record_t);
323 #if defined(ENABLE_STATISTICS)
325 size_lock_record -= sizeof(lock_record_t);
330 /*============================================================================*/
331 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
332 /*============================================================================*/
334 /* lock_hashtable_init *********************************************************
336 Initialize the global hashtable mapping objects to lock records.
338 *******************************************************************************/
340 static void lock_hashtable_init(void)
342 pthread_mutex_init(&(lock_hashtable.mutex), NULL);
344 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
345 lock_hashtable.entries = 0;
346 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
348 #if defined(ENABLE_STATISTICS)
350 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
353 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
357 /* lock_hashtable_grow *********************************************************
359 Grow the lock record hashtable to about twice its current size and
362 *******************************************************************************/
364 /* must be called with hashtable mutex locked */
365 static void lock_hashtable_grow(void)
369 lock_record_t **oldtable;
370 lock_record_t **newtable;
377 /* allocate a new table */
379 oldsize = lock_hashtable.size;
380 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
382 DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
384 oldtable = lock_hashtable.ptr;
385 newtable = MNEW(lock_record_t *, newsize);
387 #if defined(ENABLE_STATISTICS)
389 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
392 MZERO(newtable, lock_record_t *, newsize);
394 /* rehash the entries */
396 for (i = 0; i < oldsize; i++) {
401 h = heap_hashcode(lr->object);
402 newslot = h % newsize;
404 lr->hashlink = newtable[newslot];
405 newtable[newslot] = lr;
411 /* replace the old table */
413 lock_hashtable.ptr = newtable;
414 lock_hashtable.size = newsize;
416 MFREE(oldtable, lock_record_t *, oldsize);
418 #if defined(ENABLE_STATISTICS)
420 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
/* lock_hashtable_cleanup ******************************************************

   Removes (and frees) lock records which have a cleared object reference
   from the hashtable. The locked object was reclaimed by the GC.

*******************************************************************************/

#if defined(ENABLE_GC_CACAO)
void lock_hashtable_cleanup(void)
{
	lock_record_t *lr;
	lock_record_t *prev;
	lock_record_t *next;
	u4             i;

	/* lock the hashtable */

	pthread_mutex_lock(&(lock_hashtable.mutex));

	/* search the hashtable for cleared references */

	for (i = 0; i < lock_hashtable.size; i++) {
		lr = lock_hashtable.ptr[i];
		prev = NULL;

		while (lr) {
			next = lr->hashlink;

			/* remove lock records with cleared references */

			if (lr->object == NULL) {

				/* unlink the lock record from the hashtable */

				if (prev == NULL)
					lock_hashtable.ptr[i] = next;
				else
					prev->hashlink = next;

				/* free the lock record */

				lock_record_free(lr);
			}
			else {
				prev = lr;
			}

			lr = next;
		}
	}

	/* unlock the hashtable */

	pthread_mutex_unlock(&(lock_hashtable.mutex));
}
#endif
486 /* lock_hashtable_get **********************************************************
488 Find the lock record for the given object. If it does not exists,
489 yet, create it and enter it in the hashtable.
492 t....the current thread
493 o....the object to look up
496 the lock record to use for this object
498 *******************************************************************************/
500 #if defined(ENABLE_GC_BOEHM)
501 static void lock_record_finalizer(void *object, void *p);
504 static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
510 lockword = lock_lockword_get(t, o);
512 if (IS_FAT_LOCK(lockword))
513 return GET_FAT_LOCK(lockword);
515 /* lock the hashtable */
517 pthread_mutex_lock(&(lock_hashtable.mutex));
519 /* lookup the lock record in the hashtable */
521 LLNI_CRITICAL_START_THREAD(t);
522 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
523 lr = lock_hashtable.ptr[slot];
525 for (; lr != NULL; lr = lr->hashlink) {
526 if (lr->object == LLNI_DIRECT(o))
529 LLNI_CRITICAL_END_THREAD(t);
532 /* not found, we must create a new one */
534 lr = lock_record_new();
536 LLNI_CRITICAL_START_THREAD(t);
537 lr->object = LLNI_DIRECT(o);
538 LLNI_CRITICAL_END_THREAD(t);
540 #if defined(ENABLE_GC_BOEHM)
541 /* register new finalizer to clean up the lock record */
543 GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
546 /* enter it in the hashtable */
548 lr->hashlink = lock_hashtable.ptr[slot];
549 lock_hashtable.ptr[slot] = lr;
550 lock_hashtable.entries++;
552 /* check whether the hash should grow */
554 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
555 lock_hashtable_grow();
559 /* unlock the hashtable */
561 pthread_mutex_unlock(&(lock_hashtable.mutex));
563 /* return the new lock record */
569 /* lock_hashtable_remove *******************************************************
571 Remove the lock record for the given object from the hashtable
572 and free it afterwards.
575 t....the current thread
576 o....the object to look up
578 *******************************************************************************/
580 static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
585 lock_record_t *tmplr;
587 /* lock the hashtable */
589 pthread_mutex_lock(&(lock_hashtable.mutex));
591 /* get lock record */
593 lockword = lock_lockword_get(t, o);
595 assert(IS_FAT_LOCK(lockword));
597 lr = GET_FAT_LOCK(lockword);
599 /* remove the lock-record from the hashtable */
601 LLNI_CRITICAL_START_THREAD(t);
602 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
603 tmplr = lock_hashtable.ptr[slot];
604 LLNI_CRITICAL_END_THREAD(t);
607 /* special handling if it's the first in the chain */
609 lock_hashtable.ptr[slot] = lr->hashlink;
612 for (; tmplr != NULL; tmplr = tmplr->hashlink) {
613 if (tmplr->hashlink == lr) {
614 tmplr->hashlink = lr->hashlink;
619 assert(tmplr != NULL);
622 /* decrease entry count */
624 lock_hashtable.entries--;
626 /* unlock the hashtable */
628 pthread_mutex_unlock(&(lock_hashtable.mutex));
630 /* free the lock record */
632 lock_record_free(lr);
/* lock_record_finalizer *******************************************************

   Boehm-GC finalizer: runs the object's Java finalizer (if any) and then
   removes and frees the object's lock-record hashtable entry.

   XXX Remove me for exact GC.

*******************************************************************************/

#if defined(ENABLE_GC_BOEHM)
static void lock_record_finalizer(void *object, void *p)
{
	java_handle_t *o;
	classinfo     *c;

	o = (java_handle_t *) object;

#if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
	/* XXX this is only a dirty hack to make Boehm work with handles */

	o = LLNI_WRAP((java_object_t *) o);
#endif

	LLNI_class_get(o, c);

#if !defined(NDEBUG)
	if (opt_DebugFinalizer) {
		log_start();
		log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
		class_print(c);
		log_print("]");
		log_finish();
	}
#endif

	/* check for a finalizer function */

	if (c->finalizer != NULL)
		finalizer_run(object, p);

	/* remove the lock-record entry from the hashtable and free it */

	lock_hashtable_remove(THREADOBJECT, o);
}
#endif
678 /*============================================================================*/
679 /* OBJECT LOCK INITIALIZATION */
680 /*============================================================================*/
683 /* lock_init_object_lock *******************************************************
685 Initialize the monitor pointer of the given object. The monitor gets
686 initialized to an unlocked state.
688 *******************************************************************************/
690 void lock_init_object_lock(java_object_t *o)
694 o->lockword = THIN_UNLOCKED;
695 LOCK_CLEAR_FLC_BIT(o);
699 /*============================================================================*/
700 /* LOCKING ALGORITHM */
701 /*============================================================================*/
704 /* lock_lockword_get ***********************************************************
706 Get the lockword for the given object.
709 t............the current thread
710 o............the object
712 *******************************************************************************/
714 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
718 LLNI_CRITICAL_START_THREAD(t);
719 lockword = LLNI_DIRECT(o)->lockword;
720 LLNI_CRITICAL_END_THREAD(t);
726 /* lock_lockword_set ***********************************************************
728 Set the lockword for the given object.
731 t............the current thread
732 o............the object
733 lockword.....the new lockword value
735 *******************************************************************************/
737 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
739 LLNI_CRITICAL_START_THREAD(t);
740 LLNI_DIRECT(o)->lockword = lockword;
741 LLNI_CRITICAL_END_THREAD(t);
745 /* lock_record_enter ***********************************************************
747 Enter the lock represented by the given lock record.
750 t.................the current thread
751 lr................the lock record
753 *******************************************************************************/
755 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
757 pthread_mutex_lock(&(lr->mutex));
762 /* lock_record_exit ************************************************************
764 Release the lock represented by the given lock record.
767 t.................the current thread
768 lr................the lock record
771 The current thread must own the lock represented by this lock record.
772 This is NOT checked by this function!
774 *******************************************************************************/
776 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
779 pthread_mutex_unlock(&(lr->mutex));
783 /* lock_inflate ****************************************************************
785 Inflate the lock of the given object. This may only be called by the
786 owner of the monitor of the object.
789 t............the current thread
790 o............the object of which to inflate the lock
791 lr...........the lock record to install. The current thread must
792 own the lock of this lock record!
795 The current thread must be the owner of this object's monitor AND
796 of the lock record's lock!
798 *******************************************************************************/
800 static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
804 /* get the current lock count */
806 lockword = lock_lockword_get(t, o);
808 if (IS_FAT_LOCK(lockword)) {
809 assert(GET_FAT_LOCK(lockword) == lr);
812 assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
814 /* copy the count from the thin lock */
816 lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
819 DEBUGLOCKS(("[lock_inflate : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
820 lr, t, o, lockword, lr->count));
822 /* clear flat-lock-contention bit */
824 LLNI_CRITICAL_START_THREAD(t);
825 LOCK_CLEAR_FLC_BIT(LLNI_DIRECT(o));
826 LLNI_CRITICAL_END_THREAD(t);
828 /* notify waiting objects */
830 lock_record_notify(t, lr, false);
834 lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
838 /* lock_monitor_enter **********************************************************
840 Acquire the monitor of the given object. If the current thread already
841 owns the monitor, the lock counter is simply increased.
843 This function blocks until it can acquire the monitor.
846 t............the current thread
847 o............the object of which to enter the monitor
850 true.........the lock has been successfully acquired
851 false........an exception has been thrown
853 *******************************************************************************/
855 bool lock_monitor_enter(java_handle_t *o)
858 /* CAUTION: This code assumes that ptrint is unsigned! */
864 exceptions_throw_nullpointerexception();
870 thinlock = t->thinlock;
872 /* most common case: try to thin-lock an unlocked object */
874 LLNI_CRITICAL_START_THREAD(t);
875 lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
876 LLNI_CRITICAL_END_THREAD(t);
878 if (lockword == THIN_UNLOCKED) {
879 /* success. we locked it */
880 /* The Java Memory Model requires a memory barrier here: */
881 /* Because of the CAS above, this barrier is a nop on x86 / x86_64 */
882 MEMORY_BARRIER_AFTER_ATOMIC();
886 /* next common case: recursive lock with small recursion count */
887 /* We don't have to worry about stale values here, as any stale value */
888 /* will indicate another thread holding the lock (or an inflated lock) */
890 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
891 /* we own this monitor */
892 /* check the current recursion count */
894 if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
896 /* the recursion count is low enough */
898 lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);
900 /* success. we locked it */
904 /* recursion count overflow */
906 lr = lock_hashtable_get(t, o);
907 lock_record_enter(t, lr);
908 lock_inflate(t, o, lr);
915 /* the lock is either contented or fat */
917 if (IS_FAT_LOCK(lockword)) {
919 lr = GET_FAT_LOCK(lockword);
921 /* check for recursive entering */
922 if (lr->owner == t) {
927 /* acquire the mutex of the lock record */
929 lock_record_enter(t, lr);
931 assert(lr->count == 0);
936 /****** inflation path ******/
938 /* first obtain the lock record for this object */
940 lr = lock_hashtable_get(t, o);
942 #if defined(ENABLE_JVMTI)
943 /* Monitor Contended Enter */
944 jvmti_MonitorContendedEntering(false, o);
947 /* enter the monitor */
949 lock_record_enter(t, lr);
951 #if defined(ENABLE_JVMTI)
952 /* Monitor Contended Entered */
953 jvmti_MonitorContendedEntering(true, o);
958 while (IS_THIN_LOCK(lockword = lock_lockword_get(t, o))) {
959 /* Set the flat lock contention bit to let the owning thread
960 know that we want to be notified of unlocking. */
962 LLNI_CRITICAL_START_THREAD(t);
963 LOCK_SET_FLC_BIT(LLNI_DIRECT(o));
964 LLNI_CRITICAL_END_THREAD(t);
966 DEBUGLOCKS(("thread %d set flc bit on %p lr %p",
967 t->index, (void*) o, (void*) lr));
969 /* try to lock the object */
971 LLNI_CRITICAL_START_THREAD(t);
972 lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
973 LLNI_CRITICAL_END_THREAD(t);
975 if (lockword == THIN_UNLOCKED) {
976 /* we can inflate the lock ourselves */
978 DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
979 t->index, (void*) o, (void*) lr));
981 lock_inflate(t, o, lr);
984 /* Wait until another thread sees the flc bit and notifies
987 (void) lock_record_wait(t, lr, 0, 0);
991 /* we own the inflated lock now */
997 /* lock_monitor_exit ***********************************************************
999 Decrement the counter of a (currently owned) monitor. If the counter
1000 reaches zero, release the monitor.
1002 If the current thread is not the owner of the monitor, an
1003 IllegalMonitorState exception is thrown.
1006 t............the current thread
1007 o............the object of which to exit the monitor
1010 true.........everything ok,
1011 false........an exception has been thrown
1013 *******************************************************************************/
1015 bool lock_monitor_exit(java_handle_t *o)
1022 exceptions_throw_nullpointerexception();
1028 thinlock = t->thinlock;
1030 /* We don't have to worry about stale values here, as any stale value */
1031 /* will indicate that we don't own the lock. */
1033 lockword = lock_lockword_get(t, o);
1035 /* most common case: we release a thin lock that we hold once */
1037 if (lockword == thinlock) {
1038 /* memory barrier for Java Memory Model */
1039 STORE_ORDER_BARRIER();
1040 lock_lockword_set(t, o, THIN_UNLOCKED);
1041 /* memory barrier for thin locking */
1044 /* check if there has been a flat lock contention on this object */
1046 if (LOCK_TEST_FLC_BIT(LLNI_DIRECT(o))) {
1049 DEBUGLOCKS(("thread %d saw flc bit on %p",
1050 t->index, (void*) o));
1052 /* there has been a contention on this thin lock */
1054 lr = lock_hashtable_get(t, o);
1056 DEBUGLOCKS(("thread %d for %p got lr %p",
1057 t->index, (void*) o, (void*) lr));
1059 lock_record_enter(t, lr);
1061 if (LOCK_TEST_FLC_BIT(LLNI_DIRECT(o))) {
1062 /* notify a thread that it can try to inflate the lock now */
1064 lock_record_notify(t, lr, true);
1067 lock_record_exit(t, lr);
1073 /* next common case: we release a recursive lock, count > 0 */
1075 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
1076 lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
1080 /* either the lock is fat, or we don't hold it at all */
1082 if (IS_FAT_LOCK(lockword)) {
1086 lr = GET_FAT_LOCK(lockword);
1088 /* check if we own this monitor */
1089 /* We don't have to worry about stale values here, as any stale value */
1090 /* will be != t and thus fail this check. */
1092 if (lr->owner != t) {
1093 exceptions_throw_illegalmonitorstateexception();
1097 /* { the current thread `t` owns the lock record `lr` on object `o` } */
1099 if (lr->count != 0) {
1100 /* we had locked this one recursively. just decrement, it will */
1101 /* still be locked. */
1106 /* unlock this lock record */
1109 pthread_mutex_unlock(&(lr->mutex));
1114 /* legal thin lock cases have been handled above, so this is an error */
1116 exceptions_throw_illegalmonitorstateexception();
1122 /* lock_record_add_waiter ******************************************************
1124 Add a thread to the list of waiting threads of a lock record.
1127 lr...........the lock record
1128 thread.......the thread to add
1130 *******************************************************************************/
1132 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
1136 /* Allocate a waiter data structure. */
1138 w = NEW(lock_waiter_t);
1140 #if defined(ENABLE_STATISTICS)
1142 size_lock_waiter += sizeof(lock_waiter_t);
1145 /* Store the thread in the waiter structure. */
1149 /* Add the waiter as last entry to waiters list. */
1151 list_add_last(lr->waiters, w);
1155 /* lock_record_remove_waiter ***************************************************
1157 Remove a thread from the list of waiting threads of a lock record.
1160 lr...........the lock record
1161 t............the current thread
1164 The current thread must be the owner of the lock record.
1166 *******************************************************************************/
1168 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
1173 /* Get the waiters list. */
1177 for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
1178 if (w->thread == thread) {
1179 /* Remove the waiter entry from the list. */
1181 list_remove_unsynced(l, w);
1183 /* Free the waiter data structure. */
1185 FREE(w, lock_waiter_t);
1187 #if defined(ENABLE_STATISTICS)
1189 size_lock_waiter -= sizeof(lock_waiter_t);
1196 /* This should never happen. */
1198 vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
1202 /* lock_record_wait ************************************************************
1204 Wait on a lock record for a given (maximum) amount of time.
1207 t............the current thread
1208 lr...........the lock record
1209 millis.......milliseconds of timeout
1210 nanos........nanoseconds of timeout
1213 true.........we have been interrupted,
1214 false........everything ok
1217 The current thread must be the owner of the lock record.
1218 This is NOT checked by this function!
1220 *******************************************************************************/
1222 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1225 bool wasinterrupted;
1227 DEBUGLOCKS(("[lock_record_wait : lr=%p, t=%p, millis=%lld, nanos=%d]",
1228 lr, thread, millis, nanos));
1230 /* { the thread t owns the fat lock record lr on the object o } */
1232 /* register us as waiter for this object */
1234 lock_record_add_waiter(lr, thread);
1236 /* remember the old lock count */
1238 lockcount = lr->count;
1240 /* unlock this record */
1243 lock_record_exit(thread, lr);
1245 /* wait until notified/interrupted/timed out */
1247 wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);
1249 /* re-enter the monitor */
1251 lock_record_enter(thread, lr);
1253 /* remove us from the list of waiting threads */
1255 lock_record_remove_waiter(lr, thread);
1257 /* restore the old lock count */
1259 lr->count = lockcount;
1261 /* return if we have been interrupted */
1263 return wasinterrupted;
1267 /* lock_monitor_wait ***********************************************************
1269 Wait on an object for a given (maximum) amount of time.
1272 t............the current thread
1273 o............the object
1274 millis.......milliseconds of timeout
1275 nanos........nanoseconds of timeout
1278 The current thread must be the owner of the object's monitor.
1280 *******************************************************************************/
1282 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
1287 lockword = lock_lockword_get(t, o);
1289 /* check if we own this monitor */
1290 /* We don't have to worry about stale values here, as any stale value */
1291 /* will fail this check. */
1293 if (IS_FAT_LOCK(lockword)) {
1295 lr = GET_FAT_LOCK(lockword);
1297 if (lr->owner != t) {
1298 exceptions_throw_illegalmonitorstateexception();
1303 /* it's a thin lock */
1305 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1306 exceptions_throw_illegalmonitorstateexception();
1310 /* inflate this lock */
1312 lr = lock_hashtable_get(t, o);
1313 lock_record_enter(t, lr);
1314 lock_inflate(t, o, lr);
1317 /* { the thread t owns the fat lock record lr on the object o } */
1319 if (lock_record_wait(t, lr, millis, nanos))
1320 exceptions_throw_interruptedexception();
1324 /* lock_record_notify **********************************************************
1326 Notify one thread or all threads waiting on the given lock record.
1329 t............the current thread
1330 lr...........the lock record
1331 one..........if true, only notify one thread
1334 The current thread must be the owner of the lock record.
1335 This is NOT checked by this function!
1337 *******************************************************************************/
1339 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1343 threadobject *waitingthread;
1345 /* { the thread t owns the fat lock record lr on the object o } */
1347 /* Get the waiters list. */
1351 for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
1352 /* signal the waiting thread */
1354 waitingthread = w->thread;
1356 /* If the thread was already signaled but hasn't removed
1357 itself from the list yet, just ignore it. */
1359 if (waitingthread->signaled == true)
1362 /* Enter the wait-mutex. */
1364 pthread_mutex_lock(&(waitingthread->waitmutex));
1366 DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
1367 lr, t, waitingthread, waitingthread->sleeping, one));
1369 /* Signal the thread if it's sleeping. */
1371 if (waitingthread->sleeping)
1372 pthread_cond_signal(&(waitingthread->waitcond));
1374 /* Mark the thread as signaled. */
1376 waitingthread->signaled = true;
1378 /* Leave the wait-mutex. */
1380 pthread_mutex_unlock(&(waitingthread->waitmutex));
1382 /* if we should only wake one, we are done */
1390 /* lock_monitor_notify *********************************************************
1392 Notify one thread or all threads waiting on the given object.
1395 t............the current thread
1396 o............the object
1397 one..........if true, only notify one thread
1400 The current thread must be the owner of the object's monitor.
1402 *******************************************************************************/
1404 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1409 lockword = lock_lockword_get(t, o);
1411 /* check if we own this monitor */
1412 /* We don't have to worry about stale values here, as any stale value */
1413 /* will fail this check. */
1415 if (IS_FAT_LOCK(lockword)) {
1417 lr = GET_FAT_LOCK(lockword);
1419 if (lr->owner != t) {
1420 exceptions_throw_illegalmonitorstateexception();
1425 /* it's a thin lock */
1427 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1428 exceptions_throw_illegalmonitorstateexception();
1432 /* inflate this lock */
1434 lr = lock_hashtable_get(t, o);
1435 lock_record_enter(t, lr);
1436 lock_inflate(t, o, lr);
1439 /* { the thread t owns the fat lock record lr on the object o } */
1441 lock_record_notify(t, lr, one);
1446 /*============================================================================*/
/* INQUIRY FUNCTIONS                                                          */
1448 /*============================================================================*/
1451 /* lock_is_held_by_current_thread **********************************************
1453 Return true if the current thread owns the monitor of the given object.
1456 o............the object
1459 true, if the current thread holds the lock of this object.
1461 *******************************************************************************/
1463 bool lock_is_held_by_current_thread(java_handle_t *o)
1471 /* check if we own this monitor */
1472 /* We don't have to worry about stale values here, as any stale value */
1473 /* will fail this check. */
1475 lockword = lock_lockword_get(t, o);
1477 if (IS_FAT_LOCK(lockword)) {
1478 /* it's a fat lock */
1480 lr = GET_FAT_LOCK(lockword);
1482 return (lr->owner == t);
1485 /* it's a thin lock */
1487 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1493 /*============================================================================*/
1494 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1495 /*============================================================================*/
1498 /* lock_wait_for_object ********************************************************
1500 Wait for the given object.
1503 o............the object
1504 millis.......milliseconds to wait
1505 nanos........nanoseconds to wait
1507 *******************************************************************************/
1509 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1511 threadobject *thread;
1513 thread = THREADOBJECT;
1515 lock_monitor_wait(thread, o, millis, nanos);
1519 /* lock_notify_object **********************************************************
1521 Notify one thread waiting on the given object.
1524 o............the object
1526 *******************************************************************************/
1528 void lock_notify_object(java_handle_t *o)
1530 threadobject *thread;
1532 thread = THREADOBJECT;
1534 lock_monitor_notify(thread, o, true);
1538 /* lock_notify_all_object ******************************************************
1540 Notify all threads waiting on the given object.
1543 o............the object
1545 *******************************************************************************/
1547 void lock_notify_all_object(java_handle_t *o)
1549 threadobject *thread;
1551 thread = THREADOBJECT;
1553 lock_monitor_notify(thread, o, false);
1558 * These are local overrides for various environment variables in Emacs.
1559 * Please do not remove this and leave it at the end of the file, where
1560 * Emacs will automagically detect them.
1561 * ---------------------------------------------------------------------
1564 * indent-tabs-mode: t
1568 * vim:noexpandtab:sw=4:ts=4: