1 /* src/threads/native/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
4 C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5 E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6 J. Wenninger, Institut f. Computersprachen - TU Wien
8 This file is part of CACAO.
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2, or (at
13 your option) any later version.
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
39 #include "mm/memory.h"
41 #include "native/llni.h"
43 #include "threads/lock-common.h"
45 #include "threads/native/lock.h"
46 #include "threads/native/threads.h"
48 #include "toolbox/list.h"
50 #include "vm/global.h"
51 #include "vm/exceptions.h"
52 #include "vm/finalizer.h"
53 #include "vm/stringlocal.h"
56 #include "vmcore/options.h"
58 #if defined(ENABLE_STATISTICS)
59 # include "vmcore/statistics.h"
62 #if defined(ENABLE_VMLOG)
63 #include <vmlog_cacao.h>
66 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
70 /* includes for atomic instructions: */
72 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
73 #include "threads/native/generic-primitives.h"
75 #include "machine-instr.h"
78 #if defined(ENABLE_JVMTI)
79 #include "native/jvmti/cacaodbg.h"
82 #if defined(ENABLE_GC_BOEHM)
83 # include "mm/boehm-gc/include/gc.h"
87 /* debug **********************************************************************/
90 # define DEBUGLOCKS(format) \
92 if (opt_DebugLocks) { \
97 # define DEBUGLOCKS(format)
101 /******************************************************************************/
103 /******************************************************************************/
105 /* number of lock records in the first pool allocated for a thread */
106 #define LOCK_INITIAL_LOCK_RECORDS 8
108 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
110 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
111 ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
114 /******************************************************************************/
115 /* MACROS FOR THIN/FAT LOCKS */
116 /******************************************************************************/
118 /* We use a variant of the tasuki locks described in the paper
120 * Tamiya Onodera, Kiyokuni Kawachiya
121 * A Study of Locking Objects with Bimodal Fields
122 * Proceedings of the ACM OOPSLA '99, pp. 223-237
125 * The underlying thin locks are a variant of the thin locks described in
127 * Bacon, Konuru, Murthy, Serrano
128 * Thin Locks: Featherweight Synchronization for Java
129 * Proceedings of the ACM Conference on Programming Language Design and
130 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
133 * In thin lock mode the lockword looks like this:
135 * ,----------------------,-----------,---,
136 * | thread ID | count | 0 |
137 * `----------------------'-----------'---´
139 * thread ID......the 'index' of the owning thread, or 0
140 * count..........number of times the lock has been entered minus 1
141 * 0..............the shape bit is 0 in thin lock mode
143 * In fat lock mode it is basically a lock_record_t *:
145 * ,----------------------------------,---,
146 * | lock_record_t * (without LSB) | 1 |
147 * `----------------------------------'---´
149 * 1..............the shape bit is 1 in fat lock mode
152 #if SIZEOF_VOID_P == 8
153 #define THIN_LOCK_WORD_SIZE 64
155 #define THIN_LOCK_WORD_SIZE 32
158 #define THIN_LOCK_FLC_BIT 0x01
159 #define THIN_LOCK_SHAPE_BIT 0x02
161 #define THIN_UNLOCKED 0
163 #define THIN_LOCK_COUNT_SHIFT 2
164 #define THIN_LOCK_COUNT_SIZE 7
165 #define THIN_LOCK_COUNT_INCR (1 << THIN_LOCK_COUNT_SHIFT)
166 #define THIN_LOCK_COUNT_MAX ((1 << THIN_LOCK_COUNT_SIZE) - 1)
167 #define THIN_LOCK_COUNT_MASK (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
169 #define THIN_LOCK_TID_SHIFT (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
170 #define THIN_LOCK_TID_SIZE (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
172 #define IS_THIN_LOCK(lockword) (!((lockword) & THIN_LOCK_SHAPE_BIT))
173 #define IS_FAT_LOCK(lockword) ((lockword) & THIN_LOCK_SHAPE_BIT)
175 #define GET_FAT_LOCK(lockword) ((lock_record_t *) ((lockword) & ~(THIN_LOCK_SHAPE_BIT | THIN_LOCK_FLC_BIT)))
176 #define MAKE_FAT_LOCK(ptr) ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)
178 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~(THIN_LOCK_COUNT_MASK | THIN_LOCK_FLC_BIT))
179 #define LOCK_WORD_WITHOUT_FLC_BIT(lockword) ((lockword) & ~THIN_LOCK_FLC_BIT)
182 /* global variables ***********************************************************/
184 /* hashtable mapping objects to lock records */
185 static lock_hashtable_t lock_hashtable;
188 /******************************************************************************/
190 /******************************************************************************/
192 static void lock_hashtable_init(void);
194 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
195 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
196 static void lock_record_enter(threadobject *t, lock_record_t *lr);
197 static void lock_record_exit(threadobject *t, lock_record_t *lr);
198 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
199 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
202 /*============================================================================*/
203 /* INITIALIZATION OF DATA STRUCTURES */
204 /*============================================================================*/
207 /* lock_init *******************************************************************
209 Initialize global data for locking.
211 *******************************************************************************/
215 /* initialize lock hashtable */
217 lock_hashtable_init();
219 #if defined(ENABLE_VMLOG)
220 vmlog_cacao_init_lock();
225 /* lock_pre_compute_thinlock ***************************************************
227 Pre-compute the thin lock value for a thread index.
230 index........the thread index (>= 1)
233 the thin lock value for this thread index
235 *******************************************************************************/
237 ptrint lock_pre_compute_thinlock(s4 index)
239 return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
243 /* lock_record_new *************************************************************
245 Allocate a lock record.
247 *******************************************************************************/
249 static lock_record_t *lock_record_new(void)
/* Allocate and initialize a fresh lock record (fat-lock backing structure).
   NOTE(review): this listing is missing lines (see the gaps in the embedded
   numbering): the opening brace, the declaration of `lr`, several member
   initializations (lines 263-266), the #endif directives and the trailing
   `return lr;` are not visible here -- confirm against the full source. */
253 /* allocate the data structure on the C heap */
255 lr = NEW(lock_record_t);
257 #if defined(ENABLE_STATISTICS)
259 size_lock_record += sizeof(lock_record_t);
262 /* initialize the members */
267 lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));
269 #if defined(ENABLE_GC_CACAO)
270 /* register the lock object as weak reference with the GC */
272 gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
275 /* initialize the mutex */
277 pthread_mutex_init(&(lr->mutex), NULL);
279 DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
285 /* lock_record_free ************************************************************
290 lr....lock record to free
292 *******************************************************************************/
294 static void lock_record_free(lock_record_t *lr)
/* Destroy and deallocate a lock record: mutex, GC weak reference (if the
   exact GC is enabled), waiters list, then the record itself.
   NOTE(review): the closing braces/#endifs are missing from this listing. */
296 DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
298 /* Destroy the mutex. */
300 pthread_mutex_destroy(&(lr->mutex));
302 #if defined(ENABLE_GC_CACAO)
303 /* unregister the lock object reference with the GC */
305 gc_weakreference_unregister(&(lr->object));
308 /* Free the waiters list. */
310 list_free(lr->waiters);
312 /* Free the data structure. */
314 FREE(lr, lock_record_t);
316 #if defined(ENABLE_STATISTICS)
318 size_lock_record -= sizeof(lock_record_t);
323 /*============================================================================*/
324 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
325 /*============================================================================*/
327 /* lock_hashtable_init *********************************************************
329 Initialize the global hashtable mapping objects to lock records.
331 *******************************************************************************/
333 static void lock_hashtable_init(void)
/* Initialize the global object->lock-record hashtable: create its mutex,
   allocate LOCK_INITIAL_HASHTABLE_SIZE buckets and zero them. */
335 pthread_mutex_init(&(lock_hashtable.mutex), NULL);
337 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
338 lock_hashtable.entries = 0;
339 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
341 #if defined(ENABLE_STATISTICS)
343 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
346 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
350 /* lock_hashtable_grow *********************************************************
352 Grow the lock record hashtable to about twice its current size and
355 *******************************************************************************/
357 /* must be called with hashtable mutex locked */
358 static void lock_hashtable_grow(void)
/* Double (2n+1) the lock-record hashtable and rehash every record.
   Caller must hold lock_hashtable.mutex.
   NOTE(review): lines 359-361, 364-369, 388-403 are missing from this
   listing -- in particular the declarations of i/h/newslot/lr and the
   inner loop that walks each bucket's hashlink chain are not visible. */
362 lock_record_t **oldtable;
363 lock_record_t **newtable;
370 /* allocate a new table */
372 oldsize = lock_hashtable.size;
373 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
375 DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
377 oldtable = lock_hashtable.ptr;
378 newtable = MNEW(lock_record_t *, newsize);
380 #if defined(ENABLE_STATISTICS)
382 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
385 MZERO(newtable, lock_record_t *, newsize);
387 /* rehash the entries */
389 for (i = 0; i < oldsize; i++) {
394 h = heap_hashcode(lr->object);
395 newslot = h % newsize;
397 lr->hashlink = newtable[newslot];
398 newtable[newslot] = lr;
404 /* replace the old table */
406 lock_hashtable.ptr = newtable;
407 lock_hashtable.size = newsize;
409 MFREE(oldtable, lock_record_t *, oldsize);
411 #if defined(ENABLE_STATISTICS)
413 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
418 /* lock_hashtable_cleanup ******************************************************
420 Removes (and frees) lock records which have a cleared object reference
421 from the hashtable. The locked object was reclaimed by the GC.
423 *******************************************************************************/
425 #if defined(ENABLE_GC_CACAO)
426 void lock_hashtable_cleanup(void)
/* Walk the whole hashtable and free lock records whose weak object
   reference was cleared by the exact GC (the locked object is dead).
   NOTE(review): the declarations of i/lr/prev/next and the chain-advance
   code (lines 444-448, 454-471) are missing from this listing. */
436 /* lock the hashtable */
438 pthread_mutex_lock(&(lock_hashtable.mutex));
440 /* search the hashtable for cleared references */
442 for (i = 0; i < lock_hashtable.size; i++) {
443 lr = lock_hashtable.ptr[i];
449 /* remove lock records with cleared references */
451 if (lr->object == NULL) {
453 /* unlink the lock record from the hashtable */
456 lock_hashtable.ptr[i] = next;
458 prev->hashlink = next;
460 /* free the lock record */
462 lock_record_free(lr);
472 /* unlock the hashtable */
474 pthread_mutex_unlock(&(lock_hashtable.mutex));
479 /* lock_hashtable_get **********************************************************
481 Find the lock record for the given object. If it does not exist,
482 yet, create it and enter it in the hashtable.
485 t....the current thread
486 o....the object to look up
489 the lock record to use for this object
491 *******************************************************************************/
493 #if defined(ENABLE_GC_BOEHM)
494 static void lock_record_finalizer(void *object, void *p);
497 static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
/* Return the lock record for object `o`, creating and inserting one if
   none exists yet. Fast path: an already-fat lockword embeds the record
   pointer directly. Otherwise the global hashtable is searched (and
   possibly grown) under lock_hashtable.mutex.
   NOTE(review): the early-return from the lookup loop (lines 520-524) and
   the final `return lr;` are missing from this listing. */
503 lockword = lock_lockword_get(t, o);
505 if (IS_FAT_LOCK(lockword))
506 return GET_FAT_LOCK(lockword);
508 /* lock the hashtable */
510 pthread_mutex_lock(&(lock_hashtable.mutex));
512 /* lookup the lock record in the hashtable */
514 LLNI_CRITICAL_START_THREAD(t);
515 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
516 lr = lock_hashtable.ptr[slot];
518 for (; lr != NULL; lr = lr->hashlink) {
519 if (lr->object == LLNI_DIRECT(o))
522 LLNI_CRITICAL_END_THREAD(t);
525 /* not found, we must create a new one */
527 lr = lock_record_new();
529 LLNI_CRITICAL_START_THREAD(t);
530 lr->object = LLNI_DIRECT(o);
531 LLNI_CRITICAL_END_THREAD(t);
533 #if defined(ENABLE_GC_BOEHM)
534 /* register new finalizer to clean up the lock record */
536 GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
539 /* enter it in the hashtable */
541 lr->hashlink = lock_hashtable.ptr[slot];
542 lock_hashtable.ptr[slot] = lr;
543 lock_hashtable.entries++;
545 /* check whether the hash should grow */
547 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
548 lock_hashtable_grow();
552 /* unlock the hashtable */
554 pthread_mutex_unlock(&(lock_hashtable.mutex));
556 /* return the new lock record */
562 /* lock_hashtable_remove *******************************************************
564 Remove the lock record for the given object from the hashtable
565 and free it afterwards.
568 t....the current thread
569 o....the object to look up
571 *******************************************************************************/
573 static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
/* Unlink the (fat) lock record of object `o` from the hashtable and free
   it. Used by the Boehm-GC finalizer path when the object dies.
   NOTE(review): declarations and the head-of-chain test (lines 574-577,
   598-601, 603-604) are missing from this listing. */
578 lock_record_t *tmplr;
580 /* lock the hashtable */
582 pthread_mutex_lock(&(lock_hashtable.mutex));
584 /* get lock record */
586 lockword = lock_lockword_get(t, o);
588 assert(IS_FAT_LOCK(lockword));
590 lr = GET_FAT_LOCK(lockword);
592 /* remove the lock-record from the hashtable */
594 LLNI_CRITICAL_START_THREAD(t);
595 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
596 tmplr = lock_hashtable.ptr[slot];
597 LLNI_CRITICAL_END_THREAD(t);
600 /* special handling if it's the first in the chain */
602 lock_hashtable.ptr[slot] = lr->hashlink;
605 for (; tmplr != NULL; tmplr = tmplr->hashlink) {
606 if (tmplr->hashlink == lr) {
607 tmplr->hashlink = lr->hashlink;
612 assert(tmplr != NULL);
615 /* decrease entry count */
617 lock_hashtable.entries--;
619 /* unlock the hashtable */
621 pthread_mutex_unlock(&(lock_hashtable.mutex));
623 /* free the lock record */
625 lock_record_free(lr);
629 /* lock_record_finalizer *******************************************************
631 XXX Remove me for exact GC.
633 *******************************************************************************/
635 static void lock_record_finalizer(void *object, void *p)
/* Boehm-GC finalizer installed on any object that ever got a lock record:
   runs the object's own Java finalizer (if any), then removes and frees
   the lock-record hashtable entry. XXX to be removed for the exact GC.
   NOTE(review): declarations of o/c and the debug log_print body
   (lines 636-639, 654-659) are missing from this listing. */
640 o = (java_handle_t *) object;
642 #if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
643 /* XXX this is only a dirty hack to make Boehm work with handles */
645 o = LLNI_WRAP((java_object_t *) o);
648 LLNI_class_get(o, c);
651 if (opt_DebugFinalizer) {
653 log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
660 /* check for a finalizer function */
662 if (c->finalizer != NULL)
663 finalizer_run(object, p);
665 /* remove the lock-record entry from the hashtable and free it */
667 lock_hashtable_remove(THREADOBJECT, o);
671 /*============================================================================*/
672 /* OBJECT LOCK INITIALIZATION */
673 /*============================================================================*/
676 /* lock_init_object_lock *******************************************************
678 Initialize the monitor pointer of the given object. The monitor gets
679 initialized to an unlocked state.
681 *******************************************************************************/
683 void lock_init_object_lock(java_object_t *o)
687 o->lockword = THIN_UNLOCKED;
691 /*============================================================================*/
692 /* LOCKING ALGORITHM */
693 /*============================================================================*/
696 /* lock_lockword_get ***********************************************************
698 Get the lockword for the given object.
701 t............the current thread
702 o............the object
704 *******************************************************************************/
706 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
710 LLNI_CRITICAL_START_THREAD(t);
711 lockword = LLNI_DIRECT(o)->lockword;
712 LLNI_CRITICAL_END_THREAD(t);
718 /* lock_lockword_set ***********************************************************
720 Set the lockword for the given object.
723 t............the current thread
724 o............the object
725 lockword.....the new lockword value
727 *******************************************************************************/
729 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
731 LLNI_CRITICAL_START_THREAD(t);
732 LLNI_DIRECT(o)->lockword = lockword;
733 LLNI_CRITICAL_END_THREAD(t);
736 /* lock_lockword_swap **********************************************************
738 Atomically sets a new lockword for the given object and returns the
742 t............the current thread
743 o............the object
744 lockword.....the new lockword value
747 the previous lockword value
748 *******************************************************************************/
750 static inline ptrint lock_lockword_swap(threadobject *t, java_handle_t *o, uintptr_t lockword)
/* Atomically replace the object's lockword with `lockword` and return the
   previous value, emulating an atomic exchange via read + CAS.
   NOTE(review): lines 751-753, 757-761 and 765-771 are missing from this
   listing -- presumably they declare old/r and retry the read+CAS until
   `r == old` before returning `old`; confirm against the full source. */
754 LLNI_CRITICAL_START_THREAD(t);
755 old = LLNI_DIRECT(o)->lockword;
756 LLNI_CRITICAL_END_THREAD(t);
762 LLNI_CRITICAL_START_THREAD(t);
763 r = COMPARE_AND_SWAP_OLD_VALUE(&LLNI_DIRECT(o)->lockword, old, lockword);
764 LLNI_CRITICAL_END_THREAD(t);
772 /* try_set_flc_bit ************************************************************
774 Tries to atomically set the FLC bit in the lockword, only if the
775 lockword is not inflated. Returns success status.
778 t............the current thread
779 o............the object
780 lockword.....the current lockword value
783 1 if the FLC is now set, 0 otherwise
784 *******************************************************************************/
786 static int try_set_flc_bit(threadobject *t, java_handle_t *o, ptrint lockword)
789 if (lockword == THIN_UNLOCKED)
792 LLNI_CRITICAL_START_THREAD(t);
793 r = COMPARE_AND_SWAP_OLD_VALUE(&LLNI_DIRECT(o)->lockword, lockword, lockword | THIN_LOCK_FLC_BIT);
794 LLNI_CRITICAL_END_THREAD(t);
796 return r == lockword;
800 /* lock_record_enter ***********************************************************
802 Enter the lock represented by the given lock record.
805 t.................the current thread
806 lr................the lock record
808 *******************************************************************************/
810 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
812 pthread_mutex_lock(&(lr->mutex));
817 /* lock_record_exit ************************************************************
819 Release the lock represented by the given lock record.
822 t.................the current thread
823 lr................the lock record
826 The current thread must own the lock represented by this lock record.
827 This is NOT checked by this function!
829 *******************************************************************************/
831 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
834 pthread_mutex_unlock(&(lr->mutex));
838 /* lock_inflate ****************************************************************
840 Inflate the lock of the given object. This may only be called by the
841 owner of the monitor of the object.
844 t............the current thread
845 o............the object of which to inflate the lock
846 lr...........the lock record to install. The current thread must
847 own the lock of this lock record!
850 The current thread must be the owner of this object's monitor AND
851 of the lock record's lock!
853 *******************************************************************************/
855 static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
/* Convert object `o`'s thin lock into the fat lock `lr`. Caller must own
   both the object's monitor and lr's mutex. The thin recursion count is
   carried over into lr->count, waiters are notified so they can retry,
   and finally the lockword is replaced by the tagged record pointer.
   NOTE(review): the early return for the already-fat case and several
   declarations (lines 856-858, 862, 865-866, 880-882) are missing. */
859 /* get the current lock count */
861 lockword = lock_lockword_get(t, o);
863 if (IS_FAT_LOCK(lockword)) {
864 assert(GET_FAT_LOCK(lockword) == lr);
867 assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
869 /* copy the count from the thin lock */
871 lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
874 DEBUGLOCKS(("[lock_inflate : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
875 lr, t, o, lockword, lr->count));
877 /* notify waiting objects */
879 lock_record_notify(t, lr, false);
883 lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
887 /* lock_monitor_enter **********************************************************
889 Acquire the monitor of the given object. If the current thread already
890 owns the monitor, the lock counter is simply increased.
892 This function blocks until it can acquire the monitor.
895 t............the current thread
896 o............the object of which to enter the monitor
899 true.........the lock has been successfully acquired
900 false........an exception has been thrown
902 *******************************************************************************/
904 bool lock_monitor_enter(java_handle_t *o)
/* Acquire the monitor of `o` (tasuki-style): fast-path CAS for an
   unlocked thin lock, increment for shallow recursion, and an inflation
   protocol (FLC bit + wait/notify on the lock record) under contention.
   Returns true on success, false after throwing (e.g. NullPointerException).
   NOTE(review): many lines are missing from this listing (declarations,
   `return true;` statements, closing braces -- see numbering gaps). */
907 /* CAUTION: This code assumes that ptrint is unsigned! */
913 exceptions_throw_nullpointerexception();
919 thinlock = t->thinlock;
921 /* most common case: try to thin-lock an unlocked object */
923 LLNI_CRITICAL_START_THREAD(t);
924 lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
925 LLNI_CRITICAL_END_THREAD(t);
927 if (lockword == THIN_UNLOCKED) {
928 /* success. we locked it */
929 /* The Java Memory Model requires a memory barrier here: */
930 /* Because of the CAS above, this barrier is a nop on x86 / x86_64 */
931 MEMORY_BARRIER_AFTER_ATOMIC();
935 /* next common case: recursive lock with small recursion count */
936 /* We don't have to worry about stale values here, as any stale value */
937 /* will indicate another thread holding the lock (or an inflated lock) */
939 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
940 /* we own this monitor */
941 /* check the current recursion count */
943 if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
945 /* the recursion count is low enough */
947 lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);
949 /* success. we locked it */
953 /* recursion count overflow */
/* On count overflow we inflate our own lock so further recursion is
   tracked in lr->count instead of the narrow thin-lock count field. */
955 lr = lock_hashtable_get(t, o);
956 lock_record_enter(t, lr);
957 lock_inflate(t, o, lr);
964 /* the lock is either contended or fat */
966 if (IS_FAT_LOCK(lockword)) {
968 lr = GET_FAT_LOCK(lockword);
970 /* check for recursive entering */
971 if (lr->owner == t) {
976 /* acquire the mutex of the lock record */
978 lock_record_enter(t, lr);
980 assert(lr->count == 0);
985 /****** inflation path ******/
987 /* first obtain the lock record for this object */
989 lr = lock_hashtable_get(t, o);
991 #if defined(ENABLE_JVMTI)
992 /* Monitor Contended Enter */
993 jvmti_MonitorContendedEntering(false, o);
996 /* enter the monitor */
998 lock_record_enter(t, lr);
1000 #if defined(ENABLE_JVMTI)
1001 /* Monitor Contended Entered */
1002 jvmti_MonitorContendedEntering(true, o);
1005 /* inflation loop */
1007 while (IS_THIN_LOCK(lockword = lock_lockword_get(t, o))) {
1008 /* Set the flat lock contention bit to let the owning thread
1009 know that we want to be notified of unlocking. */
1011 if (try_set_flc_bit(t, o, lockword)) {
1012 /* Wait until another thread sees the flc bit and notifies
1015 DEBUGLOCKS(("thread %d set flc bit on %p lr %p",
1016 t->index, (void*) o, (void*) lr));
1018 (void) lock_record_wait(t, lr, 0, 0);
1021 /* try to lock the object */
1023 LLNI_CRITICAL_START_THREAD(t);
1024 lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
1025 LLNI_CRITICAL_END_THREAD(t);
1027 if (lockword == THIN_UNLOCKED) {
1028 /* we can inflate the lock ourselves */
1030 DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
1031 t->index, (void*) o, (void*) lr));
1033 lock_inflate(t, o, lr);
1037 /* we own the inflated lock now */
1043 /* lock_monitor_exit ***********************************************************
1045 Decrement the counter of a (currently owned) monitor. If the counter
1046 reaches zero, release the monitor.
1048 If the current thread is not the owner of the monitor, an
1049 IllegalMonitorState exception is thrown.
1052 t............the current thread
1053 o............the object of which to exit the monitor
1056 true.........everything ok,
1057 false........an exception has been thrown
1059 *******************************************************************************/
1061 bool lock_monitor_exit(java_handle_t *o)
/* Release the monitor of `o`. Mirrors lock_monitor_enter: fast path for a
   once-held thin lock (with FLC-bit contention handling), decrement for a
   recursive thin lock, and fat-lock release with owner check. Throws
   IllegalMonitorStateException when the caller does not own the monitor.
   NOTE(review): declarations, `return` statements and closing braces are
   missing from this listing (see numbering gaps). */
1068 exceptions_throw_nullpointerexception();
1074 thinlock = t->thinlock;
1076 /* We don't have to worry about stale values here, as any stale value */
1077 /* will indicate that we don't own the lock. */
1079 lockword = lock_lockword_get(t, o);
1081 /* most common case: we release a thin lock that we hold once */
1083 if (LOCK_WORD_WITHOUT_FLC_BIT(lockword) == thinlock) {
1084 /* memory barrier for Java Memory Model */
1085 MEMORY_BARRIER_BEFORE_ATOMIC();
1086 lockword = lock_lockword_swap(t, o, THIN_UNLOCKED);
1088 /* check if there has been a flat lock contention on this object */
1090 if (lockword & THIN_LOCK_FLC_BIT) {
1093 DEBUGLOCKS(("thread %d saw flc bit on %p",
1094 t->index, (void*) o));
1096 /* there has been a contention on this thin lock */
1098 lr = lock_hashtable_get(t, o);
1100 DEBUGLOCKS(("thread %d for %p got lr %p",
1101 t->index, (void*) o, (void*) lr));
1103 lock_record_enter(t, lr);
1105 /* notify a thread that it can try to inflate the lock now */
1107 lock_record_notify(t, lr, true);
1109 lock_record_exit(t, lr);
1115 /* next common case: we release a recursive lock, count > 0 */
1117 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
1118 lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
1122 /* either the lock is fat, or we don't hold it at all */
1124 if (IS_FAT_LOCK(lockword)) {
1128 lr = GET_FAT_LOCK(lockword);
1130 /* check if we own this monitor */
1131 /* We don't have to worry about stale values here, as any stale value */
1132 /* will be != t and thus fail this check. */
1134 if (lr->owner != t) {
1135 exceptions_throw_illegalmonitorstateexception();
1139 /* { the current thread `t` owns the lock record `lr` on object `o` } */
1141 if (lr->count != 0) {
1142 /* we had locked this one recursively. just decrement, it will */
1143 /* still be locked. */
1148 /* unlock this lock record */
/* NOTE(review): owner is presumably cleared before this unlock (the line
   is missing here) -- confirm against lock_record_exit. */
1151 pthread_mutex_unlock(&(lr->mutex));
1156 /* legal thin lock cases have been handled above, so this is an error */
1158 exceptions_throw_illegalmonitorstateexception();
1164 /* lock_record_add_waiter ******************************************************
1166 Add a thread to the list of waiting threads of a lock record.
1169 lr...........the lock record
1170 thread.......the thread to add
1172 *******************************************************************************/
1174 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
/* Append `thread` to lr's list of waiting threads (used by wait/notify).
   NOTE(review): the declaration of `w` and the `w->thread = thread;`
   assignment (lines 1175-1177, 1188-1190) are missing from this listing. */
1178 /* Allocate a waiter data structure. */
1180 w = NEW(lock_waiter_t);
1182 #if defined(ENABLE_STATISTICS)
1184 size_lock_waiter += sizeof(lock_waiter_t);
1187 /* Store the thread in the waiter structure. */
1191 /* Add the waiter as last entry to waiters list. */
1193 list_add_last(lr->waiters, w);
1197 /* lock_record_remove_waiter ***************************************************
1199 Remove a thread from the list of waiting threads of a lock record.
1202 lr...........the lock record
1203 t............the current thread
1206 The current thread must be the owner of the lock record.
1208 *******************************************************************************/
1210 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
/* Remove `thread` from lr's waiters list and free its waiter node.
   Aborts the VM if the thread is not found -- callers always add
   themselves via lock_record_add_waiter first.
   NOTE(review): declarations of w/l, the `l = lr->waiters;` assignment and
   the early return after FREE are missing from this listing. */
1215 /* Get the waiters list. */
1219 for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
1220 if (w->thread == thread) {
1221 /* Remove the waiter entry from the list. */
1223 list_remove_unsynced(l, w);
1225 /* Free the waiter data structure. */
1227 FREE(w, lock_waiter_t);
1229 #if defined(ENABLE_STATISTICS)
1231 size_lock_waiter -= sizeof(lock_waiter_t);
1238 /* This should never happen. */
1240 vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
1244 /* lock_record_wait ************************************************************
1246 Wait on a lock record for a given (maximum) amount of time.
1249 t............the current thread
1250 lr...........the lock record
1251 millis.......milliseconds of timeout
1252 nanos........nanoseconds of timeout
1255 true.........we have been interrupted,
1256 false........everything ok
1259 The current thread must be the owner of the lock record.
1260 This is NOT checked by this function!
1262 *******************************************************************************/
1264 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
/* Object.wait() on a fat lock: register as waiter, fully release the
   record (saving the recursion count), sleep with the given timeout,
   re-acquire, deregister and restore the count. Returns true if the
   thread was interrupted. Caller must own lr (not checked).
   NOTE(review): the declaration of `lockcount` and the `lr->count = 0;`
   reset before lock_record_exit (lines 1283-1284) are missing here. */
1267 bool wasinterrupted;
1269 DEBUGLOCKS(("[lock_record_wait : lr=%p, t=%p, millis=%lld, nanos=%d]",
1270 lr, thread, millis, nanos));
1272 /* { the thread t owns the fat lock record lr on the object o } */
1274 /* register us as waiter for this object */
1276 lock_record_add_waiter(lr, thread);
1278 /* remember the old lock count */
1280 lockcount = lr->count;
1282 /* unlock this record */
1285 lock_record_exit(thread, lr);
1287 /* wait until notified/interrupted/timed out */
1289 wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);
1291 /* re-enter the monitor */
1293 lock_record_enter(thread, lr);
1295 /* remove us from the list of waiting threads */
1297 lock_record_remove_waiter(lr, thread);
1299 /* restore the old lock count */
1301 lr->count = lockcount;
1303 /* return if we have been interrupted */
1305 return wasinterrupted;
1309 /* lock_monitor_wait ***********************************************************
1311 Wait on an object for a given (maximum) amount of time.
1314 t............the current thread
1315 o............the object
1316 millis.......milliseconds of timeout
1317 nanos........nanoseconds of timeout
1320 The current thread must be the owner of the object's monitor.
1322 *******************************************************************************/
1324 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
/* Object.wait() entry point: verify ownership, inflate a thin lock first
   (waiting always needs a fat lock record), then delegate to
   lock_record_wait and translate an interrupt into InterruptedException.
   NOTE(review): declarations, early `return` statements after the
   IllegalMonitorState throws, and closing braces are missing here. */
1329 lockword = lock_lockword_get(t, o);
1331 /* check if we own this monitor */
1332 /* We don't have to worry about stale values here, as any stale value */
1333 /* will fail this check. */
1335 if (IS_FAT_LOCK(lockword)) {
1337 lr = GET_FAT_LOCK(lockword);
1339 if (lr->owner != t) {
1340 exceptions_throw_illegalmonitorstateexception();
1345 /* it's a thin lock */
1347 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1348 exceptions_throw_illegalmonitorstateexception();
1352 /* inflate this lock */
1354 lr = lock_hashtable_get(t, o);
1355 lock_record_enter(t, lr);
1356 lock_inflate(t, o, lr);
1359 /* { the thread t owns the fat lock record lr on the object o } */
1361 if (lock_record_wait(t, lr, millis, nanos))
1362 exceptions_throw_interruptedexception();
1366 /* lock_record_notify **********************************************************
1368 Notify one thread or all threads waiting on the given lock record.
1371 t............the current thread
1372 lr...........the lock record
1373 one..........if true, only notify one thread
1376 The current thread must be the owner of the lock record.
1377 This is NOT checked by this function!
1379 *******************************************************************************/
1381 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1385 threadobject *waitingthread;
1387 /* { the thread t owns the fat lock record lr on the object o } */
1389 /* Get the waiters list. */
1393 for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
1394 /* signal the waiting thread */
1396 waitingthread = w->thread;
1398 /* If the thread was already signaled but hasn't removed
1399 itself from the list yet, just ignore it. */
1401 if (waitingthread->signaled == true)
1404 /* Enter the wait-mutex. */
1406 pthread_mutex_lock(&(waitingthread->waitmutex));
1408 DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
1409 lr, t, waitingthread, waitingthread->sleeping, one));
1411 /* Signal the thread if it's sleeping. */
1413 if (waitingthread->sleeping)
1414 pthread_cond_signal(&(waitingthread->waitcond));
1416 /* Mark the thread as signaled. */
1418 waitingthread->signaled = true;
1420 /* Leave the wait-mutex. */
1422 pthread_mutex_unlock(&(waitingthread->waitmutex));
1424 /* if we should only wake one, we are done */
1432 /* lock_monitor_notify *********************************************************
1434 Notify one thread or all threads waiting on the given object.
1437 t............the current thread
1438 o............the object
1439 one..........if true, only notify one thread
1442 The current thread must be the owner of the object's monitor.
1444 *******************************************************************************/
1446 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1451 lockword = lock_lockword_get(t, o);
1453 /* check if we own this monitor */
1454 /* We don't have to worry about stale values here, as any stale value */
1455 /* will fail this check. */
1457 if (IS_FAT_LOCK(lockword)) {
1459 lr = GET_FAT_LOCK(lockword);
1461 if (lr->owner != t) {
1462 exceptions_throw_illegalmonitorstateexception();
1467 /* it's a thin lock */
1469 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1470 exceptions_throw_illegalmonitorstateexception();
1474 /* inflate this lock */
1476 lr = lock_hashtable_get(t, o);
1477 lock_record_enter(t, lr);
1478 lock_inflate(t, o, lr);
1481 /* { the thread t owns the fat lock record lr on the object o } */
1483 lock_record_notify(t, lr, one);
1488 /*============================================================================*/
/* INQUIRY FUNCTIONS                                                          */
1490 /*============================================================================*/
1493 /* lock_is_held_by_current_thread **********************************************
1495 Return true if the current thread owns the monitor of the given object.
1498 o............the object
1501 true, if the current thread holds the lock of this object.
1503 *******************************************************************************/
1505 bool lock_is_held_by_current_thread(java_handle_t *o)
1513 /* check if we own this monitor */
1514 /* We don't have to worry about stale values here, as any stale value */
1515 /* will fail this check. */
1517 lockword = lock_lockword_get(t, o);
1519 if (IS_FAT_LOCK(lockword)) {
1520 /* it's a fat lock */
1522 lr = GET_FAT_LOCK(lockword);
1524 return (lr->owner == t);
1527 /* it's a thin lock */
1529 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1535 /*============================================================================*/
1536 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1537 /*============================================================================*/
1540 /* lock_wait_for_object ********************************************************
1542 Wait for the given object.
1545 o............the object
1546 millis.......milliseconds to wait
1547 nanos........nanoseconds to wait
1549 *******************************************************************************/
1551 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1553 threadobject *thread;
1555 thread = THREADOBJECT;
1557 lock_monitor_wait(thread, o, millis, nanos);
1561 /* lock_notify_object **********************************************************
1563 Notify one thread waiting on the given object.
1566 o............the object
1568 *******************************************************************************/
1570 void lock_notify_object(java_handle_t *o)
1572 threadobject *thread;
1574 thread = THREADOBJECT;
1576 lock_monitor_notify(thread, o, true);
1580 /* lock_notify_all_object ******************************************************
1582 Notify all threads waiting on the given object.
1585 o............the object
1587 *******************************************************************************/
1589 void lock_notify_all_object(java_handle_t *o)
1591 threadobject *thread;
1593 thread = THREADOBJECT;
1595 lock_monitor_notify(thread, o, false);
1600 * These are local overrides for various environment variables in Emacs.
1601 * Please do not remove this and leave it at the end of the file, where
1602 * Emacs will automagically detect them.
1603 * ---------------------------------------------------------------------
1606 * indent-tabs-mode: t
1610 * vim:noexpandtab:sw=4:ts=4: