1 /* src/threads/lock.cpp - lock implementation
3 Copyright (C) 1996-2011
4 CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
6 This file is part of CACAO.
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2, or (at
11 your option) any later version.
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
36 #include "mm/memory.hpp"
38 #include "native/llni.h"
40 #include "threads/atomic.hpp"
41 #include "threads/lock.hpp"
42 #include "threads/mutex.hpp"
43 #include "threads/threadlist.hpp"
44 #include "threads/thread.hpp"
46 #include "toolbox/list.hpp"
48 #include "vm/exceptions.hpp"
49 #include "vm/finalizer.hpp"
50 #include "vm/global.h"
51 #include "vm/options.h"
52 #include "vm/string.hpp"
55 #if defined(ENABLE_STATISTICS)
56 # include "vm/statistics.h"
59 #if defined(ENABLE_VMLOG)
60 #include <vmlog_cacao.h>
63 #if defined(ENABLE_JVMTI)
64 #include "native/jvmti/cacaodbg.h"
67 #if defined(ENABLE_GC_BOEHM)
68 # include "mm/boehm-gc/include/gc.h"
72 /* debug **********************************************************************/
75 # define DEBUGLOCKS(format) \
77 if (opt_DebugLocks) { \
82 # define DEBUGLOCKS(format)
86 /******************************************************************************/
88 /******************************************************************************/
90 /* number of lock records in the first pool allocated for a thread */
91 #define LOCK_INITIAL_LOCK_RECORDS 8
93 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
96 /******************************************************************************/
97 /* MACROS FOR THIN/FAT LOCKS */
98 /******************************************************************************/
100 /* We use a variant of the tasuki locks described in the paper
102 * Tamiya Onodera, Kiyokuni Kawachiya
103 * A Study of Locking Objects with Bimodal Fields
104 * Proceedings of the ACM OOPSLA '99, pp. 223-237
107 * The underlying thin locks are a variant of the thin locks described in
109 * Bacon, Konuru, Murthy, Serrano
110 * Thin Locks: Featherweight Synchronization for Java
111 * Proceedings of the ACM Conference on Programming Language Design and
112 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
115 * In thin lock mode the lockword looks like this:
117 * ,----------------------,-----------,---,
118 * | thread ID | count | 0 |
119 * `----------------------'-----------'---'
121 * thread ID......the 'index' of the owning thread, or 0
122 * count..........number of times the lock has been entered minus 1
123 * 0..............the shape bit is 0 in thin lock mode
125 * In fat lock mode it is basically a lock_record_t *:
127 * ,----------------------------------,---,
128 * | lock_record_t * (without LSB) | 1 |
129 * `----------------------------------'---'
131 * 1..............the shape bit is 1 in fat lock mode
134 /* global variables ***********************************************************/
136 /* hashtable mapping objects to lock records */
137 static lock_hashtable_t lock_hashtable;
140 /******************************************************************************/
142 /******************************************************************************/
144 static void lock_hashtable_init(void);
146 static inline uintptr_t* lock_lockword_get(java_handle_t* o);
147 static void lock_record_enter(threadobject *t, lock_record_t *lr);
148 static void lock_record_exit(threadobject *t, lock_record_t *lr);
149 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
150 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
153 /*============================================================================*/
154 /* INITIALIZATION OF DATA STRUCTURES */
155 /*============================================================================*/
158 /* lock_init *******************************************************************
160    Initialize global data for locking.
162 *******************************************************************************/
/* NOTE(review): lock_init's signature and closing brace are elided in this
   excerpt; the lines below are its body. */
166 	/* initialize lock hashtable */
168 	lock_hashtable_init();
170 #if defined(ENABLE_VMLOG)
/* optional lock tracing support -- presumably sets up vmlog's lock event
   machinery; TODO confirm against vmlog_cacao.h */
171 	vmlog_cacao_init_lock();
176 /* lock_record_new *************************************************************
178 Allocate a lock record.
180 *******************************************************************************/
182 static lock_record_t *lock_record_new(void)
/* Allocate a fat-lock record on the C heap and initialize its waiter list
   and mutex. Counterpart of lock_record_free below. */
186 	/* allocate the data structure on the C heap */
188 	lr = NEW(lock_record_t);
190 #if defined(ENABLE_STATISTICS)
192 		size_lock_record += sizeof(lock_record_t);
195 	/* initialize the members */
200 	lr->waiters = new List<threadobject*>();
202 #if defined(ENABLE_GC_CACAO)
203 	/* register the lock object as weak reference with the GC */
	/* weak ref so a dead object's record can be reclaimed by
	   lock_hashtable_cleanup() */
205 	gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
208 	// Initialize the mutex.
209 	lr->mutex = new Mutex();
211 	DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
217 /* lock_record_free ************************************************************
222 lr....lock record to free
224 *******************************************************************************/
226 static void lock_record_free(lock_record_t *lr)
/* Release all resources owned by a lock record (mutex, waiter list, GC weak
   reference) and free the record itself. Inverse of lock_record_new. */
228 	DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
230 	// Destroy the mutex.
233 #if defined(ENABLE_GC_CACAO)
234 	/* unregister the lock object reference with the GC */
236 	gc_weakreference_unregister(&(lr->object));
239 	// Free the waiters list.
242 	/* Free the data structure. */
244 	FREE(lr, lock_record_t);
246 #if defined(ENABLE_STATISTICS)
248 		size_lock_record -= sizeof(lock_record_t);
253 /*============================================================================*/
254 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
255 /*============================================================================*/
257 /* lock_hashtable_init *********************************************************
259 Initialize the global hashtable mapping objects to lock records.
261 *******************************************************************************/
263 static void lock_hashtable_init(void)
/* Set up the global object->lock_record hashtable: create its mutex and
   allocate/zero the initial bucket array (LOCK_INITIAL_HASHTABLE_SIZE). */
265 	lock_hashtable.mutex = new Mutex();
267 	lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
268 	lock_hashtable.entries = 0;
269 	lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);
271 #if defined(ENABLE_STATISTICS)
273 		size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
276 	MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
280 /* lock_hashtable_grow *********************************************************
282 Grow the lock record hashtable to about twice its current size and
285 *******************************************************************************/
287 /* must be called with hashtable mutex locked */
288 static void lock_hashtable_grow(void)
/* Double (roughly) the bucket count and rehash every chained lock record
   into the new table. Old table is freed afterwards. */
292 	lock_record_t **oldtable;
293 	lock_record_t **newtable;
300 	/* allocate a new table */
302 	oldsize = lock_hashtable.size;
303 	newsize = oldsize*2 + 1; /* XXX should use prime numbers */
305 	DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
307 	oldtable = lock_hashtable.ptr;
308 	newtable = MNEW(lock_record_t *, newsize);
310 #if defined(ENABLE_STATISTICS)
312 		size_lock_hashtable += sizeof(lock_record_t *) * newsize;
315 	MZERO(newtable, lock_record_t *, newsize);
317 	/* rehash the entries */
319 	for (i = 0; i < oldsize; i++) {
/* re-bucket by the object's heap hashcode; records are pushed onto the
   head of the new chain */
324 			h       = heap_hashcode(lr->object);
325 			newslot = h % newsize;
327 			lr->hashlink = newtable[newslot];
328 			newtable[newslot] = lr;
334 	/* replace the old table */
336 	lock_hashtable.ptr  = newtable;
337 	lock_hashtable.size = newsize;
339 	MFREE(oldtable, lock_record_t *, oldsize);
341 #if defined(ENABLE_STATISTICS)
343 		size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
348 /* lock_hashtable_cleanup ******************************************************
350 Removes (and frees) lock records which have a cleared object reference
351 from the hashtable. The locked object was reclaimed by the GC.
353 *******************************************************************************/
355 #if defined(ENABLE_GC_CACAO)
356 void lock_hashtable_cleanup(void)
/* Walk every bucket chain and free lock records whose weakly-referenced
   object was cleared by the GC (see gc_weakreference_register above).
   NOTE(review): uses the C-style Mutex_lock/Mutex_unlock API while the rest
   of the file uses mutex->lock()/unlock() -- verify both still exist. */
366 	/* lock the hashtable */
368 	Mutex_lock(lock_hashtable.mutex);
370 	/* search the hashtable for cleared references */
372 	for (i = 0; i < lock_hashtable.size; i++) {
373 		lr = lock_hashtable.ptr[i];
379 			/* remove lock records with cleared references */
381 			if (lr->object == NULL) {
383 				/* unlink the lock record from the hashtable */
386 					lock_hashtable.ptr[i] = next;
388 					prev->hashlink = next;
390 				/* free the lock record */
392 				lock_record_free(lr);
402 	/* unlock the hashtable */
404 	Mutex_unlock(lock_hashtable.mutex);
409 /* lock_hashtable_get **********************************************************
411    Find the lock record for the given object. If it does not exist
412    yet, create it and enter it in the hashtable.
415 o....the object to look up
418 the lock record to use for this object
420 *******************************************************************************/
422 #if defined(ENABLE_GC_BOEHM)
423 static void lock_record_finalizer(java_handle_t *object, void *p);
426 static lock_record_t *lock_hashtable_get(java_handle_t* o)
/* Return the lock record for object o, creating and registering one if it
   does not exist yet. Fast path: an already-fat lockword carries the record
   pointer directly and needs no hashtable access. */
428 	// This function is inside a critical section.
429 	GCCriticalSection cs;
434 	// lw_cache is used throughout this file because the lockword can change at
435 	// any time, unless it is absolutely certain that we are holding the lock.
436 	// We don't do deflation, so we would also not expect a fat lockword to
437 	// change, but for the sake of uniformity, lw_cache is used even in this
439 	uintptr_t lw_cache = *lock_lockword_get(o);
440 	Lockword lockword(lw_cache);
442 	if (lockword.is_fat_lock())
443 		return lockword.get_fat_lock();
445 	// Lock the hashtable.
446 	lock_hashtable.mutex->lock();
448 	/* lookup the lock record in the hashtable */
450 	slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
451 	lr   = lock_hashtable.ptr[slot];
453 	for (; lr != NULL; lr = lr->hashlink) {
454 		if (lr->object == LLNI_DIRECT(o))
459 		/* not found, we must create a new one */
461 		lr = lock_record_new();
463 		lr->object = LLNI_DIRECT(o);
465 #if defined(ENABLE_GC_BOEHM)
466 		/* register new finalizer to clean up the lock record */
468 		Finalizer::attach_custom_finalizer(o, lock_record_finalizer, 0);
471 		/* enter it in the hashtable */
473 		lr->hashlink            = lock_hashtable.ptr[slot];
474 		lock_hashtable.ptr[slot] = lr;
475 		lock_hashtable.entries++;
477 		/* check whether the hash should grow */
		/* grow when load factor exceeds 4/3 (entries/size > 4/3) */
479 		if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
480 			lock_hashtable_grow();
484 	// Unlock the hashtable.
485 	lock_hashtable.mutex->unlock();
487 	/* return the new lock record */
492 /* lock_hashtable_remove *******************************************************
494 Remove the lock record for the given object from the hashtable
495 and free it afterwards.
498 t....the current thread
499 o....the object to look up
501 *******************************************************************************/
503 static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
/* Unlink o's (necessarily fat) lock record from its hashtable bucket chain
   and free it. Called from the lock-record finalizer when o is reclaimed. */
507 	lock_record_t *tmplr;
509 	// Lock the hashtable.
510 	lock_hashtable.mutex->lock();
512 	/* get lock record */
514 	uintptr_t lw_cache = *lock_lockword_get(o);
515 	Lockword lockword(lw_cache);
518 	assert(lockword.is_fat_lock());
520 	lr = lockword.get_fat_lock();
522 	/* remove the lock-record from the hashtable */
524 	slot  = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
525 	tmplr = lock_hashtable.ptr[slot];
528 		/* special handling if it's the first in the chain */
530 		lock_hashtable.ptr[slot] = lr->hashlink;
533 		for (; tmplr != NULL; tmplr = tmplr->hashlink) {
534 			if (tmplr->hashlink == lr) {
535 				tmplr->hashlink = lr->hashlink;
	/* lr must have been found somewhere in the chain */
540 		assert(tmplr != NULL);
543 	/* decrease entry count */
545 	lock_hashtable.entries--;
547 	// Unlock the hashtable.
548 	lock_hashtable.mutex->unlock();
550 	/* free the lock record */
552 	lock_record_free(lr);
556 /* lock_record_finalizer *******************************************************
558 XXX Remove me for exact GC.
560 *******************************************************************************/
562 static void lock_record_finalizer(java_handle_t *o, void *p)
/* Boehm-GC custom finalizer (attached in lock_hashtable_get): when the
   locked object o dies, drop its lock record from the hashtable. */
567 	LLNI_class_get(o, c);
569 	if (opt_DebugFinalizer) {
571 		log_print("[finalizer lockrecord: o=%p p=%p class=", o, p);
578 	/* remove the lock-record entry from the hashtable and free it */
580 	lock_hashtable_remove(THREADOBJECT, o);
584 /*============================================================================*/
585 /* LOCKING ALGORITHM */
586 /*============================================================================*/
589 /* lock_lockword_get ***********************************************************
591 Get the lockword for the given object.
594 o............the object
596 *******************************************************************************/
598 static inline uintptr_t* lock_lockword_get(java_handle_t* o)
/* Return a pointer to o's lockword. With the exact GC, the caller must be
   inside a GC critical section so the object cannot move underneath us. */
600 #if defined(ENABLE_GC_CACAO)
602 	assert(GCCriticalSection::inside() == true);
605 	return &(LLNI_DIRECT(o)->lockword);
609 /* lock_record_enter ***********************************************************
611 Enter the lock represented by the given lock record.
614 t.................the current thread
615 lr................the lock record
617 *******************************************************************************/
/* NOTE(review): the function body is elided in this excerpt; per the header
   comment above, this blocks until the lock represented by lr is acquired. */
619 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
626 /* lock_record_exit ************************************************************
628 Release the lock represented by the given lock record.
631 t.................the current thread
632 lr................the lock record
635 The current thread must own the lock represented by this lock record.
636 This is NOT checked by this function!
638 *******************************************************************************/
/* NOTE(review): the function body is elided in this excerpt; per the header
   comment above, this releases lr's lock. Ownership is NOT checked here. */
640 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
647 /* lock_inflate ****************************************************************
649 Inflate the lock of the given object. This may only be called by the
650 owner of the monitor of the object.
653 o............the object of which to inflate the lock
654 lr...........the lock record to install. The current thread must
655 own the lock of this lock record!
658 The current thread must be the owner of this object's monitor AND
659 of the lock record's lock!
661 *******************************************************************************/
663 static void lock_inflate(java_handle_t *o, lock_record_t *lr)
/* Turn o's thin lock into a fat lock by installing lr into the lockword.
   Caller must own both o's monitor and lr's lock (see header above). */
665 	Lockword lockword(*lock_lockword_get(o));
666 	lockword.inflate(lr);
670 /* sable_flc_waiting ***********************************************************
672 Enqueue the current thread on another thread's FLC list. The function
673 blocks until the lock has been inflated by the owning thread.
675 The algorithm used to be an almost literal copy from SableVM. The
676 superfluous list traversal in the waiting loop has been removed since,
680 lockword.....the object's lockword as seen at the first locking attempt
681 t............the current thread
682 o............the object of which to enter the monitor
684 *******************************************************************************/
686 static void sable_flc_waiting(uintptr_t lw_cache, threadobject *t, java_handle_t *o)
/* Flat-lock-contention path (SableVM-derived): set the owner's FLC bit,
   enqueue (t, o) on the owner's FLC list, and block on t->flc_cond until
   the owner inflates the lock and clears t->flc_object. */
689 	threadobject *t_other;
692 	Lockword lockword(lw_cache);
693 	index = lockword.get_thin_lock_thread_index();
694 	t_other = ThreadList::get_thread_by_index(index);
696 	// The lockword could have changed during our way here. If the
697 	// thread index is zero, the lock got unlocked and we simply
700 		/* failure, TODO: add statistics */
703 	t_other->flc_lock->lock();
704 	old_flc = t_other->flc_bit;
705 	t_other->flc_bit = true;
707 	DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d", t->index, t_other->index));
709 	// Set FLC bit first, then read the lockword again.
710 	Atomic::memory_barrier();
712 	lw_cache = *lock_lockword_get(o);
714 	/* Lockword is still the way it was seen before */
715 	if (lockword.is_thin_lock() && (lockword.get_thin_lock_thread_index() == index))
718 		/* Add tuple (t, o) to the other thread's FLC list */
720 		t->flc_next = t_other->flc_list;
721 		t_other->flc_list = t;
722 		if (t->flc_next == 0)
723 			t_other->flc_tail = t;
724 		f = t_other->flc_tail;
726 		// The other thread will clear flc_object.
727 		while (t->flc_object)
729 			// We are not cleared yet -- the other thread cannot have seen
731 			assert(t_other->flc_bit);
733 			// Wait until another thread sees the flc bit and notifies
735 			t->flc_cond->wait(t_other->flc_lock);
	/* restore the owner's previous FLC bit and release its FLC lock */
741 		t_other->flc_bit = old_flc;
743 	t_other->flc_lock->unlock();
746 /* notify_flc_waiters **********************************************************
748 Traverse the thread's FLC list and inflate all corresponding locks. Notify
749 the associated threads as well.
752 t............the current thread
753 o............the object currently being unlocked
755 *******************************************************************************/
757 static void notify_flc_waiters(threadobject *t, java_handle_t *o)
/* Walk t's FLC list, inflate each still-thin contended lock to a fat lock,
   then broadcast to and clear each waiting entry (see sable_flc_waiting). */
759 	threadobject *current;
763 	current = t->flc_list;
766 		if (current->flc_object != o)
768 			/* The object has to be inflated so the other threads can properly
771 			// Only if not already inflated.
772 			Lockword lockword(*lock_lockword_get(current->flc_object));
773 			if (lockword.is_thin_lock()) {
774 				lock_record_t *lr = lock_hashtable_get(current->flc_object);
775 				lock_record_enter(t, lr);
777 				DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
778 							t->index, (void*) current->flc_object, (void*) lr));
780 				lock_inflate(current->flc_object, lr);
784 		// Wake the waiting threads.
785 		current->flc_cond->broadcast();
	/* clearing flc_object releases the waiter spinning in sable_flc_waiting */
786 		current->flc_object = NULL;
788 		current = current->flc_next;
794 	t->flc_lock->unlock();
797 /* lock_monitor_enter **********************************************************
799 Acquire the monitor of the given object. If the current thread already
800 owns the monitor, the lock counter is simply increased.
802 This function blocks until it can acquire the monitor.
805 t............the current thread
806 o............the object of which to enter the monitor
809 true.........the lock has been successfully acquired
810 false........an exception has been thrown
812 *******************************************************************************/
814 bool lock_monitor_enter(java_handle_t *o)
/* Acquire o's monitor (blocking). Returns true on success, false after
   throwing (e.g. NullPointerException for o == NULL). Order of attempts:
   (1) thin-lock CAS on an unlocked word, (2) thin recursive re-entry,
   (3) fat-lock mutex, (4) contention/inflation path. */
816 	// This function is inside a critical section.
817 	GCCriticalSection cs;
820 		exceptions_throw_nullpointerexception();
824 	threadobject* t = thread_get_current();
826 	uintptr_t thinlock = t->thinlock;
829 	// Most common case: try to thin-lock an unlocked object.
830 	uintptr_t *lw_ptr = lock_lockword_get(o);
831 	uintptr_t lw_cache = *lw_ptr;
832 	Lockword lockword(lw_cache);
833 	bool result = Lockword(*lw_ptr).lock(thinlock);
835 	if (result == true) {
836 		// Success, we locked it.
837 		// NOTE: The Java Memory Model requires a memory barrier here.
838 #if defined(CAS_PROVIDES_FULL_BARRIER) && CAS_PROVIDES_FULL_BARRIER
839 		// On some architectures, the CAS (hidden in the
840 		// lockword.lock call above), already provides this barrier,
841 		// so we only need to inform the compiler.
842 		Atomic::instruction_barrier();
844 		Atomic::memory_barrier();
849 	// Next common case: recursive lock with small recursion count.
850 	// NOTE: We don't have to worry about stale values here, as any
851 	// stale value will indicate another thread holding the lock (or
852 	// an inflated lock).
853 	if (lockword.get_thin_lock_without_count() == thinlock) {
854 		// We own this monitor. Check the current recursion count.
855 		if (lockword.is_max_thin_lock_count() == false) {
856 			// The recursion count is low enough.
857 			Lockword(*lw_ptr).increase_thin_lock_count();
859 			// Success, we locked it.
863 		// Recursion count overflow.
		// The thin-lock count field is saturated: inflate to a fat lock.
864 		lock_record_t* lr = lock_hashtable_get(o);
865 		lock_record_enter(t, lr);
869 		notify_flc_waiters(t, o);
875 	// The lock is either contended or fat.
876 	if (lockword.is_fat_lock()) {
877 		lock_record_t* lr = lockword.get_fat_lock();
879 		// Check for recursive entering.
880 		if (lr->owner == t) {
885 		// Acquire the mutex of the lock record.
886 		lock_record_enter(t, lr);
889 		assert(lr->count == 0);
893 	/****** inflation path ******/
895 #if defined(ENABLE_JVMTI)
896 	/* Monitor Contended Enter */
897 	jvmti_MonitorContendedEntering(false, o);
900 	sable_flc_waiting(lw_cache, t, o);
902 #if defined(ENABLE_JVMTI)
903 	/* Monitor Contended Entered */
904 	jvmti_MonitorContendedEntering(true, o);
910 /* lock_monitor_exit ***********************************************************
912 Decrement the counter of a (currently owned) monitor. If the counter
913 reaches zero, release the monitor.
915 If the current thread is not the owner of the monitor, an
916 IllegalMonitorState exception is thrown.
919 t............the current thread
920 o............the object of which to exit the monitor
923 true.........everything ok,
924 false........an exception has been thrown
926 *******************************************************************************/
928 bool lock_monitor_exit(java_handle_t* o)
/* Release one level of o's monitor. Returns true on success, false after
   throwing IllegalMonitorStateException (not the owner) or
   NullPointerException (o == NULL). Mirrors the cases of
   lock_monitor_enter: thin once-held, thin recursive, fat. */
930 	// This function is inside a critical section.
931 	GCCriticalSection cs;
934 		exceptions_throw_nullpointerexception();
938 	threadobject* t = thread_get_current();
940 	uintptr_t thinlock = t->thinlock;
942 	// We don't have to worry about stale values here, as any stale
943 	// value will indicate that we don't own the lock.
944 	uintptr_t *lw_ptr = lock_lockword_get(o);
945 	uintptr_t lw_cache = *lw_ptr;
946 	Lockword lockword(lw_cache);
948 	// Most common case: we release a thin lock that we hold once.
949 	if (lockword.get_thin_lock() == thinlock) {
950 		// Memory barrier for Java Memory Model.
951 		Atomic::write_memory_barrier();
952 		Lockword(*lw_ptr).unlock();
953 		// Memory barrier for FLC bit testing.
954 		Atomic::memory_barrier();
956 		/* check if there has been a flat lock contention on this object */
959 			DEBUGLOCKS(("thread %d saw flc bit", t->index));
961 			/* there has been a contention on this thin lock */
962 			notify_flc_waiters(t, o);
968 	// Next common case: we release a recursive lock, count > 0.
969 	if (lockword.get_thin_lock_without_count() == thinlock) {
970 		Lockword(*lw_ptr).decrease_thin_lock_count();
974 	// Either the lock is fat, or we don't hold it at all.
975 	if (lockword.is_fat_lock()) {
976 		lock_record_t* lr = lockword.get_fat_lock();
978 		// Check if we own this monitor.
979 		// NOTE: We don't have to worry about stale values here, as
980 		// any stale value will be != t and thus fail this check.
981 		if (lr->owner != t) {
982 			exceptions_throw_illegalmonitorstateexception();
986 		/* { the current thread `t` owns the lock record `lr` on object `o` } */
988 		if (lr->count != 0) {
989 			// We had locked this one recursively. Just decrement, it
990 			// will still be locked.
995 		// Unlock this lock record.
996 		lock_record_exit(t, lr);
1000 	// Legal thin lock cases have been handled above, so this is an
1002 	exceptions_throw_illegalmonitorstateexception();
1008 /* lock_record_add_waiter ******************************************************
1010 Add a thread to the list of waiting threads of a lock record.
1013 lr...........the lock record
1014 thread.......the thread to add
1016 *******************************************************************************/
1018 static void lock_record_add_waiter(lock_record_t *lr, threadobject* t)
/* Append t to lr's waiter list (used by Object.wait); removed again by
   lock_record_remove_waiter after the wait completes. */
1020 	// Add the thread as last entry to waiters list.
1021 	lr->waiters->push_back(t);
1023 #if defined(ENABLE_STATISTICS)
1025 		size_lock_waiter += sizeof(threadobject*);
1030 /* lock_record_remove_waiter ***************************************************
1032 Remove a thread from the list of waiting threads of a lock record.
1035 lr...........the lock record
1036 t............the current thread
1039 The current thread must be the owner of the lock record.
1041 *******************************************************************************/
1043 static void lock_record_remove_waiter(lock_record_t *lr, threadobject* t)
/* Remove t from lr's waiter list. Caller must own lr (per header above). */
1045 	// Remove the thread from the waiters.
1046 	lr->waiters->remove(t);
1048 #if defined(ENABLE_STATISTICS)
1050 		size_lock_waiter -= sizeof(threadobject*);
1055 /* lock_record_wait ************************************************************
1057 Wait on a lock record for a given (maximum) amount of time.
1060 t............the current thread
1061 lr...........the lock record
1062 millis.......milliseconds of timeout
1063 nanos........nanoseconds of timeout
1066 true.........we have been interrupted,
1067 false........everything ok
1070 The current thread must be the owner of the lock record.
1071 This is NOT checked by this function!
1073 *******************************************************************************/
1075 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
/* Core of Object.wait on a fat lock: register as waiter, fully release the
   monitor (saving its recursion count), block with timeout, then reacquire
   and restore the count. Returns true iff the wait ended by interruption. */
1078 	bool wasinterrupted = false;
1080 	DEBUGLOCKS(("[lock_record_wait  : lr=%p, t=%p, millis=%lld, nanos=%d]",
1081 				lr, thread, millis, nanos));
1083 	/* { the thread t owns the fat lock record lr on the object o } */
1085 	/* register us as waiter for this object */
1087 	lock_record_add_waiter(lr, thread);
1089 	/* remember the old lock count */
1091 	lockcount = lr->count;
1093 	/* unlock this record */
1096 	lock_record_exit(thread, lr);
1098 	/* wait until notified/interrupted/timed out */
1100 	threads_wait_with_timeout_relative(thread, millis, nanos);
1102 	/* re-enter the monitor */
1104 	lock_record_enter(thread, lr);
1106 	/* remove us from the list of waiting threads */
1108 	lock_record_remove_waiter(lr, thread);
1110 	/* restore the old lock count */
1112 	lr->count = lockcount;
1114 	/* We can only be signaled OR interrupted, not both. If both flags
1115 	   are set, reset only signaled and leave the thread in
1116 	   interrupted state. Otherwise, clear both. */
1118 	if (!thread->signaled) {
1119 		wasinterrupted = thread->interrupted;
1120 		thread->interrupted = false;
1123 	thread->signaled = false;
1125 	/* return if we have been interrupted */
1127 	return wasinterrupted;
1131 /* lock_monitor_wait ***********************************************************
1133 Wait on an object for a given (maximum) amount of time.
1136 t............the current thread
1137 o............the object
1138 millis.......milliseconds of timeout
1139 nanos........nanoseconds of timeout
1142 The current thread must be the owner of the object's monitor.
1144 *******************************************************************************/
1146 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
/* Object.wait entry: verify ownership, inflate a thin lock first (waiting
   always needs a fat lock record), then delegate to lock_record_wait and
   translate an interrupted wait into InterruptedException. */
1150 	uintptr_t *lw_ptr = lock_lockword_get(o);
1151 	uintptr_t lw_cache = *lw_ptr;
1152 	Lockword lockword(lw_cache);
1154 	// Check if we own this monitor.
1155 	// NOTE: We don't have to worry about stale values here, as any
1156 	// stale value will fail this check.
1157 	if (lockword.is_fat_lock()) {
1158 		lr = lockword.get_fat_lock();
1160 		if (lr->owner != t) {
1161 			exceptions_throw_illegalmonitorstateexception();
1166 		// It's a thin lock.
1167 		if (lockword.get_thin_lock_without_count() != t->thinlock) {
1168 			exceptions_throw_illegalmonitorstateexception();
1172 		// Get the lock-record.
1173 		lr = lock_hashtable_get(o);
1174 		lock_record_enter(t, lr);
1176 		// Inflate this lock.
1177 		Lockword(*lw_ptr).inflate(lr);
1179 		notify_flc_waiters(t, o);
1182 	/* { the thread t owns the fat lock record lr on the object o } */
1184 	if (lock_record_wait(t, lr, millis, nanos))
1185 		exceptions_throw_interruptedexception();
1189 /* lock_record_notify **********************************************************
1191 Notify one thread or all threads waiting on the given lock record.
1194 t............the current thread
1195 lr...........the lock record
1196 one..........if true, only notify one thread
1199 The current thread must be the owner of the lock record.
1200 This is NOT checked by this function!
1202 *******************************************************************************/
1204 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
/* Signal one (one == true) or all waiters on lr. Each signaled thread is
   marked so it is not signaled twice; it removes itself from the waiter
   list when it wakes (see lock_record_wait). */
1206 #if defined(ENABLE_GC_CACAO)
1208 	assert(GCCriticalSection::inside() == false);
1211 	// { The thread t owns the fat lock record lr on the object o }
1213 	for (List<threadobject*>::iterator it = lr->waiters->begin(); it != lr->waiters->end(); it++) {
1214 		threadobject* waiter = *it;
1216 		// We must skip threads which have already been notified. They
1217 		// will remove themselves from the list.
1218 		if (waiter->signaled)
1221 		// Enter the wait-mutex.
1222 		waiter->waitmutex->lock();
1224 		DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, one=%d]", lr, t, waiter, one));
1226 		// Signal the waiter.
1227 		waiter->waitcond->signal();
1229 		// Mark the thread as signaled.
1230 		waiter->signaled = true;
1232 		// Leave the wait-mutex.
1233 		waiter->waitmutex->unlock();
1235 		// If we should only wake one thread, we are done.
1242 /* lock_monitor_notify *********************************************************
1244 Notify one thread or all threads waiting on the given object.
1247 t............the current thread
1248 o............the object
1249 one..........if true, only notify one thread
1252 The current thread must be the owner of the object's monitor.
1254 *******************************************************************************/
1256 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
/* Object.notify/notifyAll entry: verify t owns o's monitor, then signal
   one or all waiters. A thin lock has no waiters by construction (waiting
   inflates the lock first), so the thin case only checks ownership. */
1258 	lock_record_t* lr = NULL;
1261 		// This scope is inside a critical section.
1262 		GCCriticalSection cs;
1264 		uintptr_t lw_cache = *lock_lockword_get(o);
1265 		Lockword lockword(lw_cache);
1267 		// Check if we own this monitor.
1268 		// NOTE: We don't have to worry about stale values here, as any
1269 		// stale value will fail this check.
1271 		if (lockword.is_fat_lock()) {
1272 			lr = lockword.get_fat_lock();
1274 			if (lr->owner != t) {
1275 				exceptions_throw_illegalmonitorstateexception();
1280 			// It's a thin lock.
1281 			if (lockword.get_thin_lock_without_count() != t->thinlock) {
1282 				exceptions_throw_illegalmonitorstateexception();
1286 			// No thread can wait on a thin lock, so there's nothing to do.
1291 	// { The thread t owns the fat lock record lr on the object o }
1292 	lock_record_notify(t, lr, one);
1297 /*============================================================================*/
1298 /* INQUIRY FUNCTIONS                                                          */
1299 /*============================================================================*/
1302 /* lock_is_held_by_current_thread **********************************************
1304 Return true if the current thread owns the monitor of the given object.
1307 o............the object
1310 true, if the current thread holds the lock of this object.
1312 *******************************************************************************/
1314 bool lock_is_held_by_current_thread(java_handle_t *o)
/* Return true iff the current thread owns o's monitor: fat lock -> compare
   record owner; thin lock -> compare the thread-id part of the lockword. */
1316 	// This function is inside a critical section.
1317 	GCCriticalSection cs;
1319 	// Check if we own this monitor.
1320 	// NOTE: We don't have to worry about stale values here, as any
1321 	// stale value will fail this check.
1322 	threadobject* t = thread_get_current();
1323 	uintptr_t lw_cache = *lock_lockword_get(o);
1324 	Lockword lockword(lw_cache);
1326 	if (lockword.is_fat_lock()) {
1328 		lock_record_t* lr = lockword.get_fat_lock();
1329 		return (lr->owner == t);
1332 		// It's a thin lock.
1333 		return (lockword.get_thin_lock_without_count() == t->thinlock);
1339 /*============================================================================*/
1340 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1341 /*============================================================================*/
1344 /* lock_wait_for_object ********************************************************
1346 Wait for the given object.
1349 o............the object
1350 millis.......milliseconds to wait
1351 nanos........nanoseconds to wait
1353 *******************************************************************************/
1355 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
/* Convenience wrapper: Object.wait on o for the current thread. */
1357 	threadobject *thread;
1359 	thread = THREADOBJECT;
1361 	lock_monitor_wait(thread, o, millis, nanos);
1365 /* lock_notify_object **********************************************************
1367 Notify one thread waiting on the given object.
1370 o............the object
1372 *******************************************************************************/
1374 void lock_notify_object(java_handle_t *o)
/* Convenience wrapper: Object.notify (wake one waiter) on o. */
1376 	threadobject *thread;
1378 	thread = THREADOBJECT;
1380 	lock_monitor_notify(thread, o, true);
1384 /* lock_notify_all_object ******************************************************
1386 Notify all threads waiting on the given object.
1389 o............the object
1391 *******************************************************************************/
1393 void lock_notify_all_object(java_handle_t *o)
/* Convenience wrapper: Object.notifyAll (wake all waiters) on o. */
1395 	threadobject *thread;
1397 	thread = THREADOBJECT;
1399 	lock_monitor_notify(thread, o, false);
1404 * These are local overrides for various environment variables in Emacs.
1405 * Please do not remove this and leave it at the end of the file, where
1406 * Emacs will automagically detect them.
1407 * ---------------------------------------------------------------------
1410 * indent-tabs-mode: t
1414 * vim:noexpandtab:sw=4:ts=4: