1 /* src/threads/lock.cpp - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007, 2008, 2010
4 CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
6 This file is part of CACAO.
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2, or (at
11 your option) any later version.
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
36 #include "mm/memory.hpp"
38 #include "native/llni.h"
40 #include "threads/atomic.hpp"
41 #include "threads/lock.hpp"
42 #include "threads/mutex.hpp"
43 #include "threads/threadlist.hpp"
44 #include "threads/thread.hpp"
46 #include "toolbox/list.hpp"
48 #include "vm/exceptions.hpp"
49 #include "vm/finalizer.hpp"
50 #include "vm/global.h"
51 #include "vm/options.h"
52 #include "vm/string.hpp"
55 #if defined(ENABLE_STATISTICS)
56 # include "vm/statistics.h"
59 #if defined(ENABLE_VMLOG)
60 #include <vmlog_cacao.h>
63 #if defined(ENABLE_JVMTI)
64 #include "native/jvmti/cacaodbg.h"
67 #if defined(ENABLE_GC_BOEHM)
68 # include "mm/boehm-gc/include/gc.h"
72 /* debug **********************************************************************/
75 # define DEBUGLOCKS(format) \
77 if (opt_DebugLocks) { \
82 # define DEBUGLOCKS(format)
86 /******************************************************************************/
88 /******************************************************************************/
90 /* number of lock records in the first pool allocated for a thread */
91 #define LOCK_INITIAL_LOCK_RECORDS 8
93 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
96 /******************************************************************************/
97 /* MACROS FOR THIN/FAT LOCKS */
98 /******************************************************************************/
100 /* We use a variant of the tasuki locks described in the paper
102 * Tamiya Onodera, Kiyokuni Kawachiya
103 * A Study of Locking Objects with Bimodal Fields
104 * Proceedings of the ACM OOPSLA '99, pp. 223-237
107 * The underlying thin locks are a variant of the thin locks described in
109 * Bacon, Konuru, Murthy, Serrano
110 * Thin Locks: Featherweight Synchronization for Java
111 * Proceedings of the ACM Conference on Programming Language Design and
112 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
115 * In thin lock mode the lockword looks like this:
117 * ,----------------------,-----------,---,
118 * | thread ID | count | 0 |
119 * `----------------------'-----------'---'
121 * thread ID......the 'index' of the owning thread, or 0
122 * count..........number of times the lock has been entered minus 1
123 * 0..............the shape bit is 0 in thin lock mode
125 * In fat lock mode it is basically a lock_record_t *:
127 * ,----------------------------------,---,
128 * | lock_record_t * (without LSB) | 1 |
129 * `----------------------------------'---'
131 * 1..............the shape bit is 1 in fat lock mode
134 /* global variables ***********************************************************/
136 /* hashtable mapping objects to lock records */
137 static lock_hashtable_t lock_hashtable;
140 /******************************************************************************/
142 /******************************************************************************/
144 static void lock_hashtable_init(void);
146 static inline uintptr_t* lock_lockword_get(java_handle_t* o);
147 static void lock_record_enter(threadobject *t, lock_record_t *lr);
148 static void lock_record_exit(threadobject *t, lock_record_t *lr);
149 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
150 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
153 /*============================================================================*/
154 /* INITIALIZATION OF DATA STRUCTURES */
155 /*============================================================================*/
158 /* lock_init *******************************************************************
160 Initialize global data for locking.
162 *******************************************************************************/
166 /* initialize lock hashtable */
168 lock_hashtable_init();
170 #if defined(ENABLE_VMLOG)
171 vmlog_cacao_init_lock();
176 /* lock_record_new *************************************************************
178 Allocate a lock record.
180 *******************************************************************************/
182 static lock_record_t *lock_record_new(void)
186 /* allocate the data structure on the C heap */
/* NEW() is the VM's C-heap allocator; the record is NOT on the GC heap and
   must be released with FREE() (see lock_record_free). */
188 lr = NEW(lock_record_t);
190 #if defined(ENABLE_STATISTICS)
/* account for the allocation in the global lock-record statistics counter */
192 size_lock_record += sizeof(lock_record_t);
195 /* initialize the members */
/* list of threads currently wait()ing on this record */
200 lr->waiters = new List<threadobject*>();
202 #if defined(ENABLE_GC_CACAO)
203 /* register the lock object as weak reference with the GC */
/* weak, so the GC can reclaim the object; lr->object is then cleared and the
   record is collected by lock_hashtable_cleanup() */
205 gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
208 // Initialize the mutex.
209 lr->mutex = new Mutex();
211 DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
217 /* lock_record_free ************************************************************
222 lr....lock record to free
224 *******************************************************************************/
/* Releases a lock record previously created by lock_record_new().
   NOTE(review): must only be called once no thread can still reach lr
   (callers hold the hashtable mutex or run from the finalizer path). */
226 static void lock_record_free(lock_record_t *lr)
228 DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
230 // Destroy the mutex.
233 #if defined(ENABLE_GC_CACAO)
234 /* unregister the lock object reference with the GC */
/* mirrors the gc_weakreference_register() call in lock_record_new */
236 gc_weakreference_unregister(&(lr->object));
239 // Free the waiters list.
242 /* Free the data structure. */
244 FREE(lr, lock_record_t);
246 #if defined(ENABLE_STATISTICS)
/* keep the statistics counter symmetric with lock_record_new */
248 size_lock_record -= sizeof(lock_record_t);
253 /*============================================================================*/
254 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
255 /*============================================================================*/
257 /* lock_hashtable_init *********************************************************
259 Initialize the global hashtable mapping objects to lock records.
261 *******************************************************************************/
/* Sets up the global object -> lock_record hashtable.
   Called once from lock_init() before any monitor operation. */
263 static void lock_hashtable_init(void)
265 lock_hashtable.mutex = new Mutex();
/* initial size is a prime (see LOCK_INITIAL_HASHTABLE_SIZE above) */
267 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
268 lock_hashtable.entries = 0;
269 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
271 #if defined(ENABLE_STATISTICS)
273 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
/* all slots start out as empty chains */
276 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
280 /* lock_hashtable_grow *********************************************************
282 Grow the lock record hashtable to about twice its current size and
285 *******************************************************************************/
287 /* must be called with hashtable mutex locked */
/* Doubles (roughly) the hashtable and rehashes every chained lock record.
   Caller must hold lock_hashtable.mutex (see comment above). */
288 static void lock_hashtable_grow(void)
292 lock_record_t **oldtable;
293 lock_record_t **newtable;
300 /* allocate a new table */
302 oldsize = lock_hashtable.size;
/* odd size keeps the modulo distribution reasonable even without a prime */
303 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
305 DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
307 oldtable = lock_hashtable.ptr;
308 newtable = MNEW(lock_record_t *, newsize);
310 #if defined(ENABLE_STATISTICS)
312 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
315 MZERO(newtable, lock_record_t *, newsize);
317 /* rehash the entries */
319 for (i = 0; i < oldsize; i++) {
/* recompute the slot from the object identity hash and push the record
   onto the head of its new chain */
324 h = heap_hashcode(lr->object);
325 newslot = h % newsize;
327 lr->hashlink = newtable[newslot];
328 newtable[newslot] = lr;
334 /* replace the old table */
336 lock_hashtable.ptr = newtable;
337 lock_hashtable.size = newsize;
339 MFREE(oldtable, lock_record_t *, oldsize);
341 #if defined(ENABLE_STATISTICS)
/* the old table's memory is no longer accounted for */
343 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
348 /* lock_hashtable_cleanup ******************************************************
350 Removes (and frees) lock records which have a cleared object reference
351 from the hashtable. The locked object was reclaimed by the GC.
353 *******************************************************************************/
355 #if defined(ENABLE_GC_CACAO)
/* Walks every chain and frees lock records whose weak object reference was
   cleared by the exact GC (ENABLE_GC_CACAO only). */
356 void lock_hashtable_cleanup(void)
366 /* lock the hashtable */
368 Mutex_lock(lock_hashtable.mutex);
370 /* search the hashtable for cleared references */
372 for (i = 0; i < lock_hashtable.size; i++) {
373 lr = lock_hashtable.ptr[i];
379 /* remove lock records with cleared references */
/* lr->object == NULL means the GC reclaimed the locked object */
381 if (lr->object == NULL) {
383 /* unlink the lock record from the hashtable */
/* head-of-chain case vs. mid-chain case (prev presumably tracked by the
   elided loop bookkeeping) */
386 lock_hashtable.ptr[i] = next;
388 prev->hashlink = next;
390 /* free the lock record */
392 lock_record_free(lr);
402 /* unlock the hashtable */
404 Mutex_unlock(lock_hashtable.mutex);
409 /* lock_hashtable_get **********************************************************
411 Find the lock record for the given object. If it does not exists,
412 yet, create it and enter it in the hashtable.
415 o....the object to look up
418 the lock record to use for this object
420 *******************************************************************************/
422 #if defined(ENABLE_GC_BOEHM)
423 static void lock_record_finalizer(void *object, void *p);
/* Looks up (or lazily creates) the fat-lock record for object o.
   Fast path: if the lockword is already fat, return its record without
   touching the hashtable. Otherwise search/insert under the table mutex. */
426 static lock_record_t *lock_hashtable_get(java_handle_t* o)
428 // This function is inside a critical section.
429 GCCriticalSection cs;
434 // lw_cache is used throughout this file because the lockword can change at
435 // any time, unless it is absolutely certain that we are holding the lock.
436 // We don't do deflation, so we would also not expect a fat lockword to
437 // change, but for the sake of uniformity, lw_cache is used even in this
439 uintptr_t lw_cache = *lock_lockword_get(o);
440 Lockword lockword(lw_cache);
442 if (lockword.is_fat_lock())
443 return lockword.get_fat_lock();
445 // Lock the hashtable.
446 lock_hashtable.mutex->lock();
448 /* lookup the lock record in the hashtable */
450 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
451 lr = lock_hashtable.ptr[slot];
/* chain walk: records hash-collide into a singly linked list via hashlink */
453 for (; lr != NULL; lr = lr->hashlink) {
454 if (lr->object == LLNI_DIRECT(o))
459 /* not found, we must create a new one */
461 lr = lock_record_new();
463 lr->object = LLNI_DIRECT(o);
465 #if defined(ENABLE_GC_BOEHM)
466 /* register new finalizer to clean up the lock record */
/* ofinal receives any previously registered finalizer so it can be restored */
468 GC_finalization_proc ofinal = 0;
469 GC_REGISTER_FINALIZER_UNREACHABLE(LLNI_DIRECT(o), lock_record_finalizer, 0, &ofinal, 0);
471 /* There was a finalizer -- reinstall it. We do not want to disrupt the
472 normal finalizer operation. We hold the monitor on this object, so
473 this is thread-safe. */
475 GC_REGISTER_FINALIZER_NO_ORDER(LLNI_DIRECT(o), ofinal, 0, 0, 0);
478 /* enter it in the hashtable */
480 lr->hashlink = lock_hashtable.ptr[slot];
481 lock_hashtable.ptr[slot] = lr;
482 lock_hashtable.entries++;
484 /* check whether the hash should grow */
/* grow when the load factor exceeds 4/3 (entries/size > 4/3) */
486 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
487 lock_hashtable_grow();
491 // Unlock the hashtable.
492 lock_hashtable.mutex->unlock();
494 /* return the new lock record */
499 /* lock_schedule_lockrecord_removal ********************************************
501 Gives the locking system a chance to schedule the removal of an unused lock
502 record. This function is called after an object's finalizer has run.
505 o....the object which has been finalized
507 *******************************************************************************/
509 #if defined(ENABLE_GC_BOEHM)
/* Re-registers the lock-record finalizer after the object's own Java
   finalizer has run, so the record is reclaimed once o becomes unreachable.
   Boehm-GC builds only. */
510 void lock_schedule_lockrecord_removal(java_handle_t *o)
512 Lockword lockword(*lock_lockword_get(o));
/* thin-locked objects never got a lock record, so nothing to clean up */
513 if (!lockword.is_fat_lock())
514 /* there is no lock record */
517 /* register new finalizer to clean up the lock record */
518 GC_REGISTER_FINALIZER_UNREACHABLE(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
523 /* lock_hashtable_remove *******************************************************
525 Remove the lock record for the given object from the hashtable
526 and free it afterwards.
529 t....the current thread
530 o....the object to look up
532 *******************************************************************************/
/* Unlinks o's lock record from the hashtable and frees it. Called from the
   lock-record finalizer once the object is gone. */
534 static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
538 lock_record_t *tmplr;
540 // Lock the hashtable.
541 lock_hashtable.mutex->lock();
543 /* get lock record */
545 uintptr_t lw_cache = *lock_lockword_get(o);
546 Lockword lockword(lw_cache);
/* only fat-locked objects have a record to remove */
549 assert(lockword.is_fat_lock());
551 lr = lockword.get_fat_lock();
553 /* remove the lock-record from the hashtable */
555 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
556 tmplr = lock_hashtable.ptr[slot];
559 /* special handling if it's the first in the chain */
561 lock_hashtable.ptr[slot] = lr->hashlink;
/* otherwise find lr's predecessor in the collision chain and splice it out */
564 for (; tmplr != NULL; tmplr = tmplr->hashlink) {
565 if (tmplr->hashlink == lr) {
566 tmplr->hashlink = lr->hashlink;
/* lr must have been found in the chain, or the table is corrupt */
571 assert(tmplr != NULL);
574 /* decrease entry count */
576 lock_hashtable.entries--;
578 // Unlock the hashtable.
579 lock_hashtable.mutex->unlock();
581 /* free the lock record */
583 lock_record_free(lr);
587 /* lock_record_finalizer *******************************************************
589 XXX Remove me for exact GC.
591 *******************************************************************************/
/* Boehm-GC finalizer callback: invoked when a fat-locked object becomes
   unreachable; removes and frees its lock record.
   object....the reclaimed java object (raw pointer from the GC)
   p.........unused client data (registered as 0) */
593 static void lock_record_finalizer(void *object, void *p)
598 o = (java_handle_t *) object;
600 #if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
601 /* XXX this is only a dirty hack to make Boehm work with handles */
603 o = LLNI_WRAP((java_object_t *) o);
606 LLNI_class_get(o, c);
/* optional debug trace of the finalized object's class */
609 if (opt_DebugFinalizer) {
611 log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
618 /* remove the lock-record entry from the hashtable and free it */
620 lock_hashtable_remove(THREADOBJECT, o);
624 /*============================================================================*/
625 /* LOCKING ALGORITHM */
626 /*============================================================================*/
629 /* lock_lockword_get ***********************************************************
631 Get the lockword for the given object.
634 o............the object
636 *******************************************************************************/
/* Returns a pointer to o's lockword inside the object header.
   With the exact GC the caller must be inside a GC critical section,
   because the returned pointer is into the (movable) heap object. */
638 static inline uintptr_t* lock_lockword_get(java_handle_t* o)
640 #if defined(ENABLE_GC_CACAO)
642 assert(GCCriticalSection::inside() == true);
645 return &(LLNI_DIRECT(o)->lockword);
649 /* lock_record_enter ***********************************************************
651 Enter the lock represented by the given lock record.
654 t.................the current thread
655 lr................the lock record
657 *******************************************************************************/
659 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
666 /* lock_record_exit ************************************************************
668 Release the lock represented by the given lock record.
671 t.................the current thread
672 lr................the lock record
675 The current thread must own the lock represented by this lock record.
676 This is NOT checked by this function!
678 *******************************************************************************/
680 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
687 /* lock_inflate ****************************************************************
689 Inflate the lock of the given object. This may only be called by the
690 owner of the monitor of the object.
693 o............the object of which to inflate the lock
694 lr...........the lock record to install. The current thread must
695 own the lock of this lock record!
698 The current thread must be the owner of this object's monitor AND
699 of the lock record's lock!
701 *******************************************************************************/
/* Turns o's thin lock into a fat lock pointing at lr (sets the shape bit).
   Per the contract above: caller owns both o's monitor and lr's lock. */
703 static void lock_inflate(java_handle_t *o, lock_record_t *lr)
705 Lockword lockword(*lock_lockword_get(o));
706 lockword.inflate(lr);
710 /* sable_flc_waiting ***********************************************************
712 Enqueue the current thread on another thread's FLC list. The function
713 blocks until the lock has been inflated by the owning thread.
715 The algorithm used to be an almost literal copy from SableVM. The
716 superfluous list traversal in the waiting loop has been removed since,
720 lockword.....the object's lockword as seen at the first locking attempt
721 t............the current thread
722 o............the object of which to enter the monitor
724 *******************************************************************************/
/* Flat-lock-contention path (SableVM-derived): enqueue ourselves on the
   lock-holding thread's FLC list and block until that thread inflates the
   lock on exit. lw_cache is the lockword value seen at the failed attempt. */
726 static void sable_flc_waiting(uintptr_t lw_cache, threadobject *t, java_handle_t *o)
729 threadobject *t_other;
732 Lockword lockword(lw_cache);
/* identify the thread currently holding the thin lock */
733 index = lockword.get_thin_lock_thread_index();
734 t_other = ThreadList::get_thread_by_index(index);
736 // The lockword could have changed during our way here. If the
737 // thread index is zero, the lock got unlocked and we simply
740 /* failure, TODO: add statistics */
/* all FLC-list manipulation happens under the holder's flc_lock */
743 t_other->flc_lock->lock();
744 old_flc = t_other->flc_bit;
745 t_other->flc_bit = true;
747 DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d", t->index, t_other->index));
749 // Set FLC bit first, then read the lockword again.
/* the barrier orders our flc_bit store before the lockword re-read; the
   holder tests flc_bit after its unlock (mirror ordering in lock_monitor_exit) */
750 Atomic::memory_barrier();
752 lw_cache = *lock_lockword_get(o);
754 /* Lockword is still the way it was seen before */
755 if (lockword.is_thin_lock() && (lockword.get_thin_lock_thread_index() == index))
758 /* Add tuple (t, o) to the other thread's FLC list */
760 t->flc_next = t_other->flc_list;
761 t_other->flc_list = t;
762 if (t->flc_next == 0)
763 t_other->flc_tail = t;
764 f = t_other->flc_tail;
766 // The other thread will clear flc_object.
767 while (t->flc_object)
769 // We are not cleared yet -- the other thread cannot have seen
771 assert(t_other->flc_bit);
773 // Wait until another thread sees the flc bit and notifies
775 t->flc_cond->wait(t_other->flc_lock);
/* restore the holder's previous flc_bit value before leaving */
781 t_other->flc_bit = old_flc;
783 t_other->flc_lock->unlock();
786 /* notify_flc_waiters **********************************************************
788 Traverse the thread's FLC list and inflate all corresponding locks. Notify
789 the associated threads as well.
792 t............the current thread
793 o............the object currently being unlocked
795 *******************************************************************************/
/* Walks t's own FLC list, inflates the lock of every waiter queued on o
   (or on other objects t holds), and wakes the waiting threads. */
797 static void notify_flc_waiters(threadobject *t, java_handle_t *o)
799 threadobject *current;
803 current = t->flc_list;
/* skip waiters queued for a different object */
806 if (current->flc_object != o)
808 /* The object has to be inflated so the other threads can properly
811 // Only if not already inflated.
812 Lockword lockword(*lock_lockword_get(current->flc_object));
813 if (lockword.is_thin_lock()) {
814 lock_record_t *lr = lock_hashtable_get(current->flc_object);
/* take the record's mutex first so inflation hands over a lock we own */
815 lock_record_enter(t, lr);
817 DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
818 t->index, (void*) current->flc_object, (void*) lr));
820 lock_inflate(current->flc_object, lr);
824 // Wake the waiting threads.
825 current->flc_cond->broadcast();
/* clearing flc_object releases the waiter from its spin in sable_flc_waiting */
826 current->flc_object = NULL;
828 current = current->flc_next;
834 t->flc_lock->unlock();
837 /* lock_monitor_enter **********************************************************
839 Acquire the monitor of the given object. If the current thread already
840 owns the monitor, the lock counter is simply increased.
842 This function blocks until it can acquire the monitor.
845 t............the current thread
846 o............the object of which to enter the monitor
849 true.........the lock has been successfully acquired
850 false........an exception has been thrown
852 *******************************************************************************/
/* monitorenter: thin-lock fast path, recursive thin-lock path, fat-lock
   path, and finally the contention/inflation path via sable_flc_waiting().
   Returns false only when an exception (NullPointerException) was thrown. */
854 bool lock_monitor_enter(java_handle_t *o)
856 // This function is inside a critical section.
857 GCCriticalSection cs;
/* entering a monitor on null throws NPE, per the JVM spec */
860 exceptions_throw_nullpointerexception();
864 threadobject* t = thread_get_current();
/* t->thinlock is this thread's precomputed thin-lock word (index, count 0) */
866 uintptr_t thinlock = t->thinlock;
869 // Most common case: try to thin-lock an unlocked object.
870 uintptr_t *lw_ptr = lock_lockword_get(o);
871 uintptr_t lw_cache = *lw_ptr;
872 Lockword lockword(lw_cache);
/* Lockword::lock presumably performs a CAS from "unlocked" to thinlock --
   TODO confirm against lockword.hpp */
873 bool result = Lockword(*lw_ptr).lock(thinlock);
875 if (result == true) {
876 // Success, we locked it.
877 // NOTE: The Java Memory Model requires a memory barrier here.
878 #if defined(CAS_PROVIDES_FULL_BARRIER) && CAS_PROVIDES_FULL_BARRIER
879 // On some architectures, the CAS (hidden in the
880 // lockword.lock call above), already provides this barrier,
881 // so we only need to inform the compiler.
882 Atomic::instruction_barrier();
884 Atomic::memory_barrier();
889 // Next common case: recursive lock with small recursion count.
890 // NOTE: We don't have to worry about stale values here, as any
891 // stale value will indicate another thread holding the lock (or
892 // an inflated lock).
893 if (lockword.get_thin_lock_without_count() == thinlock) {
894 // We own this monitor. Check the current recursion count.
895 if (lockword.is_max_thin_lock_count() == false) {
896 // The recursion count is low enough.
897 Lockword(*lw_ptr).increase_thin_lock_count();
899 // Success, we locked it.
903 // Recursion count overflow.
/* count field exhausted: inflate to a fat lock and continue there */
904 lock_record_t* lr = lock_hashtable_get(o);
905 lock_record_enter(t, lr);
909 notify_flc_waiters(t, o);
915 // The lock is either contented or fat.
916 if (lockword.is_fat_lock()) {
917 lock_record_t* lr = lockword.get_fat_lock();
919 // Check for recursive entering.
920 if (lr->owner == t) {
925 // Acquire the mutex of the lock record.
926 lock_record_enter(t, lr);
/* freshly acquired fat lock must have no recursion yet */
929 assert(lr->count == 0);
933 /****** inflation path ******/
935 #if defined(ENABLE_JVMTI)
936 /* Monitor Contended Enter */
937 jvmti_MonitorContendedEntering(false, o);
/* block until the holder inflates the lock for us, then retry */
940 sable_flc_waiting(lw_cache, t, o);
942 #if defined(ENABLE_JVMTI)
943 /* Monitor Contended Entered */
944 jvmti_MonitorContendedEntering(true, o);
950 /* lock_monitor_exit ***********************************************************
952 Decrement the counter of a (currently owned) monitor. If the counter
953 reaches zero, release the monitor.
955 If the current thread is not the owner of the monitor, an
956 IllegalMonitorState exception is thrown.
959 t............the current thread
960 o............the object of which to exit the monitor
963 true.........everything ok,
964 false........an exception has been thrown
966 *******************************************************************************/
/* monitorexit: mirrors lock_monitor_enter. Thin unlock, recursive thin
   decrement, fat-lock exit; throws IllegalMonitorStateException when the
   caller does not own the monitor. */
968 bool lock_monitor_exit(java_handle_t* o)
970 // This function is inside a critical section.
971 GCCriticalSection cs;
/* exiting a monitor on null throws NPE, per the JVM spec */
974 exceptions_throw_nullpointerexception();
978 threadobject* t = thread_get_current();
980 uintptr_t thinlock = t->thinlock;
982 // We don't have to worry about stale values here, as any stale
983 // value will indicate that we don't own the lock.
984 uintptr_t *lw_ptr = lock_lockword_get(o);
985 uintptr_t lw_cache = *lw_ptr;
986 Lockword lockword(lw_cache);
988 // Most common case: we release a thin lock that we hold once.
989 if (lockword.get_thin_lock() == thinlock) {
990 // Memory barrier for Java Memory Model.
991 Atomic::write_memory_barrier();
992 Lockword(*lw_ptr).unlock();
993 // Memory barrier for FLC bit testing.
/* orders the unlock store before the flc_bit read; pairs with the barrier
   in sable_flc_waiting */
994 Atomic::memory_barrier();
996 /* check if there has been a flat lock contention on this object */
999 DEBUGLOCKS(("thread %d saw flc bit", t->index));
1001 /* there has been a contention on this thin lock */
1002 notify_flc_waiters(t, o);
1008 // Next common case: we release a recursive lock, count > 0.
1009 if (lockword.get_thin_lock_without_count() == thinlock) {
1010 Lockword(*lw_ptr).decrease_thin_lock_count();
1014 // Either the lock is fat, or we don't hold it at all.
1015 if (lockword.is_fat_lock()) {
1016 lock_record_t* lr = lockword.get_fat_lock();
1018 // Check if we own this monitor.
1019 // NOTE: We don't have to worry about stale values here, as
1020 // any stale value will be != t and thus fail this check.
1021 if (lr->owner != t) {
1022 exceptions_throw_illegalmonitorstateexception();
1026 /* { the current thread `t` owns the lock record `lr` on object `o` } */
1028 if (lr->count != 0) {
1029 // We had locked this one recursively. Just decrement, it
1030 // will still be locked.
1035 // Unlock this lock record.
1036 lock_record_exit(t, lr);
1040 // Legal thin lock cases have been handled above, so this is an
1042 exceptions_throw_illegalmonitorstateexception();
1048 /* lock_record_add_waiter ******************************************************
1050 Add a thread to the list of waiting threads of a lock record.
1053 lr...........the lock record
1054 thread.......the thread to add
1056 *******************************************************************************/
/* Appends t to lr's waiters list (used by Object.wait).
   NOTE(review): presumably called with lr's mutex held -- confirm callers. */
1058 static void lock_record_add_waiter(lock_record_t *lr, threadobject* t)
1060 // Add the thread as last entry to waiters list.
1061 lr->waiters->push_back(t)
1063 #if defined(ENABLE_STATISTICS)
/* track waiter-list memory usage */
1065 size_lock_waiter += sizeof(threadobject*);
1070 /* lock_record_remove_waiter ***************************************************
1072 Remove a thread from the list of waiting threads of a lock record.
1075 lr...........the lock record
1076 t............the current thread
1079 The current thread must be the owner of the lock record.
1081 *******************************************************************************/
/* Removes t from lr's waiters list after a wait completes.
   Caller must own lr (see PRE-CONDITION above). */
1083 static void lock_record_remove_waiter(lock_record_t *lr, threadobject* t)
1085 // Remove the thread from the waiters.
1086 lr->waiters->remove(t);
1088 #if defined(ENABLE_STATISTICS)
/* keep the statistics symmetric with lock_record_add_waiter */
1090 size_lock_waiter -= sizeof(threadobject*);
1095 /* lock_record_wait ************************************************************
1097 Wait on a lock record for a given (maximum) amount of time.
1100 t............the current thread
1101 lr...........the lock record
1102 millis.......milliseconds of timeout
1103 nanos........nanoseconds of timeout
1106 true.........we have been interrupted,
1107 false........everything ok
1110 The current thread must be the owner of the lock record.
1111 This is NOT checked by this function!
1113 *******************************************************************************/
/* Core of Object.wait on a fat lock: registers the thread as waiter, fully
   releases the (possibly recursive) lock, blocks with optional timeout,
   then re-acquires and restores the recursion count.
   Returns true when the wait ended due to interruption. */
1115 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1118 bool wasinterrupted = false;
1120 DEBUGLOCKS(("[lock_record_wait : lr=%p, t=%p, millis=%lld, nanos=%d]",
1121 lr, thread, millis, nanos));
1123 /* { the thread t owns the fat lock record lr on the object o } */
1125 /* register us as waiter for this object */
1127 lock_record_add_waiter(lr, thread);
1129 /* remember the old lock count */
/* the recursion count must survive the wait; it is restored below */
1131 lockcount = lr->count;
1133 /* unlock this record */
1136 lock_record_exit(thread, lr);
1138 /* wait until notified/interrupted/timed out */
1140 threads_wait_with_timeout_relative(thread, millis, nanos);
1142 /* re-enter the monitor */
1144 lock_record_enter(thread, lr);
1146 /* remove us from the list of waiting threads */
1148 lock_record_remove_waiter(lr, thread);
1150 /* restore the old lock count */
1152 lr->count = lockcount;
1154 /* We can only be signaled OR interrupted, not both. If both flags
1155 are set, reset only signaled and leave the thread in
1156 interrupted state. Otherwise, clear both. */
1158 if (!thread->signaled) {
1159 wasinterrupted = thread->interrupted;
1160 thread->interrupted = false;
1163 thread->signaled = false;
1165 /* return if we have been interrupted */
1167 return wasinterrupted;
1171 /* lock_monitor_wait ***********************************************************
1173 Wait on an object for a given (maximum) amount of time.
1176 t............the current thread
1177 o............the object
1178 millis.......milliseconds of timeout
1179 nanos........nanoseconds of timeout
1182 The current thread must be the owner of the object's monitor.
1184 *******************************************************************************/
/* Object.wait entry point: verifies ownership, inflates a thin lock to a
   fat lock if needed (waiting requires a lock record), then delegates to
   lock_record_wait and throws InterruptedException on interruption. */
1186 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
1190 uintptr_t *lw_ptr = lock_lockword_get(o);
1191 uintptr_t lw_cache = *lw_ptr;
1192 Lockword lockword(lw_cache);
1194 // Check if we own this monitor.
1195 // NOTE: We don't have to worry about stale values here, as any
1196 // stale value will fail this check.
1197 if (lockword.is_fat_lock()) {
1198 lr = lockword.get_fat_lock();
1200 if (lr->owner != t) {
1201 exceptions_throw_illegalmonitorstateexception();
1206 // It's a thin lock.
/* thin lock held by someone else (or unlocked): caller does not own o */
1207 if (lockword.get_thin_lock_without_count() != t->thinlock) {
1208 exceptions_throw_illegalmonitorstateexception();
1212 // Get the lock-record.
1213 lr = lock_hashtable_get(o);
1214 lock_record_enter(t, lr);
1216 // Inflate this lock.
1217 Lockword(*lw_ptr).inflate(lr);
/* wake any flat-lock-contention waiters now that the lock is fat */
1219 notify_flc_waiters(t, o);
1222 /* { the thread t owns the fat lock record lr on the object o } */
1224 if (lock_record_wait(t, lr, millis, nanos))
1225 exceptions_throw_interruptedexception();
1229 /* lock_record_notify **********************************************************
1231 Notify one thread or all threads waiting on the given lock record.
1234 t............the current thread
1235 lr...........the lock record
1236 one..........if true, only notify one thread
1239 The current thread must be the owner of the lock record.
1240 This is NOT checked by this function!
1242 *******************************************************************************/
/* Wakes one (one==true) or all waiters of lr by signalling each waiter's
   per-thread condition variable. Caller owns lr (see PRE-CONDITION). */
1244 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1246 #if defined(ENABLE_GC_CACAO)
/* must NOT be in a GC critical section: we block on waiter mutexes below */
1248 assert(GCCriticalSection::inside() == false);
1251 // { The thread t owns the fat lock record lr on the object o }
1253 for (List<threadobject*>::iterator it = lr->waiters->begin(); it != lr->waiters->end(); it++) {
1254 threadobject* waiter = *it;
1256 // We must skip threads which have already been notified. They
1257 // will remove themselves from the list.
1258 if (waiter->signaled)
1261 // Enter the wait-mutex.
1262 waiter->waitmutex->lock();
1264 DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, one=%d]", lr, t, waiter, one));
1266 // Signal the waiter.
1267 waiter->waitcond->signal();
1269 // Mark the thread as signaled.
/* set under waitmutex so lock_record_wait observes a consistent flag */
1270 waiter->signaled = true;
1272 // Leave the wait-mutex.
1273 waiter->waitmutex->unlock();
1275 // If we should only wake one thread, we are done.
1282 /* lock_monitor_notify *********************************************************
1284 Notify one thread or all threads waiting on the given object.
1287 t............the current thread
1288 o............the object
1289 one..........if true, only notify one thread
1292 The current thread must be the owner of the object's monitor.
1294 *******************************************************************************/
/* Object.notify/notifyAll entry point: checks monitor ownership, then
   delegates to lock_record_notify. A thin lock needs no notification since
   no thread can wait on it (waiting forces inflation). */
1296 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1298 lock_record_t* lr = NULL;
1301 // This scope is inside a critical section.
/* the critical section ends before lock_record_notify is called (it asserts
   being outside one) -- scope braces are presumably around this section */
1302 GCCriticalSection cs;
1304 uintptr_t lw_cache = *lock_lockword_get(o);
1305 Lockword lockword(lw_cache);
1307 // Check if we own this monitor.
1308 // NOTE: We don't have to worry about stale values here, as any
1309 // stale value will fail this check.
1311 if (lockword.is_fat_lock()) {
1312 lr = lockword.get_fat_lock();
1314 if (lr->owner != t) {
1315 exceptions_throw_illegalmonitorstateexception();
1320 // It's a thin lock.
1321 if (lockword.get_thin_lock_without_count() != t->thinlock) {
1322 exceptions_throw_illegalmonitorstateexception();
1326 // No thread can wait on a thin lock, so there's nothing to do.
1331 // { The thread t owns the fat lock record lr on the object o }
1332 lock_record_notify(t, lr, one);
1337 /*============================================================================*/
1338 /* INQUIRY FUNCTIONS */
1339 /*============================================================================*/
1342 /* lock_is_held_by_current_thread **********************************************
1344 Return true if the current thread owns the monitor of the given object.
1347 o............the object
1350 true, if the current thread holds the lock of this object.
1352 *******************************************************************************/
/* Returns true iff the calling thread owns o's monitor, for both the fat
   case (record owner comparison) and the thin case (thinlock comparison). */
1354 bool lock_is_held_by_current_thread(java_handle_t *o)
1356 // This function is inside a critical section.
1357 GCCriticalSection cs;
1359 // Check if we own this monitor.
1360 // NOTE: We don't have to worry about stale values here, as any
1361 // stale value will fail this check.
1362 threadobject* t = thread_get_current();
1363 uintptr_t lw_cache = *lock_lockword_get(o);
1364 Lockword lockword(lw_cache);
1366 if (lockword.is_fat_lock()) {
1368 lock_record_t* lr = lockword.get_fat_lock();
1369 return (lr->owner == t);
1372 // It's a thin lock.
/* ignoring the count: any recursion level still means we hold it */
1373 return (lockword.get_thin_lock_without_count() == t->thinlock);
1379 /*============================================================================*/
1380 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1381 /*============================================================================*/
1384 /* lock_wait_for_object ********************************************************
1386 Wait for the given object.
1389 o............the object
1390 millis.......milliseconds to wait
1391 nanos........nanoseconds to wait
1393 *******************************************************************************/
/* Convenience wrapper: Object.wait on o for the CURRENT thread. */
1395 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1397 threadobject *thread;
1399 thread = THREADOBJECT;
1401 lock_monitor_wait(thread, o, millis, nanos);
1405 /* lock_notify_object **********************************************************
1407 Notify one thread waiting on the given object.
1410 o............the object
1412 *******************************************************************************/
/* Convenience wrapper: Object.notify (wake ONE waiter) for the current thread. */
1414 void lock_notify_object(java_handle_t *o)
1416 threadobject *thread;
1418 thread = THREADOBJECT;
1420 lock_monitor_notify(thread, o, true);
1424 /* lock_notify_all_object ******************************************************
1426 Notify all threads waiting on the given object.
1429 o............the object
1431 *******************************************************************************/
/* Convenience wrapper: Object.notifyAll (wake ALL waiters) for the current thread. */
1433 void lock_notify_all_object(java_handle_t *o)
1435 threadobject *thread;
1437 thread = THREADOBJECT;
1439 lock_monitor_notify(thread, o, false);
1444 * These are local overrides for various environment variables in Emacs.
1445 * Please do not remove this and leave it at the end of the file, where
1446 * Emacs will automagically detect them.
1447 * ---------------------------------------------------------------------
1450 * indent-tabs-mode: t
1454 * vim:noexpandtab:sw=4:ts=4: