1 /* src/threads/lock.cpp - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007, 2008
4 CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
6 This file is part of CACAO.
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2, or (at
11 your option) any later version.
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
36 #include "mm/memory.hpp"
38 #include "native/llni.h"
40 #include "threads/atomic.hpp"
41 #include "threads/lock.hpp"
42 #include "threads/mutex.hpp"
43 #include "threads/threadlist.hpp"
44 #include "threads/thread.hpp"
46 #include "toolbox/list.hpp"
48 #include "vm/exceptions.hpp"
49 #include "vm/finalizer.hpp"
50 #include "vm/global.h"
51 #include "vm/options.h"
52 #include "vm/string.hpp"
55 #if defined(ENABLE_STATISTICS)
56 # include "vm/statistics.h"
59 #if defined(ENABLE_VMLOG)
60 #include <vmlog_cacao.h>
63 #if defined(ENABLE_JVMTI)
64 #include "native/jvmti/cacaodbg.h"
67 #if defined(ENABLE_GC_BOEHM)
68 # include "mm/boehm-gc/include/gc.h"
72 /* debug **********************************************************************/
75 # define DEBUGLOCKS(format) \
77 if (opt_DebugLocks) { \
82 # define DEBUGLOCKS(format)
86 /******************************************************************************/
88 /******************************************************************************/
90 /* number of lock records in the first pool allocated for a thread */
91 #define LOCK_INITIAL_LOCK_RECORDS 8
93 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
96 /******************************************************************************/
97 /* MACROS FOR THIN/FAT LOCKS */
98 /******************************************************************************/
100 /* We use a variant of the tasuki locks described in the paper
102 * Tamiya Onodera, Kiyokuni Kawachiya
103 * A Study of Locking Objects with Bimodal Fields
104 * Proceedings of the ACM OOPSLA '99, pp. 223-237
107 * The underlying thin locks are a variant of the thin locks described in
109 * Bacon, Konuru, Murthy, Serrano
110 * Thin Locks: Featherweight Synchronization for Java
111 * Proceedings of the ACM Conference on Programming Language Design and
112 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
115 * In thin lock mode the lockword looks like this:
117 * ,----------------------,-----------,---,
118 * | thread ID | count | 0 |
119 * `----------------------'-----------'---'
121 * thread ID......the 'index' of the owning thread, or 0
122 * count..........number of times the lock has been entered minus 1
123 * 0..............the shape bit is 0 in thin lock mode
125 * In fat lock mode it is basically a lock_record_t *:
127 * ,----------------------------------,---,
128 * | lock_record_t * (without LSB) | 1 |
129 * `----------------------------------'---'
131 * 1..............the shape bit is 1 in fat lock mode
134 /* global variables ***********************************************************/
136 /* hashtable mapping objects to lock records */
137 static lock_hashtable_t lock_hashtable;
140 /******************************************************************************/
142 /******************************************************************************/
144 static void lock_hashtable_init(void);
146 static inline Lockword* lock_lockword_get(java_handle_t* o);
147 static void lock_record_enter(threadobject *t, lock_record_t *lr);
148 static void lock_record_exit(threadobject *t, lock_record_t *lr);
149 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
150 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
153 /*============================================================================*/
154 /* INITIALIZATION OF DATA STRUCTURES */
155 /*============================================================================*/
158 /* lock_init *******************************************************************
160    Initialize global data for locking.
162 *******************************************************************************/
/* NOTE(review): the function header line of lock_init() is not visible in
   this chunk; the statements below are its body. */
166 	/* initialize lock hashtable */
168 	lock_hashtable_init();
170 #if defined(ENABLE_VMLOG)
	/* vmlog tracing support is optional; this hook exists only when
	   ENABLE_VMLOG is defined */
171 	vmlog_cacao_init_lock();
176 /* lock_record_new *************************************************************
178    Allocate a lock record.
180 *******************************************************************************/
182 static lock_record_t *lock_record_new(void)
/* Allocates a fat-lock record on the C heap and initializes its waiter list
   and mutex.  Ownership: the record must be released with lock_record_free().
   NOTE(review): some lines (braces, declaration of `lr`, return) are elided
   from this view. */
186 	/* allocate the data structure on the C heap */
188 	lr = NEW(lock_record_t);
190 #if defined(ENABLE_STATISTICS)
	/* bookkeeping only; compiled out unless ENABLE_STATISTICS is set */
192 		size_lock_record += sizeof(lock_record_t);
195 	/* initialize the members */
	/* heap-allocated list of threads waiting on this monitor;
	   freed in lock_record_free() */
200 	lr->waiters = new List<threadobject*>();
202 #if defined(ENABLE_GC_CACAO)
203 	/* register the lock object as weak reference with the GC */
205 	gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
208 	// Initialize the mutex.
209 	lr->mutex = new Mutex();
211 	DEBUGLOCKS(("[lock_record_new   : lr=%p]", (void *) lr));
217 /* lock_record_free ************************************************************
222        lr....lock record to free
224 *******************************************************************************/
226 static void lock_record_free(lock_record_t *lr)
/* Tears down a record created by lock_record_new(): the mutex, the GC weak
   reference (exact GC only), the waiters list, and finally the record
   itself.  Mirrors lock_record_new() step for step. */
228 	DEBUGLOCKS(("[lock_record_free  : lr=%p]", (void *) lr));
230 	// Destroy the mutex.
233 #if defined(ENABLE_GC_CACAO)
234 	/* unregister the lock object reference with the GC */
236 	gc_weakreference_unregister(&(lr->object));
239 	// Free the waiters list.
242 	/* Free the data structure. */
244 	FREE(lr, lock_record_t);
246 #if defined(ENABLE_STATISTICS)
	/* keep the statistics symmetric with lock_record_new() */
248 		size_lock_record -= sizeof(lock_record_t);
253 /*============================================================================*/
254 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
255 /*============================================================================*/
257 /* lock_hashtable_init *********************************************************
259    Initialize the global hashtable mapping objects to lock records.
261 *******************************************************************************/
263 static void lock_hashtable_init(void)
265 	lock_hashtable.mutex = new Mutex();
	/* start with a prime-sized, zeroed bucket array
	   (LOCK_INITIAL_HASHTABLE_SIZE, see above) */
267 	lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
268 	lock_hashtable.entries = 0;
269 	lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);
271 #if defined(ENABLE_STATISTICS)
273 		size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
276 	MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
280 /* lock_hashtable_grow *********************************************************
282    Grow the lock record hashtable to about twice its current size and
285 *******************************************************************************/
287 /* must be called with hashtable mutex locked */
288 static void lock_hashtable_grow(void)
292 	lock_record_t **oldtable;
293 	lock_record_t **newtable;
300 	/* allocate a new table */
302 	oldsize = lock_hashtable.size;
303 	newsize = oldsize*2 + 1; /* XXX should use prime numbers */
305 	DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
307 	oldtable = lock_hashtable.ptr;
308 	newtable = MNEW(lock_record_t *, newsize);
310 #if defined(ENABLE_STATISTICS)
312 		size_lock_hashtable += sizeof(lock_record_t *) * newsize;
315 	MZERO(newtable, lock_record_t *, newsize);
317 	/* rehash the entries */
	/* walk every old chain and re-insert each record at the head of its
	   new bucket (chain order is not preserved, which is fine here) */
319 	for (i = 0; i < oldsize; i++) {
324 			h = heap_hashcode(lr->object);
325 			newslot = h % newsize;
327 			lr->hashlink = newtable[newslot];
328 			newtable[newslot] = lr;
334 	/* replace the old table */
336 	lock_hashtable.ptr  = newtable;
337 	lock_hashtable.size = newsize;
339 	MFREE(oldtable, lock_record_t *, oldsize);
341 #if defined(ENABLE_STATISTICS)
343 		size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
348 /* lock_hashtable_cleanup ******************************************************
350    Removes (and frees) lock records which have a cleared object reference
351    from the hashtable. The locked object was reclaimed by the GC.
353 *******************************************************************************/
355 #if defined(ENABLE_GC_CACAO)
356 void lock_hashtable_cleanup(void)
366 	/* lock the hashtable */
	/* NOTE(review): this uses the C-style Mutex_lock()/Mutex_unlock() API
	   while the rest of this file calls the C++ mutex->lock()/unlock()
	   methods on the same member -- confirm both spellings are valid. */
368 	Mutex_lock(lock_hashtable.mutex);
370 	/* search the hashtable for cleared references */
372 	for (i = 0; i < lock_hashtable.size; i++) {
373 		lr = lock_hashtable.ptr[i];
379 			/* remove lock records with cleared references */
			/* the GC nulled lr->object via the weak reference
			   registered in lock_record_new() */
381 			if (lr->object == NULL) {
383 				/* unlink the lock record from the hashtable */
				/* head-of-chain vs. middle-of-chain unlink */
386 					lock_hashtable.ptr[i] = next;
388 					prev->hashlink = next;
390 				/* free the lock record */
392 				lock_record_free(lr);
402 	/* unlock the hashtable */
404 	Mutex_unlock(lock_hashtable.mutex);
409 /* lock_hashtable_get **********************************************************
411    Find the lock record for the given object.  If it does not exists,
412    yet, create it and enter it in the hashtable.
415       o....the object to look up
418       the lock record to use for this object
420 *******************************************************************************/
422 #if defined(ENABLE_GC_BOEHM)
423 static void lock_record_finalizer(void *object, void *p);
426 static lock_record_t *lock_hashtable_get(java_handle_t* o)
428 	// This function is inside a critical section.
429 	GCCriticalSection cs;
	// Fast path: if the lock is already inflated, the lockword itself
	// points at the record -- no hashtable access needed.
434 	Lockword* lockword = lock_lockword_get(o);
436 	if (lockword->is_fat_lock())
437 		return lockword->get_fat_lock();
439 	// Lock the hashtable.
	// The table mutex also serializes concurrent creators, so at most one
	// record per object can ever be inserted.
440 	lock_hashtable.mutex->lock();
442 	/* lookup the lock record in the hashtable */
444 	slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
445 	lr   = lock_hashtable.ptr[slot];
447 	for (; lr != NULL; lr = lr->hashlink) {
448 		if (lr->object == LLNI_DIRECT(o))
453 		/* not found, we must create a new one */
455 		lr = lock_record_new();
457 		lr->object = LLNI_DIRECT(o);
459 #if defined(ENABLE_GC_BOEHM)
460 		/* register new finalizer to clean up the lock record */
462 		GC_finalization_proc ofinal = 0;
463 		GC_REGISTER_FINALIZER_UNREACHABLE(LLNI_DIRECT(o), lock_record_finalizer, 0, &ofinal, 0);
465 		/* There was a finalizer -- reinstall it. We do not want to disrupt the
466 		   normal finalizer operation. We hold the monitor on this object, so
467 		   this is thread-safe. */
469 			GC_REGISTER_FINALIZER_NO_ORDER(LLNI_DIRECT(o), ofinal, 0, 0, 0);
472 		/* enter it in the hashtable */
474 		lr->hashlink             = lock_hashtable.ptr[slot];
475 		lock_hashtable.ptr[slot] = lr;
476 		lock_hashtable.entries++;
478 		/* check whether the hash should grow */
		/* grow once the load factor exceeds 4/3 (entries*3 > size*4) */
480 		if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
481 			lock_hashtable_grow();
485 	// Unlock the hashtable.
486 	lock_hashtable.mutex->unlock();
488 	/* return the new lock record */
493 /* lock_schedule_lockrecord_removal ********************************************
495    Gives the locking system a chance to schedule the removal of an unused lock
496    record. This function is called after an object's finalizer has run.
499       o....the object which has been finalized
501 *******************************************************************************/
503 #if defined(ENABLE_GC_BOEHM)
504 void lock_schedule_lockrecord_removal(java_handle_t *o)
506 	Lockword* lockword = lock_lockword_get(o);
	/* only fat (inflated) locks have a record to clean up */
507 	if (!lockword->is_fat_lock())
508 		/* there is no lock record */
511 	/* register new finalizer to clean up the lock record */
	/* re-arm the unreachable-finalizer so lock_record_finalizer() runs
	   once the object is actually collected */
512 	GC_REGISTER_FINALIZER_UNREACHABLE(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
517 /* lock_hashtable_remove *******************************************************
519    Remove the lock record for the given object from the hashtable
520    and free it afterwards.
523       t....the current thread
524       o....the object to look up
526 *******************************************************************************/
528 static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
532 	lock_record_t *tmplr;
534 	// Lock the hashtable.
535 	lock_hashtable.mutex->lock();
537 	/* get lock record */
539 	Lockword* lockword = lock_lockword_get(o);
	/* the caller guarantees the lock is inflated, i.e. a record exists */
542 	assert(lockword->is_fat_lock());
544 	lr = lockword->get_fat_lock();
546 	/* remove the lock-record from the hashtable */
548 	slot  = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
549 	tmplr = lock_hashtable.ptr[slot];
552 		/* special handling if it's the first in the chain */
554 		lock_hashtable.ptr[slot] = lr->hashlink;
		/* otherwise scan the chain for the predecessor and unlink */
557 		for (; tmplr != NULL; tmplr = tmplr->hashlink) {
558 			if (tmplr->hashlink == lr) {
559 				tmplr->hashlink = lr->hashlink;
		/* the record must have been found in its chain */
564 		assert(tmplr != NULL);
567 	/* decrease entry count */
569 	lock_hashtable.entries--;
571 	// Unlock the hashtable.
572 	lock_hashtable.mutex->unlock();
574 	/* free the lock record */
576 	lock_record_free(lr);
580 /* lock_record_finalizer *******************************************************
582    XXX Remove me for exact GC.
584 *******************************************************************************/
/* Boehm-GC finalizer callback (registered in lock_hashtable_get /
   lock_schedule_lockrecord_removal): drops the now-dead object's lock
   record from the hashtable. */
586 static void lock_record_finalizer(void *object, void *p)
591 	o = (java_handle_t *) object;
593 #if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
594 	/* XXX this is only a dirty hack to make Boehm work with handles */
596 	o = LLNI_WRAP((java_object_t *) o);
599 	LLNI_class_get(o, c);
	/* optional debug trace, enabled with -XX:+DebugFinalizer */
602 	if (opt_DebugFinalizer) {
604 		log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
611 	/* remove the lock-record entry from the hashtable and free it */
613 	lock_hashtable_remove(THREADOBJECT, o);
617 /*============================================================================*/
618 /* LOCKING ALGORITHM */
619 /*============================================================================*/
622 /* lock_lockword_get ***********************************************************
624    Get the lockword for the given object.
627       o............the object
629 *******************************************************************************/
631 static inline Lockword* lock_lockword_get(java_handle_t* o)
633 #if defined(ENABLE_GC_CACAO)
	/* with the exact GC the raw object pointer may only be touched inside
	   a GC critical section */
635 	assert(GCCriticalSection::inside() == true);
	/* the lockword lives directly in the object header */
638 	return &(LLNI_DIRECT(o)->lockword);
642 /* lock_record_enter ***********************************************************
644    Enter the lock represented by the given lock record.
647       t.................the current thread
648       lr................the lock record
650 *******************************************************************************/
/* NOTE(review): the body is not visible in this chunk; presumably it
   acquires lr->mutex and records t as owner -- confirm against the full
   source. */
652 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
659 /* lock_record_exit ************************************************************
661    Release the lock represented by the given lock record.
664       t.................the current thread
665       lr................the lock record
668       The current thread must own the lock represented by this lock record.
669       This is NOT checked by this function!
671 *******************************************************************************/
/* NOTE(review): the body is not visible in this chunk; presumably it clears
   the owner and releases lr->mutex -- confirm against the full source. */
673 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
680 /* lock_inflate ****************************************************************
682    Inflate the lock of the given object. This may only be called by the
683    owner of the monitor of the object.
686       o............the object of which to inflate the lock
687       lr...........the lock record to install. The current thread must
688                    own the lock of this lock record!
691       The current thread must be the owner of this object's monitor AND
692       of the lock record's lock!
694 *******************************************************************************/
696 static void lock_inflate(java_handle_t *o, lock_record_t *lr)
698 	Lockword* lockword = lock_lockword_get(o);
	/* switch the lockword from thin to fat mode, installing lr */
699 	lockword->inflate(lr);
703 /* sable_flc_waiting ***********************************************************
705    Enqueue the current thread on another thread's FLC list. The function
706    blocks until the lock has been inflated by the owning thread.
708    The algorithm used to be an almost literal copy from SableVM. The
709    superfluous list traversal in the waiting loop has been removed since,
713       lockword.....the object's lockword as seen at the first locking attempt
714       t............the current thread
715       o............the object of which to enter the monitor
717 *******************************************************************************/
719 static void sable_flc_waiting(Lockword *lockword, threadobject *t, java_handle_t *o)
722 	threadobject *t_other;
	/* identify the thread currently holding the thin lock */
725 	index = lockword->get_thin_lock_thread_index();
726 	t_other = ThreadList::get_thread_by_index(index);
728 	// The lockword could have changed during our way here. If the
729 	// thread index is zero, the lock got unlocked and we simply
732 		/* failure, TODO: add statistics */
	/* all FLC state of t_other is protected by its flc_lock */
735 	t_other->flc_lock->lock();
736 	old_flc = t_other->flc_bit;
737 	t_other->flc_bit = true;
739 	DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d", t->index, t_other->index));
741 	// Set FLC bit first, then read the lockword again.
	// The barrier orders our flc_bit store before the re-read below,
	// pairing with the barrier in lock_monitor_exit.
742 	Atomic::memory_barrier();
744 	lockword = lock_lockword_get(o);
746 	/* Lockword is still the way it was seen before */
747 	if (lockword->is_thin_lock() && (lockword->get_thin_lock_thread_index() == index))
750 		/* Add tuple (t, o) to the other thread's FLC list */
752 		t->flc_next = t_other->flc_list;
753 		t_other->flc_list = t;
754 		if (t->flc_next == 0)
755 			t_other->flc_tail = t;
756 		f = t_other->flc_tail;
758 		// The other thread will clear flc_object.
		// Loop guards against spurious condition-variable wakeups.
759 		while (t->flc_object)
761 			// We are not cleared yet -- the other thread cannot have seen
763 			assert(t_other->flc_bit);
765 			// Wait until another thread sees the flc bit and notifies
767 			t->flc_cond->wait(t_other->flc_lock);
	/* restore the flc bit to what it was when we arrived */
773 	t_other->flc_bit = old_flc;
775 	t_other->flc_lock->unlock();
778 /* notify_flc_waiters **********************************************************
780    Traverse the thread's FLC list and inflate all corresponding locks. Notify
781    the associated threads as well.
784       t............the current thread
785       o............the object currently being unlocked
787 *******************************************************************************/
789 static void notify_flc_waiters(threadobject *t, java_handle_t *o)
791 	threadobject *current;
795 	current = t->flc_list;
		/* skip waiters queued on a different object */
798 		if (current->flc_object != o)
800 			/* The object has to be inflated so the other threads can properly
803 			// Only if not already inflated.
804 			Lockword* lockword = lock_lockword_get(current->flc_object);
805 			if (lockword->is_thin_lock()) {
806 				lock_record_t *lr = lock_hashtable_get(current->flc_object);
807 				lock_record_enter(t, lr);
809 				DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
810 							t->index, (void*) current->flc_object, (void*) lr));
812 				lock_inflate(current->flc_object, lr);
816 			// Wake the waiting threads.
			// Clearing flc_object releases the spin in sable_flc_waiting.
817 			current->flc_cond->broadcast();
818 			current->flc_object = NULL;
820 		current = current->flc_next;
826 	t->flc_lock->unlock();
829 /* lock_monitor_enter **********************************************************
831    Acquire the monitor of the given object. If the current thread already
832    owns the monitor, the lock counter is simply increased.
834    This function blocks until it can acquire the monitor.
837       t............the current thread
838       o............the object of which to enter the monitor
841       true.........the lock has been successfully acquired
842       false........an exception has been thrown
844 *******************************************************************************/
846 bool lock_monitor_enter(java_handle_t *o)
848 	// This function is inside a critical section.
849 	GCCriticalSection cs;
	// A null monitor target throws NullPointerException per the JVM spec.
852 		exceptions_throw_nullpointerexception();
856 	threadobject* t = thread_get_current();
	/* thinlock is this thread's pre-built thin-lock word (index, count 0) */
858 	uintptr_t thinlock = t->thinlock;
861 	// Most common case: try to thin-lock an unlocked object.
862 	Lockword* lockword = lock_lockword_get(o);
863 	bool result = lockword->lock(thinlock);
865 	if (result == true) {
866 		// Success, we locked it.
867 		// NOTE: The Java Memory Model requires a memory barrier here.
868 #if defined(CAS_PROVIDES_FULL_BARRIER) && CAS_PROVIDES_FULL_BARRIER
869 		// On some architectures, the CAS (hidden in the
870 		// lockword->lock call above), already provides this barrier,
871 		// so we only need to inform the compiler.
872 		Atomic::instruction_barrier();
874 		Atomic::memory_barrier();
879 	// Next common case: recursive lock with small recursion count.
880 	// NOTE: We don't have to worry about stale values here, as any
881 	// stale value will indicate another thread holding the lock (or
882 	// an inflated lock).
883 	if (lockword->get_thin_lock_without_count() == thinlock) {
884 		// We own this monitor. Check the current recursion count.
885 		if (lockword->is_max_thin_lock_count() == false) {
886 			// The recursion count is low enough.
887 			lockword->increase_thin_lock_count();
889 			// Success, we locked it.
893 			// Recursion count overflow.
			// The thin count field is saturated: inflate to a fat lock
			// so recursion can be tracked in lr->count instead.
894 			lock_record_t* lr = lock_hashtable_get(o);
895 			lock_record_enter(t, lr);
899 			notify_flc_waiters(t, o);
905 	// The lock is either contented or fat.
906 	if (lockword->is_fat_lock()) {
907 		lock_record_t* lr = lockword->get_fat_lock();
909 		// Check for recursive entering.
910 		if (lr->owner == t) {
915 		// Acquire the mutex of the lock record.
916 		lock_record_enter(t, lr);
		/* freshly acquired fat lock: recursion count must be zero */
919 		assert(lr->count == 0);
923 	/****** inflation path ******/
	/* contended thin lock: block until the owner inflates it for us */
925 #if defined(ENABLE_JVMTI)
926 	/* Monitor Contended Enter */
927 	jvmti_MonitorContendedEntering(false, o);
930 	sable_flc_waiting(lockword, t, o);
932 #if defined(ENABLE_JVMTI)
933 	/* Monitor Contended Entered */
934 	jvmti_MonitorContendedEntering(true, o);
940 /* lock_monitor_exit ***********************************************************
942    Decrement the counter of a (currently owned) monitor. If the counter
943    reaches zero, release the monitor.
945    If the current thread is not the owner of the monitor, an
946    IllegalMonitorState exception is thrown.
949       t............the current thread
950       o............the object of which to exit the monitor
953       true.........everything ok,
954       false........an exception has been thrown
956 *******************************************************************************/
958 bool lock_monitor_exit(java_handle_t* o)
960 	// This function is inside a critical section.
961 	GCCriticalSection cs;
	// Unlocking null throws NullPointerException.
964 		exceptions_throw_nullpointerexception();
968 	threadobject* t = thread_get_current();
970 	uintptr_t thinlock = t->thinlock;
972 	// We don't have to worry about stale values here, as any stale
973 	// value will indicate that we don't own the lock.
974 	Lockword* lockword = lock_lockword_get(o);
976 	// Most common case: we release a thin lock that we hold once.
977 	if (lockword->get_thin_lock() == thinlock) {
978 		// Memory barrier for Java Memory Model.
979 		Atomic::write_memory_barrier();
981 		// Memory barrier for FLC bit testing.
		// Pairs with the barrier in sable_flc_waiting: our lockword
		// store must be visible before we test the flc bit.
982 		Atomic::memory_barrier();
984 		/* check if there has been a flat lock contention on this object */
987 			DEBUGLOCKS(("thread %d saw flc bit", t->index));
989 			/* there has been a contention on this thin lock */
990 			notify_flc_waiters(t, o);
996 	// Next common case: we release a recursive lock, count > 0.
997 	if (lockword->get_thin_lock_without_count() == thinlock) {
998 		lockword->decrease_thin_lock_count();
1002 	// Either the lock is fat, or we don't hold it at all.
1003 	if (lockword->is_fat_lock()) {
1004 		lock_record_t* lr = lockword->get_fat_lock();
1006 		// Check if we own this monitor.
1007 		// NOTE: We don't have to worry about stale values here, as
1008 		// any stale value will be != t and thus fail this check.
1009 		if (lr->owner != t) {
1010 			exceptions_throw_illegalmonitorstateexception();
1014 		/* { the current thread `t` owns the lock record `lr` on object `o` } */
1016 		if (lr->count != 0) {
1017 			// We had locked this one recursively. Just decrement, it
1018 			// will still be locked.
1023 		// Unlock this lock record.
1024 		lock_record_exit(t, lr);
1028 	// Legal thin lock cases have been handled above, so this is an
	// attempt to unlock a monitor the current thread does not hold.
1030 	exceptions_throw_illegalmonitorstateexception();
1036 /* lock_record_add_waiter ******************************************************
1038    Add a thread to the list of waiting threads of a lock record.
1041       lr...........the lock record
1042       thread.......the thread to add
1044 *******************************************************************************/
1046 static void lock_record_add_waiter(lock_record_t *lr, threadobject* t)
1048 	// Add the thread as last entry to waiters list.
1049 	lr->waiters->push_back(t);
1051 #if defined(ENABLE_STATISTICS)
	/* account for the list node; mirrored in lock_record_remove_waiter */
1053 		size_lock_waiter += sizeof(threadobject*);
1058 /* lock_record_remove_waiter ***************************************************
1060    Remove a thread from the list of waiting threads of a lock record.
1063       lr...........the lock record
1064       t............the current thread
1067       The current thread must be the owner of the lock record.
1069 *******************************************************************************/
1071 static void lock_record_remove_waiter(lock_record_t *lr, threadobject* t)
1073 	// Remove the thread from the waiters.
1074 	lr->waiters->remove(t);
1076 #if defined(ENABLE_STATISTICS)
	/* mirror of the accounting in lock_record_add_waiter */
1078 		size_lock_waiter -= sizeof(threadobject*);
1083 /* lock_record_wait ************************************************************
1085    Wait on a lock record for a given (maximum) amount of time.
1088       t............the current thread
1089       lr...........the lock record
1090       millis.......milliseconds of timeout
1091       nanos........nanoseconds of timeout
1094       true.........we have been interrupted,
1095       false........everything ok
1098       The current thread must be the owner of the lock record.
1099       This is NOT checked by this function!
1101 *******************************************************************************/
/* NOTE(review): the parameter is named `thread` although the banner above
   documents it as `t`. */
1103 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1106 	bool wasinterrupted = false;
1108 	DEBUGLOCKS(("[lock_record_wait  : lr=%p, t=%p, millis=%lld, nanos=%d]",
1109 				lr, thread, millis, nanos));
1111 	/* { the thread t owns the fat lock record lr on the object o } */
1113 	/* register us as waiter for this object */
1115 	lock_record_add_waiter(lr, thread);
1117 	/* remember the old lock count */
	/* Object.wait releases the monitor completely, even if entered
	   recursively; the count is restored after re-acquisition below */
1119 	lockcount = lr->count;
1121 	/* unlock this record */
1124 	lock_record_exit(thread, lr);
1126 	/* wait until notified/interrupted/timed out */
1128 	threads_wait_with_timeout_relative(thread, millis, nanos);
1130 	/* re-enter the monitor */
1132 	lock_record_enter(thread, lr);
1134 	/* remove us from the list of waiting threads */
1136 	lock_record_remove_waiter(lr, thread);
1138 	/* restore the old lock count */
1140 	lr->count = lockcount;
1142 	/* We can only be signaled OR interrupted, not both. If both flags
1143 	   are set, reset only signaled and leave the thread in
1144 	   interrupted state. Otherwise, clear both. */
1146 	if (!thread->signaled) {
1147 		wasinterrupted = thread->interrupted;
1148 		thread->interrupted = false;
1151 	thread->signaled = false;
1153 	/* return if we have been interrupted */
1155 	return wasinterrupted;
1159 /* lock_monitor_wait ***********************************************************
1161    Wait on an object for a given (maximum) amount of time.
1164       t............the current thread
1165       o............the object
1166       millis.......milliseconds of timeout
1167       nanos........nanoseconds of timeout
1170       The current thread must be the owner of the object's monitor.
1172 *******************************************************************************/
1174 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
1178 	Lockword* lockword = lock_lockword_get(o);
1180 	// Check if we own this monitor.
1181 	// NOTE: We don't have to worry about stale values here, as any
1182 	// stale value will fail this check.
1183 	if (lockword->is_fat_lock()) {
1184 		lr = lockword->get_fat_lock();
1186 		if (lr->owner != t) {
1187 			exceptions_throw_illegalmonitorstateexception();
1192 		// It's a thin lock.
1193 		if (lockword->get_thin_lock_without_count() != t->thinlock) {
1194 			exceptions_throw_illegalmonitorstateexception();
		// Waiting requires a condition variable, which only the fat
		// lock record provides -- inflate the thin lock first.
1198 		// Get the lock-record.
1199 		lr = lock_hashtable_get(o);
1200 		lock_record_enter(t, lr);
1202 		// Inflate this lock.
1203 		lockword->inflate(lr);
1205 		notify_flc_waiters(t, o);
1208 	/* { the thread t owns the fat lock record lr on the object o } */
1210 	if (lock_record_wait(t, lr, millis, nanos))
1211 		exceptions_throw_interruptedexception();
1215 /* lock_record_notify **********************************************************
1217    Notify one thread or all threads waiting on the given lock record.
1220       t............the current thread
1221       lr...........the lock record
1222       one..........if true, only notify one thread
1225       The current thread must be the owner of the lock record.
1226       This is NOT checked by this function!
1228 *******************************************************************************/
1230 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1232 #if defined(ENABLE_GC_CACAO)
	/* this function must never run inside a GC critical section
	   (it blocks on per-thread mutexes) */
1234 	assert(GCCriticalSection::inside() == false);
1237 	// { The thread t owns the fat lock record lr on the object o }
1239 	for (List<threadobject*>::iterator it = lr->waiters->begin(); it != lr->waiters->end(); it++) {
1240 		threadobject* waiter = *it;
1242 		// We must skip threads which have already been notified. They
1243 		// will remove themselves from the list.
1244 		if (waiter->signaled)
1247 		// Enter the wait-mutex.
1248 		waiter->waitmutex->lock();
1250 		DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, one=%d]", lr, t, waiter, one));
1252 		// Signal the waiter.
1253 		waiter->waitcond->signal();
1255 		// Mark the thread as signaled.
		// Set while holding waitmutex so the waiter observes it
		// consistently with the signal.
1256 		waiter->signaled = true;
1258 		// Leave the wait-mutex.
1259 		waiter->waitmutex->unlock();
1261 		// If we should only wake one thread, we are done.
1268 /* lock_monitor_notify *********************************************************
1270    Notify one thread or all threads waiting on the given object.
1273       t............the current thread
1274       o............the object
1275       one..........if true, only notify one thread
1278       The current thread must be the owner of the object's monitor.
1280 *******************************************************************************/
1282 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1284 	lock_record_t* lr = NULL;
1287 		// This scope is inside a critical section.
		// The critical section is scoped so lock_record_notify (which
		// must run outside it) can be called after the lookup.
1288 		GCCriticalSection cs;
1290 		Lockword* lockword = lock_lockword_get(o);
1292 		// Check if we own this monitor.
1293 		// NOTE: We don't have to worry about stale values here, as any
1294 		// stale value will fail this check.
1296 		if (lockword->is_fat_lock()) {
1297 			lr = lockword->get_fat_lock();
1299 			if (lr->owner != t) {
1300 				exceptions_throw_illegalmonitorstateexception();
1305 			// It's a thin lock.
1306 			if (lockword->get_thin_lock_without_count() != t->thinlock) {
1307 				exceptions_throw_illegalmonitorstateexception();
1311 			// No thread can wait on a thin lock, so there's nothing to do.
1316 	// { The thread t owns the fat lock record lr on the object o }
1317 	lock_record_notify(t, lr, one);
1322 /*============================================================================*/
1323 /* INQUIRY FUNCIONS */
1324 /*============================================================================*/
1327 /* lock_is_held_by_current_thread **********************************************
1329    Return true if the current thread owns the monitor of the given object.
1332       o............the object
1335       true, if the current thread holds the lock of this object.
1337 *******************************************************************************/
1339 bool lock_is_held_by_current_thread(java_handle_t *o)
1341 	// This function is inside a critical section.
1342 	GCCriticalSection cs;
1344 	// Check if we own this monitor.
1345 	// NOTE: We don't have to worry about stale values here, as any
1346 	// stale value will fail this check.
1347 	threadobject* t = thread_get_current();
1348 	Lockword* lockword = lock_lockword_get(o);
1350 	if (lockword->is_fat_lock()) {
		// Fat lock: ownership is recorded in the lock record.
1352 		lock_record_t* lr = lockword->get_fat_lock();
1353 		return (lr->owner == t);
1356 		// It's a thin lock.
		// Thin lock: compare the thread-index part of the lockword
		// against our own pre-built thin-lock word.
1357 		return (lockword->get_thin_lock_without_count() == t->thinlock);
1363 /*============================================================================*/
1364 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1365 /*============================================================================*/
1368 /* lock_wait_for_object ********************************************************
1370    Wait for the given object.
1373       o............the object
1374       millis.......milliseconds to wait
1375       nanos........nanoseconds to wait
1377 *******************************************************************************/
/* Convenience wrapper: waits on `o` on behalf of the current thread
   (Object.wait semantics via lock_monitor_wait). */
1379 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1381 	threadobject *thread;
1383 	thread = THREADOBJECT;
1385 	lock_monitor_wait(thread, o, millis, nanos);
1389 /* lock_notify_object **********************************************************
1391    Notify one thread waiting on the given object.
1394       o............the object
1396 *******************************************************************************/
/* Convenience wrapper for Object.notify(): `one` is true, so at most one
   waiter is woken. */
1398 void lock_notify_object(java_handle_t *o)
1400 	threadobject *thread;
1402 	thread = THREADOBJECT;
1404 	lock_monitor_notify(thread, o, true);
1408 /* lock_notify_all_object ******************************************************
1410    Notify all threads waiting on the given object.
1413       o............the object
1415 *******************************************************************************/
/* Convenience wrapper for Object.notifyAll(): `one` is false, so every
   waiter is woken. */
1417 void lock_notify_all_object(java_handle_t *o)
1419 	threadobject *thread;
1421 	thread = THREADOBJECT;
1423 	lock_monitor_notify(thread, o, false);
1428 * These are local overrides for various environment variables in Emacs.
1429 * Please do not remove this and leave it at the end of the file, where
1430 * Emacs will automagically detect them.
1431 * ---------------------------------------------------------------------
1434 * indent-tabs-mode: t
1438 * vim:noexpandtab:sw=4:ts=4: