1 /* src/threads/lock.cpp - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007, 2008
4 CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
6 This file is part of CACAO.
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2, or (at
11 your option) any later version.
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
36 #include "mm/memory.hpp"
38 #include "native/llni.h"
40 #include "threads/atomic.hpp"
41 #include "threads/lock.hpp"
42 #include "threads/mutex.hpp"
43 #include "threads/threadlist.hpp"
44 #include "threads/thread.hpp"
46 #include "toolbox/list.hpp"
48 #include "vm/exceptions.hpp"
49 #include "vm/finalizer.hpp"
50 #include "vm/global.h"
51 #include "vm/options.h"
52 #include "vm/string.hpp"
55 #if defined(ENABLE_STATISTICS)
56 # include "vm/statistics.h"
59 #if defined(ENABLE_VMLOG)
60 #include <vmlog_cacao.h>
63 #if defined(ENABLE_JVMTI)
64 #include "native/jvmti/cacaodbg.h"
67 #if defined(ENABLE_GC_BOEHM)
68 # include "mm/boehm-gc/include/gc.h"
/* DEBUGLOCKS(format) emits a lock-trace message guarded by the runtime
   opt_DebugLocks flag in debug builds, and expands to nothing otherwise. */
72 /* debug **********************************************************************/
75 # define DEBUGLOCKS(format) \
77 if (opt_DebugLocks) { \
82 # define DEBUGLOCKS(format)
86 /******************************************************************************/
88 /******************************************************************************/
90 /* number of lock records in the first pool allocated for a thread */
91 #define LOCK_INITIAL_LOCK_RECORDS 8
/* initial size of the object -> lock-record hashtable (see lock_hashtable_init) */
93 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
96 /******************************************************************************/
97 /* MACROS FOR THIN/FAT LOCKS */
98 /******************************************************************************/
100 /* We use a variant of the tasuki locks described in the paper
102 * Tamiya Onodera, Kiyokuni Kawachiya
103 * A Study of Locking Objects with Bimodal Fields
104 * Proceedings of the ACM OOPSLA '99, pp. 223-237
107 * The underlying thin locks are a variant of the thin locks described in
109 * Bacon, Konuru, Murthy, Serrano
110 * Thin Locks: Featherweight Synchronization for Java
111 * Proceedings of the ACM Conference on Programming Language Design and
112 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
115 * In thin lock mode the lockword looks like this:
117 * ,----------------------,-----------,---,
118 * | thread ID | count | 0 |
119 * `----------------------'-----------'---'
121 * thread ID......the 'index' of the owning thread, or 0
122 * count..........number of times the lock has been entered minus 1
123 * 0..............the shape bit is 0 in thin lock mode
125 * In fat lock mode it is basically a lock_record_t *:
127 * ,----------------------------------,---,
128 * | lock_record_t * (without LSB) | 1 |
129 * `----------------------------------'---'
131 * 1..............the shape bit is 1 in fat lock mode
134 /* global variables ***********************************************************/
136 /* hashtable mapping objects to lock records */
/* All accesses are serialized via lock_hashtable.mutex (see the helpers below). */
137 static lock_hashtable_t lock_hashtable;
140 /******************************************************************************/
142 /******************************************************************************/
/* forward declarations for the locking algorithm implemented below */
144 static void lock_hashtable_init(void);
146 static inline Lockword* lock_lockword_get(java_handle_t* o);
147 static void lock_record_enter(threadobject *t, lock_record_t *lr);
148 static void lock_record_exit(threadobject *t, lock_record_t *lr);
149 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
150 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
153 /*============================================================================*/
154 /* INITIALIZATION OF DATA STRUCTURES */
155 /*============================================================================*/
158 /* lock_init *******************************************************************
160 Initialize global data for locking.
162 *******************************************************************************/
166 /* initialize lock hashtable */
168 lock_hashtable_init();
/* let vmlog hook into the locking subsystem (tracing builds only) */
170 #if defined(ENABLE_VMLOG)
171 vmlog_cacao_init_lock();
176 /* lock_record_new *************************************************************
178 Allocate a lock record.
   The returned record is released by lock_record_free().
180 *******************************************************************************/
182 static lock_record_t *lock_record_new(void)
186 /* allocate the data structure on the C heap */
188 lr = NEW(lock_record_t);
190 #if defined(ENABLE_STATISTICS)
192 size_lock_record += sizeof(lock_record_t);
195 /* initialize the members */
/* threads blocked in Object.wait() on this monitor (see lock_record_add_waiter) */
200 lr->waiters = new List<threadobject*>();
202 #if defined(ENABLE_GC_CACAO)
203 /* register the lock object as weak reference with the GC */
205 gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
208 // Initialize the mutex.
209 lr->mutex = new Mutex();
211 DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
217 /* lock_record_free ************************************************************
   Free a lock record and all resources it owns (counterpart to lock_record_new).
222 lr....lock record to free
224 *******************************************************************************/
226 static void lock_record_free(lock_record_t *lr)
228 DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
230 // Destroy the mutex.
233 #if defined(ENABLE_GC_CACAO)
234 /* unregister the lock object reference with the GC */
236 gc_weakreference_unregister(&(lr->object));
239 // Free the waiters list.
242 /* Free the data structure. */
244 FREE(lr, lock_record_t);
246 #if defined(ENABLE_STATISTICS)
248 size_lock_record -= sizeof(lock_record_t);
253 /*============================================================================*/
254 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
255 /*============================================================================*/
257 /* lock_hashtable_init *********************************************************
259 Initialize the global hashtable mapping objects to lock records.
261 *******************************************************************************/
263 static void lock_hashtable_init(void)
265 lock_hashtable.mutex = new Mutex();
267 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
268 lock_hashtable.entries = 0;
269 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
271 #if defined(ENABLE_STATISTICS)
273 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
/* all slots start out empty */
276 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
280 /* lock_hashtable_grow *********************************************************
282 Grow the lock record hashtable to about twice its current size and
285 *******************************************************************************/
287 /* must be called with hashtable mutex locked */
288 static void lock_hashtable_grow(void)
292 lock_record_t **oldtable;
293 lock_record_t **newtable;
300 /* allocate a new table */
302 oldsize = lock_hashtable.size;
303 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
305 DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
307 oldtable = lock_hashtable.ptr;
308 newtable = MNEW(lock_record_t *, newsize);
310 #if defined(ENABLE_STATISTICS)
312 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
315 MZERO(newtable, lock_record_t *, newsize);
317 /* rehash the entries */
319 for (i = 0; i < oldsize; i++) {
324 h = heap_hashcode(lr->object);
325 newslot = h % newsize;
/* push the record onto the head of its new chain */
327 lr->hashlink = newtable[newslot];
328 newtable[newslot] = lr;
334 /* replace the old table */
336 lock_hashtable.ptr = newtable;
337 lock_hashtable.size = newsize;
339 MFREE(oldtable, lock_record_t *, oldsize);
341 #if defined(ENABLE_STATISTICS)
343 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
348 /* lock_hashtable_cleanup ******************************************************
350 Removes (and frees) lock records which have a cleared object reference
351 from the hashtable. The locked object was reclaimed by the GC.
   (ENABLE_GC_CACAO builds only.)
353 *******************************************************************************/
355 #if defined(ENABLE_GC_CACAO)
356 void lock_hashtable_cleanup(void)
366 /* lock the hashtable */
368 Mutex_lock(lock_hashtable.mutex);
370 /* search the hashtable for cleared references */
372 for (i = 0; i < lock_hashtable.size; i++) {
373 lr = lock_hashtable.ptr[i];
379 /* remove lock records with cleared references */
/* the GC nulled the weak reference registered in lock_record_new */
381 if (lr->object == NULL) {
383 /* unlink the lock record from the hashtable */
386 lock_hashtable.ptr[i] = next;
388 prev->hashlink = next;
390 /* free the lock record */
392 lock_record_free(lr);
402 /* unlock the hashtable */
404 Mutex_unlock(lock_hashtable.mutex);
409 /* lock_hashtable_get **********************************************************
411 Find the lock record for the given object. If it does not exists,
412 yet, create it and enter it in the hashtable.
415 o....the object to look up
418 the lock record to use for this object
420 *******************************************************************************/
422 #if defined(ENABLE_GC_BOEHM)
423 static void lock_record_finalizer(void *object, void *p);
426 static lock_record_t *lock_hashtable_get(java_handle_t* o)
428 // This function is inside a critical section.
429 GCCriticalSection cs;
434 Lockword* lockword = lock_lockword_get(o);
// Fast path: an inflated (fat) lock already stores its record in the lockword.
436 if (lockword->is_fat_lock())
437 return lockword->get_fat_lock();
439 // Lock the hashtable.
440 lock_hashtable.mutex->lock();
442 /* lookup the lock record in the hashtable */
444 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
445 lr = lock_hashtable.ptr[slot];
447 for (; lr != NULL; lr = lr->hashlink) {
448 if (lr->object == LLNI_DIRECT(o))
453 /* not found, we must create a new one */
455 lr = lock_record_new();
457 lr->object = LLNI_DIRECT(o);
459 #if defined(ENABLE_GC_BOEHM)
460 /* register new finalizer to clean up the lock record */
462 GC_finalization_proc ofinal = 0;
463 GC_REGISTER_FINALIZER_UNREACHABLE(LLNI_DIRECT(o), lock_record_finalizer, 0, &ofinal, 0);
465 /* There was a finalizer -- reinstall it. We do not want to disrupt the
466 normal finalizer operation. We hold the monitor on this object, so
467 this is thread-safe. */
469 GC_REGISTER_FINALIZER(LLNI_DIRECT(o), ofinal, 0, 0, 0);
472 /* enter it in the hashtable */
474 lr->hashlink = lock_hashtable.ptr[slot];
475 lock_hashtable.ptr[slot] = lr;
476 lock_hashtable.entries++;
478 /* check whether the hash should grow */
/* grow once the load factor exceeds 4/3 */
480 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
481 lock_hashtable_grow();
485 // Unlock the hashtable.
486 lock_hashtable.mutex->unlock();
488 /* return the new lock record */
493 /* lock_schedule_lockrecord_removal ********************************************
495 Gives the locking system a chance to schedule the removal of an unused lock
496 record. This function is called after an object's finalizer has run.
499 o....the object which has been finalized
   (ENABLE_GC_BOEHM builds only.)
501 *******************************************************************************/
503 #if defined(ENABLE_GC_BOEHM)
504 void lock_schedule_lockrecord_removal(java_handle_t *o)
506 Lockword* lockword = lock_lockword_get(o);
/* only fat locks have an associated lock record */
507 if (!lockword->is_fat_lock())
508 /* there is no lock record */
511 /* register new finalizer to clean up the lock record */
512 GC_REGISTER_FINALIZER_UNREACHABLE(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
517 /* lock_hashtable_remove *******************************************************
519 Remove the lock record for the given object from the hashtable
520 and free it afterwards.
523 t....the current thread
524 o....the object to look up
526 *******************************************************************************/
528 static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
532 lock_record_t *tmplr;
534 // Lock the hashtable.
535 lock_hashtable.mutex->lock();
537 /* get lock record */
539 Lockword* lockword = lock_lockword_get(o);
542 assert(lockword->is_fat_lock());
544 lr = lockword->get_fat_lock();
546 /* remove the lock-record from the hashtable */
548 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
549 tmplr = lock_hashtable.ptr[slot];
552 /* special handling if it's the first in the chain */
554 lock_hashtable.ptr[slot] = lr->hashlink;
557 for (; tmplr != NULL; tmplr = tmplr->hashlink) {
558 if (tmplr->hashlink == lr) {
559 tmplr->hashlink = lr->hashlink;
/* the record must have been present in the chain */
564 assert(tmplr != NULL);
567 /* decrease entry count */
569 lock_hashtable.entries--;
571 // Unlock the hashtable.
572 lock_hashtable.mutex->unlock();
574 /* free the lock record */
576 lock_record_free(lr);
580 /* lock_record_finalizer *******************************************************
   Boehm-GC finalizer registered in lock_hashtable_get: frees the object's
   lock record once the object itself has become unreachable.
582 XXX Remove me for exact GC.
584 *******************************************************************************/
586 static void lock_record_finalizer(void *object, void *p)
591 o = (java_handle_t *) object;
593 #if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
594 /* XXX this is only a dirty hack to make Boehm work with handles */
596 o = LLNI_WRAP((java_object_t *) o);
599 LLNI_class_get(o, c);
602 if (opt_DebugFinalizer) {
604 log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
611 /* remove the lock-record entry from the hashtable and free it */
613 lock_hashtable_remove(THREADOBJECT, o);
617 /*============================================================================*/
618 /* LOCKING ALGORITHM */
619 /*============================================================================*/
622 /* lock_lockword_get ***********************************************************
624 Get the lockword for the given object.
627 o............the object
629 *******************************************************************************/
631 static inline Lockword* lock_lockword_get(java_handle_t* o)
633 #if defined(ENABLE_GC_CACAO)
635 assert(GCCriticalSection::inside() == true);
/* the lockword is embedded directly in the object header */
638 return &(LLNI_DIRECT(o)->lockword);
642 /* lock_record_enter ***********************************************************
644 Enter the lock represented by the given lock record.
   NOTE(review): presumably acquires lr->mutex and records t as the owner
   (callers treat it as the blocking acquire of a fat lock) — confirm
   against the full source.
647 t.................the current thread
648 lr................the lock record
650 *******************************************************************************/
652 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
659 /* lock_record_exit ************************************************************
661 Release the lock represented by the given lock record.
   NOTE(review): presumably clears the owner field and releases lr->mutex —
   confirm against the full source.
664 t.................the current thread
665 lr................the lock record
668 The current thread must own the lock represented by this lock record.
669 This is NOT checked by this function!
671 *******************************************************************************/
673 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
680 /* lock_inflate ****************************************************************
682 Inflate the lock of the given object. This may only be called by the
683 owner of the monitor of the object.
686 o............the object of which to inflate the lock
687 lr...........the lock record to install. The current thread must
688 own the lock of this lock record!
691 The current thread must be the owner of this object's monitor AND
692 of the lock record's lock!
694 *******************************************************************************/
696 static void lock_inflate(java_handle_t *o, lock_record_t *lr)
698 Lockword* lockword = lock_lockword_get(o);
/* install lr into the lockword, switching the object to fat-lock mode */
699 lockword->inflate(lr);
703 /* sable_flc_waiting ***********************************************************
705 Enqueue the current thread on another thread's FLC list. The function
706 blocks until the lock has been inflated by the owning thread.
708 The algorithm used to be an almost literal copy from SableVM. The
709 superfluous list traversal in the waiting loop has been removed since,
713 lockword.....the object's lockword as seen at the first locking attempt
714 t............the current thread
715 o............the object of which to enter the monitor
717 *******************************************************************************/
719 static void sable_flc_waiting(Lockword *lockword, threadobject *t, java_handle_t *o)
722 threadobject *t_other;
725 index = lockword->get_thin_lock_thread_index();
726 t_other = ThreadList::get_thread_by_index(index);
728 // The lockword could have changed during our way here. If the
729 // thread index is zero, the lock got unlocked and we simply
732 /* failure, TODO: add statistics */
/* serialize against the lock holder's flat-lock-contention state */
735 t_other->flc_lock->lock();
736 old_flc = t_other->flc_bit;
737 t_other->flc_bit = true;
739 DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d", t->index, t_other->index));
741 // Set FLC bit first, then read the lockword again.
742 Atomic::memory_barrier();
744 lockword = lock_lockword_get(o);
746 /* Lockword is still the way it was seen before */
747 if (lockword->is_thin_lock() && (lockword->get_thin_lock_thread_index() == index))
750 /* Add tuple (t, o) to the other thread's FLC list */
752 t->flc_next = t_other->flc_list;
753 t_other->flc_list = t;
754 if (t->flc_next == 0)
755 t_other->flc_tail = t;
756 f = t_other->flc_tail;
758 // The other thread will clear flc_object.
759 while (t->flc_object)
761 // We are not cleared yet -- the other thread cannot have seen
763 assert(t_other->flc_bit);
765 // Wait until another thread sees the flc bit and notifies
767 t->flc_cond->wait(t_other->flc_lock);
/* restore the flc bit to the value saved before we set it */
773 t_other->flc_bit = old_flc;
775 t_other->flc_lock->unlock();
778 /* notify_flc_waiters **********************************************************
780 Traverse the thread's FLC list and inflate all corresponding locks. Notify
781 the associated threads as well.
784 t............the current thread
785 o............the object currently being unlocked
787 *******************************************************************************/
789 static void notify_flc_waiters(threadobject *t, java_handle_t *o)
791 threadobject *current;
795 current = t->flc_list;
798 if (current->flc_object != o)
800 /* The object has to be inflated so the other threads can properly
803 // Only if not already inflated.
804 Lockword* lockword = lock_lockword_get(current->flc_object);
805 if (lockword->is_thin_lock()) {
806 lock_record_t *lr = lock_hashtable_get(current->flc_object);
807 lock_record_enter(t, lr);
809 DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
810 t->index, (void*) current->flc_object, (void*) lr));
812 lock_inflate(current->flc_object, lr);
816 // Wake the waiting threads.
817 current->flc_cond->broadcast();
// Clearing flc_object is the signal sable_flc_waiting spins on.
818 current->flc_object = NULL;
820 current = current->flc_next;
826 t->flc_lock->unlock();
829 /* lock_monitor_enter **********************************************************
831 Acquire the monitor of the given object. If the current thread already
832 owns the monitor, the lock counter is simply increased.
834 This function blocks until it can acquire the monitor.
837 t............the current thread
838 o............the object of which to enter the monitor
841 true.........the lock has been successfully acquired
842 false........an exception has been thrown
844 *******************************************************************************/
846 bool lock_monitor_enter(java_handle_t *o)
848 // This function is inside a critical section.
849 GCCriticalSection cs;
852 exceptions_throw_nullpointerexception();
856 threadobject* t = thread_get_current();
/* the thread's precomputed thin-lock word (its ID in the thread-index field) */
858 uintptr_t thinlock = t->thinlock;
861 // Most common case: try to thin-lock an unlocked object.
862 Lockword* lockword = lock_lockword_get(o);
863 bool result = lockword->lock(thinlock);
865 if (result == true) {
866 // Success, we locked it.
867 // NOTE: The Java Memory Model requires an instruction barrier
868 // here (because of the CAS above).
869 Atomic::instruction_barrier();
873 // Next common case: recursive lock with small recursion count.
874 // NOTE: We don't have to worry about stale values here, as any
875 // stale value will indicate another thread holding the lock (or
876 // an inflated lock).
877 if (lockword->get_thin_lock_without_count() == thinlock) {
878 // We own this monitor. Check the current recursion count.
879 if (lockword->is_max_thin_lock_count() == false) {
880 // The recursion count is low enough.
881 lockword->increase_thin_lock_count();
883 // Success, we locked it.
887 // Recursion count overflow.
888 lock_record_t* lr = lock_hashtable_get(o);
889 lock_record_enter(t, lr);
893 notify_flc_waiters(t, o);
899 // The lock is either contented or fat.
900 if (lockword->is_fat_lock()) {
901 lock_record_t* lr = lockword->get_fat_lock();
903 // Check for recursive entering.
904 if (lr->owner == t) {
909 // Acquire the mutex of the lock record.
910 lock_record_enter(t, lr);
913 assert(lr->count == 0);
917 /****** inflation path ******/
919 #if defined(ENABLE_JVMTI)
920 /* Monitor Contended Enter */
921 jvmti_MonitorContendedEntering(false, o);
// Contended thin lock: block until the holder inflates it for us.
924 sable_flc_waiting(lockword, t, o);
926 #if defined(ENABLE_JVMTI)
927 /* Monitor Contended Entered */
928 jvmti_MonitorContendedEntering(true, o);
934 /* lock_monitor_exit ***********************************************************
936 Decrement the counter of a (currently owned) monitor. If the counter
937 reaches zero, release the monitor.
939 If the current thread is not the owner of the monitor, an
940 IllegalMonitorState exception is thrown.
943 t............the current thread
944 o............the object of which to exit the monitor
947 true.........everything ok,
948 false........an exception has been thrown
950 *******************************************************************************/
952 bool lock_monitor_exit(java_handle_t* o)
954 // This function is inside a critical section.
955 GCCriticalSection cs;
958 exceptions_throw_nullpointerexception();
962 threadobject* t = thread_get_current();
964 uintptr_t thinlock = t->thinlock;
966 // We don't have to worry about stale values here, as any stale
967 // value will indicate that we don't own the lock.
968 Lockword* lockword = lock_lockword_get(o);
970 // Most common case: we release a thin lock that we hold once.
971 if (lockword->get_thin_lock() == thinlock) {
972 // Memory barrier for Java Memory Model.
973 Atomic::write_memory_barrier();
975 // Memory barrier for thin locking.
976 Atomic::memory_barrier();
/* the flc bit is set by threads spinning in sable_flc_waiting */
978 /* check if there has been a flat lock contention on this object */
981 DEBUGLOCKS(("thread %d saw flc bit", t->index));
983 /* there has been a contention on this thin lock */
984 notify_flc_waiters(t, o);
990 // Next common case: we release a recursive lock, count > 0.
991 if (lockword->get_thin_lock_without_count() == thinlock) {
992 lockword->decrease_thin_lock_count();
996 // Either the lock is fat, or we don't hold it at all.
997 if (lockword->is_fat_lock()) {
998 lock_record_t* lr = lockword->get_fat_lock();
1000 // Check if we own this monitor.
1001 // NOTE: We don't have to worry about stale values here, as
1002 // any stale value will be != t and thus fail this check.
1003 if (lr->owner != t) {
1004 exceptions_throw_illegalmonitorstateexception();
1008 /* { the current thread `t` owns the lock record `lr` on object `o` } */
1010 if (lr->count != 0) {
1011 // We had locked this one recursively. Just decrement, it
1012 // will still be locked.
1017 // Unlock this lock record.
1018 lock_record_exit(t, lr);
1022 // Legal thin lock cases have been handled above, so this is an
1024 exceptions_throw_illegalmonitorstateexception();
1030 /* lock_record_add_waiter ******************************************************
1032 Add a thread to the list of waiting threads of a lock record.
   NOTE(review): callers (lock_record_wait) hold the lock record — confirm.
1035 lr...........the lock record
1036 thread.......the thread to add
1038 *******************************************************************************/
1040 static void lock_record_add_waiter(lock_record_t *lr, threadobject* t)
1042 // Add the thread as last entry to waiters list.
1043 lr->waiters->push_back(t);
1045 #if defined(ENABLE_STATISTICS)
1047 size_lock_waiter += sizeof(threadobject*);
1052 /* lock_record_remove_waiter ***************************************************
1054 Remove a thread from the list of waiting threads of a lock record.
1057 lr...........the lock record
1058 t............the current thread
1061 The current thread must be the owner of the lock record.
1063 *******************************************************************************/
1065 static void lock_record_remove_waiter(lock_record_t *lr, threadobject* t)
1067 // Remove the thread from the waiters.
1068 lr->waiters->remove(t);
1070 #if defined(ENABLE_STATISTICS)
1072 size_lock_waiter -= sizeof(threadobject*);
1077 /* lock_record_wait ************************************************************
1079 Wait on a lock record for a given (maximum) amount of time.
1082 t............the current thread
1083 lr...........the lock record
1084 millis.......milliseconds of timeout
1085 nanos........nanoseconds of timeout
1088 true.........we have been interrupted,
1089 false........everything ok
1092 The current thread must be the owner of the lock record.
1093 This is NOT checked by this function!
   NOTE(review): timeout semantics (e.g. whether 0 means wait forever) are
   those of threads_wait_with_timeout_relative — confirm there.
1095 *******************************************************************************/
1097 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1100 bool wasinterrupted = false;
1102 DEBUGLOCKS(("[lock_record_wait : lr=%p, t=%p, millis=%lld, nanos=%d]",
1103 lr, thread, millis, nanos));
1105 /* { the thread t owns the fat lock record lr on the object o } */
1107 /* register us as waiter for this object */
1109 lock_record_add_waiter(lr, thread);
1111 /* remember the old lock count */
1113 lockcount = lr->count;
1115 /* unlock this record */
1118 lock_record_exit(thread, lr);
1120 /* wait until notified/interrupted/timed out */
1122 threads_wait_with_timeout_relative(thread, millis, nanos);
1124 /* re-enter the monitor */
1126 lock_record_enter(thread, lr);
1128 /* remove us from the list of waiting threads */
1130 lock_record_remove_waiter(lr, thread);
1132 /* restore the old lock count */
1134 lr->count = lockcount;
1136 /* We can only be signaled OR interrupted, not both. If both flags
1137 are set, reset only signaled and leave the thread in
1138 interrupted state. Otherwise, clear both. */
1140 if (!thread->signaled) {
1141 wasinterrupted = thread->interrupted;
1142 thread->interrupted = false;
1145 thread->signaled = false;
1147 /* return if we have been interrupted */
1149 return wasinterrupted;
1153 /* lock_monitor_wait ***********************************************************
1155 Wait on an object for a given (maximum) amount of time.
1158 t............the current thread
1159 o............the object
1160 millis.......milliseconds of timeout
1161 nanos........nanoseconds of timeout
1164 The current thread must be the owner of the object's monitor.
1166 *******************************************************************************/
1168 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
1172 Lockword* lockword = lock_lockword_get(o);
1174 // Check if we own this monitor.
1175 // NOTE: We don't have to worry about stale values here, as any
1176 // stale value will fail this check.
1177 if (lockword->is_fat_lock()) {
1178 lr = lockword->get_fat_lock();
1180 if (lr->owner != t) {
1181 exceptions_throw_illegalmonitorstateexception();
1186 // It's a thin lock.
1187 if (lockword->get_thin_lock_without_count() != t->thinlock) {
1188 exceptions_throw_illegalmonitorstateexception();
// Waiting requires a lock record, so the thin lock is inflated first.
1192 // Get the lock-record.
1193 lr = lock_hashtable_get(o);
1194 lock_record_enter(t, lr);
1196 // Inflate this lock.
1197 lockword->inflate(lr);
1199 notify_flc_waiters(t, o);
1202 /* { the thread t owns the fat lock record lr on the object o } */
1204 if (lock_record_wait(t, lr, millis, nanos))
1205 exceptions_throw_interruptedexception();
1209 /* lock_record_notify **********************************************************
1211 Notify one thread or all threads waiting on the given lock record.
1214 t............the current thread
1215 lr...........the lock record
1216 one..........if true, only notify one thread
1219 The current thread must be the owner of the lock record.
1220 This is NOT checked by this function!
1222 *******************************************************************************/
1224 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1226 #if defined(ENABLE_GC_CACAO)
1228 assert(GCCriticalSection::inside() == false);
1231 // { The thread t owns the fat lock record lr on the object o }
// NOTE(review): signal + signaled flag are set under the waiter's waitmutex,
// presumably pairing with the waiter's condition wait — confirm.
1233 for (List<threadobject*>::iterator it = lr->waiters->begin(); it != lr->waiters->end(); it++) {
1234 threadobject* waiter = *it;
1236 // We must skip threads which have already been notified. They
1237 // will remove themselves from the list.
1238 if (waiter->signaled)
1241 // Enter the wait-mutex.
1242 waiter->waitmutex->lock();
1244 DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, one=%d]", lr, t, waiter, one));
1246 // Signal the waiter.
1247 waiter->waitcond->signal();
1249 // Mark the thread as signaled.
1250 waiter->signaled = true;
1252 // Leave the wait-mutex.
1253 waiter->waitmutex->unlock();
1255 // If we should only wake one thread, we are done.
1262 /* lock_monitor_notify *********************************************************
1264 Notify one thread or all threads waiting on the given object.
1267 t............the current thread
1268 o............the object
1269 one..........if true, only notify one thread
1272 The current thread must be the owner of the object's monitor.
1274 *******************************************************************************/
1276 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1278 lock_record_t* lr = NULL;
// lr stays NULL when the lock is thin (then there is nothing to notify).
1281 // This scope is inside a critical section.
1282 GCCriticalSection cs;
1284 Lockword* lockword = lock_lockword_get(o);
1286 // Check if we own this monitor.
1287 // NOTE: We don't have to worry about stale values here, as any
1288 // stale value will fail this check.
1290 if (lockword->is_fat_lock()) {
1291 lr = lockword->get_fat_lock();
1293 if (lr->owner != t) {
1294 exceptions_throw_illegalmonitorstateexception();
1299 // It's a thin lock.
1300 if (lockword->get_thin_lock_without_count() != t->thinlock) {
1301 exceptions_throw_illegalmonitorstateexception();
1305 // No thread can wait on a thin lock, so there's nothing to do.
1310 // { The thread t owns the fat lock record lr on the object o }
1311 lock_record_notify(t, lr, one);
1316 /*============================================================================*/
1317 /* INQUIRY FUNCIONS */
1318 /*============================================================================*/
1321 /* lock_is_held_by_current_thread **********************************************
1323 Return true if the current thread owns the monitor of the given object.
1326 o............the object
1329 true, if the current thread holds the lock of this object.
1331 *******************************************************************************/
1333 bool lock_is_held_by_current_thread(java_handle_t *o)
1335 // This function is inside a critical section.
1336 GCCriticalSection cs;
1338 // Check if we own this monitor.
1339 // NOTE: We don't have to worry about stale values here, as any
1340 // stale value will fail this check.
1341 threadobject* t = thread_get_current();
1342 Lockword* lockword = lock_lockword_get(o);
1344 if (lockword->is_fat_lock()) {
1346 lock_record_t* lr = lockword->get_fat_lock();
1347 return (lr->owner == t);
1350 // It's a thin lock.
// Compare thread-ID bits only; the recursion count is masked off.
1351 return (lockword->get_thin_lock_without_count() == t->thinlock);
1357 /*============================================================================*/
1358 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1359 /*============================================================================*/
1362 /* lock_wait_for_object ********************************************************
1364 Wait for the given object.
1367 o............the object
1368 millis.......milliseconds to wait
1369 nanos........nanoseconds to wait
1371 *******************************************************************************/
1373 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1375 threadobject *thread;
1377 thread = THREADOBJECT;
/* delegate to the monitor implementation on behalf of the current thread */
1379 lock_monitor_wait(thread, o, millis, nanos);
1383 /* lock_notify_object **********************************************************
1385 Notify one thread waiting on the given object.
1388 o............the object
1390 *******************************************************************************/
1392 void lock_notify_object(java_handle_t *o)
1394 threadobject *thread;
1396 thread = THREADOBJECT;
/* one == true: wake at most one waiting thread */
1398 lock_monitor_notify(thread, o, true);
1402 /* lock_notify_all_object ******************************************************
1404 Notify all threads waiting on the given object.
1407 o............the object
1409 *******************************************************************************/
1411 void lock_notify_all_object(java_handle_t *o)
1413 threadobject *thread;
1415 thread = THREADOBJECT;
/* one == false: wake every waiting thread */
1417 lock_monitor_notify(thread, o, false);
1422 * These are local overrides for various environment variables in Emacs.
1423 * Please do not remove this and leave it at the end of the file, where
1424 * Emacs will automagically detect them.
1425 * ---------------------------------------------------------------------
1428 * indent-tabs-mode: t
1432 * vim:noexpandtab:sw=4:ts=4: