1 /* src/threads/lock.cpp - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007, 2008
4 CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
6 This file is part of CACAO.
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2, or (at
11 your option) any later version.
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
36 #include "mm/memory.hpp"
38 #include "native/llni.h"
40 #include "threads/atomic.hpp"
41 #include "threads/lock.hpp"
42 #include "threads/mutex.hpp"
43 #include "threads/threadlist.hpp"
44 #include "threads/thread.hpp"
46 #include "toolbox/list.hpp"
48 #include "vm/exceptions.hpp"
49 #include "vm/finalizer.hpp"
50 #include "vm/global.h"
51 #include "vm/options.h"
52 #include "vm/string.hpp"
55 #if defined(ENABLE_STATISTICS)
56 # include "vm/statistics.h"
59 #if defined(ENABLE_VMLOG)
60 #include <vmlog_cacao.h>
63 #if defined(ENABLE_JVMTI)
64 #include "native/jvmti/cacaodbg.h"
67 #if defined(ENABLE_GC_BOEHM)
68 # include "mm/boehm-gc/include/gc.h"
72 /* debug **********************************************************************/
/* DEBUGLOCKS(x): prints a locking debug message when opt_DebugLocks is set;
   the second definition (debug disabled) expands to nothing.
   NOTE(review): the surrounding #if/#else/#endif lines are not visible in
   this excerpt — do not insert code between the continuation lines below. */
75 # define DEBUGLOCKS(format) \
77 	if (opt_DebugLocks) { \
82 # define DEBUGLOCKS(format)
86 /******************************************************************************/
88 /******************************************************************************/
90 /* number of lock records in the first pool allocated for a thread */
91 #define LOCK_INITIAL_LOCK_RECORDS 8
/* initial bucket count of the global object->lock-record hashtable */
93 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
96 /******************************************************************************/
97 /* MACROS FOR THIN/FAT LOCKS */
98 /******************************************************************************/
100 /* We use a variant of the tasuki locks described in the paper
102 * Tamiya Onodera, Kiyokuni Kawachiya
103 * A Study of Locking Objects with Bimodal Fields
104 * Proceedings of the ACM OOPSLA '99, pp. 223-237
107 * The underlying thin locks are a variant of the thin locks described in
109 * Bacon, Konuru, Murthy, Serrano
110 * Thin Locks: Featherweight Synchronization for Java
111 * Proceedings of the ACM Conference on Programming Language Design and
112 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
115 * In thin lock mode the lockword looks like this:
117 * ,----------------------,-----------,---,
118 * | thread ID | count | 0 |
119 * `----------------------'-----------'---'
121 * thread ID......the 'index' of the owning thread, or 0
122 * count..........number of times the lock has been entered minus 1
123 * 0..............the shape bit is 0 in thin lock mode
125 * In fat lock mode it is basically a lock_record_t *:
127 * ,----------------------------------,---,
128 * | lock_record_t * (without LSB) | 1 |
129 * `----------------------------------'---'
131 * 1..............the shape bit is 1 in fat lock mode
134 /* global variables ***********************************************************/
136 /* hashtable mapping objects to lock records */
/* Protected by lock_hashtable.mutex (see lock_hashtable_get/_remove). */
137 static lock_hashtable_t lock_hashtable;
140 /******************************************************************************/
/* forward declarations of file-local helpers                                 */
142 /******************************************************************************/
144 static void lock_hashtable_init(void);
146 static inline Lockword* lock_lockword_get(java_handle_t* o);
147 static void lock_record_enter(threadobject *t, lock_record_t *lr);
148 static void lock_record_exit(threadobject *t, lock_record_t *lr);
149 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
150 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
153 /*============================================================================*/
154 /* INITIALIZATION OF DATA STRUCTURES */
155 /*============================================================================*/
158 /* lock_init *******************************************************************
160 Initialize global data for locking.
162 *******************************************************************************/
/* NOTE(review): the lock_init() signature line is not visible in this
   excerpt; the statements below are its body.  It sets up the global
   hashtable and, if vmlog support is compiled in, the vmlog lock hook. */
166 /* initialize lock hashtable */
168 lock_hashtable_init();
170 #if defined(ENABLE_VMLOG)
171 vmlog_cacao_init_lock();
176 /* lock_record_new *************************************************************
178 Allocate a lock record.
180 *******************************************************************************/
182 static lock_record_t *lock_record_new(void)
/* Allocates and initializes a fat-lock record: C-heap storage, an empty
   waiters list, and a fresh mutex.  With the exact GC, the referenced
   object is also registered as a weak reference. */
186 /* allocate the data structure on the C heap */
188 lr = NEW(lock_record_t);
190 #if defined(ENABLE_STATISTICS)
192 size_lock_record += sizeof(lock_record_t);
195 /* initialize the members */
/* waiters holds the threads currently blocked in Object.wait() on this
   record (see lock_record_add_waiter/lock_record_remove_waiter) */
200 lr->waiters = new List<threadobject*>();
202 #if defined(ENABLE_GC_CACAO)
203 /* register the lock object as weak reference with the GC */
205 gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
208 // Initialize the mutex.
209 lr->mutex = new Mutex();
211 DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
217 /* lock_record_free ************************************************************
/* Free a lock record and all resources it owns (mutex, waiters list,
   GC weak-reference registration).  Inverse of lock_record_new. */
222 lr....lock record to free
224 *******************************************************************************/
226 static void lock_record_free(lock_record_t *lr)
228 DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
230 // Destroy the mutex.
233 #if defined(ENABLE_GC_CACAO)
234 /* unregister the lock object reference with the GC */
236 gc_weakreference_unregister(&(lr->object));
239 // Free the waiters list.
242 /* Free the data structure. */
244 FREE(lr, lock_record_t);
246 #if defined(ENABLE_STATISTICS)
248 size_lock_record -= sizeof(lock_record_t);
253 /*============================================================================*/
254 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
255 /*============================================================================*/
257 /* lock_hashtable_init *********************************************************
259 Initialize the global hashtable mapping objects to lock records.
261 *******************************************************************************/
263 static void lock_hashtable_init(void)
/* Called once from lock_init(); no locking needed at that point. */
265 lock_hashtable.mutex = new Mutex();
267 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
268 lock_hashtable.entries = 0;
269 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
271 #if defined(ENABLE_STATISTICS)
273 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
/* zero all buckets: an empty bucket is a NULL chain head */
276 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
280 /* lock_hashtable_grow *********************************************************
282 Grow the lock record hashtable to about twice its current size and
285 *******************************************************************************/
287 /* must be called with hashtable mutex locked */
288 static void lock_hashtable_grow(void)
292 lock_record_t **oldtable;
293 lock_record_t **newtable;
300 /* allocate a new table */
302 oldsize = lock_hashtable.size;
/* doubling+1 keeps the size odd; a prime table size would distribute
   heap_hashcode values better, as the XXX below already notes */
303 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
305 DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
307 oldtable = lock_hashtable.ptr;
308 newtable = MNEW(lock_record_t *, newsize);
310 #if defined(ENABLE_STATISTICS)
312 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
315 MZERO(newtable, lock_record_t *, newsize);
317 /* rehash the entries */
319 for (i = 0; i < oldsize; i++) {
324 h = heap_hashcode(lr->object);
325 newslot = h % newsize;
/* push onto the head of the new bucket's chain */
327 lr->hashlink = newtable[newslot];
328 newtable[newslot] = lr;
334 /* replace the old table */
336 lock_hashtable.ptr = newtable;
337 lock_hashtable.size = newsize;
339 MFREE(oldtable, lock_record_t *, oldsize);
341 #if defined(ENABLE_STATISTICS)
343 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
348 /* lock_hashtable_cleanup ******************************************************
350 Removes (and frees) lock records which have a cleared object reference
351 from the hashtable. The locked object was reclaimed by the GC.
353 *******************************************************************************/
355 #if defined(ENABLE_GC_CACAO)
356 void lock_record_hashtable_cleanup_marker_do_not_edit(void)
366 /* lock the hashtable */
/* NOTE(review): this uses the C-style Mutex_lock()/Mutex_unlock() API
   while the rest of the file uses the C++ mutex->lock()/unlock()
   methods — verify this still compiles in the ENABLE_GC_CACAO build. */
368 Mutex_lock(lock_hashtable.mutex);
370 /* search the hashtable for cleared references */
372 for (i = 0; i < lock_hashtable.size; i++) {
373 lr = lock_hashtable.ptr[i];
379 /* remove lock records with cleared references */
/* object == NULL means the GC cleared the weak reference registered
   in lock_record_new */
381 if (lr->object == NULL) {
383 /* unlink the lock record from the hashtable */
386 lock_hashtable.ptr[i] = next;
388 prev->hashlink = next;
390 /* free the lock record */
392 lock_record_free(lr);
402 /* unlock the hashtable */
404 Mutex_unlock(lock_hashtable.mutex);
409 /* lock_hashtable_get **********************************************************
411 Find the lock record for the given object. If it does not exists,
412 yet, create it and enter it in the hashtable.
415 o....the object to look up
418 the lock record to use for this object
420 *******************************************************************************/
422 #if defined(ENABLE_GC_BOEHM)
423 static void lock_record_finalizer(void *object, void *p);
426 static lock_record_t *lock_hashtable_get(java_handle_t* o)
428 // This function is inside a critical section.
429 GCCriticalSection cs;
/* fast path: if the lock is already inflated, the lockword itself
   carries the lock record pointer */
434 Lockword* lockword = lock_lockword_get(o);
436 if (lockword->is_fat_lock())
437 return lockword->get_fat_lock();
439 // Lock the hashtable.
440 lock_hashtable.mutex->lock();
442 /* lookup the lock record in the hashtable */
444 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
445 lr = lock_hashtable.ptr[slot];
447 for (; lr != NULL; lr = lr->hashlink) {
448 if (lr->object == LLNI_DIRECT(o))
453 /* not found, we must create a new one */
455 lr = lock_record_new();
457 lr->object = LLNI_DIRECT(o);
459 #if defined(ENABLE_GC_BOEHM)
460 /* register new finalizer to clean up the lock record */
462 GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
465 /* enter it in the hashtable */
/* new records are pushed onto the head of the bucket chain */
467 lr->hashlink = lock_hashtable.ptr[slot];
468 lock_hashtable.ptr[slot] = lr;
469 lock_hashtable.entries++;
471 /* check whether the hash should grow */
/* grow when the load factor exceeds 4/3 (entries/size > 4/3) */
473 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
474 lock_hashtable_grow();
478 // Unlock the hashtable.
479 lock_hashtable.mutex->unlock();
481 /* return the new lock record */
487 /* lock_hashtable_remove *******************************************************
489 Remove the lock record for the given object from the hashtable
490 and free it afterwards.
493 t....the current thread
494 o....the object to look up
496 *******************************************************************************/
498 static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
502 lock_record_t *tmplr;
504 // Lock the hashtable.
505 lock_hashtable.mutex->lock();
507 /* get lock record */
509 Lockword* lockword = lock_lockword_get(o);
/* only inflated (fat) locks have hashtable entries */
512 assert(lockword->is_fat_lock());
514 lr = lockword->get_fat_lock();
516 /* remove the lock-record from the hashtable */
518 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
519 tmplr = lock_hashtable.ptr[slot];
522 /* special handling if it's the first in the chain */
524 lock_hashtable.ptr[slot] = lr->hashlink;
/* otherwise walk the chain until we find the predecessor of lr */
527 for (; tmplr != NULL; tmplr = tmplr->hashlink) {
528 if (tmplr->hashlink == lr) {
529 tmplr->hashlink = lr->hashlink;
/* lr must have been in the chain */
534 assert(tmplr != NULL);
537 /* decrease entry count */
539 lock_hashtable.entries--;
541 // Unlock the hashtable.
542 lock_hashtable.mutex->unlock();
544 /* free the lock record */
546 lock_record_free(lr);
550 /* lock_record_finalizer *******************************************************
/* Boehm-GC finalizer installed by lock_hashtable_get: runs the object's
   own Java finalizer (if any) and then removes/frees the object's lock
   record from the hashtable. */
552 XXX Remove me for exact GC.
554 *******************************************************************************/
556 static void lock_record_finalizer(void *object, void *p)
561 o = (java_handle_t *) object;
563 #if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
564 /* XXX this is only a dirty hack to make Boehm work with handles */
566 o = LLNI_WRAP((java_object_t *) o);
569 LLNI_class_get(o, c);
572 if (opt_DebugFinalizer) {
574 log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
581 /* check for a finalizer function */
583 if (c->finalizer != NULL)
584 finalizer_run(object, p);
586 /* remove the lock-record entry from the hashtable and free it */
588 lock_hashtable_remove(THREADOBJECT, o);
592 /*============================================================================*/
593 /* LOCKING ALGORITHM */
594 /*============================================================================*/
597 /* lock_lockword_get ***********************************************************
599 Get the lockword for the given object.
602 o............the object
604 *******************************************************************************/
606 static inline Lockword* lock_lockword_get(java_handle_t* o)
608 #if defined(ENABLE_GC_CACAO)
/* dereferencing the handle is only safe inside a GC critical section */
610 assert(GCCriticalSection::inside() == true);
613 return &(LLNI_DIRECT(o)->lockword);
617 /* lock_record_enter ***********************************************************
619 Enter the lock represented by the given lock record.
622 t.................the current thread
623 lr................the lock record
625 *******************************************************************************/
/* NOTE(review): the function body is not visible in this excerpt;
   callers (e.g. lock_monitor_enter) treat it as blocking acquisition
   of the record's monitor — confirm against the full source. */
627 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
634 /* lock_record_exit ************************************************************
636 Release the lock represented by the given lock record.
639 t.................the current thread
640 lr................the lock record
643 The current thread must own the lock represented by this lock record.
644 This is NOT checked by this function!
646 *******************************************************************************/
/* NOTE(review): the function body is not visible in this excerpt. */
648 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
655 /* lock_inflate ****************************************************************
657 Inflate the lock of the given object. This may only be called by the
658 owner of the monitor of the object.
661 o............the object of which to inflate the lock
662 lr...........the lock record to install. The current thread must
663 own the lock of this lock record!
666 The current thread must be the owner of this object's monitor AND
667 of the lock record's lock!
669 *******************************************************************************/
671 static void lock_inflate(java_handle_t *o, lock_record_t *lr)
/* switch the object's lockword from thin to fat mode, storing lr */
673 Lockword* lockword = lock_lockword_get(o);
674 lockword->inflate(lr);
678 /* sable_flc_waiting ***********************************************************
680 Enqueue the current thread on another thread's FLC list. The function
681 blocks until the lock has been inflated by the owning thread.
683 The algorithm used to be an almost literal copy from SableVM. The
684 superfluous list traversal in the waiting loop has been removed since,
688 lockword.....the object's lockword as seen at the first locking attempt
689 t............the current thread
690 o............the object of which to enter the monitor
692 *******************************************************************************/
694 static void sable_flc_waiting(Lockword *lockword, threadobject *t, java_handle_t *o)
697 threadobject *t_other;
/* identify the thread currently holding the thin lock */
700 index = lockword->get_thin_lock_thread_index();
701 t_other = ThreadList::get_thread_by_index(index);
703 // The lockword could have changed during our way here. If the
704 // thread index is zero, the lock got unlocked and we simply
707 /* failure, TODO: add statistics */
/* all FLC state of t_other is protected by t_other->flc_lock */
710 t_other->flc_lock->lock();
711 old_flc = t_other->flc_bit;
712 t_other->flc_bit = true;
714 DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d", t->index, t_other->index));
716 // Set FLC bit first, then read the lockword again.
717 Atomic::memory_barrier();
719 lockword = lock_lockword_get(o);
721 /* Lockword is still the way it was seen before */
722 if (lockword->is_thin_lock() && (lockword->get_thin_lock_thread_index() == index))
725 /* Add tuple (t, o) to the other thread's FLC list */
727 t->flc_next = t_other->flc_list;
728 t_other->flc_list = t;
729 if (t->flc_next == 0)
730 t_other->flc_tail = t;
731 f = t_other->flc_tail;
733 // The other thread will clear flc_object.
734 while (t->flc_object)
736 // We are not cleared yet -- the other thread cannot have seen
738 assert(t_other->flc_bit);
740 // Wait until another thread sees the flc bit and notifies
742 t->flc_cond->wait(t_other->flc_lock);
/* restore the previous flc_bit value on all exit paths */
748 t_other->flc_bit = old_flc;
750 t_other->flc_lock->unlock();
753 /* notify_flc_waiters **********************************************************
755 Traverse the thread's FLC list and inflate all corresponding locks. Notify
756 the associated threads as well.
759 t............the current thread
760 o............the object currently being unlocked
762 *******************************************************************************/
764 static void notify_flc_waiters(threadobject *t, java_handle_t *o)
766 threadobject *current;
770 current = t->flc_list;
/* skip entries that wait on a different object than the one being
   unlocked here */
773 if (current->flc_object != o)
775 /* The object has to be inflated so the other threads can properly
778 // Only if not already inflated.
779 Lockword* lockword = lock_lockword_get(current->flc_object);
780 if (lockword->is_thin_lock()) {
781 lock_record_t *lr = lock_hashtable_get(current->flc_object);
782 lock_record_enter(t, lr);
784 DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
785 t->index, (void*) current->flc_object, (void*) lr));
787 lock_inflate(current->flc_object, lr);
791 // Wake the waiting threads.
792 current->flc_cond->broadcast();
/* clearing flc_object releases the waiter from its spin/wait loop
   in sable_flc_waiting */
793 current->flc_object = NULL;
795 current = current->flc_next;
801 t->flc_lock->unlock();
804 /* lock_monitor_enter **********************************************************
806 Acquire the monitor of the given object. If the current thread already
807 owns the monitor, the lock counter is simply increased.
809 This function blocks until it can acquire the monitor.
812 t............the current thread
813 o............the object of which to enter the monitor
816 true.........the lock has been successfully acquired
817 false........an exception has been thrown
819 *******************************************************************************/
821 bool lock_monitor_enter(java_handle_t *o)
823 // This function is inside a critical section.
824 GCCriticalSection cs;
/* entering a monitor on null throws NullPointerException */
827 exceptions_throw_nullpointerexception();
831 threadobject* t = thread_get_current();
/* thinlock is this thread's pre-computed thin-lock word (thread
   index shifted into place, count 0, shape bit 0) */
833 uintptr_t thinlock = t->thinlock;
836 // Most common case: try to thin-lock an unlocked object.
837 Lockword* lockword = lock_lockword_get(o);
838 bool result = lockword->lock(thinlock);
840 if (result == true) {
841 // Success, we locked it.
842 // NOTE: The Java Memory Model requires an instruction barrier
843 // here (because of the CAS above).
844 Atomic::instruction_barrier();
848 // Next common case: recursive lock with small recursion count.
849 // NOTE: We don't have to worry about stale values here, as any
850 // stale value will indicate another thread holding the lock (or
851 // an inflated lock).
852 if (lockword->get_thin_lock_without_count() == thinlock) {
853 // We own this monitor. Check the current recursion count.
854 if (lockword->is_max_thin_lock_count() == false) {
855 // The recursion count is low enough.
856 lockword->increase_thin_lock_count();
858 // Success, we locked it.
862 // Recursion count overflow.
/* the count field can no longer represent the recursion depth:
   inflate to a fat lock, which counts in lr->count instead */
863 lock_record_t* lr = lock_hashtable_get(o);
864 lock_record_enter(t, lr);
868 notify_flc_waiters(t, o);
874 // The lock is either contented or fat.
875 if (lockword->is_fat_lock()) {
876 lock_record_t* lr = lockword->get_fat_lock();
878 // Check for recursive entering.
879 if (lr->owner == t) {
884 // Acquire the mutex of the lock record.
885 lock_record_enter(t, lr);
/* freshly acquired fat lock: recursion count must start at zero */
888 assert(lr->count == 0);
892 /****** inflation path ******/
894 #if defined(ENABLE_JVMTI)
895 /* Monitor Contended Enter */
896 jvmti_MonitorContendedEntering(false, o);
/* contended thin lock: wait on the owner's FLC list until the owner
   inflates the lock, then retry */
899 sable_flc_waiting(lockword, t, o);
901 #if defined(ENABLE_JVMTI)
902 /* Monitor Contended Entered */
903 jvmti_MonitorContendedEntering(true, o);
909 /* lock_monitor_exit ***********************************************************
911 Decrement the counter of a (currently owned) monitor. If the counter
912 reaches zero, release the monitor.
914 If the current thread is not the owner of the monitor, an
915 IllegalMonitorState exception is thrown.
918 t............the current thread
919 o............the object of which to exit the monitor
922 true.........everything ok,
923 false........an exception has been thrown
925 *******************************************************************************/
927 bool lock_monitor_exit(java_handle_t* o)
929 // This function is inside a critical section.
930 GCCriticalSection cs;
/* exiting a monitor on null throws NullPointerException */
933 exceptions_throw_nullpointerexception();
937 threadobject* t = thread_get_current();
939 uintptr_t thinlock = t->thinlock;
941 // We don't have to worry about stale values here, as any stale
942 // value will indicate that we don't own the lock.
943 Lockword* lockword = lock_lockword_get(o);
945 // Most common case: we release a thin lock that we hold once.
946 if (lockword->get_thin_lock() == thinlock) {
947 // Memory barrier for Java Memory Model.
948 Atomic::write_memory_barrier();
950 // Memory barrier for thin locking.
951 Atomic::memory_barrier();
953 /* check if there has been a flat lock contention on this object */
956 DEBUGLOCKS(("thread %d saw flc bit", t->index));
958 /* there has been a contention on this thin lock */
/* inflate the lock and wake the threads queued on our FLC list */
959 notify_flc_waiters(t, o);
965 // Next common case: we release a recursive lock, count > 0.
966 if (lockword->get_thin_lock_without_count() == thinlock) {
967 lockword->decrease_thin_lock_count();
971 // Either the lock is fat, or we don't hold it at all.
972 if (lockword->is_fat_lock()) {
973 lock_record_t* lr = lockword->get_fat_lock();
975 // Check if we own this monitor.
976 // NOTE: We don't have to worry about stale values here, as
977 // any stale value will be != t and thus fail this check.
978 if (lr->owner != t) {
979 exceptions_throw_illegalmonitorstateexception();
983 /* { the current thread `t` owns the lock record `lr` on object `o` } */
985 if (lr->count != 0) {
986 // We had locked this one recursively. Just decrement, it
987 // will still be locked.
992 // Unlock this lock record.
993 lock_record_exit(t, lr);
997 // Legal thin lock cases have been handled above, so this is an
/* illegal monitor state: the caller does not own the lock at all */
999 exceptions_throw_illegalmonitorstateexception();
1005 /* lock_record_add_waiter ******************************************************
1007 Add a thread to the list of waiting threads of a lock record.
1010 lr...........the lock record
1011 thread.......the thread to add
1013 *******************************************************************************/
1015 static void lock_record_add_waiter(lock_record_t *lr, threadobject* t)
1017 // Add the thread as last entry to waiters list.
1018 lr->waiters->push_back(t)
1020 #if defined(ENABLE_STATISTICS)
/* accounting only: track the memory consumed by waiter entries */
1022 size_lock_waiter += sizeof(threadobject*);
1027 /* lock_record_remove_waiter ***************************************************
1029 Remove a thread from the list of waiting threads of a lock record.
1032 lr...........the lock record
1033 t............the current thread
1036 The current thread must be the owner of the lock record.
1038 *******************************************************************************/
1040 static void lock_record_remove_waiter(lock_record_t *lr, threadobject* t)
1042 // Remove the thread from the waiters.
1043 lr->waiters->remove(t);
1045 #if defined(ENABLE_STATISTICS)
/* accounting only: mirrors the increment in lock_record_add_waiter */
1047 size_lock_waiter -= sizeof(threadobject*);
1052 /* lock_record_wait ************************************************************
1054 Wait on a lock record for a given (maximum) amount of time.
1057 t............the current thread
1058 lr...........the lock record
1059 millis.......milliseconds of timeout
1060 nanos........nanoseconds of timeout
1063 true.........we have been interrupted,
1064 false........everything ok
1067 The current thread must be the owner of the lock record.
1068 This is NOT checked by this function!
1070 *******************************************************************************/
1072 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1075 bool wasinterrupted = false;
1077 DEBUGLOCKS(("[lock_record_wait : lr=%p, t=%p, millis=%lld, nanos=%d]",
1078 lr, thread, millis, nanos));
1080 /* { the thread t owns the fat lock record lr on the object o } */
1082 /* register us as waiter for this object */
1084 lock_record_add_waiter(lr, thread);
1086 /* remember the old lock count */
/* Object.wait() must fully release a recursively-held monitor and
   restore the recursion depth afterwards */
1088 lockcount = lr->count;
1090 /* unlock this record */
1093 lock_record_exit(thread, lr);
1095 /* wait until notified/interrupted/timed out */
1097 threads_wait_with_timeout_relative(thread, millis, nanos);
1099 /* re-enter the monitor */
1101 lock_record_enter(thread, lr);
1103 /* remove us from the list of waiting threads */
1105 lock_record_remove_waiter(lr, thread);
1107 /* restore the old lock count */
1109 lr->count = lockcount;
1111 /* We can only be signaled OR interrupted, not both. If both flags
1112 are set, reset only signaled and leave the thread in
1113 interrupted state. Otherwise, clear both. */
1115 if (!thread->signaled) {
1116 wasinterrupted = thread->interrupted;
1117 thread->interrupted = false;
1120 thread->signaled = false;
1122 /* return if we have been interrupted */
1124 return wasinterrupted;
1128 /* lock_monitor_wait ***********************************************************
1130 Wait on an object for a given (maximum) amount of time.
1133 t............the current thread
1134 o............the object
1135 millis.......milliseconds of timeout
1136 nanos........nanoseconds of timeout
1139 The current thread must be the owner of the object's monitor.
1141 *******************************************************************************/
1143 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
1147 Lockword* lockword = lock_lockword_get(o);
1149 // Check if we own this monitor.
1150 // NOTE: We don't have to worry about stale values here, as any
1151 // stale value will fail this check.
1152 if (lockword->is_fat_lock()) {
1153 lr = lockword->get_fat_lock();
1155 if (lr->owner != t) {
1156 exceptions_throw_illegalmonitorstateexception();
1161 // It's a thin lock.
1162 if (lockword->get_thin_lock_without_count() != t->thinlock) {
1163 exceptions_throw_illegalmonitorstateexception();
1167 // Get the lock-record.
/* wait() needs a condition variable, so a thin lock must first be
   inflated to a fat lock record */
1168 lr = lock_hashtable_get(o);
1169 lock_record_enter(t, lr);
1171 // Inflate this lock.
1172 lockword->inflate(lr);
/* wake any threads queued on our FLC list before we start waiting */
1174 notify_flc_waiters(t, o);
1177 /* { the thread t owns the fat lock record lr on the object o } */
1179 if (lock_record_wait(t, lr, millis, nanos))
1180 exceptions_throw_interruptedexception();
1184 /* lock_record_notify **********************************************************
1186 Notify one thread or all threads waiting on the given lock record.
1189 t............the current thread
1190 lr...........the lock record
1191 one..........if true, only notify one thread
1194 The current thread must be the owner of the lock record.
1195 This is NOT checked by this function!
1197 *******************************************************************************/
1199 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1201 #if defined(ENABLE_GC_CACAO)
/* with the exact GC this must run outside a critical section because
   signaling may block */
1203 assert(GCCriticalSection::inside() == false);
1206 // { The thread t owns the fat lock record lr on the object o }
1208 for (List<threadobject*>::iterator it = lr->waiters->begin(); it != lr->waiters->end(); it++) {
1209 threadobject* waiter = *it;
1211 // We must skip threads which have already been notified. They
1212 // will remove themselves from the list.
1213 if (waiter->signaled)
1216 // Enter the wait-mutex.
1217 waiter->waitmutex->lock();
1219 DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, one=%d]", lr, t, waiter, one));
1221 // Signal the waiter.
1222 waiter->waitcond->signal();
1224 // Mark the thread as signaled.
1225 waiter->signaled = true;
1227 // Leave the wait-mutex.
1228 waiter->waitmutex->unlock();
1230 // If we should only wake one thread, we are done.
1237 /* lock_monitor_notify *********************************************************
1239 Notify one thread or all threads waiting on the given object.
1242 t............the current thread
1243 o............the object
1244 one..........if true, only notify one thread
1247 The current thread must be the owner of the object's monitor.
1249 *******************************************************************************/
1251 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1253 lock_record_t* lr = NULL;
1256 // This scope is inside a critical section.
1257 GCCriticalSection cs;
1259 Lockword* lockword = lock_lockword_get(o);
1261 // Check if we own this monitor.
1262 // NOTE: We don't have to worry about stale values here, as any
1263 // stale value will fail this check.
1265 if (lockword->is_fat_lock()) {
1266 lr = lockword->get_fat_lock();
1268 if (lr->owner != t) {
1269 exceptions_throw_illegalmonitorstateexception();
1274 // It's a thin lock.
1275 if (lockword->get_thin_lock_without_count() != t->thinlock) {
1276 exceptions_throw_illegalmonitorstateexception();
1280 // No thread can wait on a thin lock, so there's nothing to do.
/* the actual notification runs after the critical-section scope ends */
1285 // { The thread t owns the fat lock record lr on the object o }
1286 lock_record_notify(t, lr, one);
1291 /*============================================================================*/
1292 /* INQUIRY FUNCIONS */
1293 /*============================================================================*/
1296 /* lock_is_held_by_current_thread **********************************************
1298 Return true if the current thread owns the monitor of the given object.
1301 o............the object
1304 true, if the current thread holds the lock of this object.
1306 *******************************************************************************/
1308 bool lock_is_held_by_current_thread(java_handle_t *o)
1310 // This function is inside a critical section.
1311 GCCriticalSection cs;
1313 // Check if we own this monitor.
1314 // NOTE: We don't have to worry about stale values here, as any
1315 // stale value will fail this check.
1316 threadobject* t = thread_get_current();
1317 Lockword* lockword = lock_lockword_get(o);
1319 if (lockword->is_fat_lock()) {
/* fat lock: ownership is recorded explicitly in the lock record */
1321 lock_record_t* lr = lockword->get_fat_lock();
1322 return (lr->owner == t);
1325 // It's a thin lock.
/* thin lock: we own it iff the lockword (ignoring the recursion
   count) equals our pre-computed thin-lock word */
1326 return (lockword->get_thin_lock_without_count() == t->thinlock);
1332 /*============================================================================*/
1333 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1334 /*============================================================================*/
1337 /* lock_wait_for_object ********************************************************
1339 Wait for the given object.
1342 o............the object
1343 millis.......milliseconds to wait
1344 nanos........nanoseconds to wait
1346 *******************************************************************************/
1348 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1350 threadobject *thread;
/* convenience wrapper: supply the current thread implicitly */
1352 thread = THREADOBJECT;
1354 lock_monitor_wait(thread, o, millis, nanos);
1358 /* lock_notify_object **********************************************************
1360 Notify one thread waiting on the given object.
1363 o............the object
1365 *******************************************************************************/
1367 void lock_notify_object(java_handle_t *o)
1369 threadobject *thread;
1371 thread = THREADOBJECT;
/* one == true: wake a single waiter (Object.notify semantics) */
1373 lock_monitor_notify(thread, o, true);
1377 /* lock_notify_all_object ******************************************************
1379 Notify all threads waiting on the given object.
1382 o............the object
1384 *******************************************************************************/
1386 void lock_notify_all_object(java_handle_t *o)
1388 threadobject *thread;
1390 thread = THREADOBJECT;
/* one == false: wake every waiter (Object.notifyAll semantics) */
1392 lock_monitor_notify(thread, o, false);
1397 * These are local overrides for various environment variables in Emacs.
1398 * Please do not remove this and leave it at the end of the file, where
1399 * Emacs will automagically detect them.
1400 * ---------------------------------------------------------------------
1403 * indent-tabs-mode: t
1407 * vim:noexpandtab:sw=4:ts=4: