1 /* src/threads/lock.cpp - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007, 2008
4 CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
6 This file is part of CACAO.
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2, or (at
11 your option) any later version.
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
36 #include "mm/memory.h"
38 #include "native/llni.h"
40 #include "threads/atomic.hpp"
41 #include "threads/lock.hpp"
42 #include "threads/mutex.hpp"
43 #include "threads/threadlist.hpp"
44 #include "threads/thread.hpp"
46 #include "toolbox/list.hpp"
48 #include "vm/exceptions.hpp"
49 #include "vm/finalizer.hpp"
50 #include "vm/global.h"
51 #include "vm/options.h"
52 #include "vm/string.hpp"
55 #if defined(ENABLE_STATISTICS)
56 # include "vm/statistics.h"
59 #if defined(ENABLE_VMLOG)
60 #include <vmlog_cacao.h>
63 #if defined(ENABLE_JVMTI)
64 #include "native/jvmti/cacaodbg.h"
67 #if defined(ENABLE_GC_BOEHM)
68 # include "mm/boehm-gc/include/gc.h"
72 /* debug **********************************************************************/
/* DEBUGLOCKS((fmt, ...)): print a lock-tracing message when the
   opt_DebugLocks option is enabled; expands to nothing in builds
   without lock debugging. Takes a parenthesized printf-style tuple. */
75 # define DEBUGLOCKS(format) \
77 if (opt_DebugLocks) { \
82 # define DEBUGLOCKS(format)
86 /******************************************************************************/
88 /******************************************************************************/
90 /* number of lock records in the first pool allocated for a thread */
91 #define LOCK_INITIAL_LOCK_RECORDS 8
/* initial bucket count of the global object -> lock-record hashtable */
93 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
96 /******************************************************************************/
97 /* MACROS FOR THIN/FAT LOCKS */
98 /******************************************************************************/
100 /* We use a variant of the tasuki locks described in the paper
102 * Tamiya Onodera, Kiyokuni Kawachiya
103 * A Study of Locking Objects with Bimodal Fields
104 * Proceedings of the ACM OOPSLA '99, pp. 223-237
107 * The underlying thin locks are a variant of the thin locks described in
109 * Bacon, Konuru, Murthy, Serrano
110 * Thin Locks: Featherweight Synchronization for Java
111 * Proceedings of the ACM Conference on Programming Language Design and
112 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
115 * In thin lock mode the lockword looks like this:
117 * ,----------------------,-----------,---,
118 * | thread ID | count | 0 |
119 * `----------------------'-----------'---'
121 * thread ID......the 'index' of the owning thread, or 0
122 * count..........number of times the lock has been entered minus 1
123 * 0..............the shape bit is 0 in thin lock mode
125 * In fat lock mode it is basically a lock_record_t *:
127 * ,----------------------------------,---,
128 * | lock_record_t * (without LSB) | 1 |
129 * `----------------------------------'---'
131 * 1..............the shape bit is 1 in fat lock mode
134 /* global variables ***********************************************************/
136 /* hashtable mapping objects to lock records */
/* Shared by all threads; accesses are serialized via lock_hashtable.mutex
   (see lock_hashtable_get / lock_hashtable_remove below). */
137 static lock_hashtable_t lock_hashtable;
140 /******************************************************************************/
142 /******************************************************************************/
/* prototypes of file-local functions used before their definitions below */
144 static void lock_hashtable_init(void);
146 static inline Lockword* lock_lockword_get(java_handle_t* o);
147 static void lock_record_enter(threadobject *t, lock_record_t *lr);
148 static void lock_record_exit(threadobject *t, lock_record_t *lr);
149 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
150 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
153 /*============================================================================*/
154 /* INITIALIZATION OF DATA STRUCTURES */
155 /*============================================================================*/
158 /* lock_init *******************************************************************
160 Initialize global data for locking.
Sets up the object -> lock-record hashtable and, with ENABLE_VMLOG,
the vmlog lock. NOTE(review): presumably called once during VM
startup before any monitor operation -- confirm against caller.
162 *******************************************************************************/
166 /* initialize lock hashtable */
168 lock_hashtable_init();
170 #if defined(ENABLE_VMLOG)
171 vmlog_cacao_init_lock();
176 /* lock_record_new *************************************************************
178 Allocate a lock record.
Allocates on the C heap (NEW), creates the waiters list and the
per-record mutex, and (exact GC only) registers lr->object as a
weak reference so the GC can clear it when the object dies.
180 *******************************************************************************/
182 static lock_record_t *lock_record_new(void)
186 /* allocate the data structure on the C heap */
/* C-heap allocation: the record must survive GC of the Java heap */
188 lr = NEW(lock_record_t);
190 #if defined(ENABLE_STATISTICS)
192 size_lock_record += sizeof(lock_record_t);
195 /* initialize the members */
/* waiters: threads currently waiting (Object.wait) on this record */
200 lr->waiters = new List<threadobject*>();
202 #if defined(ENABLE_GC_CACAO)
203 /* register the lock object as weak reference with the GC */
205 gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
208 // Initialize the mutex.
/* this mutex implements the fat-lock monitor itself */
209 lr->mutex = new Mutex();
211 DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
217 /* lock_record_free ************************************************************
Free a lock record, releasing the resources it owns (mutex, waiters
list, GC weak reference); reverses the work of lock_record_new.
222 lr....lock record to free
224 *******************************************************************************/
226 static void lock_record_free(lock_record_t *lr)
228 DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
230 // Destroy the mutex.
233 #if defined(ENABLE_GC_CACAO)
234 /* unregister the lock object reference with the GC */
236 gc_weakreference_unregister(&(lr->object));
239 // Free the waiters list.
242 /* Free the data structure. */
/* FREE() returns the record to the C heap, mirroring NEW() above */
244 FREE(lr, lock_record_t);
246 #if defined(ENABLE_STATISTICS)
248 size_lock_record -= sizeof(lock_record_t);
253 /*============================================================================*/
254 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
255 /*============================================================================*/
257 /* lock_hashtable_init *********************************************************
259 Initialize the global hashtable mapping objects to lock records.
Creates the table mutex and a zeroed bucket array of
LOCK_INITIAL_HASHTABLE_SIZE lock_record_t* chain heads.
261 *******************************************************************************/
263 static void lock_hashtable_init(void)
265 lock_hashtable.mutex = new Mutex();
267 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
268 lock_hashtable.entries = 0;
/* bucket array of chain heads; zeroed by MZERO below */
269 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
271 #if defined(ENABLE_STATISTICS)
273 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
276 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
280 /* lock_hashtable_grow *********************************************************
282 Grow the lock record hashtable to about twice its current size and
285 *******************************************************************************/
287 /* must be called with hashtable mutex locked */
288 static void lock_hashtable_grow(void)
292 lock_record_t **oldtable;
293 lock_record_t **newtable;
300 /* allocate a new table */
302 oldsize = lock_hashtable.size;
303 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
305 DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
307 oldtable = lock_hashtable.ptr;
308 newtable = MNEW(lock_record_t *, newsize);
310 #if defined(ENABLE_STATISTICS)
312 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
315 MZERO(newtable, lock_record_t *, newsize);
317 /* rehash the entries */
/* every record must be re-bucketed since the slot is h % table size */
319 for (i = 0; i < oldsize; i++) {
324 h = heap_hashcode(lr->object);
325 newslot = h % newsize;
/* push onto the head of the new bucket's chain */
327 lr->hashlink = newtable[newslot];
328 newtable[newslot] = lr;
334 /* replace the old table */
336 lock_hashtable.ptr = newtable;
337 lock_hashtable.size = newsize;
339 MFREE(oldtable, lock_record_t *, oldsize);
341 #if defined(ENABLE_STATISTICS)
343 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
348 /* lock_hashtable_cleanup ******************************************************
350 Removes (and frees) lock records which have a cleared object reference
351 from the hashtable. The locked object was reclaimed by the GC.
Only compiled with the exact GC (ENABLE_GC_CACAO); the cleared
references are the weak refs registered in lock_record_new.
353 *******************************************************************************/
355 #if defined(ENABLE_GC_CACAO)
356 void lock_hashtable_cleanup(void)
366 /* lock the hashtable */
/* use the C++ Mutex member functions, consistent with the rest of
   this file (lock_hashtable_get, lock_hashtable_remove, ...) */
368 lock_hashtable.mutex->lock();
370 /* search the hashtable for cleared references */
372 for (i = 0; i < lock_hashtable.size; i++) {
373 lr = lock_hashtable.ptr[i];
379 /* remove lock records with cleared references */
/* NULL here means the GC cleared the weak reference to the object */
381 if (lr->object == NULL) {
383 /* unlink the lock record from the hashtable */
386 lock_hashtable.ptr[i] = next;
388 prev->hashlink = next;
390 /* free the lock record */
392 lock_record_free(lr);
402 /* unlock the hashtable */
404 lock_hashtable.mutex->unlock();
409 /* lock_hashtable_get **********************************************************
411 Find the lock record for the given object. If it does not exists,
412 yet, create it and enter it in the hashtable.
415 o....the object to look up
418 the lock record to use for this object
420 *******************************************************************************/
422 #if defined(ENABLE_GC_BOEHM)
423 static void lock_record_finalizer(void *object, void *p);
426 static lock_record_t *lock_hashtable_get(java_handle_t* o)
428 // This function is inside a critical section.
429 GCCriticalSection cs;
/* fast path: a fat lockword stores the record pointer directly,
   no hashtable lookup needed */
434 Lockword* lockword = lock_lockword_get(o);
436 if (lockword->is_fat_lock())
437 return lockword->get_fat_lock();
439 // Lock the hashtable.
440 lock_hashtable.mutex->lock();
442 /* lookup the lock record in the hashtable */
444 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
445 lr = lock_hashtable.ptr[slot];
447 for (; lr != NULL; lr = lr->hashlink) {
448 if (lr->object == LLNI_DIRECT(o))
453 /* not found, we must create a new one */
455 lr = lock_record_new();
457 lr->object = LLNI_DIRECT(o);
459 #if defined(ENABLE_GC_BOEHM)
460 /* register new finalizer to clean up the lock record */
/* with Boehm GC a finalizer frees the record when the object is
   collected (see lock_record_finalizer below) */
462 GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
465 /* enter it in the hashtable */
467 lr->hashlink = lock_hashtable.ptr[slot];
468 lock_hashtable.ptr[slot] = lr;
469 lock_hashtable.entries++;
471 /* check whether the hash should grow */
/* grow once the load factor (entries/size) exceeds 4/3 */
473 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
474 lock_hashtable_grow();
478 // Unlock the hashtable.
479 lock_hashtable.mutex->unlock();
481 /* return the new lock record */
487 /* lock_hashtable_remove *******************************************************
489 Remove the lock record for the given object from the hashtable
490 and free it afterwards.
493 t....the current thread
494 o....the object to look up
496 *******************************************************************************/
498 static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
502 lock_record_t *tmplr;
504 // Lock the hashtable.
505 lock_hashtable.mutex->lock();
507 /* get lock record */
509 Lockword* lockword = lock_lockword_get(o);
/* only a fat lock can have a record in the table */
512 assert(lockword->is_fat_lock());
514 lr = lockword->get_fat_lock();
516 /* remove the lock-record from the hashtable */
518 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
519 tmplr = lock_hashtable.ptr[slot];
522 /* special handling if it's the first in the chain */
524 lock_hashtable.ptr[slot] = lr->hashlink;
/* otherwise walk the chain and unlink lr from its predecessor */
527 for (; tmplr != NULL; tmplr = tmplr->hashlink) {
528 if (tmplr->hashlink == lr) {
529 tmplr->hashlink = lr->hashlink;
/* the record must have been present in this bucket's chain */
534 assert(tmplr != NULL);
537 /* decrease entry count */
539 lock_hashtable.entries--;
541 // Unlock the hashtable.
542 lock_hashtable.mutex->unlock();
544 /* free the lock record */
546 lock_record_free(lr);
550 /* lock_record_finalizer *******************************************************
Boehm-GC finalizer installed by lock_hashtable_get: runs the class's
own finalizer function (if any), then removes and frees the object's
lock record.
552 XXX Remove me for exact GC.
554 *******************************************************************************/
556 static void lock_record_finalizer(void *object, void *p)
561 o = (java_handle_t *) object;
563 #if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
564 /* XXX this is only a dirty hack to make Boehm work with handles */
566 o = LLNI_WRAP((java_object_t *) o);
569 LLNI_class_get(o, c);
572 if (opt_DebugFinalizer) {
574 log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
581 /* check for a finalizer function */
/* this finalizer replaced the object's own; delegate to it first */
583 if (c->finalizer != NULL)
584 finalizer_run(object, p);
586 /* remove the lock-record entry from the hashtable and free it */
588 lock_hashtable_remove(THREADOBJECT, o);
592 /*============================================================================*/
593 /* LOCKING ALGORITHM */
594 /*============================================================================*/
597 /* lock_lockword_get ***********************************************************
599 Get the lockword for the given object.
Returns a pointer into the object header itself.
602 o............the object
604 *******************************************************************************/
606 static inline Lockword* lock_lockword_get(java_handle_t* o)
608 #if defined(ENABLE_GC_CACAO)
/* with the exact GC the raw object pointer may only be dereferenced
   inside a GC critical section */
610 assert(GCCriticalSection::inside() == true);
613 return &(LLNI_DIRECT(o)->lockword);
617 /* lock_record_enter ***********************************************************
619 Enter the lock represented by the given lock record.
Blocks until the record can be acquired by the current thread.
622 t.................the current thread
623 lr................the lock record
625 *******************************************************************************/
627 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
634 /* lock_record_exit ************************************************************
636 Release the lock represented by the given lock record.
(Counterpart of lock_record_enter.)
639 t.................the current thread
640 lr................the lock record
643 The current thread must own the lock represented by this lock record.
644 This is NOT checked by this function!
646 *******************************************************************************/
648 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
655 /* lock_inflate ****************************************************************
657 Inflate the lock of the given object. This may only be called by the
658 owner of the monitor of the object.
661 o............the object of which to inflate the lock
662 lr...........the lock record to install. The current thread must
663 own the lock of this lock record!
666 The current thread must be the owner of this object's monitor AND
667 of the lock record's lock!
669 *******************************************************************************/
671 static void lock_inflate(java_handle_t *o, lock_record_t *lr)
/* switch the lockword from thin to fat mode: install the record
   pointer with the shape bit set (see Lockword::inflate) */
673 Lockword* lockword = lock_lockword_get(o);
674 lockword->inflate(lr);
678 static void sable_flc_waiting(Lockword *lockword, threadobject *t, java_handle_t *o)
/* Flat-lock-contention (FLC) slow path: the thin lock on `o` is held
   by another thread. Set the holder's flc_bit, enqueue ourselves on
   the holder's FLC list, and wait on t->flc_cond until the holder
   inflates the lock and wakes us (see notify_flc_waiters). */
681 threadobject *t_other;
684 index = lockword->get_thin_lock_thread_index();
685 t_other = ThreadList::get_thread_by_index(index);
687 // The lockword could have changed during our way here. If the
688 // thread index is zero, the lock got unlocked and we simply
691 /* failure, TODO: add statistics */
694 t_other->flc_lock->lock();
695 old_flc = t_other->flc_bit;
696 t_other->flc_bit = true;
698 DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d", t->index, t_other->index));
700 // Set FLC bit first, then read the lockword again.
/* the barrier orders the flc_bit store before the lockword re-read,
   so we cannot miss an unlock that happened in between */
701 Atomic::memory_barrier();
703 lockword = lock_lockword_get(o);
705 /* Lockword is still the way it was seen before */
706 if (lockword->is_thin_lock() && (lockword->get_thin_lock_thread_index() == index))
709 /* Add tuple (t, o) to the other thread's FLC list */
711 t->flc_next = t_other->flc_list;
712 t_other->flc_list = t;
713 if (t->flc_next == 0)
714 t_other->flc_tail = t;
715 f = t_other->flc_tail;
719 threadobject *current;
721 // Wait until another thread sees the flc bit and notifies
723 t->flc_cond->wait(t_other->flc_lock);
/* an unchanged tail means the list was not modified while waiting */
725 if (t_other->flc_tail != f)
727 /* Traverse FLC list looking if we're still there */
728 current = t_other->flc_list;
729 while (current && current != t)
730 current = current->flc_next;
732 /* not in list anymore, can stop waiting */
735 /* We are still in the list -- the other thread cannot have seen
737 assert(t_other->flc_bit);
740 t->flc_object = NULL; /* for garbage collector? */
744 t_other->flc_bit = old_flc;
746 t_other->flc_lock->unlock();
749 static void notify_flc_waiters(threadobject *t, java_handle_t *o)
/* Walk this thread's own FLC list: inflate the recorded objects'
   thin locks to fat locks and broadcast to the queued threads so
   they can retry entering on the fat lock (see sable_flc_waiting). */
751 threadobject *current;
755 current = t->flc_list;
758 if (current->flc_object != o)
760 /* The object has to be inflated so the other threads can properly
763 // Only if not already inflated.
764 Lockword* lockword = lock_lockword_get(current->flc_object);
765 if (lockword->is_thin_lock()) {
766 lock_record_t *lr = lock_hashtable_get(current->flc_object);
767 lock_record_enter(t, lr);
769 DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
770 t->index, (void*) current->flc_object, (void*) lr));
772 lock_inflate(current->flc_object, lr);
776 // Wake the waiting threads.
777 current->flc_cond->broadcast();
779 current = current->flc_next;
785 t->flc_lock->unlock();
788 /* lock_monitor_enter **********************************************************
790 Acquire the monitor of the given object. If the current thread already
791 owns the monitor, the lock counter is simply increased.
793 This function blocks until it can acquire the monitor.
796 t............the current thread
797 o............the object of which to enter the monitor
800 true.........the lock has been successfully acquired
801 false........an exception has been thrown
803 *******************************************************************************/
805 bool lock_monitor_enter(java_handle_t *o)
807 // This function is inside a critical section.
808 GCCriticalSection cs;
/* entering the monitor of null raises NullPointerException */
811 exceptions_throw_nullpointerexception();
815 threadobject* t = thread_get_current();
/* this thread's precomputed thin-lock word (encodes its index) */
817 uintptr_t thinlock = t->thinlock;
820 // Most common case: try to thin-lock an unlocked object.
821 Lockword* lockword = lock_lockword_get(o);
/* Lockword::lock() attempts an atomic CAS from "unlocked" to our
   thin-lock word */
822 bool result = lockword->lock(thinlock);
824 if (result == true) {
825 // Success, we locked it.
826 // NOTE: The Java Memory Model requires an instruction barrier
827 // here (because of the CAS above).
828 Atomic::instruction_barrier();
832 // Next common case: recursive lock with small recursion count.
833 // NOTE: We don't have to worry about stale values here, as any
834 // stale value will indicate another thread holding the lock (or
835 // an inflated lock).
836 if (lockword->get_thin_lock_without_count() == thinlock) {
837 // We own this monitor. Check the current recursion count.
838 if (lockword->is_max_thin_lock_count() == false) {
839 // The recursion count is low enough.
840 lockword->increase_thin_lock_count();
842 // Success, we locked it.
846 // Recursion count overflow.
/* the thin count field is exhausted: switch to a fat lock which
   keeps the recursion count in the lock record */
847 lock_record_t* lr = lock_hashtable_get(o);
848 lock_record_enter(t, lr);
852 notify_flc_waiters(t, o);
858 // The lock is either contented or fat.
859 if (lockword->is_fat_lock()) {
860 lock_record_t* lr = lockword->get_fat_lock();
862 // Check for recursive entering.
863 if (lr->owner == t) {
868 // Acquire the mutex of the lock record.
869 lock_record_enter(t, lr);
/* freshly acquired fat lock: no recursion yet */
872 assert(lr->count == 0);
876 /****** inflation path ******/
878 #if defined(ENABLE_JVMTI)
879 /* Monitor Contended Enter */
880 jvmti_MonitorContendedEntering(false, o);
/* thin lock held by another thread: block via the FLC protocol */
883 sable_flc_waiting(lockword, t, o);
885 #if defined(ENABLE_JVMTI)
886 /* Monitor Contended Entered */
887 jvmti_MonitorContendedEntering(true, o);
893 /* lock_monitor_exit ***********************************************************
895 Decrement the counter of a (currently owned) monitor. If the counter
896 reaches zero, release the monitor.
898 If the current thread is not the owner of the monitor, an
899 IllegalMonitorState exception is thrown.
902 t............the current thread
903 o............the object of which to exit the monitor
906 true.........everything ok,
907 false........an exception has been thrown
909 *******************************************************************************/
911 bool lock_monitor_exit(java_handle_t* o)
913 // This function is inside a critical section.
914 GCCriticalSection cs;
/* exiting the monitor of null raises NullPointerException */
917 exceptions_throw_nullpointerexception();
921 threadobject* t = thread_get_current();
923 uintptr_t thinlock = t->thinlock;
925 // We don't have to worry about stale values here, as any stale
926 // value will indicate that we don't own the lock.
927 Lockword* lockword = lock_lockword_get(o);
929 // Most common case: we release a thin lock that we hold once.
930 if (lockword->get_thin_lock() == thinlock) {
931 // Memory barrier for Java Memory Model.
932 Atomic::write_memory_barrier();
934 // Memory barrier for thin locking.
/* NOTE(review): presumably pairs with the barrier in
   sable_flc_waiting so the unlock is visible before the flc-bit
   test below -- confirm */
935 Atomic::memory_barrier();
937 /* check if there has been a flat lock contention on this object */
940 DEBUGLOCKS(("thread %d saw flc bit", t->index));
942 /* there has been a contention on this thin lock */
943 notify_flc_waiters(t, o);
949 // Next common case: we release a recursive lock, count > 0.
950 if (lockword->get_thin_lock_without_count() == thinlock) {
951 lockword->decrease_thin_lock_count();
955 // Either the lock is fat, or we don't hold it at all.
956 if (lockword->is_fat_lock()) {
957 lock_record_t* lr = lockword->get_fat_lock();
959 // Check if we own this monitor.
960 // NOTE: We don't have to worry about stale values here, as
961 // any stale value will be != t and thus fail this check.
962 if (lr->owner != t) {
963 exceptions_throw_illegalmonitorstateexception();
967 /* { the current thread `t` owns the lock record `lr` on object `o` } */
969 if (lr->count != 0) {
970 // We had locked this one recursively. Just decrement, it
971 // will still be locked.
976 // Unlock this lock record.
977 lock_record_exit(t, lr);
981 // Legal thin lock cases have been handled above, so this is an
983 exceptions_throw_illegalmonitorstateexception();
989 /* lock_record_add_waiter ******************************************************
991 Add a thread to the list of waiting threads of a lock record.
994 lr...........the lock record
995 thread.......the thread to add
997 *******************************************************************************/
999 static void lock_record_add_waiter(lock_record_t *lr, threadobject* t)
1001 // Add the thread as last entry to waiters list.
1002 lr->waiters->push_back(t);
1004 #if defined(ENABLE_STATISTICS)
/* account the memory attributed to waiter-list entries */
1006 size_lock_waiter += sizeof(threadobject*);
1011 /* lock_record_remove_waiter ***************************************************
1013 Remove a thread from the list of waiting threads of a lock record.
1016 lr...........the lock record
1017 t............the current thread
1020 The current thread must be the owner of the lock record.
1022 *******************************************************************************/
1024 static void lock_record_remove_waiter(lock_record_t *lr, threadobject* t)
1026 // Remove the thread from the waiters.
1027 lr->waiters->remove(t)
1029 #if defined(ENABLE_STATISTICS)
/* mirror the accounting done in lock_record_add_waiter */
1031 size_lock_waiter -= sizeof(threadobject*);
1036 /* lock_record_wait ************************************************************
1038 Wait on a lock record for a given (maximum) amount of time.
1041 t............the current thread
1042 lr...........the lock record
1043 millis.......milliseconds of timeout
1044 nanos........nanoseconds of timeout
1047 true.........we have been interrupted,
1048 false........everything ok
1051 The current thread must be the owner of the lock record.
1052 This is NOT checked by this function!
1054 *******************************************************************************/
1056 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1059 bool wasinterrupted = false;
1061 DEBUGLOCKS(("[lock_record_wait : lr=%p, t=%p, millis=%lld, nanos=%d]",
1062 lr, thread, millis, nanos));
1064 /* { the thread t owns the fat lock record lr on the object o } */
1066 /* register us as waiter for this object */
1068 lock_record_add_waiter(lr, thread);
1070 /* remember the old lock count */
/* the recursion count must survive the exit/enter pair below */
1072 lockcount = lr->count;
1074 /* unlock this record */
1077 lock_record_exit(thread, lr);
1079 /* wait until notified/interrupted/timed out */
1081 threads_wait_with_timeout_relative(thread, millis, nanos);
1083 /* re-enter the monitor */
1085 lock_record_enter(thread, lr);
1087 /* remove us from the list of waiting threads */
1089 lock_record_remove_waiter(lr, thread);
1091 /* restore the old lock count */
1093 lr->count = lockcount;
1095 /* We can only be signaled OR interrupted, not both. If both flags
1096 are set, reset only signaled and leave the thread in
1097 interrupted state. Otherwise, clear both. */
1099 if (!thread->signaled) {
1100 wasinterrupted = thread->interrupted;
1101 thread->interrupted = false;
1104 thread->signaled = false;
1106 /* return if we have been interrupted */
1108 return wasinterrupted;
1112 /* lock_monitor_wait ***********************************************************
1114 Wait on an object for a given (maximum) amount of time.
1117 t............the current thread
1118 o............the object
1119 millis.......milliseconds of timeout
1120 nanos........nanoseconds of timeout
1123 The current thread must be the owner of the object's monitor.
1125 *******************************************************************************/
1127 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
1131 Lockword* lockword = lock_lockword_get(o);
1133 // Check if we own this monitor.
1134 // NOTE: We don't have to worry about stale values here, as any
1135 // stale value will fail this check.
1136 if (lockword->is_fat_lock()) {
1137 lr = lockword->get_fat_lock();
1139 if (lr->owner != t) {
1140 exceptions_throw_illegalmonitorstateexception();
1145 // It's a thin lock.
1146 if (lockword->get_thin_lock_without_count() != t->thinlock) {
1147 exceptions_throw_illegalmonitorstateexception();
1151 // Get the lock-record.
/* waiting requires a fat lock, so a held thin lock is inflated here */
1152 lr = lock_hashtable_get(o);
1153 lock_record_enter(t, lr);
1155 // Inflate this lock.
1156 lockword->inflate(lr);
1158 notify_flc_waiters(t, o);
1161 /* { the thread t owns the fat lock record lr on the object o } */
/* a true result means we were interrupted, not merely notified */
1163 if (lock_record_wait(t, lr, millis, nanos))
1164 exceptions_throw_interruptedexception();
1168 /* lock_record_notify **********************************************************
1170 Notify one thread or all threads waiting on the given lock record.
1173 t............the current thread
1174 lr...........the lock record
1175 one..........if true, only notify one thread
1178 The current thread must be the owner of the lock record.
1179 This is NOT checked by this function!
1181 *******************************************************************************/
1183 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1185 #if defined(ENABLE_GC_CACAO)
/* NOTE(review): asserts we are outside a GC critical section --
   presumably because the waitmutex operations below may block;
   confirm */
1187 assert(GCCriticalSection::inside() == false);
1190 // { The thread t owns the fat lock record lr on the object o }
1192 for (List<threadobject*>::iterator it = lr->waiters->begin(); it != lr->waiters->end(); it++) {
1193 threadobject* waiter = *it;
1195 // We must skip threads which have already been notified. They
1196 // will remove themselves from the list.
1197 if (waiter->signaled)
1200 // Enter the wait-mutex.
1201 waiter->waitmutex->lock();
1203 DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, one=%d]", lr, t, waiter, one));
1205 // Signal the waiter.
1206 waiter->waitcond->signal();
1208 // Mark the thread as signaled.
1209 waiter->signaled = true;
1211 // Leave the wait-mutex.
1212 waiter->waitmutex->unlock();
1214 // If we should only wake one thread, we are done.
1221 /* lock_monitor_notify *********************************************************
1223 Notify one thread or all threads waiting on the given object.
1226 t............the current thread
1227 o............the object
1228 one..........if true, only notify one thread
1231 The current thread must be the owner of the object's monitor.
1233 *******************************************************************************/
1235 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1237 lock_record_t* lr = NULL;
1240 // This scope is inside a critical section.
/* the critical section is scoped so that lock_record_notify below
   runs outside of it (it asserts !GCCriticalSection::inside()) */
1241 GCCriticalSection cs;
1243 Lockword* lockword = lock_lockword_get(o);
1245 // Check if we own this monitor.
1246 // NOTE: We don't have to worry about stale values here, as any
1247 // stale value will fail this check.
1249 if (lockword->is_fat_lock()) {
1250 lr = lockword->get_fat_lock();
1252 if (lr->owner != t) {
1253 exceptions_throw_illegalmonitorstateexception();
1258 // It's a thin lock.
1259 if (lockword->get_thin_lock_without_count() != t->thinlock) {
1260 exceptions_throw_illegalmonitorstateexception();
1264 // No thread can wait on a thin lock, so there's nothing to do.
1269 // { The thread t owns the fat lock record lr on the object o }
1270 lock_record_notify(t, lr, one);
1275 /*============================================================================*/
1276 /* INQUIRY FUNCTIONS */
1277 /*============================================================================*/
1280 /* lock_is_held_by_current_thread **********************************************
1282 Return true if the current thread owns the monitor of the given object.
1285 o............the object
1288 true, if the current thread holds the lock of this object.
1290 *******************************************************************************/
1292 bool lock_is_held_by_current_thread(java_handle_t *o)
1294 // This function is inside a critical section.
1295 GCCriticalSection cs;
1297 // Check if we own this monitor.
1298 // NOTE: We don't have to worry about stale values here, as any
1299 // stale value will fail this check.
1300 threadobject* t = thread_get_current();
1301 Lockword* lockword = lock_lockword_get(o);
1303 if (lockword->is_fat_lock()) {
/* fat lock: ownership is recorded in the lock record */
1305 lock_record_t* lr = lockword->get_fat_lock();
1306 return (lr->owner == t);
1309 // It's a thin lock.
/* thin lock: the lockword (count stripped) encodes the owner */
1310 return (lockword->get_thin_lock_without_count() == t->thinlock);
1316 /*============================================================================*/
1317 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1318 /*============================================================================*/
1321 /* lock_wait_for_object ********************************************************
1323 Wait for the given object.
Convenience wrapper: calls lock_monitor_wait for the current thread.
1326 o............the object
1327 millis.......milliseconds to wait
1328 nanos........nanoseconds to wait
1330 *******************************************************************************/
1332 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1334 threadobject *thread;
1336 thread = THREADOBJECT;
1338 lock_monitor_wait(thread, o, millis, nanos);
1342 /* lock_notify_object **********************************************************
1344 Notify one thread waiting on the given object.
Convenience wrapper: lock_monitor_notify with one == true.
1347 o............the object
1349 *******************************************************************************/
1351 void lock_notify_object(java_handle_t *o)
1353 threadobject *thread;
1355 thread = THREADOBJECT;
1357 lock_monitor_notify(thread, o, true);
1361 /* lock_notify_all_object ******************************************************
1363 Notify all threads waiting on the given object.
Convenience wrapper: lock_monitor_notify with one == false.
1366 o............the object
1368 *******************************************************************************/
1370 void lock_notify_all_object(java_handle_t *o)
1372 threadobject *thread;
1374 thread = THREADOBJECT;
1376 lock_monitor_notify(thread, o, false);
1381 * These are local overrides for various environment variables in Emacs.
1382 * Please do not remove this and leave it at the end of the file, where
1383 * Emacs will automagically detect them.
1384 * ---------------------------------------------------------------------
1387 * indent-tabs-mode: t
1391 * vim:noexpandtab:sw=4:ts=4: