1 /* src/threads/lock.cpp - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007, 2008
4 CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
6 This file is part of CACAO.
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2, or (at
11 your option) any later version.
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
36 #include "mm/memory.h"
38 #include "native/llni.h"
40 #include "threads/atomic.hpp"
41 #include "threads/lock.hpp"
42 #include "threads/mutex.hpp"
43 #include "threads/threadlist.hpp"
44 #include "threads/thread.hpp"
46 #include "toolbox/list.hpp"
48 #include "vm/exceptions.hpp"
49 #include "vm/finalizer.h"
50 #include "vm/global.h"
51 #include "vm/options.h"
52 #include "vm/string.hpp"
55 #if defined(ENABLE_STATISTICS)
56 # include "vm/statistics.h"
59 #if defined(ENABLE_VMLOG)
60 #include <vmlog_cacao.h>
63 #if defined(ENABLE_JVMTI)
64 #include "native/jvmti/cacaodbg.h"
67 #if defined(ENABLE_GC_BOEHM)
68 # include "mm/boehm-gc/include/gc.h"
72 /* debug **********************************************************************/
/* NOTE(review): sampled dump -- the enclosing #if !defined(NDEBUG) / #else /
   #endif lines and the macro body's logging statement are not visible here.
   The first definition (guarded by opt_DebugLocks) is the debug variant; the
   second, empty definition is the release variant. */
75 # define DEBUGLOCKS(format) \
77 if (opt_DebugLocks) { \
82 # define DEBUGLOCKS(format)
86 /******************************************************************************/
88 /******************************************************************************/
90 /* number of lock records in the first pool allocated for a thread */
91 #define LOCK_INITIAL_LOCK_RECORDS 8
93 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
96 /******************************************************************************/
97 /* MACROS FOR THIN/FAT LOCKS */
98 /******************************************************************************/
100 /* We use a variant of the tasuki locks described in the paper
102 * Tamiya Onodera, Kiyokuni Kawachiya
103 * A Study of Locking Objects with Bimodal Fields
104 * Proceedings of the ACM OOPSLA '99, pp. 223-237
107 * The underlying thin locks are a variant of the thin locks described in
109 * Bacon, Konuru, Murthy, Serrano
110 * Thin Locks: Featherweight Synchronization for Java
111 * Proceedings of the ACM Conference on Programming Language Design and
112 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
115 * In thin lock mode the lockword looks like this:
117 * ,----------------------,-----------,---,
118 * | thread ID | count | 0 |
119 * `----------------------'-----------'---'
121 * thread ID......the 'index' of the owning thread, or 0
122 * count..........number of times the lock has been entered minus 1
123 * 0..............the shape bit is 0 in thin lock mode
125 * In fat lock mode it is basically a lock_record_t *:
127 * ,----------------------------------,---,
128 * | lock_record_t * (without LSB) | 1 |
129 * `----------------------------------'---'
131 * 1..............the shape bit is 1 in fat lock mode
134 /* global variables ***********************************************************/
136 /* hashtable mapping objects to lock records */
137 static lock_hashtable_t lock_hashtable;
140 /******************************************************************************/
142 /******************************************************************************/
144 static void lock_hashtable_init(void);
146 static inline Lockword* lock_lockword_get(java_handle_t* o);
147 static void lock_record_enter(threadobject *t, lock_record_t *lr);
148 static void lock_record_exit(threadobject *t, lock_record_t *lr);
149 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
150 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
153 /*============================================================================*/
154 /* INITIALIZATION OF DATA STRUCTURES */
155 /*============================================================================*/
158 /* lock_init *******************************************************************
160 Initialize global data for locking.
162 *******************************************************************************/
/* NOTE(review): the `void lock_init(void)` signature and braces are missing
   from this sampled view; only the body fragments below are visible.
   Sets up the object->lock-record hashtable and, when vmlog support is
   compiled in, the vmlog lock instrumentation. */
166 /* initialize lock hashtable */
168 lock_hashtable_init();
170 #if defined(ENABLE_VMLOG)
171 vmlog_cacao_init_lock();
176 /* lock_record_new *************************************************************
178 Allocate a lock record.
180 *******************************************************************************/
/* lock_record_new: allocate a fat-lock record on the C heap (NEW) and
   initialize its waiters list and mutex.
   NOTE(review): sampled dump -- the declaration of `lr`, some member
   initializations (owner/count/object, presumably), the #endif lines and
   the final `return lr;` are not visible here; confirm against full source. */
182 static lock_record_t *lock_record_new(void)
186 /* allocate the data structure on the C heap */
188 lr = NEW(lock_record_t);
190 #if defined(ENABLE_STATISTICS)
/* account this allocation in the global lock-record statistics */
192 size_lock_record += sizeof(lock_record_t);
195 /* initialize the members */
200 lr->waiters = new List<threadobject*>();
202 #if defined(ENABLE_GC_CACAO)
203 /* register the lock object as weak reference with the GC */
205 gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
208 // Initialize the mutex.
209 lr->mutex = new Mutex();
/* trace the freshly created record when lock debugging is enabled */
211 DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
217 /* lock_record_free ************************************************************
222 lr....lock record to free
224 *******************************************************************************/
/* lock_record_free: release everything lock_record_new() acquired --
   mutex, GC weak reference (exact GC only), waiters list, and the record
   itself (FREE).
   NOTE(review): sampled dump -- the `delete lr->mutex;`, `delete lr->waiters;`
   and #endif lines are not visible here. */
226 static void lock_record_free(lock_record_t *lr)
228 DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
230 // Destroy the mutex.
233 #if defined(ENABLE_GC_CACAO)
234 /* unregister the lock object reference with the GC */
236 gc_weakreference_unregister(&(lr->object));
239 // Free the waiters list.
242 /* Free the data structure. */
244 FREE(lr, lock_record_t);
246 #if defined(ENABLE_STATISTICS)
/* keep the statistics symmetric with lock_record_new */
248 size_lock_record -= sizeof(lock_record_t);
253 /*============================================================================*/
254 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
255 /*============================================================================*/
257 /* lock_hashtable_init *********************************************************
259 Initialize the global hashtable mapping objects to lock records.
261 *******************************************************************************/
/* lock_hashtable_init: create the global object->lock-record hashtable:
   its guarding mutex, an initial prime-sized slot array (MNEW), and a
   zero entry count; the slot array is zero-filled so every chain starts
   empty. */
263 static void lock_hashtable_init(void)
265 lock_hashtable.mutex = new Mutex();
267 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
268 lock_hashtable.entries = 0;
269 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
271 #if defined(ENABLE_STATISTICS)
273 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
276 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
280 /* lock_hashtable_grow *********************************************************
282 Grow the lock record hashtable to about twice its current size and
285 *******************************************************************************/
287 /* must be called with hashtable mutex locked */
/* lock_hashtable_grow: double (+1) the slot count, rehash every record by
   heap_hashcode(lr->object) into the new table, then swap tables and free
   the old slot array. Entry count is unchanged -- only the bucket layout.
   NOTE(review): sampled dump -- local declarations (i, h, slots, lr/next)
   and the inner per-chain loop lines are not visible here. */
288 static void lock_hashtable_grow(void)
292 lock_record_t **oldtable;
293 lock_record_t **newtable;
300 /* allocate a new table */
302 oldsize = lock_hashtable.size;
303 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
305 DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
307 oldtable = lock_hashtable.ptr;
308 newtable = MNEW(lock_record_t *, newsize);
310 #if defined(ENABLE_STATISTICS)
312 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
315 MZERO(newtable, lock_record_t *, newsize);
317 /* rehash the entries */
319 for (i = 0; i < oldsize; i++) {
324 h = heap_hashcode(lr->object);
325 newslot = h % newsize;
/* push the record onto the head of its new chain */
327 lr->hashlink = newtable[newslot];
328 newtable[newslot] = lr;
334 /* replace the old table */
336 lock_hashtable.ptr = newtable;
337 lock_hashtable.size = newsize;
339 MFREE(oldtable, lock_record_t *, oldsize);
341 #if defined(ENABLE_STATISTICS)
343 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
348 /* lock_hashtable_cleanup ******************************************************
350 Removes (and frees) lock records which have a cleared object reference
351 from the hashtable. The locked object was reclaimed by the GC.
353 *******************************************************************************/
355 #if defined(ENABLE_GC_CACAO)
/* lock_hashtable_cleanup (exact GC only): walk every chain and unlink/free
   records whose weak object reference was cleared by the GC.
   NOTE(review): sampled dump -- locals (i, lr, prev, next), the chain-walk
   loop header and the prev==NULL branch structure are not visible here.
   Also note the C-style Mutex_lock()/Mutex_unlock() calls, unlike the
   `->lock()` method style used elsewhere in this file -- verify this
   compiles in the ENABLE_GC_CACAO configuration. */
356 void lock_hashtable_cleanup(void)
366 /* lock the hashtable */
368 Mutex_lock(lock_hashtable.mutex);
370 /* search the hashtable for cleared references */
372 for (i = 0; i < lock_hashtable.size; i++) {
373 lr = lock_hashtable.ptr[i];
379 /* remove lock records with cleared references */
381 if (lr->object == NULL) {
383 /* unlink the lock record from the hashtable */
/* head-of-chain case: the slot pointer is rewired directly */
386 lock_hashtable.ptr[i] = next;
388 prev->hashlink = next;
390 /* free the lock record */
392 lock_record_free(lr);
402 /* unlock the hashtable */
404 Mutex_unlock(lock_hashtable.mutex);
409 /* lock_hashtable_get **********************************************************
411 Find the lock record for the given object. If it does not exists,
412 yet, create it and enter it in the hashtable.
415 o....the object to look up
418 the lock record to use for this object
420 *******************************************************************************/
422 #if defined(ENABLE_GC_BOEHM)
423 static void lock_record_finalizer(void *object, void *p);
/* lock_hashtable_get: return the lock record for object o, creating and
   inserting one under the hashtable mutex if none exists yet. Fast path:
   if the lockword is already fat, return its record without touching the
   table. Grows the table when entries exceed 3/4 load (entries*3 > size*4
   is entries/size > 4/3 -- chains are allowed to be longer than 1).
   NOTE(review): sampled dump -- locals (slot, lr), the loop's early-return
   on a found record, unlock-on-hit, #endif lines and the final `return lr;`
   are not visible here. */
426 static lock_record_t *lock_hashtable_get(java_handle_t* o)
428 // This function is inside a critical section.
429 GCCriticalSection cs;
434 Lockword* lockword = lock_lockword_get(o);
/* fast path: already inflated, the lockword itself stores the record */
436 if (lockword->is_fat_lock())
437 return lockword->get_fat_lock();
439 // Lock the hashtable.
440 lock_hashtable.mutex->lock();
442 /* lookup the lock record in the hashtable */
444 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
445 lr = lock_hashtable.ptr[slot];
447 for (; lr != NULL; lr = lr->hashlink) {
448 if (lr->object == LLNI_DIRECT(o))
453 /* not found, we must create a new one */
455 lr = lock_record_new();
457 lr->object = LLNI_DIRECT(o);
459 #if defined(ENABLE_GC_BOEHM)
460 /* register new finalizer to clean up the lock record */
462 GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
465 /* enter it in the hashtable */
467 lr->hashlink = lock_hashtable.ptr[slot];
468 lock_hashtable.ptr[slot] = lr;
469 lock_hashtable.entries++;
471 /* check whether the hash should grow */
473 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
474 lock_hashtable_grow();
478 // Unlock the hashtable.
479 lock_hashtable.mutex->unlock();
481 /* return the new lock record */
487 /* lock_hashtable_remove *******************************************************
489 Remove the lock record for the given object from the hashtable
490 and free it afterwards.
493 t....the current thread
494 o....the object to look up
496 *******************************************************************************/
/* lock_hashtable_remove: unlink the (necessarily fat) lock record of o
   from its hash chain under the table mutex, then free the record. Used
   from the Boehm finalizer path once the object is reclaimed.
   NOTE(review): sampled dump -- locals (lr, slot), the `t` parameter's use
   (possibly unused here), the head-of-chain `if` condition and loop braces
   are not visible here. */
498 static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
502 lock_record_t *tmplr;
504 // Lock the hashtable.
505 lock_hashtable.mutex->lock();
507 /* get lock record */
509 Lockword* lockword = lock_lockword_get(o);
/* only inflated locks live in the hashtable */
512 assert(lockword->is_fat_lock());
514 lr = lockword->get_fat_lock();
516 /* remove the lock-record from the hashtable */
518 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
519 tmplr = lock_hashtable.ptr[slot];
522 /* special handling if it's the first in the chain */
524 lock_hashtable.ptr[slot] = lr->hashlink;
527 for (; tmplr != NULL; tmplr = tmplr->hashlink) {
528 if (tmplr->hashlink == lr) {
529 tmplr->hashlink = lr->hashlink;
/* the record must have been found somewhere in the chain */
534 assert(tmplr != NULL);
537 /* decrease entry count */
539 lock_hashtable.entries--;
541 // Unlock the hashtable.
542 lock_hashtable.mutex->unlock();
544 /* free the lock record */
546 lock_record_free(lr);
550 /* lock_record_finalizer *******************************************************
552 XXX Remove me for exact GC.
554 *******************************************************************************/
/* lock_record_finalizer (Boehm GC callback): run the object's own Java
   finalizer if it has one, then drop its lock-record hashtable entry.
   XXX per the header comment above: remove for exact GC.
   NOTE(review): sampled dump -- declarations of `o`/`c`, #endif lines,
   and the class_print/log lines of the debug branch are not visible. */
556 static void lock_record_finalizer(void *object, void *p)
561 o = (java_handle_t *) object;
563 #if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
564 /* XXX this is only a dirty hack to make Boehm work with handles */
566 o = LLNI_WRAP((java_object_t *) o);
569 LLNI_class_get(o, c);
572 if (opt_DebugFinalizer) {
574 log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
581 /* check for a finalizer function */
583 if (c->finalizer != NULL)
584 finalizer_run(object, p);
586 /* remove the lock-record entry from the hashtable and free it */
588 lock_hashtable_remove(THREADOBJECT, o);
592 /*============================================================================*/
593 /* LOCKING ALGORITHM */
594 /*============================================================================*/
597 /* lock_lockword_get ***********************************************************
599 Get the lockword for the given object.
602 o............the object
604 *******************************************************************************/
/* lock_lockword_get: return a pointer to the lockword embedded in the raw
   object behind handle o. With exact GC this must run inside a GC critical
   section, since the raw pointer must not move while in use. */
606 static inline Lockword* lock_lockword_get(java_handle_t* o)
608 #if defined(ENABLE_GC_CACAO)
610 assert(GCCriticalSection::inside() == true);
613 return &(LLNI_DIRECT(o)->lockword);
617 /* lock_record_enter ***********************************************************
619 Enter the lock represented by the given lock record.
622 t.................the current thread
623 lr................the lock record
625 *******************************************************************************/
/* lock_record_enter: block until thread t owns lr's monitor.
   NOTE(review): the body is missing from this sampled dump -- presumably it
   locks lr->mutex and records t as lr->owner; confirm against full source. */
627 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
634 /* lock_record_exit ************************************************************
636 Release the lock represented by the given lock record.
639 t.................the current thread
640 lr................the lock record
643 The current thread must own the lock represented by this lock record.
644 This is NOT checked by this function!
646 *******************************************************************************/
/* lock_record_exit: release lr's monitor; per the header comment above, the
   caller MUST own it -- this is not checked.
   NOTE(review): the body is missing from this sampled dump -- presumably it
   clears lr->owner and unlocks lr->mutex; confirm against full source. */
648 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
655 /* lock_inflate ****************************************************************
657 Inflate the lock of the given object. This may only be called by the
658 owner of the monitor of the object.
661 o............the object of which to inflate the lock
662 lr...........the lock record to install. The current thread must
663 own the lock of this lock record!
666 The current thread must be the owner of this object's monitor AND
667 of the lock record's lock!
669 *******************************************************************************/
/* lock_inflate: switch o's lockword from thin to fat mode by installing lr
   (Lockword::inflate sets the record pointer with the shape bit). Per the
   header comment above, the caller must own both o's monitor and lr's lock. */
671 static void lock_inflate(java_handle_t *o, lock_record_t *lr)
673 Lockword* lockword = lock_lockword_get(o);
674 lockword->inflate(lr);
/* sable_flc_waiting: contended thin-lock path (tasuki/SableVM-style flat
   lock contention). Under the holder's flc_lock, set the holder's flc_bit,
   re-read the lockword, and -- if the holder still owns the thin lock --
   enqueue (t, o) on the holder's FLC list and wait on t->flc_cond until a
   releasing thread inflates the lock and broadcasts (notify_flc_waiters).
   NOTE(review): sampled dump -- locals (index, old_flc), the early-return
   when index == 0, t->flc_object assignment, the retry/loop braces and the
   exit from the wait loop are not visible here. */
678 static void sable_flc_waiting(Lockword *lockword, threadobject *t, java_handle_t *o)
681 threadobject *t_other;
684 index = lockword->get_thin_lock_thread_index();
685 t_other = ThreadList::get_thread_by_index(index);
687 // The lockword could have changed during our way here. If the
688 // thread index is zero, the lock got unlocked and we simply
691 /* failure, TODO: add statistics */
694 t_other->flc_lock->lock();
695 old_flc = t_other->flc_bit;
696 t_other->flc_bit = true;
698 DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d", t->index, t_other->index));
700 // Set FLC bit first, then read the lockword again.
/* full barrier: the store of flc_bit must be visible before we re-read */
701 Atomic::memory_barrier();
703 lockword = lock_lockword_get(o);
705 /* Lockword is still the way it was seen before */
706 if (lockword->is_thin_lock() && (lockword->get_thin_lock_thread_index() == index))
708 /* Add tuple (t, o) to the other thread's FLC list */
710 t->flc_next = t_other->flc_list;
711 t_other->flc_list = t;
715 threadobject *current;
717 // Wait until another thread sees the flc bit and notifies
719 t->flc_cond->wait(t_other->flc_lock);
721 /* Traverse FLC list looking if we're still there */
722 current = t_other->flc_list;
723 while (current && current != t)
724 current = current->flc_next;
726 /* not in list anymore, can stop waiting */
729 /* We are still in the list -- the other thread cannot have seen
731 assert(t_other->flc_bit);
734 t->flc_object = NULL; /* for garbage collector? */
/* restore the bit we saw on entry, then drop the holder's flc_lock */
738 t_other->flc_bit = old_flc;
740 t_other->flc_lock->unlock();
/* notify_flc_waiters: called by the thin-lock holder t when its flc_bit was
   seen set. Walk t's FLC list; for each waiter parked on object o (and, per
   the inflate-all structure, other objects on the list too), inflate the
   object's lock to a fat lock record owned by t and broadcast the waiter's
   flc_cond so it retries on the now-fat lock.
   NOTE(review): sampled dump -- the t->flc_lock->lock() at entry, the loop
   header, the `continue`-style skip after the flc_object != o check, the
   flc_list reset and closing braces are not visible here. */
743 static void notify_flc_waiters(threadobject *t, java_handle_t *o)
745 threadobject *current;
749 current = t->flc_list;
752 if (current->flc_object != o)
754 /* The object has to be inflated so the other threads can properly
757 // Only if not already inflated.
758 Lockword* lockword = lock_lockword_get(current->flc_object);
759 if (lockword->is_thin_lock()) {
760 lock_record_t *lr = lock_hashtable_get(current->flc_object);
/* we already hold the thin lock, so entering lr cannot deadlock */
761 lock_record_enter(t, lr);
763 DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
764 t->index, (void*) current->flc_object, (void*) lr));
766 lock_inflate(current->flc_object, lr);
770 // Wake the waiting threads.
771 current->flc_cond->broadcast();
773 current = current->flc_next;
779 t->flc_lock->unlock();
782 /* lock_monitor_enter **********************************************************
784 Acquire the monitor of the given object. If the current thread already
785 owns the monitor, the lock counter is simply increased.
787 This function blocks until it can acquire the monitor.
790 t............the current thread
791 o............the object of which to enter the monitor
794 true.........the lock has been successfully acquired
795 false........an exception has been thrown
797 *******************************************************************************/
/* lock_monitor_enter: monitorenter for object o. Order of attempts:
   (1) CAS-based thin lock of an unlocked object, (2) thin recursive
   re-entry bumping the count (inflating on count overflow), (3) fat-lock
   entry via the record's mutex, (4) contended thin lock -> FLC wait path
   (sable_flc_waiting) with JVMTI contention events around it.
   Returns true on success, false after throwing (e.g. NPE for o == NULL).
   NOTE(review): sampled dump -- the o == NULL check, several `return true;`
   lines, lr->owner/count updates after acquiring the fat lock, the retry
   loop around the inflation path, #endif lines and closing braces are not
   visible here. */
799 bool lock_monitor_enter(java_handle_t *o)
801 // This function is inside a critical section.
802 GCCriticalSection cs;
805 exceptions_throw_nullpointerexception();
809 threadobject* t = thread_get_current();
/* per-thread precomputed thin-lock word (thread index, count 0, shape 0) */
811 uintptr_t thinlock = t->thinlock;
814 // Most common case: try to thin-lock an unlocked object.
815 Lockword* lockword = lock_lockword_get(o);
816 bool result = lockword->lock(thinlock);
818 if (result == true) {
819 // Success, we locked it.
820 // NOTE: The Java Memory Model requires an instruction barrier
821 // here (because of the CAS above).
822 Atomic::instruction_barrier();
826 // Next common case: recursive lock with small recursion count.
827 // NOTE: We don't have to worry about stale values here, as any
828 // stale value will indicate another thread holding the lock (or
829 // an inflated lock).
830 if (lockword->get_thin_lock_without_count() == thinlock) {
831 // We own this monitor. Check the current recursion count.
832 if (lockword->is_max_thin_lock_count() == false) {
833 // The recursion count is low enough.
834 lockword->increase_thin_lock_count();
836 // Success, we locked it.
840 // Recursion count overflow.
/* inflate: migrate this thin lock to a fat lock record we then enter */
841 lock_record_t* lr = lock_hashtable_get(o);
842 lock_record_enter(t, lr);
846 notify_flc_waiters(t, o);
852 // The lock is either contended or fat.
853 if (lockword->is_fat_lock()) {
854 lock_record_t* lr = lockword->get_fat_lock();
856 // Check for recursive entering.
857 if (lr->owner == t) {
862 // Acquire the mutex of the lock record.
863 lock_record_enter(t, lr);
/* a freshly acquired fat lock must have no recursion yet */
866 assert(lr->count == 0);
870 /****** inflation path ******/
872 #if defined(ENABLE_JVMTI)
873 /* Monitor Contended Enter */
874 jvmti_MonitorContendedEntering(false, o);
877 sable_flc_waiting(lockword, t, o);
879 #if defined(ENABLE_JVMTI)
880 /* Monitor Contended Entered */
881 jvmti_MonitorContendedEntering(true, o);
887 /* lock_monitor_exit ***********************************************************
889 Decrement the counter of a (currently owned) monitor. If the counter
890 reaches zero, release the monitor.
892 If the current thread is not the owner of the monitor, an
893 IllegalMonitorState exception is thrown.
896 t............the current thread
897 o............the object of which to exit the monitor
900 true.........everything ok,
901 false........an exception has been thrown
903 *******************************************************************************/
/* lock_monitor_exit: monitorexit for object o. Order of attempts:
   (1) release a once-held thin lock (with JMM write barrier and flc_bit
   check to wake contenders), (2) decrement a recursive thin lock,
   (3) fat lock: verify ownership, decrement recursion or fully exit the
   record. Anything else throws IllegalMonitorStateException.
   Returns true on success, false after throwing.
   NOTE(review): sampled dump -- the o == NULL check, the lockword->unlock()
   call in case (1), the t->flc_bit test guarding the DEBUGLOCKS line,
   `return` lines, the lr->count decrement in the recursive fat case and
   closing braces are not visible here. */
905 bool lock_monitor_exit(java_handle_t* o)
907 // This function is inside a critical section.
908 GCCriticalSection cs;
911 exceptions_throw_nullpointerexception();
915 threadobject* t = thread_get_current();
917 uintptr_t thinlock = t->thinlock;
919 // We don't have to worry about stale values here, as any stale
920 // value will indicate that we don't own the lock.
921 Lockword* lockword = lock_lockword_get(o);
923 // Most common case: we release a thin lock that we hold once.
924 if (lockword->get_thin_lock() == thinlock) {
925 // Memory barrier for Java Memory Model.
926 Atomic::write_memory_barrier();
928 // Memory barrier for thin locking.
929 Atomic::memory_barrier();
931 /* check if there has been a flat lock contention on this object */
934 DEBUGLOCKS(("thread %d saw flc bit", t->index));
936 /* there has been a contention on this thin lock */
937 notify_flc_waiters(t, o);
943 // Next common case: we release a recursive lock, count > 0.
944 if (lockword->get_thin_lock_without_count() == thinlock) {
945 lockword->decrease_thin_lock_count();
949 // Either the lock is fat, or we don't hold it at all.
950 if (lockword->is_fat_lock()) {
951 lock_record_t* lr = lockword->get_fat_lock();
953 // Check if we own this monitor.
954 // NOTE: We don't have to worry about stale values here, as
955 // any stale value will be != t and thus fail this check.
956 if (lr->owner != t) {
957 exceptions_throw_illegalmonitorstateexception();
961 /* { the current thread `t` owns the lock record `lr` on object `o` } */
963 if (lr->count != 0) {
964 // We had locked this one recursively. Just decrement, it
965 // will still be locked.
970 // Unlock this lock record.
971 lock_record_exit(t, lr);
975 // Legal thin lock cases have been handled above, so this is an
977 exceptions_throw_illegalmonitorstateexception();
983 /* lock_record_add_waiter ******************************************************
985 Add a thread to the list of waiting threads of a lock record.
988 lr...........the lock record
989 thread.......the thread to add
991 *******************************************************************************/
/* lock_record_add_waiter: append t to lr's waiters list (FIFO order --
   push_back) and account for it in the statistics when enabled. */
993 static void lock_record_add_waiter(lock_record_t *lr, threadobject* t)
995 // Add the thread as last entry to waiters list.
996 lr->waiters->push_back(t)
998 #if defined(ENABLE_STATISTICS)
1000 size_lock_waiter += sizeof(threadobject*);
1005 /* lock_record_remove_waiter ***************************************************
1007 Remove a thread from the list of waiting threads of a lock record.
1010 lr...........the lock record
1011 t............the current thread
1014 The current thread must be the owner of the lock record.
1016 *******************************************************************************/
/* lock_record_remove_waiter: inverse of lock_record_add_waiter -- drop t
   from lr's waiters list and from the statistics when enabled. Per the
   header comment above, the caller must own lr. */
1018 static void lock_record_remove_waiter(lock_record_t *lr, threadobject* t)
1020 // Remove the thread from the waiters.
1021 lr->waiters->remove(t);
1023 #if defined(ENABLE_STATISTICS)
1025 size_lock_waiter -= sizeof(threadobject*);
1030 /* lock_record_wait ************************************************************
1032 Wait on a lock record for a given (maximum) amount of time.
1035 t............the current thread
1036 lr...........the lock record
1037 millis.......milliseconds of timeout
1038 nanos........nanoseconds of timeout
1041 true.........we have been interrupted,
1042 false........everything ok
1045 The current thread must be the owner of the lock record.
1046 This is NOT checked by this function!
1048 *******************************************************************************/
/* lock_record_wait: Object.wait() on fat lock lr -- register as waiter,
   fully release the record (remembering the recursion count), sleep via
   threads_wait_with_timeout_relative, then re-acquire, deregister and
   restore the count. Returns true iff the thread was interrupted (signal
   wins over interrupt when both flags are set).
   NOTE(review): the header comment above names this parameter `t` but the
   signature uses `thread`. Sampled dump -- the `lockcount` declaration,
   the `lr->count = 0;` before exiting, the else-branch brace resetting
   `interrupted`, and closing braces are not visible here. */
1050 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1053 bool wasinterrupted = false;
1055 DEBUGLOCKS(("[lock_record_wait : lr=%p, t=%p, millis=%lld, nanos=%d]",
1056 lr, thread, millis, nanos));
1058 /* { the thread t owns the fat lock record lr on the object o } */
1060 /* register us as waiter for this object */
1062 lock_record_add_waiter(lr, thread);
1064 /* remember the old lock count */
1066 lockcount = lr->count;
1068 /* unlock this record */
1071 lock_record_exit(thread, lr);
1073 /* wait until notified/interrupted/timed out */
1075 threads_wait_with_timeout_relative(thread, millis, nanos);
1077 /* re-enter the monitor */
1079 lock_record_enter(thread, lr);
1081 /* remove us from the list of waiting threads */
1083 lock_record_remove_waiter(lr, thread);
1085 /* restore the old lock count */
1087 lr->count = lockcount;
1089 /* We can only be signaled OR interrupted, not both. If both flags
1090 are set, reset only signaled and leave the thread in
1091 interrupted state. Otherwise, clear both. */
1093 if (!thread->signaled) {
1094 wasinterrupted = thread->interrupted;
1095 thread->interrupted = false;
1098 thread->signaled = false;
1100 /* return if we have been interrupted */
1102 return wasinterrupted;
1106 /* lock_monitor_wait ***********************************************************
1108 Wait on an object for a given (maximum) amount of time.
1111 t............the current thread
1112 o............the object
1113 millis.......milliseconds of timeout
1114 nanos........nanoseconds of timeout
1117 The current thread must be the owner of the object's monitor.
1119 *******************************************************************************/
/* lock_monitor_wait: Object.wait() entry point. Fat lock: verify t is the
   owner (else IllegalMonitorStateException). Thin lock: verify ownership,
   then inflate (get record, enter it, install it in the lockword, wake FLC
   waiters) so there is a condition variable to wait on. Finally delegate
   to lock_record_wait and convert an interrupt into InterruptedException.
   NOTE(review): sampled dump -- the `lr` declaration, the `return;` lines
   after the throws, lr->count/owner fixups during inflation and closing
   braces are not visible here. */
1121 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
1125 Lockword* lockword = lock_lockword_get(o);
1127 // Check if we own this monitor.
1128 // NOTE: We don't have to worry about stale values here, as any
1129 // stale value will fail this check.
1130 if (lockword->is_fat_lock()) {
1131 lr = lockword->get_fat_lock();
1133 if (lr->owner != t) {
1134 exceptions_throw_illegalmonitorstateexception();
1139 // It's a thin lock.
1140 if (lockword->get_thin_lock_without_count() != t->thinlock) {
1141 exceptions_throw_illegalmonitorstateexception();
1145 // Get the lock-record.
1146 lr = lock_hashtable_get(o);
1147 lock_record_enter(t, lr);
1149 // Inflate this lock.
1150 lockword->inflate(lr);
/* any threads parked on the (previously thin) lock must be re-routed */
1152 notify_flc_waiters(t, o);
1155 /* { the thread t owns the fat lock record lr on the object o } */
1157 if (lock_record_wait(t, lr, millis, nanos))
1158 exceptions_throw_interruptedexception();
1162 /* lock_record_notify **********************************************************
1164 Notify one thread or all threads waiting on the given lock record.
1167 t............the current thread
1168 lr...........the lock record
1169 one..........if true, only notify one thread
1172 The current thread must be the owner of the lock record.
1173 This is NOT checked by this function!
1175 *******************************************************************************/
/* lock_record_notify: Object.notify()/notifyAll() on fat lock lr. Iterate
   the waiters list; for each not-yet-signaled waiter, signal its waitcond
   under its waitmutex and mark it signaled. Waiters remove themselves from
   the list when they wake (see lock_record_wait). Per the header comment
   above, the caller must own lr -- not checked here.
   NOTE(review): sampled dump -- the `continue`/skip after the signaled
   check, the `if (one) ... break/return` at the end and closing braces are
   not visible here. */
1177 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1179 #if defined(ENABLE_GC_CACAO)
/* must NOT be in a GC critical section: we block on per-thread mutexes */
1181 assert(GCCriticalSection::inside() == false);
1184 // { The thread t owns the fat lock record lr on the object o }
1186 for (List<threadobject*>::iterator it = lr->waiters->begin(); it != lr->waiters->end(); it++) {
1187 threadobject* waiter = *it;
1189 // We must skip threads which have already been notified. They
1190 // will remove themselves from the list.
1191 if (waiter->signaled)
1194 // Enter the wait-mutex.
1195 waiter->waitmutex->lock();
1197 DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, one=%d]", lr, t, waiter, one));
1199 // Signal the waiter.
1200 waiter->waitcond->signal();
1202 // Mark the thread as signaled.
1203 waiter->signaled = true;
1205 // Leave the wait-mutex.
1206 waiter->waitmutex->unlock();
1208 // If we should only wake one thread, we are done.
1208 // If we should only wake one thread, we are done.
1215 /* lock_monitor_notify *********************************************************
1217 Notify one thread or all threads waiting on the given object.
1220 t............the current thread
1221 o............the object
1222 one..........if true, only notify one thread
1225 The current thread must be the owner of the object's monitor.
1227 *******************************************************************************/
/* lock_monitor_notify: Object.notify()/notifyAll() entry point. Inside a
   scoped GC critical section, verify t owns o's monitor (fat: owner check;
   thin: thinlock-word check, and then there is nothing to wake since no
   thread can wait on a thin lock). Outside the critical section, delegate
   to lock_record_notify -- `one` selects notify vs. notifyAll.
   NOTE(review): sampled dump -- the scope braces around the critical
   section, `return;` lines after the throws and the early return for the
   thin-lock no-waiters case are not visible here. */
1229 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1231 lock_record_t* lr = NULL;
1234 // This scope is inside a critical section.
1235 GCCriticalSection cs;
1237 Lockword* lockword = lock_lockword_get(o);
1239 // Check if we own this monitor.
1240 // NOTE: We don't have to worry about stale values here, as any
1241 // stale value will fail this check.
1243 if (lockword->is_fat_lock()) {
1244 lr = lockword->get_fat_lock();
1246 if (lr->owner != t) {
1247 exceptions_throw_illegalmonitorstateexception();
1252 // It's a thin lock.
1253 if (lockword->get_thin_lock_without_count() != t->thinlock) {
1254 exceptions_throw_illegalmonitorstateexception();
1258 // No thread can wait on a thin lock, so there's nothing to do.
1263 // { The thread t owns the fat lock record lr on the object o }
1264 lock_record_notify(t, lr, one);
1269 /*============================================================================*/
1270 /* INQUIRY FUNCTIONS */
1271 /*============================================================================*/
1274 /* lock_is_held_by_current_thread **********************************************
1276 Return true if the current thread owns the monitor of the given object.
1279 o............the object
1282 true, if the current thread holds the lock of this object.
1284 *******************************************************************************/
/* lock_is_held_by_current_thread: true iff the current thread owns o's
   monitor -- fat lock: compare lr->owner; thin lock: compare the lockword
   (count stripped) against the thread's precomputed thinlock word. */
1286 bool lock_is_held_by_current_thread(java_handle_t *o)
1288 // This function is inside a critical section.
1289 GCCriticalSection cs;
1291 // Check if we own this monitor.
1292 // NOTE: We don't have to worry about stale values here, as any
1293 // stale value will fail this check.
1294 threadobject* t = thread_get_current();
1295 Lockword* lockword = lock_lockword_get(o);
1297 if (lockword->is_fat_lock()) {
1299 lock_record_t* lr = lockword->get_fat_lock();
1300 return (lr->owner == t);
1303 // It's a thin lock.
1304 return (lockword->get_thin_lock_without_count() == t->thinlock);
1310 /*============================================================================*/
1311 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1312 /*============================================================================*/
1315 /* lock_wait_for_object ********************************************************
1317 Wait for the given object.
1320 o............the object
1321 millis.......milliseconds to wait
1322 nanos........nanoseconds to wait
1324 *******************************************************************************/
/* lock_wait_for_object: current-thread wrapper -- Object.wait(millis,
   nanos) on o, delegating to lock_monitor_wait with THREADOBJECT. */
1326 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1328 threadobject *thread;
1330 thread = THREADOBJECT;
1332 lock_monitor_wait(thread, o, millis, nanos);
1336 /* lock_notify_object **********************************************************
1338 Notify one thread waiting on the given object.
1341 o............the object
1343 *******************************************************************************/
/* lock_notify_object: current-thread wrapper -- Object.notify() on o
   (one = true wakes a single waiter). */
1345 void lock_notify_object(java_handle_t *o)
1347 threadobject *thread;
1349 thread = THREADOBJECT;
1351 lock_monitor_notify(thread, o, true);
1355 /* lock_notify_all_object ******************************************************
1357 Notify all threads waiting on the given object.
1360 o............the object
1362 *******************************************************************************/
/* lock_notify_all_object: current-thread wrapper -- Object.notifyAll() on
   o (one = false wakes every waiter). */
1364 void lock_notify_all_object(java_handle_t *o)
1366 threadobject *thread;
1368 thread = THREADOBJECT;
1370 lock_monitor_notify(thread, o, false);
1375 * These are local overrides for various environment variables in Emacs.
1376 * Please do not remove this and leave it at the end of the file, where
1377 * Emacs will automagically detect them.
1378 * ---------------------------------------------------------------------
1381 * indent-tabs-mode: t
1385 * vim:noexpandtab:sw=4:ts=4: