1 /* src/threads/posix/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007, 2008
4 CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
6 This file is part of CACAO.
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2, or (at
11 your option) any later version.
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
37 #include "mm/memory.h"
39 #include "native/llni.h"
41 #include "threads/lock-common.h"
42 #include "threads/mutex.hpp"
43 #include "threads/threadlist.h"
44 #include "threads/thread.h"
46 #include "threads/posix/lock.h"
48 #include "toolbox/list.h"
50 #include "vm/global.h"
51 #include "vm/exceptions.h"
52 #include "vm/finalizer.h"
53 #include "vm/stringlocal.h"
56 #include "vmcore/options.h"
58 #if defined(ENABLE_STATISTICS)
59 # include "vmcore/statistics.h"
62 #if defined(ENABLE_VMLOG)
63 #include <vmlog_cacao.h>
66 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
70 /* includes for atomic instructions: */
72 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
73 #include "threads/posix/generic-primitives.h"
75 #include "threads/atomic.hpp"
78 #if defined(ENABLE_JVMTI)
79 #include "native/jvmti/cacaodbg.h"
82 #if defined(ENABLE_GC_BOEHM)
83 # include "mm/boehm-gc/include/gc.h"
87 /* debug **********************************************************************/
90 # define DEBUGLOCKS(format) \
92 if (opt_DebugLocks) { \
97 # define DEBUGLOCKS(format)
101 /******************************************************************************/
103 /******************************************************************************/
105 /* number of lock records in the first pool allocated for a thread */
106 #define LOCK_INITIAL_LOCK_RECORDS 8
108 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
111 /******************************************************************************/
112 /* MACROS FOR THIN/FAT LOCKS */
113 /******************************************************************************/
115 /* We use a variant of the tasuki locks described in the paper
117 * Tamiya Onodera, Kiyokuni Kawachiya
118 * A Study of Locking Objects with Bimodal Fields
119 * Proceedings of the ACM OOPSLA '99, pp. 223-237
122 * The underlying thin locks are a variant of the thin locks described in
124 * Bacon, Konuru, Murthy, Serrano
125 * Thin Locks: Featherweight Synchronization for Java
126 * Proceedings of the ACM Conference on Programming Language Design and
127 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
130 * In thin lock mode the lockword looks like this:
132 * ,----------------------,-----------,---,
133 * | thread ID | count | 0 |
134 * `----------------------'-----------'---'
136 * thread ID......the 'index' of the owning thread, or 0
137 * count..........number of times the lock has been entered minus 1
138 * 0..............the shape bit is 0 in thin lock mode
140 * In fat lock mode it is basically a lock_record_t *:
142 * ,----------------------------------,---,
143 * | lock_record_t * (without LSB) | 1 |
144 * `----------------------------------'---'
146 * 1..............the shape bit is 1 in fat lock mode
149 #if SIZEOF_VOID_P == 8
150 #define THIN_LOCK_WORD_SIZE 64
152 #define THIN_LOCK_WORD_SIZE 32
155 #define THIN_LOCK_SHAPE_BIT 0x01
157 #define THIN_UNLOCKED 0
159 #define THIN_LOCK_COUNT_SHIFT 1
160 #define THIN_LOCK_COUNT_SIZE 8
161 #define THIN_LOCK_COUNT_INCR (1 << THIN_LOCK_COUNT_SHIFT)
162 #define THIN_LOCK_COUNT_MAX ((1 << THIN_LOCK_COUNT_SIZE) - 1)
163 #define THIN_LOCK_COUNT_MASK (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
165 #define THIN_LOCK_TID_SHIFT (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
166 #define THIN_LOCK_TID_SIZE (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
168 #define IS_THIN_LOCK(lockword) (!((lockword) & THIN_LOCK_SHAPE_BIT))
169 #define IS_FAT_LOCK(lockword) ((lockword) & THIN_LOCK_SHAPE_BIT)
171 #define GET_FAT_LOCK(lockword) ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
172 #define MAKE_FAT_LOCK(ptr) ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)
174 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
175 #define GET_THREAD_INDEX(lockword) ((unsigned) lockword >> THIN_LOCK_TID_SHIFT)
178 /* global variables ***********************************************************/
180 /* hashtable mapping objects to lock records */
181 static lock_hashtable_t lock_hashtable;
184 /******************************************************************************/
186 /******************************************************************************/
188 static void lock_hashtable_init(void);
190 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
191 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
192 static void lock_record_enter(threadobject *t, lock_record_t *lr);
193 static void lock_record_exit(threadobject *t, lock_record_t *lr);
194 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
195 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
198 /*============================================================================*/
199 /* INITIALIZATION OF DATA STRUCTURES */
200 /*============================================================================*/
203 /* lock_init *******************************************************************
205 Initialize global data for locking.
207 *******************************************************************************/
211 /* initialize lock hashtable */
213 lock_hashtable_init();
215 #if defined(ENABLE_VMLOG)
216 vmlog_cacao_init_lock();
221 /* lock_pre_compute_thinlock ***************************************************
223 Pre-compute the thin lock value for a thread index.
226 index........the thead index (>= 1)
229 the thin lock value for this thread index
231 *******************************************************************************/
233 ptrint lock_pre_compute_thinlock(s4 index)
/* Builds the per-thread thin-lock word: the thread index is shifted into
   the TID field, the count field is zero, and the shape bit stays 0
   (THIN_UNLOCKED == 0), i.e. "held once by thread `index`".
   NOTE(review): listing is elided -- the function's braces are missing
   from this excerpt. */
235 return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
239 /* lock_record_new *************************************************************
241 Allocate a lock record.
243 *******************************************************************************/
245 static lock_record_t *lock_record_new(void)
/* Allocates and initializes a fresh fat-lock record on the C heap:
   waiters list, GC weak reference (ENABLE_GC_CACAO only), and mutex.
   NOTE(review): listing is elided -- the opening brace, the local
   declaration of `lr`, some member initializations, the #endif lines
   and the final `return lr;` are missing from this excerpt. */
249 /* allocate the data structure on the C heap */
251 lr = NEW(lock_record_t);
253 #if defined(ENABLE_STATISTICS)
255 size_lock_record += sizeof(lock_record_t);
258 /* initialize the members */
263 lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));
265 #if defined(ENABLE_GC_CACAO)
266 /* register the lock object as weak reference with the GC */
268 gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
271 /* initialize the mutex */
273 lr->mutex = Mutex_new();
275 DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
281 /* lock_record_free ************************************************************
286 lr....lock record to free
288 *******************************************************************************/
290 static void lock_record_free(lock_record_t *lr)
/* Releases everything lock_record_new() acquired, in reverse: mutex,
   GC weak reference (ENABLE_GC_CACAO only), waiters list, then the
   record itself. Caller must guarantee no thread still uses `lr`.
   NOTE(review): listing is elided -- braces and #endif lines are
   missing from this excerpt. */
292 DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
294 /* Destroy the mutex. */
296 Mutex_delete(lr->mutex);
298 #if defined(ENABLE_GC_CACAO)
299 /* unregister the lock object reference with the GC */
301 gc_weakreference_unregister(&(lr->object));
304 /* Free the waiters list. */
306 list_free(lr->waiters);
308 /* Free the data structure. */
310 FREE(lr, lock_record_t);
312 #if defined(ENABLE_STATISTICS)
314 size_lock_record -= sizeof(lock_record_t);
319 /*============================================================================*/
320 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
321 /*============================================================================*/
323 /* lock_hashtable_init *********************************************************
325 Initialize the global hashtable mapping objects to lock records.
327 *******************************************************************************/
329 static void lock_hashtable_init(void)
/* Creates the global object->lock_record hashtable: one mutex guarding
   it, LOCK_INITIAL_HASHTABLE_SIZE zeroed bucket pointers, zero entries.
   NOTE(review): listing is elided -- braces and #endif are missing
   from this excerpt. */
331 lock_hashtable.mutex = Mutex_new();
333 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
334 lock_hashtable.entries = 0;
335 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
337 #if defined(ENABLE_STATISTICS)
339 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
342 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
346 /* lock_hashtable_grow *********************************************************
348 Grow the lock record hashtable to about twice its current size and
351 *******************************************************************************/
353 /* must be called with hashtable mutex locked */
354 static void lock_hashtable_grow(void)
/* Doubles (plus one) the bucket array and rehashes every chained
   lock record into the new table, then frees the old bucket array.
   Buckets are selected by heap_hashcode(lr->object) % size, the same
   scheme lock_hashtable_get() uses.
   NOTE(review): listing is elided -- braces, some local declarations
   (i, h, newslot, lr, next), the inner chain-walk loop lines and
   #endif lines are missing from this excerpt. */
358 lock_record_t **oldtable;
359 lock_record_t **newtable;
366 /* allocate a new table */
368 oldsize = lock_hashtable.size;
369 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
371 DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
373 oldtable = lock_hashtable.ptr;
374 newtable = MNEW(lock_record_t *, newsize);
376 #if defined(ENABLE_STATISTICS)
378 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
381 MZERO(newtable, lock_record_t *, newsize);
383 /* rehash the entries */
385 for (i = 0; i < oldsize; i++) {
390 h = heap_hashcode(lr->object);
391 newslot = h % newsize;
393 lr->hashlink = newtable[newslot];
394 newtable[newslot] = lr;
400 /* replace the old table */
402 lock_hashtable.ptr = newtable;
403 lock_hashtable.size = newsize;
405 MFREE(oldtable, lock_record_t *, oldsize);
407 #if defined(ENABLE_STATISTICS)
409 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
414 /* lock_hashtable_cleanup ******************************************************
416 Removes (and frees) lock records which have a cleared object reference
417 from the hashtable. The locked object was reclaimed by the GC.
419 *******************************************************************************/
421 #if defined(ENABLE_GC_CACAO)
422 void lock_hashtable_cleanup(void)
/* Scans every bucket chain under the hashtable mutex and frees lock
   records whose weak object reference was cleared by the GC (object
   reclaimed), unlinking them from the chain as it goes.
   NOTE(review): listing is elided -- braces, local declarations
   (i, lr, prev, next) and the chain-advance statements are missing,
   so the prev/next bookkeeping shown here is incomplete. */
432 /* lock the hashtable */
434 Mutex_lock(lock_hashtable.mutex);
436 /* search the hashtable for cleared references */
438 for (i = 0; i < lock_hashtable.size; i++) {
439 lr = lock_hashtable.ptr[i];
445 /* remove lock records with cleared references */
447 if (lr->object == NULL) {
449 /* unlink the lock record from the hashtable */
452 lock_hashtable.ptr[i] = next;
454 prev->hashlink = next;
456 /* free the lock record */
458 lock_record_free(lr);
468 /* unlock the hashtable */
470 Mutex_unlock(lock_hashtable.mutex);
475 /* lock_hashtable_get **********************************************************
477 Find the lock record for the given object. If it does not exists,
478 yet, create it and enter it in the hashtable.
481 t....the current thread
482 o....the object to look up
485 the lock record to use for this object
487 *******************************************************************************/
489 #if defined(ENABLE_GC_BOEHM)
490 static void lock_record_finalizer(void *object, void *p);
493 static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
/* Returns the fat-lock record for object `o`, creating and inserting
   one if none exists yet. Fast path: if the lockword is already fat,
   decode and return it without touching the hashtable. Slow path runs
   under lock_hashtable.mutex; raw object pointers (LLNI_DIRECT) are
   only touched inside LLNI critical sections.
   NOTE(review): listing is elided -- braces, local declarations
   (lockword, slot, lr), the found-in-chain early return and the final
   `return lr;` are missing from this excerpt. */
499 lockword = lock_lockword_get(t, o);
501 if (IS_FAT_LOCK(lockword))
502 return GET_FAT_LOCK(lockword);
504 /* lock the hashtable */
506 Mutex_lock(lock_hashtable.mutex);
508 /* lookup the lock record in the hashtable */
510 LLNI_CRITICAL_START_THREAD(t);
511 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
512 lr = lock_hashtable.ptr[slot];
514 for (; lr != NULL; lr = lr->hashlink) {
515 if (lr->object == LLNI_DIRECT(o))
518 LLNI_CRITICAL_END_THREAD(t);
521 /* not found, we must create a new one */
523 lr = lock_record_new();
525 LLNI_CRITICAL_START_THREAD(t);
526 lr->object = LLNI_DIRECT(o);
527 LLNI_CRITICAL_END_THREAD(t);
529 #if defined(ENABLE_GC_BOEHM)
530 /* register new finalizer to clean up the lock record */
532 GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
535 /* enter it in the hashtable */
537 lr->hashlink = lock_hashtable.ptr[slot];
538 lock_hashtable.ptr[slot] = lr;
539 lock_hashtable.entries++;
541 /* check whether the hash should grow */
543 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
544 lock_hashtable_grow();
548 /* unlock the hashtable */
550 Mutex_unlock(lock_hashtable.mutex);
552 /* return the new lock record */
558 /* lock_hashtable_remove *******************************************************
560 Remove the lock record for the given object from the hashtable
561 and free it afterwards.
564 t....the current thread
565 o....the object to look up
567 *******************************************************************************/
569 static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
/* Unlinks the fat-lock record of `o` from its hashtable chain (special
   case when it is the chain head) and frees it. The object's lockword
   must already be fat (asserted).
   NOTE(review): listing is elided -- braces, local declarations
   (lockword, slot, lr), the head-of-chain comparison and the `break`
   after relinking are missing from this excerpt. */
574 lock_record_t *tmplr;
576 /* lock the hashtable */
578 Mutex_lock(lock_hashtable.mutex);
580 /* get lock record */
582 lockword = lock_lockword_get(t, o);
584 assert(IS_FAT_LOCK(lockword));
586 lr = GET_FAT_LOCK(lockword);
588 /* remove the lock-record from the hashtable */
590 LLNI_CRITICAL_START_THREAD(t);
591 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
592 tmplr = lock_hashtable.ptr[slot];
593 LLNI_CRITICAL_END_THREAD(t);
596 /* special handling if it's the first in the chain */
598 lock_hashtable.ptr[slot] = lr->hashlink;
601 for (; tmplr != NULL; tmplr = tmplr->hashlink) {
602 if (tmplr->hashlink == lr) {
603 tmplr->hashlink = lr->hashlink;
608 assert(tmplr != NULL);
611 /* decrease entry count */
613 lock_hashtable.entries--;
615 /* unlock the hashtable */
617 Mutex_unlock(lock_hashtable.mutex);
619 /* free the lock record */
621 lock_record_free(lr);
625 /* lock_record_finalizer *******************************************************
627 XXX Remove me for exact GC.
629 *******************************************************************************/
631 static void lock_record_finalizer(void *object, void *p)
/* Boehm-GC finalizer installed by lock_hashtable_get(): runs the
   object's own Java finalizer (if any), then removes and frees the
   object's lock-record hashtable entry.
   NOTE(review): listing is elided -- braces, the local declarations
   (o, c) and the log_* tail of the debug branch are missing from this
   excerpt. */
636 o = (java_handle_t *) object;
638 #if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
639 /* XXX this is only a dirty hack to make Boehm work with handles */
641 o = LLNI_WRAP((java_object_t *) o);
644 LLNI_class_get(o, c);
647 if (opt_DebugFinalizer) {
649 log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
656 /* check for a finalizer function */
658 if (c->finalizer != NULL)
659 finalizer_run(object, p);
661 /* remove the lock-record entry from the hashtable and free it */
663 lock_hashtable_remove(THREADOBJECT, o);
667 /*============================================================================*/
668 /* OBJECT LOCK INITIALIZATION */
669 /*============================================================================*/
672 /* lock_init_object_lock *******************************************************
674 Initialize the monitor pointer of the given object. The monitor gets
675 initialized to an unlocked state.
677 *******************************************************************************/
679 void lock_init_object_lock(java_object_t *o)
/* Resets the object's lockword to the unlocked thin-lock state
   (THIN_UNLOCKED == 0: no owner, zero count, shape bit 0).
   NOTE(review): listing is elided -- braces are missing from this
   excerpt. */
683 o->lockword = THIN_UNLOCKED;
687 /*============================================================================*/
688 /* LOCKING ALGORITHM */
689 /*============================================================================*/
692 /* lock_lockword_get ***********************************************************
694 Get the lockword for the given object.
697 t............the current thread
698 o............the object
700 *******************************************************************************/
702 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
/* Reads o's raw lockword; the LLNI critical section brackets the
   LLNI_DIRECT dereference of the handle.
   NOTE(review): listing is elided -- braces, the `lockword` local and
   the `return lockword;` are missing from this excerpt. */
706 LLNI_CRITICAL_START_THREAD(t);
707 lockword = LLNI_DIRECT(o)->lockword;
708 LLNI_CRITICAL_END_THREAD(t);
714 /* lock_lockword_set ***********************************************************
716 Set the lockword for the given object.
719 t............the current thread
720 o............the object
721 lockword.....the new lockword value
723 *******************************************************************************/
725 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
/* Writes o's raw lockword inside an LLNI critical section; plain store,
   no atomic CAS -- callers rely on already owning the lock (or on the
   CAS fast path in lock_monitor_enter).
   NOTE(review): listing is elided -- braces are missing from this
   excerpt. */
727 LLNI_CRITICAL_START_THREAD(t);
728 LLNI_DIRECT(o)->lockword = lockword;
729 LLNI_CRITICAL_END_THREAD(t);
733 /* lock_record_enter ***********************************************************
735 Enter the lock represented by the given lock record.
738 t.................the current thread
739 lr................the lock record
741 *******************************************************************************/
743 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
/* Blocks until the record's mutex is acquired.
   NOTE(review): listing is elided -- braces and (per the original) the
   assignment of `lr->owner = t` after the lock are missing from this
   excerpt; verify against the full source. */
745 Mutex_lock(lr->mutex);
750 /* lock_record_exit ************************************************************
752 Release the lock represented by the given lock record.
755 t.................the current thread
756 lr................the lock record
759 The current thread must own the lock represented by this lock record.
760 This is NOT checked by this function!
762 *******************************************************************************/
764 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
/* Releases the record's mutex; per the header comment above, ownership
   is NOT checked here.
   NOTE(review): listing is elided -- braces and (presumably) the
   clearing of `lr->owner` before the unlock are missing from this
   excerpt; verify against the full source. */
767 Mutex_unlock(lr->mutex);
771 /* lock_inflate ****************************************************************
773 Inflate the lock of the given object. This may only be called by the
774 owner of the monitor of the object.
777 t............the current thread
778 o............the object of which to inflate the lock
779 lr...........the lock record to install. The current thread must
780 own the lock of this lock record!
783 The current thread must be the owner of this object's monitor AND
784 of the lock record's lock!
786 *******************************************************************************/
788 static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
/* Converts o's thin lock into a fat lock: copies the thin recursion
   count into lr->count, then publishes MAKE_FAT_LOCK(lr) (pointer with
   shape bit 1) as the new lockword. If the lock is already fat it must
   already point at `lr` (asserted) and nothing more is done. Caller
   must own both o's monitor and lr's mutex (see header comment).
   NOTE(review): listing is elided -- braces, the `lockword` local and
   the early return of the already-fat branch are missing from this
   excerpt. */
792 /* get the current lock count */
794 lockword = lock_lockword_get(t, o);
796 if (IS_FAT_LOCK(lockword)) {
797 assert(GET_FAT_LOCK(lockword) == lr);
801 assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
803 /* copy the count from the thin lock */
805 lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
808 DEBUGLOCKS(("[lock_inflate : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
809 lr, t, o, lockword, lr->count));
813 lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
817 /* TODO Move this function into threadlist.[ch]. */
819 static threadobject *threads_lookup_thread_id(int index)
/* Linear scan of the global thread list for the thread whose `index`
   matches; threads still in THREAD_STATE_NEW are skipped.
   NOTE(review): listing is elided -- braces, the `t` local, the
   `continue`/`break` of the two ifs, the list-lock acquire/release and
   the return are missing from this excerpt. */
825 for (t = threadlist_first(); t != NULL; t = threadlist_next(t)) {
826 if (t->state == THREAD_STATE_NEW)
828 if (t->index == index)
836 static void sable_flc_waiting(ptrint lockword, threadobject *t, java_handle_t *o)
/* Flat-lock-contention protocol (tasuki-style) for a contended thin
   lock: set the holder's flc_bit, re-read the lockword after a memory
   barrier, and if the holder still owns it thin, enqueue (t, o) on the
   holder's flc_list and block on t->flc_cond until a notifier removes
   us (lock was inflated) or we spuriously wake while still listed.
   All of this runs under t_other->flc_lock.
   NOTE(review): listing is elided -- braces, locals (index, old_flc),
   the NULL check on t_other, the assignment of t->flc_object, and the
   loop structure around the Condition_wait are missing from this
   excerpt. */
839 threadobject *t_other;
842 index = GET_THREAD_INDEX(lockword);
843 t_other = threads_lookup_thread_id(index);
845 /* failure, TODO: add statistics */
848 Mutex_lock(t_other->flc_lock);
849 old_flc = t_other->flc_bit;
850 t_other->flc_bit = true;
852 DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d",
853 t->index, t_other->index));
855 /* Set FLC bit first, then read the lockword again */
856 Atomic_memory_barrier();
858 lockword = lock_lockword_get(t, o);
860 /* Lockword is still the way it was seen before */
861 if (IS_THIN_LOCK(lockword) && (GET_THREAD_INDEX(lockword) == index))
863 /* Add tuple (t, o) to the other thread's FLC list */
865 t->flc_next = t_other->flc_list;
866 t_other->flc_list = t;
870 threadobject *current;
872 /* Wait until another thread sees the flc bit and notifies
874 Condition_wait(t->flc_cond, t_other->flc_lock);
876 /* Traverse FLC list looking if we're still there */
877 current = t_other->flc_list;
878 while (current && current != t)
879 current = current->flc_next;
881 /* not in list anymore, can stop waiting */
884 /* We are still in the list -- the other thread cannot have seen
886 assert(t_other->flc_bit);
889 t->flc_object = NULL; /* for garbage collector? */
893 t_other->flc_bit = old_flc;
895 Mutex_unlock(t_other->flc_lock);
898 static void notify_flc_waiters(threadobject *t, java_handle_t *o)
/* Called by the thin-lock holder `t`: walks t->flc_list under
   t->flc_lock, inflates the lock of each waiter's contended object
   (only if still thin) while holding the new record's mutex, and
   broadcasts each waiter's flc_cond so sable_flc_waiting() can resume.
   NOTE(review): listing is elided -- braces, the `o != NULL` filter
   semantics around the flc_object comparison, the unlinking of served
   entries and the clearing of flc_bit/flc_list are missing from this
   excerpt. */
900 threadobject *current;
902 Mutex_lock(t->flc_lock);
904 current = t->flc_list;
907 if (current->flc_object != o)
909 /* The object has to be inflated so the other threads can properly
912 /* Only if not already inflated */
913 ptrint lockword = lock_lockword_get(t, current->flc_object);
914 if (IS_THIN_LOCK(lockword)) {
915 lock_record_t *lr = lock_hashtable_get(t, current->flc_object);
916 lock_record_enter(t, lr);
918 DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
919 t->index, (void*) current->flc_object, (void*) lr));
921 lock_inflate(t, current->flc_object, lr);
924 /* Wake the waiting thread */
925 Condition_broadcast(current->flc_cond);
927 current = current->flc_next;
932 Mutex_unlock(t->flc_lock);
935 /* lock_monitor_enter **********************************************************
937 Acquire the monitor of the given object. If the current thread already
938 owns the monitor, the lock counter is simply increased.
940 This function blocks until it can acquire the monitor.
943 t............the current thread
944 o............the object of which to enter the monitor
947 true.........the lock has been successfully acquired
948 false........an exception has been thrown
950 *******************************************************************************/
952 bool lock_monitor_enter(java_handle_t *o)
/* Monitor entry, three tiers:
     1. CAS THIN_UNLOCKED -> t->thinlock (uncontended fast path);
     2. recursive thin lock: since TID bits match, (lockword ^ thinlock)
        isolates the count field, so the comparison below checks the
        count has room before adding THIN_LOCK_COUNT_INCR -- on
        overflow the lock is inflated instead;
     3. fat or contended: enter the record mutex, or fall through to
        the sable_flc_waiting() inflation protocol.
   Returns false only after throwing (NullPointerException for o==NULL).
   NOTE(review): listing is elided -- braces, locals (t, lockword,
   thinlock, lr), several `return true` statements, the owner/count
   bookkeeping after lock_record_enter, the retry after flc waiting and
   the #endif lines are missing from this excerpt. */
955 /* CAUTION: This code assumes that ptrint is unsigned! */
961 exceptions_throw_nullpointerexception();
967 thinlock = t->thinlock;
970 /* most common case: try to thin-lock an unlocked object */
972 LLNI_CRITICAL_START_THREAD(t);
973 lockword = Atomic_compare_and_swap_ptr(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
974 LLNI_CRITICAL_END_THREAD(t);
976 if (lockword == THIN_UNLOCKED) {
977 /* success. we locked it */
978 /* The Java Memory Model requires a memory barrier here: */
979 /* Because of the CAS above, this barrier is a nop on x86 / x86_64 */
980 Atomic_instruction_barrier();
984 /* next common case: recursive lock with small recursion count */
985 /* We don't have to worry about stale values here, as any stale value */
986 /* will indicate another thread holding the lock (or an inflated lock) */
988 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
989 /* we own this monitor */
990 /* check the current recursion count */
992 if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
994 /* the recursion count is low enough */
996 lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);
998 /* success. we locked it */
1002 /* recursion count overflow */
1004 lr = lock_hashtable_get(t, o);
1005 lock_record_enter(t, lr);
1006 lock_inflate(t, o, lr);
1009 notify_flc_waiters(t, o);
1015 /* the lock is either contented or fat */
1017 if (IS_FAT_LOCK(lockword)) {
1019 lr = GET_FAT_LOCK(lockword);
1021 /* check for recursive entering */
1022 if (lr->owner == t) {
1027 /* acquire the mutex of the lock record */
1029 lock_record_enter(t, lr);
1031 assert(lr->count == 0);
1036 /****** inflation path ******/
1038 #if defined(ENABLE_JVMTI)
1039 /* Monitor Contended Enter */
1040 jvmti_MonitorContendedEntering(false, o);
1043 sable_flc_waiting(lockword, t, o);
1045 #if defined(ENABLE_JVMTI)
1046 /* Monitor Contended Entered */
1047 jvmti_MonitorContendedEntering(true, o);
1053 /* lock_monitor_exit ***********************************************************
1055 Decrement the counter of a (currently owned) monitor. If the counter
1056 reaches zero, release the monitor.
1058 If the current thread is not the owner of the monitor, an
1059 IllegalMonitorState exception is thrown.
1062 t............the current thread
1063 o............the object of which to exit the monitor
1066 true.........everything ok,
1067 false........an exception has been thrown
1069 *******************************************************************************/
1071 bool lock_monitor_exit(java_handle_t *o)
/* Monitor exit, mirroring lock_monitor_enter:
     1. lockword == thinlock exactly: held once thin -- JMM write
        barrier, store THIN_UNLOCKED, then full barrier before checking
        the flc bit so flat-lock-contention waiters get notified;
     2. TID bits match but count > 0: decrement the thin count;
     3. fat lock: verify lr->owner == t (else
        IllegalMonitorStateException), decrement lr->count or unlock
        the record mutex when it reaches zero.
   Any other lockword means the caller does not own the monitor.
   NOTE(review): listing is elided -- braces, locals (t, lockword,
   thinlock, lr), the `t->flc_bit` condition at original line ~1102,
   several `return true/false` statements and the owner-clearing before
   Mutex_unlock are missing from this excerpt. */
1078 exceptions_throw_nullpointerexception();
1084 thinlock = t->thinlock;
1086 /* We don't have to worry about stale values here, as any stale value */
1087 /* will indicate that we don't own the lock. */
1089 lockword = lock_lockword_get(t, o);
1091 /* most common case: we release a thin lock that we hold once */
1093 if (lockword == thinlock) {
1094 /* memory barrier for Java Memory Model */
1095 Atomic_write_memory_barrier();
1096 lock_lockword_set(t, o, THIN_UNLOCKED);
1097 /* Memory barrier for thin locking. */
1098 Atomic_memory_barrier();
1100 /* check if there has been a flat lock contention on this object */
1103 DEBUGLOCKS(("thread %d saw flc bit", t->index));
1105 /* there has been a contention on this thin lock */
1106 notify_flc_waiters(t, o);
1112 /* next common case: we release a recursive lock, count > 0 */
1114 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
1115 lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
1119 /* either the lock is fat, or we don't hold it at all */
1121 if (IS_FAT_LOCK(lockword)) {
1125 lr = GET_FAT_LOCK(lockword);
1127 /* check if we own this monitor */
1128 /* We don't have to worry about stale values here, as any stale value */
1129 /* will be != t and thus fail this check. */
1131 if (lr->owner != t) {
1132 exceptions_throw_illegalmonitorstateexception();
1136 /* { the current thread `t` owns the lock record `lr` on object `o` } */
1138 if (lr->count != 0) {
1139 /* we had locked this one recursively. just decrement, it will */
1140 /* still be locked. */
1145 /* unlock this lock record */
1148 Mutex_unlock(lr->mutex);
1153 /* legal thin lock cases have been handled above, so this is an error */
1155 exceptions_throw_illegalmonitorstateexception();
1161 /* lock_record_add_waiter ******************************************************
1163 Add a thread to the list of waiting threads of a lock record.
1166 lr...........the lock record
1167 thread.......the thread to add
1169 *******************************************************************************/
1171 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
/* Appends `thread` to lr's waiters list via a heap-allocated
   lock_waiter_t node; freed again by lock_record_remove_waiter().
   NOTE(review): listing is elided -- braces, the `w` local, the
   `w->thread = thread` store and the #endif are missing from this
   excerpt. */
1175 /* Allocate a waiter data structure. */
1177 w = NEW(lock_waiter_t);
1179 #if defined(ENABLE_STATISTICS)
1181 size_lock_waiter += sizeof(lock_waiter_t);
1184 /* Store the thread in the waiter structure. */
1188 /* Add the waiter as last entry to waiters list. */
1190 list_add_last(lr->waiters, w);
1194 /* lock_record_remove_waiter ***************************************************
1196 Remove a thread from the list of waiting threads of a lock record.
1199 lr...........the lock record
1200 t............the current thread
1203 The current thread must be the owner of the lock record.
1205 *******************************************************************************/
1207 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
/* Finds `thread` in lr's waiters list, unlinks and frees its
   lock_waiter_t node. Aborts the VM if the thread is not listed --
   that would mean add/remove calls are unbalanced.
   NOTE(review): listing is elided -- braces, locals (l, w), the
   `list_remove(l, w)` call, the early `return` after freeing and the
   #endif are missing from this excerpt. */
1212 /* Get the waiters list. */
1216 for (w = list_first(l); w != NULL; w = list_next(l, w)) {
1217 if (w->thread == thread) {
1218 /* Remove the waiter entry from the list. */
1222 /* Free the waiter data structure. */
1224 FREE(w, lock_waiter_t);
1226 #if defined(ENABLE_STATISTICS)
1228 size_lock_waiter -= sizeof(lock_waiter_t);
1235 /* This should never happen. */
1237 vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
1241 /* lock_record_wait ************************************************************
1243 Wait on a lock record for a given (maximum) amount of time.
1246 t............the current thread
1247 lr...........the lock record
1248 millis.......milliseconds of timeout
1249 nanos........nanoseconds of timeout
1252 true.........we have been interrupted,
1253 false........everything ok
1256 The current thread must be the owner of the lock record.
1257 This is NOT checked by this function!
1259 *******************************************************************************/
1261 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
/* Object.wait() on a fat lock the caller owns: registers as a waiter,
   saves and zeroes the recursion count while the record is released,
   blocks (possibly with timeout), then re-enters the record, restores
   the count and deregisters. Returns true iff the wait ended by
   interruption rather than notify/timeout; the signaled/interrupted
   flag reconciliation below implements "signal wins over a concurrent
   interrupt" as the comment describes.
   NOTE(review): listing is elided -- braces, the `lockcount` local,
   the zeroing of lr->count before the wait and the else-branch that
   clears `interrupted` are missing from this excerpt. */
1264 bool wasinterrupted = false;
1266 DEBUGLOCKS(("[lock_record_wait : lr=%p, t=%p, millis=%lld, nanos=%d]",
1267 lr, thread, millis, nanos));
1269 /* { the thread t owns the fat lock record lr on the object o } */
1271 /* register us as waiter for this object */
1273 lock_record_add_waiter(lr, thread);
1275 /* remember the old lock count */
1277 lockcount = lr->count;
1279 /* unlock this record */
1282 lock_record_exit(thread, lr);
1284 /* wait until notified/interrupted/timed out */
1286 threads_wait_with_timeout_relative(thread, millis, nanos);
1288 /* re-enter the monitor */
1290 lock_record_enter(thread, lr);
1292 /* remove us from the list of waiting threads */
1294 lock_record_remove_waiter(lr, thread);
1296 /* restore the old lock count */
1298 lr->count = lockcount;
1300 /* We can only be signaled OR interrupted, not both. If both flags
1301 are set, reset only signaled and leave the thread in
1302 interrupted state. Otherwise, clear both. */
1304 if (!thread->signaled) {
1305 wasinterrupted = thread->interrupted;
1306 thread->interrupted = false;
1309 thread->signaled = false;
1311 /* return if we have been interrupted */
1313 return wasinterrupted;
1317 /* lock_monitor_wait ***********************************************************
1319 Wait on an object for a given (maximum) amount of time.
1322 t............the current thread
1323 o............the object
1324 millis.......milliseconds of timeout
1325 nanos........nanoseconds of timeout
1328 The current thread must be the owner of the object's monitor.
1330 *******************************************************************************/
1332 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
/* Object.wait() entry point: verifies monitor ownership (fat: lr->owner
   == t; thin: TID bits match t->thinlock), inflates a thin lock first
   (waiting requires a fat lock record), then delegates to
   lock_record_wait() and converts an interrupted wait into
   InterruptedException. Non-owners get IllegalMonitorStateException.
   NOTE(review): listing is elided -- braces, locals (lockword, lr) and
   the `return` statements after the exception throws are missing from
   this excerpt. */
1337 lockword = lock_lockword_get(t, o);
1339 /* check if we own this monitor */
1340 /* We don't have to worry about stale values here, as any stale value */
1341 /* will fail this check. */
1343 if (IS_FAT_LOCK(lockword)) {
1345 lr = GET_FAT_LOCK(lockword);
1347 if (lr->owner != t) {
1348 exceptions_throw_illegalmonitorstateexception();
1353 /* it's a thin lock */
1355 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1356 exceptions_throw_illegalmonitorstateexception();
1360 /* inflate this lock */
1362 lr = lock_hashtable_get(t, o);
1363 lock_record_enter(t, lr);
1364 lock_inflate(t, o, lr);
1366 notify_flc_waiters(t, o);
1369 /* { the thread t owns the fat lock record lr on the object o } */
1371 if (lock_record_wait(t, lr, millis, nanos))
1372 exceptions_throw_interruptedexception();
1376 /* lock_record_notify **********************************************************
1378 Notify one thread or all threads waiting on the given lock record.
1381 t............the current thread
1382 lr...........the lock record
1383 one..........if true, only notify one thread
1386 The current thread must be the owner of the lock record.
1387 This is NOT checked by this function!
1389 *******************************************************************************/
1391 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
/* Object.notify()/notifyAll() on a fat lock: walks the waiters list,
   skips threads already marked `signaled` (they will dequeue
   themselves), and for each remaining waiter signals its waitcond and
   sets `signaled` under that thread's waitmutex. Stops after the first
   wake-up when `one` is true.
   NOTE(review): listing is elided -- braces, the `l` local holding
   lr->waiters, and the `if (one) break;` (or equivalent) ending the
   loop are missing from this excerpt. */
1395 threadobject *waitingthread;
1397 /* { the thread t owns the fat lock record lr on the object o } */
1399 /* Get the waiters list. */
1403 for (w = list_first(l); w != NULL; w = list_next(l, w)) {
1404 /* signal the waiting thread */
1406 waitingthread = w->thread;
1408 /* We must skip threads which have already been notified. They will
1409 remove themselves from the list. */
1411 if (waitingthread->signaled)
1414 /* Enter the wait-mutex. */
1416 Mutex_lock(waitingthread->waitmutex);
1418 DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, one=%d]",
1419 lr, t, waitingthread, one));
1421 Condition_signal(waitingthread->waitcond);
1423 /* Mark the thread as signaled. */
1425 waitingthread->signaled = true;
1427 /* Leave the wait-mutex. */
1429 Mutex_unlock(waitingthread->waitmutex);
1431 /* if we should only wake one, we are done */
1439 /* lock_monitor_notify *********************************************************
1441 Notify one thread or all threads waiting on the given object.
1444 t............the current thread
1445 o............the object
1446 one..........if true, only notify one thread
1449 The current thread must be the owner of the object's monitor.
1451 *******************************************************************************/
1453 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
/* Notify one (one == true) or all threads waiting on object o's monitor.
   Throws IllegalMonitorStateException if t does not own the monitor.
   NOTE(review): interior lines (declarations, braces, early returns) are
   elided in this view; comments annotate only the visible statements. */
1458 	lockword = lock_lockword_get(t, o);
1460 	/* check if we own this monitor */
1461 	/* We don't have to worry about stale values here, as any stale value */
1462 	/* will fail this check. */
1464 	if (IS_FAT_LOCK(lockword)) {
1466 		lr = GET_FAT_LOCK(lockword);
1468 		if (lr->owner != t) {
/* not the owner: Object.notify()/notifyAll() requires
   IllegalMonitorStateException */
1469 			exceptions_throw_illegalmonitorstateexception();
1474 		/* it's a thin lock */
1476 		if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1477 			exceptions_throw_illegalmonitorstateexception();
1481 		/* no thread can wait on a thin lock, so there's nothing to do. */
1485 	/* { the thread t owns the fat lock record lr on the object o } */
1487 	lock_record_notify(t, lr, one);
1492 /*============================================================================*/
1493 /* INQUIRY FUNCTIONS                                                          */
1494 /*============================================================================*/
1497 /* lock_is_held_by_current_thread **********************************************
1499 Return true if the current thread owns the monitor of the given object.
1502 o............the object
1505 true, if the current thread holds the lock of this object.
1507 *******************************************************************************/
1509 bool lock_is_held_by_current_thread(java_handle_t *o)
/* Return true iff the current thread owns the monitor of object o.
   For a fat lock this compares the record's owner field; for a thin lock
   it compares the lock word (ignoring the recursion count) against the
   current thread's thinlock value.
   NOTE(review): declarations and braces are elided in this view. */
1517 	/* check if we own this monitor */
1518 	/* We don't have to worry about stale values here, as any stale value */
1519 	/* will fail this check. */
1521 	lockword = lock_lockword_get(t, o);
1523 	if (IS_FAT_LOCK(lockword)) {
1524 		/* it's a fat lock */
1526 		lr = GET_FAT_LOCK(lockword);
1528 		return (lr->owner == t);
1531 		/* it's a thin lock */
1533 		return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1539 /*============================================================================*/
1540 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1541 /*============================================================================*/
1544 /* lock_wait_for_object ********************************************************
1546 Wait for the given object.
1549 o............the object
1550 millis.......milliseconds to wait
1551 nanos........nanoseconds to wait
1553 *******************************************************************************/
1555 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
/* Public wrapper: wait on o's monitor on behalf of the current thread
   (THREADOBJECT). See lock_monitor_wait for semantics and exceptions. */
1557 	threadobject *thread;
1559 	thread = THREADOBJECT;
1561 	lock_monitor_wait(thread, o, millis, nanos);
1565 /* lock_notify_object **********************************************************
1567 Notify one thread waiting on the given object.
1570 o............the object
1572 *******************************************************************************/
1574 void lock_notify_object(java_handle_t *o)
/* Public wrapper: notify ONE thread waiting on o's monitor, on behalf of
   the current thread. See lock_monitor_notify for semantics. */
1576 	threadobject *thread;
1578 	thread = THREADOBJECT;
1580 	lock_monitor_notify(thread, o, true);
1584 /* lock_notify_all_object ******************************************************
1586 Notify all threads waiting on the given object.
1589 o............the object
1591 *******************************************************************************/
1593 void lock_notify_all_object(java_handle_t *o)
/* Public wrapper: notify ALL threads waiting on o's monitor, on behalf of
   the current thread. See lock_monitor_notify for semantics. */
1595 	threadobject *thread;
1597 	thread = THREADOBJECT;
1599 	lock_monitor_notify(thread, o, false);
1604 * These are local overrides for various environment variables in Emacs.
1605 * Please do not remove this and leave it at the end of the file, where
1606 * Emacs will automagically detect them.
1607 * ---------------------------------------------------------------------
1610 * indent-tabs-mode: t
1614 * vim:noexpandtab:sw=4:ts=4: