1 /* src/threads/native/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
4 C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5 E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6 J. Wenninger, Institut f. Computersprachen - TU Wien
8 This file is part of CACAO.
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2, or (at
13 your option) any later version.
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
39 #include "mm/memory.h"
41 #include "native/llni.h"
43 #include "threads/lock-common.h"
45 #include "threads/native/lock.h"
46 #include "threads/native/threads.h"
48 #include "toolbox/list.h"
50 #include "vm/global.h"
51 #include "vm/exceptions.h"
52 #include "vm/finalizer.h"
53 #include "vm/stringlocal.h"
56 #include "vmcore/options.h"
58 #if defined(ENABLE_STATISTICS)
59 # include "vmcore/statistics.h"
62 #if defined(ENABLE_VMLOG)
63 #include <vmlog_cacao.h>
66 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
70 /* includes for atomic instructions: */
72 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
73 #include "threads/native/generic-primitives.h"
75 #include "machine-instr.h"
78 #if defined(ENABLE_JVMTI)
79 #include "native/jvmti/cacaodbg.h"
82 #if defined(ENABLE_GC_BOEHM)
83 # include "mm/boehm-gc/include/gc.h"
87 /* debug **********************************************************************/
90 # define DEBUGLOCKS(format) \
92 if (opt_DebugLocks) { \
97 # define DEBUGLOCKS(format)
101 /******************************************************************************/
103 /******************************************************************************/
105 /* number of lock records in the first pool allocated for a thread */
106 #define LOCK_INITIAL_LOCK_RECORDS 8
108 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
110 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
111 ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
114 /******************************************************************************/
115 /* MACROS FOR THE FLAT LOCK CONTENTION BIT */
116 /******************************************************************************/
118 #define LOCK_SET_FLC_BIT(obj) ((obj)->hdrflags |= HDRFLAG_FLC)
119 #define LOCK_CLEAR_FLC_BIT(obj) ((obj)->hdrflags &= ~ HDRFLAG_FLC)
120 #define LOCK_TEST_FLC_BIT(obj) ((obj)->hdrflags & HDRFLAG_FLC)
123 /******************************************************************************/
124 /* MACROS FOR THIN/FAT LOCKS */
125 /******************************************************************************/
127 /* We use a variant of the tasuki locks described in the paper
129 * Tamiya Onodera, Kiyokuni Kawachiya
130 * A Study of Locking Objects with Bimodal Fields
131 * Proceedings of the ACM OOPSLA '99, pp. 223-237
134 * The underlying thin locks are a variant of the thin locks described in
136 * Bacon, Konuru, Murthy, Serrano
137 * Thin Locks: Featherweight Synchronization for Java
138 * Proceedings of the ACM Conference on Programming Language Design and
139 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
142 * In thin lock mode the lockword looks like this:
144 * ,----------------------,-----------,---,
145 * | thread ID | count | 0 |
146 * `----------------------'-----------'---´
148 * thread ID......the 'index' of the owning thread, or 0
149 * count..........number of times the lock has been entered minus 1
150 * 0..............the shape bit is 0 in thin lock mode
152 * In fat lock mode it is basically a lock_record_t *:
154 * ,----------------------------------,---,
155 * | lock_record_t * (without LSB) | 1 |
156 * `----------------------------------'---´
158 * 1..............the shape bit is 1 in fat lock mode
161 #if SIZEOF_VOID_P == 8
162 #define THIN_LOCK_WORD_SIZE 64
164 #define THIN_LOCK_WORD_SIZE 32
167 #define THIN_LOCK_SHAPE_BIT 0x01
169 #define THIN_UNLOCKED 0
171 #define THIN_LOCK_COUNT_SHIFT 1
172 #define THIN_LOCK_COUNT_SIZE 8
173 #define THIN_LOCK_COUNT_INCR (1 << THIN_LOCK_COUNT_SHIFT)
174 #define THIN_LOCK_COUNT_MAX ((1 << THIN_LOCK_COUNT_SIZE) - 1)
175 #define THIN_LOCK_COUNT_MASK (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
177 #define THIN_LOCK_TID_SHIFT (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
178 #define THIN_LOCK_TID_SIZE (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
180 #define IS_THIN_LOCK(lockword) (!((lockword) & THIN_LOCK_SHAPE_BIT))
181 #define IS_FAT_LOCK(lockword) ((lockword) & THIN_LOCK_SHAPE_BIT)
183 #define GET_FAT_LOCK(lockword) ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
184 #define MAKE_FAT_LOCK(ptr) ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)
186 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
189 /* global variables ***********************************************************/
191 /* hashtable mapping objects to lock records */
192 static lock_hashtable_t lock_hashtable;
195 /******************************************************************************/
197 /******************************************************************************/
199 static void lock_hashtable_init(void);
201 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
202 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
203 static void lock_record_enter(threadobject *t, lock_record_t *lr);
204 static void lock_record_exit(threadobject *t, lock_record_t *lr);
205 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
206 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
209 /*============================================================================*/
210 /* INITIALIZATION OF DATA STRUCTURES */
211 /*============================================================================*/
214 /* lock_init *******************************************************************
216 Initialize global data for locking.
218 *******************************************************************************/
222 /* initialize lock hashtable */
224 lock_hashtable_init();
226 #if defined(ENABLE_VMLOG)
227 vmlog_cacao_init_lock();
232 /* lock_pre_compute_thinlock ***************************************************
234 Pre-compute the thin lock value for a thread index.
237       index........the thread index (>= 1)
240 the thin lock value for this thread index
242 *******************************************************************************/
/* Build the thin-lock word for a thread: the index goes into the TID
   field; ORing THIN_UNLOCKED (0) leaves count and shape bits zero. */
244 ptrint lock_pre_compute_thinlock(s4 index)
246 	return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
250 /* lock_record_new *************************************************************
252 Allocate a lock record.
254 *******************************************************************************/
/* Allocate and initialize one fat-lock record: waiters list, optional
   GC weak reference, and the record's mutex. Caller owns the result
   (freed via lock_record_free). */
256 static lock_record_t *lock_record_new(void)
260 	/* allocate the data structure on the C heap */
262 	lr = NEW(lock_record_t);
264 #if defined(ENABLE_STATISTICS)
266 		size_lock_record += sizeof(lock_record_t);
269 	/* initialize the members */
274 	lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));
276 #if defined(ENABLE_GC_CACAO)
277 	/* register the lock object as weak reference with the GC */
279 	gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
282 	/* initialize the mutex */
284 	pthread_mutex_init(&(lr->mutex), NULL);
286 	DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
292 /* lock_record_free ************************************************************
297 lr....lock record to free
299 *******************************************************************************/
/* Tear down a lock record in reverse order of lock_record_new:
   destroy the mutex, drop the GC weak reference (exact GC only),
   free the waiters list and finally the record itself. The record
   must no longer be reachable from the hashtable. */
301 static void lock_record_free(lock_record_t *lr)
303 	DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
305 	/* Destroy the mutex. */
307 	pthread_mutex_destroy(&(lr->mutex));
309 #if defined(ENABLE_GC_CACAO)
310 	/* unregister the lock object reference with the GC */
312 	gc_weakreference_unregister(&(lr->object));
315 	/* Free the waiters list. */
317 	list_free(lr->waiters);
319 	/* Free the data structure. */
321 	FREE(lr, lock_record_t);
323 #if defined(ENABLE_STATISTICS)
325 	size_lock_record -= sizeof(lock_record_t);
330 /*============================================================================*/
331 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
332 /*============================================================================*/
334 /* lock_hashtable_init *********************************************************
336 Initialize the global hashtable mapping objects to lock records.
338 *******************************************************************************/
/* Set up the global object->lock-record hashtable: its mutex, the
   initial (prime-sized) bucket array, and an entry count of zero. */
340 static void lock_hashtable_init(void)
342 	pthread_mutex_init(&(lock_hashtable.mutex), NULL);
344 	lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
345 	lock_hashtable.entries = 0;
346 	lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);
348 #if defined(ENABLE_STATISTICS)
350 		size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
/* buckets must start out empty (NULL chain heads) */
353 	MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
357 /* lock_hashtable_grow *********************************************************
359 Grow the lock record hashtable to about twice its current size and
362 *******************************************************************************/
364 /* must be called with hashtable mutex locked */
/* Double (roughly) the hashtable and rehash every chained lock record
   into the new bucket array, then free the old array.
   PRECONDITION: lock_hashtable.mutex is held by the caller. */
365 static void lock_hashtable_grow(void)
369 	lock_record_t **oldtable;
370 	lock_record_t **newtable;
377 	/* allocate a new table */
379 	oldsize = lock_hashtable.size;
380 	newsize = oldsize*2 + 1; /* XXX should use prime numbers */
382 	DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
384 	oldtable = lock_hashtable.ptr;
385 	newtable = MNEW(lock_record_t *, newsize);
387 #if defined(ENABLE_STATISTICS)
389 		size_lock_hashtable += sizeof(lock_record_t *) * newsize;
392 	MZERO(newtable, lock_record_t *, newsize);
394 	/* rehash the entries */
396 	for (i = 0; i < oldsize; i++) {
/* re-bucket by the (address-stable) heap hashcode of the locked object;
   records are pushed onto the head of the new chain */
401 			h       = heap_hashcode(lr->object);
402 			newslot = h % newsize;
404 			lr->hashlink = newtable[newslot];
405 			newtable[newslot] = lr;
411 	/* replace the old table */
413 	lock_hashtable.ptr  = newtable;
414 	lock_hashtable.size = newsize;
416 	MFREE(oldtable, lock_record_t *, oldsize);
418 #if defined(ENABLE_STATISTICS)
420 		size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
425 /* lock_hashtable_cleanup ******************************************************
427 Removes (and frees) lock records which have a cleared object reference
428 from the hashtable. The locked object was reclaimed by the GC.
430 *******************************************************************************/
432 #if defined(ENABLE_GC_CACAO)
/* Exact-GC path only: scan all buckets and unlink/free lock records
   whose weak object reference was cleared by the GC (object died).
   Takes the hashtable mutex for the whole scan. */
433 void lock_hashtable_cleanup(void)
443 	/* lock the hashtable */
445 	pthread_mutex_lock(&(lock_hashtable.mutex));
447 	/* search the hashtable for cleared references */
449 	for (i = 0; i < lock_hashtable.size; i++) {
450 		lr = lock_hashtable.ptr[i];
456 			/* remove lock records with cleared references */
458 			if (lr->object == NULL) {
460 				/* unlink the lock record from the hashtable */
/* NOTE(review): head-of-chain vs. mid-chain unlink — `prev` is presumably
   NULL for the chain head; the branch selecting between the two
   assignments is not visible here. */
463 					lock_hashtable.ptr[i] = next;
465 					prev->hashlink = next;
467 				/* free the lock record */
469 				lock_record_free(lr);
479 	/* unlock the hashtable */
481 	pthread_mutex_unlock(&(lock_hashtable.mutex));
486 /* lock_hashtable_get **********************************************************
488    Find the lock record for the given object. If it does not exist,
489 yet, create it and enter it in the hashtable.
492 t....the current thread
493 o....the object to look up
496 the lock record to use for this object
498 *******************************************************************************/
500 #if defined(ENABLE_GC_BOEHM)
501 static void lock_record_finalizer(void *object, void *p);
/* Return the lock record for object o, creating and inserting one if
   none exists yet. Fast path: if the lockword is already fat, the
   record is encoded in it and no hashtable access is needed. */
504 static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
510 	lockword = lock_lockword_get(t, o);
512 	if (IS_FAT_LOCK(lockword))
513 		return GET_FAT_LOCK(lockword);
515 	/* lock the hashtable */
517 	pthread_mutex_lock(&(lock_hashtable.mutex));
519 	/* lookup the lock record in the hashtable */
521 	LLNI_CRITICAL_START_THREAD(t);
522 	slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
523 	lr   = lock_hashtable.ptr[slot];
525 	for (; lr != NULL; lr = lr->hashlink) {
526 		if (lr->object == LLNI_DIRECT(o))
529 	LLNI_CRITICAL_END_THREAD(t);
532 		/* not found, we must create a new one */
534 		lr = lock_record_new();
536 		LLNI_CRITICAL_START_THREAD(t);
537 		lr->object = LLNI_DIRECT(o);
538 		LLNI_CRITICAL_END_THREAD(t);
540 #if defined(ENABLE_GC_BOEHM)
541 		/* register new finalizer to clean up the lock record */
/* the Boehm finalizer removes the record again when o is collected */
543 		GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
546 		/* enter it in the hashtable */
548 		lr->hashlink             = lock_hashtable.ptr[slot];
549 		lock_hashtable.ptr[slot] = lr;
550 		lock_hashtable.entries++;
552 		/* check whether the hash should grow */
/* grow when load factor exceeds 4/3 (entries/size > 4/3) */
554 		if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
555 			lock_hashtable_grow();
559 	/* unlock the hashtable */
561 	pthread_mutex_unlock(&(lock_hashtable.mutex));
563 	/* return the new lock record */
569 /* lock_hashtable_remove *******************************************************
571 Remove the lock record for the given object from the hashtable
572 and free it afterwards.
575 t....the current thread
576 o....the object to look up
578 *******************************************************************************/
/* Unlink the fat-lock record of object o from its hash chain and free
   it. The lockword must already be fat (asserted); called from the
   finalizer path when the object is being reclaimed. */
580 static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
585 	lock_record_t *tmplr;
587 	/* lock the hashtable */
589 	pthread_mutex_lock(&(lock_hashtable.mutex));
591 	/* get lock record */
593 	lockword = lock_lockword_get(t, o);
595 	assert(IS_FAT_LOCK(lockword));
597 	lr = GET_FAT_LOCK(lockword);
599 	/* remove the lock-record from the hashtable */
601 	LLNI_CRITICAL_START_THREAD(t);
602 	slot  = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
603 	tmplr = lock_hashtable.ptr[slot];
604 	LLNI_CRITICAL_END_THREAD(t);
607 		/* special handling if it's the first in the chain */
609 		lock_hashtable.ptr[slot] = lr->hashlink;
/* otherwise walk the chain until the predecessor of lr is found */
612 		for (; tmplr != NULL; tmplr = tmplr->hashlink) {
613 			if (tmplr->hashlink == lr) {
614 				tmplr->hashlink = lr->hashlink;
/* lr must have been found in this bucket's chain */
619 		assert(tmplr != NULL);
622 	/* decrease entry count */
624 	lock_hashtable.entries--;
626 	/* unlock the hashtable */
628 	pthread_mutex_unlock(&(lock_hashtable.mutex));
630 	/* free the lock record */
632 	lock_record_free(lr);
636 /* lock_record_finalizer *******************************************************
638 XXX Remove me for exact GC.
640 *******************************************************************************/
/* Boehm-GC finalizer installed by lock_hashtable_get: runs the object's
   own Java finalizer (if any) and then discards its lock record.
   XXX Remove me for exact GC. */
642 static void lock_record_finalizer(void *object, void *p)
647 	o = (java_handle_t *) object;
649 #if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
650 	/* XXX this is only a dirty hack to make Boehm work with handles */
652 	o = LLNI_WRAP((java_object_t *) o);
655 	LLNI_class_get(o, c);
658 	if (opt_DebugFinalizer) {
660 		log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
667 	/* check for a finalizer function */
669 	if (c->finalizer != NULL)
670 		finalizer_run(object, p);
672 	/* remove the lock-record entry from the hashtable and free it */
674 	lock_hashtable_remove(THREADOBJECT, o);
678 /*============================================================================*/
679 /* OBJECT LOCK INITIALIZATION */
680 /*============================================================================*/
683 /* lock_init_object_lock *******************************************************
685 Initialize the monitor pointer of the given object. The monitor gets
686 initialized to an unlocked state.
688 *******************************************************************************/
/* Put a freshly allocated object into the unlocked thin-lock state
   and clear its flat-lock-contention flag. */
690 void lock_init_object_lock(java_object_t *o)
694 	o->lockword = THIN_UNLOCKED;
695 	LOCK_CLEAR_FLC_BIT(o);
699 /* lock_get_initial_lock_word **************************************************
701 Returns the initial (unlocked) lock word. The pointer is
702 required in the code generator to set up a virtual
703 java_objectheader for code patch locking.
705 *******************************************************************************/
/* THIN_UNLOCKED (0) reinterpreted as a lock_record_t* — used by the
   code generator to fake an unlocked object header for patch locking. */
707 lock_record_t *lock_get_initial_lock_word(void)
709 	return (lock_record_t *) THIN_UNLOCKED;
714 /*============================================================================*/
715 /* LOCKING ALGORITHM */
716 /*============================================================================*/
719 /* lock_lockword_get ***********************************************************
721 Get the lockword for the given object.
724 t............the current thread
725 o............the object
727 *******************************************************************************/
/* Read o's lockword inside an LLNI critical section so the raw object
   pointer stays valid while it is dereferenced. */
729 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
733 	LLNI_CRITICAL_START_THREAD(t);
734 	lockword = LLNI_DIRECT(o)->lockword;
735 	LLNI_CRITICAL_END_THREAD(t);
741 /* lock_lockword_set ***********************************************************
743 Set the lockword for the given object.
746 t............the current thread
747 o............the object
748 lockword.....the new lockword value
750 *******************************************************************************/
/* Store a new lockword into o, guarded by an LLNI critical section
   (mirror of lock_lockword_get). */
752 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
754 	LLNI_CRITICAL_START_THREAD(t);
755 	LLNI_DIRECT(o)->lockword = lockword;
756 	LLNI_CRITICAL_END_THREAD(t);
760 /* lock_record_enter ***********************************************************
762 Enter the lock represented by the given lock record.
765 t.................the current thread
766 lr................the lock record
768 *******************************************************************************/
/* Block until the record's mutex is acquired; ownership bookkeeping
   (lr->owner) is handled by the callers. */
770 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
772 	pthread_mutex_lock(&(lr->mutex));
777 /* lock_record_exit ************************************************************
779 Release the lock represented by the given lock record.
782 t.................the current thread
783 lr................the lock record
786 The current thread must own the lock represented by this lock record.
787 This is NOT checked by this function!
789 *******************************************************************************/
/* Release the record's mutex. Caller must own it — NOT checked here. */
791 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
794 	pthread_mutex_unlock(&(lr->mutex));
798 /* lock_inflate ****************************************************************
800 Inflate the lock of the given object. This may only be called by the
801 owner of the monitor of the object.
804 t............the current thread
805 o............the object of which to inflate the lock
806 lr...........the lock record to install. The current thread must
807 own the lock of this lock record!
810 The current thread must be the owner of this object's monitor AND
811 of the lock record's lock!
813 *******************************************************************************/
/* Convert o's thin lock into a fat lock backed by lr. Caller must own
   both o's monitor and lr's mutex. Idempotent: if the lockword is
   already fat it must already point at lr (asserted). */
815 static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
819 	/* get the current lock count */
821 	lockword = lock_lockword_get(t, o);
823 	if (IS_FAT_LOCK(lockword)) {
824 		assert(GET_FAT_LOCK(lockword) == lr);
/* the thin lock must be held by the current thread */
827 		assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
829 		/* copy the count from the thin lock */
831 		lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
834 		DEBUGLOCKS(("[lock_inflate            : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
835 					lr, t, o, lockword, lr->count));
837 		/* clear flat-lock-contention bit */
839 		LLNI_CRITICAL_START_THREAD(t);
840 		LOCK_CLEAR_FLC_BIT(LLNI_DIRECT(o));
841 		LLNI_CRITICAL_END_THREAD(t);
843 		/* notify waiting objects */
845 		lock_record_notify(t, lr, false);
/* publish the fat lock: store lr (with shape bit set) as the lockword */
849 	lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
853 /* lock_monitor_enter **********************************************************
855 Acquire the monitor of the given object. If the current thread already
856 owns the monitor, the lock counter is simply increased.
858 This function blocks until it can acquire the monitor.
861 t............the current thread
862 o............the object of which to enter the monitor
865 true.........the lock has been successfully acquired
866 false........an exception has been thrown
868 *******************************************************************************/
/* Acquire o's monitor, blocking if necessary. Order of attempts:
   (1) CAS an unlocked thin lock, (2) bump the recursion count of a
   thin lock we already hold (inflating on count overflow), (3) enter
   an existing fat lock, (4) contention: take the record's mutex, set
   the FLC bit and wait until the thin-lock holder releases, then
   inflate ourselves. Returns false only after throwing an exception. */
870 bool lock_monitor_enter(java_handle_t *o)
873 	/* CAUTION: This code assumes that ptrint is unsigned! */
879 		exceptions_throw_nullpointerexception();
885 	thinlock = t->thinlock;
887 	/* most common case: try to thin-lock an unlocked object */
889 	LLNI_CRITICAL_START_THREAD(t);
890 	lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
891 	LLNI_CRITICAL_END_THREAD(t);
893 	if (lockword == THIN_UNLOCKED) {
894 		/* success. we locked it */
895 		/* The Java Memory Model requires a memory barrier here: */
900 	/* next common case: recursive lock with small recursion count */
901 	/* We don't have to worry about stale values here, as any stale value */
902 	/* will indicate another thread holding the lock (or an inflated lock) */
904 	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
905 		/* we own this monitor */
906 		/* check the current recursion count */
/* XOR leaves only the count bits, so this compares count < max */
908 		if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
910 			/* the recursion count is low enough */
912 			lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);
914 			/* success. we locked it */
918 			/* recursion count overflow */
/* fall back to a fat lock so the count can live in lr->count */
920 			lr = lock_hashtable_get(t, o);
921 			lock_record_enter(t, lr);
922 			lock_inflate(t, o, lr);
929 	/* the lock is either contended or fat */
931 	if (IS_FAT_LOCK(lockword)) {
933 		lr = GET_FAT_LOCK(lockword);
935 		/* check for recursive entering */
936 		if (lr->owner == t) {
941 		/* acquire the mutex of the lock record */
943 		lock_record_enter(t, lr);
/* fresh acquisition of a fat lock: no recursion yet */
945 		assert(lr->count == 0);
950 	/****** inflation path ******/
952 	/* first obtain the lock record for this object */
954 	lr = lock_hashtable_get(t, o);
956 #if defined(ENABLE_JVMTI)
957 	/* Monitor Contended Enter */
958 	jvmti_MonitorContendedEntering(false, o);
961 	/* enter the monitor */
963 	lock_record_enter(t, lr);
965 #if defined(ENABLE_JVMTI)
966 	/* Monitor Contended Entered */
967 	jvmti_MonitorContendedEntering(true, o);
/* loop until the lock is no longer thin: either we CAS it ourselves
   (and inflate) or the holder notices the FLC bit and notifies us */
972 	while (IS_THIN_LOCK(lockword = lock_lockword_get(t, o))) {
973 		/* Set the flat lock contention bit to let the owning thread
974 		   know that we want to be notified of unlocking. */
976 		LLNI_CRITICAL_START_THREAD(t);
977 		LOCK_SET_FLC_BIT(LLNI_DIRECT(o));
978 		LLNI_CRITICAL_END_THREAD(t);
980 		DEBUGLOCKS(("thread %d set flc bit on %p lr %p",
981 					t->index, (void*) o, (void*) lr));
983 		/* try to lock the object */
985 		LLNI_CRITICAL_START_THREAD(t);
986 		lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
987 		LLNI_CRITICAL_END_THREAD(t);
989 		if (lockword == THIN_UNLOCKED) {
990 			/* we can inflate the lock ourselves */
992 			DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
993 						t->index, (void*) o, (void*) lr));
995 			lock_inflate(t, o, lr);
998 			/* Wait until another thread sees the flc bit and notifies
1001 			(void) lock_record_wait(t, lr, 0, 0);
1005 	/* we own the inflated lock now */
1011 /* lock_monitor_exit ***********************************************************
1013 Decrement the counter of a (currently owned) monitor. If the counter
1014 reaches zero, release the monitor.
1016 If the current thread is not the owner of the monitor, an
1017 IllegalMonitorState exception is thrown.
1020 t............the current thread
1021 o............the object of which to exit the monitor
1024 true.........everything ok,
1025 false........an exception has been thrown
1027 *******************************************************************************/
/* Release o's monitor once. Thin lock held exactly once: store
   THIN_UNLOCKED and, if the FLC bit was set, notify a contending
   thread so it can inflate. Recursive thin lock: decrement the count.
   Fat lock: decrement lr->count or unlock the record's mutex. Throws
   IllegalMonitorStateException if the caller is not the owner. */
1029 bool lock_monitor_exit(java_handle_t *o)
1036 		exceptions_throw_nullpointerexception();
1042 	thinlock = t->thinlock;
1044 	/* We don't have to worry about stale values here, as any stale value */
1045 	/* will indicate that we don't own the lock. */
1047 	lockword = lock_lockword_get(t, o);
1049 	/* most common case: we release a thin lock that we hold once */
1051 	if (lockword == thinlock) {
1052 		/* memory barrier for Java Memory Model */
1054 		lock_lockword_set(t, o, THIN_UNLOCKED);
1055 		/* memory barrier for thin locking */
1058 		/* check if there has been a flat lock contention on this object */
1060 		if (LOCK_TEST_FLC_BIT(LLNI_DIRECT(o))) {
1063 			DEBUGLOCKS(("thread %d saw flc bit on %p",
1064 						t->index, (void*) o));
1066 			/* there has been a contention on this thin lock */
1068 			lr = lock_hashtable_get(t, o);
1070 			DEBUGLOCKS(("thread %d for %p got lr %p",
1071 						t->index, (void*) o, (void*) lr));
1073 			lock_record_enter(t, lr);
/* re-test under the record mutex: the bit may have been cleared by a
   concurrent inflation in the meantime */
1075 			if (LOCK_TEST_FLC_BIT(LLNI_DIRECT(o))) {
1076 				/* notify a thread that it can try to inflate the lock now */
1078 				lock_record_notify(t, lr, true);
1081 			lock_record_exit(t, lr);
1087 	/* next common case: we release a recursive lock, count > 0 */
1089 	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
1090 		lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
1094 	/* either the lock is fat, or we don't hold it at all */
1096 	if (IS_FAT_LOCK(lockword)) {
1100 		lr = GET_FAT_LOCK(lockword);
1102 		/* check if we own this monitor */
1103 		/* We don't have to worry about stale values here, as any stale value */
1104 		/* will be != t and thus fail this check. */
1106 		if (lr->owner != t) {
1107 			exceptions_throw_illegalmonitorstateexception();
1111 		/* { the current thread `t` owns the lock record `lr` on object `o` } */
1113 		if (lr->count != 0) {
1114 			/* we had locked this one recursively. just decrement, it will */
1115 			/* still be locked. */
1120 		/* unlock this lock record */
1123 		pthread_mutex_unlock(&(lr->mutex));
1128 	/* legal thin lock cases have been handled above, so this is an error */
1130 	exceptions_throw_illegalmonitorstateexception();
1136 /* lock_record_add_waiter ******************************************************
1138 Add a thread to the list of waiting threads of a lock record.
1141 lr...........the lock record
1142 thread.......the thread to add
1144 *******************************************************************************/
/* Append `thread` to lr's waiters list, wrapped in a freshly allocated
   lock_waiter_t (freed again by lock_record_remove_waiter). */
1146 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
1150 	/* Allocate a waiter data structure. */
1152 	w = NEW(lock_waiter_t);
1154 #if defined(ENABLE_STATISTICS)
1156 		size_lock_waiter += sizeof(lock_waiter_t);
1159 	/* Store the thread in the waiter structure. */
1163 	/* Add the waiter as last entry to waiters list. */
1165 	list_add_last(lr->waiters, w);
1169 /* lock_record_remove_waiter ***************************************************
1171 Remove a thread from the list of waiting threads of a lock record.
1174 lr...........the lock record
1175 t............the current thread
1178 The current thread must be the owner of the lock record.
1180 *******************************************************************************/
/* Find `thread` in lr's waiters list, unlink its entry and free it.
   Aborts the VM if the thread is not on the list (invariant violation).
   Caller must own the lock record. */
1182 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
1187 	/* Get the waiters list. */
/* unsynced list traversal is safe: the record's mutex serializes it */
1191 	for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
1192 		if (w->thread == thread) {
1193 			/* Remove the waiter entry from the list. */
1195 			list_remove_unsynced(l, w);
1197 			/* Free the waiter data structure. */
1199 			FREE(w, lock_waiter_t);
1201 #if defined(ENABLE_STATISTICS)
1203 			size_lock_waiter -= sizeof(lock_waiter_t);
1210 	/* This should never happen. */
1212 	vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
1216 /* lock_record_wait ************************************************************
1218 Wait on a lock record for a given (maximum) amount of time.
1221 t............the current thread
1222 lr...........the lock record
1223 millis.......milliseconds of timeout
1224 nanos........nanoseconds of timeout
1227 true.........we have been interrupted,
1228 false........everything ok
1231 The current thread must be the owner of the lock record.
1232 This is NOT checked by this function!
1234 *******************************************************************************/
/* Object.wait() on a fat lock: register as waiter, save and release
   the recursion count with the monitor, sleep (bounded by
   millis/nanos; 0/0 = indefinitely), then re-acquire the monitor and
   restore the count. Returns true iff the wait was interrupted.
   Caller must own the lock record — NOT checked here. */
1236 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1239 	bool wasinterrupted;
1241 	DEBUGLOCKS(("[lock_record_wait  : lr=%p, t=%p, millis=%lld, nanos=%d]",
1242 				lr, thread, millis, nanos));
1244 	/* { the thread t owns the fat lock record lr on the object o } */
1246 	/* register us as waiter for this object */
1248 	lock_record_add_waiter(lr, thread);
1250 	/* remember the old lock count */
1252 	lockcount = lr->count;
1254 	/* unlock this record */
1257 	lock_record_exit(thread, lr);
1259 	/* wait until notified/interrupted/timed out */
1261 	wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);
1263 	/* re-enter the monitor */
1265 	lock_record_enter(thread, lr);
1267 	/* remove us from the list of waiting threads */
1269 	lock_record_remove_waiter(lr, thread);
1271 	/* restore the old lock count */
1273 	lr->count = lockcount;
1275 	/* return if we have been interrupted */
1277 	return wasinterrupted;
1281 /* lock_monitor_wait ***********************************************************
1283 Wait on an object for a given (maximum) amount of time.
1286 t............the current thread
1287 o............the object
1288 millis.......milliseconds of timeout
1289 nanos........nanoseconds of timeout
1292 The current thread must be the owner of the object's monitor.
1294 *******************************************************************************/
/* Object.wait() entry point: verify monitor ownership (throwing
   IllegalMonitorStateException otherwise), inflate a thin lock to a
   fat one if needed, then delegate to lock_record_wait; throws
   InterruptedException if the wait was interrupted. */
1296 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
1301 	lockword = lock_lockword_get(t, o);
1303 	/* check if we own this monitor */
1304 	/* We don't have to worry about stale values here, as any stale value */
1305 	/* will fail this check. */
1307 	if (IS_FAT_LOCK(lockword)) {
1309 		lr = GET_FAT_LOCK(lockword);
1311 		if (lr->owner != t) {
1312 			exceptions_throw_illegalmonitorstateexception();
1317 		/* it's a thin lock */
1319 		if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1320 			exceptions_throw_illegalmonitorstateexception();
1324 		/* inflate this lock */
/* waiting requires a condition-variable-capable fat lock */
1326 		lr = lock_hashtable_get(t, o);
1327 		lock_record_enter(t, lr);
1328 		lock_inflate(t, o, lr);
1331 	/* { the thread t owns the fat lock record lr on the object o } */
1333 	if (lock_record_wait(t, lr, millis, nanos))
1334 		exceptions_throw_interruptedexception();
1338 /* lock_record_notify **********************************************************
1340 Notify one thread or all threads waiting on the given lock record.
1343 t............the current thread
1344 lr...........the lock record
1345 one..........if true, only notify one thread
1348 The current thread must be the owner of the lock record.
1349 This is NOT checked by this function!
1351 *******************************************************************************/
/* Wake one (one==true) or all threads waiting on lr. For each waiter:
   skip it if already signaled, otherwise signal its condition variable
   under its wait-mutex and mark it signaled. Caller must own the lock
   record — NOT checked here. */
1353 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1357 	threadobject *waitingthread;
1359 	/* { the thread t owns the fat lock record lr on the object o } */
1361 	/* Get the waiters list. */
1365 	for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
1366 		/* signal the waiting thread */
1368 		waitingthread = w->thread;
1370 		/* If the thread was already signaled but hasn't removed
1371 		   itself from the list yet, just ignore it. */
1373 		if (waitingthread->signaled == true)
1376 		/* Enter the wait-mutex. */
1378 		pthread_mutex_lock(&(waitingthread->waitmutex));
1380 		DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
1381 					lr, t, waitingthread, waitingthread->sleeping, one));
1383 		/* Signal the thread if it's sleeping. */
/* if it is not sleeping yet, the signaled flag below suffices: the
   waiter checks it before going to sleep */
1385 		if (waitingthread->sleeping)
1386 			pthread_cond_signal(&(waitingthread->waitcond));
1388 		/* Mark the thread as signaled. */
1390 		waitingthread->signaled = true;
1392 		/* Leave the wait-mutex. */
1394 		pthread_mutex_unlock(&(waitingthread->waitmutex));
1396 		/* if we should only wake one, we are done */
1404 /* lock_monitor_notify *********************************************************
1406 Notify one thread or all threads waiting on the given object.
1409 t............the current thread
1410 o............the object
1411 one..........if true, only notify one thread
1414 The current thread must be the owner of the object's monitor.
1416 *******************************************************************************/
1418 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1423 lockword = lock_lockword_get(t, o);
1425 /* check if we own this monitor */
1426 /* We don't have to worry about stale values here, as any stale value */
1427 /* will fail this check. */
1429 if (IS_FAT_LOCK(lockword)) {
1431 lr = GET_FAT_LOCK(lockword);
1433 if (lr->owner != t) {
1434 exceptions_throw_illegalmonitorstateexception();
1439 /* it's a thin lock */
1441 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1442 exceptions_throw_illegalmonitorstateexception();
1446 /* inflate this lock */
1448 lr = lock_hashtable_get(t, o);
1449 lock_record_enter(t, lr);
1450 lock_inflate(t, o, lr);
1453 /* { the thread t owns the fat lock record lr on the object o } */
1455 lock_record_notify(t, lr, one);
1460 /*============================================================================*/
1461 /* INQUIRY FUNCTIONS                                                         */
1462 /*============================================================================*/
1465 /* lock_is_held_by_current_thread **********************************************
1467 Return true if the current thread owns the monitor of the given object.
1470 o............the object
1473 true, if the current thread holds the lock of this object.
1475 *******************************************************************************/
1477 bool lock_is_held_by_current_thread(java_handle_t *o)
1485 /* check if we own this monitor */
1486 /* We don't have to worry about stale values here, as any stale value */
1487 /* will fail this check. */
1489 lockword = lock_lockword_get(t, o);
1491 if (IS_FAT_LOCK(lockword)) {
1492 /* it's a fat lock */
1494 lr = GET_FAT_LOCK(lockword);
1496 return (lr->owner == t);
1499 /* it's a thin lock */
1501 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1507 /*============================================================================*/
1508 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1509 /*============================================================================*/
1512 /* lock_wait_for_object ********************************************************
1514 Wait for the given object.
1517 o............the object
1518 millis.......milliseconds to wait
1519 nanos........nanoseconds to wait
1521 *******************************************************************************/
1523 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1525 threadobject *thread;
1527 thread = THREADOBJECT;
1529 lock_monitor_wait(thread, o, millis, nanos);
1533 /* lock_notify_object **********************************************************
1535 Notify one thread waiting on the given object.
1538 o............the object
1540 *******************************************************************************/
1542 void lock_notify_object(java_handle_t *o)
1544 threadobject *thread;
1546 thread = THREADOBJECT;
1548 lock_monitor_notify(thread, o, true);
1552 /* lock_notify_all_object ******************************************************
1554 Notify all threads waiting on the given object.
1557 o............the object
1559 *******************************************************************************/
1561 void lock_notify_all_object(java_handle_t *o)
1563 threadobject *thread;
1565 thread = THREADOBJECT;
1567 lock_monitor_notify(thread, o, false);
1572 * These are local overrides for various environment variables in Emacs.
1573 * Please do not remove this and leave it at the end of the file, where
1574 * Emacs will automagically detect them.
1575 * ---------------------------------------------------------------------
1578 * indent-tabs-mode: t
1582 * vim:noexpandtab:sw=4:ts=4: