1 /* src/threads/posix/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007, 2008
4 CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
6 This file is part of CACAO.
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2, or (at
11 your option) any later version.
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
37 #include "mm/memory.h"
39 #include "native/llni.h"
41 #include "threads/lock-common.h"
42 #include "threads/threadlist.h"
43 #include "threads/thread.h"
45 #include "threads/posix/lock.h"
47 #include "toolbox/list.h"
49 #include "vm/global.h"
50 #include "vm/exceptions.h"
51 #include "vm/finalizer.h"
52 #include "vm/stringlocal.h"
55 #include "vmcore/options.h"
57 #if defined(ENABLE_STATISTICS)
58 # include "vmcore/statistics.h"
61 #if defined(ENABLE_VMLOG)
62 #include <vmlog_cacao.h>
65 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
69 /* includes for atomic instructions: */
71 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
72 #include "threads/posix/generic-primitives.h"
74 #include "machine-instr.h"
77 #if defined(ENABLE_JVMTI)
78 #include "native/jvmti/cacaodbg.h"
81 #if defined(ENABLE_GC_BOEHM)
82 # include "mm/boehm-gc/include/gc.h"
86 /* debug **********************************************************************/
89 # define DEBUGLOCKS(format) \
91 if (opt_DebugLocks) { \
96 # define DEBUGLOCKS(format)
100 /******************************************************************************/
102 /******************************************************************************/
104 /* number of lock records in the first pool allocated for a thread */
105 #define LOCK_INITIAL_LOCK_RECORDS 8
107 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
109 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
110 ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
113 /******************************************************************************/
114 /* MACROS FOR THIN/FAT LOCKS */
115 /******************************************************************************/
117 /* We use a variant of the tasuki locks described in the paper
119 * Tamiya Onodera, Kiyokuni Kawachiya
120 * A Study of Locking Objects with Bimodal Fields
121 * Proceedings of the ACM OOPSLA '99, pp. 223-237
124 * The underlying thin locks are a variant of the thin locks described in
126 * Bacon, Konuru, Murthy, Serrano
127 * Thin Locks: Featherweight Synchronization for Java
128 * Proceedings of the ACM Conference on Programming Language Design and
129 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
132 * In thin lock mode the lockword looks like this:
134 * ,----------------------,-----------,---,
135 * | thread ID | count | 0 |
136 * `----------------------'-----------'---´
138 * thread ID......the 'index' of the owning thread, or 0
139 * count..........number of times the lock has been entered minus 1
140 * 0..............the shape bit is 0 in thin lock mode
142 * In fat lock mode it is basically a lock_record_t *:
144 * ,----------------------------------,---,
145 * | lock_record_t * (without LSB) | 1 |
146 * `----------------------------------'---´
148 * 1..............the shape bit is 1 in fat lock mode
151 #if SIZEOF_VOID_P == 8
152 #define THIN_LOCK_WORD_SIZE 64
154 #define THIN_LOCK_WORD_SIZE 32
157 #define THIN_LOCK_SHAPE_BIT 0x01
159 #define THIN_UNLOCKED 0
161 #define THIN_LOCK_COUNT_SHIFT 1
162 #define THIN_LOCK_COUNT_SIZE 8
163 #define THIN_LOCK_COUNT_INCR (1 << THIN_LOCK_COUNT_SHIFT)
164 #define THIN_LOCK_COUNT_MAX ((1 << THIN_LOCK_COUNT_SIZE) - 1)
165 #define THIN_LOCK_COUNT_MASK (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
167 #define THIN_LOCK_TID_SHIFT (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
168 #define THIN_LOCK_TID_SIZE (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
170 #define IS_THIN_LOCK(lockword) (!((lockword) & THIN_LOCK_SHAPE_BIT))
171 #define IS_FAT_LOCK(lockword) ((lockword) & THIN_LOCK_SHAPE_BIT)
173 #define GET_FAT_LOCK(lockword) ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
174 #define MAKE_FAT_LOCK(ptr) ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)
176 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
177 #define GET_THREAD_INDEX(lockword) ((unsigned) lockword >> THIN_LOCK_TID_SHIFT)
180 /* global variables ***********************************************************/
182 /* hashtable mapping objects to lock records */
183 static lock_hashtable_t lock_hashtable;
186 /******************************************************************************/
188 /******************************************************************************/
190 static void lock_hashtable_init(void);
192 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
193 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
194 static void lock_record_enter(threadobject *t, lock_record_t *lr);
195 static void lock_record_exit(threadobject *t, lock_record_t *lr);
196 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
197 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
200 /*============================================================================*/
201 /* INITIALIZATION OF DATA STRUCTURES */
202 /*============================================================================*/
205 /* lock_init *******************************************************************
207 Initialize global data for locking.
209 *******************************************************************************/
/* Body fragment of lock_init(): one-time global setup for the locking
   subsystem -- builds the object->lock-record hashtable and, when vmlog
   tracing is compiled in, its lock instrumentation.
   NOTE(review): the enclosing function signature/braces are not visible
   in this extract. */
213 /* initialize lock hashtable */
215 lock_hashtable_init();
217 #if defined(ENABLE_VMLOG)
218 vmlog_cacao_init_lock();
223 /* lock_pre_compute_thinlock ***************************************************
225 Pre-compute the thin lock value for a thread index.
228 index........the thead index (>= 1)
231 the thin lock value for this thread index
233 *******************************************************************************/
235 ptrint lock_pre_compute_thinlock(s4 index)
/* Build the thin-lock word for a thread index: the index goes into the
   TID field; count field and shape bit stay zero, i.e. "unlocked".
   NOTE(review): function braces are not visible in this extract. */
237 return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
241 /* lock_record_new *************************************************************
243 Allocate a lock record.
245 *******************************************************************************/
247 static lock_record_t *lock_record_new(void)
/* Allocate a fresh lock record on the C heap and initialize it:
   waiters list, GC weak reference to the object slot (exact GC only),
   and the per-record pthread mutex.
   NOTE(review): local declarations, the result check before
   vm_abort_errnum, and the return statement are not visible here --
   presumably `if (result != 0)` guards the abort; confirm in full source. */
252 /* allocate the data structure on the C heap */
254 lr = NEW(lock_record_t);
256 #if defined(ENABLE_STATISTICS)
/* account for the allocation in the VM statistics counters */
258 size_lock_record += sizeof(lock_record_t);
261 /* initialize the members */
266 lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));
268 #if defined(ENABLE_GC_CACAO)
269 /* register the lock object as weak reference with the GC */
271 gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
274 /* initialize the mutex */
276 result = pthread_mutex_init(&(lr->mutex), NULL);
278 vm_abort_errnum(result, "lock_record_new: pthread_mutex_init failed");
280 DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
286 /* lock_record_free ************************************************************
291 lr....lock record to free
293 *******************************************************************************/
295 static void lock_record_free(lock_record_t *lr)
/* Tear down a lock record: destroy its mutex, drop the GC weak
   reference (exact GC only), free the waiters list and the record itself.
   Caller must guarantee no thread still uses the record.
   NOTE(review): the result check before vm_abort_errnum is not visible
   in this extract. */
299 DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
301 /* Destroy the mutex. */
303 result = pthread_mutex_destroy(&(lr->mutex));
305 vm_abort_errnum(result, "lock_record_free: pthread_mutex_destroy failed");
307 #if defined(ENABLE_GC_CACAO)
308 /* unregister the lock object reference with the GC */
310 gc_weakreference_unregister(&(lr->object));
313 /* Free the waiters list. */
315 list_free(lr->waiters);
317 /* Free the data structure. */
319 FREE(lr, lock_record_t);
321 #if defined(ENABLE_STATISTICS)
/* mirror the += done in lock_record_new */
323 size_lock_record -= sizeof(lock_record_t);
328 /*============================================================================*/
329 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
330 /*============================================================================*/
332 /* lock_hashtable_init *********************************************************
334 Initialize the global hashtable mapping objects to lock records.
336 *******************************************************************************/
338 static void lock_hashtable_init(void)
/* Create the global object->lock-record hashtable: init its mutex,
   allocate LOCK_INITIAL_HASHTABLE_SIZE (a prime) empty buckets and
   zero them. Called once from lock_init(). */
340 pthread_mutex_init(&(lock_hashtable.mutex), NULL);
342 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
343 lock_hashtable.entries = 0;
344 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
346 #if defined(ENABLE_STATISTICS)
348 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
/* all buckets start out empty */
351 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
355 /* lock_hashtable_grow *********************************************************
357 Grow the lock record hashtable to about twice its current size and
360 *******************************************************************************/
362 /* must be called with hashtable mutex locked */
363 static void lock_hashtable_grow(void)
/* Double (2n+1) the bucket array and rehash every chained lock record
   into the new table. Must be called with lock_hashtable.mutex held
   (see comment above the definition).
   NOTE(review): the inner chain-walk of the rehash loop (reading lr /
   advancing hashlink) is only partially visible in this extract. */
367 lock_record_t **oldtable;
368 lock_record_t **newtable;
375 /* allocate a new table */
377 oldsize = lock_hashtable.size;
378 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
380 DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
382 oldtable = lock_hashtable.ptr;
383 newtable = MNEW(lock_record_t *, newsize);
385 #if defined(ENABLE_STATISTICS)
387 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
390 MZERO(newtable, lock_record_t *, newsize);
392 /* rehash the entries */
394 for (i = 0; i < oldsize; i++) {
/* recompute the bucket from the object's heap hashcode and push the
   record onto the head of its new chain */
399 h = heap_hashcode(lr->object);
400 newslot = h % newsize;
402 lr->hashlink = newtable[newslot];
403 newtable[newslot] = lr;
409 /* replace the old table */
411 lock_hashtable.ptr = newtable;
412 lock_hashtable.size = newsize;
414 MFREE(oldtable, lock_record_t *, oldsize);
416 #if defined(ENABLE_STATISTICS)
418 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
423 /* lock_hashtable_cleanup ******************************************************
425 Removes (and frees) lock records which have a cleared object reference
426 from the hashtable. The locked object was reclaimed by the GC.
428 *******************************************************************************/
430 #if defined(ENABLE_GC_CACAO)
431 void lock_hashtable_cleanup(void)
/* Exact-GC path: scan every bucket for lock records whose weak object
   reference was cleared (object reclaimed), unlink and free them.
   Runs under the hashtable mutex.
   NOTE(review): the prev/next chain bookkeeping and the inner loop
   header are not fully visible in this extract. */
441 /* lock the hashtable */
443 pthread_mutex_lock(&(lock_hashtable.mutex));
445 /* search the hashtable for cleared references */
447 for (i = 0; i < lock_hashtable.size; i++) {
448 lr = lock_hashtable.ptr[i];
454 /* remove lock records with cleared references */
456 if (lr->object == NULL) {
458 /* unlink the lock record from the hashtable */
/* head-of-chain vs. mid-chain unlink -- presumably guarded by a
   `prev == NULL` test on a line not visible here; confirm */
461 lock_hashtable.ptr[i] = next;
463 prev->hashlink = next;
465 /* free the lock record */
467 lock_record_free(lr);
477 /* unlock the hashtable */
479 pthread_mutex_unlock(&(lock_hashtable.mutex));
484 /* lock_hashtable_get **********************************************************
486 Find the lock record for the given object. If it does not exists,
487 yet, create it and enter it in the hashtable.
490 t....the current thread
491 o....the object to look up
494 the lock record to use for this object
496 *******************************************************************************/
498 #if defined(ENABLE_GC_BOEHM)
499 static void lock_record_finalizer(void *object, void *p);
502 static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
/* Return the fat lock record for object o, creating and registering one
   if none exists yet. Fast path: if the lockword is already fat, return
   the embedded record without touching the hashtable. Otherwise look up
   (and possibly insert) under lock_hashtable.mutex; grows the table when
   the load factor exceeds 4/3. With Boehm GC a finalizer is registered
   so the record is reclaimed with the object.
   NOTE(review): the "found in chain" return inside the for loop and the
   final return statement are not visible in this extract. */
508 lockword = lock_lockword_get(t, o);
510 if (IS_FAT_LOCK(lockword))
511 return GET_FAT_LOCK(lockword);
513 /* lock the hashtable */
515 pthread_mutex_lock(&(lock_hashtable.mutex));
517 /* lookup the lock record in the hashtable */
519 LLNI_CRITICAL_START_THREAD(t);
520 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
521 lr = lock_hashtable.ptr[slot];
523 for (; lr != NULL; lr = lr->hashlink) {
524 if (lr->object == LLNI_DIRECT(o))
527 LLNI_CRITICAL_END_THREAD(t);
530 /* not found, we must create a new one */
532 lr = lock_record_new();
534 LLNI_CRITICAL_START_THREAD(t);
535 lr->object = LLNI_DIRECT(o);
536 LLNI_CRITICAL_END_THREAD(t);
538 #if defined(ENABLE_GC_BOEHM)
539 /* register new finalizer to clean up the lock record */
541 GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
544 /* enter it in the hashtable */
/* head insertion into the bucket chain */
546 lr->hashlink = lock_hashtable.ptr[slot];
547 lock_hashtable.ptr[slot] = lr;
548 lock_hashtable.entries++;
550 /* check whether the hash should grow */
552 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
553 lock_hashtable_grow();
557 /* unlock the hashtable */
559 pthread_mutex_unlock(&(lock_hashtable.mutex));
561 /* return the new lock record */
567 /* lock_hashtable_remove *******************************************************
569 Remove the lock record for the given object from the hashtable
570 and free it afterwards.
573 t....the current thread
574 o....the object to look up
576 *******************************************************************************/
578 static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
/* Remove o's (fat) lock record from the hashtable and free it.
   Asserts the lockword is fat. Used from the Boehm finalizer path.
   NOTE(review): the `if` distinguishing head-of-chain removal from the
   chain walk is only partially visible in this extract. */
583 lock_record_t *tmplr;
585 /* lock the hashtable */
587 pthread_mutex_lock(&(lock_hashtable.mutex));
589 /* get lock record */
591 lockword = lock_lockword_get(t, o);
593 assert(IS_FAT_LOCK(lockword));
595 lr = GET_FAT_LOCK(lockword);
597 /* remove the lock-record from the hashtable */
599 LLNI_CRITICAL_START_THREAD(t);
600 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
601 tmplr = lock_hashtable.ptr[slot];
602 LLNI_CRITICAL_END_THREAD(t);
605 /* special handling if it's the first in the chain */
607 lock_hashtable.ptr[slot] = lr->hashlink;
/* otherwise walk the chain and splice lr out */
610 for (; tmplr != NULL; tmplr = tmplr->hashlink) {
611 if (tmplr->hashlink == lr) {
612 tmplr->hashlink = lr->hashlink;
/* the record must have been found in the chain */
617 assert(tmplr != NULL);
620 /* decrease entry count */
622 lock_hashtable.entries--;
624 /* unlock the hashtable */
626 pthread_mutex_unlock(&(lock_hashtable.mutex));
628 /* free the lock record */
630 lock_record_free(lr);
634 /* lock_record_finalizer *******************************************************
636 XXX Remove me for exact GC.
638 *******************************************************************************/
640 static void lock_record_finalizer(void *object, void *p)
/* Boehm-GC finalizer installed by lock_hashtable_get: runs the class's
   own Java finalizer (if any), then removes and frees the object's lock
   record. The header comment marks this for removal under exact GC.
   NOTE(review): declarations of o/c and parts of the debug-print branch
   are not visible in this extract. */
645 o = (java_handle_t *) object;
647 #if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
648 /* XXX this is only a dirty hack to make Boehm work with handles */
650 o = LLNI_WRAP((java_object_t *) o);
653 LLNI_class_get(o, c);
656 if (opt_DebugFinalizer) {
658 log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
665 /* check for a finalizer function */
667 if (c->finalizer != NULL)
668 finalizer_run(object, p);
670 /* remove the lock-record entry from the hashtable and free it */
672 lock_hashtable_remove(THREADOBJECT, o);
676 /*============================================================================*/
677 /* OBJECT LOCK INITIALIZATION */
678 /*============================================================================*/
681 /* lock_init_object_lock *******************************************************
683 Initialize the monitor pointer of the given object. The monitor gets
684 initialized to an unlocked state.
686 *******************************************************************************/
688 void lock_init_object_lock(java_object_t *o)
/* Reset a freshly created object's lockword to the unlocked thin-lock
   state (all bits zero).
   NOTE(review): function braces are not visible in this extract. */
692 o->lockword = THIN_UNLOCKED;
696 /*============================================================================*/
697 /* LOCKING ALGORITHM */
698 /*============================================================================*/
701 /* lock_lockword_get ***********************************************************
703 Get the lockword for the given object.
706 t............the current thread
707 o............the object
709 *******************************************************************************/
711 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
/* Read o's lockword inside an LLNI critical section (handle -> direct
   object access must not race with a moving GC).
   NOTE(review): the declaration of `lockword` and the return statement
   are not visible in this extract. */
715 LLNI_CRITICAL_START_THREAD(t);
716 lockword = LLNI_DIRECT(o)->lockword;
717 LLNI_CRITICAL_END_THREAD(t);
723 /* lock_lockword_set ***********************************************************
725 Set the lockword for the given object.
728 t............the current thread
729 o............the object
730 lockword.....the new lockword value
732 *******************************************************************************/
734 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
/* Write o's lockword inside an LLNI critical section; counterpart of
   lock_lockword_get. Plain store -- callers needing atomicity use the
   CAS macro instead. */
736 LLNI_CRITICAL_START_THREAD(t);
737 LLNI_DIRECT(o)->lockword = lockword;
738 LLNI_CRITICAL_END_THREAD(t);
742 /* lock_record_enter ***********************************************************
744 Enter the lock represented by the given lock record.
747 t.................the current thread
748 lr................the lock record
750 *******************************************************************************/
752 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
/* Acquire the lock record's mutex (blocking).
   NOTE(review): per the full source this presumably also sets lr->owner
   = t after acquiring -- that line is not visible in this extract. */
754 pthread_mutex_lock(&(lr->mutex));
759 /* lock_record_exit ************************************************************
761 Release the lock represented by the given lock record.
764 t.................the current thread
765 lr................the lock record
768 The current thread must own the lock represented by this lock record.
769 This is NOT checked by this function!
771 *******************************************************************************/
773 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
/* Release the lock record's mutex. Ownership is NOT checked here (see
   the header comment above).
   NOTE(review): the owner-clearing line that presumably precedes the
   unlock is not visible in this extract. */
776 pthread_mutex_unlock(&(lr->mutex));
780 /* lock_inflate ****************************************************************
782 Inflate the lock of the given object. This may only be called by the
783 owner of the monitor of the object.
786 t............the current thread
787 o............the object of which to inflate the lock
788 lr...........the lock record to install. The current thread must
789 own the lock of this lock record!
792 The current thread must be the owner of this object's monitor AND
793 of the lock record's lock!
795 *******************************************************************************/
797 static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
/* Convert o's thin lock into the fat lock lr: copy the recursion count
   out of the thin lockword into lr->count, then publish MAKE_FAT_LOCK(lr)
   as the new lockword. Caller must own both o's monitor and lr's mutex.
   If the lock is already fat it must already be lr (idempotent re-entry).
   NOTE(review): the early return in the already-fat branch is not
   visible in this extract. */
801 /* get the current lock count */
803 lockword = lock_lockword_get(t, o);
805 if (IS_FAT_LOCK(lockword)) {
806 assert(GET_FAT_LOCK(lockword) == lr);
/* a thin lock held by us must match our precomputed thinlock word */
810 assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
812 /* copy the count from the thin lock */
814 lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
817 DEBUGLOCKS(("[lock_inflate : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
818 lr, t, o, lockword, lr->count));
822 lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
826 /* TODO Move this function into threadlist.[ch]. */
828 static threadobject *threads_lookup_thread_id(int index)
/* Linear scan of the global thread list for the thread with the given
   index, skipping threads still in state NEW. (Marked TODO to move into
   threadlist.[ch].)
   NOTE(review): the `continue`/`break` bodies of the two ifs and the
   return statement are not visible in this extract. */
834 for (t = threadlist_first(); t != NULL; t = threadlist_next(t)) {
835 if (t->state == THREAD_STATE_NEW)
837 if (t->index == index)
845 static void sable_flc_waiting(ptrint lockword, threadobject *t, java_handle_t *o)
/* Flat-lock-contention wait (SableVM-style): the thin lock on o is held
   by another thread. Set that thread's flc_bit under its flc_lock, then,
   if the lockword is unchanged, enqueue (t, o) on the holder's FLC list
   and block on t->flc_cond until the holder inflates the lock and
   broadcasts (see notify_flc_waiters). Spurious wakeups are handled by
   re-checking membership in the FLC list.
   NOTE(review): several lines are missing from this extract -- the
   failure-return when the holder thread is not found, the assignment of
   t->flc_object, and the loop header around the cond-wait. */
848 threadobject *t_other;
851 index = GET_THREAD_INDEX(lockword);
852 t_other = threads_lookup_thread_id(index);
854 /* failure, TODO: add statistics */
857 pthread_mutex_lock(&t_other->flc_lock);
858 old_flc = t_other->flc_bit;
859 t_other->flc_bit = true;
861 DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d",
862 t->index, t_other->index));
864 /* Set FLC bit first, then read the lockword again */
867 lockword = lock_lockword_get(t, o);
869 /* Lockword is still the way it was seen before */
870 if (IS_THIN_LOCK(lockword) && (GET_THREAD_INDEX(lockword) == index))
872 /* Add tuple (t, o) to the other thread's FLC list */
874 t->flc_next = t_other->flc_list;
875 t_other->flc_list = t;
879 threadobject *current;
881 /* Wait until another thread sees the flc bit and notifies
883 pthread_cond_wait(&t->flc_cond, &t_other->flc_lock);
885 /* Traverse FLC list looking if we're still there */
886 current = t_other->flc_list;
887 while (current && current != t)
888 current = current->flc_next;
890 /* not in list anymore, can stop waiting */
893 /* We are still in the list -- the other thread cannot have seen
895 assert(t_other->flc_bit);
898 t->flc_object = NULL; /* for garbage collector? */
/* restore the bit we saw on entry */
902 t_other->flc_bit = old_flc;
904 pthread_mutex_unlock(&t_other->flc_lock);
907 static void notify_flc_waiters(threadobject *t, java_handle_t *o)
/* Wake the threads queued on t's flat-lock-contention (FLC) list that
   are waiting for object o: for each matching entry, inflate the
   object's lock if still thin (so waiters can block on the fat lock's
   mutex) and broadcast the waiter's flc_cond. Runs under t->flc_lock.
   FIX(review): line "934" read `pthread_cond_broadcast(¤t->...)` --
   an HTML-entity mojibake (`&curren;t`) of `&current`; restored to
   `&current->flc_cond` to match the `current` iterator used throughout.
   NOTE(review): the loop header around the list walk and the
   list-clearing at the end are not visible in this extract. */
909 threadobject *current;
911 pthread_mutex_lock(&t->flc_lock);
913 current = t->flc_list;
916 if (current->flc_object != o)
918 /* The object has to be inflated so the other threads can properly
921 /* Only if not already inflated */
922 ptrint lockword = lock_lockword_get(t, current->flc_object);
923 if (IS_THIN_LOCK(lockword)) {
924 lock_record_t *lr = lock_hashtable_get(t, current->flc_object);
925 lock_record_enter(t, lr);
927 DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
928 t->index, (void*) current->flc_object, (void*) lr));
930 lock_inflate(t, current->flc_object, lr);
933 /* Wake the waiting thread */
934 pthread_cond_broadcast(&current->flc_cond);
936 current = current->flc_next;
941 pthread_mutex_unlock(&t->flc_lock);
944 /* lock_monitor_enter **********************************************************
946 Acquire the monitor of the given object. If the current thread already
947 owns the monitor, the lock counter is simply increased.
949 This function blocks until it can acquire the monitor.
952 t............the current thread
953 o............the object of which to enter the monitor
956 true.........the lock has been successfully acquired
957 false........an exception has been thrown
959 *******************************************************************************/
961 bool lock_monitor_enter(java_handle_t *o)
/* Acquire o's monitor for the current thread; returns true on success,
   false after throwing NullPointerException for o == NULL.
   Paths, in decreasing expected frequency:
     1. CAS the lockword from THIN_UNLOCKED to our thinlock word;
     2. recursive thin lock: bump the count field, inflating on overflow;
     3. fat lock: recursive re-entry or pthread_mutex_lock on the record;
     4. contended thin lock: loop via sable_flc_waiting until acquired.
   NOTE(review): this extract omits several lines -- local declarations,
   the `t = THREADOBJECT` fetch, the retry loop wrapper around the
   contended path, and the true/return statements closing each branch. */
964 /* CAUTION: This code assumes that ptrint is unsigned! */
970 exceptions_throw_nullpointerexception();
976 thinlock = t->thinlock;
979 /* most common case: try to thin-lock an unlocked object */
981 LLNI_CRITICAL_START_THREAD(t);
982 lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
983 LLNI_CRITICAL_END_THREAD(t);
985 if (lockword == THIN_UNLOCKED) {
986 /* success. we locked it */
987 /* The Java Memory Model requires a memory barrier here: */
988 /* Because of the CAS above, this barrier is a nop on x86 / x86_64 */
989 MEMORY_BARRIER_AFTER_ATOMIC();
993 /* next common case: recursive lock with small recursion count */
994 /* We don't have to worry about stale values here, as any stale value */
995 /* will indicate another thread holding the lock (or an inflated lock) */
997 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
998 /* we own this monitor */
999 /* check the current recursion count */
/* XOR cancels the identical TID bits, leaving only the count field */
1001 if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
1003 /* the recursion count is low enough */
1005 lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);
1007 /* success. we locked it */
1011 /* recursion count overflow */
/* inflate: count no longer fits in the thin lockword */
1013 lr = lock_hashtable_get(t, o);
1014 lock_record_enter(t, lr);
1015 lock_inflate(t, o, lr);
1018 notify_flc_waiters(t, o);
1024 /* the lock is either contented or fat */
1026 if (IS_FAT_LOCK(lockword)) {
1028 lr = GET_FAT_LOCK(lockword);
1030 /* check for recursive entering */
1031 if (lr->owner == t) {
1036 /* acquire the mutex of the lock record */
1038 lock_record_enter(t, lr);
1040 assert(lr->count == 0);
1045 /****** inflation path ******/
1047 #if defined(ENABLE_JVMTI)
1048 /* Monitor Contended Enter */
1049 jvmti_MonitorContendedEntering(false, o);
/* block until the holder inflates the lock and wakes us */
1052 sable_flc_waiting(lockword, t, o);
1054 #if defined(ENABLE_JVMTI)
1055 /* Monitor Contended Entered */
1056 jvmti_MonitorContendedEntering(true, o);
1062 /* lock_monitor_exit ***********************************************************
1064 Decrement the counter of a (currently owned) monitor. If the counter
1065 reaches zero, release the monitor.
1067 If the current thread is not the owner of the monitor, an
1068 IllegalMonitorState exception is thrown.
1071 t............the current thread
1072 o............the object of which to exit the monitor
1075 true.........everything ok,
1076 false........an exception has been thrown
1078 *******************************************************************************/
1080 bool lock_monitor_exit(java_handle_t *o)
/* Release o's monitor. Returns true on success; throws (and returns
   false) NullPointerException for o == NULL or
   IllegalMonitorStateException when the caller does not own the monitor.
   Paths: thin lock held once (store THIN_UNLOCKED, then honor a pending
   flat-lock-contention bit), recursive thin lock (decrement count), fat
   lock (decrement lr->count or clear owner and unlock the mutex).
   NOTE(review): local declarations, the `t->flc_bit` test before the
   DEBUGLOCKS at "1112", the count decrement at "1147", the owner
   clearing before the unlock, and the return statements are not visible
   in this extract. */
1087 exceptions_throw_nullpointerexception();
1093 thinlock = t->thinlock;
1095 /* We don't have to worry about stale values here, as any stale value */
1096 /* will indicate that we don't own the lock. */
1098 lockword = lock_lockword_get(t, o);
1100 /* most common case: we release a thin lock that we hold once */
1102 if (lockword == thinlock) {
1103 /* memory barrier for Java Memory Model */
1104 STORE_ORDER_BARRIER();
1105 lock_lockword_set(t, o, THIN_UNLOCKED);
1106 /* memory barrier for thin locking */
1109 /* check if there has been a flat lock contention on this object */
1112 DEBUGLOCKS(("thread %d saw flc bit", t->index));
1114 /* there has been a contention on this thin lock */
1115 notify_flc_waiters(t, o);
1121 /* next common case: we release a recursive lock, count > 0 */
1123 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
1124 lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
1128 /* either the lock is fat, or we don't hold it at all */
1130 if (IS_FAT_LOCK(lockword)) {
1134 lr = GET_FAT_LOCK(lockword);
1136 /* check if we own this monitor */
1137 /* We don't have to worry about stale values here, as any stale value */
1138 /* will be != t and thus fail this check. */
1140 if (lr->owner != t) {
1141 exceptions_throw_illegalmonitorstateexception();
1145 /* { the current thread `t` owns the lock record `lr` on object `o` } */
1147 if (lr->count != 0) {
1148 /* we had locked this one recursively. just decrement, it will */
1149 /* still be locked. */
1154 /* unlock this lock record */
1157 pthread_mutex_unlock(&(lr->mutex));
1162 /* legal thin lock cases have been handled above, so this is an error */
1164 exceptions_throw_illegalmonitorstateexception();
1170 /* lock_record_add_waiter ******************************************************
1172 Add a thread to the list of waiting threads of a lock record.
1175 lr...........the lock record
1176 thread.......the thread to add
1178 *******************************************************************************/
1180 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
/* Append `thread` to lr's waiters list, wrapped in a freshly allocated
   lock_waiter_t. Caller must own the lock record.
   NOTE(review): the declaration of `w` and the `w->thread = thread`
   assignment are not visible in this extract. */
1184 /* Allocate a waiter data structure. */
1186 w = NEW(lock_waiter_t);
1188 #if defined(ENABLE_STATISTICS)
1190 size_lock_waiter += sizeof(lock_waiter_t);
1193 /* Store the thread in the waiter structure. */
1197 /* Add the waiter as last entry to waiters list. */
1199 list_add_last(lr->waiters, w);
1203 /* lock_record_remove_waiter ***************************************************
1205 Remove a thread from the list of waiting threads of a lock record.
1208 lr...........the lock record
1209 t............the current thread
1212 The current thread must be the owner of the lock record.
1214 *******************************************************************************/
1216 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
/* Find `thread` in lr's waiters list, unlink its waiter entry and free
   it. Aborts the VM if the thread is not on the list -- callers only
   invoke this after lock_record_add_waiter. Caller must own the record.
   NOTE(review): the declarations of l/w, the list_remove call and the
   early return after freeing are not visible in this extract. */
1221 /* Get the waiters list. */
1225 for (w = list_first(l); w != NULL; w = list_next(l, w)) {
1226 if (w->thread == thread) {
1227 /* Remove the waiter entry from the list. */
1231 /* Free the waiter data structure. */
1233 FREE(w, lock_waiter_t);
1235 #if defined(ENABLE_STATISTICS)
1237 size_lock_waiter -= sizeof(lock_waiter_t);
1244 /* This should never happen. */
1246 vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
1250 /* lock_record_wait ************************************************************
1252 Wait on a lock record for a given (maximum) amount of time.
1255 t............the current thread
1256 lr...........the lock record
1257 millis.......milliseconds of timeout
1258 nanos........nanoseconds of timeout
1261 true.........we have been interrupted,
1262 false........everything ok
1265 The current thread must be the owner of the lock record.
1266 This is NOT checked by this function!
1268 *******************************************************************************/
1270 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
/* Object.wait() on a fat lock: enroll on the waiters list, save and
   give up the recursion count, release the record, block with timeout,
   then re-acquire, de-enroll and restore the count. Returns true iff
   the thread was interrupted (signaled wins over interrupted when both
   flags are set). Caller must own the record -- not checked here.
   NOTE(review): the declaration of `lockcount` and the `lr->count = 0`
   reset before releasing are not visible in this extract. */
1273 bool wasinterrupted = false;
1275 DEBUGLOCKS(("[lock_record_wait : lr=%p, t=%p, millis=%lld, nanos=%d]",
1276 lr, thread, millis, nanos));
1278 /* { the thread t owns the fat lock record lr on the object o } */
1280 /* register us as waiter for this object */
1282 lock_record_add_waiter(lr, thread);
1284 /* remember the old lock count */
1286 lockcount = lr->count;
1288 /* unlock this record */
1291 lock_record_exit(thread, lr);
1293 /* wait until notified/interrupted/timed out */
1295 threads_wait_with_timeout_relative(thread, millis, nanos);
1297 /* re-enter the monitor */
1299 lock_record_enter(thread, lr);
1301 /* remove us from the list of waiting threads */
1303 lock_record_remove_waiter(lr, thread);
1305 /* restore the old lock count */
1307 lr->count = lockcount;
1309 /* We can only be signaled OR interrupted, not both. If both flags
1310 are set, reset only signaled and leave the thread in
1311 interrupted state. Otherwise, clear both. */
1313 if (!thread->signaled) {
1314 wasinterrupted = thread->interrupted;
1315 thread->interrupted = false;
1318 thread->signaled = false;
1320 /* return if we have been interrupted */
1322 return wasinterrupted;
1326 /* lock_monitor_wait ***********************************************************
1328 Wait on an object for a given (maximum) amount of time.
1331 t............the current thread
1332 o............the object
1333 millis.......milliseconds of timeout
1334 nanos........nanoseconds of timeout
1337 The current thread must be the owner of the object's monitor.
1339 *******************************************************************************/
1341 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
/* Object.wait() entry point: verify the caller owns o's monitor
   (throwing IllegalMonitorStateException otherwise), inflate a thin
   lock first (waiting requires a fat lock's condition machinery), then
   delegate to lock_record_wait; throws InterruptedException if that
   reports an interrupt.
   NOTE(review): local declarations, the early returns after the throws,
   and the else branch structure are not fully visible in this extract. */
1346 lockword = lock_lockword_get(t, o);
1348 /* check if we own this monitor */
1349 /* We don't have to worry about stale values here, as any stale value */
1350 /* will fail this check. */
1352 if (IS_FAT_LOCK(lockword)) {
1354 lr = GET_FAT_LOCK(lockword);
1356 if (lr->owner != t) {
1357 exceptions_throw_illegalmonitorstateexception();
1362 /* it's a thin lock */
1364 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1365 exceptions_throw_illegalmonitorstateexception();
1369 /* inflate this lock */
1371 lr = lock_hashtable_get(t, o);
1372 lock_record_enter(t, lr);
1373 lock_inflate(t, o, lr);
/* wake any flat-lock-contention waiters now that the lock is fat */
1375 notify_flc_waiters(t, o);
1378 /* { the thread t owns the fat lock record lr on the object o } */
1380 if (lock_record_wait(t, lr, millis, nanos))
1381 exceptions_throw_interruptedexception();
1385 /* lock_record_notify **********************************************************
1387 Notify one thread or all threads waiting on the given lock record.
1390 t............the current thread
1391 lr...........the lock record
1392 one..........if true, only notify one thread
1395 The current thread must be the owner of the lock record.
1396 This is NOT checked by this function!
1398 *******************************************************************************/
1400 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
/* Object.notify()/notifyAll() on a fat lock: walk the waiters list and
   signal each not-yet-signaled, not-interrupted thread under its
   waitmutex; stop after the first when `one` is true. Waiters remove
   themselves from the list (see lock_record_wait). Caller must own the
   record -- not checked here.
   NOTE(review): the declarations of l/w, the `continue` for skipped
   threads and the `break` for the one==true case are not visible in
   this extract. */
1404 threadobject *waitingthread;
1406 /* { the thread t owns the fat lock record lr on the object o } */
1408 /* Get the waiters list. */
1412 for (w = list_first(l); w != NULL; w = list_next(l, w)) {
1413 /* signal the waiting thread */
1415 waitingthread = w->thread;
1417 /* We must skip threads which have already been notified or
1418 interrupted. They will remove themselves from the list. */
1420 if (waitingthread->signaled || waitingthread->interrupted)
1423 /* Enter the wait-mutex. */
1425 pthread_mutex_lock(&(waitingthread->waitmutex));
1427 DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
1428 lr, t, waitingthread, waitingthread->sleeping, one));
1430 /* Signal the thread if it's sleeping. sleeping can be false
1431 when the waiting thread is blocked between giving up the
1432 monitor and entering the waitmutex. It will eventually
1433 observe that it's signaled and refrain from going to
1436 if (waitingthread->sleeping)
1437 pthread_cond_signal(&(waitingthread->waitcond));
1439 /* Mark the thread as signaled. */
1441 waitingthread->signaled = true;
1443 /* Leave the wait-mutex. */
1445 pthread_mutex_unlock(&(waitingthread->waitmutex));
1447 /* if we should only wake one, we are done */
1447 /* if we should only wake one, we are done */
1455 /* lock_monitor_notify *********************************************************
1457 Notify one thread or all threads waiting on the given object.
1460 t............the current thread
1461 o............the object
1462 one..........if true, only notify one thread
1465 The current thread must be the owner of the object's monitor.
1467 *******************************************************************************/
1469 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1474 lockword = lock_lockword_get(t, o);
1476 /* check if we own this monitor */
1477 /* We don't have to worry about stale values here, as any stale value */
1478 /* will fail this check. */
1480 if (IS_FAT_LOCK(lockword)) {
1482 lr = GET_FAT_LOCK(lockword);
1484 if (lr->owner != t) {
1485 exceptions_throw_illegalmonitorstateexception();
1490 /* it's a thin lock */
1492 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1493 exceptions_throw_illegalmonitorstateexception();
1497 /* no thread can wait on a thin lock, so there's nothing to do. */
1501 /* { the thread t owns the fat lock record lr on the object o } */
1503 lock_record_notify(t, lr, one);
1508 /*============================================================================*/
1509 /* INQUIRY FUNCTIONS                                                         */
1510 /*============================================================================*/
1513 /* lock_is_held_by_current_thread **********************************************
1515 Return true if the current thread owns the monitor of the given object.
1518 o............the object
1521 true, if the current thread holds the lock of this object.
1523 *******************************************************************************/
1525 bool lock_is_held_by_current_thread(java_handle_t *o)
1533 /* check if we own this monitor */
1534 /* We don't have to worry about stale values here, as any stale value */
1535 /* will fail this check. */
1537 lockword = lock_lockword_get(t, o);
1539 if (IS_FAT_LOCK(lockword)) {
1540 /* it's a fat lock */
1542 lr = GET_FAT_LOCK(lockword);
1544 return (lr->owner == t);
1547 /* it's a thin lock */
1549 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1555 /*============================================================================*/
1556 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1557 /*============================================================================*/
1560 /* lock_wait_for_object ********************************************************
1562 Wait for the given object.
1565 o............the object
1566 millis.......milliseconds to wait
1567 nanos........nanoseconds to wait
1569 *******************************************************************************/
1571 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1573 threadobject *thread;
1575 thread = THREADOBJECT;
1577 lock_monitor_wait(thread, o, millis, nanos);
1581 /* lock_notify_object **********************************************************
1583 Notify one thread waiting on the given object.
1586 o............the object
1588 *******************************************************************************/
1590 void lock_notify_object(java_handle_t *o)
1592 threadobject *thread;
1594 thread = THREADOBJECT;
1596 lock_monitor_notify(thread, o, true);
1600 /* lock_notify_all_object ******************************************************
1602 Notify all threads waiting on the given object.
1605 o............the object
1607 *******************************************************************************/
1609 void lock_notify_all_object(java_handle_t *o)
1611 threadobject *thread;
1613 thread = THREADOBJECT;
1615 lock_monitor_notify(thread, o, false);
1620 * These are local overrides for various environment variables in Emacs.
1621 * Please do not remove this and leave it at the end of the file, where
1622 * Emacs will automagically detect them.
1623 * ---------------------------------------------------------------------
1626 * indent-tabs-mode: t
1630 * vim:noexpandtab:sw=4:ts=4: