1 /* src/threads/posix/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007, 2008
4 CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
6 This file is part of CACAO.
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2, or (at
11 your option) any later version.
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
37 #include "mm/memory.h"
39 #include "native/llni.h"
41 #include "threads/lock-common.h"
42 #include "threads/mutex.h"
43 #include "threads/threadlist.h"
44 #include "threads/thread.h"
46 #include "threads/posix/lock.h"
48 #include "toolbox/list.h"
50 #include "vm/global.h"
51 #include "vm/exceptions.h"
52 #include "vm/finalizer.h"
53 #include "vm/stringlocal.h"
56 #include "vmcore/options.h"
58 #if defined(ENABLE_STATISTICS)
59 # include "vmcore/statistics.h"
62 #if defined(ENABLE_VMLOG)
63 #include <vmlog_cacao.h>
66 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
70 /* includes for atomic instructions: */
72 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
73 #include "threads/posix/generic-primitives.h"
75 #include "machine-instr.h"
78 #if defined(ENABLE_JVMTI)
79 #include "native/jvmti/cacaodbg.h"
82 #if defined(ENABLE_GC_BOEHM)
83 # include "mm/boehm-gc/include/gc.h"
87 /* debug **********************************************************************/
/* NOTE(review): this listing has lines elided — the DEBUGLOCKS macro body,
   the #else/#endif of its conditional, and several #endif lines are not
   visible. Restore from the upstream source before compiling. */
90 # define DEBUGLOCKS(format) \
92 if (opt_DebugLocks) { \
97 # define DEBUGLOCKS(format)
101 /******************************************************************************/
103 /******************************************************************************/
105 /* number of lock records in the first pool allocated for a thread */
106 #define LOCK_INITIAL_LOCK_RECORDS 8
108 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
/* Atomically compare-and-swap the word at `address`; evaluates to the value
   observed at *address BEFORE the swap (== oldvalue exactly when the swap
   succeeded). Callers compare the result against the expected old value. */
110 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
111 ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
114 /******************************************************************************/
115 /* MACROS FOR THIN/FAT LOCKS */
116 /******************************************************************************/
118 /* We use a variant of the tasuki locks described in the paper
120 * Tamiya Onodera, Kiyokuni Kawachiya
121 * A Study of Locking Objects with Bimodal Fields
122 * Proceedings of the ACM OOPSLA '99, pp. 223-237
125 * The underlying thin locks are a variant of the thin locks described in
127 * Bacon, Konuru, Murthy, Serrano
128 * Thin Locks: Featherweight Synchronization for Java
129 * Proceedings of the ACM Conference on Programming Language Design and
130 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
133 * In thin lock mode the lockword looks like this:
135 * ,----------------------,-----------,---,
136 * | thread ID | count | 0 |
137 * `----------------------'-----------'---'
139 * thread ID......the 'index' of the owning thread, or 0
140 * count..........number of times the lock has been entered minus 1
141 * 0..............the shape bit is 0 in thin lock mode
143 * In fat lock mode it is basically a lock_record_t *:
145 * ,----------------------------------,---,
146 * | lock_record_t * (without LSB) | 1 |
147 * `----------------------------------'---'
149 * 1..............the shape bit is 1 in fat lock mode
152 #if SIZEOF_VOID_P == 8
153 #define THIN_LOCK_WORD_SIZE 64
155 #define THIN_LOCK_WORD_SIZE 32
/* the LSB of the lockword distinguishes thin (0) from fat (1) locks */
158 #define THIN_LOCK_SHAPE_BIT 0x01
160 #define THIN_UNLOCKED 0
/* recursion count lives in bits [1 .. 8]; thread index in the bits above it */
162 #define THIN_LOCK_COUNT_SHIFT 1
163 #define THIN_LOCK_COUNT_SIZE 8
164 #define THIN_LOCK_COUNT_INCR (1 << THIN_LOCK_COUNT_SHIFT)
165 #define THIN_LOCK_COUNT_MAX ((1 << THIN_LOCK_COUNT_SIZE) - 1)
166 #define THIN_LOCK_COUNT_MASK (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
168 #define THIN_LOCK_TID_SHIFT (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
169 #define THIN_LOCK_TID_SIZE (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
171 #define IS_THIN_LOCK(lockword) (!((lockword) & THIN_LOCK_SHAPE_BIT))
172 #define IS_FAT_LOCK(lockword) ((lockword) & THIN_LOCK_SHAPE_BIT)
174 #define GET_FAT_LOCK(lockword) ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
175 #define MAKE_FAT_LOCK(ptr) ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)
177 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
/* NOTE(review): the macro argument `lockword` is unparenthesized here —
   passing a non-trivial expression would bind the cast incorrectly. */
178 #define GET_THREAD_INDEX(lockword) ((unsigned) lockword >> THIN_LOCK_TID_SHIFT)
181 /* global variables ***********************************************************/
183 /* hashtable mapping objects to lock records */
184 static lock_hashtable_t lock_hashtable;
187 /******************************************************************************/
189 /******************************************************************************/
/* forward declarations of the file-local locking primitives */
191 static void lock_hashtable_init(void);
193 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
194 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
195 static void lock_record_enter(threadobject *t, lock_record_t *lr);
196 static void lock_record_exit(threadobject *t, lock_record_t *lr);
197 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
198 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
201 /*============================================================================*/
202 /* INITIALIZATION OF DATA STRUCTURES */
203 /*============================================================================*/
/* lock_init *******************************************************************

   Initialize global data for locking.

*******************************************************************************/

void lock_init(void)
{
	/* initialize the object->lock-record hashtable */

	lock_hashtable_init();

#if defined(ENABLE_VMLOG)
	/* let vmlog set up its lock-tracing state */
	vmlog_cacao_init_lock();
#endif
}
224 /* lock_pre_compute_thinlock ***************************************************
226 Pre-compute the thin lock value for a thread index.
229 index........the thead index (>= 1)
232 the thin lock value for this thread index
234 *******************************************************************************/
236 ptrint lock_pre_compute_thinlock(s4 index)
238 return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
242 /* lock_record_new *************************************************************
244 Allocate a lock record.
246 *******************************************************************************/
248 static lock_record_t *lock_record_new(void)
252 /* allocate the data structure on the C heap */
254 lr = NEW(lock_record_t);
256 #if defined(ENABLE_STATISTICS)
258 size_lock_record += sizeof(lock_record_t);
261 /* initialize the members */
266 lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));
268 #if defined(ENABLE_GC_CACAO)
269 /* register the lock object as weak reference with the GC */
271 gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
274 /* initialize the mutex */
276 mutex_init(&(lr->mutex));
278 DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
284 /* lock_record_free ************************************************************
289 lr....lock record to free
291 *******************************************************************************/
293 static void lock_record_free(lock_record_t *lr)
295 DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
297 /* Destroy the mutex. */
299 mutex_destroy(&(lr->mutex));
301 #if defined(ENABLE_GC_CACAO)
302 /* unregister the lock object reference with the GC */
304 gc_weakreference_unregister(&(lr->object));
307 /* Free the waiters list. */
309 list_free(lr->waiters);
311 /* Free the data structure. */
313 FREE(lr, lock_record_t);
315 #if defined(ENABLE_STATISTICS)
317 size_lock_record -= sizeof(lock_record_t);
322 /*============================================================================*/
323 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
324 /*============================================================================*/
326 /* lock_hashtable_init *********************************************************
328 Initialize the global hashtable mapping objects to lock records.
330 *******************************************************************************/
332 static void lock_hashtable_init(void)
334 mutex_init(&(lock_hashtable.mutex));
336 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
337 lock_hashtable.entries = 0;
338 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
340 #if defined(ENABLE_STATISTICS)
342 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
345 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
349 /* lock_hashtable_grow *********************************************************
351 Grow the lock record hashtable to about twice its current size and
354 *******************************************************************************/
/* NOTE(review): this listing has lines elided — the local declarations
   (oldsize/newsize/i and the hash/slot temporaries), the inner walk of each
   bucket chain, and the #endif lines are not visible. Restore from the
   upstream source before compiling. */
356 /* must be called with hashtable mutex locked */
357 static void lock_hashtable_grow(void)
361 lock_record_t **oldtable;
362 lock_record_t **newtable;
369 /* allocate a new table */
371 oldsize = lock_hashtable.size;
372 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
374 DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
376 oldtable = lock_hashtable.ptr;
377 newtable = MNEW(lock_record_t *, newsize);
379 #if defined(ENABLE_STATISTICS)
381 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
384 MZERO(newtable, lock_record_t *, newsize);
386 /* rehash the entries */
388 for (i = 0; i < oldsize; i++) {
393 h = heap_hashcode(lr->object);
394 newslot = h % newsize;
/* records are re-inserted at the head of their new chain, so per-chain
   order is reversed — harmless for a hash chain */
396 lr->hashlink = newtable[newslot];
397 newtable[newslot] = lr;
403 /* replace the old table */
405 lock_hashtable.ptr = newtable;
406 lock_hashtable.size = newsize;
408 MFREE(oldtable, lock_record_t *, oldsize);
410 #if defined(ENABLE_STATISTICS)
412 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
417 /* lock_hashtable_cleanup ******************************************************
419 Removes (and frees) lock records which have a cleared object reference
420 from the hashtable. The locked object was reclaimed by the GC.
422 *******************************************************************************/
/* NOTE(review): lines are elided from this listing — the local declarations,
   the per-bucket chain walk (the `next`/`prev` bookkeeping used below), the
   closing braces, and the #endif are not visible. Restore from upstream
   before compiling. Only built for the exact (CACAO) GC. */
424 #if defined(ENABLE_GC_CACAO)
425 void lock_hashtable_cleanup(void)
435 /* lock the hashtable */
437 mutex_lock(&(lock_hashtable.mutex));
439 /* search the hashtable for cleared references */
441 for (i = 0; i < lock_hashtable.size; i++) {
442 lr = lock_hashtable.ptr[i];
448 /* remove lock records with cleared references */
/* lr->object is a weak reference registered in lock_record_new(); the GC
   clears it to NULL when the locked object is reclaimed */
450 if (lr->object == NULL) {
452 /* unlink the lock record from the hashtable */
455 lock_hashtable.ptr[i] = next;
457 prev->hashlink = next;
459 /* free the lock record */
461 lock_record_free(lr);
471 /* unlock the hashtable */
473 mutex_unlock(&(lock_hashtable.mutex));
478 /* lock_hashtable_get **********************************************************
480 Find the lock record for the given object. If it does not exists,
481 yet, create it and enter it in the hashtable.
484 t....the current thread
485 o....the object to look up
488 the lock record to use for this object
490 *******************************************************************************/
492 #if defined(ENABLE_GC_BOEHM)
493 static void lock_record_finalizer(void *object, void *p);
/* NOTE(review): lines are elided from this listing — the local declarations,
   the early-unlock-and-return path taken when the record is found in the
   chain, the final `return lr;`, closing braces and #endif lines are not
   visible. Restore from upstream before compiling. */
496 static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
/* fast path: if the lock is already inflated, the lockword IS the record */
502 lockword = lock_lockword_get(t, o);
504 if (IS_FAT_LOCK(lockword))
505 return GET_FAT_LOCK(lockword);
507 /* lock the hashtable */
509 mutex_lock(&(lock_hashtable.mutex));
511 /* lookup the lock record in the hashtable */
/* LLNI critical section: the GC must not move `o` while we hash and
   compare its direct (unwrapped) pointer */
513 LLNI_CRITICAL_START_THREAD(t);
514 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
515 lr = lock_hashtable.ptr[slot];
517 for (; lr != NULL; lr = lr->hashlink) {
518 if (lr->object == LLNI_DIRECT(o))
521 LLNI_CRITICAL_END_THREAD(t);
524 /* not found, we must create a new one */
526 lr = lock_record_new();
528 LLNI_CRITICAL_START_THREAD(t);
529 lr->object = LLNI_DIRECT(o);
530 LLNI_CRITICAL_END_THREAD(t);
532 #if defined(ENABLE_GC_BOEHM)
533 /* register new finalizer to clean up the lock record */
535 GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
538 /* enter it in the hashtable */
540 lr->hashlink = lock_hashtable.ptr[slot];
541 lock_hashtable.ptr[slot] = lr;
542 lock_hashtable.entries++;
544 /* check whether the hash should grow */
/* grow when the load factor exceeds 4/3 (entries/size > 4/3) */
546 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
547 lock_hashtable_grow();
551 /* unlock the hashtable */
553 mutex_unlock(&(lock_hashtable.mutex));
555 /* return the new lock record */
561 /* lock_hashtable_remove *******************************************************
563 Remove the lock record for the given object from the hashtable
564 and free it afterwards.
567 t....the current thread
568 o....the object to look up
570 *******************************************************************************/
/* NOTE(review): lines are elided from this listing — the local declarations
   (lockword, slot, lr), the `if`/`else` around the head-of-chain case, the
   `break` in the unlink loop, and closing braces are not visible. Restore
   from upstream before compiling. */
572 static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
577 lock_record_t *tmplr;
579 /* lock the hashtable */
581 mutex_lock(&(lock_hashtable.mutex));
583 /* get lock record */
585 lockword = lock_lockword_get(t, o);
/* only inflated (fat) locks have a record to remove */
587 assert(IS_FAT_LOCK(lockword));
589 lr = GET_FAT_LOCK(lockword);
591 /* remove the lock-record from the hashtable */
593 LLNI_CRITICAL_START_THREAD(t);
594 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
595 tmplr = lock_hashtable.ptr[slot];
596 LLNI_CRITICAL_END_THREAD(t);
599 /* special handling if it's the first in the chain */
601 lock_hashtable.ptr[slot] = lr->hashlink;
/* otherwise walk the chain to find the predecessor and unlink lr */
604 for (; tmplr != NULL; tmplr = tmplr->hashlink) {
605 if (tmplr->hashlink == lr) {
606 tmplr->hashlink = lr->hashlink;
/* the record must have been present in its chain */
611 assert(tmplr != NULL);
614 /* decrease entry count */
616 lock_hashtable.entries--;
618 /* unlock the hashtable */
620 mutex_unlock(&(lock_hashtable.mutex));
622 /* free the lock record */
624 lock_record_free(lr);
628 /* lock_record_finalizer *******************************************************
630 XXX Remove me for exact GC.
632 *******************************************************************************/
/* Boehm-GC finalizer installed in lock_hashtable_get(): runs the object's
   Java finalizer (if any) and disposes of the object's lock record.
   NOTE(review): lines are elided from this listing — the declarations of
   `o` and `c`, the debug log epilogue, and closing braces are not visible.
   Restore from upstream before compiling. */
634 static void lock_record_finalizer(void *object, void *p)
639 o = (java_handle_t *) object;
641 #if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
642 /* XXX this is only a dirty hack to make Boehm work with handles */
644 o = LLNI_WRAP((java_object_t *) o);
647 LLNI_class_get(o, c);
650 if (opt_DebugFinalizer) {
652 log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
659 /* check for a finalizer function */
661 if (c->finalizer != NULL)
662 finalizer_run(object, p);
664 /* remove the lock-record entry from the hashtable and free it */
666 lock_hashtable_remove(THREADOBJECT, o);
670 /*============================================================================*/
671 /* OBJECT LOCK INITIALIZATION */
672 /*============================================================================*/
675 /* lock_init_object_lock *******************************************************
677 Initialize the monitor pointer of the given object. The monitor gets
678 initialized to an unlocked state.
680 *******************************************************************************/
682 void lock_init_object_lock(java_object_t *o)
686 o->lockword = THIN_UNLOCKED;
690 /*============================================================================*/
691 /* LOCKING ALGORITHM */
692 /*============================================================================*/
695 /* lock_lockword_get ***********************************************************
697 Get the lockword for the given object.
700 t............the current thread
701 o............the object
703 *******************************************************************************/
705 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
709 LLNI_CRITICAL_START_THREAD(t);
710 lockword = LLNI_DIRECT(o)->lockword;
711 LLNI_CRITICAL_END_THREAD(t);
717 /* lock_lockword_set ***********************************************************
719 Set the lockword for the given object.
722 t............the current thread
723 o............the object
724 lockword.....the new lockword value
726 *******************************************************************************/
728 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
730 LLNI_CRITICAL_START_THREAD(t);
731 LLNI_DIRECT(o)->lockword = lockword;
732 LLNI_CRITICAL_END_THREAD(t);
736 /* lock_record_enter ***********************************************************
738 Enter the lock represented by the given lock record.
741 t.................the current thread
742 lr................the lock record
744 *******************************************************************************/
746 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
748 mutex_lock(&(lr->mutex));
753 /* lock_record_exit ************************************************************
755 Release the lock represented by the given lock record.
758 t.................the current thread
759 lr................the lock record
762 The current thread must own the lock represented by this lock record.
763 This is NOT checked by this function!
765 *******************************************************************************/
767 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
770 mutex_unlock(&(lr->mutex));
774 /* lock_inflate ****************************************************************
776 Inflate the lock of the given object. This may only be called by the
777 owner of the monitor of the object.
780 t............the current thread
781 o............the object of which to inflate the lock
782 lr...........the lock record to install. The current thread must
783 own the lock of this lock record!
786 The current thread must be the owner of this object's monitor AND
787 of the lock record's lock!
789 *******************************************************************************/
791 static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
795 /* get the current lock count */
797 lockword = lock_lockword_get(t, o);
799 if (IS_FAT_LOCK(lockword)) {
800 assert(GET_FAT_LOCK(lockword) == lr);
804 assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
806 /* copy the count from the thin lock */
808 lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
811 DEBUGLOCKS(("[lock_inflate : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
812 lr, t, o, lockword, lr->count));
816 lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
820 /* TODO Move this function into threadlist.[ch]. */
/* Look up a live thread by its thread index (as stored in a thin lockword).
   NOTE(review): lines are elided from this listing — the local declaration
   of `t`, the `continue`/match handling inside the loop, any thread-list
   locking, and the function's return are not visible. Restore from upstream
   before compiling. Threads still in state NEW are skipped (their index is
   not yet valid for locking purposes). */
822 static threadobject *threads_lookup_thread_id(int index)
828 for (t = threadlist_first(); t != NULL; t = threadlist_next(t)) {
829 if (t->state == THREAD_STATE_NEW)
831 if (t->index == index)
/* Block the current thread `t` until the thin lock on `o` — currently held
   by the thread encoded in `lockword` — is released or inflated. Implements
   the flat-lock-contention (FLC) protocol of the tasuki/SableVM scheme:
   the waiter sets the holder's flc_bit, links itself onto the holder's FLC
   list and sleeps on its own flc_cond until notify_flc_waiters() wakes it.
   NOTE(review): lines are elided from this listing — the declarations of
   `index`/`old_flc`, the early-return when the holder thread is not found,
   the line registering the waited-on object (presumably `t->flc_object = o;`
   — confirm against upstream), the loop/branch braces and the code run when
   the lockword changed under us are not visible. Restore before compiling. */
839 static void sable_flc_waiting(ptrint lockword, threadobject *t, java_handle_t *o)
842 threadobject *t_other;
845 index = GET_THREAD_INDEX(lockword);
846 t_other = threads_lookup_thread_id(index);
848 /* failure, TODO: add statistics */
/* all FLC state of the holder is protected by its flc_lock */
851 mutex_lock(&t_other->flc_lock);
852 old_flc = t_other->flc_bit;
853 t_other->flc_bit = true;
855 DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d",
856 t->index, t_other->index));
858 /* Set FLC bit first, then read the lockword again */
861 lockword = lock_lockword_get(t, o);
863 /* Lockword is still the way it was seen before */
864 if (IS_THIN_LOCK(lockword) && (GET_THREAD_INDEX(lockword) == index))
866 /* Add tuple (t, o) to the other thread's FLC list */
868 t->flc_next = t_other->flc_list;
869 t_other->flc_list = t;
873 threadobject *current;
875 /* Wait until another thread sees the flc bit and notifies
877 pthread_cond_wait(&t->flc_cond, &t_other->flc_lock);
879 /* Traverse FLC list looking if we're still there */
880 current = t_other->flc_list;
881 while (current && current != t)
882 current = current->flc_next;
884 /* not in list anymore, can stop waiting */
887 /* We are still in the list -- the other thread cannot have seen
889 assert(t_other->flc_bit);
892 t->flc_object = NULL; /* for garbage collector? */
/* restore the flc_bit we saw on entry */
896 t_other->flc_bit = old_flc;
898 mutex_unlock(&t_other->flc_lock);
901 static void notify_flc_waiters(threadobject *t, java_handle_t *o)
903 threadobject *current;
905 mutex_lock(&t->flc_lock);
907 current = t->flc_list;
910 if (current->flc_object != o)
912 /* The object has to be inflated so the other threads can properly
915 /* Only if not already inflated */
916 ptrint lockword = lock_lockword_get(t, current->flc_object);
917 if (IS_THIN_LOCK(lockword)) {
918 lock_record_t *lr = lock_hashtable_get(t, current->flc_object);
919 lock_record_enter(t, lr);
921 DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
922 t->index, (void*) current->flc_object, (void*) lr));
924 lock_inflate(t, current->flc_object, lr);
927 /* Wake the waiting thread */
928 pthread_cond_broadcast(¤t->flc_cond);
930 current = current->flc_next;
935 mutex_unlock(&t->flc_lock);
938 /* lock_monitor_enter **********************************************************
940 Acquire the monitor of the given object. If the current thread already
941 owns the monitor, the lock counter is simply increased.
943 This function blocks until it can acquire the monitor.
946 t............the current thread
947 o............the object of which to enter the monitor
950 true.........the lock has been successfully acquired
951 false........an exception has been thrown
953 *******************************************************************************/
/* NOTE(review): many lines are elided from this listing — the local
   declarations (t, lr, lockword, thinlock), the NULL check around the
   nullpointer exception, the `return true`/`return false` statements, the
   fat-lock owner/count bookkeeping, the retry loop around the contended
   path, and most closing braces are not visible. Restore from upstream
   before compiling. */
955 bool lock_monitor_enter(java_handle_t *o)
958 /* CAUTION: This code assumes that ptrint is unsigned! */
964 exceptions_throw_nullpointerexception();
970 thinlock = t->thinlock;
973 /* most common case: try to thin-lock an unlocked object */
/* CAS inside an LLNI critical section; yields the pre-swap lockword */
975 LLNI_CRITICAL_START_THREAD(t);
976 lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
977 LLNI_CRITICAL_END_THREAD(t);
979 if (lockword == THIN_UNLOCKED) {
980 /* success. we locked it */
981 /* The Java Memory Model requires a memory barrier here: */
982 /* Because of the CAS above, this barrier is a nop on x86 / x86_64 */
983 MEMORY_BARRIER_AFTER_ATOMIC();
987 /* next common case: recursive lock with small recursion count */
988 /* We don't have to worry about stale values here, as any stale value */
989 /* will indicate another thread holding the lock (or an inflated lock) */
991 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
992 /* we own this monitor */
993 /* check the current recursion count */
/* since only the count bits differ from our thinlock, the xor isolates
   the count field; compare against the maximum representable count */
995 if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
997 /* the recursion count is low enough */
999 lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);
1001 /* success. we locked it */
1005 /* recursion count overflow */
/* count field saturated: inflate to a fat lock to keep counting */
1007 lr = lock_hashtable_get(t, o);
1008 lock_record_enter(t, lr);
1009 lock_inflate(t, o, lr);
1012 notify_flc_waiters(t, o);
1018 /* the lock is either contented or fat */
1020 if (IS_FAT_LOCK(lockword)) {
1022 lr = GET_FAT_LOCK(lockword);
1024 /* check for recursive entering */
1025 if (lr->owner == t) {
1030 /* acquire the mutex of the lock record */
1032 lock_record_enter(t, lr);
/* a freshly acquired fat lock must not carry a stale recursion count */
1034 assert(lr->count == 0);
1039 /****** inflation path ******/
1041 #if defined(ENABLE_JVMTI)
1042 /* Monitor Contended Enter */
1043 jvmti_MonitorContendedEntering(false, o);
/* contended thin lock: block via the FLC protocol until released/inflated */
1046 sable_flc_waiting(lockword, t, o);
1048 #if defined(ENABLE_JVMTI)
1049 /* Monitor Contended Entered */
1050 jvmti_MonitorContendedEntering(true, o);
1056 /* lock_monitor_exit ***********************************************************
1058 Decrement the counter of a (currently owned) monitor. If the counter
1059 reaches zero, release the monitor.
1061 If the current thread is not the owner of the monitor, an
1062 IllegalMonitorState exception is thrown.
1065 t............the current thread
1066 o............the object of which to exit the monitor
1069 true.........everything ok,
1070 false........an exception has been thrown
1072 *******************************************************************************/
/* NOTE(review): many lines are elided from this listing — the local
   declarations, the NULL check, all `return true`/`return false`
   statements, the flc_bit test guarding the DEBUGLOCKS below, the
   fat-lock count decrement, and most closing braces are not visible.
   Restore from upstream before compiling. */
1074 bool lock_monitor_exit(java_handle_t *o)
1081 exceptions_throw_nullpointerexception();
1087 thinlock = t->thinlock;
1089 /* We don't have to worry about stale values here, as any stale value */
1090 /* will indicate that we don't own the lock. */
1092 lockword = lock_lockword_get(t, o);
1094 /* most common case: we release a thin lock that we hold once */
1096 if (lockword == thinlock) {
1097 /* memory barrier for Java Memory Model */
1098 STORE_ORDER_BARRIER();
1099 lock_lockword_set(t, o, THIN_UNLOCKED);
1100 /* memory barrier for thin locking */
1103 /* check if there has been a flat lock contention on this object */
1106 DEBUGLOCKS(("thread %d saw flc bit", t->index));
1108 /* there has been a contention on this thin lock */
/* wake any threads parked in sable_flc_waiting() on this object */
1109 notify_flc_waiters(t, o);
1115 /* next common case: we release a recursive lock, count > 0 */
1117 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
1118 lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
1122 /* either the lock is fat, or we don't hold it at all */
1124 if (IS_FAT_LOCK(lockword)) {
1128 lr = GET_FAT_LOCK(lockword);
1130 /* check if we own this monitor */
1131 /* We don't have to worry about stale values here, as any stale value */
1132 /* will be != t and thus fail this check. */
1134 if (lr->owner != t) {
1135 exceptions_throw_illegalmonitorstateexception();
1139 /* { the current thread `t` owns the lock record `lr` on object `o` } */
1141 if (lr->count != 0) {
1142 /* we had locked this one recursively. just decrement, it will */
1143 /* still be locked. */
1148 /* unlock this lock record */
1151 mutex_unlock(&(lr->mutex));
1156 /* legal thin lock cases have been handled above, so this is an error */
1158 exceptions_throw_illegalmonitorstateexception();
1164 /* lock_record_add_waiter ******************************************************
1166 Add a thread to the list of waiting threads of a lock record.
1169 lr...........the lock record
1170 thread.......the thread to add
1172 *******************************************************************************/
1174 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
1178 /* Allocate a waiter data structure. */
1180 w = NEW(lock_waiter_t);
1182 #if defined(ENABLE_STATISTICS)
1184 size_lock_waiter += sizeof(lock_waiter_t);
1187 /* Store the thread in the waiter structure. */
1191 /* Add the waiter as last entry to waiters list. */
1193 list_add_last(lr->waiters, w);
1197 /* lock_record_remove_waiter ***************************************************
1199 Remove a thread from the list of waiting threads of a lock record.
1202 lr...........the lock record
1203 t............the current thread
1206 The current thread must be the owner of the lock record.
1208 *******************************************************************************/
1210 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
1215 /* Get the waiters list. */
1219 for (w = list_first(l); w != NULL; w = list_next(l, w)) {
1220 if (w->thread == thread) {
1221 /* Remove the waiter entry from the list. */
1225 /* Free the waiter data structure. */
1227 FREE(w, lock_waiter_t);
1229 #if defined(ENABLE_STATISTICS)
1231 size_lock_waiter -= sizeof(lock_waiter_t);
1238 /* This should never happen. */
1240 vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
1244 /* lock_record_wait ************************************************************
1246 Wait on a lock record for a given (maximum) amount of time.
1249 t............the current thread
1250 lr...........the lock record
1251 millis.......milliseconds of timeout
1252 nanos........nanoseconds of timeout
1255 true.........we have been interrupted,
1256 false........everything ok
1259 The current thread must be the owner of the lock record.
1260 This is NOT checked by this function!
1262 *******************************************************************************/
/* NOTE(review): lines are elided from this listing — the declaration of
   `lockcount`, the statement zeroing `lr->count` before releasing the
   record (required so the mutex actually unlocks the monitor; confirm
   against upstream), and closing braces are not visible. Restore before
   compiling. */
1264 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1267 bool wasinterrupted = false;
1269 DEBUGLOCKS(("[lock_record_wait : lr=%p, t=%p, millis=%lld, nanos=%d]",
1270 lr, thread, millis, nanos));
1272 /* { the thread t owns the fat lock record lr on the object o } */
1274 /* register us as waiter for this object */
1276 lock_record_add_waiter(lr, thread);
1278 /* remember the old lock count */
1280 lockcount = lr->count;
1282 /* unlock this record */
1285 lock_record_exit(thread, lr);
1287 /* wait until notified/interrupted/timed out */
1289 threads_wait_with_timeout_relative(thread, millis, nanos);
1291 /* re-enter the monitor */
1293 lock_record_enter(thread, lr);
1295 /* remove us from the list of waiting threads */
1297 lock_record_remove_waiter(lr, thread);
1299 /* restore the old lock count */
1301 lr->count = lockcount;
1303 /* We can only be signaled OR interrupted, not both. If both flags
1304 are set, reset only signaled and leave the thread in
1305 interrupted state. Otherwise, clear both. */
1307 if (!thread->signaled) {
1308 wasinterrupted = thread->interrupted;
1309 thread->interrupted = false;
1312 thread->signaled = false;
1314 /* return if we have been interrupted */
1316 return wasinterrupted;
1320 /* lock_monitor_wait ***********************************************************
1322 Wait on an object for a given (maximum) amount of time.
1325 t............the current thread
1326 o............the object
1327 millis.......milliseconds of timeout
1328 nanos........nanoseconds of timeout
1331 The current thread must be the owner of the object's monitor.
1333 *******************************************************************************/
1335 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
1340 lockword = lock_lockword_get(t, o);
1342 /* check if we own this monitor */
1343 /* We don't have to worry about stale values here, as any stale value */
1344 /* will fail this check. */
1346 if (IS_FAT_LOCK(lockword)) {
1348 lr = GET_FAT_LOCK(lockword);
1350 if (lr->owner != t) {
1351 exceptions_throw_illegalmonitorstateexception();
1356 /* it's a thin lock */
1358 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1359 exceptions_throw_illegalmonitorstateexception();
1363 /* inflate this lock */
1365 lr = lock_hashtable_get(t, o);
1366 lock_record_enter(t, lr);
1367 lock_inflate(t, o, lr);
1369 notify_flc_waiters(t, o);
1372 /* { the thread t owns the fat lock record lr on the object o } */
1374 if (lock_record_wait(t, lr, millis, nanos))
1375 exceptions_throw_interruptedexception();
1379 /* lock_record_notify **********************************************************
1381 Notify one thread or all threads waiting on the given lock record.
1384 t............the current thread
1385 lr...........the lock record
1386 one..........if true, only notify one thread
1389 The current thread must be the owner of the lock record.
1390 This is NOT checked by this function!
1392 *******************************************************************************/
1394 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1398 threadobject *waitingthread;
1400 /* { the thread t owns the fat lock record lr on the object o } */
1402 /* Get the waiters list. */
1406 for (w = list_first(l); w != NULL; w = list_next(l, w)) {
1407 /* signal the waiting thread */
1409 waitingthread = w->thread;
1411 /* We must skip threads which have already been notified. They will
1412 remove themselves from the list. */
1414 if (waitingthread->signaled)
1417 /* Enter the wait-mutex. */
1419 mutex_lock(&(waitingthread->waitmutex));
1421 DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
1422 lr, t, waitingthread, waitingthread->sleeping, one));
1424 /* Signal the thread if it's sleeping. sleeping can be false
1425 when the waiting thread is blocked between giving up the
1426 monitor and entering the waitmutex. It will eventually
1427 observe that it's signaled and refrain from going to
1430 if (waitingthread->sleeping)
1431 pthread_cond_signal(&(waitingthread->waitcond));
1433 /* Mark the thread as signaled. */
1435 waitingthread->signaled = true;
1437 /* Leave the wait-mutex. */
1439 mutex_unlock(&(waitingthread->waitmutex));
1441 /* if we should only wake one, we are done */
1449 /* lock_monitor_notify *********************************************************
1451 Notify one thread or all threads waiting on the given object.
1454 t............the current thread
1455 o............the object
1456 one..........if true, only notify one thread
1459 The current thread must be the owner of the object's monitor.
1461 *******************************************************************************/
1463 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1468 lockword = lock_lockword_get(t, o);
1470 /* check if we own this monitor */
1471 /* We don't have to worry about stale values here, as any stale value */
1472 /* will fail this check. */
1474 if (IS_FAT_LOCK(lockword)) {
1476 lr = GET_FAT_LOCK(lockword);
1478 if (lr->owner != t) {
1479 exceptions_throw_illegalmonitorstateexception();
1484 /* it's a thin lock */
1486 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1487 exceptions_throw_illegalmonitorstateexception();
1491 /* no thread can wait on a thin lock, so there's nothing to do. */
1495 /* { the thread t owns the fat lock record lr on the object o } */
1497 lock_record_notify(t, lr, one);
1502 /*============================================================================*/
1503 /* INQUIRY FUNCTIONS                                                         */
1504 /*============================================================================*/
1507 /* lock_is_held_by_current_thread **********************************************
1509 Return true if the current thread owns the monitor of the given object.
1512 o............the object
1515 true, if the current thread holds the lock of this object.
1517 *******************************************************************************/
1519 bool lock_is_held_by_current_thread(java_handle_t *o)
1527 /* check if we own this monitor */
1528 /* We don't have to worry about stale values here, as any stale value */
1529 /* will fail this check. */
1531 lockword = lock_lockword_get(t, o);
1533 if (IS_FAT_LOCK(lockword)) {
1534 /* it's a fat lock */
1536 lr = GET_FAT_LOCK(lockword);
1538 return (lr->owner == t);
1541 /* it's a thin lock */
1543 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1549 /*============================================================================*/
1550 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1551 /*============================================================================*/
1554 /* lock_wait_for_object ********************************************************
1556 Wait for the given object.
1559 o............the object
1560 millis.......milliseconds to wait
1561 nanos........nanoseconds to wait
1563 *******************************************************************************/
1565 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1567 threadobject *thread;
1569 thread = THREADOBJECT;
1571 lock_monitor_wait(thread, o, millis, nanos);
1575 /* lock_notify_object **********************************************************
1577 Notify one thread waiting on the given object.
1580 o............the object
1582 *******************************************************************************/
1584 void lock_notify_object(java_handle_t *o)
1586 threadobject *thread;
1588 thread = THREADOBJECT;
1590 lock_monitor_notify(thread, o, true);
1594 /* lock_notify_all_object ******************************************************
1596 Notify all threads waiting on the given object.
1599 o............the object
1601 *******************************************************************************/
1603 void lock_notify_all_object(java_handle_t *o)
1605 threadobject *thread;
1607 thread = THREADOBJECT;
1609 lock_monitor_notify(thread, o, false);
1614 * These are local overrides for various environment variables in Emacs.
1615 * Please do not remove this and leave it at the end of the file, where
1616 * Emacs will automagically detect them.
1617 * ---------------------------------------------------------------------
1620 * indent-tabs-mode: t
1624 * vim:noexpandtab:sw=4:ts=4: