1 /* src/threads/native/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007, 2008
4 CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
6 This file is part of CACAO.
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2, or (at
11 your option) any later version.
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
37 #include "mm/memory.h"
39 #include "native/llni.h"
41 #include "threads/lock-common.h"
42 #include "threads/threadlist.h"
43 #include "threads/threads-common.h"
45 #include "threads/native/lock.h"
46 #include "threads/native/threads.h"
48 #include "toolbox/list.h"
50 #include "vm/global.h"
51 #include "vm/exceptions.h"
52 #include "vm/finalizer.h"
53 #include "vm/stringlocal.h"
56 #include "vmcore/options.h"
58 #if defined(ENABLE_STATISTICS)
59 # include "vmcore/statistics.h"
62 #if defined(ENABLE_VMLOG)
63 #include <vmlog_cacao.h>
66 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
70 /* includes for atomic instructions: */
72 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
73 #include "threads/native/generic-primitives.h"
75 #include "machine-instr.h"
78 #if defined(ENABLE_JVMTI)
79 #include "native/jvmti/cacaodbg.h"
82 #if defined(ENABLE_GC_BOEHM)
83 # include "mm/boehm-gc/include/gc.h"
/* debug **********************************************************************/

#if !defined(NDEBUG)
/* Emit a lock-debugging message (printf-style, doubly-parenthesized argument)
   when the -XX:+DebugLocks option is enabled. */
# define DEBUGLOCKS(format) \
    do { \
        if (opt_DebugLocks) { \
            log_println format; \
        } \
    } while (0)
#else
# define DEBUGLOCKS(format)
#endif
/******************************************************************************/
/* MACROS                                                                     */
/******************************************************************************/

/* number of lock records in the first pool allocated for a thread */
#define LOCK_INITIAL_LOCK_RECORDS 8

/* a prime in the middle between 1024 and 2048 */
#define LOCK_INITIAL_HASHTABLE_SIZE 1613

/* Atomically set *address to newvalue iff it currently equals oldvalue.
   Evaluates to the value previously stored at *address, so the caller can
   detect success by comparing the result against oldvalue. */
#define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
    ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
114 /******************************************************************************/
115 /* MACROS FOR THIN/FAT LOCKS */
116 /******************************************************************************/
118 /* We use a variant of the tasuki locks described in the paper
120 * Tamiya Onodera, Kiyokuni Kawachiya
121 * A Study of Locking Objects with Bimodal Fields
122 * Proceedings of the ACM OOPSLA '99, pp. 223-237
125 * The underlying thin locks are a variant of the thin locks described in
127 * Bacon, Konuru, Murthy, Serrano
128 * Thin Locks: Featherweight Synchronization for Java
129 * Proceedings of the ACM Conference on Programming Language Design and
130 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
133 * In thin lock mode the lockword looks like this:
135 * ,----------------------,-----------,---,
136 * | thread ID | count | 0 |
137 * `----------------------'-----------'---´
139 * thread ID......the 'index' of the owning thread, or 0
140 * count..........number of times the lock has been entered minus 1
141 * 0..............the shape bit is 0 in thin lock mode
143 * In fat lock mode it is basically a lock_record_t *:
145 * ,----------------------------------,---,
146 * | lock_record_t * (without LSB) | 1 |
147 * `----------------------------------'---´
149 * 1..............the shape bit is 1 in fat lock mode
#if SIZEOF_VOID_P == 8
#define THIN_LOCK_WORD_SIZE    64
#else
#define THIN_LOCK_WORD_SIZE    32
#endif

/* the LSB of the lockword distinguishes thin (0) from fat (1) locks */
#define THIN_LOCK_SHAPE_BIT    0x01

/* lockword value of an unlocked object */
#define THIN_UNLOCKED          0

/* recursion count field: bits [1 .. THIN_LOCK_COUNT_SIZE] */
#define THIN_LOCK_COUNT_SHIFT  1
#define THIN_LOCK_COUNT_SIZE   8
#define THIN_LOCK_COUNT_INCR   (1 << THIN_LOCK_COUNT_SHIFT)
#define THIN_LOCK_COUNT_MAX    ((1 << THIN_LOCK_COUNT_SIZE) - 1)
#define THIN_LOCK_COUNT_MASK   (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)

/* thread-index field: the remaining high bits */
#define THIN_LOCK_TID_SHIFT    (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
#define THIN_LOCK_TID_SIZE     (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)

#define IS_THIN_LOCK(lockword)  (!((lockword) & THIN_LOCK_SHAPE_BIT))
#define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)

#define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
#define MAKE_FAT_LOCK(ptr)      ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)

#define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
/* BUGFIX: the macro argument must be parenthesized. Without the inner parens,
   an expression argument such as (a | b) expands to
   "(unsigned) a | b >> THIN_LOCK_TID_SHIFT", which shifts only part of the
   expression and yields a wrong thread index. */
#define GET_THREAD_INDEX(lockword) ((unsigned) (lockword) >> THIN_LOCK_TID_SHIFT)
181 /* global variables ***********************************************************/
183 /* hashtable mapping objects to lock records */
184 static lock_hashtable_t lock_hashtable;
187 /******************************************************************************/
189 /******************************************************************************/
191 static void lock_hashtable_init(void);
193 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
194 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
195 static void lock_record_enter(threadobject *t, lock_record_t *lr);
196 static void lock_record_exit(threadobject *t, lock_record_t *lr);
197 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
198 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
201 /*============================================================================*/
202 /* INITIALIZATION OF DATA STRUCTURES */
203 /*============================================================================*/
/* lock_init *******************************************************************

   Initialize global data for locking.

*******************************************************************************/

void lock_init(void)
{
    /* initialize lock hashtable */

    lock_hashtable_init();

#if defined(ENABLE_VMLOG)
    vmlog_cacao_init_lock();
#endif
}
224 /* lock_pre_compute_thinlock ***************************************************
226 Pre-compute the thin lock value for a thread index.
229 index........the thead index (>= 1)
232 the thin lock value for this thread index
234 *******************************************************************************/
236 ptrint lock_pre_compute_thinlock(s4 index)
238 return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
242 /* lock_record_new *************************************************************
244 Allocate a lock record.
246 *******************************************************************************/
248 static lock_record_t *lock_record_new(void)
253 /* allocate the data structure on the C heap */
255 lr = NEW(lock_record_t);
257 #if defined(ENABLE_STATISTICS)
259 size_lock_record += sizeof(lock_record_t);
262 /* initialize the members */
267 lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));
269 #if defined(ENABLE_GC_CACAO)
270 /* register the lock object as weak reference with the GC */
272 gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
275 /* initialize the mutex */
277 result = pthread_mutex_init(&(lr->mutex), NULL);
279 vm_abort_errnum(result, "lock_record_new: pthread_mutex_init failed");
281 DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
287 /* lock_record_free ************************************************************
292 lr....lock record to free
294 *******************************************************************************/
296 static void lock_record_free(lock_record_t *lr)
300 DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
302 /* Destroy the mutex. */
304 result = pthread_mutex_destroy(&(lr->mutex));
306 vm_abort_errnum(result, "lock_record_free: pthread_mutex_destroy failed");
308 #if defined(ENABLE_GC_CACAO)
309 /* unregister the lock object reference with the GC */
311 gc_weakreference_unregister(&(lr->object));
314 /* Free the waiters list. */
316 list_free(lr->waiters);
318 /* Free the data structure. */
320 FREE(lr, lock_record_t);
322 #if defined(ENABLE_STATISTICS)
324 size_lock_record -= sizeof(lock_record_t);
329 /*============================================================================*/
330 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
331 /*============================================================================*/
333 /* lock_hashtable_init *********************************************************
335 Initialize the global hashtable mapping objects to lock records.
337 *******************************************************************************/
339 static void lock_hashtable_init(void)
341 pthread_mutex_init(&(lock_hashtable.mutex), NULL);
343 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
344 lock_hashtable.entries = 0;
345 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
347 #if defined(ENABLE_STATISTICS)
349 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
352 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
356 /* lock_hashtable_grow *********************************************************
358 Grow the lock record hashtable to about twice its current size and
361 *******************************************************************************/
363 /* must be called with hashtable mutex locked */
364 static void lock_hashtable_grow(void)
368 lock_record_t **oldtable;
369 lock_record_t **newtable;
376 /* allocate a new table */
378 oldsize = lock_hashtable.size;
379 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
381 DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
383 oldtable = lock_hashtable.ptr;
384 newtable = MNEW(lock_record_t *, newsize);
386 #if defined(ENABLE_STATISTICS)
388 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
391 MZERO(newtable, lock_record_t *, newsize);
393 /* rehash the entries */
395 for (i = 0; i < oldsize; i++) {
400 h = heap_hashcode(lr->object);
401 newslot = h % newsize;
403 lr->hashlink = newtable[newslot];
404 newtable[newslot] = lr;
410 /* replace the old table */
412 lock_hashtable.ptr = newtable;
413 lock_hashtable.size = newsize;
415 MFREE(oldtable, lock_record_t *, oldsize);
417 #if defined(ENABLE_STATISTICS)
419 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
/* lock_hashtable_cleanup ******************************************************

   Removes (and frees) lock records which have a cleared object reference
   from the hashtable. The locked object was reclaimed by the GC.

*******************************************************************************/

#if defined(ENABLE_GC_CACAO)
void lock_hashtable_cleanup(void)
{
    lock_record_t *lr;
    lock_record_t *prev;
    lock_record_t *next;
    u4             i;

    /* lock the hashtable */

    pthread_mutex_lock(&(lock_hashtable.mutex));

    /* search the hashtable for cleared references */

    for (i = 0; i < lock_hashtable.size; i++) {
        lr = lock_hashtable.ptr[i];
        prev = NULL;

        while (lr) {
            next = lr->hashlink;

            /* remove lock records with cleared references */

            if (lr->object == NULL) {

                /* unlink the lock record from the hashtable */

                if (prev == NULL)
                    lock_hashtable.ptr[i] = next;
                else
                    prev->hashlink = next;

                /* free the lock record */

                lock_record_free(lr);

                lock_hashtable.entries--;
            }
            else {
                prev = lr;
            }

            lr = next;
        }
    }

    /* unlock the hashtable */

    pthread_mutex_unlock(&(lock_hashtable.mutex));
}
#endif
485 /* lock_hashtable_get **********************************************************
487 Find the lock record for the given object. If it does not exists,
488 yet, create it and enter it in the hashtable.
491 t....the current thread
492 o....the object to look up
495 the lock record to use for this object
497 *******************************************************************************/
499 #if defined(ENABLE_GC_BOEHM)
500 static void lock_record_finalizer(void *object, void *p);
503 static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
509 lockword = lock_lockword_get(t, o);
511 if (IS_FAT_LOCK(lockword))
512 return GET_FAT_LOCK(lockword);
514 /* lock the hashtable */
516 pthread_mutex_lock(&(lock_hashtable.mutex));
518 /* lookup the lock record in the hashtable */
520 LLNI_CRITICAL_START_THREAD(t);
521 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
522 lr = lock_hashtable.ptr[slot];
524 for (; lr != NULL; lr = lr->hashlink) {
525 if (lr->object == LLNI_DIRECT(o))
528 LLNI_CRITICAL_END_THREAD(t);
531 /* not found, we must create a new one */
533 lr = lock_record_new();
535 LLNI_CRITICAL_START_THREAD(t);
536 lr->object = LLNI_DIRECT(o);
537 LLNI_CRITICAL_END_THREAD(t);
539 #if defined(ENABLE_GC_BOEHM)
540 /* register new finalizer to clean up the lock record */
542 GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
545 /* enter it in the hashtable */
547 lr->hashlink = lock_hashtable.ptr[slot];
548 lock_hashtable.ptr[slot] = lr;
549 lock_hashtable.entries++;
551 /* check whether the hash should grow */
553 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
554 lock_hashtable_grow();
558 /* unlock the hashtable */
560 pthread_mutex_unlock(&(lock_hashtable.mutex));
562 /* return the new lock record */
568 /* lock_hashtable_remove *******************************************************
570 Remove the lock record for the given object from the hashtable
571 and free it afterwards.
574 t....the current thread
575 o....the object to look up
577 *******************************************************************************/
579 static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
584 lock_record_t *tmplr;
586 /* lock the hashtable */
588 pthread_mutex_lock(&(lock_hashtable.mutex));
590 /* get lock record */
592 lockword = lock_lockword_get(t, o);
594 assert(IS_FAT_LOCK(lockword));
596 lr = GET_FAT_LOCK(lockword);
598 /* remove the lock-record from the hashtable */
600 LLNI_CRITICAL_START_THREAD(t);
601 slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
602 tmplr = lock_hashtable.ptr[slot];
603 LLNI_CRITICAL_END_THREAD(t);
606 /* special handling if it's the first in the chain */
608 lock_hashtable.ptr[slot] = lr->hashlink;
611 for (; tmplr != NULL; tmplr = tmplr->hashlink) {
612 if (tmplr->hashlink == lr) {
613 tmplr->hashlink = lr->hashlink;
618 assert(tmplr != NULL);
621 /* decrease entry count */
623 lock_hashtable.entries--;
625 /* unlock the hashtable */
627 pthread_mutex_unlock(&(lock_hashtable.mutex));
629 /* free the lock record */
631 lock_record_free(lr);
635 /* lock_record_finalizer *******************************************************
637 XXX Remove me for exact GC.
639 *******************************************************************************/
641 static void lock_record_finalizer(void *object, void *p)
646 o = (java_handle_t *) object;
648 #if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
649 /* XXX this is only a dirty hack to make Boehm work with handles */
651 o = LLNI_WRAP((java_object_t *) o);
654 LLNI_class_get(o, c);
657 if (opt_DebugFinalizer) {
659 log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
666 /* check for a finalizer function */
668 if (c->finalizer != NULL)
669 finalizer_run(object, p);
671 /* remove the lock-record entry from the hashtable and free it */
673 lock_hashtable_remove(THREADOBJECT, o);
677 /*============================================================================*/
678 /* OBJECT LOCK INITIALIZATION */
679 /*============================================================================*/
682 /* lock_init_object_lock *******************************************************
684 Initialize the monitor pointer of the given object. The monitor gets
685 initialized to an unlocked state.
687 *******************************************************************************/
689 void lock_init_object_lock(java_object_t *o)
693 o->lockword = THIN_UNLOCKED;
697 /*============================================================================*/
698 /* LOCKING ALGORITHM */
699 /*============================================================================*/
702 /* lock_lockword_get ***********************************************************
704 Get the lockword for the given object.
707 t............the current thread
708 o............the object
710 *******************************************************************************/
712 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
716 LLNI_CRITICAL_START_THREAD(t);
717 lockword = LLNI_DIRECT(o)->lockword;
718 LLNI_CRITICAL_END_THREAD(t);
724 /* lock_lockword_set ***********************************************************
726 Set the lockword for the given object.
729 t............the current thread
730 o............the object
731 lockword.....the new lockword value
733 *******************************************************************************/
735 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
737 LLNI_CRITICAL_START_THREAD(t);
738 LLNI_DIRECT(o)->lockword = lockword;
739 LLNI_CRITICAL_END_THREAD(t);
743 /* lock_record_enter ***********************************************************
745 Enter the lock represented by the given lock record.
748 t.................the current thread
749 lr................the lock record
751 *******************************************************************************/
753 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
755 pthread_mutex_lock(&(lr->mutex));
760 /* lock_record_exit ************************************************************
762 Release the lock represented by the given lock record.
765 t.................the current thread
766 lr................the lock record
769 The current thread must own the lock represented by this lock record.
770 This is NOT checked by this function!
772 *******************************************************************************/
774 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
777 pthread_mutex_unlock(&(lr->mutex));
781 /* lock_inflate ****************************************************************
783 Inflate the lock of the given object. This may only be called by the
784 owner of the monitor of the object.
787 t............the current thread
788 o............the object of which to inflate the lock
789 lr...........the lock record to install. The current thread must
790 own the lock of this lock record!
793 The current thread must be the owner of this object's monitor AND
794 of the lock record's lock!
796 *******************************************************************************/
798 static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
802 /* get the current lock count */
804 lockword = lock_lockword_get(t, o);
806 if (IS_FAT_LOCK(lockword)) {
807 assert(GET_FAT_LOCK(lockword) == lr);
811 assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
813 /* copy the count from the thin lock */
815 lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
818 DEBUGLOCKS(("[lock_inflate : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
819 lr, t, o, lockword, lr->count));
823 lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
827 /* TODO Move this function into threadlist.[ch]. */
829 static threadobject *threads_lookup_thread_id(int index)
835 for (t = threadlist_first(); t != NULL; t = threadlist_next(t)) {
836 if (t->state == THREAD_STATE_NEW)
838 if (t->index == index)
846 static void sable_flc_waiting(ptrint lockword, threadobject *t, java_handle_t *o)
849 threadobject *t_other;
852 index = GET_THREAD_INDEX(lockword);
853 t_other = threads_lookup_thread_id(index);
855 /* failure, TODO: add statistics */
858 pthread_mutex_lock(&t_other->flc_lock);
859 old_flc = t_other->flc_bit;
860 t_other->flc_bit = true;
862 DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d",
863 t->index, t_other->index));
865 /* Set FLC bit first, then read the lockword again */
868 lockword = lock_lockword_get(t, o);
870 /* Lockword is still the way it was seen before */
871 if (IS_THIN_LOCK(lockword) && (GET_THREAD_INDEX(lockword) == index))
873 /* Add tuple (t, o) to the other thread's FLC list */
875 t->flc_next = t_other->flc_list;
876 t_other->flc_list = t;
880 threadobject *current;
882 /* Wait until another thread sees the flc bit and notifies
884 pthread_cond_wait(&t->flc_cond, &t_other->flc_lock);
886 /* Traverse FLC list looking if we're still there */
887 current = t_other->flc_list;
888 while (current && current != t)
889 current = current->flc_next;
891 /* not in list anymore, can stop waiting */
894 /* We are still in the list -- the other thread cannot have seen
896 assert(t_other->flc_bit);
899 t->flc_object = NULL; /* for garbage collector? */
903 t_other->flc_bit = old_flc;
905 pthread_mutex_unlock(&t_other->flc_lock);
908 static void notify_flc_waiters(threadobject *t, java_handle_t *o)
910 threadobject *current;
912 pthread_mutex_lock(&t->flc_lock);
914 current = t->flc_list;
917 if (current->flc_object != o)
919 /* The object has to be inflated so the other threads can properly
922 /* Only if not already inflated */
923 ptrint lockword = lock_lockword_get(t, current->flc_object);
924 if (IS_THIN_LOCK(lockword)) {
925 lock_record_t *lr = lock_hashtable_get(t, current->flc_object);
926 lock_record_enter(t, lr);
928 DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
929 t->index, (void*) current->flc_object, (void*) lr));
931 lock_inflate(t, current->flc_object, lr);
934 /* Wake the waiting thread */
935 pthread_cond_broadcast(¤t->flc_cond);
937 current = current->flc_next;
942 pthread_mutex_unlock(&t->flc_lock);
945 /* lock_monitor_enter **********************************************************
947 Acquire the monitor of the given object. If the current thread already
948 owns the monitor, the lock counter is simply increased.
950 This function blocks until it can acquire the monitor.
953 t............the current thread
954 o............the object of which to enter the monitor
957 true.........the lock has been successfully acquired
958 false........an exception has been thrown
960 *******************************************************************************/
962 bool lock_monitor_enter(java_handle_t *o)
965 /* CAUTION: This code assumes that ptrint is unsigned! */
971 exceptions_throw_nullpointerexception();
977 thinlock = t->thinlock;
980 /* most common case: try to thin-lock an unlocked object */
982 LLNI_CRITICAL_START_THREAD(t);
983 lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
984 LLNI_CRITICAL_END_THREAD(t);
986 if (lockword == THIN_UNLOCKED) {
987 /* success. we locked it */
988 /* The Java Memory Model requires a memory barrier here: */
989 /* Because of the CAS above, this barrier is a nop on x86 / x86_64 */
990 MEMORY_BARRIER_AFTER_ATOMIC();
994 /* next common case: recursive lock with small recursion count */
995 /* We don't have to worry about stale values here, as any stale value */
996 /* will indicate another thread holding the lock (or an inflated lock) */
998 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
999 /* we own this monitor */
1000 /* check the current recursion count */
1002 if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
1004 /* the recursion count is low enough */
1006 lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);
1008 /* success. we locked it */
1012 /* recursion count overflow */
1014 lr = lock_hashtable_get(t, o);
1015 lock_record_enter(t, lr);
1016 lock_inflate(t, o, lr);
1019 notify_flc_waiters(t, o);
1025 /* the lock is either contented or fat */
1027 if (IS_FAT_LOCK(lockword)) {
1029 lr = GET_FAT_LOCK(lockword);
1031 /* check for recursive entering */
1032 if (lr->owner == t) {
1037 /* acquire the mutex of the lock record */
1039 lock_record_enter(t, lr);
1041 assert(lr->count == 0);
1046 /****** inflation path ******/
1048 #if defined(ENABLE_JVMTI)
1049 /* Monitor Contended Enter */
1050 jvmti_MonitorContendedEntering(false, o);
1053 sable_flc_waiting(lockword, t, o);
1055 #if defined(ENABLE_JVMTI)
1056 /* Monitor Contended Entered */
1057 jvmti_MonitorContendedEntering(true, o);
1063 /* lock_monitor_exit ***********************************************************
1065 Decrement the counter of a (currently owned) monitor. If the counter
1066 reaches zero, release the monitor.
1068 If the current thread is not the owner of the monitor, an
1069 IllegalMonitorState exception is thrown.
1072 t............the current thread
1073 o............the object of which to exit the monitor
1076 true.........everything ok,
1077 false........an exception has been thrown
1079 *******************************************************************************/
1081 bool lock_monitor_exit(java_handle_t *o)
1088 exceptions_throw_nullpointerexception();
1094 thinlock = t->thinlock;
1096 /* We don't have to worry about stale values here, as any stale value */
1097 /* will indicate that we don't own the lock. */
1099 lockword = lock_lockword_get(t, o);
1101 /* most common case: we release a thin lock that we hold once */
1103 if (lockword == thinlock) {
1104 /* memory barrier for Java Memory Model */
1105 STORE_ORDER_BARRIER();
1106 lock_lockword_set(t, o, THIN_UNLOCKED);
1107 /* memory barrier for thin locking */
1110 /* check if there has been a flat lock contention on this object */
1113 DEBUGLOCKS(("thread %d saw flc bit", t->index));
1115 /* there has been a contention on this thin lock */
1116 notify_flc_waiters(t, o);
1122 /* next common case: we release a recursive lock, count > 0 */
1124 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
1125 lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
1129 /* either the lock is fat, or we don't hold it at all */
1131 if (IS_FAT_LOCK(lockword)) {
1135 lr = GET_FAT_LOCK(lockword);
1137 /* check if we own this monitor */
1138 /* We don't have to worry about stale values here, as any stale value */
1139 /* will be != t and thus fail this check. */
1141 if (lr->owner != t) {
1142 exceptions_throw_illegalmonitorstateexception();
1146 /* { the current thread `t` owns the lock record `lr` on object `o` } */
1148 if (lr->count != 0) {
1149 /* we had locked this one recursively. just decrement, it will */
1150 /* still be locked. */
1155 /* unlock this lock record */
1158 pthread_mutex_unlock(&(lr->mutex));
1163 /* legal thin lock cases have been handled above, so this is an error */
1165 exceptions_throw_illegalmonitorstateexception();
1171 /* lock_record_add_waiter ******************************************************
1173 Add a thread to the list of waiting threads of a lock record.
1176 lr...........the lock record
1177 thread.......the thread to add
1179 *******************************************************************************/
1181 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
1185 /* Allocate a waiter data structure. */
1187 w = NEW(lock_waiter_t);
1189 #if defined(ENABLE_STATISTICS)
1191 size_lock_waiter += sizeof(lock_waiter_t);
1194 /* Store the thread in the waiter structure. */
1198 /* Add the waiter as last entry to waiters list. */
1200 list_add_last(lr->waiters, w);
1204 /* lock_record_remove_waiter ***************************************************
1206 Remove a thread from the list of waiting threads of a lock record.
1209 lr...........the lock record
1210 t............the current thread
1213 The current thread must be the owner of the lock record.
1215 *******************************************************************************/
1217 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
1222 /* Get the waiters list. */
1226 for (w = list_first(l); w != NULL; w = list_next(l, w)) {
1227 if (w->thread == thread) {
1228 /* Remove the waiter entry from the list. */
1232 /* Free the waiter data structure. */
1234 FREE(w, lock_waiter_t);
1236 #if defined(ENABLE_STATISTICS)
1238 size_lock_waiter -= sizeof(lock_waiter_t);
1245 /* This should never happen. */
1247 vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
1251 /* lock_record_wait ************************************************************
1253 Wait on a lock record for a given (maximum) amount of time.
1256 t............the current thread
1257 lr...........the lock record
1258 millis.......milliseconds of timeout
1259 nanos........nanoseconds of timeout
1262 true.........we have been interrupted,
1263 false........everything ok
1266 The current thread must be the owner of the lock record.
1267 This is NOT checked by this function!
1269 *******************************************************************************/
1271 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1274 bool wasinterrupted = false;
1276 DEBUGLOCKS(("[lock_record_wait : lr=%p, t=%p, millis=%lld, nanos=%d]",
1277 lr, thread, millis, nanos));
1279 /* { the thread t owns the fat lock record lr on the object o } */
1281 /* register us as waiter for this object */
1283 lock_record_add_waiter(lr, thread);
1285 /* remember the old lock count */
1287 lockcount = lr->count;
1289 /* unlock this record */
1292 lock_record_exit(thread, lr);
1294 /* wait until notified/interrupted/timed out */
1296 threads_wait_with_timeout_relative(thread, millis, nanos);
1298 /* re-enter the monitor */
1300 lock_record_enter(thread, lr);
1302 /* remove us from the list of waiting threads */
1304 lock_record_remove_waiter(lr, thread);
1306 /* restore the old lock count */
1308 lr->count = lockcount;
1310 /* We can only be signaled OR interrupted, not both. If both flags
1311 are set, reset only signaled and leave the thread in
1312 interrupted state. Otherwise, clear both. */
1314 if (!thread->signaled) {
1315 wasinterrupted = thread->interrupted;
1316 thread->interrupted = false;
1319 thread->signaled = false;
1321 /* return if we have been interrupted */
1323 return wasinterrupted;
1327 /* lock_monitor_wait ***********************************************************
1329 Wait on an object for a given (maximum) amount of time.
1332 t............the current thread
1333 o............the object
1334 millis.......milliseconds of timeout
1335 nanos........nanoseconds of timeout
1338 The current thread must be the owner of the object's monitor.
1340 *******************************************************************************/
1342 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
1347 lockword = lock_lockword_get(t, o);
1349 /* check if we own this monitor */
1350 /* We don't have to worry about stale values here, as any stale value */
1351 /* will fail this check. */
1353 if (IS_FAT_LOCK(lockword)) {
1355 lr = GET_FAT_LOCK(lockword);
1357 if (lr->owner != t) {
1358 exceptions_throw_illegalmonitorstateexception();
1363 /* it's a thin lock */
1365 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1366 exceptions_throw_illegalmonitorstateexception();
1370 /* inflate this lock */
1372 lr = lock_hashtable_get(t, o);
1373 lock_record_enter(t, lr);
1374 lock_inflate(t, o, lr);
1376 notify_flc_waiters(t, o);
1379 /* { the thread t owns the fat lock record lr on the object o } */
1381 if (lock_record_wait(t, lr, millis, nanos))
1382 exceptions_throw_interruptedexception();
1386 /* lock_record_notify **********************************************************
1388 Notify one thread or all threads waiting on the given lock record.
1391 t............the current thread
1392 lr...........the lock record
1393 one..........if true, only notify one thread
1396 The current thread must be the owner of the lock record.
1397 This is NOT checked by this function!
1399 *******************************************************************************/
1401 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
/* Wake one (one == true) or all (one == false) threads waiting on lock
   record lr.  Ownership of lr by t is a precondition and is NOT checked
   here (the callers verify it). */
1405 threadobject *waitingthread;
1407 /* { the thread t owns the fat lock record lr on the object o } */
1409 /* Get the waiters list. */
1413 for (w = list_first(l); w != NULL; w = list_next(l, w)) {
1414 /* signal the waiting thread */
1416 waitingthread = w->thread;
1418 /* We must skip threads which have already been notified or
1419 interrupted. They will remove themselves from the list. */
1421 if (waitingthread->signaled || waitingthread->interrupted)
1424 /* Enter the wait-mutex. */
/* waitmutex serializes against the waiter's own sleep/wake protocol,
   so the sleeping/signaled fields are read and written consistently */
1426 pthread_mutex_lock(&(waitingthread->waitmutex));
1428 DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
1429 lr, t, waitingthread, waitingthread->sleeping, one));
1431 /* Signal the thread if it's sleeping. sleeping can be false
1432 when the waiting thread is blocked between giving up the
1433 monitor and entering the waitmutex. It will eventually
1434 observe that it's signaled and refrain from going to
1437 if (waitingthread->sleeping)
1438 pthread_cond_signal(&(waitingthread->waitcond));
1440 /* Mark the thread as signaled. */
/* set under waitmutex, after the signal, so a waiter that was not yet
   sleeping still observes signaled == true before it would block */
1442 waitingthread->signaled = true;
1444 /* Leave the wait-mutex. */
1446 pthread_mutex_unlock(&(waitingthread->waitmutex));
1448 /* if we should only wake one, we are done */
1456 /* lock_monitor_notify *********************************************************
1458 Notify one thread or all threads waiting on the given object.
1461 t............the current thread
1462 o............the object
1463 one..........if true, only notify one thread
1466 The current thread must be the owner of the object's monitor.
1468 *******************************************************************************/
1470 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
/* Notify one (one == true) or all (one == false) threads waiting on the
   monitor of object o.  Throws IllegalMonitorStateException when t does
   not own the monitor. */
1475 lockword = lock_lockword_get(t, o);
1477 /* check if we own this monitor */
1478 /* We don't have to worry about stale values here, as any stale value */
1479 /* will fail this check. */
1481 if (IS_FAT_LOCK(lockword)) {
1483 lr = GET_FAT_LOCK(lockword);
1485 if (lr->owner != t) {
/* fat lock owned by another thread: caller violated the contract */
1486 exceptions_throw_illegalmonitorstateexception();
1491 /* it's a thin lock */
1493 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
/* thin-lock word (recursion count masked off) does not match our
   thinlock pattern, so we are not the owner */
1494 exceptions_throw_illegalmonitorstateexception();
1498 /* no thread can wait on a thin lock, so there's nothing to do. */
/* (a waiter would have inflated the lock in lock_monitor_wait, so a
   still-thin lock implies an empty waiters list) */
1502 /* { the thread t owns the fat lock record lr on the object o } */
1504 lock_record_notify(t, lr, one);
1509 /*============================================================================*/
1510 /* INQUIRY FUNCTIONS */
1511 /*============================================================================*/
1514 /* lock_is_held_by_current_thread **********************************************
1516 Return true if the current thread owns the monitor of the given object.
1519 o............the object
1522 true, if the current thread holds the lock of this object.
1524 *******************************************************************************/
1526 bool lock_is_held_by_current_thread(java_handle_t *o)
1534 /* check if we own this monitor */
1535 /* We don't have to worry about stale values here, as any stale value */
1536 /* will fail this check. */
1538 lockword = lock_lockword_get(t, o);
1540 if (IS_FAT_LOCK(lockword)) {
1541 /* it's a fat lock */
1543 lr = GET_FAT_LOCK(lockword);
1545 return (lr->owner == t);
1548 /* it's a thin lock */
1550 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1556 /*============================================================================*/
1557 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1558 /*============================================================================*/
1561 /* lock_wait_for_object ********************************************************
1563 Wait for the given object.
1566 o............the object
1567 millis.......milliseconds to wait
1568 nanos........nanoseconds to wait
1570 *******************************************************************************/
1572 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1574 threadobject *thread;
1576 thread = THREADOBJECT;
1578 lock_monitor_wait(thread, o, millis, nanos);
1582 /* lock_notify_object **********************************************************
1584 Notify one thread waiting on the given object.
1587 o............the object
1589 *******************************************************************************/
1591 void lock_notify_object(java_handle_t *o)
1593 threadobject *thread;
1595 thread = THREADOBJECT;
1597 lock_monitor_notify(thread, o, true);
1601 /* lock_notify_all_object ******************************************************
1603 Notify all threads waiting on the given object.
1606 o............the object
1608 *******************************************************************************/
1610 void lock_notify_all_object(java_handle_t *o)
1612 threadobject *thread;
1614 thread = THREADOBJECT;
1616 lock_monitor_notify(thread, o, false);
1621 * These are local overrides for various environment variables in Emacs.
1622 * Please do not remove this and leave it at the end of the file, where
1623 * Emacs will automagically detect them.
1624 * ---------------------------------------------------------------------
1627 * indent-tabs-mode: t
1631 * vim:noexpandtab:sw=4:ts=4: