1 /* src/threads/lock.cpp - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007, 2008
4 CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
6 This file is part of CACAO.
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2, or (at
11 your option) any later version.
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
36 #include "mm/memory.h"
38 #include "native/llni.h"
40 #include "threads/lock.hpp"
41 #include "threads/mutex.hpp"
42 #include "threads/threadlist.hpp"
43 #include "threads/thread.hpp"
45 #include "toolbox/list.hpp"
47 #include "vm/exceptions.hpp"
48 #include "vm/finalizer.h"
49 #include "vm/global.h"
50 #include "vm/options.h"
51 #include "vm/string.hpp"
54 #if defined(ENABLE_STATISTICS)
55 # include "vm/statistics.h"
58 #if defined(ENABLE_VMLOG)
59 #include <vmlog_cacao.h>
62 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
66 /* includes for atomic instructions: */
68 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
69 #include "threads/posix/generic-primitives.h"
71 #include "threads/atomic.hpp"
74 #if defined(ENABLE_JVMTI)
75 #include "native/jvmti/cacaodbg.h"
78 #if defined(ENABLE_GC_BOEHM)
79 # include "mm/boehm-gc/include/gc.h"
83 /* debug **********************************************************************/
/* DEBUGLOCKS(format): print a lock-debugging message when the
   opt_DebugLocks option is enabled; the no-argument variant below
   compiles the macro away entirely.
   NOTE(review): the #if/#else framing and the print statement of this
   macro pair are elided in this listing. */
# define DEBUGLOCKS(format) \
	if (opt_DebugLocks) { \
# define DEBUGLOCKS(format)
97 /******************************************************************************/
99 /******************************************************************************/
101 /* number of lock records in the first pool allocated for a thread */
102 #define LOCK_INITIAL_LOCK_RECORDS 8
104 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
107 /******************************************************************************/
108 /* MACROS FOR THIN/FAT LOCKS */
109 /******************************************************************************/
111 /* We use a variant of the tasuki locks described in the paper
113 * Tamiya Onodera, Kiyokuni Kawachiya
114 * A Study of Locking Objects with Bimodal Fields
115 * Proceedings of the ACM OOPSLA '99, pp. 223-237
118 * The underlying thin locks are a variant of the thin locks described in
120 * Bacon, Konuru, Murthy, Serrano
121 * Thin Locks: Featherweight Synchronization for Java
122 * Proceedings of the ACM Conference on Programming Language Design and
123 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
126 * In thin lock mode the lockword looks like this:
128 * ,----------------------,-----------,---,
129 * | thread ID | count | 0 |
130 * `----------------------'-----------'---'
132 * thread ID......the 'index' of the owning thread, or 0
133 * count..........number of times the lock has been entered minus 1
134 * 0..............the shape bit is 0 in thin lock mode
136 * In fat lock mode it is basically a lock_record_t *:
138 * ,----------------------------------,---,
139 * | lock_record_t * (without LSB) | 1 |
140 * `----------------------------------'---'
142 * 1..............the shape bit is 1 in fat lock mode
145 #if SIZEOF_VOID_P == 8
146 #define THIN_LOCK_WORD_SIZE 64
148 #define THIN_LOCK_WORD_SIZE 32
151 #define THIN_LOCK_SHAPE_BIT 0x01
153 #define THIN_UNLOCKED 0
155 #define THIN_LOCK_COUNT_SHIFT 1
156 #define THIN_LOCK_COUNT_SIZE 8
157 #define THIN_LOCK_COUNT_INCR (1 << THIN_LOCK_COUNT_SHIFT)
158 #define THIN_LOCK_COUNT_MAX ((1 << THIN_LOCK_COUNT_SIZE) - 1)
159 #define THIN_LOCK_COUNT_MASK (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
161 #define THIN_LOCK_TID_SHIFT (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
162 #define THIN_LOCK_TID_SIZE (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
/* Predicates and accessors on a lockword. The shape bit (LSB) selects
   the mode: 0 = thin lock (thread ID | count | 0), 1 = fat lock
   (lock_record_t pointer with LSB set). */

#define IS_THIN_LOCK(lockword)  (!((lockword) & THIN_LOCK_SHAPE_BIT))
#define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)

#define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
#define MAKE_FAT_LOCK(ptr)      ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)

#define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
/* Fix: parenthesize the macro argument — the unparenthesized form
   ((unsigned) lockword >> ...) binds the cast before any operator in a
   compound argument expression. Thread indices are small, so the
   narrowing cast to unsigned keeps the index intact. */
#define GET_THREAD_INDEX(lockword) ((unsigned) (lockword) >> THIN_LOCK_TID_SHIFT)
174 /* global variables ***********************************************************/
176 /* hashtable mapping objects to lock records */
177 static lock_hashtable_t lock_hashtable;
180 /******************************************************************************/
182 /******************************************************************************/
184 static void lock_hashtable_init(void);
186 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
187 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
188 static void lock_record_enter(threadobject *t, lock_record_t *lr);
189 static void lock_record_exit(threadobject *t, lock_record_t *lr);
190 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
191 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
194 /*============================================================================*/
195 /* INITIALIZATION OF DATA STRUCTURES */
196 /*============================================================================*/
199 /* lock_init *******************************************************************
201 Initialize global data for locking.
203 *******************************************************************************/
207 /* initialize lock hashtable */
209 lock_hashtable_init();
211 #if defined(ENABLE_VMLOG)
212 vmlog_cacao_init_lock();
217 /* lock_pre_compute_thinlock ***************************************************
219 Pre-compute the thin lock value for a thread index.
222 index........the thread index (>= 1)
225 the thin lock value for this thread index
227 *******************************************************************************/
229 ptrint lock_pre_compute_thinlock(s4 index)
231 return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
235 /* lock_record_new *************************************************************
237 Allocate a lock record.
239 *******************************************************************************/
241 static lock_record_t *lock_record_new(void)
245 /* allocate the data structure on the C heap */
247 lr = NEW(lock_record_t);
249 #if defined(ENABLE_STATISTICS)
251 size_lock_record += sizeof(lock_record_t);
254 /* initialize the members */
259 lr->waiters = new List<threadobject*>();
261 #if defined(ENABLE_GC_CACAO)
262 /* register the lock object as weak reference with the GC */
264 gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
267 // Initialize the mutex.
268 lr->mutex = new Mutex();
270 DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
276 /* lock_record_free ************************************************************
281 lr....lock record to free
283 *******************************************************************************/
285 static void lock_record_free(lock_record_t *lr)
287 DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
289 // Destroy the mutex.
292 #if defined(ENABLE_GC_CACAO)
293 /* unregister the lock object reference with the GC */
295 gc_weakreference_unregister(&(lr->object));
298 // Free the waiters list.
301 /* Free the data structure. */
303 FREE(lr, lock_record_t);
305 #if defined(ENABLE_STATISTICS)
307 size_lock_record -= sizeof(lock_record_t);
312 /*============================================================================*/
313 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
314 /*============================================================================*/
316 /* lock_hashtable_init *********************************************************
318 Initialize the global hashtable mapping objects to lock records.
320 *******************************************************************************/
322 static void lock_hashtable_init(void)
324 lock_hashtable.mutex = new Mutex();
326 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
327 lock_hashtable.entries = 0;
328 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
330 #if defined(ENABLE_STATISTICS)
332 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
335 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
339 /* lock_hashtable_grow *********************************************************
341 Grow the lock record hashtable to about twice its current size and
344 *******************************************************************************/
346 /* must be called with hashtable mutex locked */
347 static void lock_hashtable_grow(void)
351 lock_record_t **oldtable;
352 lock_record_t **newtable;
359 /* allocate a new table */
361 oldsize = lock_hashtable.size;
362 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
364 DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
366 oldtable = lock_hashtable.ptr;
367 newtable = MNEW(lock_record_t *, newsize);
369 #if defined(ENABLE_STATISTICS)
371 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
374 MZERO(newtable, lock_record_t *, newsize);
376 /* rehash the entries */
378 for (i = 0; i < oldsize; i++) {
383 h = heap_hashcode(lr->object);
384 newslot = h % newsize;
386 lr->hashlink = newtable[newslot];
387 newtable[newslot] = lr;
393 /* replace the old table */
395 lock_hashtable.ptr = newtable;
396 lock_hashtable.size = newsize;
398 MFREE(oldtable, lock_record_t *, oldsize);
400 #if defined(ENABLE_STATISTICS)
402 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
407 /* lock_hashtable_cleanup ******************************************************
409 Removes (and frees) lock records which have a cleared object reference
410 from the hashtable. The locked object was reclaimed by the GC.
412 *******************************************************************************/
#if defined(ENABLE_GC_CACAO)
/* Sweep the lock-record hashtable: free every lock record whose weakly
   referenced object was reclaimed by the exact GC (lr->object cleared
   to NULL by gc_weakreference_register's machinery).
   NOTE(review): local declarations and parts of the chain-walking loop
   (prev/next bookkeeping, else-branches) are elided in this listing;
   comments annotate the visible statements only. */
void lock_hashtable_cleanup(void)
	/* lock the hashtable */

	Mutex_lock(lock_hashtable.mutex);

	/* search the hashtable for cleared references */

	for (i = 0; i < lock_hashtable.size; i++) {
		lr = lock_hashtable.ptr[i];

		/* remove lock records with cleared references */

		if (lr->object == NULL) {

			/* unlink the lock record from the hashtable */

			/* NOTE(review): presumably the head-of-chain case ... */
			lock_hashtable.ptr[i] = next;
			/* ... and the mid-chain case; the if/else framing is elided */
			prev->hashlink = next;

			/* free the lock record */

			lock_record_free(lr);

	/* unlock the hashtable */

	Mutex_unlock(lock_hashtable.mutex);
468 /* lock_hashtable_get **********************************************************
470 Find the lock record for the given object. If it does not exists,
471 yet, create it and enter it in the hashtable.
474 t....the current thread
475 o....the object to look up
478 the lock record to use for this object
480 *******************************************************************************/
482 #if defined(ENABLE_GC_BOEHM)
483 static void lock_record_finalizer(void *object, void *p);
/* Look up (or lazily create) the lock record for object o. Fast path:
   an already-fat lockword directly encodes the record. Slow path: probe
   the hashtable under its mutex, allocate and insert a new record when
   none exists, and grow the table past 4/3 load.
   NOTE(review): local declarations, the not-found conditional framing
   and the final return are elided in this listing. */
static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
	lockword = lock_lockword_get(t, o);

	/* already inflated: the lockword itself is the record pointer */
	if (IS_FAT_LOCK(lockword))
		return GET_FAT_LOCK(lockword);

	// Lock the hashtable.
	lock_hashtable.mutex->lock();

	/* lookup the lock record in the hashtable */

	LLNI_CRITICAL_START_THREAD(t);
	slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
	lr = lock_hashtable.ptr[slot];

	for (; lr != NULL; lr = lr->hashlink) {
		if (lr->object == LLNI_DIRECT(o))
	LLNI_CRITICAL_END_THREAD(t);

	/* not found, we must create a new one */

	lr = lock_record_new();

	LLNI_CRITICAL_START_THREAD(t);
	lr->object = LLNI_DIRECT(o);
	LLNI_CRITICAL_END_THREAD(t);

#if defined(ENABLE_GC_BOEHM)
	/* register new finalizer to clean up the lock record */

	GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);

	/* enter it in the hashtable (prepend to the bucket's chain) */

	lr->hashlink = lock_hashtable.ptr[slot];
	lock_hashtable.ptr[slot] = lr;
	lock_hashtable.entries++;

	/* check whether the hash should grow */

	if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
		lock_hashtable_grow();

	// Unlock the hashtable.
	lock_hashtable.mutex->unlock();

	/* return the new lock record */
549 /* lock_hashtable_remove *******************************************************
551 Remove the lock record for the given object from the hashtable
552 and free it afterwards.
555 t....the current thread
556 o....the object to look up
558 *******************************************************************************/
/* Remove the (fat) lock record of object o from the hashtable and free
   it. The lockword must already be inflated (asserted below).
   NOTE(review): local declarations and parts of the unlink conditional
   (if/else around the head-of-chain case, loop break) are elided in
   this listing. */
static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
	lock_record_t *tmplr;

	// Lock the hashtable.
	lock_hashtable.mutex->lock();

	/* get lock record */

	lockword = lock_lockword_get(t, o);

	/* this function is only called for inflated locks */
	assert(IS_FAT_LOCK(lockword));

	lr = GET_FAT_LOCK(lockword);

	/* remove the lock-record from the hashtable */

	LLNI_CRITICAL_START_THREAD(t);
	slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
	tmplr = lock_hashtable.ptr[slot];
	LLNI_CRITICAL_END_THREAD(t);

	/* special handling if it's the first in the chain */

	lock_hashtable.ptr[slot] = lr->hashlink;

	/* otherwise walk the chain and splice lr out */
	for (; tmplr != NULL; tmplr = tmplr->hashlink) {
		if (tmplr->hashlink == lr) {
			tmplr->hashlink = lr->hashlink;

	/* lr must have been found in the chain */
	assert(tmplr != NULL);

	/* decrease entry count */

	lock_hashtable.entries--;

	// Unlock the hashtable.
	lock_hashtable.mutex->unlock();

	/* free the lock record */

	lock_record_free(lr);
614 /* lock_record_finalizer *******************************************************
616 XXX Remove me for exact GC.
618 *******************************************************************************/
/* Boehm-GC finalizer installed by lock_hashtable_get: runs the object's
   own Java finalizer (if any) and then removes/frees the object's lock
   record. XXX Remove me for exact GC.
   NOTE(review): local declarations (o, c) and the debug-print epilogue
   are elided in this listing. */
static void lock_record_finalizer(void *object, void *p)
	o = (java_handle_t *) object;

#if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
	/* XXX this is only a dirty hack to make Boehm work with handles */

	o = LLNI_WRAP((java_object_t *) o);

	LLNI_class_get(o, c);

	if (opt_DebugFinalizer) {
		log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);

	/* check for a finalizer function */

	if (c->finalizer != NULL)
		finalizer_run(object, p);

	/* remove the lock-record entry from the hashtable and free it */

	lock_hashtable_remove(THREADOBJECT, o);
656 /*============================================================================*/
657 /* OBJECT LOCK INITIALIZATION */
658 /*============================================================================*/
661 /* lock_init_object_lock *******************************************************
663 Initialize the monitor pointer of the given object. The monitor gets
664 initialized to an unlocked state.
666 *******************************************************************************/
668 void lock_init_object_lock(java_object_t *o)
672 o->lockword = THIN_UNLOCKED;
676 /*============================================================================*/
677 /* LOCKING ALGORITHM */
678 /*============================================================================*/
681 /* lock_lockword_get ***********************************************************
683 Get the lockword for the given object.
686 t............the current thread
687 o............the object
689 *******************************************************************************/
691 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
695 LLNI_CRITICAL_START_THREAD(t);
696 lockword = LLNI_DIRECT(o)->lockword;
697 LLNI_CRITICAL_END_THREAD(t);
703 /* lock_lockword_set ***********************************************************
705 Set the lockword for the given object.
708 t............the current thread
709 o............the object
710 lockword.....the new lockword value
712 *******************************************************************************/
714 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
716 LLNI_CRITICAL_START_THREAD(t);
717 LLNI_DIRECT(o)->lockword = lockword;
718 LLNI_CRITICAL_END_THREAD(t);
722 /* lock_record_enter ***********************************************************
724 Enter the lock represented by the given lock record.
727 t.................the current thread
728 lr................the lock record
730 *******************************************************************************/
/* Blocking acquisition of the mutex backing this (fat) lock record by
   thread t.
   NOTE(review): the function body is elided in this listing; only the
   signature is visible. */
static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
739 /* lock_record_exit ************************************************************
741 Release the lock represented by the given lock record.
744 t.................the current thread
745 lr................the lock record
748 The current thread must own the lock represented by this lock record.
749 This is NOT checked by this function!
751 *******************************************************************************/
/* Release the mutex backing this (fat) lock record. The caller must own
   the lock record — this is NOT checked here (see banner above).
   NOTE(review): the function body is elided in this listing; only the
   signature is visible. */
static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
760 /* lock_inflate ****************************************************************
762 Inflate the lock of the given object. This may only be called by the
763 owner of the monitor of the object.
766 t............the current thread
767 o............the object of which to inflate the lock
768 lr...........the lock record to install. The current thread must
769 own the lock of this lock record!
772 The current thread must be the owner of this object's monitor AND
773 of the lock record's lock!
775 *******************************************************************************/
/* Turn the thin lock of object o into the fat lock lr. Caller must own
   both o's monitor and lr's mutex (see banner above). The recursion
   count is carried over from the thin lockword into lr->count.
   NOTE(review): the local declaration of `lockword` and the early
   return closing the IS_FAT_LOCK branch are elided in this listing. */
static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
	/* get the current lock count */

	lockword = lock_lockword_get(t, o);

	if (IS_FAT_LOCK(lockword)) {
		/* already inflated — it must be to this very record */
		assert(GET_FAT_LOCK(lockword) == lr);

	/* we must hold the thin lock (TID bits match our pre-computed word) */
	assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);

	/* copy the count from the thin lock */

	lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;

	DEBUGLOCKS(("[lock_inflate : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
		lr, t, o, lockword, lr->count));

	/* publish the fat lock: record pointer with the shape bit set */
	lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
/* Flat-lock-contention (FLC) wait, after the SableVM scheme: a thread
   that lost the thin-lock race sets the holder's flc_bit, enqueues
   itself on the holder's flc_list and waits on its own flc_cond until
   the holder inflates the lock and broadcasts (see notify_flc_waiters).
   NOTE(review): local declarations (index, old_flc), the NULL check on
   t_other, the setting of t->flc_object, and several brace/loop framing
   lines are elided in this listing. */
static void sable_flc_waiting(ptrint lockword, threadobject *t, java_handle_t *o)
	threadobject *t_other;

	index = GET_THREAD_INDEX(lockword);
	t_other = ThreadList::get_thread_by_index(index);

	/* failure, TODO: add statistics */

	t_other->flc_lock->lock();
	old_flc = t_other->flc_bit;
	t_other->flc_bit = true;

	DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d",
		t->index, t_other->index));

	// Set FLC bit first, then read the lockword again.
	Atomic::memory_barrier();

	lockword = lock_lockword_get(t, o);

	/* Lockword is still the way it was seen before */
	if (IS_THIN_LOCK(lockword) && (GET_THREAD_INDEX(lockword) == index))

		/* Add tuple (t, o) to the other thread's FLC list */

		t->flc_next = t_other->flc_list;
		t_other->flc_list = t;

			threadobject *current;

			// Wait until another thread sees the flc bit and notifies
			// us via our flc_cond.
			t->flc_cond->wait(t_other->flc_lock);

			/* Traverse FLC list looking if we're still there */
			current = t_other->flc_list;
			while (current && current != t)
				current = current->flc_next;

			/* not in list anymore, can stop waiting */

			/* We are still in the list -- the other thread cannot have seen
			   the FLC bit yet */
			assert(t_other->flc_bit);

		t->flc_object = NULL; /* for garbage collector? */

	/* restore the holder's previous FLC-bit value */
	t_other->flc_bit = old_flc;

	t_other->flc_lock->unlock();
/* Wake all threads parked in sable_flc_waiting on this thread's FLC
   list: inflate each contended (still-thin) lock so waiters can block
   on the fat lock's mutex, then broadcast their flc_cond.
   NOTE(review): the flc_lock acquisition, the loop framing around the
   list walk and the list-reset lines are elided in this listing. */
static void notify_flc_waiters(threadobject *t, java_handle_t *o)
	threadobject *current;

	current = t->flc_list;

		/* if o is given, skip entries waiting on a different object */
		if (current->flc_object != o)

		/* The object has to be inflated so the other threads can properly
		   block on it. */

		/* Only if not already inflated */
		ptrint lockword = lock_lockword_get(t, current->flc_object);
		if (IS_THIN_LOCK(lockword)) {
			lock_record_t *lr = lock_hashtable_get(t, current->flc_object);
			lock_record_enter(t, lr);

			DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
				t->index, (void*) current->flc_object, (void*) lr));

			lock_inflate(t, current->flc_object, lr);

		// Wake the waiting threads.
		current->flc_cond->broadcast();

		current = current->flc_next;

	t->flc_lock->unlock();
908 /* lock_monitor_enter **********************************************************
910 Acquire the monitor of the given object. If the current thread already
911 owns the monitor, the lock counter is simply increased.
913 This function blocks until it can acquire the monitor.
916 t............the current thread
917 o............the object of which to enter the monitor
920 true.........the lock has been successfully acquired
921 false........an exception has been thrown
923 *******************************************************************************/
/* Acquire the monitor of object o for the current thread; blocks until
   acquired. Returns true on success, false when an exception (e.g.
   NullPointerException for o == NULL) has been thrown.
   NOTE(review): local declarations, the o == NULL conditional framing,
   the `return true/false` statements and several closing braces are
   elided in this listing; comments annotate the visible statements. */
bool lock_monitor_enter(java_handle_t *o)
	/* CAUTION: This code assumes that ptrint is unsigned! */

		/* null receiver: throw and report failure */
		exceptions_throw_nullpointerexception();

	/* pre-computed thin-lock word for this thread (TID | count=0 | 0) */
	thinlock = t->thinlock;

	/* most common case: try to thin-lock an unlocked object */

	LLNI_CRITICAL_START_THREAD(t);
	lockword = Atomic::compare_and_swap(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
	LLNI_CRITICAL_END_THREAD(t);

	if (lockword == THIN_UNLOCKED) {
		/* success. we locked it */
		// The Java Memory Model requires an instruction barrier here
		// (because of the CAS above).
		Atomic::instruction_barrier();

	/* next common case: recursive lock with small recursion count */
	/* We don't have to worry about stale values here, as any stale value */
	/* will indicate another thread holding the lock (or an inflated lock) */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		/* we own this monitor */
		/* check the current recursion count */

		/* XOR cancels the TID bits, leaving only the count field */
		if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))

			/* the recursion count is low enough */

			lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);

			/* success. we locked it */

			/* recursion count overflow: inflate to a fat lock */

			lr = lock_hashtable_get(t, o);
			lock_record_enter(t, lr);
			lock_inflate(t, o, lr);

			notify_flc_waiters(t, o);

	/* the lock is either contended or fat */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		/* check for recursive entering */
		if (lr->owner == t) {

		/* acquire the mutex of the lock record */

		lock_record_enter(t, lr);

		/* freshly acquired fat lock must have a zero recursion count */
		assert(lr->count == 0);

	/****** inflation path ******/

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Enter */
	jvmti_MonitorContendedEntering(false, o);

	/* lost the thin-lock race: park via the FLC protocol */
	sable_flc_waiting(lockword, t, o);

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Entered */
	jvmti_MonitorContendedEntering(true, o);
1026 /* lock_monitor_exit ***********************************************************
1028 Decrement the counter of a (currently owned) monitor. If the counter
1029 reaches zero, release the monitor.
1031 If the current thread is not the owner of the monitor, an
1032 IllegalMonitorState exception is thrown.
1035 t............the current thread
1036 o............the object of which to exit the monitor
1039 true.........everything ok,
1040 false........an exception has been thrown
1042 *******************************************************************************/
/* Release (one level of) the monitor of object o. Returns true on
   success, false when an exception has been thrown (NullPointer- or
   IllegalMonitorStateException).
   NOTE(review): local declarations, the o == NULL framing, the
   `return true/false` statements, lr->count-- and lr->owner = NULL are
   elided in this listing; comments annotate the visible statements. */
bool lock_monitor_exit(java_handle_t *o)
		/* null receiver: throw and report failure */
		exceptions_throw_nullpointerexception();

	thinlock = t->thinlock;

	/* We don't have to worry about stale values here, as any stale value */
	/* will indicate that we don't own the lock. */

	lockword = lock_lockword_get(t, o);

	/* most common case: we release a thin lock that we hold once */

	if (lockword == thinlock) {
		// Memory barrier for Java Memory Model.
		Atomic::write_memory_barrier();
		lock_lockword_set(t, o, THIN_UNLOCKED);
		// Memory barrier for thin locking.
		Atomic::memory_barrier();

		/* check if there has been a flat lock contention on this object */

			DEBUGLOCKS(("thread %d saw flc bit", t->index));

			/* there has been a contention on this thin lock */
			notify_flc_waiters(t, o);

	/* next common case: we release a recursive lock, count > 0 */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);

	/* either the lock is fat, or we don't hold it at all */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		/* check if we own this monitor */
		/* We don't have to worry about stale values here, as any stale value */
		/* will be != t and thus fail this check. */

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();

		/* { the current thread `t` owns the lock record `lr` on object `o` } */

		if (lr->count != 0) {
			/* we had locked this one recursively. just decrement, it will */
			/* still be locked. */

		/* unlock this lock record */

		lr->mutex->unlock();

	/* legal thin lock cases have been handled above, so this is an error */

	exceptions_throw_illegalmonitorstateexception();
1134 /* lock_record_add_waiter ******************************************************
1136 Add a thread to the list of waiting threads of a lock record.
1139 lr...........the lock record
1140 thread.......the thread to add
1142 *******************************************************************************/
1144 static void lock_record_add_waiter(lock_record_t *lr, threadobject* t)
1146 // Add the thread as last entry to waiters list.
1147 lr->waiters->push_back(t);
1149 #if defined(ENABLE_STATISTICS)
1151 size_lock_waiter += sizeof(threadobject*);
1156 /* lock_record_remove_waiter ***************************************************
1158 Remove a thread from the list of waiting threads of a lock record.
1161 lr...........the lock record
1162 t............the current thread
1165 The current thread must be the owner of the lock record.
1167 *******************************************************************************/
1169 static void lock_record_remove_waiter(lock_record_t *lr, threadobject* t)
1171 // Remove the thread from the waiters.
1172 lr->waiters->remove(t);
1174 #if defined(ENABLE_STATISTICS)
1176 size_lock_waiter -= sizeof(threadobject*);
1181 /* lock_record_wait ************************************************************
1183 Wait on a lock record for a given (maximum) amount of time.
1186 t............the current thread
1187 lr...........the lock record
1188 millis.......milliseconds of timeout
1189 nanos........nanoseconds of timeout
1192 true.........we have been interrupted,
1193 false........everything ok
1196 The current thread must be the owner of the lock record.
1197 This is NOT checked by this function!
1199 *******************************************************************************/
/* Wait on fat lock record lr for at most millis/nanos: enqueue as a
   waiter, release the record, sleep, re-acquire, dequeue, and restore
   the recursion count. Returns true iff the wait ended by interruption.
   NOTE(review): the declaration of `lockcount` and some brace lines are
   elided in this listing. The prototype names this parameter `t`; the
   definition uses `thread`. */
static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
	bool wasinterrupted = false;

	DEBUGLOCKS(("[lock_record_wait : lr=%p, t=%p, millis=%lld, nanos=%d]",
		lr, thread, millis, nanos));

	/* { the thread t owns the fat lock record lr on the object o } */

	/* register us as waiter for this object */

	lock_record_add_waiter(lr, thread);

	/* remember the old lock count */

	lockcount = lr->count;

	/* unlock this record */

	lock_record_exit(thread, lr);

	/* wait until notified/interrupted/timed out */

	threads_wait_with_timeout_relative(thread, millis, nanos);

	/* re-enter the monitor */

	lock_record_enter(thread, lr);

	/* remove us from the list of waiting threads */

	lock_record_remove_waiter(lr, thread);

	/* restore the old lock count */

	lr->count = lockcount;

	/* We can only be signaled OR interrupted, not both. If both flags
	   are set, reset only signaled and leave the thread in
	   interrupted state. Otherwise, clear both. */

	if (!thread->signaled) {
		wasinterrupted = thread->interrupted;
		thread->interrupted = false;

	thread->signaled = false;

	/* return if we have been interrupted */

	return wasinterrupted;
1257 /* lock_monitor_wait ***********************************************************
1259 Wait on an object for a given (maximum) amount of time.
1262 t............the current thread
1263 o............the object
1264 millis.......milliseconds of timeout
1265 nanos........nanoseconds of timeout
1268 The current thread must be the owner of the object's monitor.
1270 *******************************************************************************/
/* Object.wait() implementation: verify ownership, inflate a thin lock
   (waiting requires a fat lock record), then delegate to
   lock_record_wait; throws IllegalMonitorStateException when t does not
   own o's monitor, InterruptedException when the wait was interrupted.
   NOTE(review): local declarations, `return` statements and else-branch
   framing are elided in this listing. */
static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
	lockword = lock_lockword_get(t, o);

	/* check if we own this monitor */
	/* We don't have to worry about stale values here, as any stale value */
	/* will fail this check. */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();

		/* it's a thin lock */

		if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
			exceptions_throw_illegalmonitorstateexception();

		/* inflate this lock */

		lr = lock_hashtable_get(t, o);
		lock_record_enter(t, lr);
		lock_inflate(t, o, lr);

		notify_flc_waiters(t, o);

	/* { the thread t owns the fat lock record lr on the object o } */

	if (lock_record_wait(t, lr, millis, nanos))
		exceptions_throw_interruptedexception();
1316 /* lock_record_notify **********************************************************
1318 Notify one thread or all threads waiting on the given lock record.
1321 t............the current thread
1322 lr...........the lock record
1323 one..........if true, only notify one thread
1326 The current thread must be the owner of the lock record.
1327 This is NOT checked by this function!
1329 *******************************************************************************/
/* Signal one (one == true) or all waiters of lock record lr. Caller
   must own the record (see banner above). Already-signaled waiters are
   skipped; they will dequeue themselves.
   NOTE(review): the loop-ending `if (one) break;` and closing braces
   are elided in this listing. */
static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
	/* { the thread t owns the fat lock record lr on the object o } */

	for (List<threadobject*>::iterator it = lr->waiters->begin(); it != lr->waiters->end(); it++) {
		threadobject* waiter = *it;

		// We must skip threads which have already been notified. They
		// will remove themselves from the list.
		if (waiter->signaled)

		// Enter the wait-mutex.
		waiter->waitmutex->lock();

		DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, one=%d]", lr, t, waiter, one));

		// Signal the waiter.
		waiter->waitcond->signal();

		// Mark the thread as signaled.
		waiter->signaled = true;

		// Leave the wait-mutex.
		waiter->waitmutex->unlock();

		// If we should only wake one thread, we are done.
1364 /* lock_monitor_notify *********************************************************
1366 Notify one thread or all threads waiting on the given object.
1369 t............the current thread
1370 o............the object
1371 one..........if true, only notify one thread
1374 The current thread must be the owner of the object's monitor.
1376 *******************************************************************************/
/* Object.notify()/notifyAll() implementation: verify that t owns o's
   monitor and delegate to lock_record_notify. A thin lock can have no
   waiters, so nothing is done in that case. Throws
   IllegalMonitorStateException on ownership failure.
   NOTE(review): local declarations, `return` statements and else-branch
   framing are elided in this listing. */
static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
	lockword = lock_lockword_get(t, o);

	/* check if we own this monitor */
	/* We don't have to worry about stale values here, as any stale value */
	/* will fail this check. */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();

		/* it's a thin lock */

		if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
			exceptions_throw_illegalmonitorstateexception();

		/* no thread can wait on a thin lock, so there's nothing to do. */

	/* { the thread t owns the fat lock record lr on the object o } */

	lock_record_notify(t, lr, one);
1417 /*============================================================================*/
1418 /* INQUIRY FUNCTIONS                                                         */
1419 /*============================================================================*/
1422 /* lock_is_held_by_current_thread **********************************************
1424 Return true if the current thread owns the monitor of the given object.
1427 o............the object
1430 true, if the current thread holds the lock of this object.
1432 *******************************************************************************/
1434 bool lock_is_held_by_current_thread(java_handle_t *o)
1442 /* check if we own this monitor */
1443 /* We don't have to worry about stale values here, as any stale value */
1444 /* will fail this check. */
1446 lockword = lock_lockword_get(t, o);
1448 if (IS_FAT_LOCK(lockword)) {
1449 /* it's a fat lock */
1451 lr = GET_FAT_LOCK(lockword);
1453 return (lr->owner == t);
1456 /* it's a thin lock */
1458 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1464 /*============================================================================*/
1465 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1466 /*============================================================================*/
1469 /* lock_wait_for_object ********************************************************
1471 Wait for the given object.
1474 o............the object
1475 millis.......milliseconds to wait
1476 nanos........nanoseconds to wait
1478 *******************************************************************************/
1480 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1482 threadobject *thread;
1484 thread = THREADOBJECT;
1486 lock_monitor_wait(thread, o, millis, nanos);
1490 /* lock_notify_object **********************************************************
1492 Notify one thread waiting on the given object.
1495 o............the object
1497 *******************************************************************************/
1499 void lock_notify_object(java_handle_t *o)
1501 threadobject *thread;
1503 thread = THREADOBJECT;
1505 lock_monitor_notify(thread, o, true);
1509 /* lock_notify_all_object ******************************************************
1511 Notify all threads waiting on the given object.
1514 o............the object
1516 *******************************************************************************/
1518 void lock_notify_all_object(java_handle_t *o)
1520 threadobject *thread;
1522 thread = THREADOBJECT;
1524 lock_monitor_notify(thread, o, false);
1529 * These are local overrides for various environment variables in Emacs.
1530 * Please do not remove this and leave it at the end of the file, where
1531 * Emacs will automagically detect them.
1532 * ---------------------------------------------------------------------
1535 * indent-tabs-mode: t
1539 * vim:noexpandtab:sw=4:ts=4: