1 /* src/threads/native/lock.c - lock implementation
3 Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
4 C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5 E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6 J. Wenninger, Institut f. Computersprachen - TU Wien
8 This file is part of CACAO.
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2, or (at
13 your option) any later version.
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 $Id: lock.c 8295 2007-08-11 17:57:24Z michi $
40 #include "mm/memory.h"
42 #include "threads/native/lock.h"
43 #include "threads/native/threads.h"
45 #include "vm/global.h"
46 #include "vm/exceptions.h"
47 #include "vm/finalizer.h"
48 #include "vm/stringlocal.h"
51 #include "vmcore/options.h"
53 #if defined(ENABLE_STATISTICS)
54 # include "vmcore/statistics.h"
57 #if defined(ENABLE_VMLOG)
58 #include <vmlog_cacao.h>
61 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
65 /* includes for atomic instructions: */
67 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
68 #include "threads/native/generic-primitives.h"
70 #include "machine-instr.h"
73 #if defined(ENABLE_JVMTI)
74 #include "native/jvmti/cacaodbg.h"
77 #if defined(ENABLE_GC_BOEHM)
78 # include "mm/boehm-gc/include/gc.h"
82 /******************************************************************************/
83 /* DEBUGGING MACROS */
84 /******************************************************************************/
86 /* #define LOCK_VERBOSE */
88 #if defined(LOCK_VERBOSE)
89 #define LOCK_LOG(args) do { printf args; fflush(stdout); } while (0)
91 #define LOCK_LOG(args)
95 /******************************************************************************/
97 /******************************************************************************/
99 /* number of lock records in the first pool allocated for a thread */
100 #define LOCK_INITIAL_LOCK_RECORDS 8
102 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
104 #define LOCK_HASH(obj) ((ptrint)(obj))
106 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
107 ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
109 /* CAUTION: oldvalue is evaluated twice! */
110 #define COMPARE_AND_SWAP_SUCCEEDS(address, oldvalue, newvalue) \
111 (compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)) == (long)(oldvalue))
114 /******************************************************************************/
115 /* MACROS FOR THE FLAT LOCK CONTENTION BIT */
116 /******************************************************************************/
118 #define LOCK_SET_FLC_BIT(obj) ((obj)->hdrflags |= HDRFLAG_FLC)
119 #define LOCK_CLEAR_FLC_BIT(obj) ((obj)->hdrflags &= ~ HDRFLAG_FLC)
120 #define LOCK_TEST_FLC_BIT(obj) ((obj)->hdrflags & HDRFLAG_FLC)
123 /******************************************************************************/
124 /* MACROS FOR THIN/FAT LOCKS */
125 /******************************************************************************/
127 /* We use a variant of the tasuki locks described in the paper
129 * Tamiya Onodera, Kiyokuni Kawachiya
130 * A Study of Locking Objects with Bimodal Fields
131 * Proceedings of the ACM OOPSLA '99, pp. 223-237
134 * The underlying thin locks are a variant of the thin locks described in
136 * Bacon, Konuru, Murthy, Serrano
137 * Thin Locks: Featherweight Synchronization for Java
138 * Proceedings of the ACM Conference on Programming Language Design and
139 * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
142 * In thin lock mode the lockword (monitorPtr) looks like this:
144 * ,----------------------,-----------,---,
145 * | thread ID | count | 0 |
146 * `----------------------'-----------'---´
148 * thread ID......the 'index' of the owning thread, or 0
149 * count..........number of times the lock has been entered minus 1
150 * 0..............the shape bit is 0 in thin lock mode
152 * In fat lock mode it is basically a lock_record_t *:
154 * ,----------------------------------,---,
155 * | lock_record_t * (without LSB) | 1 |
156 * `----------------------------------'---´
158 * 1..............the shape bit is 1 in fat lock mode
161 #if SIZEOF_VOID_P == 8
162 #define THIN_LOCK_WORD_SIZE 64
164 #define THIN_LOCK_WORD_SIZE 32
167 #define THIN_LOCK_SHAPE_BIT 0x01
169 #define THIN_UNLOCKED 0
171 #define THIN_LOCK_COUNT_SHIFT 1
172 #define THIN_LOCK_COUNT_SIZE 8
173 #define THIN_LOCK_COUNT_INCR (1 << THIN_LOCK_COUNT_SHIFT)
174 #define THIN_LOCK_COUNT_MAX ((1 << THIN_LOCK_COUNT_SIZE) - 1)
175 #define THIN_LOCK_COUNT_MASK (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
177 #define THIN_LOCK_TID_SHIFT (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
178 #define THIN_LOCK_TID_SIZE (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
180 #define IS_THIN_LOCK(lockword) (!((lockword) & THIN_LOCK_SHAPE_BIT))
181 #define IS_FAT_LOCK(lockword) ((lockword) & THIN_LOCK_SHAPE_BIT)
183 #define GET_FAT_LOCK(lockword) ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
184 #define MAKE_FAT_LOCK(ptr) ((ptrint)(ptr) | THIN_LOCK_SHAPE_BIT)
186 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
189 /* global variables ***********************************************************/
191 /* hashtable mapping objects to lock records */
192 static lock_hashtable_t lock_hashtable;
195 /******************************************************************************/
197 /******************************************************************************/
199 static void lock_hashtable_init(void);
201 static void lock_record_enter(threadobject *t, lock_record_t *lr);
202 static void lock_record_exit(threadobject *t, lock_record_t *lr);
203 static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
204 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
207 /*============================================================================*/
208 /* INITIALIZATION OF DATA STRUCTURES */
209 /*============================================================================*/
212 /* lock_init *******************************************************************
214 Initialize global data for locking.
216 *******************************************************************************/
220 /* initialize lock hashtable */
222 lock_hashtable_init();
224 #if defined(ENABLE_VMLOG)
225 vmlog_cacao_init_lock();
230 /* lock_pre_compute_thinlock ***************************************************
232 Pre-compute the thin lock value for a thread index.
235 index........the thread index (>= 1)
238 the thin lock value for this thread index
240 *******************************************************************************/
242 ptrint lock_pre_compute_thinlock(s4 index)
	/* Shift the thread index into the TID field of the lock word; the
	   count field and the shape bit remain 0, so the value denotes an
	   uncontended thin lock held once by that thread. */
244 return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
248 /* lock_record_new *************************************************************
250 Allocate a lock record.
252 *******************************************************************************/
254 static lock_record_t *lock_record_new(void)
	/* Allocate a fresh lock record on the C heap and initialize its
	   mutex. Called from lock_hashtable_get when an object needs a
	   fat lock. */
258 /* allocate the data structure on the C heap */
260 lr = NEW(lock_record_t);
262 #if defined(ENABLE_STATISTICS)
	/* account for the allocation in statistics builds only */
264 size_lock_record += sizeof(lock_record_t);
267 /* initialize the members */
274 /* initialize the mutex */
276 pthread_mutex_init(&(lr->mutex), NULL);
282 /* lock_record_free ************************************************************
287 lr....lock record to free
289 *******************************************************************************/
291 static void lock_record_free(lock_record_t *lr)
	/* Counterpart of lock_record_new: destroy the record's mutex and
	   return its memory. The caller must ensure the record is no
	   longer reachable (see lock_record_finalizer). */
294 /* check the members */
302 /* destroy the mutex */
304 pthread_mutex_destroy(&(lr->mutex));
306 /* free the data structure */
308 FREE(lr, lock_record_t);
310 #if defined(ENABLE_STATISTICS)
	/* undo the size accounting done in lock_record_new */
312 size_lock_record -= sizeof(lock_record_t);
317 /*============================================================================*/
318 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
319 /*============================================================================*/
321 /* lock_hashtable_init *********************************************************
323 Initialize the global hashtable mapping objects to lock records.
325 *******************************************************************************/
327 static void lock_hashtable_init(void)
	/* Set up the global object -> lock record hashtable with its
	   initial (prime) size and an empty, zeroed slot array. */
329 pthread_mutex_init(&(lock_hashtable.mutex), NULL);
331 lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
332 lock_hashtable.entries = 0;
333 lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
335 #if defined(ENABLE_STATISTICS)
337 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
	/* slots must start out NULL: lookups walk the hashlink chains */
340 MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
344 /* lock_hashtable_grow *********************************************************
346 Grow the lock record hashtable to about twice its current size and
349 *******************************************************************************/
351 /* must be called with hashtable mutex locked */
352 static void lock_hashtable_grow(void)
356 lock_record_t **oldtable;
357 lock_record_t **newtable;
364 /* allocate a new table */
366 oldsize = lock_hashtable.size;
367 newsize = oldsize*2 + 1; /* XXX should use prime numbers */
369 LOCK_LOG(("growing lock hashtable to size %d\n", newsize));
371 oldtable = lock_hashtable.ptr;
372 newtable = MNEW(lock_record_t *, newsize);
374 #if defined(ENABLE_STATISTICS)
376 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
379 MZERO(newtable, lock_record_t *, newsize);
381 /* rehash the entries */
383 for (i = 0; i < oldsize; i++) {
388 h = LOCK_HASH(lr->object);
389 newslot = h % newsize;
	/* prepend to the new chain; chain order is irrelevant for lookup */
391 lr->hashlink = newtable[newslot];
392 newtable[newslot] = lr;
398 /* replace the old table */
400 lock_hashtable.ptr = newtable;
401 lock_hashtable.size = newsize;
403 MFREE(oldtable, lock_record_t *, oldsize);
405 #if defined(ENABLE_STATISTICS)
407 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
412 /* lock_hashtable_get **********************************************************
414 Find the lock record for the given object. If it does not exist,
415 yet, create it and enter it in the hashtable.
418 o....the object to look up
421 the lock record to use for this object
423 *******************************************************************************/
425 #if defined(ENABLE_GC_BOEHM)
426 static void lock_record_finalizer(void *object, void *p);
429 static lock_record_t *lock_hashtable_get(java_object_t *o)
	/* Return the lock record for object o, creating and registering a
	   new one if none exists yet. */
435 lockword = (ptrint) o->monitorPtr;
	/* fast path: the lock is already fat, the lockword itself holds
	   the record pointer, so no hashtable access is needed */
437 if (IS_FAT_LOCK(lockword))
438 return GET_FAT_LOCK(lockword);
440 /* lock the hashtable */
442 pthread_mutex_lock(&(lock_hashtable.mutex));
444 /* lookup the lock record in the hashtable */
446 slot = LOCK_HASH(o) % lock_hashtable.size;
447 lr = lock_hashtable.ptr[slot];
449 for (; lr != NULL; lr = lr->hashlink) {
450 if (lr->object == o) {
451 pthread_mutex_unlock(&(lock_hashtable.mutex));
456 /* not found, we must create a new one */
458 lr = lock_record_new();
462 #if defined(ENABLE_GC_BOEHM)
463 /* register new finalizer to clean up the lock record */
465 GC_REGISTER_FINALIZER(o, lock_record_finalizer, 0, 0, 0);
468 LOCK_LOG(("thread %d allocated for %p new lr %p\n",
469 t->index, (void*) o, (void*) lr));
471 /* enter it in the hashtable */
473 lr->hashlink = lock_hashtable.ptr[slot];
474 lock_hashtable.ptr[slot] = lr;
475 lock_hashtable.entries++;
477 /* check whether the hash should grow */
	/* grow when the load factor exceeds 4/3 (entries/size > 4/3) */
479 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
480 lock_hashtable_grow();
483 /* unlock the hashtable */
485 pthread_mutex_unlock(&(lock_hashtable.mutex));
487 /* return the new lock record */
493 /* lock_hashtable_remove *******************************************************
495 Remove the lock record for the given object from the hashtable.
498 o....the object to look up
500 *******************************************************************************/
502 static void lock_hashtable_remove(java_object_t *o)
	/* Unlink the lock record of object o from its hash chain. The
	   object's lock must already be fat (asserted below); the record
	   itself is freed by the caller, not here. */
507 lock_record_t *tmplr;
509 /* lock the hashtable */
511 pthread_mutex_lock(&(lock_hashtable.mutex));
513 /* get lock record */
515 lockword = (ptrint) o->monitorPtr;
517 assert(IS_FAT_LOCK(lockword));
519 lr = GET_FAT_LOCK(lockword);
521 /* remove the lock-record from the hashtable */
523 slot = LOCK_HASH(o) % lock_hashtable.size;
524 tmplr = lock_hashtable.ptr[slot];
527 /* special handling if it's the first in the chain */
529 lock_hashtable.ptr[slot] = lr->hashlink;
	/* otherwise search the chain for the predecessor of lr */
532 for (; tmplr != NULL; tmplr = tmplr->hashlink) {
533 if (tmplr->hashlink == lr) {
534 tmplr->hashlink = lr->hashlink;
	/* the record must have been found in the chain */
539 assert(tmplr != NULL);
542 /* decrease entry count */
544 lock_hashtable.entries--;
546 /* unlock the hashtable */
548 pthread_mutex_unlock(&(lock_hashtable.mutex));
552 /* lock_record_finalizer *******************************************************
554 XXX Remove me for exact GC.
556 *******************************************************************************/
558 static void lock_record_finalizer(void *object, void *p)
	/* Boehm-GC finalizer installed by lock_hashtable_get: runs the
	   object's own Java finalizer (if any), then reclaims the
	   object's lock record. */
564 o = (java_object_t *) object;
566 /* check for a finalizer function */
568 if (o->vftbl->class->finalizer != NULL)
569 finalizer_run(object, p);
571 /* remove the lock-record entry from the hashtable */
573 lock_hashtable_remove(o);
575 /* get lock record */
577 lockword = (ptrint) o->monitorPtr;
579 assert(IS_FAT_LOCK(lockword));
581 lr = GET_FAT_LOCK(lockword);
583 /* now release the lock record */
585 lock_record_free(lr);
589 /*============================================================================*/
590 /* OBJECT LOCK INITIALIZATION */
591 /*============================================================================*/
594 /* lock_init_object_lock *******************************************************
596 Initialize the monitor pointer of the given object. The monitor gets
597 initialized to an unlocked state.
599 *******************************************************************************/
601 void lock_init_object_lock(java_object_t *o)
	/* Reset the object's lockword to the unlocked thin-lock state and
	   clear any stale flat-lock-contention flag. */
605 o->monitorPtr = (lock_record_t *) THIN_UNLOCKED;
606 LOCK_CLEAR_FLC_BIT(o);
610 /* lock_get_initial_lock_word **************************************************
612 Returns the initial (unlocked) lock word. The pointer is
613 required in the code generator to set up a virtual
614 java_objectheader for code patch locking.
616 *******************************************************************************/
618 lock_record_t *lock_get_initial_lock_word(void)
	/* The unlocked thin-lock value, cast to the lockword's pointer
	   type for use by the code generator. */
620 return (lock_record_t *) THIN_UNLOCKED;
625 /*============================================================================*/
626 /* LOCKING ALGORITHM */
627 /*============================================================================*/
630 /* lock_record_enter ***********************************************************
632 Enter the lock represented by the given lock record.
635 t.................the current thread
636 lr................the lock record
638 *******************************************************************************/
640 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
	/* Blocks until the record's mutex is acquired. NOTE(review): the
	   visible code does not use `t`; presumably ownership bookkeeping
	   (lr->owner = t) happens nearby -- confirm against full source. */
642 pthread_mutex_lock(&(lr->mutex));
648 /* lock_record_exit ************************************************************
650 Release the lock represented by the given lock record.
653 t.................the current thread
654 lr................the lock record
657 The current thread must own the lock represented by this lock record.
658 This is NOT checked by this function!
660 *******************************************************************************/
662 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
	/* Release the record's mutex. Ownership is NOT checked here; the
	   caller guarantees it (see function header above). */
665 pthread_mutex_unlock(&(lr->mutex));
669 /* lock_inflate ****************************************************************
671 Inflate the lock of the given object. This may only be called by the
672 owner of the monitor of the object.
675 t............the current thread
676 o............the object of which to inflate the lock
677 lr...........the lock record to install. The current thread must
678 own the lock of this lock record!
681 The current thread must be the owner of this object's monitor AND
682 of the lock record's lock!
684 *******************************************************************************/
686 static void lock_inflate(threadobject *t, java_object_t *o, lock_record_t *lr)
690 /* get the current lock count */
692 lockword = (ptrint) o->monitorPtr;
694 if (IS_FAT_LOCK(lockword)) {
	/* already fat: it must be this very record; nothing to transfer */
695 assert(GET_FAT_LOCK(lockword) == lr);
	/* thin case: we must be the thin-lock owner (only TID bits match) */
698 assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );
700 /* copy the count from the thin lock */
702 lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
705 LOCK_LOG(("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
706 t->index, (void*) o, (long)o->monitorPtr, (int)lr->count));
708 /* clear flat-lock-contention bit */
710 LOCK_CLEAR_FLC_BIT(o);
712 /* notify waiting objects */
714 lock_record_notify(t, lr, false);
	/* publish the fat lock: store the record pointer with the shape
	   bit set into the object's lockword */
718 o->monitorPtr = (lock_record_t *) MAKE_FAT_LOCK(lr);
722 /* lock_monitor_enter **********************************************************
724 Acquire the monitor of the given object. If the current thread already
725 owns the monitor, the lock counter is simply increased.
727 This function blocks until it can acquire the monitor.
730 t............the current thread
731 o............the object of which to enter the monitor
734 true.........the lock has been successfully acquired
735 false........an exception has been thrown
737 *******************************************************************************/
739 bool lock_monitor_enter(java_object_t *o)
742 /* CAUTION: This code assumes that ptrint is unsigned! */
	/* entering a monitor on a null reference throws NPE */
748 exceptions_throw_nullpointerexception();
754 thinlock = t->thinlock;
756 /* most common case: try to thin-lock an unlocked object */
758 if ((lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) == THIN_UNLOCKED) {
759 /* success. we locked it */
760 /* The Java Memory Model requires a memory barrier here: */
765 /* next common case: recursive lock with small recursion count */
766 /* We don't have to worry about stale values here, as any stale value */
767 /* will indicate another thread holding the lock (or an inflated lock) */
769 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
770 /* we own this monitor */
771 /* check the current recursion count */
	/* XOR isolates the count bits, since TID and shape bits are equal */
773 if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
775 /* the recursion count is low enough */
777 o->monitorPtr = (lock_record_t *) (lockword + THIN_LOCK_COUNT_INCR);
779 /* success. we locked it */
783 /* recursion count overflow */
	/* the count field is exhausted: fall back to a fat lock */
785 lr = lock_hashtable_get(o);
786 lock_record_enter(t, lr);
787 lock_inflate(t, o, lr);
794 /* the lock is either contented or fat */
796 if (IS_FAT_LOCK(lockword)) {
798 lr = GET_FAT_LOCK(lockword);
800 /* check for recursive entering */
801 if (lr->owner == t) {
806 /* acquire the mutex of the lock record */
808 lock_record_enter(t, lr);
	/* a freshly acquired fat lock must not carry a recursion count */
810 assert(lr->count == 0);
815 /****** inflation path ******/
	/* the lock is a thin lock held by another thread: cooperate with
	   the owner (via the FLC bit) to inflate it */
817 /* first obtain the lock record for this object */
819 lr = lock_hashtable_get(o);
821 #if defined(ENABLE_JVMTI)
822 /* Monitor Contended Enter */
823 jvmti_MonitorContendedEntering(false, o);
826 /* enter the monitor */
828 lock_record_enter(t, lr);
830 #if defined(ENABLE_JVMTI)
831 /* Monitor Contended Entered */
832 jvmti_MonitorContendedEntering(true, o);
	/* loop until the lockword is fat; each iteration either grabs the
	   thin lock ourselves (then inflates) or waits for the owner's
	   notification triggered by the FLC bit */
837 while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {
838 /* Set the flat lock contention bit to let the owning thread
839 know that we want to be notified of unlocking. */
843 LOCK_LOG(("thread %d set flc bit on %p lr %p\n",
844 t->index, (void*) o, (void*) lr));
846 /* try to lock the object */
848 if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
849 /* we can inflate the lock ourselves */
851 LOCK_LOG(("thread %d inflating lock of %p to lr %p\n",
852 t->index, (void*) o, (void*) lr));
854 lock_inflate(t, o, lr);
857 /* wait until another thread sees the flc bit and notifies
860 LOCK_LOG(("thread %d waiting for notification on %p lr %p\n",
861 t->index, (void*) o, (void*) lr));
863 lock_record_wait(t, lr, 0, 0);
867 /* we own the inflated lock now */
873 /* lock_monitor_exit ***********************************************************
875 Decrement the counter of a (currently owned) monitor. If the counter
876 reaches zero, release the monitor.
878 If the current thread is not the owner of the monitor, an
879 IllegalMonitorState exception is thrown.
882 t............the current thread
883 o............the object of which to exit the monitor
886 true.........everything ok,
887 false........an exception has been thrown
889 *******************************************************************************/
891 bool lock_monitor_exit(java_object_t *o)
	/* exiting a monitor on a null reference throws NPE */
898 exceptions_throw_nullpointerexception();
904 /* We don't have to worry about stale values here, as any stale value */
905 /* will indicate that we don't own the lock. */
907 lockword = (ptrint) o->monitorPtr;
908 thinlock = t->thinlock;
910 /* most common case: we release a thin lock that we hold once */
912 if (lockword == thinlock) {
913 /* memory barrier for Java Memory Model */
915 o->monitorPtr = THIN_UNLOCKED;
916 /* memory barrier for thin locking */
919 /* check if there has been a flat lock contention on this object */
921 if (LOCK_TEST_FLC_BIT(o)) {
924 LOCK_LOG(("thread %d saw flc bit on %p %s\n",
925 t->index, (void*) o, o->vftbl->class->name->text));
927 /* there has been a contention on this thin lock */
929 lr = lock_hashtable_get(o);
931 LOCK_LOG(("thread %d for %p got lr %p\n",
932 t->index, (void*) o, (void*) lr));
934 lock_record_enter(t, lr);
	/* re-check under the record's mutex: the bit may have been
	   cleared by an inflation in the meantime */
936 if (LOCK_TEST_FLC_BIT(o)) {
937 /* notify a thread that it can try to inflate the lock now */
939 lock_record_notify(t, lr, true);
942 lock_record_exit(t, lr);
948 /* next common case: we release a recursive lock, count > 0 */
950 if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
951 o->monitorPtr = (lock_record_t *) (lockword - THIN_LOCK_COUNT_INCR);
955 /* either the lock is fat, or we don't hold it at all */
957 if (IS_FAT_LOCK(lockword)) {
961 lr = GET_FAT_LOCK(lockword);
963 /* check if we own this monitor */
964 /* We don't have to worry about stale values here, as any stale value */
965 /* will be != t and thus fail this check. */
967 if (lr->owner != t) {
968 exceptions_throw_illegalmonitorstateexception();
972 /* { the current thread `t` owns the lock record `lr` on object `o` } */
974 if (lr->count != 0) {
975 /* we had locked this one recursively. just decrement, it will */
976 /* still be locked. */
981 /* unlock this lock record */
984 pthread_mutex_unlock(&(lr->mutex));
989 /* legal thin lock cases have been handled above, so this is an error */
991 exceptions_throw_illegalmonitorstateexception();
997 /* lock_record_add_waiter ******************************************************
999 Add a thread to the list of waiting threads of a lock record.
1002 lr...........the lock record
1003 thread.......the thread to add
1005 *******************************************************************************/
1007 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
1009 lock_waiter_t *waiter;
1011 /* allocate a waiter data structure */
1013 waiter = NEW(lock_waiter_t);
1015 #if defined(ENABLE_STATISTICS)
1017 size_lock_waiter += sizeof(lock_waiter_t);
	/* prepend the new waiter to the record's singly linked list */
1020 waiter->waiter = thread;
1021 waiter->next = lr->waiters;
1023 lr->waiters = waiter;
1027 /* lock_record_remove_waiter ***************************************************
1029 Remove a thread from the list of waiting threads of a lock record.
1032 lr...........the lock record
1033 t............the current thread
1036 The current thread must be the owner of the lock record.
1038 *******************************************************************************/
1040 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
	/* Walk the waiter list via a pointer-to-pointer so the matching
	   node can be unlinked without a separate "previous" variable. */
1042 lock_waiter_t **link;
1045 link = &(lr->waiters);
1047 while ((w = *link)) {
1048 if (w->waiter == thread) {
1051 /* free the waiter data structure */
1053 FREE(w, lock_waiter_t);
1055 #if defined(ENABLE_STATISTICS)
1057 size_lock_waiter -= sizeof(lock_waiter_t);
1066 /* this should never happen */
	/* reaching the end of the list means the caller was never
	   registered as a waiter -- a VM invariant violation */
1068 vm_abort("lock_record_remove_waiter: waiting thread not found in list of waiters\n");
1072 /* lock_record_wait ************************************************************
1074 Wait on a lock record for a given (maximum) amount of time.
1077 t............the current thread
1078 lr...........the lock record
1079 millis.......milliseconds of timeout
1080 nanos........nanoseconds of timeout
1083 The current thread must be the owner of the lock record.
1084 This is NOT checked by this function!
1086 *******************************************************************************/
1088 static void lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1091 bool wasinterrupted;
1093 /* { the thread t owns the fat lock record lr on the object o } */
1095 /* register us as waiter for this object */
1097 lock_record_add_waiter(lr, thread);
1099 /* remember the old lock count */
	/* the recursion count must survive the wait, since the monitor is
	   fully released below and re-acquired with count state unknown */
1101 lockcount = lr->count;
1103 /* unlock this record */
1106 lock_record_exit(thread, lr);
1108 /* wait until notified/interrupted/timed out */
1110 wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);
1112 /* re-enter the monitor */
1114 lock_record_enter(thread, lr);
1116 /* remove us from the list of waiting threads */
1118 lock_record_remove_waiter(lr, thread);
1120 /* restore the old lock count */
1122 lr->count = lockcount;
1124 /* if we have been interrupted, throw the appropriate exception */
1127 exceptions_throw_interruptedexception();
1131 /* lock_monitor_wait ***********************************************************
1133 Wait on an object for a given (maximum) amount of time.
1136 t............the current thread
1137 o............the object
1138 millis.......milliseconds of timeout
1139 nanos........nanoseconds of timeout
1142 The current thread must be the owner of the object's monitor.
1144 *******************************************************************************/
1146 static void lock_monitor_wait(threadobject *t, java_object_t *o, s8 millis, s4 nanos)
1151 lockword = (ptrint) o->monitorPtr;
1153 /* check if we own this monitor */
1154 /* We don't have to worry about stale values here, as any stale value */
1155 /* will fail this check. */
1157 if (IS_FAT_LOCK(lockword)) {
1159 lr = GET_FAT_LOCK(lockword);
1161 if (lr->owner != t) {
1162 exceptions_throw_illegalmonitorstateexception();
1167 /* it's a thin lock */
	/* ownership check for the thin case: TID bits must be ours */
1169 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1170 exceptions_throw_illegalmonitorstateexception();
1174 /* inflate this lock */
	/* waiting requires a fat lock (a condition-variable style wait on
	   the lock record), so inflate before waiting */
1176 lr = lock_hashtable_get(o);
1178 lock_record_enter(t, lr);
1179 lock_inflate(t, o, lr);
1182 /* { the thread t owns the fat lock record lr on the object o } */
1184 lock_record_wait(t, lr, millis, nanos);
1188 /* lock_record_notify **********************************************************
1190 Notify one thread or all threads waiting on the given lock record.
1193 t............the current thread
1194 lr...........the lock record
1195 one..........if true, only notify one thread
1198 The current thread must be the owner of the lock record.
1199 This is NOT checked by this function!
1201 *******************************************************************************/
1203 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1205 lock_waiter_t *waiter;
1206 threadobject *waitingthread;
1208 /* { the thread t owns the fat lock record lr on the object o } */
1210 /* for each waiter: */
1212 for (waiter = lr->waiters; waiter != NULL; waiter = waiter->next) {
1214 /* signal the waiting thread */
1216 waitingthread = waiter->waiter;
	/* the waiter's waitmutex guards its sleeping/signaled state */
1218 pthread_mutex_lock(&waitingthread->waitmutex);
1220 if (waitingthread->sleeping)
1221 pthread_cond_signal(&waitingthread->waitcond);
	/* NOTE(review): the extracted text shows `signaled = true` being
	   set right after the conditional signal; presumably this is the
	   branch for a waiter not yet sleeping -- confirm control flow
	   against the full source */
1223 waitingthread->signaled = true;
1225 pthread_mutex_unlock(&waitingthread->waitmutex);
1227 /* if we should only wake one, we are done */
1235 /* lock_monitor_notify *********************************************************
1237 Notify one thread or all threads waiting on the given object.
1240 t............the current thread
1241 o............the object
1242 one..........if true, only notify one thread
1245 The current thread must be the owner of the object's monitor.
1247 *******************************************************************************/
1249 static void lock_monitor_notify(threadobject *t, java_object_t *o, bool one)
1254 lockword = (ptrint) o->monitorPtr;
1256 /* check if we own this monitor */
1257 /* We don't have to worry about stale values here, as any stale value */
1258 /* will fail this check. */
1260 if (IS_FAT_LOCK(lockword)) {
1262 lr = GET_FAT_LOCK(lockword);
1264 if (lr->owner != t) {
1265 exceptions_throw_illegalmonitorstateexception();
1270 /* it's a thin lock */
	/* ownership check for the thin case: TID bits must be ours */
1272 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1273 exceptions_throw_illegalmonitorstateexception();
1277 /* inflate this lock */
	/* notify requires a fat lock with a waiter list, so inflate first */
1279 lr = lock_hashtable_get(o);
1281 lock_record_enter(t, lr);
1282 lock_inflate(t, o, lr);
1285 /* { the thread t owns the fat lock record lr on the object o } */
1287 lock_record_notify(t, lr, one);
1292 /*============================================================================*/
1293 /* INQUIRY FUNCTIONS */
1294 /*============================================================================*/
1297 /* lock_is_held_by_current_thread **********************************************
1299 Return true if the current thread owns the monitor of the given object.
1302 o............the object
1305 true, if the current thread holds the lock of this object.
1307 *******************************************************************************/
1309 bool lock_is_held_by_current_thread(java_object_t *o)
1317 /* check if we own this monitor */
1318 /* We don't have to worry about stale values here, as any stale value */
1319 /* will fail this check. */
1321 lockword = (ptrint) o->monitorPtr;
1323 if (IS_FAT_LOCK(lockword)) {
1324 /* it's a fat lock */
1326 lr = GET_FAT_LOCK(lockword);
	/* fat case: ownership is recorded explicitly in the lock record */
1328 return (lr->owner == t);
1331 /* it's a thin lock */
	/* thin case: compare TID bits, ignoring the recursion count */
1333 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1339 /*============================================================================*/
1340 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1341 /*============================================================================*/
1344 /* lock_wait_for_object ********************************************************
1346 Wait for the given object.
1349 o............the object
1350 millis.......milliseconds to wait
1351 nanos........nanoseconds to wait
1353 *******************************************************************************/
1355 void lock_wait_for_object(java_object_t *o, s8 millis, s4 nanos)
	/* Convenience wrapper: wait on o's monitor as the current thread. */
1357 threadobject *thread;
1359 thread = THREADOBJECT;
1361 lock_monitor_wait(thread, o, millis, nanos);
1365 /* lock_notify_object **********************************************************
1367 Notify one thread waiting on the given object.
1370 o............the object
1372 *******************************************************************************/
1374 void lock_notify_object(java_object_t *o)
	/* Convenience wrapper: notify one waiter (one == true) as the
	   current thread. */
1376 threadobject *thread;
1378 thread = THREADOBJECT;
1380 lock_monitor_notify(thread, o, true);
1384 /* lock_notify_all_object ******************************************************
1386 Notify all threads waiting on the given object.
1389 o............the object
1391 *******************************************************************************/
1393 void lock_notify_all_object(java_object_t *o)
	/* Convenience wrapper: notify all waiters (one == false) as the
	   current thread. */
1395 threadobject *thread;
1397 thread = THREADOBJECT;
1399 lock_monitor_notify(thread, o, false);
1404 * These are local overrides for various environment variables in Emacs.
1405 * Please do not remove this and leave it at the end of the file, where
1406 * Emacs will automagically detect them.
1407 * ---------------------------------------------------------------------
1410 * indent-tabs-mode: t
1414 * vim:noexpandtab:sw=4:ts=4: