* src/vmcore/statistics.c (size_threadobject, size_lock_hashtable)
[cacao.git] / src / threads / native / lock.c
1 /* src/threads/native/lock.c - lock implementation
2
3    Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
4    C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5    E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6    J. Wenninger, Institut f. Computersprachen - TU Wien
7
8    This file is part of CACAO.
9
10    This program is free software; you can redistribute it and/or
11    modify it under the terms of the GNU General Public License as
12    published by the Free Software Foundation; either version 2, or (at
13    your option) any later version.
14
15    This program is distributed in the hope that it will be useful, but
16    WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18    General Public License for more details.
19
20    You should have received a copy of the GNU General Public License
21    along with this program; if not, write to the Free Software
22    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23    02110-1301, USA.
24
25    $Id: threads.c 4903 2006-05-11 12:48:43Z edwin $
26
27 */
28
29
30 #include "config.h"
31
32 #include <stdlib.h>
33 #include <stdio.h>
34 #include <assert.h>
35 #include <sys/time.h>
36 #include <pthread.h>
37
38 #include "vm/types.h"
39
40 #include "mm/memory.h"
41
42 #include "threads/native/lock.h"
43 #include "threads/native/threads.h"
44
45 #include "vm/global.h"
46 #include "vm/exceptions.h"
47 #include "vm/stringlocal.h"
48 #include "vm/vm.h"
49
50 #include "vmcore/options.h"
51
52 #if defined(ENABLE_STATISTICS)
53 # include "vmcore/statistics.h"
54 #endif
55
56 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
57
58 #include "arch.h"
59
60 /* includes for atomic instructions: */
61
62 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
63 #include "threads/native/generic-primitives.h"
64 #else
65 #include "machine-instr.h"
66 #endif
67
68 #if defined(ENABLE_JVMTI)
69 #include "native/jvmti/cacaodbg.h"
70 #endif
71
72
73 /******************************************************************************/
74 /* DEBUGGING MACROS                                                           */
75 /******************************************************************************/
76
77 /* #define LOCK_VERBOSE */
78
79 #if defined(LOCK_VERBOSE)
80 #define LOCK_LOG(args)  do { printf args; fflush(stdout); } while (0)
81 #else
82 #define LOCK_LOG(args)
83 #endif
84
85
86 /******************************************************************************/
87 /* MACROS                                                                     */
88 /******************************************************************************/
89
90 /* number of lock records in the first pool allocated for a thread */
91 #define LOCK_INITIAL_LOCK_RECORDS 8
92
93 #define LOCK_INITIAL_HASHTABLE_SIZE  1613  /* a prime in the middle between 1024 and 2048 */
94
95 #define LOCK_HASH(obj)  ((ptrint)(obj))
96
97 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
98         ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
99
100 /* CAUTION: oldvalue is evaluated twice! */
101 #define COMPARE_AND_SWAP_SUCCEEDS(address, oldvalue, newvalue) \
102         (compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)) == (long)(oldvalue))
103
104
105 /******************************************************************************/
106 /* MACROS FOR THE FLAT LOCK CONTENTION BIT                                    */
107 /******************************************************************************/
108
109 #define LOCK_SET_FLC_BIT(obj)    ((obj)->hdrflags |= HDRFLAG_FLC)
110 #define LOCK_CLEAR_FLC_BIT(obj)  ((obj)->hdrflags &= ~ HDRFLAG_FLC)
111 #define LOCK_TEST_FLC_BIT(obj)   ((obj)->hdrflags & HDRFLAG_FLC)
112
113
114 /******************************************************************************/
115 /* MACROS FOR THIN/FAT LOCKS                                                  */
116 /******************************************************************************/
117
118 /* We use a variant of the tasuki locks described in the paper
119  *     
120  *     Tamiya Onodera, Kiyokuni Kawachiya
121  *     A Study of Locking Objects with Bimodal Fields
122  *     Proceedings of the ACM OOPSLA '99, pp. 223-237
123  *     1999
124  *
125  * The underlying thin locks are a variant of the thin locks described in
126  * 
127  *     Bacon, Konuru, Murthy, Serrano
128  *     Thin Locks: Featherweight Synchronization for Java
129  *         Proceedings of the ACM Conference on Programming Language Design and 
130  *         Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
131  *         June 1998
132  *
133  * In thin lock mode the lockword (monitorPtr) looks like this:
134  *
135  *     ,----------------------,-----------,---,
136  *     |      thread ID       |   count   | 0 |
137  *     `----------------------'-----------'---´
138  *
139  *     thread ID......the 'index' of the owning thread, or 0
140  *     count..........number of times the lock has been entered minus 1
141  *     0..............the shape bit is 0 in thin lock mode
142  *
143  * In fat lock mode it is basically a lock_record_t *:
144  *
145  *     ,----------------------------------,---,
146  *     |    lock_record_t * (without LSB) | 1 |
147  *     `----------------------------------'---´
148  *
149  *     1..............the shape bit is 1 in fat lock mode
150  */
151
152 #if SIZEOF_VOID_P == 8
153 #define THIN_LOCK_WORD_SIZE    64
154 #else
155 #define THIN_LOCK_WORD_SIZE    32
156 #endif
157
158 #define THIN_LOCK_SHAPE_BIT    0x01
159
160 #define THIN_UNLOCKED          0
161
162 #define THIN_LOCK_COUNT_SHIFT  1
163 #define THIN_LOCK_COUNT_SIZE   8
164 #define THIN_LOCK_COUNT_INCR   (1 << THIN_LOCK_COUNT_SHIFT)
165 #define THIN_LOCK_COUNT_MAX    ((1 << THIN_LOCK_COUNT_SIZE) - 1)
166 #define THIN_LOCK_COUNT_MASK   (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
167
168 #define THIN_LOCK_TID_SHIFT    (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
169 #define THIN_LOCK_TID_SIZE     (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
170
171 #define IS_THIN_LOCK(lockword)  (!((lockword) & THIN_LOCK_SHAPE_BIT))
172 #define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)
173
174 #define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
175 #define MAKE_FAT_LOCK(ptr)      ((ptrint)(ptr) | THIN_LOCK_SHAPE_BIT)
176
177 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
178
179
180 /******************************************************************************/
181 /* GLOBAL VARIABLES                                                           */
182 /******************************************************************************/
183
184 /* global lock record pool list header */
185 lock_record_pool_t *lock_global_pool;
186
187 /* mutex for synchronizing access to the global pool */
188 pthread_mutex_t lock_global_pool_lock;
189
190 /* hashtable mapping objects to lock records */
191 static lock_hashtable_t lock_hashtable;
192
193
194 /******************************************************************************/
195 /* PROTOTYPES                                                                 */
196 /******************************************************************************/
197
198 static void lock_hashtable_init(void);
199 static lock_record_t * lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o);
200
201 static lock_record_t * lock_record_alloc(threadobject *t);
202
203 static void lock_record_enter(threadobject *t, lock_record_t *lr);
204 static void lock_record_exit(threadobject *t, lock_record_t *lr);
205 static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
206 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
207
208
209 /*============================================================================*/
210 /* INITIALIZATION OF DATA STRUCTURES                                          */
211 /*============================================================================*/
212
213
/* lock_init *******************************************************************

   Initialize global data for locking: the mutex protecting the global
   lock record pool freelist, and the hashtable mapping objects to lock
   records. Must be called once before any other lock_* function.

*******************************************************************************/

void lock_init(void)
{
	/* mutex guarding lock_global_pool (the global pool freelist) */
	pthread_mutex_init(&lock_global_pool_lock, NULL);

	/* set up the object -> lock record hashtable */
	lock_hashtable_init();
}
226
227
228 /* lock_record_init ************************************************************
229
230    Initialize a lock record.
231
232    IN:
233       r............the lock record to initialize
234           t............will become the owner
235
236 *******************************************************************************/
237
238 static void lock_record_init(lock_record_t *r, threadobject *t)
239 {
240         r->owner = NULL;
241         r->count = 0;
242         r->waiters = NULL;
243
244 #if !defined(NDEBUG)
245         r->nextfree = NULL;
246 #endif
247
248         pthread_mutex_init(&(r->mutex), NULL);
249 }
250
251
252 /* lock_init_execution_env *****************************************************
253
254    Initialize the execution environment for a thread.
255
256    IN:
257       thread.......the thread
258
259 *******************************************************************************/
260
261 void lock_init_execution_env(threadobject *thread)
262 {
263         thread->ee.firstfree = NULL;
264         thread->ee.lockrecordpools = NULL;
265         thread->ee.lockrecordcount = 0;
266 }
267
268
269
270 /* lock_pre_compute_thinlock ***************************************************
271
272    Pre-compute the thin lock value for a thread index.
273
274    IN:
275       index........the thead index (>= 1)
276
277    RETURN VALUE:
278       the thin lock value for this thread index
279
280 *******************************************************************************/
281
282 ptrint lock_pre_compute_thinlock(s4 index)
283 {
284         return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
285 }
286
287
288
289 /*============================================================================*/
290 /* LOCK RECORD MANAGEMENT                                                     */
291 /*============================================================================*/
292
293
294 /* lock_record_alloc_new_pool **************************************************
295
296    Get a new lock record pool from the memory allocator.
297
298    IN:
299       thread.......the thread that will own the lock records
300           size.........number of lock records in the pool to allocate
301
302    RETURN VALUE:
303       the new lock record pool, with initialized lock records
304
305 *******************************************************************************/
306
307 static lock_record_pool_t *lock_record_alloc_new_pool(threadobject *thread, int size)
308 {
309         int i;
310         lock_record_pool_t *pool;
311
312         /* get the pool from the memory allocator */
313
314         pool = mem_alloc(sizeof(lock_record_pool_header_t)
315                                    + sizeof(lock_record_t) * size);
316
317         /* initialize the pool header */
318
319         pool->header.size = size;
320
321         /* initialize the individual lock records */
322
323         for (i=0; i<size; i++) {
324                 lock_record_init(&pool->lr[i], thread);
325
326                 pool->lr[i].nextfree = &pool->lr[i+1];
327         }
328
329         /* terminate free list */
330
331         pool->lr[i-1].nextfree = NULL;
332
333         return pool;
334 }
335
336
337 /* lock_record_alloc_pool ******************************************************
338
339    Allocate a lock record pool. The pool is either taken from the global free
340    list or requested from the memory allocator.
341
342    IN:
343       thread.......the thread that will own the lock records
344           size.........number of lock records in the pool to allocate
345
346    RETURN VALUE:
347       the new lock record pool, with initialized lock records
348
349 *******************************************************************************/
350
351 static lock_record_pool_t *lock_record_alloc_pool(threadobject *t, int size)
352 {
353         pthread_mutex_lock(&lock_global_pool_lock);
354
355         if (lock_global_pool) {
356                 int i;
357                 lock_record_pool_t *pool;
358
359                 /* pop a pool from the global freelist */
360
361                 pool = lock_global_pool;
362                 lock_global_pool = pool->header.next;
363
364                 pthread_mutex_unlock(&lock_global_pool_lock);
365
366                 /* re-initialize owner and freelist chaining */
367
368                 for (i=0; i < pool->header.size; i++) {
369                         pool->lr[i].owner = NULL;
370                         pool->lr[i].nextfree = &pool->lr[i+1];
371                 }
372                 pool->lr[i-1].nextfree = NULL;
373
374                 return pool;
375         }
376
377         pthread_mutex_unlock(&lock_global_pool_lock);
378
379         /* we have to get a new pool from the allocator */
380
381         return lock_record_alloc_new_pool(t, size);
382 }
383
384
/* lock_record_free_pools ******************************************************

   Free the lock record pools in the given linked list. The pools are inserted
   into the global freelist.

   NOTE: this function is currently DISABLED (it asserts false on entry);
   see the comment below. It is kept for its intended freelist-splicing
   logic.

   IN:
      pool.........list header

*******************************************************************************/

void lock_record_free_pools(lock_record_pool_t *pool)
{
	lock_record_pool_header_t *last;

	assert(false); /* XXX this function does not match the new locking */
	               /*     algorithm. We must find another way to free  */
	               /*     unused lock records.                         */

	if (!pool)
		return;

	pthread_mutex_lock(&lock_global_pool_lock);

	/* find the last pool in the list */

	last = &pool->header;
	while (last->next)
		last = &last->next->header;

	/* chain it to the lock_global_pool freelist */

	last->next = lock_global_pool;

	/* insert the freed pools into the freelist */

	lock_global_pool = pool;

	pthread_mutex_unlock(&lock_global_pool_lock);
}
424
425
426 /* lock_record_alloc ***********************************************************
427
428    Allocate a lock record which is owned by the current thread.
429
430    IN:
431       t............the current thread 
432
433 *******************************************************************************/
434
435 static lock_record_t *lock_record_alloc(threadobject *t)
436 {
437         lock_record_t *r;
438
439         assert(t);
440         r = t->ee.firstfree;
441
442         if (!r) {
443                 int poolsize;
444                 lock_record_pool_t *pool;
445
446                 /* get a new pool */
447
448                 poolsize = t->ee.lockrecordcount ? t->ee.lockrecordcount * 2
449                                                                                  : LOCK_INITIAL_LOCK_RECORDS;
450                 pool = lock_record_alloc_pool(t, poolsize);
451
452                 /* add it to our per-thread pool list */
453
454                 pool->header.next = t->ee.lockrecordpools;
455                 t->ee.lockrecordpools = pool;
456                 t->ee.lockrecordcount += pool->header.size;
457
458                 /* take the first record from the pool */
459                 r = &pool->lr[0];
460         }
461
462         /* pop the record from the freelist */
463
464         t->ee.firstfree = r->nextfree;
465 #ifndef NDEBUG
466         r->nextfree = NULL; /* in order to find invalid uses of nextfree */
467 #endif
468
469         return r;
470 }
471
472
473 /* lock_record_recycle *********************************************************
474
475    Recycle the given lock record. It will be inserted in the appropriate
476    free list.
477
478    IN:
479       t............the owner
480           r............lock record to recycle
481
482 *******************************************************************************/
483
484 static inline void lock_record_recycle(threadobject *t, lock_record_t *r)
485 {
486         assert(t);
487         assert(r);
488         assert(r->owner == NULL);
489         assert(r->nextfree == NULL);
490
491         r->nextfree = t->ee.firstfree;
492         t->ee.firstfree = r;
493 }
494
495
496
497 /*============================================================================*/
498 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS                                  */
499 /*============================================================================*/
500
501
502 /* lock_hashtable_init *********************************************************
503
504    Initialize the global hashtable mapping objects to lock records.
505
506 *******************************************************************************/
507
508 static void lock_hashtable_init(void)
509 {
510         pthread_mutex_init(&(lock_hashtable.mutex), NULL);
511
512         lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
513         lock_hashtable.entries = 0;
514         lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);
515
516 #if defined(ENABLE_STATISTICS)
517         if (opt_stat)
518                 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
519 #endif
520
521         MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
522 }
523
524
525 /* lock_hashtable_grow *********************************************************
526
527    Grow the lock record hashtable to about twice its current size and
528    rehash the entries.
529
530 *******************************************************************************/
531
532 /* must be called with hashtable mutex locked */
533 static void lock_hashtable_grow(void)
534 {
535         u4 oldsize;
536         u4 newsize;
537         lock_record_t **oldtable;
538         lock_record_t **newtable;
539         lock_record_t *lr;
540         lock_record_t *next;
541         u4 i;
542         u4 h;
543         u4 newslot;
544
545         /* allocate a new table */
546
547         oldsize = lock_hashtable.size;
548         newsize = oldsize*2 + 1; /* XXX should use prime numbers */
549
550         LOCK_LOG(("growing lock hashtable to size %d\n", newsize));
551
552         oldtable = lock_hashtable.ptr;
553         newtable = MNEW(lock_record_t *, newsize);
554
555 #if defined(ENABLE_STATISTICS)
556         if (opt_stat)
557                 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
558 #endif
559
560         MZERO(newtable, lock_record_t *, newsize);
561
562         /* rehash the entries */
563
564         for (i = 0; i < oldsize; i++) {
565                 lr = oldtable[i];
566                 while (lr) {
567                         next = lr->hashlink;
568
569                         h = LOCK_HASH(lr->obj);
570                         newslot = h % newsize;
571
572                         lr->hashlink = newtable[newslot];
573                         newtable[newslot] = lr;
574
575                         lr = next;
576                 }
577         }
578
579         /* replace the old table */
580
581         lock_hashtable.ptr  = newtable;
582         lock_hashtable.size = newsize;
583
584         MFREE(oldtable, lock_record_t *, oldsize);
585
586 #if defined(ENABLE_STATISTICS)
587         if (opt_stat)
588                 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
589 #endif
590 }
591
592
/* lock_hashtable_get_lock_record **********************************************

   Find the lock record for the given object. If it does not exists, yet,
   create it and enter it in the hashtable.

   IN:
      t.................the current thread
      o.................the object to look up

   RETURN VALUE:
      the lock record to use for this object

*******************************************************************************/

static lock_record_t *lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o)
{
	ptrint lockword;
	u4 slot;
	lock_record_t *lr;

	/* fast path: if the object is already inflated, the lock word itself
	   holds the record -- no need to touch the hashtable mutex at all */

	lockword = (ptrint) o->monitorPtr;

	if (IS_FAT_LOCK(lockword)) {
		return GET_FAT_LOCK(lockword);
	}

	/* lock the hashtable */

	pthread_mutex_lock(&(lock_hashtable.mutex));

	/* lookup the lock record in the hashtable */

	slot = LOCK_HASH(o) % lock_hashtable.size;
	lr = lock_hashtable.ptr[slot];
	while (lr) {
		if (lr->obj == o) {
			/* found an existing record; it stays valid after we drop
			   the mutex because records are never removed here */
			pthread_mutex_unlock(&(lock_hashtable.mutex));
			return lr;
		}

		lr = lr->hashlink;
	}

	/* not found, we must create a new one */
	/* (allocated while holding the hashtable mutex, so no other thread
	   can insert a record for the same object concurrently) */

	lr = lock_record_alloc(t);
	lr->obj = o;
	LOCK_LOG(("thread %d allocated for %p new lr %p\n",
			t->index, (void*) o, (void*) lr));

	/* enter it in the hashtable (pushed at the bucket head) */

	lr->hashlink = lock_hashtable.ptr[slot];
	lock_hashtable.ptr[slot] = lr;
	lock_hashtable.entries++;

	/* check whether the hash should grow (load factor > 4/3) */

	if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
		lock_hashtable_grow();
	}

	/* unlock the hashtable */

	pthread_mutex_unlock(&(lock_hashtable.mutex));

	/* return the new lock record */

	return lr;
}
663
664
665 /*============================================================================*/
666 /* OBJECT LOCK INITIALIZATION                                                 */
667 /*============================================================================*/
668
669
670 /* lock_init_object_lock *******************************************************
671
672    Initialize the monitor pointer of the given object. The monitor gets
673    initialized to an unlocked state.
674
675 *******************************************************************************/
676
677 void lock_init_object_lock(java_objectheader *o)
678 {
679         assert(o);
680
681         o->monitorPtr = (lock_record_t *) THIN_UNLOCKED;
682         LOCK_CLEAR_FLC_BIT(o);
683 }
684
685
/* lock_get_initial_lock_word **************************************************

   Returns the initial (unlocked) lock word. The pointer is
   required in the code generator to set up a virtual
   java_objectheader for code patch locking.

*******************************************************************************/

lock_record_t *lock_get_initial_lock_word(void)
{
	/* THIN_UNLOCKED (0): thin shape bit, zero count, no owning thread */
	return (lock_record_t *) THIN_UNLOCKED;
}
698
699
700
701 /*============================================================================*/
702 /* LOCKING ALGORITHM                                                          */
703 /*============================================================================*/
704
705
/* lock_record_enter ***********************************************************

   Enter the lock represented by the given lock record. Blocks until the
   record's mutex is acquired, then marks the current thread as owner.

   IN:
      t.................the current thread
      lr................the lock record

*******************************************************************************/

static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
{
	pthread_mutex_lock(&(lr->mutex));
	/* set owner only AFTER the mutex is held, so the store is exclusive */
	lr->owner = t;
}
721
722
/* lock_record_exit ************************************************************

   Release the lock represented by the given lock record.

   IN:
      t.................the current thread (unused; kept for symmetry
                        with lock_record_enter)
      lr................the lock record

   PRE-CONDITION:
      The current thread must own the lock represented by this lock record.
      This is NOT checked by this function!

*******************************************************************************/

static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
{
	/* clear the owner BEFORE unlocking, while we still hold the mutex */
	lr->owner = NULL;
	pthread_mutex_unlock(&(lr->mutex));
}
742
743
/* lock_inflate ****************************************************************

   Inflate the lock of the given object. This may only be called by the
   owner of the monitor of the object.

   The recursion count is carried over from the thin lock word into the
   lock record, and the fat lock word is installed only as the last step.

   IN:
      t............the current thread
      o............the object of which to inflate the lock
      lr...........the lock record to install. The current thread must
                   own the lock of this lock record!

   PRE-CONDITION:
      The current thread must be the owner of this object's monitor AND
      of the lock record's lock!

*******************************************************************************/

static void lock_inflate(threadobject *t, java_objectheader *o, lock_record_t *lr)
{
	ptrint lockword;

	/* get the current lock count */

	lockword = (ptrint) o->monitorPtr;

	if (IS_FAT_LOCK(lockword)) {
		/* already inflated -- it must be to the very same record */
		assert(GET_FAT_LOCK(lockword) == lr);
	}
	else {
		/* thin-locked: the TID field must be ours */
		assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );

		/* copy the count from the thin lock */

		lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
	}

	LOCK_LOG(("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
			t->index, (void*) o, (long)o->monitorPtr, (int)lr->count));

	/* clear flat-lock-contention bit */

	LOCK_CLEAR_FLC_BIT(o);

	/* notify waiting objects */
	/* (done BEFORE publishing the fat lock word, so threads blocked in
	   the inflation loop of lock_monitor_enter get woken up) */

	lock_record_notify(t, lr, false);

	/* install it -- from here on other threads see the fat lock */

	o->monitorPtr = (lock_record_t *) MAKE_FAT_LOCK(lr);
}
795
796
/* lock_monitor_enter **********************************************************

   Acquire the monitor of the given object. If the current thread already
   owns the monitor, the lock counter is simply increased.

   This function blocks until it can acquire the monitor.

   The current thread is taken from THREADOBJECT; only the object is
   passed in.

   IN:
      o............the object of which to enter the monitor

   RETURN VALUE:
      true.........the lock has been successfully acquired
      false........an exception has been thrown

*******************************************************************************/

bool lock_monitor_enter(java_objectheader *o)
{
	threadobject *t;
	/* CAUTION: This code assumes that ptrint is unsigned! */
	ptrint        lockword;
	ptrint        thinlock;

	if (o == NULL) {
		exceptions_throw_nullpointerexception();
		return false;
	}

	t = THREADOBJECT;

	thinlock = t->thinlock;

	/* most common case: try to thin-lock an unlocked object */
	/* (CAS from THIN_UNLOCKED to our pre-computed thin lock word) */

	if ((lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) == THIN_UNLOCKED) {
		/* success. we locked it */
		/* The Java Memory Model requires a memory barrier here: */
		MEMORY_BARRIER();
		return true;
	}

	/* next common case: recursive lock with small recursion count */
	/* We don't have to worry about stale values here, as any stale value  */
	/* will indicate another thread holding the lock (or an inflated lock) */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		/* we own this monitor               */
		/* check the current recursion count */
		/* (XOR isolates the count bits, since the TID bits are equal
		   and the shape bit is 0 in both words) */

		if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
		{
			/* the recursion count is low enough */

			o->monitorPtr = (lock_record_t *) (lockword + THIN_LOCK_COUNT_INCR);

			/* success. we locked it */
			return true;
		}
		else {
			lock_record_t *lr;

			/* recursion count overflow: inflate and carry on counting
			   in the fat lock record */

			lr = lock_hashtable_get_lock_record(t, o);
			lock_record_enter(t, lr);
			lock_inflate(t, o, lr);
			lr->count++;

			return true;
		}
	}

	/* the lock is either contented or fat */

	{
		lock_record_t *lr;

		if (IS_FAT_LOCK(lockword)) {

			lr = GET_FAT_LOCK(lockword);

			/* check for recursive entering */
			if (lr->owner == t) {
				lr->count++;
				return true;
			}

			/* acquire the mutex of the lock record */
			/* (blocks until the current owner releases it) */

			lock_record_enter(t, lr);

			assert(lr->count == 0);

			return true;
		}

		/****** inflation path ******/
		/* the lock is thin and held by another thread: we must wait for
		   (or force) inflation before we can block on the record mutex */

		/* first obtain the lock record for this object */

		lr = lock_hashtable_get_lock_record(t, o);

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Enter */
		jvmti_MonitorContendedEntering(false, o);
#endif
		/* enter the monitor */

		lock_record_enter(t, lr);


#if defined(ENABLE_JVMTI)
		/* Monitor Contended Entered */
		jvmti_MonitorContendedEntering(true, o);
#endif

		/* inflation loop: repeat until some thread (possibly us) has
		   installed the fat lock word */

		while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {
			/* Set the flat lock contention bit to let the owning thread */
			/* know that we want to be notified of unlocking.            */

			LOCK_SET_FLC_BIT(o);

			LOCK_LOG(("thread %d set flc bit on %p lr %p\n",
					t->index, (void*) o, (void*) lr));

			/* try to lock the object */

			if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
				/* we can inflate the lock ourselves */
				LOCK_LOG(("thread %d inflating lock of %p to lr %p\n",
						t->index, (void*) o, (void*) lr));
				lock_inflate(t, o, lr);
			}
			else {
				/* wait until another thread sees the flc bit and notifies us of unlocking */
				LOCK_LOG(("thread %d waiting for notification on %p lr %p\n",
						t->index, (void*) o, (void*) lr));
				lock_record_wait(t, lr, 0, 0);
			}
		}

		/* we own the inflated lock now */

		return true;
	}
}
946
947
948 /* lock_monitor_exit ***********************************************************
949
950    Decrement the counter of a (currently owned) monitor. If the counter
951    reaches zero, release the monitor.
952
953    If the current thread is not the owner of the monitor, an 
954    IllegalMonitorState exception is thrown.
955
956    IN:
957       t............the current thread
958           o............the object of which to exit the monitor
959
960    RETURN VALUE:
961       true.........everything ok,
962           false........an exception has been thrown
963
964 *******************************************************************************/
965
bool lock_monitor_exit(java_objectheader *o)
{
	threadobject *t;
	ptrint        lockword;   /* current value of the object's lock word        */
	ptrint        thinlock;   /* thin-lock word this thread uses for count == 0 */

	if (o == NULL) {
		exceptions_throw_nullpointerexception();
		return false;
	}

	t = THREADOBJECT;

	/* We don't have to worry about stale values here, as any stale value */
	/* will indicate that we don't own the lock.                          */

	lockword = (ptrint) o->monitorPtr;
	thinlock = t->thinlock;

	/* most common case: we release a thin lock that we hold once */

	if (lockword == thinlock) {
		/* memory barrier for Java Memory Model */
		MEMORY_BARRIER();
		o->monitorPtr = THIN_UNLOCKED;
		/* memory barrier for thin locking */
		MEMORY_BARRIER();

		/* check if there has been a flat lock contention on this object */

		if (LOCK_TEST_FLC_BIT(o)) {
			lock_record_t *lr;

			LOCK_LOG(("thread %d saw flc bit on %p %s\n",
					t->index, (void*) o, o->vftbl->class->name->text));

			/* there has been a contention on this thin lock */

			lr = lock_hashtable_get_lock_record(t, o);

			LOCK_LOG(("thread %d for %p got lr %p\n",
					t->index, (void*) o, (void*) lr));

			lock_record_enter(t, lr);

			/* re-test the FLC bit under the lock record mutex: a        */
			/* contending thread may already have inflated the lock and  */
			/* cleared the bit, in which case there is nobody to notify  */

			if (LOCK_TEST_FLC_BIT(o)) {
				/* notify a thread that it can try to inflate the lock now */

				lock_record_notify(t, lr, true);
			}

			lock_record_exit(t, lr);
		}

		return true;
	}

	/* next common case: we release a recursive lock, count > 0 */

	/* the recursion count is stored inside the lock word itself, so a    */
	/* recursive exit is just a subtraction of the count increment        */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		o->monitorPtr = (lock_record_t *) (lockword - THIN_LOCK_COUNT_INCR);
		return true;
	}

	/* either the lock is fat, or we don't hold it at all */

	if (IS_FAT_LOCK(lockword)) {

		lock_record_t *lr;

		lr = GET_FAT_LOCK(lockword);

		/* check if we own this monitor */
		/* We don't have to worry about stale values here, as any stale value */
		/* will be != t and thus fail this check.                             */

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();
			return false;
		}

		/* { the current thread `t` owns the lock record `lr` on object `o` } */

		/* count == 0 means held exactly once; only then do we really unlock */

		if (lr->count != 0) {
			/* we had locked this one recursively. just decrement, it will */
			/* still be locked. */
			lr->count--;
			return true;
		}

		/* unlock this lock record */

		lr->owner = NULL;
		pthread_mutex_unlock(&(lr->mutex));

		return true;
	}

	/* legal thin lock cases have been handled above, so this is an error */

	exceptions_throw_illegalmonitorstateexception();

	return false;
}
1070
1071
1072 /* lock_record_add_waiter ******************************************************
1073
1074    Add a thread to the list of waiting threads of a lock record.
1075
1076    IN:
1077       lr...........the lock record
1078       thread.......the thread to add
1079
1080 *******************************************************************************/
1081
1082 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
1083 {
1084         lock_waiter_t *waiter;
1085
1086         /* allocate a waiter data structure */
1087
1088         waiter = NEW(lock_waiter_t);
1089
1090 #if defined(ENABLE_STATISTICS)
1091         if (opt_stat)
1092                 size_lock_waiter += sizeof(lock_waiter_t);
1093 #endif
1094
1095         waiter->waiter = thread;
1096         waiter->next   = lr->waiters;
1097
1098         lr->waiters = waiter;
1099 }
1100
1101
1102 /* lock_record_remove_waiter ***************************************************
1103
1104    Remove a thread from the list of waiting threads of a lock record.
1105
1106    IN:
1107       lr...........the lock record
1108       t............the current thread
1109
1110    PRE-CONDITION:
1111       The current thread must be the owner of the lock record.
1112    
1113 *******************************************************************************/
1114
1115 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
1116 {
1117         lock_waiter_t **link;
1118         lock_waiter_t  *w;
1119
1120         link = &(lr->waiters);
1121
1122         while ((w = *link)) {
1123                 if (w->waiter == thread) {
1124                         *link = w->next;
1125
1126                         /* free the waiter data structure */
1127
1128                         FREE(w, lock_waiter_t);
1129
1130 #if defined(ENABLE_STATISTICS)
1131                         if (opt_stat)
1132                                 size_lock_waiter -= sizeof(lock_waiter_t);
1133 #endif
1134
1135                         return;
1136                 }
1137
1138                 link = &(w->next);
1139         }
1140
1141         /* this should never happen */
1142
1143         vm_abort("lock_record_remove_waiter: waiting thread not found in list of waiters\n");
1144 }
1145
1146
1147 /* lock_record_wait ************************************************************
1148
1149    Wait on a lock record for a given (maximum) amount of time.
1150
1151    IN:
1152       t............the current thread
1153           lr...........the lock record
1154           millis.......milliseconds of timeout
1155           nanos........nanoseconds of timeout
1156
1157    PRE-CONDITION:
1158       The current thread must be the owner of the lock record.
1159           This is NOT checked by this function!
1160    
1161 *******************************************************************************/
1162
static void lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
{
	s4   lockcount;
	bool wasinterrupted;

	/* { the thread t owns the fat lock record lr on the object o } */

	/* register us as waiter for this object */

	/* NOTE: we must be on the waiter list BEFORE releasing the record, */
	/* so a notify between our exit and our sleep cannot miss us        */

	lock_record_add_waiter(lr, thread);

	/* remember the old lock count */

	lockcount = lr->count;

	/* unlock this record */

	/* the recursion count is zeroed because lock_record_exit releases */
	/* the mutex exactly once; the saved count is restored afterwards  */

	lr->count = 0;
	lock_record_exit(thread, lr);

	/* wait until notified/interrupted/timed out */

	/* NOTE(review): millis == 0 && nanos == 0 presumably means "wait */
	/* without timeout" — confirm in threads_wait_with_timeout_relative */

	wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);

	/* re-enter the monitor */

	lock_record_enter(thread, lr);

	/* remove us from the list of waiting threads */

	lock_record_remove_waiter(lr, thread);

	/* restore the old lock count */

	lr->count = lockcount;

	/* if we have been interrupted, throw the appropriate exception */

	if (wasinterrupted)
		exceptions_throw_interruptedexception();
}
1204
1205
1206 /* lock_monitor_wait ***********************************************************
1207
1208    Wait on an object for a given (maximum) amount of time.
1209
1210    IN:
1211       t............the current thread
1212           o............the object
1213           millis.......milliseconds of timeout
1214           nanos........nanoseconds of timeout
1215
1216    PRE-CONDITION:
1217       The current thread must be the owner of the object's monitor.
1218    
1219 *******************************************************************************/
1220
1221 static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nanos)
1222 {
1223         ptrint         lockword;
1224         lock_record_t *lr;
1225
1226         lockword = (ptrint) o->monitorPtr;
1227
1228         /* check if we own this monitor */
1229         /* We don't have to worry about stale values here, as any stale value */
1230         /* will fail this check.                                              */
1231
1232         if (IS_FAT_LOCK(lockword)) {
1233
1234                 lr = GET_FAT_LOCK(lockword);
1235
1236                 if (lr->owner != t) {
1237                         exceptions_throw_illegalmonitorstateexception();
1238                         return;
1239                 }
1240         }
1241         else {
1242                 /* it's a thin lock */
1243
1244                 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1245                         exceptions_throw_illegalmonitorstateexception();
1246                         return;
1247                 }
1248
1249                 /* inflate this lock */
1250                 lr = lock_hashtable_get_lock_record(t, o);
1251                 lock_record_enter(t, lr);
1252                 lock_inflate(t, o, lr);
1253         }
1254
1255         /* { the thread t owns the fat lock record lr on the object o } */
1256
1257         lock_record_wait(t, lr, millis, nanos);
1258 }
1259
1260
1261 /* lock_record_notify **********************************************************
1262
1263    Notify one thread or all threads waiting on the given lock record.
1264
1265    IN:
1266       t............the current thread
1267           lr...........the lock record
1268           one..........if true, only notify one thread
1269
1270    PRE-CONDITION:
1271       The current thread must be the owner of the lock record.
1272           This is NOT checked by this function!
1273    
1274 *******************************************************************************/
1275
1276 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1277 {
1278         lock_waiter_t *waiter;
1279         threadobject *waitingthread;
1280
1281         /* { the thread t owns the fat lock record lr on the object o } */
1282
1283         /* for each waiter: */
1284
1285         for (waiter = lr->waiters; waiter; waiter = waiter->next) {
1286
1287                 /* signal the waiting thread */
1288
1289                 waitingthread = waiter->waiter;
1290
1291                 pthread_mutex_lock(&waitingthread->waitmutex);
1292                 if (waitingthread->sleeping)
1293                         pthread_cond_signal(&waitingthread->waitcond);
1294                 waitingthread->signaled = true;
1295                 pthread_mutex_unlock(&waitingthread->waitmutex);
1296
1297                 /* if we should only wake one, we are done */
1298
1299                 if (one)
1300                         break;
1301         }
1302 }
1303
1304
1305 /* lock_monitor_notify *********************************************************
1306
1307    Notify one thread or all threads waiting on the given object.
1308
1309    IN:
1310       t............the current thread
1311           o............the object
1312           one..........if true, only notify one thread
1313
1314    PRE-CONDITION:
1315       The current thread must be the owner of the object's monitor.
1316    
1317 *******************************************************************************/
1318
1319 static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
1320 {
1321         ptrint lockword;
1322         lock_record_t *lr;
1323
1324         lockword = (ptrint) o->monitorPtr;
1325
1326         /* check if we own this monitor */
1327         /* We don't have to worry about stale values here, as any stale value */
1328         /* will fail this check.                                              */
1329
1330         if (IS_FAT_LOCK(lockword)) {
1331
1332                 lr = GET_FAT_LOCK(lockword);
1333
1334                 if (lr->owner != t) {
1335                         exceptions_throw_illegalmonitorstateexception();
1336                         return;
1337                 }
1338         }
1339         else {
1340                 /* it's a thin lock */
1341
1342                 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1343                         exceptions_throw_illegalmonitorstateexception();
1344                         return;
1345                 }
1346
1347                 /* inflate this lock */
1348                 lr = lock_hashtable_get_lock_record(t, o);
1349                 lock_record_enter(t, lr);
1350                 lock_inflate(t, o, lr);
1351         }
1352
1353         /* { the thread t owns the fat lock record lr on the object o } */
1354
1355         lock_record_notify(t, lr, one);
1356 }
1357
1358
1359
1360 /*============================================================================*/
/* INQUIRY FUNCTIONS                                                          */
1362 /*============================================================================*/
1363
1364
1365 /* lock_is_held_by_current_thread **********************************************
1366
1367    Return true if the current thread owns the monitor of the given object.
1368
1369    IN:
1370           o............the object
1371
1372    RETURN VALUE:
1373       true, if the current thread holds the lock of this object.
1374    
1375 *******************************************************************************/
1376
1377 bool lock_is_held_by_current_thread(java_objectheader *o)
1378 {
1379         ptrint        lockword;
1380         threadobject *t;
1381
1382         /* check if we own this monitor */
1383         /* We don't have to worry about stale values here, as any stale value */
1384         /* will fail this check.                                              */
1385
1386         lockword = (ptrint) o->monitorPtr;
1387         t = THREADOBJECT;
1388
1389         if (IS_FAT_LOCK(lockword)) {
1390                 lock_record_t *lr;
1391
1392                 /* it's a fat lock */
1393                 lr = GET_FAT_LOCK(lockword);
1394
1395                 return (lr->owner == t);
1396         }
1397         else {
1398                 /* it's a thin lock */
1399
1400                 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1401         }
1402 }
1403
1404
1405
1406 /*============================================================================*/
1407 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD                              */
1408 /*============================================================================*/
1409
1410
1411 /* lock_wait_for_object ********************************************************
1412
1413    Wait for the given object.
1414
1415    IN:
1416           o............the object
1417           millis.......milliseconds to wait
1418           nanos........nanoseconds to wait
1419    
1420 *******************************************************************************/
1421
1422 void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
1423 {
1424         threadobject *thread;
1425
1426         thread = THREADOBJECT;
1427
1428         lock_monitor_wait(thread, o, millis, nanos);
1429 }
1430
1431
1432 /* lock_notify_object **********************************************************
1433
1434    Notify one thread waiting on the given object.
1435
1436    IN:
1437           o............the object
1438    
1439 *******************************************************************************/
1440
1441 void lock_notify_object(java_objectheader *o)
1442 {
1443         threadobject *thread;
1444
1445         thread = THREADOBJECT;
1446
1447         lock_monitor_notify(thread, o, true);
1448 }
1449
1450
1451 /* lock_notify_all_object ******************************************************
1452
1453    Notify all threads waiting on the given object.
1454
1455    IN:
1456           o............the object
1457    
1458 *******************************************************************************/
1459
1460 void lock_notify_all_object(java_objectheader *o)
1461 {
1462         threadobject *thread;
1463
1464         thread = THREADOBJECT;
1465
1466         lock_monitor_notify(thread, o, false);
1467 }
1468
1469
1470 /*
1471  * These are local overrides for various environment variables in Emacs.
1472  * Please do not remove this and leave it at the end of the file, where
1473  * Emacs will automagically detect them.
1474  * ---------------------------------------------------------------------
1475  * Local variables:
1476  * mode: c
1477  * indent-tabs-mode: t
1478  * c-basic-offset: 4
1479  * tab-width: 4
1480  * End:
1481  * vim:noexpandtab:sw=4:ts=4:
1482  */