/* src/threads/native/lock.c - lock implementation

   Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
   C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
   E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
   J. Wenninger, Institut f. Computersprachen - TU Wien

   This file is part of CACAO.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2, or (at
   your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   $Id: threads.c 4903 2006-05-11 12:48:43Z edwin $

*/


#include "config.h"

#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <sys/time.h>
#include <pthread.h>

#include "vm/types.h"

#include "mm/memory.h"

#include "threads/native/lock.h"
#include "threads/native/threads.h"

#include "vm/global.h"
#include "vm/exceptions.h"
#include "vm/stringlocal.h"
#include "vm/vm.h"

#include "vmcore/options.h"

#if defined(ENABLE_STATISTICS)
# include "vmcore/statistics.h"
#endif

/* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */

#include "arch.h"

/* includes for atomic instructions: */

#if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
#include "threads/native/generic-primitives.h"
#else
#include "machine-instr.h"
#endif

#if defined(ENABLE_JVMTI)
#include "native/jvmti/cacaodbg.h"
#endif


/******************************************************************************/
/* DEBUGGING MACROS                                                           */
/******************************************************************************/

/* #define LOCK_VERBOSE */

#if defined(LOCK_VERBOSE)
#define LOCK_LOG(args)  do { printf args; fflush(stdout); } while (0)
#else
#define LOCK_LOG(args)
#endif


/******************************************************************************/
/* MACROS                                                                     */
/******************************************************************************/

/* number of lock records in the first pool allocated for a thread */
#define LOCK_INITIAL_LOCK_RECORDS 8

#define LOCK_INITIAL_HASHTABLE_SIZE  1613  /* a prime in the middle between 1024 and 2048 */

#define LOCK_HASH(obj)  ((ptrint)(obj))

#define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
    ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))

/* CAUTION: oldvalue is evaluated twice! */
#define COMPARE_AND_SWAP_SUCCEEDS(address, oldvalue, newvalue) \
    (compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)) == (long)(oldvalue))

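/* Usage sketch (illustrative only, not compiled): the two CAS variants
   above in a typical pattern.  Because COMPARE_AND_SWAP_SUCCEEDS expands
   its oldvalue argument twice, all arguments must be side-effect free. */

#if 0
static void cas_usage_example(ptrint *word, ptrint oldval, ptrint newval)
{
    /* variant returning the previous value */
    if (COMPARE_AND_SWAP_OLD_VALUE(word, oldval, newval) == oldval) {
        /* success: *word was oldval and is now newval */
    }

    /* boolean variant; oldval must have no side effects (no calls, no ++) */
    if (COMPARE_AND_SWAP_SUCCEEDS(word, oldval, newval)) {
        /* success */
    }
}
#endif
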

/******************************************************************************/
/* MACROS FOR THE FLAT LOCK CONTENTION BIT                                    */
/******************************************************************************/

#define LOCK_SET_FLC_BIT(obj)    ((obj)->hdrflags |= HDRFLAG_FLC)
#define LOCK_CLEAR_FLC_BIT(obj)  ((obj)->hdrflags &= ~ HDRFLAG_FLC)
#define LOCK_TEST_FLC_BIT(obj)   ((obj)->hdrflags & HDRFLAG_FLC)


/******************************************************************************/
/* MACROS FOR THIN/FAT LOCKS                                                  */
/******************************************************************************/

/* We use a variant of the tasuki locks described in the paper
 *
 *     Tamiya Onodera, Kiyokuni Kawachiya
 *     A Study of Locking Objects with Bimodal Fields
 *     Proceedings of the ACM OOPSLA '99, pp. 223-237
 *     1999
 *
 * The underlying thin locks are a variant of the thin locks described in
 *
 *     Bacon, Konuru, Murthy, Serrano
 *     Thin Locks: Featherweight Synchronization for Java
 *         Proceedings of the ACM Conference on Programming Language Design and
 *         Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
 *         June 1998
 *
 * In thin lock mode the lockword (monitorPtr) looks like this:
 *
 *     ,----------------------,-----------,---,
 *     |      thread ID       |   count   | 0 |
 *     `----------------------'-----------'---´
 *
 *     thread ID......the 'index' of the owning thread, or 0
 *     count..........number of times the lock has been entered minus 1
 *     0..............the shape bit is 0 in thin lock mode
 *
 * In fat lock mode it is basically a lock_record_t *:
 *
 *     ,----------------------------------,---,
 *     |    lock_record_t * (without LSB) | 1 |
 *     `----------------------------------'---´
 *
 *     1..............the shape bit is 1 in fat lock mode
 */

#if SIZEOF_VOID_P == 8
#define THIN_LOCK_WORD_SIZE    64
#else
#define THIN_LOCK_WORD_SIZE    32
#endif

#define THIN_LOCK_SHAPE_BIT    0x01

#define THIN_UNLOCKED          0

#define THIN_LOCK_COUNT_SHIFT  1
#define THIN_LOCK_COUNT_SIZE   8
#define THIN_LOCK_COUNT_INCR   (1 << THIN_LOCK_COUNT_SHIFT)
#define THIN_LOCK_COUNT_MAX    ((1 << THIN_LOCK_COUNT_SIZE) - 1)
#define THIN_LOCK_COUNT_MASK   (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)

#define THIN_LOCK_TID_SHIFT    (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
#define THIN_LOCK_TID_SIZE     (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)

#define IS_THIN_LOCK(lockword)  (!((lockword) & THIN_LOCK_SHAPE_BIT))
#define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)

#define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
#define MAKE_FAT_LOCK(ptr)      ((ptrint)(ptr) | THIN_LOCK_SHAPE_BIT)

#define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)

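/* Decoding sketch (illustrative only, not compiled): how the macros above
   take a lock word apart.  A clear LSB means a thin lock carrying the
   owner's pre-computed thread pattern plus a recursion count; a set LSB
   means the word is a pointer to a fat lock record. */

#if 0
static void lockword_decode_example(ptrint lockword)
{
    if (IS_THIN_LOCK(lockword)) {
        ptrint tid   = lockword >> THIN_LOCK_TID_SHIFT;
        ptrint count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;

        /* tid == 0 means unlocked; count is "times entered minus 1" */
        (void) tid; (void) count;
    }
    else {
        lock_record_t *lr = GET_FAT_LOCK(lockword);
        (void) lr;
    }
}
#endif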

/******************************************************************************/
/* GLOBAL VARIABLES                                                           */
/******************************************************************************/

/* global lock record pool list header */
lock_record_pool_t *lock_global_pool;

/* mutex for synchronizing access to the global pool */
pthread_mutex_t lock_global_pool_lock;

/* hashtable mapping objects to lock records */
static lock_hashtable_t lock_hashtable;


/******************************************************************************/
/* PROTOTYPES                                                                 */
/******************************************************************************/

static void lock_hashtable_init(void);
static lock_record_t * lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o);

static lock_record_t * lock_record_alloc(threadobject *t);

static void lock_record_enter(threadobject *t, lock_record_t *lr);
static void lock_record_exit(threadobject *t, lock_record_t *lr);
static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);


/*============================================================================*/
/* INITIALIZATION OF DATA STRUCTURES                                          */
/*============================================================================*/


/* lock_init *******************************************************************

   Initialize global data for locking.

*******************************************************************************/

void lock_init(void)
{
    pthread_mutex_init(&lock_global_pool_lock, NULL);

    lock_hashtable_init();
}


/* lock_record_init ************************************************************

   Initialize a lock record.

   IN:
      r............the lock record to initialize
      t............the requesting thread (the owner field itself starts
                   out NULL and is only set when the record is entered)

*******************************************************************************/

static void lock_record_init(lock_record_t *r, threadobject *t)
{
    r->owner = NULL;
    r->count = 0;
    r->waiters = NULL;

#if !defined(NDEBUG)
    r->nextfree = NULL;
#endif

    pthread_mutex_init(&(r->mutex), NULL);
}


/* lock_init_execution_env *****************************************************

   Initialize the execution environment for a thread.

   IN:
      thread.......the thread

*******************************************************************************/

void lock_init_execution_env(threadobject *thread)
{
    thread->ee.firstfree = NULL;
    thread->ee.lockrecordpools = NULL;
    thread->ee.lockrecordcount = 0;
}



/* lock_pre_compute_thinlock ***************************************************

   Pre-compute the thin lock value for a thread index.

   IN:
      index........the thread index (>= 1)

   RETURN VALUE:
      the thin lock value for this thread index

*******************************************************************************/

ptrint lock_pre_compute_thinlock(s4 index)
{
    return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
}
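
/* Worked example (illustrative): with THIN_LOCK_COUNT_SIZE == 8 and
   THIN_LOCK_COUNT_SHIFT == 1, THIN_LOCK_TID_SHIFT is 9.  Thread index 3
   therefore gets the pre-computed thin lock value 3 << 9 == 0x600:
   thread ID 3, count 0, shape bit 0. */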



/*============================================================================*/
/* LOCK RECORD MANAGEMENT                                                     */
/*============================================================================*/


/* lock_record_alloc_new_pool **************************************************

   Get a new lock record pool from the memory allocator.

   IN:
      thread.......the thread that will own the lock records
      size.........number of lock records in the pool to allocate

   RETURN VALUE:
      the new lock record pool, with initialized lock records

*******************************************************************************/

static lock_record_pool_t *lock_record_alloc_new_pool(threadobject *thread, int size)
{
    lock_record_pool_t *pool;
    s4                  i;

    /* get the pool from the memory allocator */

    pool = mem_alloc(sizeof(lock_record_pool_header_t)
                   + sizeof(lock_record_t) * size);

#if defined(ENABLE_STATISTICS)
    if (opt_stat)
        size_lock_record_pool += sizeof(lock_record_pool_header_t) +
            sizeof(lock_record_t) * size;
#endif

    /* initialize the pool header */

    pool->header.size = size;

    /* initialize the individual lock records */

    for (i = 0; i < size; i++) {
        lock_record_init(&pool->lr[i], thread);

        pool->lr[i].nextfree = &pool->lr[i + 1];
    }

    /* terminate free list */

    pool->lr[i - 1].nextfree = NULL;

    return pool;
}
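
/* Note (illustrative): after the function above returns, the pool's records
   form a singly linked free list lr[0] -> lr[1] -> ... -> lr[size-1] -> NULL.
   The loop first points the last record past the array; the final assignment
   corrects it to NULL. */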


/* lock_record_alloc_pool ******************************************************

   Allocate a lock record pool. The pool is either taken from the global free
   list or requested from the memory allocator.

   IN:
      thread.......the thread that will own the lock records
      size.........number of lock records in the pool to allocate

   RETURN VALUE:
      the new lock record pool, with initialized lock records

*******************************************************************************/

static lock_record_pool_t *lock_record_alloc_pool(threadobject *t, int size)
{
    pthread_mutex_lock(&lock_global_pool_lock);

    if (lock_global_pool != NULL) {
        int i;
        lock_record_pool_t *pool;

        /* pop a pool from the global freelist */

        pool             = lock_global_pool;
        lock_global_pool = pool->header.next;

        pthread_mutex_unlock(&lock_global_pool_lock);

        /* re-initialize owner and freelist chaining */

        for (i = 0; i < pool->header.size; i++) {
            pool->lr[i].owner    = NULL;
            pool->lr[i].nextfree = &pool->lr[i + 1];
        }
        pool->lr[i - 1].nextfree = NULL;

        return pool;
    }

    pthread_mutex_unlock(&lock_global_pool_lock);

    /* we have to get a new pool from the allocator */

    return lock_record_alloc_new_pool(t, size);
}


/* lock_record_free_pools ******************************************************

   Free the lock record pools in the given linked list. The pools are inserted
   into the global freelist.

   IN:
      pool.........list header

*******************************************************************************/

void lock_record_free_pools(lock_record_pool_t *pool)
{
    lock_record_pool_header_t *last;

    assert(false); /* XXX this function does not match the new locking */
                   /*     algorithm. We must find another way to free  */
                   /*     unused lock records.                         */

    if (pool == NULL)
        return;

    pthread_mutex_lock(&lock_global_pool_lock);

    /* find the last pool in the list */

    last = &pool->header;

    while (last->next)
        last = &last->next->header;

    /* chain it to the lock_global_pool freelist */

    last->next = lock_global_pool;

    /* insert the freed pools into the freelist */

    lock_global_pool = pool;

    pthread_mutex_unlock(&lock_global_pool_lock);
}


/* lock_record_alloc ***********************************************************

   Allocate a lock record which is owned by the current thread.

   IN:
      t............the current thread

*******************************************************************************/

static lock_record_t *lock_record_alloc(threadobject *t)
{
    lock_record_t *r;

    assert(t);
    r = t->ee.firstfree;

    if (!r) {
        int poolsize;
        lock_record_pool_t *pool;

        /* get a new pool; the per-thread pool size doubles each time */

        poolsize = t->ee.lockrecordcount ? t->ee.lockrecordcount * 2
                                         : LOCK_INITIAL_LOCK_RECORDS;
        pool = lock_record_alloc_pool(t, poolsize);

        /* add it to our per-thread pool list */

        pool->header.next = t->ee.lockrecordpools;
        t->ee.lockrecordpools = pool;
        t->ee.lockrecordcount += pool->header.size;

        /* take the first record from the pool */
        r = &pool->lr[0];
    }

    /* pop the record from the freelist */

    t->ee.firstfree = r->nextfree;
#ifndef NDEBUG
    r->nextfree = NULL; /* in order to find invalid uses of nextfree */
#endif

    return r;
}


/* lock_record_recycle *********************************************************

   Recycle the given lock record. It will be inserted in the appropriate
   free list.

   IN:
      t............the owner
      r............lock record to recycle

*******************************************************************************/

static inline void lock_record_recycle(threadobject *t, lock_record_t *r)
{
    assert(t);
    assert(r);
    assert(r->owner == NULL);
    assert(r->nextfree == NULL);

    r->nextfree = t->ee.firstfree;
    t->ee.firstfree = r;
}



/*============================================================================*/
/* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS                                  */
/*============================================================================*/


/* lock_hashtable_init *********************************************************

   Initialize the global hashtable mapping objects to lock records.

*******************************************************************************/

static void lock_hashtable_init(void)
{
    pthread_mutex_init(&(lock_hashtable.mutex), NULL);

    lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
    lock_hashtable.entries = 0;
    lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);

#if defined(ENABLE_STATISTICS)
    if (opt_stat)
        size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
#endif

    MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
}


/* lock_hashtable_grow *********************************************************

   Grow the lock record hashtable to about twice its current size and
   rehash the entries.

*******************************************************************************/

/* must be called with hashtable mutex locked */
static void lock_hashtable_grow(void)
{
    u4 oldsize;
    u4 newsize;
    lock_record_t **oldtable;
    lock_record_t **newtable;
    lock_record_t *lr;
    lock_record_t *next;
    u4 i;
    u4 h;
    u4 newslot;

    /* allocate a new table */

    oldsize = lock_hashtable.size;
    newsize = oldsize * 2 + 1; /* XXX should use prime numbers */

    LOCK_LOG(("growing lock hashtable to size %d\n", newsize));

    oldtable = lock_hashtable.ptr;
    newtable = MNEW(lock_record_t *, newsize);

#if defined(ENABLE_STATISTICS)
    if (opt_stat)
        size_lock_hashtable += sizeof(lock_record_t *) * newsize;
#endif

    MZERO(newtable, lock_record_t *, newsize);

    /* rehash the entries */

    for (i = 0; i < oldsize; i++) {
        lr = oldtable[i];
        while (lr) {
            next = lr->hashlink;

            h = LOCK_HASH(lr->obj);
            newslot = h % newsize;

            lr->hashlink = newtable[newslot];
            newtable[newslot] = lr;

            lr = next;
        }
    }

    /* replace the old table */

    lock_hashtable.ptr  = newtable;
    lock_hashtable.size = newsize;

    MFREE(oldtable, lock_record_t *, oldsize);

#if defined(ENABLE_STATISTICS)
    if (opt_stat)
        size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
#endif
}
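
/* Note (illustrative): lock_hashtable_get_lock_record() below triggers this
   function once entries * 3 > size * 4, i.e. when the load factor exceeds
   4/3.  For the initial size of 1613 slots, the first growth happens when
   the 2151st record is entered and yields 1613 * 2 + 1 = 3227 slots (which
   is not prime, hence the XXX above). */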


/* lock_hashtable_get_lock_record **********************************************

   Find the lock record for the given object. If it does not exist yet,
   create it and enter it in the hashtable.

   IN:
      t.................the current thread
      o.................the object to look up

   RETURN VALUE:
      the lock record to use for this object

*******************************************************************************/

static lock_record_t *lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o)
{
    ptrint lockword;
    u4 slot;
    lock_record_t *lr;

    lockword = (ptrint) o->monitorPtr;

    if (IS_FAT_LOCK(lockword)) {
        return GET_FAT_LOCK(lockword);
    }

    /* lock the hashtable */

    pthread_mutex_lock(&(lock_hashtable.mutex));

    /* lookup the lock record in the hashtable */

    slot = LOCK_HASH(o) % lock_hashtable.size;
    lr = lock_hashtable.ptr[slot];
    while (lr) {
        if (lr->obj == o) {
            pthread_mutex_unlock(&(lock_hashtable.mutex));
            return lr;
        }

        lr = lr->hashlink;
    }

    /* not found, we must create a new one */

    lr = lock_record_alloc(t);
    lr->obj = o;
    LOCK_LOG(("thread %d allocated for %p new lr %p\n",
            t->index, (void*) o, (void*) lr));

    /* enter it in the hashtable */

    lr->hashlink = lock_hashtable.ptr[slot];
    lock_hashtable.ptr[slot] = lr;
    lock_hashtable.entries++;

    /* check whether the hashtable should grow */

    if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
        lock_hashtable_grow();
    }

    /* unlock the hashtable */

    pthread_mutex_unlock(&(lock_hashtable.mutex));

    /* return the new lock record */

    return lr;
}


/*============================================================================*/
/* OBJECT LOCK INITIALIZATION                                                 */
/*============================================================================*/


/* lock_init_object_lock *******************************************************

   Initialize the monitor pointer of the given object. The monitor gets
   initialized to an unlocked state.

*******************************************************************************/

void lock_init_object_lock(java_objectheader *o)
{
    assert(o);

    o->monitorPtr = (lock_record_t *) THIN_UNLOCKED;
    LOCK_CLEAR_FLC_BIT(o);
}


/* lock_get_initial_lock_word **************************************************

   Returns the initial (unlocked) lock word. The pointer is
   required in the code generator to set up a virtual
   java_objectheader for code patch locking.

*******************************************************************************/

lock_record_t *lock_get_initial_lock_word(void)
{
    return (lock_record_t *) THIN_UNLOCKED;
}



/*============================================================================*/
/* LOCKING ALGORITHM                                                          */
/*============================================================================*/


/* lock_record_enter ***********************************************************

   Enter the lock represented by the given lock record.

   IN:
      t.................the current thread
      lr................the lock record

*******************************************************************************/

static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
{
    pthread_mutex_lock(&(lr->mutex));
    lr->owner = t;
}


/* lock_record_exit ************************************************************

   Release the lock represented by the given lock record.

   IN:
      t.................the current thread
      lr................the lock record

   PRE-CONDITION:
      The current thread must own the lock represented by this lock record.
      This is NOT checked by this function!

*******************************************************************************/

static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
{
    lr->owner = NULL;
    pthread_mutex_unlock(&(lr->mutex));
}


/* lock_inflate ****************************************************************

   Inflate the lock of the given object. This may only be called by the
   owner of the monitor of the object.

   IN:
      t............the current thread
      o............the object of which to inflate the lock
      lr...........the lock record to install. The current thread must
                   own the lock of this lock record!

   PRE-CONDITION:
      The current thread must be the owner of this object's monitor AND
      of the lock record's lock!

*******************************************************************************/

static void lock_inflate(threadobject *t, java_objectheader *o, lock_record_t *lr)
{
    ptrint lockword;

    /* get the current lock count */

    lockword = (ptrint) o->monitorPtr;

    if (IS_FAT_LOCK(lockword)) {
        assert(GET_FAT_LOCK(lockword) == lr);
    }
    else {
        assert( LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock );

        /* copy the count from the thin lock */

        lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
    }

    LOCK_LOG(("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
            t->index, (void*) o, (long)o->monitorPtr, (int)lr->count));

    /* clear flat-lock-contention bit */

    LOCK_CLEAR_FLC_BIT(o);

    /* notify waiting threads */

    lock_record_notify(t, lr, false);

    /* install it */

    o->monitorPtr = (lock_record_t *) MAKE_FAT_LOCK(lr);
}
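
/* Worked example (illustrative): if thread index 3 holds the thin lock
   recursively twice, the lock word is
   (3 << THIN_LOCK_TID_SHIFT) | (1 << THIN_LOCK_COUNT_SHIFT) == 0x602.
   Inflation copies the count 1 into lr->count and replaces the word with
   MAKE_FAT_LOCK(lr), i.e. the lock record pointer with the shape bit set. */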


/* lock_monitor_enter **********************************************************

   Acquire the monitor of the given object. If the current thread already
   owns the monitor, the lock counter is simply increased.

   This function blocks until it can acquire the monitor.

   IN:
      t............the current thread
      o............the object of which to enter the monitor

   RETURN VALUE:
      true.........the lock has been successfully acquired
      false........an exception has been thrown

*******************************************************************************/

bool lock_monitor_enter(java_objectheader *o)
{
    threadobject *t;
    /* CAUTION: This code assumes that ptrint is unsigned! */
    ptrint        lockword;
    ptrint        thinlock;

    if (o == NULL) {
        exceptions_throw_nullpointerexception();
        return false;
    }

    t = THREADOBJECT;

    thinlock = t->thinlock;

    /* most common case: try to thin-lock an unlocked object */

    if ((lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) == THIN_UNLOCKED) {
        /* success. we locked it */
        /* The Java Memory Model requires a memory barrier here: */
        MEMORY_BARRIER();
        return true;
    }

    /* next common case: recursive lock with small recursion count.        */
    /* We don't have to worry about stale values here, as any stale value  */
    /* will indicate another thread holding the lock (or an inflated lock) */

    if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
        /* we own this monitor               */
        /* check the current recursion count */

        if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
        {
            /* the recursion count is low enough */

            o->monitorPtr = (lock_record_t *) (lockword + THIN_LOCK_COUNT_INCR);

            /* success. we locked it */
            return true;
        }
        else {
            lock_record_t *lr;

            /* recursion count overflow */

            lr = lock_hashtable_get_lock_record(t, o);
            lock_record_enter(t, lr);
            lock_inflate(t, o, lr);
            lr->count++;

            return true;
        }
    }

    /* the lock is either contended or fat */

    {
        lock_record_t *lr;

        if (IS_FAT_LOCK(lockword)) {

            lr = GET_FAT_LOCK(lockword);

            /* check for recursive entering */
            if (lr->owner == t) {
                lr->count++;
                return true;
            }

            /* acquire the mutex of the lock record */

            lock_record_enter(t, lr);

            assert(lr->count == 0);

            return true;
        }

        /****** inflation path ******/

        /* first obtain the lock record for this object */

        lr = lock_hashtable_get_lock_record(t, o);

#if defined(ENABLE_JVMTI)
        /* Monitor Contended Enter */
        jvmti_MonitorContendedEntering(false, o);
#endif

        /* enter the monitor */

        lock_record_enter(t, lr);

#if defined(ENABLE_JVMTI)
        /* Monitor Contended Entered */
        jvmti_MonitorContendedEntering(true, o);
#endif

        /* inflation loop */

        while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {
            /* Set the flat lock contention bit to let the owning thread */
            /* know that we want to be notified of unlocking.            */

            LOCK_SET_FLC_BIT(o);

            LOCK_LOG(("thread %d set flc bit on %p lr %p\n",
                    t->index, (void*) o, (void*) lr));

            /* try to lock the object */

            if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
                /* we can inflate the lock ourselves */
                LOCK_LOG(("thread %d inflating lock of %p to lr %p\n",
                        t->index, (void*) o, (void*) lr));
                lock_inflate(t, o, lr);
            }
            else {
                /* wait until another thread sees the flc bit and notifies us of unlocking */
                LOCK_LOG(("thread %d waiting for notification on %p lr %p\n",
                        t->index, (void*) o, (void*) lr));
                lock_record_wait(t, lr, 0, 0);
            }
        }

        /* we own the inflated lock now */

        return true;
    }
}
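
/* Usage sketch (illustrative only, not compiled): the enter/exit pairing
   as it would back a Java synchronized block.  The two lock_monitor_*
   functions are real; the wrapper below is hypothetical. */

#if 0
static bool synchronized_block_example(java_objectheader *o)
{
    if (!lock_monitor_enter(o))      /* false: NullPointerException thrown */
        return false;

    /* ... critical section, o's monitor is held here ... */

    return lock_monitor_exit(o);     /* false: IllegalMonitorStateException */
}
#endif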


/* lock_monitor_exit ***********************************************************

   Decrement the counter of a (currently owned) monitor. If the counter
   reaches zero, release the monitor.

   If the current thread is not the owner of the monitor, an
   IllegalMonitorStateException is thrown.

   IN:
      t............the current thread
      o............the object of which to exit the monitor

   RETURN VALUE:
      true.........everything ok,
      false........an exception has been thrown

*******************************************************************************/

bool lock_monitor_exit(java_objectheader *o)
{
    threadobject *t;
    ptrint        lockword;
    ptrint        thinlock;

    if (o == NULL) {
        exceptions_throw_nullpointerexception();
        return false;
    }

    t = THREADOBJECT;

    /* We don't have to worry about stale values here, as any stale value */
    /* will indicate that we don't own the lock.                          */

    lockword = (ptrint) o->monitorPtr;
    thinlock = t->thinlock;

    /* most common case: we release a thin lock that we hold once */

    if (lockword == thinlock) {
        /* memory barrier for Java Memory Model */
        MEMORY_BARRIER();
        o->monitorPtr = THIN_UNLOCKED;
        /* memory barrier for thin locking */
        MEMORY_BARRIER();

        /* check if there has been a flat lock contention on this object */

        if (LOCK_TEST_FLC_BIT(o)) {
            lock_record_t *lr;

            LOCK_LOG(("thread %d saw flc bit on %p %s\n",
                    t->index, (void*) o, o->vftbl->class->name->text));

            /* there has been a contention on this thin lock */

            lr = lock_hashtable_get_lock_record(t, o);

            LOCK_LOG(("thread %d for %p got lr %p\n",
                    t->index, (void*) o, (void*) lr));

            lock_record_enter(t, lr);

            if (LOCK_TEST_FLC_BIT(o)) {
                /* notify a thread that it can try to inflate the lock now */

                lock_record_notify(t, lr, true);
            }

            lock_record_exit(t, lr);
        }

        return true;
    }

    /* next common case: we release a recursive lock, count > 0 */

    if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
        o->monitorPtr = (lock_record_t *) (lockword - THIN_LOCK_COUNT_INCR);
        return true;
    }

    /* either the lock is fat, or we don't hold it at all */

    if (IS_FAT_LOCK(lockword)) {

        lock_record_t *lr;

        lr = GET_FAT_LOCK(lockword);

        /* check if we own this monitor */
        /* We don't have to worry about stale values here, as any stale value */
        /* will be != t and thus fail this check.                             */

        if (lr->owner != t) {
            exceptions_throw_illegalmonitorstateexception();
            return false;
        }

        /* { the current thread `t` owns the lock record `lr` on object `o` } */

        if (lr->count != 0) {
            /* we had locked this one recursively. just decrement, it will */
            /* still be locked. */
            lr->count--;
            return true;
        }

        /* unlock this lock record */

        lr->owner = NULL;
        pthread_mutex_unlock(&(lr->mutex));

        return true;
    }

    /* legal thin lock cases have been handled above, so this is an error */

    exceptions_throw_illegalmonitorstateexception();

    return false;
}


/* lock_record_add_waiter ******************************************************

   Add a thread to the list of waiting threads of a lock record.

   IN:
      lr...........the lock record
      thread.......the thread to add

*******************************************************************************/

static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
{
    lock_waiter_t *waiter;

    /* allocate a waiter data structure */

    waiter = NEW(lock_waiter_t);

#if defined(ENABLE_STATISTICS)
    if (opt_stat)
        size_lock_waiter += sizeof(lock_waiter_t);
#endif

    waiter->waiter = thread;
    waiter->next   = lr->waiters;

    lr->waiters = waiter;
}


/* lock_record_remove_waiter ***************************************************

   Remove a thread from the list of waiting threads of a lock record.

   IN:
      lr...........the lock record
      thread.......the thread to remove

   PRE-CONDITION:
      The current thread must be the owner of the lock record.

*******************************************************************************/

static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
{
    lock_waiter_t **link;
    lock_waiter_t  *w;

    link = &(lr->waiters);

    while ((w = *link)) {
        if (w->waiter == thread) {
            *link = w->next;

            /* free the waiter data structure */

            FREE(w, lock_waiter_t);

#if defined(ENABLE_STATISTICS)
            if (opt_stat)
                size_lock_waiter -= sizeof(lock_waiter_t);
#endif

            return;
        }

        link = &(w->next);
    }

    /* this should never happen */

    vm_abort("lock_record_remove_waiter: waiting thread not found in list of waiters\n");
}


/* lock_record_wait ************************************************************

   Wait on a lock record for a given (maximum) amount of time.

   IN:
      thread.......the current thread
      lr...........the lock record
      millis.......milliseconds of timeout
      nanos........nanoseconds of timeout

   PRE-CONDITION:
      The current thread must be the owner of the lock record.
      This is NOT checked by this function!

*******************************************************************************/

static void lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
{
    s4   lockcount;
    bool wasinterrupted;

    /* { the current thread owns the fat lock record lr } */

    /* register us as waiter for this object */

    lock_record_add_waiter(lr, thread);

    /* remember the old lock count */

    lockcount = lr->count;

    /* unlock this record */

    lr->count = 0;
    lock_record_exit(thread, lr);

    /* wait until notified/interrupted/timed out */

    wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);

    /* re-enter the monitor */

    lock_record_enter(thread, lr);

    /* remove us from the list of waiting threads */

    lock_record_remove_waiter(lr, thread);

    /* restore the old lock count */

    lr->count = lockcount;

    /* if we have been interrupted, throw the appropriate exception */

    if (wasinterrupted)
        exceptions_throw_interruptedexception();
}
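
/* Protocol sketch (illustrative): the sequence above implements waiting on
   a fat lock.  A hypothetical timeline for a waiting thread W and a
   notifying thread N:

       W: add_waiter(lr, W); count = 0; lock_record_exit(lr)
       N: lock_record_enter(lr); lock_record_notify(lr); lock_record_exit(lr)
       W: wakes from threads_wait_with_timeout_relative()
       W: lock_record_enter(lr); remove_waiter(lr, W); restore count       */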


/* lock_monitor_wait ***********************************************************

   Wait on an object for a given (maximum) amount of time.

   IN:
      t............the current thread
      o............the object
      millis.......milliseconds of timeout
      nanos........nanoseconds of timeout

   PRE-CONDITION:
      The current thread must be the owner of the object's monitor.

*******************************************************************************/

static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nanos)
{
    ptrint         lockword;
    lock_record_t *lr;

    lockword = (ptrint) o->monitorPtr;

    /* check if we own this monitor */
    /* We don't have to worry about stale values here, as any stale value */
    /* will fail this check.                                              */

    if (IS_FAT_LOCK(lockword)) {

        lr = GET_FAT_LOCK(lockword);

        if (lr->owner != t) {
            exceptions_throw_illegalmonitorstateexception();
            return;
        }
    }
    else {
        /* it's a thin lock */

        if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
            exceptions_throw_illegalmonitorstateexception();
            return;
        }

        /* inflate this lock */
        lr = lock_hashtable_get_lock_record(t, o);
        lock_record_enter(t, lr);
        lock_inflate(t, o, lr);
    }

    /* { the thread t owns the fat lock record lr on the object o } */

    lock_record_wait(t, lr, millis, nanos);
}


/* lock_record_notify **********************************************************

   Notify one thread or all threads waiting on the given lock record.

   IN:
      t............the current thread
      lr...........the lock record
      one..........if true, only notify one thread

   PRE-CONDITION:
      The current thread must be the owner of the lock record.
      This is NOT checked by this function!

*******************************************************************************/

static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
{
    lock_waiter_t *waiter;
    threadobject *waitingthread;

    /* { the thread t owns the fat lock record lr on the object o } */

    /* for each waiter: */

    for (waiter = lr->waiters; waiter; waiter = waiter->next) {

        /* signal the waiting thread */

        waitingthread = waiter->waiter;

        pthread_mutex_lock(&waitingthread->waitmutex);
        if (waitingthread->sleeping)
            pthread_cond_signal(&waitingthread->waitcond);
        waitingthread->signaled = true;
        pthread_mutex_unlock(&waitingthread->waitmutex);

        /* if we should only wake one, we are done */

        if (one)
            break;
    }
}
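
/* Note (illustrative): the signaled flag is set even when the waiter is not
   yet sleeping on its condition variable, so a notification arriving before
   the waiter blocks is presumably not lost; the waiter is expected to check
   the flag under waitmutex before going to sleep. */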


/* lock_monitor_notify *********************************************************

   Notify one thread or all threads waiting on the given object.

   IN:
      t............the current thread
      o............the object
      one..........if true, only notify one thread

   PRE-CONDITION:
      The current thread must be the owner of the object's monitor.

*******************************************************************************/

static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
{
    ptrint lockword;
    lock_record_t *lr;

    lockword = (ptrint) o->monitorPtr;

    /* check if we own this monitor */
    /* We don't have to worry about stale values here, as any stale value */
    /* will fail this check.                                              */

    if (IS_FAT_LOCK(lockword)) {

        lr = GET_FAT_LOCK(lockword);

        if (lr->owner != t) {
            exceptions_throw_illegalmonitorstateexception();
            return;
        }
    }
    else {
        /* it's a thin lock */

        if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
            exceptions_throw_illegalmonitorstateexception();
            return;
        }

        /* inflate this lock */
        lr = lock_hashtable_get_lock_record(t, o);
        lock_record_enter(t, lr);
        lock_inflate(t, o, lr);
    }

    /* { the thread t owns the fat lock record lr on the object o } */

    lock_record_notify(t, lr, one);
}



/*============================================================================*/
/* INQUIRY FUNCTIONS                                                          */
/*============================================================================*/


/* lock_is_held_by_current_thread **********************************************

   Return true if the current thread owns the monitor of the given object.

   IN:
      o............the object

   RETURN VALUE:
      true, if the current thread holds the lock of this object.

*******************************************************************************/

bool lock_is_held_by_current_thread(java_objectheader *o)
{
    ptrint        lockword;
    threadobject *t;

    /* check if we own this monitor */
    /* We don't have to worry about stale values here, as any stale value */
    /* will fail this check.                                              */

    lockword = (ptrint) o->monitorPtr;
    t = THREADOBJECT;

    if (IS_FAT_LOCK(lockword)) {
        lock_record_t *lr;

        /* it's a fat lock */
        lr = GET_FAT_LOCK(lockword);

        return (lr->owner == t);
    }
    else {
        /* it's a thin lock */

        return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
    }
}
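
/* Usage sketch (illustrative only, not compiled): a hypothetical ownership
   assertion of the kind that could back Thread.holdsLock() or internal
   sanity checks. */

#if 0
static void assert_owns_monitor_example(java_objectheader *o)
{
    assert(lock_is_held_by_current_thread(o));
}
#endif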



/*============================================================================*/
/* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD                              */
/*============================================================================*/


/* lock_wait_for_object ********************************************************

   Wait for the given object.

   IN:
      o............the object
      millis.......milliseconds to wait
      nanos........nanoseconds to wait

*******************************************************************************/

void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
{
    threadobject *thread;

    thread = THREADOBJECT;

    lock_monitor_wait(thread, o, millis, nanos);
}


/* lock_notify_object **********************************************************

   Notify one thread waiting on the given object.

   IN:
      o............the object

*******************************************************************************/

void lock_notify_object(java_objectheader *o)
{
    threadobject *thread;

    thread = THREADOBJECT;

    lock_monitor_notify(thread, o, true);
}


/* lock_notify_all_object ******************************************************

   Notify all threads waiting on the given object.

   IN:
      o............the object

*******************************************************************************/

void lock_notify_all_object(java_objectheader *o)
{
    threadobject *thread;

    thread = THREADOBJECT;

    lock_monitor_notify(thread, o, false);
}
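
/* Mapping note (illustrative): these wrappers correspond to the Java
   methods Object.wait(millis, nanos), Object.notify() and
   Object.notifyAll(), invoked on behalf of the current thread. */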


/*
 * These are local overrides for various environment variables in Emacs.
 * Please do not remove this and leave it at the end of the file, where
 * Emacs will automagically detect them.
 * ---------------------------------------------------------------------
 * Local variables:
 * mode: c
 * indent-tabs-mode: t
 * c-basic-offset: 4
 * tab-width: 4
 * End:
 * vim:noexpandtab:sw=4:ts=4:
 */