Merge (tasuki lock modification backout)
[cacao.git] / src / threads / native / lock.c
1 /* src/threads/native/lock.c - lock implementation
2
3    Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
4    C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5    E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6    J. Wenninger, Institut f. Computersprachen - TU Wien
7
8    This file is part of CACAO.
9
10    This program is free software; you can redistribute it and/or
11    modify it under the terms of the GNU General Public License as
12    published by the Free Software Foundation; either version 2, or (at
13    your option) any later version.
14
15    This program is distributed in the hope that it will be useful, but
16    WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18    General Public License for more details.
19
20    You should have received a copy of the GNU General Public License
21    along with this program; if not, write to the Free Software
22    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23    02110-1301, USA.
24
25 */
26
27
28 #include "config.h"
29
30 #include <assert.h>
31 #include <stdint.h>
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <sys/time.h>
35 #include <pthread.h>
36
37 #include "vm/types.h"
38
39 #include "mm/memory.h"
40
41 #include "native/llni.h"
42
43 #include "threads/lock-common.h"
44
45 #include "threads/native/lock.h"
46 #include "threads/native/threads.h"
47
48 #include "toolbox/list.h"
49
50 #include "vm/global.h"
51 #include "vm/exceptions.h"
52 #include "vm/finalizer.h"
53 #include "vm/stringlocal.h"
54 #include "vm/vm.h"
55
56 #include "vmcore/options.h"
57
58 #if defined(ENABLE_STATISTICS)
59 # include "vmcore/statistics.h"
60 #endif
61
62 #if defined(ENABLE_VMLOG)
63 #include <vmlog_cacao.h>
64 #endif
65
66 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
67
68 #include "arch.h"
69
70 /* includes for atomic instructions: */
71
72 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
73 #include "threads/native/generic-primitives.h"
74 #else
75 #include "machine-instr.h"
76 #endif
77
78 #if defined(ENABLE_JVMTI)
79 #include "native/jvmti/cacaodbg.h"
80 #endif
81
82 #if defined(ENABLE_GC_BOEHM)
83 # include "mm/boehm-gc/include/gc.h"
84 #endif
85
86
87 /* debug **********************************************************************/
88
89 #if !defined(NDEBUG)
90 # define DEBUGLOCKS(format) \
91     do { \
92         if (opt_DebugLocks) { \
93             log_println format; \
94         } \
95     } while (0)
96 #else
97 # define DEBUGLOCKS(format)
98 #endif
99
100
101 /******************************************************************************/
102 /* MACROS                                                                     */
103 /******************************************************************************/
104
105 /* number of lock records in the first pool allocated for a thread */
106 #define LOCK_INITIAL_LOCK_RECORDS 8
107
108 #define LOCK_INITIAL_HASHTABLE_SIZE  1613  /* a prime in the middle between 1024 and 2048 */
109
110 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
111         ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
112
113
114 /******************************************************************************/
115 /* MACROS FOR THE FLAT LOCK CONTENTION BIT                                    */
116 /******************************************************************************/
117
118 #define LOCK_SET_FLC_BIT(obj)    ((obj)->hdrflags |= HDRFLAG_FLC)
119 #define LOCK_CLEAR_FLC_BIT(obj)  ((obj)->hdrflags &= ~ HDRFLAG_FLC)
120 #define LOCK_TEST_FLC_BIT(obj)   ((obj)->hdrflags & HDRFLAG_FLC)
121
122
123 /******************************************************************************/
124 /* MACROS FOR THIN/FAT LOCKS                                                  */
125 /******************************************************************************/
126
127 /* We use a variant of the tasuki locks described in the paper
128  *     
129  *     Tamiya Onodera, Kiyokuni Kawachiya
130  *     A Study of Locking Objects with Bimodal Fields
131  *     Proceedings of the ACM OOPSLA '99, pp. 223-237
132  *     1999
133  *
134  * The underlying thin locks are a variant of the thin locks described in
135  * 
136  *     Bacon, Konuru, Murthy, Serrano
137  *     Thin Locks: Featherweight Synchronization for Java
138  *         Proceedings of the ACM Conference on Programming Language Design and 
139  *         Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
140  *         June 1998
141  *
142  * In thin lock mode the lockword looks like this:
143  *
144  *     ,----------------------,-----------,---,
145  *     |      thread ID       |   count   | 0 |
146  *     `----------------------'-----------'---´
147  *
148  *     thread ID......the 'index' of the owning thread, or 0
149  *     count..........number of times the lock has been entered minus 1
150  *     0..............the shape bit is 0 in thin lock mode
151  *
152  * In fat lock mode it is basically a lock_record_t *:
153  *
154  *     ,----------------------------------,---,
155  *     |    lock_record_t * (without LSB) | 1 |
156  *     `----------------------------------'---´
157  *
158  *     1..............the shape bit is 1 in fat lock mode
159  */
160
161 #if SIZEOF_VOID_P == 8
162 #define THIN_LOCK_WORD_SIZE    64
163 #else
164 #define THIN_LOCK_WORD_SIZE    32
165 #endif
166
167 #define THIN_LOCK_SHAPE_BIT    0x01
168
169 #define THIN_UNLOCKED          0
170
171 #define THIN_LOCK_COUNT_SHIFT  1
172 #define THIN_LOCK_COUNT_SIZE   8
173 #define THIN_LOCK_COUNT_INCR   (1 << THIN_LOCK_COUNT_SHIFT)
174 #define THIN_LOCK_COUNT_MAX    ((1 << THIN_LOCK_COUNT_SIZE) - 1)
175 #define THIN_LOCK_COUNT_MASK   (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
176
177 #define THIN_LOCK_TID_SHIFT    (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
178 #define THIN_LOCK_TID_SIZE     (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
179
180 #define IS_THIN_LOCK(lockword)  (!((lockword) & THIN_LOCK_SHAPE_BIT))
181 #define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)
182
183 #define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
184 #define MAKE_FAT_LOCK(ptr)      ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)
185
186 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
187
188
189 /* global variables ***********************************************************/
190
191 /* hashtable mapping objects to lock records */
192 static lock_hashtable_t lock_hashtable;
193
194
195 /******************************************************************************/
196 /* PROTOTYPES                                                                 */
197 /******************************************************************************/
198
199 static void lock_hashtable_init(void);
200
201 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
202 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
203 static void lock_record_enter(threadobject *t, lock_record_t *lr);
204 static void lock_record_exit(threadobject *t, lock_record_t *lr);
205 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
206 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
207
208
209 /*============================================================================*/
210 /* INITIALIZATION OF DATA STRUCTURES                                          */
211 /*============================================================================*/
212
213
214 /* lock_init *******************************************************************
215
216    Initialize global data for locking.
217
218 *******************************************************************************/
219
void lock_init(void)
{
	/* Set up the global hashtable that maps objects to lock records. */

	lock_hashtable_init();

#if defined(ENABLE_VMLOG)
	/* Let the vmlog tracing facility hook into the lock subsystem. */
	vmlog_cacao_init_lock();
#endif
}
230
231
232 /* lock_pre_compute_thinlock ***************************************************
233
234    Pre-compute the thin lock value for a thread index.
235
236    IN:
237       index........the thead index (>= 1)
238
239    RETURN VALUE:
240       the thin lock value for this thread index
241
242 *******************************************************************************/
243
244 ptrint lock_pre_compute_thinlock(s4 index)
245 {
246         return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
247 }
248
249
250 /* lock_record_new *************************************************************
251
252    Allocate a lock record.
253
254 *******************************************************************************/
255
256 static lock_record_t *lock_record_new(void)
257 {
258         lock_record_t *lr;
259
260         /* allocate the data structure on the C heap */
261
262         lr = NEW(lock_record_t);
263
264 #if defined(ENABLE_STATISTICS)
265         if (opt_stat)
266                 size_lock_record += sizeof(lock_record_t);
267 #endif
268
269         /* initialize the members */
270
271         lr->object  = NULL;
272         lr->owner   = NULL;
273         lr->count   = 0;
274         lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));
275
276 #if defined(ENABLE_GC_CACAO)
277         /* register the lock object as weak reference with the GC */
278
279         gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
280 #endif
281
282         /* initialize the mutex */
283
284         pthread_mutex_init(&(lr->mutex), NULL);
285
286         DEBUGLOCKS(("[lock_record_new   : lr=%p]", (void *) lr));
287
288         return lr;
289 }
290
291
292 /* lock_record_free ************************************************************
293
294    Free a lock record.
295
296    IN:
297        lr....lock record to free
298
299 *******************************************************************************/
300
301 static void lock_record_free(lock_record_t *lr)
302 {
303         DEBUGLOCKS(("[lock_record_free  : lr=%p]", (void *) lr));
304
305         /* Destroy the mutex. */
306
307         pthread_mutex_destroy(&(lr->mutex));
308
309 #if defined(ENABLE_GC_CACAO)
310         /* unregister the lock object reference with the GC */
311
312         gc_weakreference_unregister(&(lr->object));
313 #endif
314
315         /* Free the waiters list. */
316
317         list_free(lr->waiters);
318
319         /* Free the data structure. */
320
321         FREE(lr, lock_record_t);
322
323 #if defined(ENABLE_STATISTICS)
324         if (opt_stat)
325                 size_lock_record -= sizeof(lock_record_t);
326 #endif
327 }
328
329
330 /*============================================================================*/
331 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS                                  */
332 /*============================================================================*/
333
334 /* lock_hashtable_init *********************************************************
335
336    Initialize the global hashtable mapping objects to lock records.
337
338 *******************************************************************************/
339
static void lock_hashtable_init(void)
{
	/* Protects all reads/writes of lock_hashtable fields. */

	pthread_mutex_init(&(lock_hashtable.mutex), NULL);

	lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
	lock_hashtable.entries = 0;
	lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);

#if defined(ENABLE_STATISTICS)
	if (opt_stat)
		size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
#endif

	/* All buckets start out empty (NULL chain heads). */

	MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
}
355
356
357 /* lock_hashtable_grow *********************************************************
358
359    Grow the lock record hashtable to about twice its current size and
360    rehash the entries.
361
362 *******************************************************************************/
363
364 /* must be called with hashtable mutex locked */
365 static void lock_hashtable_grow(void)
366 {
367         u4 oldsize;
368         u4 newsize;
369         lock_record_t **oldtable;
370         lock_record_t **newtable;
371         lock_record_t *lr;
372         lock_record_t *next;
373         u4 i;
374         u4 h;
375         u4 newslot;
376
377         /* allocate a new table */
378
379         oldsize = lock_hashtable.size;
380         newsize = oldsize*2 + 1; /* XXX should use prime numbers */
381
382         DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
383
384         oldtable = lock_hashtable.ptr;
385         newtable = MNEW(lock_record_t *, newsize);
386
387 #if defined(ENABLE_STATISTICS)
388         if (opt_stat)
389                 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
390 #endif
391
392         MZERO(newtable, lock_record_t *, newsize);
393
394         /* rehash the entries */
395
396         for (i = 0; i < oldsize; i++) {
397                 lr = oldtable[i];
398                 while (lr) {
399                         next = lr->hashlink;
400
401                         h = heap_hashcode(lr->object);
402                         newslot = h % newsize;
403
404                         lr->hashlink = newtable[newslot];
405                         newtable[newslot] = lr;
406
407                         lr = next;
408                 }
409         }
410
411         /* replace the old table */
412
413         lock_hashtable.ptr  = newtable;
414         lock_hashtable.size = newsize;
415
416         MFREE(oldtable, lock_record_t *, oldsize);
417
418 #if defined(ENABLE_STATISTICS)
419         if (opt_stat)
420                 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
421 #endif
422 }
423
424
425 /* lock_hashtable_cleanup ******************************************************
426
427    Removes (and frees) lock records which have a cleared object reference
428    from the hashtable. The locked object was reclaimed by the GC.
429
430 *******************************************************************************/
431
#if defined(ENABLE_GC_CACAO)
void lock_hashtable_cleanup(void)
{
	lock_record_t *lr;
	lock_record_t *prev;
	lock_record_t *next;
	u4             i;

	/* lock the hashtable */

	pthread_mutex_lock(&(lock_hashtable.mutex));

	/* Walk every chain and remove records whose weak object reference
	   was cleared by the GC (i.e. the locked object was reclaimed). */

	for (i = 0; i < lock_hashtable.size; i++) {
		lr   = lock_hashtable.ptr[i];
		prev = NULL;

		while (lr) {
			next = lr->hashlink;

			if (lr->object == NULL) {

				/* unlink the lock record from the hashtable */

				if (prev == NULL)
					lock_hashtable.ptr[i] = next;
				else
					prev->hashlink = next;

				/* Keep the entry count in sync, otherwise the
				   grow-threshold check in lock_hashtable_get uses a
				   stale (too large) count. */

				lock_hashtable.entries--;

				/* free the lock record */

				lock_record_free(lr);

			} else {
				prev = lr;
			}

			lr = next;
		}
	}

	/* unlock the hashtable */

	pthread_mutex_unlock(&(lock_hashtable.mutex));
}
#endif
484
485
486 /* lock_hashtable_get **********************************************************
487
488    Find the lock record for the given object.  If it does not exists,
489    yet, create it and enter it in the hashtable.
490
491    IN:
492       t....the current thread
493           o....the object to look up
494
495    RETURN VALUE:
496       the lock record to use for this object
497
498 *******************************************************************************/
499
500 #if defined(ENABLE_GC_BOEHM)
501 static void lock_record_finalizer(void *object, void *p);
502 #endif
503
static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
{
	uintptr_t      lockword;
	u4             slot;
	lock_record_t *lr;

	lockword = lock_lockword_get(t, o);

	/* Fast path: an inflated lockword already points at the record,
	   so no hashtable access (and no mutex) is needed. */

	if (IS_FAT_LOCK(lockword))
		return GET_FAT_LOCK(lockword);

	/* lock the hashtable */

	pthread_mutex_lock(&(lock_hashtable.mutex));

	/* lookup the lock record in the hashtable */

	/* Direct object access must happen inside an LLNI critical
	   section. */

	LLNI_CRITICAL_START_THREAD(t);
	slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
	lr   = lock_hashtable.ptr[slot];

	for (; lr != NULL; lr = lr->hashlink) {
		if (lr->object == LLNI_DIRECT(o))
			break;
	}
	LLNI_CRITICAL_END_THREAD(t);

	if (lr == NULL) {
		/* not found, we must create a new one */

		lr = lock_record_new();

		LLNI_CRITICAL_START_THREAD(t);
		lr->object = LLNI_DIRECT(o);
		LLNI_CRITICAL_END_THREAD(t);

#if defined(ENABLE_GC_BOEHM)
		/* register new finalizer to clean up the lock record */

		GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
#endif

		/* enter it in the hashtable */

		lr->hashlink             = lock_hashtable.ptr[slot];
		lock_hashtable.ptr[slot] = lr;
		lock_hashtable.entries++;

		/* check whether the hash should grow */

		/* Grow once the load factor exceeds 4/3 to keep the chains
		   short. */

		if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
			lock_hashtable_grow();
		}
	}

	/* unlock the hashtable */

	pthread_mutex_unlock(&(lock_hashtable.mutex));

	/* return the new lock record */

	return lr;
}
567
568
569 /* lock_hashtable_remove *******************************************************
570
571    Remove the lock record for the given object from the hashtable
572    and free it afterwards.
573
574    IN:
575        t....the current thread
576        o....the object to look up
577
578 *******************************************************************************/
579
static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
{
	uintptr_t      lockword;
	lock_record_t *lr;
	u4             slot;
	lock_record_t *tmplr;

	/* lock the hashtable */

	pthread_mutex_lock(&(lock_hashtable.mutex));

	/* get lock record */

	lockword = lock_lockword_get(t, o);

	/* Only objects with an inflated (fat) lock have a hashtable
	   entry. */

	assert(IS_FAT_LOCK(lockword));

	lr = GET_FAT_LOCK(lockword);

	/* remove the lock-record from the hashtable */

	LLNI_CRITICAL_START_THREAD(t);
	slot  = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
	tmplr = lock_hashtable.ptr[slot];
	LLNI_CRITICAL_END_THREAD(t);

	if (tmplr == lr) {
		/* special handling if it's the first in the chain */

		lock_hashtable.ptr[slot] = lr->hashlink;
	}
	else {
		/* Walk the chain until we find lr's predecessor and unlink
		   lr from it. */

		for (; tmplr != NULL; tmplr = tmplr->hashlink) {
			if (tmplr->hashlink == lr) {
				tmplr->hashlink = lr->hashlink;
				break;
			}
		}

		/* The record must have been somewhere in the chain. */

		assert(tmplr != NULL);
	}

	/* decrease entry count */

	lock_hashtable.entries--;

	/* unlock the hashtable */

	pthread_mutex_unlock(&(lock_hashtable.mutex));

	/* free the lock record */

	lock_record_free(lr);
}
634
635
636 /* lock_record_finalizer *******************************************************
637
638    XXX Remove me for exact GC.
639
640 *******************************************************************************/
641
static void lock_record_finalizer(void *object, void *p)
{
	java_handle_t *o;
	classinfo     *c;

	/* Invoked by the Boehm GC for an object that had a lock record
	   registered (see lock_hashtable_get): run the Java finalizer, if
	   any, then discard the lock record. */

	o = (java_handle_t *) object;

#if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
	/* XXX this is only a dirty hack to make Boehm work with handles */

	o = LLNI_WRAP((java_object_t *) o);
#endif

	LLNI_class_get(o, c);

#if !defined(NDEBUG)
	if (opt_DebugFinalizer) {
		log_start();
		log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
		class_print(c);
		log_print("]");
		log_finish();
	}
#endif

	/* check for a finalizer function */

	if (c->finalizer != NULL)
		finalizer_run(object, p);

	/* remove the lock-record entry from the hashtable and free it */

	lock_hashtable_remove(THREADOBJECT, o);
}
676
677
678 /*============================================================================*/
679 /* OBJECT LOCK INITIALIZATION                                                 */
680 /*============================================================================*/
681
682
683 /* lock_init_object_lock *******************************************************
684
685    Initialize the monitor pointer of the given object. The monitor gets
686    initialized to an unlocked state.
687
688 *******************************************************************************/
689
690 void lock_init_object_lock(java_object_t *o)
691 {
692         assert(o);
693
694         o->lockword = THIN_UNLOCKED;
695         LOCK_CLEAR_FLC_BIT(o);
696 }
697
698
699 /*============================================================================*/
700 /* LOCKING ALGORITHM                                                          */
701 /*============================================================================*/
702
703
704 /* lock_lockword_get ***********************************************************
705
706    Get the lockword for the given object.
707
708    IN:
709       t............the current thread
710       o............the object
711
712 *******************************************************************************/
713
714 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
715 {
716         uintptr_t lockword;
717
718         LLNI_CRITICAL_START_THREAD(t);
719         lockword = LLNI_DIRECT(o)->lockword;
720         LLNI_CRITICAL_END_THREAD(t);
721
722         return lockword;
723 }
724
725
726 /* lock_lockword_set ***********************************************************
727
728    Set the lockword for the given object.
729
730    IN:
731       t............the current thread
732       o............the object
733           lockword.....the new lockword value
734
735 *******************************************************************************/
736
static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
{
	/* Direct access to the object header must be bracketed by an LLNI
	   critical section. */
	LLNI_CRITICAL_START_THREAD(t);
	LLNI_DIRECT(o)->lockword = lockword;
	LLNI_CRITICAL_END_THREAD(t);
}
743
744
745 /* lock_record_enter ***********************************************************
746
747    Enter the lock represented by the given lock record.
748
749    IN:
750       t.................the current thread
751           lr................the lock record
752
753 *******************************************************************************/
754
static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
{
	/* Block until the record's mutex is acquired; only then record the
	   new owner, so lr->owner is only ever written while holding the
	   mutex. */
	pthread_mutex_lock(&(lr->mutex));
	lr->owner = t;
}
760
761
762 /* lock_record_exit ************************************************************
763
764    Release the lock represented by the given lock record.
765
766    IN:
767       t.................the current thread
768           lr................the lock record
769
770    PRE-CONDITION:
771       The current thread must own the lock represented by this lock record.
772           This is NOT checked by this function!
773
774 *******************************************************************************/
775
static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
{
	/* Clear the owner before releasing the mutex (caller must hold it;
	   this is not checked here).  Note: t is unused, kept for symmetry
	   with lock_record_enter. */
	lr->owner = NULL;
	pthread_mutex_unlock(&(lr->mutex));
}
781
782
783 /* lock_inflate ****************************************************************
784
785    Inflate the lock of the given object. This may only be called by the
786    owner of the monitor of the object.
787
788    IN:
789       t............the current thread
790           o............the object of which to inflate the lock
791           lr...........the lock record to install. The current thread must
792                        own the lock of this lock record!
793
794    PRE-CONDITION:
795       The current thread must be the owner of this object's monitor AND
796           of the lock record's lock!
797
798 *******************************************************************************/
799
static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
{
	uintptr_t lockword;

	/* get the current lock count */

	lockword = lock_lockword_get(t, o);

	if (IS_FAT_LOCK(lockword)) {
		/* Already inflated: it must already refer to this record. */
		assert(GET_FAT_LOCK(lockword) == lr);
	}
	else {
		/* Still thin: the current thread must be the thin-lock
		   owner (precondition). */
		assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);

		/* copy the count from the thin lock */

		lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
	}

	DEBUGLOCKS(("[lock_inflate      : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
				lr, t, o, lockword, lr->count));

	/* clear flat-lock-contention bit */

	LLNI_CRITICAL_START_THREAD(t);
	LOCK_CLEAR_FLC_BIT(LLNI_DIRECT(o));
	LLNI_CRITICAL_END_THREAD(t);

	/* notify waiting objects */

	/* Threads that saw the FLC bit are waiting on this record; wake
	   them all before publishing the fat lock. */

	lock_record_notify(t, lr, false);

	/* install it */

	lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
}
836
837
838 /* lock_monitor_enter **********************************************************
839
840    Acquire the monitor of the given object. If the current thread already
841    owns the monitor, the lock counter is simply increased.
842
843    This function blocks until it can acquire the monitor.
844
845    IN:
846       t............the current thread
847           o............the object of which to enter the monitor
848
849    RETURN VALUE:
850       true.........the lock has been successfully acquired
851           false........an exception has been thrown
852
853 *******************************************************************************/
854
bool lock_monitor_enter(java_handle_t *o)
{
	threadobject  *t;
	/* CAUTION: This code assumes that ptrint is unsigned! */
	ptrint         lockword;
	ptrint         thinlock;
	lock_record_t *lr;

	if (o == NULL) {
		exceptions_throw_nullpointerexception();
		return false;
	}

	t = THREADOBJECT;

	thinlock = t->thinlock;

	/* most common case: try to thin-lock an unlocked object */

	LLNI_CRITICAL_START_THREAD(t);
	lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
	LLNI_CRITICAL_END_THREAD(t);

	if (lockword == THIN_UNLOCKED) {
		/* success. we locked it */
		/* The Java Memory Model requires a memory barrier here: */
		/* Because of the CAS above, this barrier is a nop on x86 / x86_64 */
		MEMORY_BARRIER_AFTER_ATOMIC();
		return true;
	}

	/* next common case: recursive lock with small recursion count */
	/* We don't have to worry about stale values here, as any stale value  */
	/* will indicate another thread holding the lock (or an inflated lock) */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		/* we own this monitor               */
		/* check the current recursion count */

		/* Since the TID bits are equal, the XOR leaves only the count
		   field, so this compares the count against its maximum. */

		if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
		{
			/* the recursion count is low enough */

			lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);

			/* success. we locked it */
			return true;
		}
		else {
			/* recursion count overflow */

			/* Inflate; the record's count then takes over from the
			   thin-lock count field. */

			lr = lock_hashtable_get(t, o);
			lock_record_enter(t, lr);
			lock_inflate(t, o, lr);
			lr->count++;

			return true;
		}
	}

	/* the lock is either contented or fat */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		/* check for recursive entering */
		if (lr->owner == t) {
			lr->count++;
			return true;
		}

		/* acquire the mutex of the lock record */

		lock_record_enter(t, lr);

		assert(lr->count == 0);

		return true;
	}

	/****** inflation path ******/

	/* The object is thin-locked by another thread: inflate it. */

	/* first obtain the lock record for this object */

	lr = lock_hashtable_get(t, o);

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Enter */
	jvmti_MonitorContendedEntering(false, o);
#endif

	/* enter the monitor */

	lock_record_enter(t, lr);

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Entered */
	jvmti_MonitorContendedEntering(true, o);
#endif

	/* inflation loop */

	while (IS_THIN_LOCK(lockword = lock_lockword_get(t, o))) {
		/* Set the flat lock contention bit to let the owning thread
		   know that we want to be notified of unlocking. */

		LLNI_CRITICAL_START_THREAD(t);
		LOCK_SET_FLC_BIT(LLNI_DIRECT(o));
		LLNI_CRITICAL_END_THREAD(t);

		DEBUGLOCKS(("thread %d set flc bit on %p lr %p",
					t->index, (void*) o, (void*) lr));

		/* try to lock the object */

		LLNI_CRITICAL_START_THREAD(t);
		lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
		LLNI_CRITICAL_END_THREAD(t);

		if (lockword == THIN_UNLOCKED) {
			/* we can inflate the lock ourselves */

			DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
						t->index, (void*) o, (void*) lr));

			lock_inflate(t, o, lr);
		}
		else {
			/* Wait until another thread sees the flc bit and notifies
			   us of unlocking. */

			(void) lock_record_wait(t, lr, 0, 0);
		}
	}

	/* we own the inflated lock now */

	return true;
}
995
996
997 /* lock_monitor_exit ***********************************************************
998
999    Decrement the counter of a (currently owned) monitor. If the counter
1000    reaches zero, release the monitor.
1001
1002    If the current thread is not the owner of the monitor, an 
1003    IllegalMonitorState exception is thrown.
1004
1005    IN:
1006       t............the current thread
1007           o............the object of which to exit the monitor
1008
1009    RETURN VALUE:
1010       true.........everything ok,
1011           false........an exception has been thrown
1012
1013 *******************************************************************************/
1014
bool lock_monitor_exit(java_handle_t *o)
{
	threadobject *t;
	uintptr_t     lockword;
	ptrint        thinlock;

	/* a null object has no monitor to release */

	if (o == NULL) {
		exceptions_throw_nullpointerexception();
		return false;
	}

	t = THREADOBJECT;

	/* the thin-lock word this thread would have installed for an
	   uncontended, non-recursive lock on any object */

	thinlock = t->thinlock;

	/* We don't have to worry about stale values here, as any stale value */
	/* will indicate that we don't own the lock.                          */

	lockword = lock_lockword_get(t, o);

	/* most common case: we release a thin lock that we hold once */

	if (lockword == thinlock) {
		/* memory barrier for Java Memory Model */
		STORE_ORDER_BARRIER();
		lock_lockword_set(t, o, THIN_UNLOCKED);
		/* memory barrier for thin locking */
		MEMORY_BARRIER();

		/* check if there has been a flat lock contention on this object */

		if (LOCK_TEST_FLC_BIT(LLNI_DIRECT(o))) {
			lock_record_t *lr;

			DEBUGLOCKS(("thread %d saw flc bit on %p",
						t->index, (void*) o));

			/* there has been a contention on this thin lock */

			lr = lock_hashtable_get(t, o);

			DEBUGLOCKS(("thread %d for %p got lr %p",
						t->index, (void*) o, (void*) lr));

			lock_record_enter(t, lr);

			/* re-test the flc bit while holding the lock record's
			   mutex, so the notify cannot race with a contender */

			if (LOCK_TEST_FLC_BIT(LLNI_DIRECT(o))) {
				/* notify a thread that it can try to inflate the lock now */

				lock_record_notify(t, lr, true);
			}

			lock_record_exit(t, lr);
		}

		return true;
	}

	/* next common case: we release a recursive lock, count > 0 */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		/* decrement the recursion count embedded in the lock word */
		lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
		return true;
	}

	/* either the lock is fat, or we don't hold it at all */

	if (IS_FAT_LOCK(lockword)) {

		lock_record_t *lr;

		lr = GET_FAT_LOCK(lockword);

		/* check if we own this monitor */
		/* We don't have to worry about stale values here, as any stale value */
		/* will be != t and thus fail this check.                             */

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();
			return false;
		}

		/* { the current thread `t` owns the lock record `lr` on object `o` } */

		if (lr->count != 0) {
			/* we had locked this one recursively. just decrement, it will */
			/* still be locked. */
			lr->count--;
			return true;
		}

		/* unlock this lock record: clear the owner first, then release
		   the mutex backing the fat lock */

		lr->owner = NULL;
		pthread_mutex_unlock(&(lr->mutex));

		return true;
	}

	/* legal thin lock cases have been handled above, so this is an error */

	exceptions_throw_illegalmonitorstateexception();

	return false;
}
1120
1121
1122 /* lock_record_add_waiter ******************************************************
1123
1124    Add a thread to the list of waiting threads of a lock record.
1125
1126    IN:
1127       lr...........the lock record
1128       thread.......the thread to add
1129
1130 *******************************************************************************/
1131
1132 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
1133 {
1134         lock_waiter_t *w;
1135
1136         /* Allocate a waiter data structure. */
1137
1138         w = NEW(lock_waiter_t);
1139
1140 #if defined(ENABLE_STATISTICS)
1141         if (opt_stat)
1142                 size_lock_waiter += sizeof(lock_waiter_t);
1143 #endif
1144
1145         /* Store the thread in the waiter structure. */
1146
1147         w->thread = thread;
1148
1149         /* Add the waiter as last entry to waiters list. */
1150
1151         list_add_last(lr->waiters, w);
1152 }
1153
1154
1155 /* lock_record_remove_waiter ***************************************************
1156
1157    Remove a thread from the list of waiting threads of a lock record.
1158
1159    IN:
1160       lr...........the lock record
      thread.......the thread to remove
1162
1163    PRE-CONDITION:
1164       The current thread must be the owner of the lock record.
1165    
1166 *******************************************************************************/
1167
1168 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
1169 {
1170         list_t        *l;
1171         lock_waiter_t *w;
1172
1173         /* Get the waiters list. */
1174
1175         l = lr->waiters;
1176
1177         for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
1178                 if (w->thread == thread) {
1179                         /* Remove the waiter entry from the list. */
1180
1181                         list_remove_unsynced(l, w);
1182
1183                         /* Free the waiter data structure. */
1184
1185                         FREE(w, lock_waiter_t);
1186
1187 #if defined(ENABLE_STATISTICS)
1188                         if (opt_stat)
1189                                 size_lock_waiter -= sizeof(lock_waiter_t);
1190 #endif
1191
1192                         return;
1193                 }
1194         }
1195
1196         /* This should never happen. */
1197
1198         vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
1199 }
1200
1201
1202 /* lock_record_wait ************************************************************
1203
1204    Wait on a lock record for a given (maximum) amount of time.
1205
1206    IN:
      thread.......the current thread
1208           lr...........the lock record
1209           millis.......milliseconds of timeout
1210           nanos........nanoseconds of timeout
1211
1212    RETURN VALUE:
1213       true.........we have been interrupted,
1214       false........everything ok
1215
1216    PRE-CONDITION:
1217       The current thread must be the owner of the lock record.
1218           This is NOT checked by this function!
1219    
1220 *******************************************************************************/
1221
static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
{
	s4   lockcount;
	bool wasinterrupted;

	DEBUGLOCKS(("[lock_record_wait  : lr=%p, t=%p, millis=%lld, nanos=%d]",
				lr, thread, millis, nanos));

	/* { the thread t owns the fat lock record lr on the object o } */

	/* register us as waiter for this object */

	lock_record_add_waiter(lr, thread);

	/* remember the old lock count; wait() must fully release the
	   monitor even when it was entered recursively */

	lockcount = lr->count;

	/* unlock this record: reset the recursion count and release the
	   underlying mutex so other threads can enter the monitor */

	lr->count = 0;
	lock_record_exit(thread, lr);

	/* wait until notified/interrupted/timed out */

	wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);

	/* re-enter the monitor before touching the lock record again */

	lock_record_enter(thread, lr);

	/* remove us from the list of waiting threads */

	lock_record_remove_waiter(lr, thread);

	/* restore the old lock count (recursion depth) saved above */

	lr->count = lockcount;

	/* return if we have been interrupted */

	return wasinterrupted;
}
1265
1266
1267 /* lock_monitor_wait ***********************************************************
1268
1269    Wait on an object for a given (maximum) amount of time.
1270
1271    IN:
1272       t............the current thread
1273           o............the object
1274           millis.......milliseconds of timeout
1275           nanos........nanoseconds of timeout
1276
1277    PRE-CONDITION:
1278       The current thread must be the owner of the object's monitor.
1279    
1280 *******************************************************************************/
1281
static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
{
	uintptr_t      lockword;
	lock_record_t *lr;

	lockword = lock_lockword_get(t, o);

	/* check if we own this monitor */
	/* We don't have to worry about stale values here, as any stale value */
	/* will fail this check.                                              */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();
			return;
		}
	}
	else {
		/* it's a thin lock */

		/* strip the recursion count before comparing with our thinlock word */

		if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
			exceptions_throw_illegalmonitorstateexception();
			return;
		}

		/* inflate this lock: waiting always requires a fat lock record.
		   Note the order: the record's mutex must be held before the
		   lock word is switched over to the fat representation. */

		lr = lock_hashtable_get(t, o);
		lock_record_enter(t, lr);
		lock_inflate(t, o, lr);
	}

	/* { the thread t owns the fat lock record lr on the object o } */

	if (lock_record_wait(t, lr, millis, nanos))
		exceptions_throw_interruptedexception();
}
1322
1323
1324 /* lock_record_notify **********************************************************
1325
1326    Notify one thread or all threads waiting on the given lock record.
1327
1328    IN:
1329       t............the current thread
1330           lr...........the lock record
1331           one..........if true, only notify one thread
1332
1333    PRE-CONDITION:
1334       The current thread must be the owner of the lock record.
1335           This is NOT checked by this function!
1336    
1337 *******************************************************************************/
1338
static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
{
	list_t        *l;
	lock_waiter_t *w;
	threadobject  *waitingthread;

	/* { the thread t owns the fat lock record lr on the object o } */

	/* Get the waiters list. */

	l = lr->waiters;

	for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
		/* signal the waiting thread */

		waitingthread = w->thread;

		/* If the thread was already signaled but hasn't removed
		   itself from the list yet, just ignore it. */

		if (waitingthread->signaled == true)
			continue;

		/* Enter the wait-mutex: the sleeping flag is read and the
		   signaled flag written under this mutex. */

		pthread_mutex_lock(&(waitingthread->waitmutex));

		DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
					lr, t, waitingthread, waitingthread->sleeping, one));

		/* Signal the thread if it's sleeping. */

		if (waitingthread->sleeping)
			pthread_cond_signal(&(waitingthread->waitcond));

		/* Mark the thread as signaled, so it is skipped by later
		   notifies until it removes itself from the waiters list. */

		waitingthread->signaled = true;

		/* Leave the wait-mutex. */

		pthread_mutex_unlock(&(waitingthread->waitmutex));

		/* if we should only wake one, we are done */

		if (one)
			break;
	}
}
1388
1389
1390 /* lock_monitor_notify *********************************************************
1391
1392    Notify one thread or all threads waiting on the given object.
1393
1394    IN:
1395       t............the current thread
1396           o............the object
1397           one..........if true, only notify one thread
1398
1399    PRE-CONDITION:
1400       The current thread must be the owner of the object's monitor.
1401    
1402 *******************************************************************************/
1403
1404 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1405 {
1406         uintptr_t      lockword;
1407         lock_record_t *lr;
1408
1409         lockword = lock_lockword_get(t, o);
1410
1411         /* check if we own this monitor */
1412         /* We don't have to worry about stale values here, as any stale value */
1413         /* will fail this check.                                              */
1414
1415         if (IS_FAT_LOCK(lockword)) {
1416
1417                 lr = GET_FAT_LOCK(lockword);
1418
1419                 if (lr->owner != t) {
1420                         exceptions_throw_illegalmonitorstateexception();
1421                         return;
1422                 }
1423         }
1424         else {
1425                 /* it's a thin lock */
1426
1427                 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1428                         exceptions_throw_illegalmonitorstateexception();
1429                         return;
1430                 }
1431
1432                 /* inflate this lock */
1433
1434                 lr = lock_hashtable_get(t, o);
1435                 lock_record_enter(t, lr);
1436                 lock_inflate(t, o, lr);
1437         }
1438
1439         /* { the thread t owns the fat lock record lr on the object o } */
1440
1441         lock_record_notify(t, lr, one);
1442 }
1443
1444
1445
1446 /*============================================================================*/
/* INQUIRY FUNCTIONS                                                          */
1448 /*============================================================================*/
1449
1450
1451 /* lock_is_held_by_current_thread **********************************************
1452
1453    Return true if the current thread owns the monitor of the given object.
1454
1455    IN:
1456           o............the object
1457
1458    RETURN VALUE:
1459       true, if the current thread holds the lock of this object.
1460    
1461 *******************************************************************************/
1462
1463 bool lock_is_held_by_current_thread(java_handle_t *o)
1464 {
1465         threadobject  *t;
1466         uintptr_t      lockword;
1467         lock_record_t *lr;
1468
1469         t = THREADOBJECT;
1470
1471         /* check if we own this monitor */
1472         /* We don't have to worry about stale values here, as any stale value */
1473         /* will fail this check.                                              */
1474
1475         lockword = lock_lockword_get(t, o);
1476
1477         if (IS_FAT_LOCK(lockword)) {
1478                 /* it's a fat lock */
1479
1480                 lr = GET_FAT_LOCK(lockword);
1481
1482                 return (lr->owner == t);
1483         }
1484         else {
1485                 /* it's a thin lock */
1486
1487                 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1488         }
1489 }
1490
1491
1492
1493 /*============================================================================*/
1494 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD                              */
1495 /*============================================================================*/
1496
1497
1498 /* lock_wait_for_object ********************************************************
1499
1500    Wait for the given object.
1501
1502    IN:
1503           o............the object
1504           millis.......milliseconds to wait
1505           nanos........nanoseconds to wait
1506    
1507 *******************************************************************************/
1508
1509 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1510 {
1511         threadobject *thread;
1512
1513         thread = THREADOBJECT;
1514
1515         lock_monitor_wait(thread, o, millis, nanos);
1516 }
1517
1518
1519 /* lock_notify_object **********************************************************
1520
1521    Notify one thread waiting on the given object.
1522
1523    IN:
1524           o............the object
1525    
1526 *******************************************************************************/
1527
1528 void lock_notify_object(java_handle_t *o)
1529 {
1530         threadobject *thread;
1531
1532         thread = THREADOBJECT;
1533
1534         lock_monitor_notify(thread, o, true);
1535 }
1536
1537
1538 /* lock_notify_all_object ******************************************************
1539
1540    Notify all threads waiting on the given object.
1541
1542    IN:
1543           o............the object
1544    
1545 *******************************************************************************/
1546
1547 void lock_notify_all_object(java_handle_t *o)
1548 {
1549         threadobject *thread;
1550
1551         thread = THREADOBJECT;
1552
1553         lock_monitor_notify(thread, o, false);
1554 }
1555
1556
1557 /*
1558  * These are local overrides for various environment variables in Emacs.
1559  * Please do not remove this and leave it at the end of the file, where
1560  * Emacs will automagically detect them.
1561  * ---------------------------------------------------------------------
1562  * Local variables:
1563  * mode: c
1564  * indent-tabs-mode: t
1565  * c-basic-offset: 4
1566  * tab-width: 4
1567  * End:
1568  * vim:noexpandtab:sw=4:ts=4:
1569  */