95aa60b35fd612b9eb070a1d53b9de492d1cf71d
[cacao.git] / src / threads / native / lock.c
1 /* src/threads/native/lock.c - lock implementation
2
3    Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
4    C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5    E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6    J. Wenninger, Institut f. Computersprachen - TU Wien
7
8    This file is part of CACAO.
9
10    This program is free software; you can redistribute it and/or
11    modify it under the terms of the GNU General Public License as
12    published by the Free Software Foundation; either version 2, or (at
13    your option) any later version.
14
15    This program is distributed in the hope that it will be useful, but
16    WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18    General Public License for more details.
19
20    You should have received a copy of the GNU General Public License
21    along with this program; if not, write to the Free Software
22    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23    02110-1301, USA.
24
25 */
26
27
28 #include "config.h"
29
30 #include <assert.h>
31 #include <stdint.h>
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <sys/time.h>
35 #include <pthread.h>
36
37 #include "vm/types.h"
38
39 #include "mm/memory.h"
40
41 #include "native/llni.h"
42
43 #include "threads/lock-common.h"
44
45 #include "threads/native/lock.h"
46 #include "threads/native/threads.h"
47
48 #include "toolbox/list.h"
49
50 #include "vm/global.h"
51 #include "vm/exceptions.h"
52 #include "vm/finalizer.h"
53 #include "vm/stringlocal.h"
54 #include "vm/vm.h"
55
56 #include "vmcore/options.h"
57
58 #if defined(ENABLE_STATISTICS)
59 # include "vmcore/statistics.h"
60 #endif
61
62 #if defined(ENABLE_VMLOG)
63 #include <vmlog_cacao.h>
64 #endif
65
66 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
67
68 #include "arch.h"
69
70 /* includes for atomic instructions: */
71
72 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
73 #include "threads/native/generic-primitives.h"
74 #else
75 #include "machine-instr.h"
76 #endif
77
78 #if defined(ENABLE_JVMTI)
79 #include "native/jvmti/cacaodbg.h"
80 #endif
81
82 #if defined(ENABLE_GC_BOEHM)
83 # include "mm/boehm-gc/include/gc.h"
84 #endif
85
86
87 /* debug **********************************************************************/
88
89 #if !defined(NDEBUG)
90 # define DEBUGLOCKS(format) \
91     do { \
92         if (opt_DebugLocks) { \
93             log_println format; \
94         } \
95     } while (0)
96 #else
97 # define DEBUGLOCKS(format)
98 #endif
99
100
101 /******************************************************************************/
102 /* MACROS                                                                     */
103 /******************************************************************************/
104
105 /* number of lock records in the first pool allocated for a thread */
106 #define LOCK_INITIAL_LOCK_RECORDS 8
107
108 #define LOCK_INITIAL_HASHTABLE_SIZE  1613  /* a prime in the middle between 1024 and 2048 */
109
110 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
111         ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
112
113
114 /******************************************************************************/
115 /* MACROS FOR THIN/FAT LOCKS                                                  */
116 /******************************************************************************/
117
118 /* We use a variant of the tasuki locks described in the paper
119  *     
120  *     Tamiya Onodera, Kiyokuni Kawachiya
121  *     A Study of Locking Objects with Bimodal Fields
122  *     Proceedings of the ACM OOPSLA '99, pp. 223-237
123  *     1999
124  *
125  * The underlying thin locks are a variant of the thin locks described in
126  * 
127  *     Bacon, Konuru, Murthy, Serrano
128  *     Thin Locks: Featherweight Synchronization for Java
129  *         Proceedings of the ACM Conference on Programming Language Design and 
130  *         Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
131  *         June 1998
132  *
133  * In thin lock mode the lockword looks like this:
134  *
135  *     ,----------------------,-----------,---,
136  *     |      thread ID       |   count   | 0 |
137  *     `----------------------'-----------'---´
138  *
139  *     thread ID......the 'index' of the owning thread, or 0
140  *     count..........number of times the lock has been entered minus 1
141  *     0..............the shape bit is 0 in thin lock mode
142  *
143  * In fat lock mode it is basically a lock_record_t *:
144  *
145  *     ,----------------------------------,---,
146  *     |    lock_record_t * (without LSB) | 1 |
147  *     `----------------------------------'---´
148  *
149  *     1..............the shape bit is 1 in fat lock mode
150  */
151
152 #if SIZEOF_VOID_P == 8
153 #define THIN_LOCK_WORD_SIZE    64
154 #else
155 #define THIN_LOCK_WORD_SIZE    32
156 #endif
157
158 #define THIN_LOCK_FLC_BIT      0x01
159 #define THIN_LOCK_SHAPE_BIT    0x02
160
161 #define THIN_UNLOCKED          0
162
163 #define THIN_LOCK_COUNT_SHIFT  2
164 #define THIN_LOCK_COUNT_SIZE   7
165 #define THIN_LOCK_COUNT_INCR   (1 << THIN_LOCK_COUNT_SHIFT)
166 #define THIN_LOCK_COUNT_MAX    ((1 << THIN_LOCK_COUNT_SIZE) - 1)
167 #define THIN_LOCK_COUNT_MASK   (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
168
169 #define THIN_LOCK_TID_SHIFT    (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
170 #define THIN_LOCK_TID_SIZE     (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
171
172 #define IS_THIN_LOCK(lockword)  (!((lockword) & THIN_LOCK_SHAPE_BIT))
173 #define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)
174
175 #define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~(THIN_LOCK_SHAPE_BIT | THIN_LOCK_FLC_BIT)))
176 #define MAKE_FAT_LOCK(ptr)      ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)
177
178 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~(THIN_LOCK_COUNT_MASK | THIN_LOCK_FLC_BIT))
179 #define LOCK_WORD_WITHOUT_FLC_BIT(lockword) ((lockword) & ~THIN_LOCK_FLC_BIT)
180
181
182 /* global variables ***********************************************************/
183
184 /* hashtable mapping objects to lock records */
185 static lock_hashtable_t lock_hashtable;
186
187
188 /******************************************************************************/
189 /* PROTOTYPES                                                                 */
190 /******************************************************************************/
191
192 static void lock_hashtable_init(void);
193
194 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
195 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
196 static void lock_record_enter(threadobject *t, lock_record_t *lr);
197 static void lock_record_exit(threadobject *t, lock_record_t *lr);
198 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
199 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
200
201
202 /*============================================================================*/
203 /* INITIALIZATION OF DATA STRUCTURES                                          */
204 /*============================================================================*/
205
206
207 /* lock_init *******************************************************************
208
209    Initialize global data for locking.
210
211 *******************************************************************************/
212
void lock_init(void)
{
	/* Set up the global hashtable mapping objects to lock records;
	   everything else in this module is initialized lazily. */

	lock_hashtable_init();

#if defined(ENABLE_VMLOG)
	/* let vmlog install its lock instrumentation */

	vmlog_cacao_init_lock();
#endif
}
223
224
225 /* lock_pre_compute_thinlock ***************************************************
226
227    Pre-compute the thin lock value for a thread index.
228
229    IN:
230       index........the thread index (>= 1)
231
232    RETURN VALUE:
233       the thin lock value for this thread index
234
235 *******************************************************************************/
236
237 ptrint lock_pre_compute_thinlock(s4 index)
238 {
239         return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
240 }
241
242
243 /* lock_record_new *************************************************************
244
245    Allocate a lock record.
246
247 *******************************************************************************/
248
249 static lock_record_t *lock_record_new(void)
250 {
251         lock_record_t *lr;
252
253         /* allocate the data structure on the C heap */
254
255         lr = NEW(lock_record_t);
256
257 #if defined(ENABLE_STATISTICS)
258         if (opt_stat)
259                 size_lock_record += sizeof(lock_record_t);
260 #endif
261
262         /* initialize the members */
263
264         lr->object  = NULL;
265         lr->owner   = NULL;
266         lr->count   = 0;
267         lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));
268
269 #if defined(ENABLE_GC_CACAO)
270         /* register the lock object as weak reference with the GC */
271
272         gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
273 #endif
274
275         /* initialize the mutex */
276
277         pthread_mutex_init(&(lr->mutex), NULL);
278
279         DEBUGLOCKS(("[lock_record_new   : lr=%p]", (void *) lr));
280
281         return lr;
282 }
283
284
285 /* lock_record_free ************************************************************
286
287    Free a lock record.
288
289    IN:
290        lr....lock record to free
291
292 *******************************************************************************/
293
294 static void lock_record_free(lock_record_t *lr)
295 {
296         DEBUGLOCKS(("[lock_record_free  : lr=%p]", (void *) lr));
297
298         /* Destroy the mutex. */
299
300         pthread_mutex_destroy(&(lr->mutex));
301
302 #if defined(ENABLE_GC_CACAO)
303         /* unregister the lock object reference with the GC */
304
305         gc_weakreference_unregister(&(lr->object));
306 #endif
307
308         /* Free the waiters list. */
309
310         list_free(lr->waiters);
311
312         /* Free the data structure. */
313
314         FREE(lr, lock_record_t);
315
316 #if defined(ENABLE_STATISTICS)
317         if (opt_stat)
318                 size_lock_record -= sizeof(lock_record_t);
319 #endif
320 }
321
322
323 /*============================================================================*/
324 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS                                  */
325 /*============================================================================*/
326
327 /* lock_hashtable_init *********************************************************
328
329    Initialize the global hashtable mapping objects to lock records.
330
331 *******************************************************************************/
332
static void lock_hashtable_init(void)
{
	/* Create the global object->lock-record table: a mutex-protected
	   chained hashtable with a prime number of slots. */

	pthread_mutex_init(&(lock_hashtable.mutex), NULL);

	lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
	lock_hashtable.entries = 0;
	lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);

#if defined(ENABLE_STATISTICS)
	if (opt_stat)
		size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
#endif

	/* all chains start out empty */

	MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
}
348
349
350 /* lock_hashtable_grow *********************************************************
351
352    Grow the lock record hashtable to about twice its current size and
353    rehash the entries.
354
355 *******************************************************************************/
356
357 /* must be called with hashtable mutex locked */
358 static void lock_hashtable_grow(void)
359 {
360         u4 oldsize;
361         u4 newsize;
362         lock_record_t **oldtable;
363         lock_record_t **newtable;
364         lock_record_t *lr;
365         lock_record_t *next;
366         u4 i;
367         u4 h;
368         u4 newslot;
369
370         /* allocate a new table */
371
372         oldsize = lock_hashtable.size;
373         newsize = oldsize*2 + 1; /* XXX should use prime numbers */
374
375         DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
376
377         oldtable = lock_hashtable.ptr;
378         newtable = MNEW(lock_record_t *, newsize);
379
380 #if defined(ENABLE_STATISTICS)
381         if (opt_stat)
382                 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
383 #endif
384
385         MZERO(newtable, lock_record_t *, newsize);
386
387         /* rehash the entries */
388
389         for (i = 0; i < oldsize; i++) {
390                 lr = oldtable[i];
391                 while (lr) {
392                         next = lr->hashlink;
393
394                         h = heap_hashcode(lr->object);
395                         newslot = h % newsize;
396
397                         lr->hashlink = newtable[newslot];
398                         newtable[newslot] = lr;
399
400                         lr = next;
401                 }
402         }
403
404         /* replace the old table */
405
406         lock_hashtable.ptr  = newtable;
407         lock_hashtable.size = newsize;
408
409         MFREE(oldtable, lock_record_t *, oldsize);
410
411 #if defined(ENABLE_STATISTICS)
412         if (opt_stat)
413                 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
414 #endif
415 }
416
417
418 /* lock_hashtable_cleanup ******************************************************
419
420    Removes (and frees) lock records which have a cleared object reference
421    from the hashtable. The locked object was reclaimed by the GC.
422
423 *******************************************************************************/
424
#if defined(ENABLE_GC_CACAO)
/* Walk every hash chain and unlink/free lock records whose weak object
   reference was cleared by the GC, i.e. whose locked object has been
   reclaimed.  Must not be called while any of these records is held. */
void lock_hashtable_cleanup(void)
{
	lock_record_t *lr;
	lock_record_t *prev;
	lock_record_t *next;
	u4             i;    /* u4, not int: matches lock_hashtable.size and
	                        avoids a signed/unsigned comparison */

	/* lock the hashtable */

	pthread_mutex_lock(&(lock_hashtable.mutex));

	/* search the hashtable for cleared references */

	for (i = 0; i < lock_hashtable.size; i++) {
		prev = NULL;

		for (lr = lock_hashtable.ptr[i]; lr != NULL; lr = next) {
			next = lr->hashlink;

			if (lr->object == NULL) {
				/* unlink the lock record from its chain */

				if (prev == NULL)
					lock_hashtable.ptr[i] = next;
				else
					prev->hashlink = next;

				/* Keep the entry count accurate (mirrors
				   lock_hashtable_remove); otherwise the grow
				   heuristic in lock_hashtable_get drifts. */

				lock_hashtable.entries--;

				/* free the lock record */

				lock_record_free(lr);
			}
			else {
				prev = lr;
			}
		}
	}

	/* unlock the hashtable */

	pthread_mutex_unlock(&(lock_hashtable.mutex));
}
#endif
477
478
479 /* lock_hashtable_get **********************************************************
480
481    Find the lock record for the given object.  If it does not exist
482    yet, create it and enter it in the hashtable.
483
484    IN:
485       t....the current thread
486           o....the object to look up
487
488    RETURN VALUE:
489       the lock record to use for this object
490
491 *******************************************************************************/
492
493 #if defined(ENABLE_GC_BOEHM)
494 static void lock_record_finalizer(void *object, void *p);
495 #endif
496
static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
{
	uintptr_t      lockword;
	u4             slot;
	lock_record_t *lr;

	lockword = lock_lockword_get(t, o);

	/* Fast path: an inflated lock carries its lock record directly
	   in the lockword, no hashtable access needed. */

	if (IS_FAT_LOCK(lockword))
		return GET_FAT_LOCK(lockword);

	/* lock the hashtable */

	pthread_mutex_lock(&(lock_hashtable.mutex));

	/* Look the object up in its chain.  Raw object pointers are only
	   touched inside LLNI critical sections. */

	LLNI_CRITICAL_START_THREAD(t);
	slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
	lr   = lock_hashtable.ptr[slot];

	for (; lr != NULL; lr = lr->hashlink) {
		if (lr->object == LLNI_DIRECT(o))
			break;
	}
	LLNI_CRITICAL_END_THREAD(t);

	if (lr == NULL) {
		/* not found, we must create a new one */

		lr = lock_record_new();

		LLNI_CRITICAL_START_THREAD(t);
		lr->object = LLNI_DIRECT(o);
		LLNI_CRITICAL_END_THREAD(t);

#if defined(ENABLE_GC_BOEHM)
		/* register new finalizer to clean up the lock record */

		GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
#endif

		/* push it onto the head of its chain */

		lr->hashlink             = lock_hashtable.ptr[slot];
		lock_hashtable.ptr[slot] = lr;
		lock_hashtable.entries++;

		/* Grow once the load factor exceeds 4/3
		   (entries * 3 > size * 4). */

		if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
			lock_hashtable_grow();
		}
	}

	/* unlock the hashtable */

	pthread_mutex_unlock(&(lock_hashtable.mutex));

	/* return the new lock record */

	return lr;
}
560
561
562 /* lock_hashtable_remove *******************************************************
563
564    Remove the lock record for the given object from the hashtable
565    and free it afterwards.
566
567    IN:
568        t....the current thread
569        o....the object to look up
570
571 *******************************************************************************/
572
static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
{
	uintptr_t      lockword;
	lock_record_t *lr;
	u4             slot;
	lock_record_t *tmplr;

	/* lock the hashtable */

	pthread_mutex_lock(&(lock_hashtable.mutex));

	/* The object must be inflated: its lockword points to the record
	   we are about to remove. */

	lockword = lock_lockword_get(t, o);

	assert(IS_FAT_LOCK(lockword));

	lr = GET_FAT_LOCK(lockword);

	/* remove the lock-record from the hashtable */

	LLNI_CRITICAL_START_THREAD(t);
	slot  = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
	tmplr = lock_hashtable.ptr[slot];
	LLNI_CRITICAL_END_THREAD(t);

	if (tmplr == lr) {
		/* special handling if it's the first in the chain */

		lock_hashtable.ptr[slot] = lr->hashlink;
	}
	else {
		/* Walk the chain to find lr's predecessor and unlink lr. */

		for (; tmplr != NULL; tmplr = tmplr->hashlink) {
			if (tmplr->hashlink == lr) {
				tmplr->hashlink = lr->hashlink;
				break;
			}
		}

		/* lr must have been in this chain */

		assert(tmplr != NULL);
	}

	/* decrease entry count */

	lock_hashtable.entries--;

	/* unlock the hashtable */

	pthread_mutex_unlock(&(lock_hashtable.mutex));

	/* free the lock record */

	lock_record_free(lr);
}
627
628
629 /* lock_record_finalizer *******************************************************
630
631    XXX Remove me for exact GC.
632
633 *******************************************************************************/
634
static void lock_record_finalizer(void *object, void *p)
{
	java_handle_t *o;
	classinfo     *c;

	o = (java_handle_t *) object;

#if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
	/* XXX this is only a dirty hack to make Boehm work with handles */

	o = LLNI_WRAP((java_object_t *) o);
#endif

	LLNI_class_get(o, c);

#if !defined(NDEBUG)
	if (opt_DebugFinalizer) {
		log_start();
		log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
		class_print(c);
		log_print("]");
		log_finish();
	}
#endif

	/* If the class has a Java finalizer, run it too — NOTE(review):
	   presumably because registering this GC finalizer superseded the
	   one normally installed for the object; confirm against the
	   finalizer registration path. */

	if (c->finalizer != NULL)
		finalizer_run(object, p);

	/* remove the lock-record entry from the hashtable and free it */

	lock_hashtable_remove(THREADOBJECT, o);
}
669
670
671 /*============================================================================*/
672 /* OBJECT LOCK INITIALIZATION                                                 */
673 /*============================================================================*/
674
675
676 /* lock_init_object_lock *******************************************************
677
678    Initialize the monitor pointer of the given object. The monitor gets
679    initialized to an unlocked state.
680
681 *******************************************************************************/
682
void lock_init_object_lock(java_object_t *o)
{
	assert(o);

	/* An unlocked object has lockword 0: thread ID 0, count 0, and
	   shape bit 0 (thin-lock mode). */

	o->lockword = THIN_UNLOCKED;
}
689
690
691 /*============================================================================*/
692 /* LOCKING ALGORITHM                                                          */
693 /*============================================================================*/
694
695
696 /* lock_lockword_get ***********************************************************
697
698    Get the lockword for the given object.
699
700    IN:
701       t............the current thread
702       o............the object
703
704 *******************************************************************************/
705
706 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
707 {
708         uintptr_t lockword;
709
710         LLNI_CRITICAL_START_THREAD(t);
711         lockword = LLNI_DIRECT(o)->lockword;
712         LLNI_CRITICAL_END_THREAD(t);
713
714         return lockword;
715 }
716
717
718 /* lock_lockword_set ***********************************************************
719
720    Set the lockword for the given object.
721
722    IN:
723       t............the current thread
724       o............the object
725           lockword.....the new lockword value
726
727 *******************************************************************************/
728
729 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
730 {
731         LLNI_CRITICAL_START_THREAD(t);
732         LLNI_DIRECT(o)->lockword = lockword;
733         LLNI_CRITICAL_END_THREAD(t);
734 }
735
736 /* lock_lockword_swap **********************************************************
737
738    Atomically sets a new lockword for the given object and returns the
739    previous value.
740
741    IN:
742       t............the current thread
743       o............the object
744           lockword.....the new lockword value
745
746    RETURN VALUE:
747       the previous lockword value
748 *******************************************************************************/
749
static inline ptrint lock_lockword_swap(threadobject *t, java_handle_t *o, uintptr_t lockword)
{
	uintptr_t old;

	/* Snapshot the current lockword; this read may race and be
	   immediately stale, which the CAS loop below tolerates. */

	LLNI_CRITICAL_START_THREAD(t);
	old = LLNI_DIRECT(o)->lockword;
	LLNI_CRITICAL_END_THREAD(t);

	/* CAS loop: retry with whatever value the CAS actually observed
	   until the swap succeeds.  Every handle access is wrapped in an
	   LLNI critical section. */

	for (;;)
	{
		uintptr_t r;

		LLNI_CRITICAL_START_THREAD(t);
		r = COMPARE_AND_SWAP_OLD_VALUE(&LLNI_DIRECT(o)->lockword, old, lockword);
		LLNI_CRITICAL_END_THREAD(t);
		if (r == old)
			return old;    /* swap succeeded: report previous value */
		else
			old = r;       /* lost a race: retry with observed value */
	}
}
771
772 /* try_set_flc_bit ************************************************************
773
774    Tries to atomically set the FLC bit in the lockword, only if the
775    lockword is not inflated. Returns success status.
776
777    IN:
778       t............the current thread
779       o............the object
780           lockword.....the current lockword value
781
782    RETURN VALUE:
783       1 if the FLC is now set, 0 otherwise
784 *******************************************************************************/
785
static int try_set_flc_bit(threadobject *t, java_handle_t *o, ptrint lockword)
{
	ptrint r;

	/* Never set the FLC bit on an unlocked word: there is no owning
	   thread that could notice it and notify us on unlock. */

	if (lockword == THIN_UNLOCKED)
		return 0;

	/* Single CAS attempt: lockword -> lockword | FLC bit. */

	LLNI_CRITICAL_START_THREAD(t);
	r = COMPARE_AND_SWAP_OLD_VALUE(&LLNI_DIRECT(o)->lockword, lockword, lockword | THIN_LOCK_FLC_BIT);
	LLNI_CRITICAL_END_THREAD(t);

	/* The CAS returns the old value; it succeeded iff that equals
	   the value the update was based on. */

	return r == lockword;
}
798
799
800 /* lock_record_enter ***********************************************************
801
802    Enter the lock represented by the given lock record.
803
804    IN:
805       t.................the current thread
806           lr................the lock record
807
808 *******************************************************************************/
809
static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
{
	/* Block until the record's mutex is acquired, then record the
	   owner.  The store happens while holding the mutex, so only the
	   owning thread ever writes this field. */

	pthread_mutex_lock(&(lr->mutex));
	lr->owner = t;
}
815
816
817 /* lock_record_exit ************************************************************
818
819    Release the lock represented by the given lock record.
820
821    IN:
822       t.................the current thread
823           lr................the lock record
824
825    PRE-CONDITION:
826       The current thread must own the lock represented by this lock record.
827           This is NOT checked by this function!
828
829 *******************************************************************************/
830
static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
{
	/* Clear the owner BEFORE releasing the mutex; after the unlock
	   another thread may acquire the record immediately.  (t is
	   unused; ownership is not verified here — see pre-condition.) */

	lr->owner = NULL;
	pthread_mutex_unlock(&(lr->mutex));
}
836
837
838 /* lock_inflate ****************************************************************
839
840    Inflate the lock of the given object. This may only be called by the
841    owner of the monitor of the object.
842
843    IN:
844       t............the current thread
845           o............the object of which to inflate the lock
846           lr...........the lock record to install. The current thread must
847                        own the lock of this lock record!
848
849    PRE-CONDITION:
850       The current thread must be the owner of this object's monitor AND
851           of the lock record's lock!
852
853 *******************************************************************************/
854
static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
{
	uintptr_t lockword;

	/* get the current lock count */

	lockword = lock_lockword_get(t, o);

	if (IS_FAT_LOCK(lockword)) {
		/* Already inflated: it must be inflated with exactly this
		   lock record. */

		assert(GET_FAT_LOCK(lockword) == lr);
	}
	else {
		/* Still thin: the word must be our thinlock value plus some
		   recursion count (we are the owner). */

		assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);

		/* copy the count from the thin lock */

		lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
	}

	DEBUGLOCKS(("[lock_inflate      : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
				lr, t, o, lockword, lr->count));

	/* Wake all threads waiting on this record (e.g. contenders that
	   set the FLC bit and are blocked in lock_record_wait). */

	lock_record_notify(t, lr, false);

	/* Publish the record: from now on the lockword is fat. */

	lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
}
885
886
887 /* lock_monitor_enter **********************************************************
888
889    Acquire the monitor of the given object. If the current thread already
890    owns the monitor, the lock counter is simply increased.
891
892    This function blocks until it can acquire the monitor.
893
894    IN:
895       t............the current thread
896           o............the object of which to enter the monitor
897
898    RETURN VALUE:
899       true.........the lock has been successfully acquired
900           false........an exception has been thrown
901
902 *******************************************************************************/
903
904 bool lock_monitor_enter(java_handle_t *o)
905 {
906         threadobject  *t;
907         /* CAUTION: This code assumes that ptrint is unsigned! */
908         ptrint         lockword;
909         ptrint         thinlock;
910         lock_record_t *lr;
911
912         if (o == NULL) {
913                 exceptions_throw_nullpointerexception();
914                 return false;
915         }
916
917         t = THREADOBJECT;
918
919         thinlock = t->thinlock;
920
921         /* most common case: try to thin-lock an unlocked object */
922
923         LLNI_CRITICAL_START_THREAD(t);
924         lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
925         LLNI_CRITICAL_END_THREAD(t);
926
927         if (lockword == THIN_UNLOCKED) {
928                 /* success. we locked it */
929                 /* The Java Memory Model requires a memory barrier here: */
930                 /* Because of the CAS above, this barrier is a nop on x86 / x86_64 */
931                 MEMORY_BARRIER_AFTER_ATOMIC();
932                 return true;
933         }
934
935         /* next common case: recursive lock with small recursion count */
936         /* We don't have to worry about stale values here, as any stale value  */
937         /* will indicate another thread holding the lock (or an inflated lock) */
938
939         if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
940                 /* we own this monitor               */
941                 /* check the current recursion count */
942
943                 if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
944                 {
945                         /* the recursion count is low enough */
946
947                         lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);
948
949                         /* success. we locked it */
950                         return true;
951                 }
952                 else {
953                         /* recursion count overflow */
954
955                         lr = lock_hashtable_get(t, o);
956                         lock_record_enter(t, lr);
957                         lock_inflate(t, o, lr);
958                         lr->count++;
959
960                         return true;
961                 }
962         }
963
964         /* the lock is either contented or fat */
965
966         if (IS_FAT_LOCK(lockword)) {
967
968                 lr = GET_FAT_LOCK(lockword);
969
970                 /* check for recursive entering */
971                 if (lr->owner == t) {
972                         lr->count++;
973                         return true;
974                 }
975
976                 /* acquire the mutex of the lock record */
977
978                 lock_record_enter(t, lr);
979
980                 assert(lr->count == 0);
981
982                 return true;
983         }
984
985         /****** inflation path ******/
986
987         /* first obtain the lock record for this object */
988
989         lr = lock_hashtable_get(t, o);
990
991 #if defined(ENABLE_JVMTI)
992         /* Monitor Contended Enter */
993         jvmti_MonitorContendedEntering(false, o);
994 #endif
995
996         /* enter the monitor */
997
998         lock_record_enter(t, lr);
999
1000 #if defined(ENABLE_JVMTI)
1001         /* Monitor Contended Entered */
1002         jvmti_MonitorContendedEntering(true, o);
1003 #endif
1004
1005         /* inflation loop */
1006
1007         while (IS_THIN_LOCK(lockword = lock_lockword_get(t, o))) {
1008                 /* Set the flat lock contention bit to let the owning thread
1009                    know that we want to be notified of unlocking. */
1010
1011                 if (try_set_flc_bit(t, o, lockword)) {
1012                         /* Wait until another thread sees the flc bit and notifies
1013                            us of unlocking. */
1014
1015                         DEBUGLOCKS(("thread %d set flc bit on %p lr %p",
1016                                                 t->index, (void*) o, (void*) lr));
1017
1018                         (void) lock_record_wait(t, lr, 0, 0);
1019                 }
1020
1021                 /* try to lock the object */
1022
1023                 LLNI_CRITICAL_START_THREAD(t);
1024                 lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
1025                 LLNI_CRITICAL_END_THREAD(t);
1026
1027                 if (lockword == THIN_UNLOCKED) {
1028                         /* we can inflate the lock ourselves */
1029
1030                         DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
1031                                                 t->index, (void*) o, (void*) lr));
1032
1033                         lock_inflate(t, o, lr);
1034                 }
1035         }
1036
1037         /* we own the inflated lock now */
1038
1039         return true;
1040 }
1041
1042
1043 /* lock_monitor_exit ***********************************************************
1044
1045    Decrement the counter of a (currently owned) monitor. If the counter
1046    reaches zero, release the monitor.
1047
   If the current thread is not the owner of the monitor, an
   IllegalMonitorStateException is thrown.
1050
1051    IN:
1052       t............the current thread
1053           o............the object of which to exit the monitor
1054
1055    RETURN VALUE:
1056       true.........everything ok,
1057           false........an exception has been thrown
1058
1059 *******************************************************************************/
1060
bool lock_monitor_exit(java_handle_t *o)
{
	threadobject *t;
	uintptr_t     lockword;
	ptrint        thinlock;

	if (o == NULL) {
		exceptions_throw_nullpointerexception();
		return false;
	}

	t = THREADOBJECT;

	/* this thread's thin-lock word; compared against the object's
	   lockword below to recognize a thin lock held by us */

	thinlock = t->thinlock;

	/* We don't have to worry about stale values here, as any stale value */
	/* will indicate that we don't own the lock.                          */

	lockword = lock_lockword_get(t, o);

	/* most common case: we release a thin lock that we hold once */
	/* (the FLC bit is masked out of the comparison: contention    */
	/* marking does not affect ownership)                          */

	if (LOCK_WORD_WITHOUT_FLC_BIT(lockword) == thinlock) {
		/* memory barrier for Java Memory Model */
		MEMORY_BARRIER_BEFORE_ATOMIC();
		lockword = lock_lockword_swap(t, o, THIN_UNLOCKED);

		/* check if there has been a flat lock contention on this object */

		if (lockword & THIN_LOCK_FLC_BIT) {
			lock_record_t *lr;

			DEBUGLOCKS(("thread %d saw flc bit on %p",
						t->index, (void*) o));

			/* there has been a contention on this thin lock */

			lr = lock_hashtable_get(t, o);

			DEBUGLOCKS(("thread %d for %p got lr %p",
						t->index, (void*) o, (void*) lr));

			lock_record_enter(t, lr);

			/* notify a thread that it can try to inflate the lock now */

			lock_record_notify(t, lr, true);

			lock_record_exit(t, lr);
		}

		return true;
	}

	/* next common case: we release a recursive lock, count > 0 */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		/* decrement the recursion count embedded in the lockword; */
		/* the object stays locked by this thread                  */
		lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
		return true;
	}

	/* either the lock is fat, or we don't hold it at all */

	if (IS_FAT_LOCK(lockword)) {

		lock_record_t *lr;

		lr = GET_FAT_LOCK(lockword);

		/* check if we own this monitor */
		/* We don't have to worry about stale values here, as any stale value */
		/* will be != t and thus fail this check.                             */

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();
			return false;
		}

		/* { the current thread `t` owns the lock record `lr` on object `o` } */

		if (lr->count != 0) {
			/* we had locked this one recursively. just decrement, it will */
			/* still be locked. */
			lr->count--;
			return true;
		}

		/* unlock this lock record: clear the owner field, then */
		/* release the record's mutex                            */

		lr->owner = NULL;
		pthread_mutex_unlock(&(lr->mutex));

		return true;
	}

	/* legal thin lock cases have been handled above, so this is an error */

	exceptions_throw_illegalmonitorstateexception();

	return false;
}
1162
1163
1164 /* lock_record_add_waiter ******************************************************
1165
1166    Add a thread to the list of waiting threads of a lock record.
1167
1168    IN:
1169       lr...........the lock record
1170       thread.......the thread to add
1171
1172 *******************************************************************************/
1173
1174 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
1175 {
1176         lock_waiter_t *w;
1177
1178         /* Allocate a waiter data structure. */
1179
1180         w = NEW(lock_waiter_t);
1181
1182 #if defined(ENABLE_STATISTICS)
1183         if (opt_stat)
1184                 size_lock_waiter += sizeof(lock_waiter_t);
1185 #endif
1186
1187         /* Store the thread in the waiter structure. */
1188
1189         w->thread = thread;
1190
1191         /* Add the waiter as last entry to waiters list. */
1192
1193         list_add_last(lr->waiters, w);
1194 }
1195
1196
1197 /* lock_record_remove_waiter ***************************************************
1198
1199    Remove a thread from the list of waiting threads of a lock record.
1200
1201    IN:
1202       lr...........the lock record
      thread.......the thread to remove
1204
1205    PRE-CONDITION:
1206       The current thread must be the owner of the lock record.
1207    
1208 *******************************************************************************/
1209
1210 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
1211 {
1212         list_t        *l;
1213         lock_waiter_t *w;
1214
1215         /* Get the waiters list. */
1216
1217         l = lr->waiters;
1218
1219         for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
1220                 if (w->thread == thread) {
1221                         /* Remove the waiter entry from the list. */
1222
1223                         list_remove_unsynced(l, w);
1224
1225                         /* Free the waiter data structure. */
1226
1227                         FREE(w, lock_waiter_t);
1228
1229 #if defined(ENABLE_STATISTICS)
1230                         if (opt_stat)
1231                                 size_lock_waiter -= sizeof(lock_waiter_t);
1232 #endif
1233
1234                         return;
1235                 }
1236         }
1237
1238         /* This should never happen. */
1239
1240         vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
1241 }
1242
1243
1244 /* lock_record_wait ************************************************************
1245
1246    Wait on a lock record for a given (maximum) amount of time.
1247
1248    IN:
1249       t............the current thread
1250           lr...........the lock record
1251           millis.......milliseconds of timeout
1252           nanos........nanoseconds of timeout
1253
1254    RETURN VALUE:
1255       true.........we have been interrupted,
1256       false........everything ok
1257
1258    PRE-CONDITION:
1259       The current thread must be the owner of the lock record.
1260           This is NOT checked by this function!
1261    
1262 *******************************************************************************/
1263
1264 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1265 {
1266         s4   lockcount;
1267         bool wasinterrupted;
1268
1269         DEBUGLOCKS(("[lock_record_wait  : lr=%p, t=%p, millis=%lld, nanos=%d]",
1270                                 lr, thread, millis, nanos));
1271
1272         /* { the thread t owns the fat lock record lr on the object o } */
1273
1274         /* register us as waiter for this object */
1275
1276         lock_record_add_waiter(lr, thread);
1277
1278         /* remember the old lock count */
1279
1280         lockcount = lr->count;
1281
1282         /* unlock this record */
1283
1284         lr->count = 0;
1285         lock_record_exit(thread, lr);
1286
1287         /* wait until notified/interrupted/timed out */
1288
1289         wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);
1290
1291         /* re-enter the monitor */
1292
1293         lock_record_enter(thread, lr);
1294
1295         /* remove us from the list of waiting threads */
1296
1297         lock_record_remove_waiter(lr, thread);
1298
1299         /* restore the old lock count */
1300
1301         lr->count = lockcount;
1302
1303         /* return if we have been interrupted */
1304
1305         return wasinterrupted;
1306 }
1307
1308
1309 /* lock_monitor_wait ***********************************************************
1310
1311    Wait on an object for a given (maximum) amount of time.
1312
1313    IN:
1314       t............the current thread
1315           o............the object
1316           millis.......milliseconds of timeout
1317           nanos........nanoseconds of timeout
1318
1319    PRE-CONDITION:
1320       The current thread must be the owner of the object's monitor.
1321    
1322 *******************************************************************************/
1323
1324 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
1325 {
1326         uintptr_t      lockword;
1327         lock_record_t *lr;
1328
1329         lockword = lock_lockword_get(t, o);
1330
1331         /* check if we own this monitor */
1332         /* We don't have to worry about stale values here, as any stale value */
1333         /* will fail this check.                                              */
1334
1335         if (IS_FAT_LOCK(lockword)) {
1336
1337                 lr = GET_FAT_LOCK(lockword);
1338
1339                 if (lr->owner != t) {
1340                         exceptions_throw_illegalmonitorstateexception();
1341                         return;
1342                 }
1343         }
1344         else {
1345                 /* it's a thin lock */
1346
1347                 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1348                         exceptions_throw_illegalmonitorstateexception();
1349                         return;
1350                 }
1351
1352                 /* inflate this lock */
1353
1354                 lr = lock_hashtable_get(t, o);
1355                 lock_record_enter(t, lr);
1356                 lock_inflate(t, o, lr);
1357         }
1358
1359         /* { the thread t owns the fat lock record lr on the object o } */
1360
1361         if (lock_record_wait(t, lr, millis, nanos))
1362                 exceptions_throw_interruptedexception();
1363 }
1364
1365
1366 /* lock_record_notify **********************************************************
1367
1368    Notify one thread or all threads waiting on the given lock record.
1369
1370    IN:
1371       t............the current thread
1372           lr...........the lock record
1373           one..........if true, only notify one thread
1374
1375    PRE-CONDITION:
1376       The current thread must be the owner of the lock record.
1377           This is NOT checked by this function!
1378    
1379 *******************************************************************************/
1380
1381 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1382 {
1383         list_t        *l;
1384         lock_waiter_t *w;
1385         threadobject  *waitingthread;
1386
1387         /* { the thread t owns the fat lock record lr on the object o } */
1388
1389         /* Get the waiters list. */
1390
1391         l = lr->waiters;
1392
1393         for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
1394                 /* signal the waiting thread */
1395
1396                 waitingthread = w->thread;
1397
1398                 /* If the thread was already signaled but hasn't removed
1399                    itself from the list yet, just ignore it. */
1400
1401                 if (waitingthread->signaled == true)
1402                         continue;
1403
1404                 /* Enter the wait-mutex. */
1405
1406                 pthread_mutex_lock(&(waitingthread->waitmutex));
1407
1408                 DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
1409                                         lr, t, waitingthread, waitingthread->sleeping, one));
1410
1411                 /* Signal the thread if it's sleeping. */
1412
1413                 if (waitingthread->sleeping)
1414                         pthread_cond_signal(&(waitingthread->waitcond));
1415
1416                 /* Mark the thread as signaled. */
1417
1418                 waitingthread->signaled = true;
1419
1420                 /* Leave the wait-mutex. */
1421
1422                 pthread_mutex_unlock(&(waitingthread->waitmutex));
1423
1424                 /* if we should only wake one, we are done */
1425
1426                 if (one)
1427                         break;
1428         }
1429 }
1430
1431
1432 /* lock_monitor_notify *********************************************************
1433
1434    Notify one thread or all threads waiting on the given object.
1435
1436    IN:
1437       t............the current thread
1438           o............the object
1439           one..........if true, only notify one thread
1440
1441    PRE-CONDITION:
1442       The current thread must be the owner of the object's monitor.
1443    
1444 *******************************************************************************/
1445
1446 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1447 {
1448         uintptr_t      lockword;
1449         lock_record_t *lr;
1450
1451         lockword = lock_lockword_get(t, o);
1452
1453         /* check if we own this monitor */
1454         /* We don't have to worry about stale values here, as any stale value */
1455         /* will fail this check.                                              */
1456
1457         if (IS_FAT_LOCK(lockword)) {
1458
1459                 lr = GET_FAT_LOCK(lockword);
1460
1461                 if (lr->owner != t) {
1462                         exceptions_throw_illegalmonitorstateexception();
1463                         return;
1464                 }
1465         }
1466         else {
1467                 /* it's a thin lock */
1468
1469                 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1470                         exceptions_throw_illegalmonitorstateexception();
1471                         return;
1472                 }
1473
1474                 /* inflate this lock */
1475
1476                 lr = lock_hashtable_get(t, o);
1477                 lock_record_enter(t, lr);
1478                 lock_inflate(t, o, lr);
1479         }
1480
1481         /* { the thread t owns the fat lock record lr on the object o } */
1482
1483         lock_record_notify(t, lr, one);
1484 }
1485
1486
1487
1488 /*============================================================================*/
/* INQUIRY FUNCTIONS                                                          */
1490 /*============================================================================*/
1491
1492
1493 /* lock_is_held_by_current_thread **********************************************
1494
1495    Return true if the current thread owns the monitor of the given object.
1496
1497    IN:
1498           o............the object
1499
1500    RETURN VALUE:
1501       true, if the current thread holds the lock of this object.
1502    
1503 *******************************************************************************/
1504
1505 bool lock_is_held_by_current_thread(java_handle_t *o)
1506 {
1507         threadobject  *t;
1508         uintptr_t      lockword;
1509         lock_record_t *lr;
1510
1511         t = THREADOBJECT;
1512
1513         /* check if we own this monitor */
1514         /* We don't have to worry about stale values here, as any stale value */
1515         /* will fail this check.                                              */
1516
1517         lockword = lock_lockword_get(t, o);
1518
1519         if (IS_FAT_LOCK(lockword)) {
1520                 /* it's a fat lock */
1521
1522                 lr = GET_FAT_LOCK(lockword);
1523
1524                 return (lr->owner == t);
1525         }
1526         else {
1527                 /* it's a thin lock */
1528
1529                 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1530         }
1531 }
1532
1533
1534
1535 /*============================================================================*/
1536 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD                              */
1537 /*============================================================================*/
1538
1539
1540 /* lock_wait_for_object ********************************************************
1541
1542    Wait for the given object.
1543
1544    IN:
1545           o............the object
1546           millis.......milliseconds to wait
1547           nanos........nanoseconds to wait
1548    
1549 *******************************************************************************/
1550
1551 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1552 {
1553         threadobject *thread;
1554
1555         thread = THREADOBJECT;
1556
1557         lock_monitor_wait(thread, o, millis, nanos);
1558 }
1559
1560
1561 /* lock_notify_object **********************************************************
1562
1563    Notify one thread waiting on the given object.
1564
1565    IN:
1566           o............the object
1567    
1568 *******************************************************************************/
1569
1570 void lock_notify_object(java_handle_t *o)
1571 {
1572         threadobject *thread;
1573
1574         thread = THREADOBJECT;
1575
1576         lock_monitor_notify(thread, o, true);
1577 }
1578
1579
1580 /* lock_notify_all_object ******************************************************
1581
1582    Notify all threads waiting on the given object.
1583
1584    IN:
1585           o............the object
1586    
1587 *******************************************************************************/
1588
1589 void lock_notify_all_object(java_handle_t *o)
1590 {
1591         threadobject *thread;
1592
1593         thread = THREADOBJECT;
1594
1595         lock_monitor_notify(thread, o, false);
1596 }
1597
1598
1599 /*
1600  * These are local overrides for various environment variables in Emacs.
1601  * Please do not remove this and leave it at the end of the file, where
1602  * Emacs will automagically detect them.
1603  * ---------------------------------------------------------------------
1604  * Local variables:
1605  * mode: c
1606  * indent-tabs-mode: t
1607  * c-basic-offset: 4
1608  * tab-width: 4
1609  * End:
1610  * vim:noexpandtab:sw=4:ts=4:
1611  */