* src/threads/posix/threads.c: Renamed from here...
[cacao.git] / src / threads / posix / lock.c
1 /* src/threads/posix/lock.c - lock implementation
2
3    Copyright (C) 1996-2005, 2006, 2007, 2008
4    CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5
6    This file is part of CACAO.
7
8    This program is free software; you can redistribute it and/or
9    modify it under the terms of the GNU General Public License as
10    published by the Free Software Foundation; either version 2, or (at
11    your option) any later version.
12
13    This program is distributed in the hope that it will be useful, but
14    WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16    General Public License for more details.
17
18    You should have received a copy of the GNU General Public License
19    along with this program; if not, write to the Free Software
20    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21    02110-1301, USA.
22
23 */
24
25
26 #include "config.h"
27
28 #include <assert.h>
29 #include <stdint.h>
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <sys/time.h>
33 #include <pthread.h>
34
35 #include "vm/types.h"
36
37 #include "mm/memory.h"
38
39 #include "native/llni.h"
40
41 #include "threads/lock-common.h"
42 #include "threads/threadlist.h"
43 #include "threads/thread.h"
44
45 #include "threads/posix/lock.h"
46
47 #include "toolbox/list.h"
48
49 #include "vm/global.h"
50 #include "vm/exceptions.h"
51 #include "vm/finalizer.h"
52 #include "vm/stringlocal.h"
53 #include "vm/vm.h"
54
55 #include "vmcore/options.h"
56
57 #if defined(ENABLE_STATISTICS)
58 # include "vmcore/statistics.h"
59 #endif
60
61 #if defined(ENABLE_VMLOG)
62 #include <vmlog_cacao.h>
63 #endif
64
65 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
66
67 #include "arch.h"
68
69 /* includes for atomic instructions: */
70
71 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
72 #include "threads/posix/generic-primitives.h"
73 #else
74 #include "machine-instr.h"
75 #endif
76
77 #if defined(ENABLE_JVMTI)
78 #include "native/jvmti/cacaodbg.h"
79 #endif
80
81 #if defined(ENABLE_GC_BOEHM)
82 # include "mm/boehm-gc/include/gc.h"
83 #endif
84
85
86 /* debug **********************************************************************/
87
88 #if !defined(NDEBUG)
89 # define DEBUGLOCKS(format) \
90     do { \
91         if (opt_DebugLocks) { \
92             log_println format; \
93         } \
94     } while (0)
95 #else
96 # define DEBUGLOCKS(format)
97 #endif
98
99
100 /******************************************************************************/
101 /* MACROS                                                                     */
102 /******************************************************************************/
103
104 /* number of lock records in the first pool allocated for a thread */
105 #define LOCK_INITIAL_LOCK_RECORDS 8
106
107 #define LOCK_INITIAL_HASHTABLE_SIZE  1613  /* a prime in the middle between 1024 and 2048 */
108
109 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
110         ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
111
112
113 /******************************************************************************/
114 /* MACROS FOR THIN/FAT LOCKS                                                  */
115 /******************************************************************************/
116
117 /* We use a variant of the tasuki locks described in the paper
118  *     
119  *     Tamiya Onodera, Kiyokuni Kawachiya
120  *     A Study of Locking Objects with Bimodal Fields
121  *     Proceedings of the ACM OOPSLA '99, pp. 223-237
122  *     1999
123  *
124  * The underlying thin locks are a variant of the thin locks described in
125  * 
126  *     Bacon, Konuru, Murthy, Serrano
127  *     Thin Locks: Featherweight Synchronization for Java
128  *         Proceedings of the ACM Conference on Programming Language Design and 
129  *         Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
130  *         June 1998
131  *
132  * In thin lock mode the lockword looks like this:
133  *
134  *     ,----------------------,-----------,---,
135  *     |      thread ID       |   count   | 0 |
136  *     `----------------------'-----------'---´
137  *
138  *     thread ID......the 'index' of the owning thread, or 0
139  *     count..........number of times the lock has been entered minus 1
140  *     0..............the shape bit is 0 in thin lock mode
141  *
142  * In fat lock mode it is basically a lock_record_t *:
143  *
144  *     ,----------------------------------,---,
145  *     |    lock_record_t * (without LSB) | 1 |
146  *     `----------------------------------'---´
147  *
148  *     1..............the shape bit is 1 in fat lock mode
149  */
150
151 #if SIZEOF_VOID_P == 8
152 #define THIN_LOCK_WORD_SIZE    64
153 #else
154 #define THIN_LOCK_WORD_SIZE    32
155 #endif
156
157 #define THIN_LOCK_SHAPE_BIT    0x01
158
159 #define THIN_UNLOCKED          0
160
161 #define THIN_LOCK_COUNT_SHIFT  1
162 #define THIN_LOCK_COUNT_SIZE   8
163 #define THIN_LOCK_COUNT_INCR   (1 << THIN_LOCK_COUNT_SHIFT)
164 #define THIN_LOCK_COUNT_MAX    ((1 << THIN_LOCK_COUNT_SIZE) - 1)
165 #define THIN_LOCK_COUNT_MASK   (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
166
167 #define THIN_LOCK_TID_SHIFT    (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
168 #define THIN_LOCK_TID_SIZE     (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
169
170 #define IS_THIN_LOCK(lockword)  (!((lockword) & THIN_LOCK_SHAPE_BIT))
171 #define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)
172
173 #define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
174 #define MAKE_FAT_LOCK(ptr)      ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)
175
176 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
177 #define GET_THREAD_INDEX(lockword) ((unsigned) lockword >> THIN_LOCK_TID_SHIFT)
178
179
180 /* global variables ***********************************************************/
181
182 /* hashtable mapping objects to lock records */
183 static lock_hashtable_t lock_hashtable;
184
185
186 /******************************************************************************/
187 /* PROTOTYPES                                                                 */
188 /******************************************************************************/
189
190 static void lock_hashtable_init(void);
191
192 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
193 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
194 static void lock_record_enter(threadobject *t, lock_record_t *lr);
195 static void lock_record_exit(threadobject *t, lock_record_t *lr);
196 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
197 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
198
199
200 /*============================================================================*/
201 /* INITIALIZATION OF DATA STRUCTURES                                          */
202 /*============================================================================*/
203
204
205 /* lock_init *******************************************************************
206
207    Initialize global data for locking.
208
209 *******************************************************************************/
210
void lock_init(void)
{
	/* Set up the global object -> lock-record hashtable. */

	lock_hashtable_init();

#if defined(ENABLE_VMLOG)
	/* Tell the vmlog tracing facility about the locking subsystem. */

	vmlog_cacao_init_lock();
#endif
}
221
222
223 /* lock_pre_compute_thinlock ***************************************************
224
225    Pre-compute the thin lock value for a thread index.
226
227    IN:
228       index........the thread index (>= 1)
229
230    RETURN VALUE:
231       the thin lock value for this thread index
232
233 *******************************************************************************/
234
235 ptrint lock_pre_compute_thinlock(s4 index)
236 {
237         return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
238 }
239
240
241 /* lock_record_new *************************************************************
242
243    Allocate a lock record.
244
245 *******************************************************************************/
246
247 static lock_record_t *lock_record_new(void)
248 {
249         int result;
250         lock_record_t *lr;
251
252         /* allocate the data structure on the C heap */
253
254         lr = NEW(lock_record_t);
255
256 #if defined(ENABLE_STATISTICS)
257         if (opt_stat)
258                 size_lock_record += sizeof(lock_record_t);
259 #endif
260
261         /* initialize the members */
262
263         lr->object  = NULL;
264         lr->owner   = NULL;
265         lr->count   = 0;
266         lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));
267
268 #if defined(ENABLE_GC_CACAO)
269         /* register the lock object as weak reference with the GC */
270
271         gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
272 #endif
273
274         /* initialize the mutex */
275
276         result = pthread_mutex_init(&(lr->mutex), NULL);
277         if (result != 0)
278                 vm_abort_errnum(result, "lock_record_new: pthread_mutex_init failed");
279
280         DEBUGLOCKS(("[lock_record_new   : lr=%p]", (void *) lr));
281
282         return lr;
283 }
284
285
286 /* lock_record_free ************************************************************
287
288    Free a lock record.
289
290    IN:
291        lr....lock record to free
292
293 *******************************************************************************/
294
295 static void lock_record_free(lock_record_t *lr)
296 {
297         int result;
298
299         DEBUGLOCKS(("[lock_record_free  : lr=%p]", (void *) lr));
300
301         /* Destroy the mutex. */
302
303         result = pthread_mutex_destroy(&(lr->mutex));
304         if (result != 0)
305                 vm_abort_errnum(result, "lock_record_free: pthread_mutex_destroy failed");
306
307 #if defined(ENABLE_GC_CACAO)
308         /* unregister the lock object reference with the GC */
309
310         gc_weakreference_unregister(&(lr->object));
311 #endif
312
313         /* Free the waiters list. */
314
315         list_free(lr->waiters);
316
317         /* Free the data structure. */
318
319         FREE(lr, lock_record_t);
320
321 #if defined(ENABLE_STATISTICS)
322         if (opt_stat)
323                 size_lock_record -= sizeof(lock_record_t);
324 #endif
325 }
326
327
328 /*============================================================================*/
329 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS                                  */
330 /*============================================================================*/
331
332 /* lock_hashtable_init *********************************************************
333
334    Initialize the global hashtable mapping objects to lock records.
335
336 *******************************************************************************/
337
338 static void lock_hashtable_init(void)
339 {
340         pthread_mutex_init(&(lock_hashtable.mutex), NULL);
341
342         lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
343         lock_hashtable.entries = 0;
344         lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);
345
346 #if defined(ENABLE_STATISTICS)
347         if (opt_stat)
348                 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
349 #endif
350
351         MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
352 }
353
354
355 /* lock_hashtable_grow *********************************************************
356
357    Grow the lock record hashtable to about twice its current size and
358    rehash the entries.
359
360 *******************************************************************************/
361
362 /* must be called with hashtable mutex locked */
363 static void lock_hashtable_grow(void)
364 {
365         u4 oldsize;
366         u4 newsize;
367         lock_record_t **oldtable;
368         lock_record_t **newtable;
369         lock_record_t *lr;
370         lock_record_t *next;
371         u4 i;
372         u4 h;
373         u4 newslot;
374
375         /* allocate a new table */
376
377         oldsize = lock_hashtable.size;
378         newsize = oldsize*2 + 1; /* XXX should use prime numbers */
379
380         DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
381
382         oldtable = lock_hashtable.ptr;
383         newtable = MNEW(lock_record_t *, newsize);
384
385 #if defined(ENABLE_STATISTICS)
386         if (opt_stat)
387                 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
388 #endif
389
390         MZERO(newtable, lock_record_t *, newsize);
391
392         /* rehash the entries */
393
394         for (i = 0; i < oldsize; i++) {
395                 lr = oldtable[i];
396                 while (lr) {
397                         next = lr->hashlink;
398
399                         h = heap_hashcode(lr->object);
400                         newslot = h % newsize;
401
402                         lr->hashlink = newtable[newslot];
403                         newtable[newslot] = lr;
404
405                         lr = next;
406                 }
407         }
408
409         /* replace the old table */
410
411         lock_hashtable.ptr  = newtable;
412         lock_hashtable.size = newsize;
413
414         MFREE(oldtable, lock_record_t *, oldsize);
415
416 #if defined(ENABLE_STATISTICS)
417         if (opt_stat)
418                 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
419 #endif
420 }
421
422
423 /* lock_hashtable_cleanup ******************************************************
424
425    Removes (and frees) lock records which have a cleared object reference
426    from the hashtable. The locked object was reclaimed by the GC.
427
428 *******************************************************************************/
429
#if defined(ENABLE_GC_CACAO)
void lock_hashtable_cleanup(void)
{
	lock_record_t *lr;
	lock_record_t *prev;
	lock_record_t *next;
	u4             i;    /* u4 to match lock_hashtable.size (avoids
	                        signed/unsigned comparison) */

	/* lock the hashtable */

	pthread_mutex_lock(&(lock_hashtable.mutex));

	/* Search all buckets for records whose weak object reference was
	   cleared by the GC, i.e. whose locked object has been
	   reclaimed. */

	for (i = 0; i < lock_hashtable.size; i++) {
		lr   = lock_hashtable.ptr[i];
		prev = NULL;

		while (lr) {
			next = lr->hashlink;

			/* remove lock records with cleared references */

			if (lr->object == NULL) {

				/* unlink the lock record from the hashtable */

				if (prev == NULL)
					lock_hashtable.ptr[i] = next;
				else
					prev->hashlink = next;

				/* Keep the entry count accurate; otherwise the
				   stale count makes lock_hashtable_get() grow the
				   table prematurely. */

				lock_hashtable.entries--;

				/* free the lock record */

				lock_record_free(lr);

			} else {
				prev = lr;
			}

			lr = next;
		}
	}

	/* unlock the hashtable */

	pthread_mutex_unlock(&(lock_hashtable.mutex));
}
#endif
482
483
484 /* lock_hashtable_get **********************************************************
485
486    Find the lock record for the given object.  If it does not exist
487    yet, create it and enter it in the hashtable.
488
489    IN:
490       t....the current thread
491           o....the object to look up
492
493    RETURN VALUE:
494       the lock record to use for this object
495
496 *******************************************************************************/
497
498 #if defined(ENABLE_GC_BOEHM)
499 static void lock_record_finalizer(void *object, void *p);
500 #endif
501
static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
{
	uintptr_t      lockword;
	u4             slot;
	lock_record_t *lr;

	lockword = lock_lockword_get(t, o);

	/* Fast path: an inflated (fat) lockword already carries the lock
	   record pointer directly -- no hashtable access needed. */

	if (IS_FAT_LOCK(lockword))
		return GET_FAT_LOCK(lockword);

	/* lock the hashtable */

	pthread_mutex_lock(&(lock_hashtable.mutex));

	/* lookup the lock record in the hashtable */
	/* (raw object access is bracketed by an LLNI critical section) */

	LLNI_CRITICAL_START_THREAD(t);
	slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
	lr   = lock_hashtable.ptr[slot];

	/* Walk the bucket chain looking for this object's record. */

	for (; lr != NULL; lr = lr->hashlink) {
		if (lr->object == LLNI_DIRECT(o))
			break;
	}
	LLNI_CRITICAL_END_THREAD(t);

	if (lr == NULL) {
		/* not found, we must create a new one */

		lr = lock_record_new();

		LLNI_CRITICAL_START_THREAD(t);
		lr->object = LLNI_DIRECT(o);
		LLNI_CRITICAL_END_THREAD(t);

#if defined(ENABLE_GC_BOEHM)
		/* register new finalizer to clean up the lock record */

		GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
#endif

		/* enter it in the hashtable (prepend to the bucket chain) */

		lr->hashlink             = lock_hashtable.ptr[slot];
		lock_hashtable.ptr[slot] = lr;
		lock_hashtable.entries++;

		/* check whether the hash should grow */
		/* (grow once the load factor exceeds 4/3 entries per bucket) */

		if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
			lock_hashtable_grow();
		}
	}

	/* unlock the hashtable */

	pthread_mutex_unlock(&(lock_hashtable.mutex));

	/* return the new lock record */

	return lr;
}
565
566
567 /* lock_hashtable_remove *******************************************************
568
569    Remove the lock record for the given object from the hashtable
570    and free it afterwards.
571
572    IN:
573        t....the current thread
574        o....the object to look up
575
576 *******************************************************************************/
577
static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
{
	uintptr_t      lockword;
	lock_record_t *lr;
	u4             slot;
	lock_record_t *tmplr;

	/* lock the hashtable */

	pthread_mutex_lock(&(lock_hashtable.mutex));

	/* get lock record */

	lockword = lock_lockword_get(t, o);

	/* This function may only be called for an inflated monitor. */

	assert(IS_FAT_LOCK(lockword));

	lr = GET_FAT_LOCK(lockword);

	/* remove the lock-record from the hashtable */

	LLNI_CRITICAL_START_THREAD(t);
	slot  = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
	tmplr = lock_hashtable.ptr[slot];
	LLNI_CRITICAL_END_THREAD(t);

	if (tmplr == lr) {
		/* special handling if it's the first in the chain */

		lock_hashtable.ptr[slot] = lr->hashlink;
	}
	else {
		/* Walk the bucket chain until we find lr's predecessor, then
		   unlink lr. */

		for (; tmplr != NULL; tmplr = tmplr->hashlink) {
			if (tmplr->hashlink == lr) {
				tmplr->hashlink = lr->hashlink;
				break;
			}
		}

		/* The record must have been somewhere in this chain. */

		assert(tmplr != NULL);
	}

	/* decrease entry count */

	lock_hashtable.entries--;

	/* unlock the hashtable */

	pthread_mutex_unlock(&(lock_hashtable.mutex));

	/* free the lock record */

	lock_record_free(lr);
}
632
633
634 /* lock_record_finalizer *******************************************************
635
636    XXX Remove me for exact GC.
637
638 *******************************************************************************/
639
/* Boehm-GC finalizer installed by lock_hashtable_get(): runs the Java
   finalizer of the reclaimed object (if any) and then removes and frees
   the object's lock record. */

static void lock_record_finalizer(void *object, void *p)
{
	java_handle_t *o;
	classinfo     *c;

	o = (java_handle_t *) object;

#if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
	/* XXX this is only a dirty hack to make Boehm work with handles */

	o = LLNI_WRAP((java_object_t *) o);
#endif

	LLNI_class_get(o, c);

#if !defined(NDEBUG)
	/* Optional debug trace of finalized lock records. */

	if (opt_DebugFinalizer) {
		log_start();
		log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
		class_print(c);
		log_print("]");
		log_finish();
	}
#endif

	/* check for a finalizer function */

	if (c->finalizer != NULL)
		finalizer_run(object, p);

	/* remove the lock-record entry from the hashtable and free it */

	lock_hashtable_remove(THREADOBJECT, o);
}
674
675
676 /*============================================================================*/
677 /* OBJECT LOCK INITIALIZATION                                                 */
678 /*============================================================================*/
679
680
681 /* lock_init_object_lock *******************************************************
682
683    Initialize the monitor pointer of the given object. The monitor gets
684    initialized to an unlocked state.
685
686 *******************************************************************************/
687
688 void lock_init_object_lock(java_object_t *o)
689 {
690         assert(o);
691
692         o->lockword = THIN_UNLOCKED;
693 }
694
695
696 /*============================================================================*/
697 /* LOCKING ALGORITHM                                                          */
698 /*============================================================================*/
699
700
701 /* lock_lockword_get ***********************************************************
702
703    Get the lockword for the given object.
704
705    IN:
706       t............the current thread
707       o............the object
708
709 *******************************************************************************/
710
711 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
712 {
713         uintptr_t lockword;
714
715         LLNI_CRITICAL_START_THREAD(t);
716         lockword = LLNI_DIRECT(o)->lockword;
717         LLNI_CRITICAL_END_THREAD(t);
718
719         return lockword;
720 }
721
722
723 /* lock_lockword_set ***********************************************************
724
725    Set the lockword for the given object.
726
727    IN:
728       t............the current thread
729       o............the object
730           lockword.....the new lockword value
731
732 *******************************************************************************/
733
734 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
735 {
736         LLNI_CRITICAL_START_THREAD(t);
737         LLNI_DIRECT(o)->lockword = lockword;
738         LLNI_CRITICAL_END_THREAD(t);
739 }
740
741
742 /* lock_record_enter ***********************************************************
743
744    Enter the lock represented by the given lock record.
745
746    IN:
747       t.................the current thread
748           lr................the lock record
749
750 *******************************************************************************/
751
752 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
753 {
754         pthread_mutex_lock(&(lr->mutex));
755         lr->owner = t;
756 }
757
758
759 /* lock_record_exit ************************************************************
760
761    Release the lock represented by the given lock record.
762
763    IN:
764       t.................the current thread
765           lr................the lock record
766
767    PRE-CONDITION:
768       The current thread must own the lock represented by this lock record.
769           This is NOT checked by this function!
770
771 *******************************************************************************/
772
773 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
774 {
775         lr->owner = NULL;
776         pthread_mutex_unlock(&(lr->mutex));
777 }
778
779
780 /* lock_inflate ****************************************************************
781
782    Inflate the lock of the given object. This may only be called by the
783    owner of the monitor of the object.
784
785    IN:
786       t............the current thread
787           o............the object of which to inflate the lock
788           lr...........the lock record to install. The current thread must
789                        own the lock of this lock record!
790
791    PRE-CONDITION:
792       The current thread must be the owner of this object's monitor AND
793           of the lock record's lock!
794
795 *******************************************************************************/
796
static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
{
	uintptr_t lockword;

	/* get the current lock count */

	lockword = lock_lockword_get(t, o);

	if (IS_FAT_LOCK(lockword)) {
		/* Already inflated -- then it must have been inflated to
		   exactly this record, and there is nothing left to do. */

		assert(GET_FAT_LOCK(lockword) == lr);
		return;
	}
	else {
		/* The thin lock must be held by the current thread; any
		   recursion count is ignored by the comparison. */

		assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);

		/* copy the count from the thin lock */

		lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
	}

	DEBUGLOCKS(("[lock_inflate      : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
				lr, t, o, lockword, lr->count));

	/* install it */

	lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
}
824
825
826 /* TODO Move this function into threadlist.[ch]. */
827
828 static threadobject *threads_lookup_thread_id(int index)
829 {
830         threadobject *t;
831
832         threadlist_lock();
833
834         for (t = threadlist_first(); t != NULL; t = threadlist_next(t)) {
835                 if (t->state == THREAD_STATE_NEW)
836                         continue;
837                 if (t->index == index)
838                         break;
839         }
840
841         threadlist_unlock();
842         return t;
843 }
844
/* sable_flc_waiting ***********************************************************

   Wait for a contended thin lock using the flat-lock-contention (FLC)
   bit protocol: set the lock holder's flc_bit, enqueue the current
   thread on the holder's FLC list, and sleep on the current thread's
   flc_cond until the holder removes us from the list and notifies us.

   IN:
      lockword.....the lockword as last seen by the caller (a thin lock
                    held by another thread)
      t............the current thread
      o............the object whose monitor is contended

*******************************************************************************/

static void sable_flc_waiting(ptrint lockword, threadobject *t, java_handle_t *o)
{
	int index;
	threadobject *t_other;
	int old_flc;

	index = GET_THREAD_INDEX(lockword);
	t_other = threads_lookup_thread_id(index);
	if (!t_other)
		/* The holding thread no longer exists -- nothing to wait
		   for.  TODO: add statistics */
		return;

	pthread_mutex_lock(&t_other->flc_lock);
	old_flc = t_other->flc_bit;
	t_other->flc_bit = true;

	DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d",
				t->index, t_other->index));

	/* Set FLC bit first, then read the lockword again */
	MEMORY_BARRIER();

	lockword = lock_lockword_get(t, o);

	/* Lockword is still the way it was seen before */
	if (IS_THIN_LOCK(lockword) && (GET_THREAD_INDEX(lockword) == index))
	{
		/* Add tuple (t, o) to the other thread's FLC list */
		t->flc_object = o;
		t->flc_next = t_other->flc_list;
		t_other->flc_list = t;

		for (;;)
		{
			threadobject *current;

			/* Wait until another thread sees the flc bit and notifies
			   us of unlocking. */
			pthread_cond_wait(&t->flc_cond, &t_other->flc_lock);

			/* Traverse FLC list looking if we're still there */
			current = t_other->flc_list;
			while (current && current != t)
				current = current->flc_next;
			if (!current)
				/* not in list anymore, can stop waiting */
				break;

			/* We are still in the list -- the other thread cannot have seen
			   the FLC bit yet */
			assert(t_other->flc_bit);
		}

		t->flc_object = NULL;   /* for garbage collector? */
		t->flc_next = NULL;
	}
	else
		/* The lock was released or inflated in the meantime; restore
		   the holder's previous FLC bit and do not wait. */
		t_other->flc_bit = old_flc;

	pthread_mutex_unlock(&t_other->flc_lock);
}
906
/* notify_flc_waiters **********************************************************

   Wake every thread on the current thread's FLC list: for each waiter
   blocked on an object OTHER than o, inflate that object's monitor (so
   the waiter can block on the fat lock), then broadcast each waiter's
   condition variable.  Finally clears the list and the FLC bit.

   IN:
      t............the current thread (whose FLC list is drained)
      o............the object the caller is currently handling

*******************************************************************************/

static void notify_flc_waiters(threadobject *t, java_handle_t *o)
{
	threadobject *current;

	pthread_mutex_lock(&t->flc_lock);

	current = t->flc_list;
	while (current)
	{
		if (current->flc_object != o)
		{
			/* The object has to be inflated so the other threads can properly
			   block on it. */

			/* Only if not already inflated */
			ptrint lockword = lock_lockword_get(t, current->flc_object);
			if (IS_THIN_LOCK(lockword)) {
				lock_record_t *lr = lock_hashtable_get(t, current->flc_object);
				lock_record_enter(t, lr);

				DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
							t->index, (void*) current->flc_object, (void*) lr));

				lock_inflate(t, current->flc_object, lr);
			}
		}
		/* Wake the waiting thread */
		pthread_cond_broadcast(&current->flc_cond);

		current = current->flc_next;
	}

	/* Drop the whole list and clear the contention bit. */

	t->flc_list = NULL;
	t->flc_bit = false;
	pthread_mutex_unlock(&t->flc_lock);
}
943
944 /* lock_monitor_enter **********************************************************
945
946    Acquire the monitor of the given object. If the current thread already
947    owns the monitor, the lock counter is simply increased.
948
949    This function blocks until it can acquire the monitor.
950
951    IN:
952       t............the current thread
953           o............the object of which to enter the monitor
954
955    RETURN VALUE:
956       true.........the lock has been successfully acquired
957           false........an exception has been thrown
958
959 *******************************************************************************/
960
bool lock_monitor_enter(java_handle_t *o)
{
	threadobject  *t;
	/* CAUTION: This code assumes that ptrint is unsigned! */
	ptrint         lockword;
	ptrint         thinlock;
	lock_record_t *lr;

	if (o == NULL) {
		exceptions_throw_nullpointerexception();
		return false;
	}

	t = THREADOBJECT;

	/* thinlock is this thread's pre-computed thin-lock word (thread id
	   with a zero recursion count). */

	thinlock = t->thinlock;

retry:
	/* most common case: try to thin-lock an unlocked object */
	/* A single CAS on the object's lockword acquires the monitor. */

	LLNI_CRITICAL_START_THREAD(t);
	lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
	LLNI_CRITICAL_END_THREAD(t);

	if (lockword == THIN_UNLOCKED) {
		/* success. we locked it */
		/* The Java Memory Model requires a memory barrier here: */
		/* Because of the CAS above, this barrier is a nop on x86 / x86_64 */
		MEMORY_BARRIER_AFTER_ATOMIC();
		return true;
	}

	/* next common case: recursive lock with small recursion count */
	/* We don't have to worry about stale values here, as any stale value  */
	/* will indicate another thread holding the lock (or an inflated lock) */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		/* we own this monitor               */
		/* check the current recursion count */
		/* Since the non-count bits equal thinlock, XOR leaves only the
		   recursion-count bits of the lockword. */

		if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
		{
			/* the recursion count is low enough */

			lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);

			/* success. we locked it */
			return true;
		}
		else {
			/* recursion count overflow: the thin-lock count field cannot
			   hold another recursion, so switch to a fat lock and track
			   the recursion in lr->count instead. */

			lr = lock_hashtable_get(t, o);
			lock_record_enter(t, lr);
			lock_inflate(t, o, lr);
			lr->count++;

			notify_flc_waiters(t, o);

			return true;
		}
	}

	/* the lock is either contended or fat */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		/* check for recursive entering */
		if (lr->owner == t) {
			lr->count++;
			return true;
		}

		/* acquire the mutex of the lock record */

		lock_record_enter(t, lr);

		/* A freshly acquired fat lock must have a zero recursion count. */

		assert(lr->count == 0);

		return true;
	}

	/****** inflation path ******/
	/* The thin lock is held by another thread. sable_flc_waiting
	   presumably blocks this thread until the holder releases or
	   inflates the lock (flat-lock-contention protocol); afterwards we
	   retry the acquisition from the top. */

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Enter */
	jvmti_MonitorContendedEntering(false, o);
#endif

	sable_flc_waiting(lockword, t, o);

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Entered */
	jvmti_MonitorContendedEntering(true, o);
#endif
	goto retry;
}
1060
1061
1062 /* lock_monitor_exit ***********************************************************
1063
1064    Decrement the counter of a (currently owned) monitor. If the counter
1065    reaches zero, release the monitor.
1066
1067    If the current thread is not the owner of the monitor, an 
1068    IllegalMonitorState exception is thrown.
1069
1070    IN:
      o............the object of which to exit the monitor
           (the current thread is obtained internally via THREADOBJECT)
1073
1074    RETURN VALUE:
1075       true.........everything ok,
1076           false........an exception has been thrown
1077
1078 *******************************************************************************/
1079
bool lock_monitor_exit(java_handle_t *o)
{
	threadobject *t;
	uintptr_t     lockword;
	ptrint        thinlock;

	if (o == NULL) {
		exceptions_throw_nullpointerexception();
		return false;
	}

	t = THREADOBJECT;

	/* thinlock is this thread's thin-lock word (thread id, zero count). */

	thinlock = t->thinlock;

	/* We don't have to worry about stale values here, as any stale value */
	/* will indicate that we don't own the lock.                          */

	lockword = lock_lockword_get(t, o);

	/* most common case: we release a thin lock that we hold once */

	if (lockword == thinlock) {
		/* memory barrier for Java Memory Model */
		STORE_ORDER_BARRIER();
		lock_lockword_set(t, o, THIN_UNLOCKED);
		/* memory barrier for thin locking */
		MEMORY_BARRIER();

		/* check if there has been a flat lock contention on this object */
		/* flc_bit is set by contending threads (flat-lock-contention
		   protocol); they block on our flc_cond until woken here. */

		if (t->flc_bit) {
			DEBUGLOCKS(("thread %d saw flc bit", t->index));

			/* there has been a contention on this thin lock */
			notify_flc_waiters(t, o);
		}

		return true;
	}

	/* next common case: we release a recursive lock, count > 0 */
	/* Just decrement the count field embedded in the thin lockword. */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
		return true;
	}

	/* either the lock is fat, or we don't hold it at all */

	if (IS_FAT_LOCK(lockword)) {

		lock_record_t *lr;

		lr = GET_FAT_LOCK(lockword);

		/* check if we own this monitor */
		/* We don't have to worry about stale values here, as any stale value */
		/* will be != t and thus fail this check.                             */

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();
			return false;
		}

		/* { the current thread `t` owns the lock record `lr` on object `o` } */

		if (lr->count != 0) {
			/* we had locked this one recursively. just decrement, it will */
			/* still be locked. */
			lr->count--;
			return true;
		}

		/* unlock this lock record */

		lr->owner = NULL;
		pthread_mutex_unlock(&(lr->mutex));

		return true;
	}

	/* legal thin lock cases have been handled above, so this is an error */

	exceptions_throw_illegalmonitorstateexception();

	return false;
}
1168
1169
1170 /* lock_record_add_waiter ******************************************************
1171
1172    Add a thread to the list of waiting threads of a lock record.
1173
1174    IN:
1175       lr...........the lock record
1176       thread.......the thread to add
1177
1178 *******************************************************************************/
1179
1180 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
1181 {
1182         lock_waiter_t *w;
1183
1184         /* Allocate a waiter data structure. */
1185
1186         w = NEW(lock_waiter_t);
1187
1188 #if defined(ENABLE_STATISTICS)
1189         if (opt_stat)
1190                 size_lock_waiter += sizeof(lock_waiter_t);
1191 #endif
1192
1193         /* Store the thread in the waiter structure. */
1194
1195         w->thread = thread;
1196
1197         /* Add the waiter as last entry to waiters list. */
1198
1199         list_add_last(lr->waiters, w);
1200 }
1201
1202
1203 /* lock_record_remove_waiter ***************************************************
1204
1205    Remove a thread from the list of waiting threads of a lock record.
1206
1207    IN:
1208       lr...........the lock record
      thread.......the thread to remove from the waiters list
1210
1211    PRE-CONDITION:
1212       The current thread must be the owner of the lock record.
1213    
1214 *******************************************************************************/
1215
1216 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
1217 {
1218         list_t        *l;
1219         lock_waiter_t *w;
1220
1221         /* Get the waiters list. */
1222
1223         l = lr->waiters;
1224
1225         for (w = list_first(l); w != NULL; w = list_next(l, w)) {
1226                 if (w->thread == thread) {
1227                         /* Remove the waiter entry from the list. */
1228
1229                         list_remove(l, w);
1230
1231                         /* Free the waiter data structure. */
1232
1233                         FREE(w, lock_waiter_t);
1234
1235 #if defined(ENABLE_STATISTICS)
1236                         if (opt_stat)
1237                                 size_lock_waiter -= sizeof(lock_waiter_t);
1238 #endif
1239
1240                         return;
1241                 }
1242         }
1243
1244         /* This should never happen. */
1245
1246         vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
1247 }
1248
1249
1250 /* lock_record_wait ************************************************************
1251
1252    Wait on a lock record for a given (maximum) amount of time.
1253
1254    IN:
1255       t............the current thread
1256           lr...........the lock record
1257           millis.......milliseconds of timeout
1258           nanos........nanoseconds of timeout
1259
1260    RETURN VALUE:
1261       true.........we have been interrupted,
1262       false........everything ok
1263
1264    PRE-CONDITION:
1265       The current thread must be the owner of the lock record.
1266           This is NOT checked by this function!
1267    
1268 *******************************************************************************/
1269
static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
{
	s4   lockcount;
	bool wasinterrupted = false;

	DEBUGLOCKS(("[lock_record_wait  : lr=%p, t=%p, millis=%lld, nanos=%d]",
				lr, thread, millis, nanos));

	/* { the thread t owns the fat lock record lr on the object o } */

	/* register us as waiter for this object */
	/* NOTE: this must happen BEFORE the record is released below, so a
	   notify arriving in between can already find us on the list. */

	lock_record_add_waiter(lr, thread);

	/* remember the old lock count */

	lockcount = lr->count;

	/* unlock this record */
	/* The count is zeroed so the record appears unlocked (not
	   recursively held) to the thread that acquires it next. */

	lr->count = 0;
	lock_record_exit(thread, lr);

	/* wait until notified/interrupted/timed out */

	threads_wait_with_timeout_relative(thread, millis, nanos);

	/* re-enter the monitor */

	lock_record_enter(thread, lr);

	/* remove us from the list of waiting threads */

	lock_record_remove_waiter(lr, thread);

	/* restore the old lock count */

	lr->count = lockcount;

	/* We can only be signaled OR interrupted, not both. If both flags
	   are set, reset only signaled and leave the thread in
	   interrupted state. Otherwise, clear both. */

	if (!thread->signaled) {
		wasinterrupted = thread->interrupted;
		thread->interrupted = false;
	}

	thread->signaled = false;

	/* return if we have been interrupted */

	return wasinterrupted;
}
1324
1325
1326 /* lock_monitor_wait ***********************************************************
1327
1328    Wait on an object for a given (maximum) amount of time.
1329
1330    IN:
1331       t............the current thread
1332           o............the object
1333           millis.......milliseconds of timeout
1334           nanos........nanoseconds of timeout
1335
1336    PRE-CONDITION:
1337       The current thread must be the owner of the object's monitor.
1338    
1339 *******************************************************************************/
1340
1341 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
1342 {
1343         uintptr_t      lockword;
1344         lock_record_t *lr;
1345
1346         lockword = lock_lockword_get(t, o);
1347
1348         /* check if we own this monitor */
1349         /* We don't have to worry about stale values here, as any stale value */
1350         /* will fail this check.                                              */
1351
1352         if (IS_FAT_LOCK(lockword)) {
1353
1354                 lr = GET_FAT_LOCK(lockword);
1355
1356                 if (lr->owner != t) {
1357                         exceptions_throw_illegalmonitorstateexception();
1358                         return;
1359                 }
1360         }
1361         else {
1362                 /* it's a thin lock */
1363
1364                 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1365                         exceptions_throw_illegalmonitorstateexception();
1366                         return;
1367                 }
1368
1369                 /* inflate this lock */
1370
1371                 lr = lock_hashtable_get(t, o);
1372                 lock_record_enter(t, lr);
1373                 lock_inflate(t, o, lr);
1374
1375                 notify_flc_waiters(t, o);
1376         }
1377
1378         /* { the thread t owns the fat lock record lr on the object o } */
1379
1380         if (lock_record_wait(t, lr, millis, nanos))
1381                 exceptions_throw_interruptedexception();
1382 }
1383
1384
1385 /* lock_record_notify **********************************************************
1386
1387    Notify one thread or all threads waiting on the given lock record.
1388
1389    IN:
1390       t............the current thread
1391           lr...........the lock record
1392           one..........if true, only notify one thread
1393
1394    PRE-CONDITION:
1395       The current thread must be the owner of the lock record.
1396           This is NOT checked by this function!
1397    
1398 *******************************************************************************/
1399
1400 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1401 {
1402         list_t        *l;
1403         lock_waiter_t *w;
1404         threadobject  *waitingthread;
1405
1406         /* { the thread t owns the fat lock record lr on the object o } */
1407
1408         /* Get the waiters list. */
1409
1410         l = lr->waiters;
1411
1412         for (w = list_first(l); w != NULL; w = list_next(l, w)) {
1413                 /* signal the waiting thread */
1414
1415                 waitingthread = w->thread;
1416
1417                 /* We must skip threads which have already been notified or
1418                    interrupted. They will remove themselves from the list. */
1419
1420                 if (waitingthread->signaled || waitingthread->interrupted)
1421                         continue;
1422
1423                 /* Enter the wait-mutex. */
1424
1425                 pthread_mutex_lock(&(waitingthread->waitmutex));
1426
1427                 DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
1428                                         lr, t, waitingthread, waitingthread->sleeping, one));
1429
1430                 /* Signal the thread if it's sleeping. sleeping can be false
1431                    when the waiting thread is blocked between giving up the
1432                    monitor and entering the waitmutex. It will eventually
1433                    observe that it's signaled and refrain from going to
1434                    sleep. */
1435
1436                 if (waitingthread->sleeping)
1437                         pthread_cond_signal(&(waitingthread->waitcond));
1438
1439                 /* Mark the thread as signaled. */
1440
1441                 waitingthread->signaled = true;
1442
1443                 /* Leave the wait-mutex. */
1444
1445                 pthread_mutex_unlock(&(waitingthread->waitmutex));
1446
1447                 /* if we should only wake one, we are done */
1448
1449                 if (one)
1450                         break;
1451         }
1452 }
1453
1454
1455 /* lock_monitor_notify *********************************************************
1456
1457    Notify one thread or all threads waiting on the given object.
1458
1459    IN:
1460       t............the current thread
1461           o............the object
1462           one..........if true, only notify one thread
1463
1464    PRE-CONDITION:
1465       The current thread must be the owner of the object's monitor.
1466    
1467 *******************************************************************************/
1468
1469 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1470 {
1471         uintptr_t      lockword;
1472         lock_record_t *lr;
1473
1474         lockword = lock_lockword_get(t, o);
1475
1476         /* check if we own this monitor */
1477         /* We don't have to worry about stale values here, as any stale value */
1478         /* will fail this check.                                              */
1479
1480         if (IS_FAT_LOCK(lockword)) {
1481
1482                 lr = GET_FAT_LOCK(lockword);
1483
1484                 if (lr->owner != t) {
1485                         exceptions_throw_illegalmonitorstateexception();
1486                         return;
1487                 }
1488         }
1489         else {
1490                 /* it's a thin lock */
1491
1492                 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1493                         exceptions_throw_illegalmonitorstateexception();
1494                         return;
1495                 }
1496
1497                 /* no thread can wait on a thin lock, so there's nothing to do. */
1498                 return;
1499         }
1500
1501         /* { the thread t owns the fat lock record lr on the object o } */
1502
1503         lock_record_notify(t, lr, one);
1504 }
1505
1506
1507
1508 /*============================================================================*/
/* INQUIRY FUNCTIONS                                                          */
1510 /*============================================================================*/
1511
1512
1513 /* lock_is_held_by_current_thread **********************************************
1514
1515    Return true if the current thread owns the monitor of the given object.
1516
1517    IN:
1518           o............the object
1519
1520    RETURN VALUE:
1521       true, if the current thread holds the lock of this object.
1522    
1523 *******************************************************************************/
1524
1525 bool lock_is_held_by_current_thread(java_handle_t *o)
1526 {
1527         threadobject  *t;
1528         uintptr_t      lockword;
1529         lock_record_t *lr;
1530
1531         t = THREADOBJECT;
1532
1533         /* check if we own this monitor */
1534         /* We don't have to worry about stale values here, as any stale value */
1535         /* will fail this check.                                              */
1536
1537         lockword = lock_lockword_get(t, o);
1538
1539         if (IS_FAT_LOCK(lockword)) {
1540                 /* it's a fat lock */
1541
1542                 lr = GET_FAT_LOCK(lockword);
1543
1544                 return (lr->owner == t);
1545         }
1546         else {
1547                 /* it's a thin lock */
1548
1549                 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1550         }
1551 }
1552
1553
1554
1555 /*============================================================================*/
1556 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD                              */
1557 /*============================================================================*/
1558
1559
1560 /* lock_wait_for_object ********************************************************
1561
1562    Wait for the given object.
1563
1564    IN:
1565           o............the object
1566           millis.......milliseconds to wait
1567           nanos........nanoseconds to wait
1568    
1569 *******************************************************************************/
1570
1571 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1572 {
1573         threadobject *thread;
1574
1575         thread = THREADOBJECT;
1576
1577         lock_monitor_wait(thread, o, millis, nanos);
1578 }
1579
1580
1581 /* lock_notify_object **********************************************************
1582
1583    Notify one thread waiting on the given object.
1584
1585    IN:
1586           o............the object
1587    
1588 *******************************************************************************/
1589
1590 void lock_notify_object(java_handle_t *o)
1591 {
1592         threadobject *thread;
1593
1594         thread = THREADOBJECT;
1595
1596         lock_monitor_notify(thread, o, true);
1597 }
1598
1599
1600 /* lock_notify_all_object ******************************************************
1601
1602    Notify all threads waiting on the given object.
1603
1604    IN:
1605           o............the object
1606    
1607 *******************************************************************************/
1608
1609 void lock_notify_all_object(java_handle_t *o)
1610 {
1611         threadobject *thread;
1612
1613         thread = THREADOBJECT;
1614
1615         lock_monitor_notify(thread, o, false);
1616 }
1617
1618
1619 /*
1620  * These are local overrides for various environment variables in Emacs.
1621  * Please do not remove this and leave it at the end of the file, where
1622  * Emacs will automagically detect them.
1623  * ---------------------------------------------------------------------
1624  * Local variables:
1625  * mode: c
1626  * indent-tabs-mode: t
1627  * c-basic-offset: 4
1628  * tab-width: 4
1629  * End:
1630  * vim:noexpandtab:sw=4:ts=4:
1631  */