* src/vm/jit/x86_64/codegen.c (INSTANCEOF, CHECKCAST): Not using red zone
[cacao.git] / src / threads / posix / lock.c
1 /* src/threads/posix/lock.c - lock implementation
2
3    Copyright (C) 1996-2005, 2006, 2007, 2008
4    CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5
6    This file is part of CACAO.
7
8    This program is free software; you can redistribute it and/or
9    modify it under the terms of the GNU General Public License as
10    published by the Free Software Foundation; either version 2, or (at
11    your option) any later version.
12
13    This program is distributed in the hope that it will be useful, but
14    WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16    General Public License for more details.
17
18    You should have received a copy of the GNU General Public License
19    along with this program; if not, write to the Free Software
20    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21    02110-1301, USA.
22
23 */
24
25
26 #include "config.h"
27
28 #include <assert.h>
29 #include <stdint.h>
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <sys/time.h>
33 #include <pthread.h>
34
35 #include "vm/types.h"
36
37 #include "mm/memory.h"
38
39 #include "native/llni.h"
40
41 #include "threads/lock-common.h"
42 #include "threads/mutex.h"
43 #include "threads/threadlist.h"
44 #include "threads/thread.h"
45
46 #include "threads/posix/lock.h"
47
48 #include "toolbox/list.h"
49
50 #include "vm/global.h"
51 #include "vm/exceptions.h"
52 #include "vm/finalizer.h"
53 #include "vm/stringlocal.h"
54 #include "vm/vm.h"
55
56 #include "vmcore/options.h"
57
58 #if defined(ENABLE_STATISTICS)
59 # include "vmcore/statistics.h"
60 #endif
61
62 #if defined(ENABLE_VMLOG)
63 #include <vmlog_cacao.h>
64 #endif
65
66 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
67
68 #include "arch.h"
69
70 /* includes for atomic instructions: */
71
72 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
73 #include "threads/posix/generic-primitives.h"
74 #else
75 #include "machine-instr.h"
76 #endif
77
78 #if defined(ENABLE_JVMTI)
79 #include "native/jvmti/cacaodbg.h"
80 #endif
81
82 #if defined(ENABLE_GC_BOEHM)
83 # include "mm/boehm-gc/include/gc.h"
84 #endif
85
86
87 /* debug **********************************************************************/
88
89 #if !defined(NDEBUG)
90 # define DEBUGLOCKS(format) \
91     do { \
92         if (opt_DebugLocks) { \
93             log_println format; \
94         } \
95     } while (0)
96 #else
97 # define DEBUGLOCKS(format)
98 #endif
99
100
101 /******************************************************************************/
102 /* MACROS                                                                     */
103 /******************************************************************************/
104
105 /* number of lock records in the first pool allocated for a thread */
106 #define LOCK_INITIAL_LOCK_RECORDS 8
107
108 #define LOCK_INITIAL_HASHTABLE_SIZE  1613  /* a prime in the middle between 1024 and 2048 */
109
110 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
111         ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
112
113
114 /******************************************************************************/
115 /* MACROS FOR THIN/FAT LOCKS                                                  */
116 /******************************************************************************/
117
118 /* We use a variant of the tasuki locks described in the paper
119  *     
120  *     Tamiya Onodera, Kiyokuni Kawachiya
121  *     A Study of Locking Objects with Bimodal Fields
122  *     Proceedings of the ACM OOPSLA '99, pp. 223-237
123  *     1999
124  *
125  * The underlying thin locks are a variant of the thin locks described in
126  * 
127  *     Bacon, Konuru, Murthy, Serrano
128  *     Thin Locks: Featherweight Synchronization for Java
129  *         Proceedings of the ACM Conference on Programming Language Design and 
130  *         Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
131  *         June 1998
132  *
133  * In thin lock mode the lockword looks like this:
134  *
135  *     ,----------------------,-----------,---,
136  *     |      thread ID       |   count   | 0 |
137  *     `----------------------'-----------'---'
138  *
139  *     thread ID......the 'index' of the owning thread, or 0
140  *     count..........number of times the lock has been entered minus 1
141  *     0..............the shape bit is 0 in thin lock mode
142  *
143  * In fat lock mode it is basically a lock_record_t *:
144  *
145  *     ,----------------------------------,---,
146  *     |    lock_record_t * (without LSB) | 1 |
147  *     `----------------------------------'---'
148  *
149  *     1..............the shape bit is 1 in fat lock mode
150  */
151
152 #if SIZEOF_VOID_P == 8
153 #define THIN_LOCK_WORD_SIZE    64
154 #else
155 #define THIN_LOCK_WORD_SIZE    32
156 #endif
157
158 #define THIN_LOCK_SHAPE_BIT    0x01
159
160 #define THIN_UNLOCKED          0
161
162 #define THIN_LOCK_COUNT_SHIFT  1
163 #define THIN_LOCK_COUNT_SIZE   8
164 #define THIN_LOCK_COUNT_INCR   (1 << THIN_LOCK_COUNT_SHIFT)
165 #define THIN_LOCK_COUNT_MAX    ((1 << THIN_LOCK_COUNT_SIZE) - 1)
166 #define THIN_LOCK_COUNT_MASK   (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
167
168 #define THIN_LOCK_TID_SHIFT    (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
169 #define THIN_LOCK_TID_SIZE     (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
170
171 #define IS_THIN_LOCK(lockword)  (!((lockword) & THIN_LOCK_SHAPE_BIT))
172 #define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)
173
174 #define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
175 #define MAKE_FAT_LOCK(ptr)      ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)
176
177 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
178 #define GET_THREAD_INDEX(lockword) ((unsigned) lockword >> THIN_LOCK_TID_SHIFT)
179
180
181 /* global variables ***********************************************************/
182
183 /* hashtable mapping objects to lock records */
184 static lock_hashtable_t lock_hashtable;
185
186
187 /******************************************************************************/
188 /* PROTOTYPES                                                                 */
189 /******************************************************************************/
190
191 static void lock_hashtable_init(void);
192
193 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
194 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
195 static void lock_record_enter(threadobject *t, lock_record_t *lr);
196 static void lock_record_exit(threadobject *t, lock_record_t *lr);
197 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
198 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
199
200
201 /*============================================================================*/
202 /* INITIALIZATION OF DATA STRUCTURES                                          */
203 /*============================================================================*/
204
205
206 /* lock_init *******************************************************************
207
208    Initialize global data for locking.
209
210 *******************************************************************************/
211
void lock_init(void)
{
	/* Set up the global object-to-lock-record hashtable. */
	lock_hashtable_init();

#if defined(ENABLE_VMLOG)
	/* Let vmlog install its own locking instrumentation. */
	vmlog_cacao_init_lock();
#endif
}
222
223
224 /* lock_pre_compute_thinlock ***************************************************
225
226    Pre-compute the thin lock value for a thread index.
227
228    IN:
229       index........the thread index (>= 1)
230
231    RETURN VALUE:
232       the thin lock value for this thread index
233
234 *******************************************************************************/
235
236 ptrint lock_pre_compute_thinlock(s4 index)
237 {
238         return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
239 }
240
241
242 /* lock_record_new *************************************************************
243
244    Allocate a lock record.
245
246 *******************************************************************************/
247
248 static lock_record_t *lock_record_new(void)
249 {
250         lock_record_t *lr;
251
252         /* allocate the data structure on the C heap */
253
254         lr = NEW(lock_record_t);
255
256 #if defined(ENABLE_STATISTICS)
257         if (opt_stat)
258                 size_lock_record += sizeof(lock_record_t);
259 #endif
260
261         /* initialize the members */
262
263         lr->object  = NULL;
264         lr->owner   = NULL;
265         lr->count   = 0;
266         lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));
267
268 #if defined(ENABLE_GC_CACAO)
269         /* register the lock object as weak reference with the GC */
270
271         gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
272 #endif
273
274         /* initialize the mutex */
275
276         mutex_init(&(lr->mutex));
277
278         DEBUGLOCKS(("[lock_record_new   : lr=%p]", (void *) lr));
279
280         return lr;
281 }
282
283
284 /* lock_record_free ************************************************************
285
286    Free a lock record.
287
288    IN:
289        lr....lock record to free
290
291 *******************************************************************************/
292
293 static void lock_record_free(lock_record_t *lr)
294 {
295         DEBUGLOCKS(("[lock_record_free  : lr=%p]", (void *) lr));
296
297         /* Destroy the mutex. */
298
299         mutex_destroy(&(lr->mutex));
300
301 #if defined(ENABLE_GC_CACAO)
302         /* unregister the lock object reference with the GC */
303
304         gc_weakreference_unregister(&(lr->object));
305 #endif
306
307         /* Free the waiters list. */
308
309         list_free(lr->waiters);
310
311         /* Free the data structure. */
312
313         FREE(lr, lock_record_t);
314
315 #if defined(ENABLE_STATISTICS)
316         if (opt_stat)
317                 size_lock_record -= sizeof(lock_record_t);
318 #endif
319 }
320
321
322 /*============================================================================*/
323 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS                                  */
324 /*============================================================================*/
325
326 /* lock_hashtable_init *********************************************************
327
328    Initialize the global hashtable mapping objects to lock records.
329
330 *******************************************************************************/
331
332 static void lock_hashtable_init(void)
333 {
334         mutex_init(&(lock_hashtable.mutex));
335
336         lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
337         lock_hashtable.entries = 0;
338         lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);
339
340 #if defined(ENABLE_STATISTICS)
341         if (opt_stat)
342                 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
343 #endif
344
345         MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
346 }
347
348
349 /* lock_hashtable_grow *********************************************************
350
351    Grow the lock record hashtable to about twice its current size and
352    rehash the entries.
353
354 *******************************************************************************/
355
356 /* must be called with hashtable mutex locked */
357 static void lock_hashtable_grow(void)
358 {
359         u4 oldsize;
360         u4 newsize;
361         lock_record_t **oldtable;
362         lock_record_t **newtable;
363         lock_record_t *lr;
364         lock_record_t *next;
365         u4 i;
366         u4 h;
367         u4 newslot;
368
369         /* allocate a new table */
370
371         oldsize = lock_hashtable.size;
372         newsize = oldsize*2 + 1; /* XXX should use prime numbers */
373
374         DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
375
376         oldtable = lock_hashtable.ptr;
377         newtable = MNEW(lock_record_t *, newsize);
378
379 #if defined(ENABLE_STATISTICS)
380         if (opt_stat)
381                 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
382 #endif
383
384         MZERO(newtable, lock_record_t *, newsize);
385
386         /* rehash the entries */
387
388         for (i = 0; i < oldsize; i++) {
389                 lr = oldtable[i];
390                 while (lr) {
391                         next = lr->hashlink;
392
393                         h = heap_hashcode(lr->object);
394                         newslot = h % newsize;
395
396                         lr->hashlink = newtable[newslot];
397                         newtable[newslot] = lr;
398
399                         lr = next;
400                 }
401         }
402
403         /* replace the old table */
404
405         lock_hashtable.ptr  = newtable;
406         lock_hashtable.size = newsize;
407
408         MFREE(oldtable, lock_record_t *, oldsize);
409
410 #if defined(ENABLE_STATISTICS)
411         if (opt_stat)
412                 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
413 #endif
414 }
415
416
417 /* lock_hashtable_cleanup ******************************************************
418
419    Removes (and frees) lock records which have a cleared object reference
420    from the hashtable. The locked object was reclaimed by the GC.
421
422 *******************************************************************************/
423
#if defined(ENABLE_GC_CACAO)
/* Remove (and free) all lock records whose weak object reference was
   cleared by the GC, i.e. whose locked object has been reclaimed.

   Fix: the loop index was a signed int compared against the unsigned
   u4 lock_hashtable.size; use u4 to avoid the signed/unsigned
   comparison. */

void lock_hashtable_cleanup(void)
{
	threadobject  *t;
	lock_record_t *lr;
	lock_record_t *prev;
	lock_record_t *next;
	u4             i;

	t = THREADOBJECT;

	/* lock the hashtable */

	mutex_lock(&(lock_hashtable.mutex));

	/* search the hashtable for cleared references */

	for (i = 0; i < lock_hashtable.size; i++) {
		lr = lock_hashtable.ptr[i];
		prev = NULL;

		while (lr) {
			next = lr->hashlink;

			/* remove lock records with cleared references */

			if (lr->object == NULL) {

				/* unlink the lock record from the hashtable */

				if (prev == NULL)
					lock_hashtable.ptr[i] = next;
				else
					prev->hashlink = next;

				/* free the lock record */
				/* NOTE(review): lock_hashtable.entries is not
				   decremented here, so the grow heuristic in
				   lock_hashtable_get sees an inflated count —
				   confirm whether this is intentional. */

				lock_record_free(lr);

			} else {
				prev = lr;
			}

			lr = next;
		}
	}

	/* unlock the hashtable */

	mutex_unlock(&(lock_hashtable.mutex));
}
#endif
476
477
478 /* lock_hashtable_get **********************************************************
479
480    Find the lock record for the given object.  If it does not exists,
481    yet, create it and enter it in the hashtable.
482
483    IN:
484       t....the current thread
485           o....the object to look up
486
487    RETURN VALUE:
488       the lock record to use for this object
489
490 *******************************************************************************/
491
492 #if defined(ENABLE_GC_BOEHM)
493 static void lock_record_finalizer(void *object, void *p);
494 #endif
495
496 static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
497 {
498         uintptr_t      lockword;
499         u4             slot;
500         lock_record_t *lr;
501
502         lockword = lock_lockword_get(t, o);
503
504         if (IS_FAT_LOCK(lockword))
505                 return GET_FAT_LOCK(lockword);
506
507         /* lock the hashtable */
508
509         mutex_lock(&(lock_hashtable.mutex));
510
511         /* lookup the lock record in the hashtable */
512
513         LLNI_CRITICAL_START_THREAD(t);
514         slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
515         lr   = lock_hashtable.ptr[slot];
516
517         for (; lr != NULL; lr = lr->hashlink) {
518                 if (lr->object == LLNI_DIRECT(o))
519                         break;
520         }
521         LLNI_CRITICAL_END_THREAD(t);
522
523         if (lr == NULL) {
524                 /* not found, we must create a new one */
525
526                 lr = lock_record_new();
527
528                 LLNI_CRITICAL_START_THREAD(t);
529                 lr->object = LLNI_DIRECT(o);
530                 LLNI_CRITICAL_END_THREAD(t);
531
532 #if defined(ENABLE_GC_BOEHM)
533                 /* register new finalizer to clean up the lock record */
534
535                 GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
536 #endif
537
538                 /* enter it in the hashtable */
539
540                 lr->hashlink             = lock_hashtable.ptr[slot];
541                 lock_hashtable.ptr[slot] = lr;
542                 lock_hashtable.entries++;
543
544                 /* check whether the hash should grow */
545
546                 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
547                         lock_hashtable_grow();
548                 }
549         }
550
551         /* unlock the hashtable */
552
553         mutex_unlock(&(lock_hashtable.mutex));
554
555         /* return the new lock record */
556
557         return lr;
558 }
559
560
561 /* lock_hashtable_remove *******************************************************
562
563    Remove the lock record for the given object from the hashtable
564    and free it afterwards.
565
566    IN:
567        t....the current thread
568        o....the object to look up
569
570 *******************************************************************************/
571
572 static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
573 {
574         uintptr_t      lockword;
575         lock_record_t *lr;
576         u4             slot;
577         lock_record_t *tmplr;
578
579         /* lock the hashtable */
580
581         mutex_lock(&(lock_hashtable.mutex));
582
583         /* get lock record */
584
585         lockword = lock_lockword_get(t, o);
586
587         assert(IS_FAT_LOCK(lockword));
588
589         lr = GET_FAT_LOCK(lockword);
590
591         /* remove the lock-record from the hashtable */
592
593         LLNI_CRITICAL_START_THREAD(t);
594         slot  = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
595         tmplr = lock_hashtable.ptr[slot];
596         LLNI_CRITICAL_END_THREAD(t);
597
598         if (tmplr == lr) {
599                 /* special handling if it's the first in the chain */
600
601                 lock_hashtable.ptr[slot] = lr->hashlink;
602         }
603         else {
604                 for (; tmplr != NULL; tmplr = tmplr->hashlink) {
605                         if (tmplr->hashlink == lr) {
606                                 tmplr->hashlink = lr->hashlink;
607                                 break;
608                         }
609                 }
610
611                 assert(tmplr != NULL);
612         }
613
614         /* decrease entry count */
615
616         lock_hashtable.entries--;
617
618         /* unlock the hashtable */
619
620         mutex_unlock(&(lock_hashtable.mutex));
621
622         /* free the lock record */
623
624         lock_record_free(lr);
625 }
626
627
628 /* lock_record_finalizer *******************************************************
629
630    XXX Remove me for exact GC.
631
632 *******************************************************************************/
633
634 static void lock_record_finalizer(void *object, void *p)
635 {
636         java_handle_t *o;
637         classinfo     *c;
638
639         o = (java_handle_t *) object;
640
641 #if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
642         /* XXX this is only a dirty hack to make Boehm work with handles */
643
644         o = LLNI_WRAP((java_object_t *) o);
645 #endif
646
647         LLNI_class_get(o, c);
648
649 #if !defined(NDEBUG)
650         if (opt_DebugFinalizer) {
651                 log_start();
652                 log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
653                 class_print(c);
654                 log_print("]");
655                 log_finish();
656         }
657 #endif
658
659         /* check for a finalizer function */
660
661         if (c->finalizer != NULL)
662                 finalizer_run(object, p);
663
664         /* remove the lock-record entry from the hashtable and free it */
665
666         lock_hashtable_remove(THREADOBJECT, o);
667 }
668
669
670 /*============================================================================*/
671 /* OBJECT LOCK INITIALIZATION                                                 */
672 /*============================================================================*/
673
674
675 /* lock_init_object_lock *******************************************************
676
677    Initialize the monitor pointer of the given object. The monitor gets
678    initialized to an unlocked state.
679
680 *******************************************************************************/
681
682 void lock_init_object_lock(java_object_t *o)
683 {
684         assert(o);
685
686         o->lockword = THIN_UNLOCKED;
687 }
688
689
690 /*============================================================================*/
691 /* LOCKING ALGORITHM                                                          */
692 /*============================================================================*/
693
694
695 /* lock_lockword_get ***********************************************************
696
697    Get the lockword for the given object.
698
699    IN:
700       t............the current thread
701       o............the object
702
703 *******************************************************************************/
704
705 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
706 {
707         uintptr_t lockword;
708
709         LLNI_CRITICAL_START_THREAD(t);
710         lockword = LLNI_DIRECT(o)->lockword;
711         LLNI_CRITICAL_END_THREAD(t);
712
713         return lockword;
714 }
715
716
717 /* lock_lockword_set ***********************************************************
718
719    Set the lockword for the given object.
720
721    IN:
722       t............the current thread
723       o............the object
724           lockword.....the new lockword value
725
726 *******************************************************************************/
727
728 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
729 {
730         LLNI_CRITICAL_START_THREAD(t);
731         LLNI_DIRECT(o)->lockword = lockword;
732         LLNI_CRITICAL_END_THREAD(t);
733 }
734
735
736 /* lock_record_enter ***********************************************************
737
738    Enter the lock represented by the given lock record.
739
740    IN:
741       t.................the current thread
742           lr................the lock record
743
744 *******************************************************************************/
745
746 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
747 {
748         mutex_lock(&(lr->mutex));
749         lr->owner = t;
750 }
751
752
753 /* lock_record_exit ************************************************************
754
755    Release the lock represented by the given lock record.
756
757    IN:
758       t.................the current thread
759           lr................the lock record
760
761    PRE-CONDITION:
762       The current thread must own the lock represented by this lock record.
763           This is NOT checked by this function!
764
765 *******************************************************************************/
766
767 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
768 {
769         lr->owner = NULL;
770         mutex_unlock(&(lr->mutex));
771 }
772
773
774 /* lock_inflate ****************************************************************
775
776    Inflate the lock of the given object. This may only be called by the
777    owner of the monitor of the object.
778
779    IN:
780       t............the current thread
781           o............the object of which to inflate the lock
782           lr...........the lock record to install. The current thread must
783                        own the lock of this lock record!
784
785    PRE-CONDITION:
786       The current thread must be the owner of this object's monitor AND
787           of the lock record's lock!
788
789 *******************************************************************************/
790
791 static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
792 {
793         uintptr_t lockword;
794
795         /* get the current lock count */
796
797         lockword = lock_lockword_get(t, o);
798
799         if (IS_FAT_LOCK(lockword)) {
800                 assert(GET_FAT_LOCK(lockword) == lr);
801                 return;
802         }
803         else {
804                 assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
805
806                 /* copy the count from the thin lock */
807
808                 lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
809         }
810
811         DEBUGLOCKS(("[lock_inflate      : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
812                                 lr, t, o, lockword, lr->count));
813
814         /* install it */
815
816         lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
817 }
818
819
820 /* TODO Move this function into threadlist.[ch]. */
821
822 static threadobject *threads_lookup_thread_id(int index)
823 {
824         threadobject *t;
825
826         threadlist_lock();
827
828         for (t = threadlist_first(); t != NULL; t = threadlist_next(t)) {
829                 if (t->state == THREAD_STATE_NEW)
830                         continue;
831                 if (t->index == index)
832                         break;
833         }
834
835         threadlist_unlock();
836         return t;
837 }
838
/* sable_flc_waiting ***********************************************************

   Flat-lock-contention handling: thread t failed to thin-lock object o
   because the thread whose index is encoded in the lockword holds it.
   Set that holder's FLC bit, enqueue (t, o) on the holder's FLC list
   and wait on t's condition variable until the holder removes us from
   the list (after inflating the lock).

   IN:
      lockword...the thin lockword observed for o
      t..........the current (contending) thread
      o..........the object t wants to lock

*******************************************************************************/

static void sable_flc_waiting(ptrint lockword, threadobject *t, java_handle_t *o)
{
	int index;              /* thread index taken from the lockword        */
	threadobject *t_other;  /* the thread currently holding the thin lock  */
	int old_flc;            /* holder's previous FLC bit, for rollback     */

	index = GET_THREAD_INDEX(lockword);
	t_other = threads_lookup_thread_id(index);
	if (!t_other)
/*              failure, TODO: add statistics */
		return;

	mutex_lock(&t_other->flc_lock);
	old_flc = t_other->flc_bit;
	t_other->flc_bit = true;

	DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d",
				t->index, t_other->index));

	/* Set FLC bit first, then read the lockword again */
	MEMORY_BARRIER();

	lockword = lock_lockword_get(t, o);

	/* Lockword is still the way it was seen before */
	if (IS_THIN_LOCK(lockword) && (GET_THREAD_INDEX(lockword) == index))
	{
		/* Add tuple (t, o) to the other thread's FLC list */
		t->flc_object = o;
		t->flc_next = t_other->flc_list;
		t_other->flc_list = t;

		for (;;)
		{
			threadobject *current;

			/* Wait until another thread sees the flc bit and notifies
			   us of unlocking. */
			pthread_cond_wait(&t->flc_cond, &t_other->flc_lock);

			/* Traverse FLC list looking if we're still there */
			current = t_other->flc_list;
			while (current && current != t)
				current = current->flc_next;
			if (!current)
				/* not in list anymore, can stop waiting */
				break;

			/* We are still in the list -- the other thread cannot have seen
			   the FLC bit yet */
			assert(t_other->flc_bit);
		}

		t->flc_object = NULL;   /* for garbage collector? */
		t->flc_next = NULL;
	}
	else
		/* The lockword changed meanwhile (unlocked, new owner, or
		   inflated): restore the holder's previous FLC bit. */
		t_other->flc_bit = old_flc;

	mutex_unlock(&t_other->flc_lock);
}
900
901 static void notify_flc_waiters(threadobject *t, java_handle_t *o)
902 {
903         threadobject *current;
904
905         mutex_lock(&t->flc_lock);
906
907         current = t->flc_list;
908         while (current)
909         {
910                 if (current->flc_object != o)
911                 {
912                         /* The object has to be inflated so the other threads can properly
913                            block on it. */
914
915                         /* Only if not already inflated */
916                         ptrint lockword = lock_lockword_get(t, current->flc_object);
917                         if (IS_THIN_LOCK(lockword)) {
918                                 lock_record_t *lr = lock_hashtable_get(t, current->flc_object);
919                                 lock_record_enter(t, lr);
920
921                                 DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
922                                                         t->index, (void*) current->flc_object, (void*) lr));
923
924                                 lock_inflate(t, current->flc_object, lr);
925                         }
926                 }
927                 /* Wake the waiting thread */
928                 pthread_cond_broadcast(&current->flc_cond);
929
930                 current = current->flc_next;
931         }
932
933         t->flc_list = NULL;
934         t->flc_bit = false;
935         mutex_unlock(&t->flc_lock);
936 }
937
938 /* lock_monitor_enter **********************************************************
939
940    Acquire the monitor of the given object. If the current thread already
941    owns the monitor, the lock counter is simply increased.
942
943    This function blocks until it can acquire the monitor.
944
945    IN:
946       t............the current thread
947           o............the object of which to enter the monitor
948
949    RETURN VALUE:
950       true.........the lock has been successfully acquired
951           false........an exception has been thrown
952
953 *******************************************************************************/
954
bool lock_monitor_enter(java_handle_t *o)
{
	threadobject  *t;
	/* CAUTION: This code assumes that ptrint is unsigned! */
	ptrint         lockword;
	ptrint         thinlock;
	lock_record_t *lr;

	/* A null monitor object is reported as a NullPointerException. */

	if (o == NULL) {
		exceptions_throw_nullpointerexception();
		return false;
	}

	t = THREADOBJECT;

	/* The thin-lock word identifying this thread with recursion count 0. */

	thinlock = t->thinlock;

retry:
	/* most common case: try to thin-lock an unlocked object */

	LLNI_CRITICAL_START_THREAD(t);
	lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
	LLNI_CRITICAL_END_THREAD(t);

	if (lockword == THIN_UNLOCKED) {
		/* success. we locked it */
		/* The Java Memory Model requires a memory barrier here: */
		/* Because of the CAS above, this barrier is a nop on x86 / x86_64 */
		MEMORY_BARRIER_AFTER_ATOMIC();
		return true;
	}

	/* next common case: recursive lock with small recursion count */
	/* We don't have to worry about stale values here, as any stale value  */
	/* will indicate another thread holding the lock (or an inflated lock) */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		/* we own this monitor               */
		/* check the current recursion count */

		/* The XOR cancels the owner bits, leaving only the count bits. */

		if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
		{
			/* the recursion count is low enough */

			lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);

			/* success. we locked it */
			return true;
		}
		else {
			/* recursion count overflow: inflate to a fat lock, whose
			   lock record tracks the recursion count instead */

			lr = lock_hashtable_get(t, o);
			lock_record_enter(t, lr);
			lock_inflate(t, o, lr);
			lr->count++;

			/* wake threads parked in the flat-lock-contention protocol */

			notify_flc_waiters(t, o);

			return true;
		}
	}

	/* the lock is either contended or fat */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		/* check for recursive entering */
		if (lr->owner == t) {
			lr->count++;
			return true;
		}

		/* acquire the mutex of the lock record */

		lock_record_enter(t, lr);

		/* a freshly acquired fat lock must carry no recursion count */

		assert(lr->count == 0);

		return true;
	}

	/****** inflation path ******/

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Enter */
	jvmti_MonitorContendedEntering(false, o);
#endif

	/* Contended thin lock held by another thread: block via the
	   flat-lock-contention protocol, then retry the whole sequence. */

	sable_flc_waiting(lockword, t, o);

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Entered */
	jvmti_MonitorContendedEntering(true, o);
#endif
	goto retry;
}
1054
1055
1056 /* lock_monitor_exit ***********************************************************
1057
1058    Decrement the counter of a (currently owned) monitor. If the counter
1059    reaches zero, release the monitor.
1060
1061    If the current thread is not the owner of the monitor, an 
1062    IllegalMonitorState exception is thrown.
1063
   IN:
      o............the object of which to exit the monitor
                   (the current thread is obtained internally)
1067
1068    RETURN VALUE:
1069       true.........everything ok,
1070           false........an exception has been thrown
1071
1072 *******************************************************************************/
1073
bool lock_monitor_exit(java_handle_t *o)
{
	threadobject *t;
	uintptr_t     lockword;
	ptrint        thinlock;

	/* A null monitor object is reported as a NullPointerException. */

	if (o == NULL) {
		exceptions_throw_nullpointerexception();
		return false;
	}

	t = THREADOBJECT;

	thinlock = t->thinlock;

	/* We don't have to worry about stale values here, as any stale value */
	/* will indicate that we don't own the lock.                          */

	lockword = lock_lockword_get(t, o);

	/* most common case: we release a thin lock that we hold once */

	if (lockword == thinlock) {
		/* memory barrier for Java Memory Model */
		STORE_ORDER_BARRIER();
		lock_lockword_set(t, o, THIN_UNLOCKED);
		/* memory barrier for thin locking */
		MEMORY_BARRIER();

		/* check if there has been a flat lock contention on this object */

		if (t->flc_bit) {
			DEBUGLOCKS(("thread %d saw flc bit", t->index));

			/* there has been a contention on this thin lock */
			notify_flc_waiters(t, o);
		}

		return true;
	}

	/* next common case: we release a recursive lock, count > 0 */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		/* decrementing by one count increment keeps us the owner */
		lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
		return true;
	}

	/* either the lock is fat, or we don't hold it at all */

	if (IS_FAT_LOCK(lockword)) {

		lock_record_t *lr;

		lr = GET_FAT_LOCK(lockword);

		/* check if we own this monitor */
		/* We don't have to worry about stale values here, as any stale value */
		/* will be != t and thus fail this check.                             */

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();
			return false;
		}

		/* { the current thread `t` owns the lock record `lr` on object `o` } */

		if (lr->count != 0) {
			/* we had locked this one recursively. just decrement, it will */
			/* still be locked. */
			lr->count--;
			return true;
		}

		/* unlock this lock record: clear the owner before releasing the
		   mutex so a new owner never sees us recorded as holder */

		lr->owner = NULL;
		mutex_unlock(&(lr->mutex));

		return true;
	}

	/* legal thin lock cases have been handled above, so this is an error */

	exceptions_throw_illegalmonitorstateexception();

	return false;
}
1162
1163
1164 /* lock_record_add_waiter ******************************************************
1165
1166    Add a thread to the list of waiting threads of a lock record.
1167
1168    IN:
1169       lr...........the lock record
1170       thread.......the thread to add
1171
1172 *******************************************************************************/
1173
1174 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
1175 {
1176         lock_waiter_t *w;
1177
1178         /* Allocate a waiter data structure. */
1179
1180         w = NEW(lock_waiter_t);
1181
1182 #if defined(ENABLE_STATISTICS)
1183         if (opt_stat)
1184                 size_lock_waiter += sizeof(lock_waiter_t);
1185 #endif
1186
1187         /* Store the thread in the waiter structure. */
1188
1189         w->thread = thread;
1190
1191         /* Add the waiter as last entry to waiters list. */
1192
1193         list_add_last(lr->waiters, w);
1194 }
1195
1196
1197 /* lock_record_remove_waiter ***************************************************
1198
1199    Remove a thread from the list of waiting threads of a lock record.
1200
   IN:
      lr...........the lock record
      thread.......the thread to remove
1204
1205    PRE-CONDITION:
1206       The current thread must be the owner of the lock record.
1207    
1208 *******************************************************************************/
1209
1210 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
1211 {
1212         list_t        *l;
1213         lock_waiter_t *w;
1214
1215         /* Get the waiters list. */
1216
1217         l = lr->waiters;
1218
1219         for (w = list_first(l); w != NULL; w = list_next(l, w)) {
1220                 if (w->thread == thread) {
1221                         /* Remove the waiter entry from the list. */
1222
1223                         list_remove(l, w);
1224
1225                         /* Free the waiter data structure. */
1226
1227                         FREE(w, lock_waiter_t);
1228
1229 #if defined(ENABLE_STATISTICS)
1230                         if (opt_stat)
1231                                 size_lock_waiter -= sizeof(lock_waiter_t);
1232 #endif
1233
1234                         return;
1235                 }
1236         }
1237
1238         /* This should never happen. */
1239
1240         vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
1241 }
1242
1243
1244 /* lock_record_wait ************************************************************
1245
1246    Wait on a lock record for a given (maximum) amount of time.
1247
1248    IN:
1249       t............the current thread
1250           lr...........the lock record
1251           millis.......milliseconds of timeout
1252           nanos........nanoseconds of timeout
1253
1254    RETURN VALUE:
1255       true.........we have been interrupted,
1256       false........everything ok
1257
1258    PRE-CONDITION:
1259       The current thread must be the owner of the lock record.
1260           This is NOT checked by this function!
1261    
1262 *******************************************************************************/
1263
static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
{
	s4   lockcount;
	bool wasinterrupted = false;

	DEBUGLOCKS(("[lock_record_wait  : lr=%p, t=%p, millis=%lld, nanos=%d]",
				lr, thread, millis, nanos));

	/* { the thread t owns the fat lock record lr on the object o } */

	/* register us as waiter for this object */

	lock_record_add_waiter(lr, thread);

	/* remember the old lock count */
	/* Waiting must release the monitor completely, even if it was
	   entered recursively; the count is restored after re-entry. */

	lockcount = lr->count;

	/* unlock this record */

	lr->count = 0;
	lock_record_exit(thread, lr);

	/* wait until notified/interrupted/timed out */

	threads_wait_with_timeout_relative(thread, millis, nanos);

	/* re-enter the monitor */

	lock_record_enter(thread, lr);

	/* remove us from the list of waiting threads */
	/* NOTE: this must happen while we hold the record again, so the
	   list is only ever mutated under the record's mutex. */

	lock_record_remove_waiter(lr, thread);

	/* restore the old lock count */

	lr->count = lockcount;

	/* We can only be signaled OR interrupted, not both. If both flags
	   are set, reset only signaled and leave the thread in
	   interrupted state. Otherwise, clear both. */

	if (!thread->signaled) {
		wasinterrupted = thread->interrupted;
		thread->interrupted = false;
	}

	thread->signaled = false;

	/* return if we have been interrupted */

	return wasinterrupted;
}
1318
1319
1320 /* lock_monitor_wait ***********************************************************
1321
1322    Wait on an object for a given (maximum) amount of time.
1323
1324    IN:
1325       t............the current thread
1326           o............the object
1327           millis.......milliseconds of timeout
1328           nanos........nanoseconds of timeout
1329
1330    PRE-CONDITION:
1331       The current thread must be the owner of the object's monitor.
1332    
1333 *******************************************************************************/
1334
static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
{
	uintptr_t      lockword;
	lock_record_t *lr;

	lockword = lock_lockword_get(t, o);

	/* check if we own this monitor */
	/* We don't have to worry about stale values here, as any stale value */
	/* will fail this check.                                              */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();
			return;
		}
	}
	else {
		/* it's a thin lock */

		/* Masking out the count bits accepts any recursion depth held
		   by this thread. */

		if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
			exceptions_throw_illegalmonitorstateexception();
			return;
		}

		/* inflate this lock */
		/* Waiting needs a lock record (waiters list, condition), which
		   a thin lock does not have. */

		lr = lock_hashtable_get(t, o);
		lock_record_enter(t, lr);
		lock_inflate(t, o, lr);

		/* wake threads parked in the flat-lock-contention protocol */

		notify_flc_waiters(t, o);
	}

	/* { the thread t owns the fat lock record lr on the object o } */

	if (lock_record_wait(t, lr, millis, nanos))
		exceptions_throw_interruptedexception();
}
1377
1378
1379 /* lock_record_notify **********************************************************
1380
1381    Notify one thread or all threads waiting on the given lock record.
1382
1383    IN:
1384       t............the current thread
1385           lr...........the lock record
1386           one..........if true, only notify one thread
1387
1388    PRE-CONDITION:
1389       The current thread must be the owner of the lock record.
1390           This is NOT checked by this function!
1391    
1392 *******************************************************************************/
1393
static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
{
	list_t        *l;
	lock_waiter_t *w;
	threadobject  *waitingthread;

	/* { the thread t owns the fat lock record lr on the object o } */

	/* Get the waiters list. */

	l = lr->waiters;

	for (w = list_first(l); w != NULL; w = list_next(l, w)) {
		/* signal the waiting thread */

		waitingthread = w->thread;

		/* We must skip threads which have already been notified. They will
		   remove themselves from the list. */

		if (waitingthread->signaled)
			continue;

		/* Enter the wait-mutex. */

		mutex_lock(&(waitingthread->waitmutex));

		DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
					lr, t, waitingthread, waitingthread->sleeping, one));

		/* Signal the thread if it's sleeping. sleeping can be false
		   when the waiting thread is blocked between giving up the
		   monitor and entering the waitmutex. It will eventually
		   observe that it's signaled and refrain from going to
		   sleep. */

		if (waitingthread->sleeping)
			pthread_cond_signal(&(waitingthread->waitcond));

		/* Mark the thread as signaled. */
		/* Done under waitmutex so the waiter observes a consistent
		   signaled/sleeping pair. */

		waitingthread->signaled = true;

		/* Leave the wait-mutex. */

		mutex_unlock(&(waitingthread->waitmutex));

		/* if we should only wake one, we are done */

		if (one)
			break;
	}
}
1447
1448
1449 /* lock_monitor_notify *********************************************************
1450
1451    Notify one thread or all threads waiting on the given object.
1452
1453    IN:
1454       t............the current thread
1455           o............the object
1456           one..........if true, only notify one thread
1457
1458    PRE-CONDITION:
1459       The current thread must be the owner of the object's monitor.
1460    
1461 *******************************************************************************/
1462
1463 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1464 {
1465         uintptr_t      lockword;
1466         lock_record_t *lr;
1467
1468         lockword = lock_lockword_get(t, o);
1469
1470         /* check if we own this monitor */
1471         /* We don't have to worry about stale values here, as any stale value */
1472         /* will fail this check.                                              */
1473
1474         if (IS_FAT_LOCK(lockword)) {
1475
1476                 lr = GET_FAT_LOCK(lockword);
1477
1478                 if (lr->owner != t) {
1479                         exceptions_throw_illegalmonitorstateexception();
1480                         return;
1481                 }
1482         }
1483         else {
1484                 /* it's a thin lock */
1485
1486                 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1487                         exceptions_throw_illegalmonitorstateexception();
1488                         return;
1489                 }
1490
1491                 /* no thread can wait on a thin lock, so there's nothing to do. */
1492                 return;
1493         }
1494
1495         /* { the thread t owns the fat lock record lr on the object o } */
1496
1497         lock_record_notify(t, lr, one);
1498 }
1499
1500
1501
1502 /*============================================================================*/
/* INQUIRY FUNCTIONS                                                          */
1504 /*============================================================================*/
1505
1506
1507 /* lock_is_held_by_current_thread **********************************************
1508
1509    Return true if the current thread owns the monitor of the given object.
1510
1511    IN:
1512           o............the object
1513
1514    RETURN VALUE:
1515       true, if the current thread holds the lock of this object.
1516    
1517 *******************************************************************************/
1518
1519 bool lock_is_held_by_current_thread(java_handle_t *o)
1520 {
1521         threadobject  *t;
1522         uintptr_t      lockword;
1523         lock_record_t *lr;
1524
1525         t = THREADOBJECT;
1526
1527         /* check if we own this monitor */
1528         /* We don't have to worry about stale values here, as any stale value */
1529         /* will fail this check.                                              */
1530
1531         lockword = lock_lockword_get(t, o);
1532
1533         if (IS_FAT_LOCK(lockword)) {
1534                 /* it's a fat lock */
1535
1536                 lr = GET_FAT_LOCK(lockword);
1537
1538                 return (lr->owner == t);
1539         }
1540         else {
1541                 /* it's a thin lock */
1542
1543                 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1544         }
1545 }
1546
1547
1548
1549 /*============================================================================*/
1550 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD                              */
1551 /*============================================================================*/
1552
1553
1554 /* lock_wait_for_object ********************************************************
1555
1556    Wait for the given object.
1557
1558    IN:
1559           o............the object
1560           millis.......milliseconds to wait
1561           nanos........nanoseconds to wait
1562    
1563 *******************************************************************************/
1564
1565 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1566 {
1567         threadobject *thread;
1568
1569         thread = THREADOBJECT;
1570
1571         lock_monitor_wait(thread, o, millis, nanos);
1572 }
1573
1574
1575 /* lock_notify_object **********************************************************
1576
1577    Notify one thread waiting on the given object.
1578
1579    IN:
1580           o............the object
1581    
1582 *******************************************************************************/
1583
1584 void lock_notify_object(java_handle_t *o)
1585 {
1586         threadobject *thread;
1587
1588         thread = THREADOBJECT;
1589
1590         lock_monitor_notify(thread, o, true);
1591 }
1592
1593
1594 /* lock_notify_all_object ******************************************************
1595
1596    Notify all threads waiting on the given object.
1597
1598    IN:
1599           o............the object
1600    
1601 *******************************************************************************/
1602
1603 void lock_notify_all_object(java_handle_t *o)
1604 {
1605         threadobject *thread;
1606
1607         thread = THREADOBJECT;
1608
1609         lock_monitor_notify(thread, o, false);
1610 }
1611
1612
1613 /*
1614  * These are local overrides for various environment variables in Emacs.
1615  * Please do not remove this and leave it at the end of the file, where
1616  * Emacs will automagically detect them.
1617  * ---------------------------------------------------------------------
1618  * Local variables:
1619  * mode: c
1620  * indent-tabs-mode: t
1621  * c-basic-offset: 4
1622  * tab-width: 4
1623  * End:
1624  * vim:noexpandtab:sw=4:ts=4:
1625  */