/* src/threads/native/lock.c - lock implementation

   Copyright (C) 1996-2005, 2006, 2007, 2008
   CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO

   This file is part of CACAO.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2, or (at
   your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

*/


#include "config.h"

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <pthread.h>

#include "vm/types.h"

#include "mm/memory.h"

#include "native/llni.h"

#include "threads/lock-common.h"
#include "threads/threadlist.h"
#include "threads/threads-common.h"

#include "threads/native/lock.h"
#include "threads/native/threads.h"

#include "toolbox/list.h"

#include "vm/global.h"
#include "vm/exceptions.h"
#include "vm/finalizer.h"
#include "vm/stringlocal.h"
#include "vm/vm.h"

#include "vmcore/options.h"

#if defined(ENABLE_STATISTICS)
# include "vmcore/statistics.h"
#endif

#if defined(ENABLE_VMLOG)
#include <vmlog_cacao.h>
#endif

/* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */

#include "arch.h"

/* includes for atomic instructions: */

#if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
#include "threads/native/generic-primitives.h"
#else
#include "machine-instr.h"
#endif

#if defined(ENABLE_JVMTI)
#include "native/jvmti/cacaodbg.h"
#endif

#if defined(ENABLE_GC_BOEHM)
# include "mm/boehm-gc/include/gc.h"
#endif


/* debug **********************************************************************/

#if !defined(NDEBUG)
# define DEBUGLOCKS(format) \
    do { \
        if (opt_DebugLocks) { \
            log_println format; \
        } \
    } while (0)
#else
# define DEBUGLOCKS(format)
#endif


/******************************************************************************/
/* MACROS                                                                     */
/******************************************************************************/

/* number of lock records in the first pool allocated for a thread */
#define LOCK_INITIAL_LOCK_RECORDS 8

#define LOCK_INITIAL_HASHTABLE_SIZE  1613  /* a prime roughly halfway between 1024 and 2048 */

#define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
        ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))

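/* Note on usage (a sketch, inferred from the thin-lock fast path in
 * lock_monitor_enter below): compare_and_swap() returns the value previously
 * stored at `address`, so the swap succeeded if and only if the returned
 * value equals `oldvalue`:
 *
 *     if (COMPARE_AND_SWAP_OLD_VALUE(&word, THIN_UNLOCKED, thinlock)
 *             == THIN_UNLOCKED)
 *         ... word was THIN_UNLOCKED and now atomically holds thinlock ...
 */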

/******************************************************************************/
/* MACROS FOR THIN/FAT LOCKS                                                  */
/******************************************************************************/

/* We use a variant of the tasuki locks described in the paper
 *
 *     Tamiya Onodera, Kiyokuni Kawachiya
 *     A Study of Locking Objects with Bimodal Fields
 *     Proceedings of the ACM OOPSLA '99, pp. 223-237
 *     1999
 *
 * The underlying thin locks are a variant of the thin locks described in
 *
 *     Bacon, Konuru, Murthy, Serrano
 *     Thin Locks: Featherweight Synchronization for Java
 *         Proceedings of the ACM Conference on Programming Language Design and
 *         Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
 *         June 1998
 *
 * In thin lock mode the lockword looks like this:
 *
 *     ,----------------------,-----------,---,
 *     |      thread ID       |   count   | 0 |
 *     `----------------------'-----------'---'
 *
 *     thread ID......the 'index' of the owning thread, or 0
 *     count..........number of times the lock has been entered minus 1
 *     0..............the shape bit is 0 in thin lock mode
 *
 * In fat lock mode it is basically a lock_record_t *:
 *
 *     ,----------------------------------,---,
 *     |    lock_record_t * (without LSB) | 1 |
 *     `----------------------------------'---'
 *
 *     1..............the shape bit is 1 in fat lock mode
 */
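
/* Worked example (illustrative only): with the constants defined below
 * (THIN_LOCK_COUNT_SHIFT = 1, THIN_LOCK_COUNT_SIZE = 8, hence
 * THIN_LOCK_TID_SHIFT = 9), a thread with index 5 that has entered the
 * monitor three times holds the thin lockword
 *
 *     (5 << 9) | (2 << 1) | 0  =  0x0a04
 *
 * since the count field stores the number of entries minus 1. */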

#if SIZEOF_VOID_P == 8
#define THIN_LOCK_WORD_SIZE    64
#else
#define THIN_LOCK_WORD_SIZE    32
#endif

#define THIN_LOCK_SHAPE_BIT    0x01

#define THIN_UNLOCKED          0

#define THIN_LOCK_COUNT_SHIFT  1
#define THIN_LOCK_COUNT_SIZE   8
#define THIN_LOCK_COUNT_INCR   (1 << THIN_LOCK_COUNT_SHIFT)
#define THIN_LOCK_COUNT_MAX    ((1 << THIN_LOCK_COUNT_SIZE) - 1)
#define THIN_LOCK_COUNT_MASK   (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)

#define THIN_LOCK_TID_SHIFT    (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
#define THIN_LOCK_TID_SIZE     (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)

#define IS_THIN_LOCK(lockword)  (!((lockword) & THIN_LOCK_SHAPE_BIT))
#define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)

#define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
#define MAKE_FAT_LOCK(ptr)      ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)

#define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
#define GET_THREAD_INDEX(lockword) ((unsigned) (lockword) >> THIN_LOCK_TID_SHIFT)


/* global variables ***********************************************************/

/* hashtable mapping objects to lock records */
static lock_hashtable_t lock_hashtable;


/******************************************************************************/
/* PROTOTYPES                                                                 */
/******************************************************************************/

static void lock_hashtable_init(void);

static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
static void lock_record_enter(threadobject *t, lock_record_t *lr);
static void lock_record_exit(threadobject *t, lock_record_t *lr);
static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);


/*============================================================================*/
/* INITIALIZATION OF DATA STRUCTURES                                          */
/*============================================================================*/


/* lock_init *******************************************************************

   Initialize global data for locking.

*******************************************************************************/

void lock_init(void)
{
        /* initialize lock hashtable */

        lock_hashtable_init();

#if defined(ENABLE_VMLOG)
        vmlog_cacao_init_lock();
#endif
}


/* lock_pre_compute_thinlock ***************************************************

   Pre-compute the thin lock value for a thread index.

   IN:
      index........the thread index (>= 1)

   RETURN VALUE:
      the thin lock value for this thread index

*******************************************************************************/

ptrint lock_pre_compute_thinlock(s4 index)
{
        return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
}

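/* Example (illustrative): for thread index 1 this yields
 * (1 << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED = 0x200, the lockword value
 * that thread installs when it thin-locks an unlocked object (count field
 * 0, i.e. entered once). */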

/* lock_record_new *************************************************************

   Allocate a lock record.

*******************************************************************************/

static lock_record_t *lock_record_new(void)
{
        int result;
        lock_record_t *lr;

        /* allocate the data structure on the C heap */

        lr = NEW(lock_record_t);

#if defined(ENABLE_STATISTICS)
        if (opt_stat)
                size_lock_record += sizeof(lock_record_t);
#endif

        /* initialize the members */

        lr->object  = NULL;
        lr->owner   = NULL;
        lr->count   = 0;
        lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));

#if defined(ENABLE_GC_CACAO)
        /* register the lock object as weak reference with the GC */

        gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
#endif

        /* initialize the mutex */

        result = pthread_mutex_init(&(lr->mutex), NULL);
        if (result != 0)
                vm_abort_errnum(result, "lock_record_new: pthread_mutex_init failed");

        DEBUGLOCKS(("[lock_record_new   : lr=%p]", (void *) lr));

        return lr;
}


/* lock_record_free ************************************************************

   Free a lock record.

   IN:
      lr....lock record to free

*******************************************************************************/

static void lock_record_free(lock_record_t *lr)
{
        int result;

        DEBUGLOCKS(("[lock_record_free  : lr=%p]", (void *) lr));

        /* Destroy the mutex. */

        result = pthread_mutex_destroy(&(lr->mutex));
        if (result != 0)
                vm_abort_errnum(result, "lock_record_free: pthread_mutex_destroy failed");

#if defined(ENABLE_GC_CACAO)
        /* unregister the lock object reference with the GC */

        gc_weakreference_unregister(&(lr->object));
#endif

        /* Free the waiters list. */

        list_free(lr->waiters);

        /* Free the data structure. */

        FREE(lr, lock_record_t);

#if defined(ENABLE_STATISTICS)
        if (opt_stat)
                size_lock_record -= sizeof(lock_record_t);
#endif
}


/*============================================================================*/
/* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS                                  */
/*============================================================================*/

/* lock_hashtable_init *********************************************************

   Initialize the global hashtable mapping objects to lock records.

*******************************************************************************/

static void lock_hashtable_init(void)
{
        pthread_mutex_init(&(lock_hashtable.mutex), NULL);

        lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
        lock_hashtable.entries = 0;
        lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);

#if defined(ENABLE_STATISTICS)
        if (opt_stat)
                size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
#endif

        MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
}


/* lock_hashtable_grow *********************************************************

   Grow the lock record hashtable to about twice its current size and
   rehash the entries.

*******************************************************************************/

/* must be called with hashtable mutex locked */
static void lock_hashtable_grow(void)
{
        u4 oldsize;
        u4 newsize;
        lock_record_t **oldtable;
        lock_record_t **newtable;
        lock_record_t *lr;
        lock_record_t *next;
        u4 i;
        u4 h;
        u4 newslot;

        /* allocate a new table */

        oldsize = lock_hashtable.size;
        newsize = oldsize*2 + 1; /* XXX should use prime numbers */

        DEBUGLOCKS(("growing lock hashtable to size %d", newsize));

        oldtable = lock_hashtable.ptr;
        newtable = MNEW(lock_record_t *, newsize);

#if defined(ENABLE_STATISTICS)
        if (opt_stat)
                size_lock_hashtable += sizeof(lock_record_t *) * newsize;
#endif

        MZERO(newtable, lock_record_t *, newsize);

        /* rehash the entries */

        for (i = 0; i < oldsize; i++) {
                lr = oldtable[i];
                while (lr) {
                        next = lr->hashlink;

                        h = heap_hashcode(lr->object);
                        newslot = h % newsize;

                        lr->hashlink = newtable[newslot];
                        newtable[newslot] = lr;

                        lr = next;
                }
        }

        /* replace the old table */

        lock_hashtable.ptr  = newtable;
        lock_hashtable.size = newsize;

        MFREE(oldtable, lock_record_t *, oldsize);

#if defined(ENABLE_STATISTICS)
        if (opt_stat)
                size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
#endif
}


/* lock_hashtable_cleanup ******************************************************

   Removes (and frees) lock records which have a cleared object reference
   from the hashtable. The locked object was reclaimed by the GC.

*******************************************************************************/

#if defined(ENABLE_GC_CACAO)
void lock_hashtable_cleanup(void)
{
        threadobject  *t;
        lock_record_t *lr;
        lock_record_t *prev;
        lock_record_t *next;
        int i;

        t = THREADOBJECT;

        /* lock the hashtable */

        pthread_mutex_lock(&(lock_hashtable.mutex));

        /* search the hashtable for cleared references */

        for (i = 0; i < lock_hashtable.size; i++) {
                lr = lock_hashtable.ptr[i];
                prev = NULL;

                while (lr) {
                        next = lr->hashlink;

                        /* remove lock records with cleared references */

                        if (lr->object == NULL) {

                                /* unlink the lock record from the hashtable */

                                if (prev == NULL)
                                        lock_hashtable.ptr[i] = next;
                                else
                                        prev->hashlink = next;

                                /* free the lock record */

                                lock_record_free(lr);

                        } else {
                                prev = lr;
                        }

                        lr = next;
                }
        }

        /* unlock the hashtable */

        pthread_mutex_unlock(&(lock_hashtable.mutex));
}
#endif


/* lock_hashtable_get **********************************************************

   Find the lock record for the given object.  If it does not exist
   yet, create it and enter it in the hashtable.

   IN:
      t....the current thread
      o....the object to look up

   RETURN VALUE:
      the lock record to use for this object

*******************************************************************************/

#if defined(ENABLE_GC_BOEHM)
static void lock_record_finalizer(void *object, void *p);
#endif

static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
{
        uintptr_t      lockword;
        u4             slot;
        lock_record_t *lr;

        lockword = lock_lockword_get(t, o);

        if (IS_FAT_LOCK(lockword))
                return GET_FAT_LOCK(lockword);

        /* lock the hashtable */

        pthread_mutex_lock(&(lock_hashtable.mutex));

        /* lookup the lock record in the hashtable */

        LLNI_CRITICAL_START_THREAD(t);
        slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
        lr   = lock_hashtable.ptr[slot];

        for (; lr != NULL; lr = lr->hashlink) {
                if (lr->object == LLNI_DIRECT(o))
                        break;
        }
        LLNI_CRITICAL_END_THREAD(t);

        if (lr == NULL) {
                /* not found, we must create a new one */

                lr = lock_record_new();

                LLNI_CRITICAL_START_THREAD(t);
                lr->object = LLNI_DIRECT(o);
                LLNI_CRITICAL_END_THREAD(t);

#if defined(ENABLE_GC_BOEHM)
                /* register new finalizer to clean up the lock record */

                GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
#endif

                /* enter it in the hashtable */

                lr->hashlink             = lock_hashtable.ptr[slot];
                lock_hashtable.ptr[slot] = lr;
                lock_hashtable.entries++;

                /* grow the table if the load factor exceeds 4/3 */

                if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
                        lock_hashtable_grow();
                }
        }

        /* unlock the hashtable */

        pthread_mutex_unlock(&(lock_hashtable.mutex));

        /* return the new lock record */

        return lr;
}


/* lock_hashtable_remove *******************************************************

   Remove the lock record for the given object from the hashtable
   and free it afterwards.

   IN:
      t....the current thread
      o....the object to look up

*******************************************************************************/

static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
{
        uintptr_t      lockword;
        lock_record_t *lr;
        u4             slot;
        lock_record_t *tmplr;

        /* lock the hashtable */

        pthread_mutex_lock(&(lock_hashtable.mutex));

        /* get lock record */

        lockword = lock_lockword_get(t, o);

        assert(IS_FAT_LOCK(lockword));

        lr = GET_FAT_LOCK(lockword);

        /* remove the lock-record from the hashtable */

        LLNI_CRITICAL_START_THREAD(t);
        slot  = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
        tmplr = lock_hashtable.ptr[slot];
        LLNI_CRITICAL_END_THREAD(t);

        if (tmplr == lr) {
                /* special handling if it's the first in the chain */

                lock_hashtable.ptr[slot] = lr->hashlink;
        }
        else {
                for (; tmplr != NULL; tmplr = tmplr->hashlink) {
                        if (tmplr->hashlink == lr) {
                                tmplr->hashlink = lr->hashlink;
                                break;
                        }
                }

                assert(tmplr != NULL);
        }

        /* decrease entry count */

        lock_hashtable.entries--;

        /* unlock the hashtable */

        pthread_mutex_unlock(&(lock_hashtable.mutex));

        /* free the lock record */

        lock_record_free(lr);
}


/* lock_record_finalizer *******************************************************

   XXX Remove me for exact GC.

*******************************************************************************/

static void lock_record_finalizer(void *object, void *p)
{
        java_handle_t *o;
        classinfo     *c;

        o = (java_handle_t *) object;

#if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
        /* XXX this is only a dirty hack to make Boehm work with handles */

        o = LLNI_WRAP((java_object_t *) o);
#endif

        LLNI_class_get(o, c);

#if !defined(NDEBUG)
        if (opt_DebugFinalizer) {
                log_start();
                log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
                class_print(c);
                log_print("]");
                log_finish();
        }
#endif

        /* check for a finalizer function */

        if (c->finalizer != NULL)
                finalizer_run(object, p);

        /* remove the lock-record entry from the hashtable and free it */

        lock_hashtable_remove(THREADOBJECT, o);
}


/*============================================================================*/
/* OBJECT LOCK INITIALIZATION                                                 */
/*============================================================================*/


/* lock_init_object_lock *******************************************************

   Initialize the monitor pointer of the given object. The monitor gets
   initialized to an unlocked state.

*******************************************************************************/

void lock_init_object_lock(java_object_t *o)
{
        assert(o);

        o->lockword = THIN_UNLOCKED;
}


/*============================================================================*/
/* LOCKING ALGORITHM                                                          */
/*============================================================================*/


/* lock_lockword_get ***********************************************************

   Get the lockword for the given object.

   IN:
      t............the current thread
      o............the object

*******************************************************************************/

static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
{
        uintptr_t lockword;

        LLNI_CRITICAL_START_THREAD(t);
        lockword = LLNI_DIRECT(o)->lockword;
        LLNI_CRITICAL_END_THREAD(t);

        return lockword;
}


/* lock_lockword_set ***********************************************************

   Set the lockword for the given object.

   IN:
      t............the current thread
      o............the object
      lockword.....the new lockword value

*******************************************************************************/

static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
{
        LLNI_CRITICAL_START_THREAD(t);
        LLNI_DIRECT(o)->lockword = lockword;
        LLNI_CRITICAL_END_THREAD(t);
}


/* lock_record_enter ***********************************************************

   Enter the lock represented by the given lock record.

   IN:
      t.................the current thread
      lr................the lock record

*******************************************************************************/

static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
{
        pthread_mutex_lock(&(lr->mutex));
        lr->owner = t;
}


/* lock_record_exit ************************************************************

   Release the lock represented by the given lock record.

   IN:
      t.................the current thread
      lr................the lock record

   PRE-CONDITION:
      The current thread must own the lock represented by this lock record.
      This is NOT checked by this function!

*******************************************************************************/

static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
{
        lr->owner = NULL;
        pthread_mutex_unlock(&(lr->mutex));
}


/* lock_inflate ****************************************************************

   Inflate the lock of the given object. This may only be called by the
   owner of the monitor of the object.

   IN:
      t............the current thread
      o............the object of which to inflate the lock
      lr...........the lock record to install. The current thread must
                   own the lock of this lock record!

   PRE-CONDITION:
      The current thread must be the owner of this object's monitor AND
      of the lock record's lock!

*******************************************************************************/

static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
{
        uintptr_t lockword;

        /* get the current lock count */

        lockword = lock_lockword_get(t, o);

        if (IS_FAT_LOCK(lockword)) {
                assert(GET_FAT_LOCK(lockword) == lr);
                return;
        }
        else {
                assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);

                /* copy the count from the thin lock */

                lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
        }

        DEBUGLOCKS(("[lock_inflate      : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
                                lr, t, o, lockword, lr->count));

        /* install it */

        lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
}


/* TODO Move this function into threadlist.[ch]. */

static threadobject *threads_lookup_thread_id(int index)
{
        threadobject *t;

        threadlist_lock();

        for (t = threadlist_first(); t != NULL; t = threadlist_next(t)) {
                if (t->state == THREAD_STATE_NEW)
                        continue;
                if (t->index == index)
                        break;
        }

        threadlist_unlock();
        return t;
}

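/* Flat lock contention (FLC) handling *****************************************

   Overview added for orientation; the `sable_` prefix suggests this scheme
   follows SableVM's flat-lock-contention protocol (an assumption based on
   the naming). As implemented below: a thread that finds an object
   thin-locked by another thread sets the holder's flc_bit, enqueues itself
   on the holder's flc_list and blocks on its own flc_cond. When the holder
   releases the thin lock and sees the bit set, it calls
   notify_flc_waiters(), which inflates the contended locks and wakes the
   waiters so they can retry against the now-fat lock (see
   lock_monitor_enter and lock_monitor_exit).

*******************************************************************************/
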
static void sable_flc_waiting(ptrint lockword, threadobject *t, java_handle_t *o)
{
        int index;
        threadobject *t_other;
        int old_flc;

        index = GET_THREAD_INDEX(lockword);
        t_other = threads_lookup_thread_id(index);
        if (!t_other)
                /* failure, TODO: add statistics */
                return;

        pthread_mutex_lock(&t_other->flc_lock);
        old_flc = t_other->flc_bit;
        t_other->flc_bit = true;

        DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d",
                                t->index, t_other->index));

        /* Set FLC bit first, then read the lockword again */
        MEMORY_BARRIER();

        lockword = lock_lockword_get(t, o);

        /* Lockword is still the way it was seen before */
        if (IS_THIN_LOCK(lockword) && (GET_THREAD_INDEX(lockword) == index))
        {
                /* Add tuple (t, o) to the other thread's FLC list */
                t->flc_object = o;
                t->flc_next = t_other->flc_list;
                t_other->flc_list = t;

                for (;;)
                {
                        threadobject *current;

                        /* Wait until another thread sees the flc bit and notifies
                           us of unlocking. */
                        pthread_cond_wait(&t->flc_cond, &t_other->flc_lock);

                        /* Traverse FLC list looking if we're still there */
                        current = t_other->flc_list;
                        while (current && current != t)
                                current = current->flc_next;
                        if (!current)
                                /* not in list anymore, can stop waiting */
                                break;

                        /* We are still in the list -- the other thread cannot have seen
                           the FLC bit yet */
                        assert(t_other->flc_bit);
                }

                t->flc_object = NULL;   /* for garbage collector? */
                t->flc_next = NULL;
        }
        else
                t_other->flc_bit = old_flc;

        pthread_mutex_unlock(&t_other->flc_lock);
}

static void notify_flc_waiters(threadobject *t, java_handle_t *o)
{
        threadobject *current;

        pthread_mutex_lock(&t->flc_lock);

        current = t->flc_list;
        while (current)
        {
                if (current->flc_object != o)
                {
                        /* The object has to be inflated so the other threads can properly
                           block on it. */

                        /* Only if not already inflated */
                        ptrint lockword = lock_lockword_get(t, current->flc_object);
                        if (IS_THIN_LOCK(lockword)) {
                                lock_record_t *lr = lock_hashtable_get(t, current->flc_object);
                                lock_record_enter(t, lr);

                                DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
                                                        t->index, (void*) current->flc_object, (void*) lr));

                                lock_inflate(t, current->flc_object, lr);
                        }
                }
                /* Wake the waiting thread */
                pthread_cond_broadcast(&current->flc_cond);

                current = current->flc_next;
        }

        t->flc_list = NULL;
        t->flc_bit = false;
        pthread_mutex_unlock(&t->flc_lock);
}

/* lock_monitor_enter **********************************************************

   Acquire the monitor of the given object. If the current thread already
   owns the monitor, the lock counter is simply increased.

   This function blocks until it can acquire the monitor.

   IN:
      t............the current thread
      o............the object of which to enter the monitor

   RETURN VALUE:
      true.........the lock has been successfully acquired
      false........an exception has been thrown

*******************************************************************************/

bool lock_monitor_enter(java_handle_t *o)
{
        threadobject  *t;
        /* CAUTION: This code assumes that ptrint is unsigned! */
        ptrint         lockword;
        ptrint         thinlock;
        lock_record_t *lr;

        if (o == NULL) {
                exceptions_throw_nullpointerexception();
                return false;
        }

        t = THREADOBJECT;

        thinlock = t->thinlock;

retry:
        /* most common case: try to thin-lock an unlocked object */

        LLNI_CRITICAL_START_THREAD(t);
        lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
        LLNI_CRITICAL_END_THREAD(t);

        if (lockword == THIN_UNLOCKED) {
                /* success. we locked it */
                /* The Java Memory Model requires a memory barrier here: */
                /* Because of the CAS above, this barrier is a nop on x86 / x86_64 */
                MEMORY_BARRIER_AFTER_ATOMIC();
                return true;
        }

        /* next common case: recursive lock with small recursion count */
        /* We don't have to worry about stale values here, as any stale value  */
        /* will indicate another thread holding the lock (or an inflated lock) */

        if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
                /* we own this monitor               */
                /* check the current recursion count */

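                /* Note: since the TID bits of lockword and thinlock are equal
                   here, lockword ^ thinlock isolates the count field, so the
                   comparison below is simply count < THIN_LOCK_COUNT_MAX. */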
                if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
                {
                        /* the recursion count is low enough */

                        lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);

                        /* success. we locked it */
                        return true;
                }
                else {
                        /* recursion count overflow */

                        lr = lock_hashtable_get(t, o);
                        lock_record_enter(t, lr);
                        lock_inflate(t, o, lr);
                        lr->count++;

                        notify_flc_waiters(t, o);

                        return true;
                }
        }

        /* the lock is either contended or fat */

        if (IS_FAT_LOCK(lockword)) {

                lr = GET_FAT_LOCK(lockword);

                /* check for recursive entering */
                if (lr->owner == t) {
                        lr->count++;
                        return true;
                }

                /* acquire the mutex of the lock record */

                lock_record_enter(t, lr);

                assert(lr->count == 0);

                return true;
        }

        /****** inflation path ******/

#if defined(ENABLE_JVMTI)
        /* Monitor Contended Enter */
        jvmti_MonitorContendedEntering(false, o);
#endif

        sable_flc_waiting(lockword, t, o);

#if defined(ENABLE_JVMTI)
        /* Monitor Contended Entered */
        jvmti_MonitorContendedEntering(true, o);
#endif
        goto retry;
}


/* lock_monitor_exit ***********************************************************

   Decrement the counter of a (currently owned) monitor. If the counter
   reaches zero, release the monitor.

   If the current thread is not the owner of the monitor, an
   IllegalMonitorStateException is thrown.

   IN:
      t............the current thread
      o............the object of which to exit the monitor

   RETURN VALUE:
      true.........everything ok,
      false........an exception has been thrown

*******************************************************************************/

bool lock_monitor_exit(java_handle_t *o)
{
        threadobject *t;
        uintptr_t     lockword;
        ptrint        thinlock;

        if (o == NULL) {
                exceptions_throw_nullpointerexception();
                return false;
        }

        t = THREADOBJECT;

        thinlock = t->thinlock;

        /* We don't have to worry about stale values here, as any stale value */
        /* will indicate that we don't own the lock.                          */

        lockword = lock_lockword_get(t, o);

        /* most common case: we release a thin lock that we hold once */

        if (lockword == thinlock) {
                /* memory barrier for Java Memory Model */
                STORE_ORDER_BARRIER();
                lock_lockword_set(t, o, THIN_UNLOCKED);
                /* memory barrier for thin locking */
                MEMORY_BARRIER();
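                /* This barrier pairs with the MEMORY_BARRIER() in
                   sable_flc_waiting(): the store of THIN_UNLOCKED must be
                   globally visible before flc_bit is read below, otherwise
                   a contending thread could set the bit and re-check the
                   lockword without either side noticing the other. */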

                /* check if there has been a flat lock contention on this object */

                if (t->flc_bit) {
                        DEBUGLOCKS(("thread %d saw flc bit", t->index));

                        /* there has been a contention on this thin lock */
                        notify_flc_waiters(t, o);
                }

                return true;
        }

        /* next common case: we release a recursive lock, count > 0 */

        if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
                lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
                return true;
        }

        /* either the lock is fat, or we don't hold it at all */

        if (IS_FAT_LOCK(lockword)) {

                lock_record_t *lr;

                lr = GET_FAT_LOCK(lockword);

                /* check if we own this monitor */
                /* We don't have to worry about stale values here, as any stale value */
                /* will be != t and thus fail this check.                             */

                if (lr->owner != t) {
                        exceptions_throw_illegalmonitorstateexception();
                        return false;
                }

                /* { the current thread `t` owns the lock record `lr` on object `o` } */

                if (lr->count != 0) {
                        /* we had locked this one recursively. just decrement, it will */
                        /* still be locked. */
                        lr->count--;
                        return true;
                }

                /* unlock this lock record */

                lr->owner = NULL;
                pthread_mutex_unlock(&(lr->mutex));

                return true;
        }

        /* legal thin lock cases have been handled above, so this is an error */

        exceptions_throw_illegalmonitorstateexception();

        return false;
}


/* lock_record_add_waiter ******************************************************

   Add a thread to the list of waiting threads of a lock record.

   IN:
      lr...........the lock record
      thread.......the thread to add

*******************************************************************************/

static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
{
        lock_waiter_t *w;

        /* Allocate a waiter data structure. */

        w = NEW(lock_waiter_t);

#if defined(ENABLE_STATISTICS)
        if (opt_stat)
                size_lock_waiter += sizeof(lock_waiter_t);
#endif

        /* Store the thread in the waiter structure. */

        w->thread = thread;

        /* Add the waiter as last entry to waiters list. */

        list_add_last(lr->waiters, w);
}


/* lock_record_remove_waiter ***************************************************

   Remove a thread from the list of waiting threads of a lock record.

   IN:
      lr...........the lock record
      thread.......the current thread

   PRE-CONDITION:
      The current thread must be the owner of the lock record.

*******************************************************************************/

static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
{
        list_t        *l;
        lock_waiter_t *w;

        /* Get the waiters list. */

        l = lr->waiters;

        for (w = list_first(l); w != NULL; w = list_next(l, w)) {
                if (w->thread == thread) {
                        /* Remove the waiter entry from the list. */

                        list_remove(l, w);

                        /* Free the waiter data structure. */

                        FREE(w, lock_waiter_t);

#if defined(ENABLE_STATISTICS)
                        if (opt_stat)
                                size_lock_waiter -= sizeof(lock_waiter_t);
#endif

                        return;
                }
        }

        /* This should never happen. */

        vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
}


/* lock_record_wait ************************************************************

   Wait on a lock record for a given (maximum) amount of time.

   IN:
      thread.......the current thread
      lr...........the lock record
      millis.......milliseconds of timeout
      nanos........nanoseconds of timeout

   RETURN VALUE:
      true.........we have been interrupted,
      false........everything ok

   PRE-CONDITION:
      The current thread must be the owner of the lock record.
      This is NOT checked by this function!

*******************************************************************************/

static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
{
        s4   lockcount;
        bool wasinterrupted = false;

        DEBUGLOCKS(("[lock_record_wait  : lr=%p, t=%p, millis=%lld, nanos=%d]",
                                lr, thread, millis, nanos));

        /* { the thread t owns the fat lock record lr on the object o } */

        /* register us as waiter for this object */

        lock_record_add_waiter(lr, thread);

        /* remember the old lock count */

        lockcount = lr->count;

        /* unlock this record */

        lr->count = 0;
        lock_record_exit(thread, lr);

        /* wait until notified/interrupted/timed out */

        threads_wait_with_timeout_relative(thread, millis, nanos);

        /* re-enter the monitor */

        lock_record_enter(thread, lr);

        /* remove us from the list of waiting threads */

        lock_record_remove_waiter(lr, thread);

        /* restore the old lock count */

        lr->count = lockcount;

        /* We can only be signaled OR interrupted, not both. If both flags
           are set, reset only signaled and leave the thread in
           interrupted state. Otherwise, clear both. */

        if (!thread->signaled) {
                wasinterrupted = thread->interrupted;
                thread->interrupted = false;
        }

        thread->signaled = false;

        /* return if we have been interrupted */

        return wasinterrupted;
}


/* lock_monitor_wait ***********************************************************

   Wait on an object for a given (maximum) amount of time.

   IN:
      t............the current thread
      o............the object
      millis.......milliseconds of timeout
      nanos........nanoseconds of timeout

   PRE-CONDITION:
      The current thread must be the owner of the object's monitor.

*******************************************************************************/

static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
{
        uintptr_t      lockword;
        lock_record_t *lr;

        lockword = lock_lockword_get(t, o);

        /* check if we own this monitor */
        /* We don't have to worry about stale values here, as any stale value */
        /* will fail this check.                                              */

        if (IS_FAT_LOCK(lockword)) {

                lr = GET_FAT_LOCK(lockword);

                if (lr->owner != t) {
                        exceptions_throw_illegalmonitorstateexception();
                        return;
                }
        }
        else {
                /* it's a thin lock */

                if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
                        exceptions_throw_illegalmonitorstateexception();
                        return;
                }

                /* inflate this lock */

                lr = lock_hashtable_get(t, o);
                lock_record_enter(t, lr);
                lock_inflate(t, o, lr);

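                /* Wake any threads parked in sable_flc_waiting() on this
                   object; they will retry and now block on the fat lock. */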
                notify_flc_waiters(t, o);
        }

        /* { the thread t owns the fat lock record lr on the object o } */

        if (lock_record_wait(t, lr, millis, nanos))
                exceptions_throw_interruptedexception();
}


/* lock_record_notify **********************************************************

   Notify one thread or all threads waiting on the given lock record.

   IN:
      t............the current thread
      lr...........the lock record
      one..........if true, only notify one thread

   PRE-CONDITION:
      The current thread must be the owner of the lock record.
      This is NOT checked by this function!

*******************************************************************************/

static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
{
        list_t        *l;
        lock_waiter_t *w;
        threadobject  *waitingthread;

        /* { the thread t owns the fat lock record lr on the object o } */

        /* Get the waiters list. */

        l = lr->waiters;

        for (w = list_first(l); w != NULL; w = list_next(l, w)) {
                /* signal the waiting thread */

                waitingthread = w->thread;

                /* We must skip threads which have already been notified or
                   interrupted. They will remove themselves from the list. */

                if (waitingthread->signaled || waitingthread->interrupted)
                        continue;

                /* Enter the wait-mutex. */

                pthread_mutex_lock(&(waitingthread->waitmutex));

                DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
                                        lr, t, waitingthread, waitingthread->sleeping, one));

                /* Signal the thread if it's sleeping. sleeping can be false
                   when the waiting thread is blocked between giving up the
                   monitor and entering the waitmutex. It will eventually
                   observe that it's signaled and refrain from going to
                   sleep. */

                if (waitingthread->sleeping)
                        pthread_cond_signal(&(waitingthread->waitcond));

                /* Mark the thread as signaled. */

                waitingthread->signaled = true;

                /* Leave the wait-mutex. */

                pthread_mutex_unlock(&(waitingthread->waitmutex));

                /* if we should only wake one, we are done */

                if (one)
                        break;
        }
}


/* lock_monitor_notify *********************************************************

   Notify one thread or all threads waiting on the given object.

   IN:
      t............the current thread
      o............the object
      one..........if true, only notify one thread

   PRE-CONDITION:
      The current thread must be the owner of the object's monitor.

*******************************************************************************/

static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
{
        uintptr_t      lockword;
        lock_record_t *lr;

        lockword = lock_lockword_get(t, o);

        /* check if we own this monitor */
        /* We don't have to worry about stale values here, as any stale value */
        /* will fail this check.                                              */

        if (IS_FAT_LOCK(lockword)) {

                lr = GET_FAT_LOCK(lockword);

                if (lr->owner != t) {
                        exceptions_throw_illegalmonitorstateexception();
                        return;
                }
        }
        else {
                /* it's a thin lock */

                if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
                        exceptions_throw_illegalmonitorstateexception();
                        return;
                }

                /* no thread can wait on a thin lock, so there's nothing to do. */
                return;
        }

        /* { the thread t owns the fat lock record lr on the object o } */

        lock_record_notify(t, lr, one);
}



/*============================================================================*/
/* INQUIRY FUNCTIONS                                                          */
/*============================================================================*/


/* lock_is_held_by_current_thread **********************************************

   Return true if the current thread owns the monitor of the given object.

   IN:
      o............the object

   RETURN VALUE:
      true, if the current thread holds the lock of this object.

*******************************************************************************/

bool lock_is_held_by_current_thread(java_handle_t *o)
{
        threadobject  *t;
        uintptr_t      lockword;
        lock_record_t *lr;

        t = THREADOBJECT;

        /* check if we own this monitor */
        /* We don't have to worry about stale values here, as any stale value */
        /* will fail this check.                                              */

        lockword = lock_lockword_get(t, o);

        if (IS_FAT_LOCK(lockword)) {
                /* it's a fat lock */

                lr = GET_FAT_LOCK(lockword);

                return (lr->owner == t);
        }
        else {
                /* it's a thin lock */

                return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
        }
}



/*============================================================================*/
/* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD                              */
/*============================================================================*/


/* lock_wait_for_object ********************************************************

   Wait for the given object.

   IN:
      o............the object
      millis.......milliseconds to wait
      nanos........nanoseconds to wait

*******************************************************************************/

void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
{
        threadobject *thread;

        thread = THREADOBJECT;

        lock_monitor_wait(thread, o, millis, nanos);
}


/* lock_notify_object **********************************************************

   Notify one thread waiting on the given object.

   IN:
      o............the object

*******************************************************************************/

void lock_notify_object(java_handle_t *o)
{
        threadobject *thread;

        thread = THREADOBJECT;

        lock_monitor_notify(thread, o, true);
}


/* lock_notify_all_object ******************************************************

   Notify all threads waiting on the given object.

   IN:
      o............the object

*******************************************************************************/

void lock_notify_all_object(java_handle_t *o)
{
        threadobject *thread;

        thread = THREADOBJECT;

        lock_monitor_notify(thread, o, false);
}

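/* Usage sketch (illustrative only, not called anywhere in this file): the
 * wrappers above map onto the Java-level monitor operations roughly like
 * this, assuming `o` is a valid handle:
 *
 *     if (lock_monitor_enter(o)) {          monitorenter
 *         lock_wait_for_object(o, 0, 0);    Object.wait()
 *         lock_notify_object(o);            Object.notify()
 *         lock_notify_all_object(o);        Object.notifyAll()
 *         lock_monitor_exit(o);             monitorexit
 *     }
 */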

/*
 * These are local overrides for various environment variables in Emacs.
 * Please do not remove this and leave it at the end of the file, where
 * Emacs will automagically detect them.
 * ---------------------------------------------------------------------
 * Local variables:
 * mode: c
 * indent-tabs-mode: t
 * c-basic-offset: 4
 * tab-width: 4
 * End:
 * vim:noexpandtab:sw=4:ts=4:
 */