Merged with tip.
[cacao.git] / src / threads / posix / lock.c
1 /* src/threads/posix/lock.c - lock implementation
2
3    Copyright (C) 1996-2005, 2006, 2007, 2008
4    CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5
6    This file is part of CACAO.
7
8    This program is free software; you can redistribute it and/or
9    modify it under the terms of the GNU General Public License as
10    published by the Free Software Foundation; either version 2, or (at
11    your option) any later version.
12
13    This program is distributed in the hope that it will be useful, but
14    WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16    General Public License for more details.
17
18    You should have received a copy of the GNU General Public License
19    along with this program; if not, write to the Free Software
20    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21    02110-1301, USA.
22
23 */
24
25
26 #include "config.h"
27
28 #include <assert.h>
29 #include <stdint.h>
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <sys/time.h>
33 #include <pthread.h>
34
35 #include "vm/types.h"
36
37 #include "mm/memory.h"
38
39 #include "native/llni.h"
40
41 #include "threads/lock-common.h"
42 #include "threads/mutex.hpp"
43 #include "threads/threadlist.h"
44 #include "threads/thread.h"
45
46 #include "threads/posix/lock.h"
47
48 #include "toolbox/list.h"
49
50 #include "vm/global.h"
51 #include "vm/exceptions.h"
52 #include "vm/finalizer.h"
53 #include "vm/stringlocal.h"
54 #include "vm/vm.h"
55
56 #include "vmcore/options.h"
57
58 #if defined(ENABLE_STATISTICS)
59 # include "vmcore/statistics.h"
60 #endif
61
62 #if defined(ENABLE_VMLOG)
63 #include <vmlog_cacao.h>
64 #endif
65
66 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
67
68 #include "arch.h"
69
70 /* includes for atomic instructions: */
71
72 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
73 #include "threads/posix/generic-primitives.h"
74 #else
75 #include "threads/atomic.hpp"
76 #endif
77
78 #if defined(ENABLE_JVMTI)
79 #include "native/jvmti/cacaodbg.h"
80 #endif
81
82 #if defined(ENABLE_GC_BOEHM)
83 # include "mm/boehm-gc/include/gc.h"
84 #endif
85
86
87 /* debug **********************************************************************/
88
89 #if !defined(NDEBUG)
90 # define DEBUGLOCKS(format) \
91     do { \
92         if (opt_DebugLocks) { \
93             log_println format; \
94         } \
95     } while (0)
96 #else
97 # define DEBUGLOCKS(format)
98 #endif
99
100
101 /******************************************************************************/
102 /* MACROS                                                                     */
103 /******************************************************************************/
104
105 /* number of lock records in the first pool allocated for a thread */
106 #define LOCK_INITIAL_LOCK_RECORDS 8
107
108 #define LOCK_INITIAL_HASHTABLE_SIZE  1613  /* a prime in the middle between 1024 and 2048 */
109
110
111 /******************************************************************************/
112 /* MACROS FOR THIN/FAT LOCKS                                                  */
113 /******************************************************************************/
114
115 /* We use a variant of the tasuki locks described in the paper
116  *     
117  *     Tamiya Onodera, Kiyokuni Kawachiya
118  *     A Study of Locking Objects with Bimodal Fields
119  *     Proceedings of the ACM OOPSLA '99, pp. 223-237
120  *     1999
121  *
122  * The underlying thin locks are a variant of the thin locks described in
123  * 
124  *     Bacon, Konuru, Murthy, Serrano
125  *     Thin Locks: Featherweight Synchronization for Java
126  *         Proceedings of the ACM Conference on Programming Language Design and 
127  *         Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
128  *         June 1998
129  *
130  * In thin lock mode the lockword looks like this:
131  *
132  *     ,----------------------,-----------,---,
133  *     |      thread ID       |   count   | 0 |
134  *     `----------------------'-----------'---'
135  *
136  *     thread ID......the 'index' of the owning thread, or 0
137  *     count..........number of times the lock has been entered minus 1
138  *     0..............the shape bit is 0 in thin lock mode
139  *
140  * In fat lock mode it is basically a lock_record_t *:
141  *
142  *     ,----------------------------------,---,
143  *     |    lock_record_t * (without LSB) | 1 |
144  *     `----------------------------------'---'
145  *
146  *     1..............the shape bit is 1 in fat lock mode
147  */
148
149 #if SIZEOF_VOID_P == 8
150 #define THIN_LOCK_WORD_SIZE    64
151 #else
152 #define THIN_LOCK_WORD_SIZE    32
153 #endif
154
155 #define THIN_LOCK_SHAPE_BIT    0x01
156
157 #define THIN_UNLOCKED          0
158
159 #define THIN_LOCK_COUNT_SHIFT  1
160 #define THIN_LOCK_COUNT_SIZE   8
161 #define THIN_LOCK_COUNT_INCR   (1 << THIN_LOCK_COUNT_SHIFT)
162 #define THIN_LOCK_COUNT_MAX    ((1 << THIN_LOCK_COUNT_SIZE) - 1)
163 #define THIN_LOCK_COUNT_MASK   (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
164
165 #define THIN_LOCK_TID_SHIFT    (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
166 #define THIN_LOCK_TID_SIZE     (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
167
168 #define IS_THIN_LOCK(lockword)  (!((lockword) & THIN_LOCK_SHAPE_BIT))
169 #define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)
170
171 #define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
172 #define MAKE_FAT_LOCK(ptr)      ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)
173
174 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
175 #define GET_THREAD_INDEX(lockword) ((unsigned) lockword >> THIN_LOCK_TID_SHIFT)
176
177
178 /* global variables ***********************************************************/
179
180 /* hashtable mapping objects to lock records */
181 static lock_hashtable_t lock_hashtable;
182
183
184 /******************************************************************************/
185 /* PROTOTYPES                                                                 */
186 /******************************************************************************/
187
188 static void lock_hashtable_init(void);
189
190 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
191 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
192 static void lock_record_enter(threadobject *t, lock_record_t *lr);
193 static void lock_record_exit(threadobject *t, lock_record_t *lr);
194 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
195 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
196
197
198 /*============================================================================*/
199 /* INITIALIZATION OF DATA STRUCTURES                                          */
200 /*============================================================================*/
201
202
203 /* lock_init *******************************************************************
204
205    Initialize global data for locking.
206
207 *******************************************************************************/
208
void lock_init(void)
{
	/* Set up the global hashtable that maps objects to lock records. */

	lock_hashtable_init();

#if defined(ENABLE_VMLOG)
	vmlog_cacao_init_lock();
#endif
}
219
220
221 /* lock_pre_compute_thinlock ***************************************************
222
223    Pre-compute the thin lock value for a thread index.
224
225    IN:
226       index........the thead index (>= 1)
227
228    RETURN VALUE:
229       the thin lock value for this thread index
230
231 *******************************************************************************/
232
233 ptrint lock_pre_compute_thinlock(s4 index)
234 {
235         return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
236 }
237
238
239 /* lock_record_new *************************************************************
240
241    Allocate a lock record.
242
243 *******************************************************************************/
244
245 static lock_record_t *lock_record_new(void)
246 {
247         lock_record_t *lr;
248
249         /* allocate the data structure on the C heap */
250
251         lr = NEW(lock_record_t);
252
253 #if defined(ENABLE_STATISTICS)
254         if (opt_stat)
255                 size_lock_record += sizeof(lock_record_t);
256 #endif
257
258         /* initialize the members */
259
260         lr->object  = NULL;
261         lr->owner   = NULL;
262         lr->count   = 0;
263         lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));
264
265 #if defined(ENABLE_GC_CACAO)
266         /* register the lock object as weak reference with the GC */
267
268         gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
269 #endif
270
271         /* initialize the mutex */
272
273         lr->mutex = Mutex_new();
274
275         DEBUGLOCKS(("[lock_record_new   : lr=%p]", (void *) lr));
276
277         return lr;
278 }
279
280
281 /* lock_record_free ************************************************************
282
283    Free a lock record.
284
285    IN:
286        lr....lock record to free
287
288 *******************************************************************************/
289
290 static void lock_record_free(lock_record_t *lr)
291 {
292         DEBUGLOCKS(("[lock_record_free  : lr=%p]", (void *) lr));
293
294         /* Destroy the mutex. */
295
296         Mutex_delete(lr->mutex);
297
298 #if defined(ENABLE_GC_CACAO)
299         /* unregister the lock object reference with the GC */
300
301         gc_weakreference_unregister(&(lr->object));
302 #endif
303
304         /* Free the waiters list. */
305
306         list_free(lr->waiters);
307
308         /* Free the data structure. */
309
310         FREE(lr, lock_record_t);
311
312 #if defined(ENABLE_STATISTICS)
313         if (opt_stat)
314                 size_lock_record -= sizeof(lock_record_t);
315 #endif
316 }
317
318
319 /*============================================================================*/
320 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS                                  */
321 /*============================================================================*/
322
323 /* lock_hashtable_init *********************************************************
324
325    Initialize the global hashtable mapping objects to lock records.
326
327 *******************************************************************************/
328
329 static void lock_hashtable_init(void)
330 {
331         lock_hashtable.mutex = Mutex_new();
332
333         lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
334         lock_hashtable.entries = 0;
335         lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);
336
337 #if defined(ENABLE_STATISTICS)
338         if (opt_stat)
339                 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
340 #endif
341
342         MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
343 }
344
345
346 /* lock_hashtable_grow *********************************************************
347
348    Grow the lock record hashtable to about twice its current size and
349    rehash the entries.
350
351 *******************************************************************************/
352
353 /* must be called with hashtable mutex locked */
354 static void lock_hashtable_grow(void)
355 {
356         u4 oldsize;
357         u4 newsize;
358         lock_record_t **oldtable;
359         lock_record_t **newtable;
360         lock_record_t *lr;
361         lock_record_t *next;
362         u4 i;
363         u4 h;
364         u4 newslot;
365
366         /* allocate a new table */
367
368         oldsize = lock_hashtable.size;
369         newsize = oldsize*2 + 1; /* XXX should use prime numbers */
370
371         DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
372
373         oldtable = lock_hashtable.ptr;
374         newtable = MNEW(lock_record_t *, newsize);
375
376 #if defined(ENABLE_STATISTICS)
377         if (opt_stat)
378                 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
379 #endif
380
381         MZERO(newtable, lock_record_t *, newsize);
382
383         /* rehash the entries */
384
385         for (i = 0; i < oldsize; i++) {
386                 lr = oldtable[i];
387                 while (lr) {
388                         next = lr->hashlink;
389
390                         h = heap_hashcode(lr->object);
391                         newslot = h % newsize;
392
393                         lr->hashlink = newtable[newslot];
394                         newtable[newslot] = lr;
395
396                         lr = next;
397                 }
398         }
399
400         /* replace the old table */
401
402         lock_hashtable.ptr  = newtable;
403         lock_hashtable.size = newsize;
404
405         MFREE(oldtable, lock_record_t *, oldsize);
406
407 #if defined(ENABLE_STATISTICS)
408         if (opt_stat)
409                 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
410 #endif
411 }
412
413
414 /* lock_hashtable_cleanup ******************************************************
415
416    Removes (and frees) lock records which have a cleared object reference
417    from the hashtable. The locked object was reclaimed by the GC.
418
419 *******************************************************************************/
420
#if defined(ENABLE_GC_CACAO)
/* Walk every chain of the hashtable and remove (and free) records
   whose weakly referenced object has been cleared by the GC, i.e.
   the locked object was reclaimed. Must run outside any LLNI
   critical section; takes the hashtable mutex itself. */
void lock_hashtable_cleanup(void)
{
	lock_record_t *lr;    /* record currently being examined          */
	lock_record_t *prev;  /* predecessor of lr in the chain, or NULL  */
	lock_record_t *next;  /* successor, saved before lr may be freed  */
	u4             i;     /* u4 matches lock_hashtable.size (was int:
	                         signed/unsigned comparison, inconsistent
	                         with lock_hashtable_grow)                 */

	/* NOTE: the unused local `t = THREADOBJECT` was removed. */

	/* lock the hashtable */

	Mutex_lock(lock_hashtable.mutex);

	/* search the hashtable for cleared references */

	for (i = 0; i < lock_hashtable.size; i++) {
		lr = lock_hashtable.ptr[i];
		prev = NULL;

		while (lr) {
			next = lr->hashlink;

			/* remove lock records with cleared references */

			if (lr->object == NULL) {

				/* unlink the lock record from the hashtable */

				if (prev == NULL)
					lock_hashtable.ptr[i] = next;
				else
					prev->hashlink = next;

				/* keep the entry count accurate, as
				   lock_hashtable_remove does (it drives the
				   grow heuristic in lock_hashtable_get) */

				lock_hashtable.entries--;

				/* free the lock record */

				lock_record_free(lr);

			} else {
				prev = lr;
			}

			lr = next;
		}
	}

	/* unlock the hashtable */

	Mutex_unlock(lock_hashtable.mutex);
}
#endif
473
474
475 /* lock_hashtable_get **********************************************************
476
477    Find the lock record for the given object.  If it does not exists,
478    yet, create it and enter it in the hashtable.
479
480    IN:
481       t....the current thread
482           o....the object to look up
483
484    RETURN VALUE:
485       the lock record to use for this object
486
487 *******************************************************************************/
488
489 #if defined(ENABLE_GC_BOEHM)
490 static void lock_record_finalizer(void *object, void *p);
491 #endif
492
static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
{
	uintptr_t      lockword;
	u4             slot;
	lock_record_t *lr;

	lockword = lock_lockword_get(t, o);

	/* Fast path: the lock is already inflated, so the lockword itself
	   encodes the lock record pointer -- no table access needed. */

	if (IS_FAT_LOCK(lockword))
		return GET_FAT_LOCK(lockword);

	/* lock the hashtable */

	Mutex_lock(lock_hashtable.mutex);

	/* lookup the lock record in the hashtable */
	/* The raw object pointer may only be read inside an LLNI critical
	   section, which keeps the GC from moving the object meanwhile. */

	LLNI_CRITICAL_START_THREAD(t);
	slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
	lr   = lock_hashtable.ptr[slot];

	for (; lr != NULL; lr = lr->hashlink) {
		if (lr->object == LLNI_DIRECT(o))
			break;
	}
	LLNI_CRITICAL_END_THREAD(t);

	if (lr == NULL) {
		/* not found, we must create a new one */

		lr = lock_record_new();

		LLNI_CRITICAL_START_THREAD(t);
		lr->object = LLNI_DIRECT(o);
		LLNI_CRITICAL_END_THREAD(t);

#if defined(ENABLE_GC_BOEHM)
		/* register new finalizer to clean up the lock record */

		GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
#endif

		/* enter it in the hashtable */
		/* New records are pushed at the head of their chain. */

		lr->hashlink             = lock_hashtable.ptr[slot];
		lock_hashtable.ptr[slot] = lr;
		lock_hashtable.entries++;

		/* check whether the hash should grow */
		/* Grow once the load factor entries/size exceeds 4/3. */

		if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
			lock_hashtable_grow();
		}
	}

	/* unlock the hashtable */

	Mutex_unlock(lock_hashtable.mutex);

	/* return the new lock record */

	return lr;
}
556
557
558 /* lock_hashtable_remove *******************************************************
559
560    Remove the lock record for the given object from the hashtable
561    and free it afterwards.
562
563    IN:
564        t....the current thread
565        o....the object to look up
566
567 *******************************************************************************/
568
static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
{
	uintptr_t      lockword;
	lock_record_t *lr;
	u4             slot;
	lock_record_t *tmplr;

	/* lock the hashtable */

	Mutex_lock(lock_hashtable.mutex);

	/* get lock record */

	lockword = lock_lockword_get(t, o);

	/* Only objects whose lock was inflated can have a record here. */

	assert(IS_FAT_LOCK(lockword));

	lr = GET_FAT_LOCK(lockword);

	/* remove the lock-record from the hashtable */
	/* LLNI critical section: the raw object pointer is dereferenced
	   for hashing, so the GC must not move the object meanwhile. */

	LLNI_CRITICAL_START_THREAD(t);
	slot  = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
	tmplr = lock_hashtable.ptr[slot];
	LLNI_CRITICAL_END_THREAD(t);

	if (tmplr == lr) {
		/* special handling if it's the first in the chain */

		lock_hashtable.ptr[slot] = lr->hashlink;
	}
	else {
		/* Walk the chain to lr's predecessor and unlink lr. */

		for (; tmplr != NULL; tmplr = tmplr->hashlink) {
			if (tmplr->hashlink == lr) {
				tmplr->hashlink = lr->hashlink;
				break;
			}
		}

		/* The record must have been somewhere in the chain. */

		assert(tmplr != NULL);
	}

	/* decrease entry count */

	lock_hashtable.entries--;

	/* unlock the hashtable */

	Mutex_unlock(lock_hashtable.mutex);

	/* free the lock record */

	lock_record_free(lr);
}
623
624
625 /* lock_record_finalizer *******************************************************
626
627    XXX Remove me for exact GC.
628
629 *******************************************************************************/
630
/* Boehm-GC finalizer installed by lock_hashtable_get: runs the
   object's own Java finalizer (which this finalizer replaced) and
   then drops the object's lock record from the hashtable. */
static void lock_record_finalizer(void *object, void *p)
{
	java_handle_t *o;
	classinfo     *c;

	o = (java_handle_t *) object;

#if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
	/* XXX this is only a dirty hack to make Boehm work with handles */

	o = LLNI_WRAP((java_object_t *) o);
#endif

	LLNI_class_get(o, c);

#if !defined(NDEBUG)
	if (opt_DebugFinalizer) {
		log_start();
		log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
		class_print(c);
		log_print("]");
		log_finish();
	}
#endif

	/* check for a finalizer function */
	/* Chain to the object's Java-level finalizer first, if any. */

	if (c->finalizer != NULL)
		finalizer_run(object, p);

	/* remove the lock-record entry from the hashtable and free it */

	lock_hashtable_remove(THREADOBJECT, o);
}
665
666
667 /*============================================================================*/
668 /* OBJECT LOCK INITIALIZATION                                                 */
669 /*============================================================================*/
670
671
672 /* lock_init_object_lock *******************************************************
673
674    Initialize the monitor pointer of the given object. The monitor gets
675    initialized to an unlocked state.
676
677 *******************************************************************************/
678
679 void lock_init_object_lock(java_object_t *o)
680 {
681         assert(o);
682
683         o->lockword = THIN_UNLOCKED;
684 }
685
686
687 /*============================================================================*/
688 /* LOCKING ALGORITHM                                                          */
689 /*============================================================================*/
690
691
692 /* lock_lockword_get ***********************************************************
693
694    Get the lockword for the given object.
695
696    IN:
697       t............the current thread
698       o............the object
699
700 *******************************************************************************/
701
702 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
703 {
704         uintptr_t lockword;
705
706         LLNI_CRITICAL_START_THREAD(t);
707         lockword = LLNI_DIRECT(o)->lockword;
708         LLNI_CRITICAL_END_THREAD(t);
709
710         return lockword;
711 }
712
713
714 /* lock_lockword_set ***********************************************************
715
716    Set the lockword for the given object.
717
718    IN:
719       t............the current thread
720       o............the object
721           lockword.....the new lockword value
722
723 *******************************************************************************/
724
725 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
726 {
727         LLNI_CRITICAL_START_THREAD(t);
728         LLNI_DIRECT(o)->lockword = lockword;
729         LLNI_CRITICAL_END_THREAD(t);
730 }
731
732
733 /* lock_record_enter ***********************************************************
734
735    Enter the lock represented by the given lock record.
736
737    IN:
738       t.................the current thread
739           lr................the lock record
740
741 *******************************************************************************/
742
743 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
744 {
745         Mutex_lock(lr->mutex);
746         lr->owner = t;
747 }
748
749
750 /* lock_record_exit ************************************************************
751
752    Release the lock represented by the given lock record.
753
754    IN:
755       t.................the current thread
756           lr................the lock record
757
758    PRE-CONDITION:
759       The current thread must own the lock represented by this lock record.
760           This is NOT checked by this function!
761
762 *******************************************************************************/
763
764 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
765 {
766         lr->owner = NULL;
767         Mutex_unlock(lr->mutex);
768 }
769
770
771 /* lock_inflate ****************************************************************
772
773    Inflate the lock of the given object. This may only be called by the
774    owner of the monitor of the object.
775
776    IN:
777       t............the current thread
778           o............the object of which to inflate the lock
779           lr...........the lock record to install. The current thread must
780                        own the lock of this lock record!
781
782    PRE-CONDITION:
783       The current thread must be the owner of this object's monitor AND
784           of the lock record's lock!
785
786 *******************************************************************************/
787
static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
{
	uintptr_t lockword;

	/* get the current lock count */

	lockword = lock_lockword_get(t, o);

	if (IS_FAT_LOCK(lockword)) {
		/* Already inflated -- it must point at this very record. */
		assert(GET_FAT_LOCK(lockword) == lr);
		return;
	}
	else {
		/* Pre-condition: we hold the thin lock on this object. */
		assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);

		/* copy the count from the thin lock */

		lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
	}

	DEBUGLOCKS(("[lock_inflate      : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
				lr, t, o, lockword, lr->count));

	/* install it */
	/* Publishing the fat lockword makes the inflation visible to all
	   threads; from now on the lockword is a tagged record pointer. */

	lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
}
815
816
817 /* TODO Move this function into threadlist.[ch]. */
818
819 static threadobject *threads_lookup_thread_id(int index)
820 {
821         threadobject *t;
822
823         threadlist_lock();
824
825         for (t = threadlist_first(); t != NULL; t = threadlist_next(t)) {
826                 if (t->state == THREAD_STATE_NEW)
827                         continue;
828                 if (t->index == index)
829                         break;
830         }
831
832         threadlist_unlock();
833         return t;
834 }
835
/* Flat-lock-contention (FLC) wait: the thin lock on o is held by
   another thread. Set that thread's FLC bit and, if the lock is still
   thin and still held by it, enqueue ourselves on its FLC list and
   wait until it inflates the lock and notifies us on exit
   (see notify_flc_waiters). */
static void sable_flc_waiting(ptrint lockword, threadobject *t, java_handle_t *o)
{
	int index;
	threadobject *t_other;
	int old_flc;

	index = GET_THREAD_INDEX(lockword);
	t_other = threads_lookup_thread_id(index);
	if (!t_other)
/*		failure, TODO: add statistics */
		return;

	Mutex_lock(t_other->flc_lock);
	old_flc = t_other->flc_bit;
	t_other->flc_bit = true;

	DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d",
				t->index, t_other->index));

	/* Set FLC bit first, then read the lockword again */
	Atomic_memory_barrier();

	lockword = lock_lockword_get(t, o);

	/* Lockword is still the way it was seen before */
	if (IS_THIN_LOCK(lockword) && (GET_THREAD_INDEX(lockword) == index))
	{
		/* Add tuple (t, o) to the other thread's FLC list */
		t->flc_object = o;
		t->flc_next = t_other->flc_list;
		t_other->flc_list = t;

		for (;;)
		{
			threadobject *current;

			/* Wait until another thread sees the flc bit and notifies
			   us of unlocking. */
			Condition_wait(t->flc_cond, t_other->flc_lock);

			/* Traverse FLC list looking if we're still there */
			current = t_other->flc_list;
			while (current && current != t)
				current = current->flc_next;
			if (!current)
				/* not in list anymore, can stop waiting */
				break;

			/* We are still in the list -- the other thread cannot have seen
			   the FLC bit yet */
			assert(t_other->flc_bit);
		}

		t->flc_object = NULL;   /* for garbage collector? */
		t->flc_next = NULL;
	}
	else
		/* The lockword changed under us (unlocked or inflated), so we
		   did not enqueue; restore the FLC bit we may have clobbered. */
		t_other->flc_bit = old_flc;

	Mutex_unlock(t_other->flc_lock);
}
897
/* Wake every thread parked on t's FLC list (see sable_flc_waiting).
   Called by the lock holder t when it releases a contended thin lock
   on o; lock records of other contended objects are inflated here so
   their waiters can block on them properly. */
static void notify_flc_waiters(threadobject *t, java_handle_t *o)
{
	threadobject *current;

	Mutex_lock(t->flc_lock);

	current = t->flc_list;
	while (current)
	{
		if (current->flc_object != o)
		{
			/* The object has to be inflated so the other threads can properly
			   block on it. */

			/* Only if not already inflated */
			ptrint lockword = lock_lockword_get(t, current->flc_object);
			if (IS_THIN_LOCK(lockword)) {
				lock_record_t *lr = lock_hashtable_get(t, current->flc_object);
				lock_record_enter(t, lr);

				DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
							t->index, (void*) current->flc_object, (void*) lr));

				lock_inflate(t, current->flc_object, lr);
			}
		}
		/* Wake the waiting thread */
		Condition_broadcast(current->flc_cond);

		current = current->flc_next;
	}

	/* Everyone was notified; empty the list and clear the FLC bit. */

	t->flc_list = NULL;
	t->flc_bit = false;
	Mutex_unlock(t->flc_lock);
}
934
935 /* lock_monitor_enter **********************************************************
936
937    Acquire the monitor of the given object. If the current thread already
938    owns the monitor, the lock counter is simply increased.
939
940    This function blocks until it can acquire the monitor.
941
942    IN:
943       t............the current thread
944           o............the object of which to enter the monitor
945
946    RETURN VALUE:
947       true.........the lock has been successfully acquired
948           false........an exception has been thrown
949
950 *******************************************************************************/
951
bool lock_monitor_enter(java_handle_t *o)
{
	threadobject  *t;
	/* CAUTION: This code assumes that ptrint is unsigned! */
	ptrint         lockword;
	ptrint         thinlock;
	lock_record_t *lr;

	/* Locking a null reference raises a NullPointerException. */

	if (o == NULL) {
		exceptions_throw_nullpointerexception();
		return false;
	}

	t = THREADOBJECT;

	/* The value this thread installs into an object's lockword when it
	   thin-locks the object (precomputed per thread). */

	thinlock = t->thinlock;

retry:
	/* most common case: try to thin-lock an unlocked object */

	LLNI_CRITICAL_START_THREAD(t);
	lockword = Atomic_compare_and_swap_ptr(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
	LLNI_CRITICAL_END_THREAD(t);

	if (lockword == THIN_UNLOCKED) {
		/* success. we locked it */
		/* The Java Memory Model requires a memory barrier here: */
		/* Because of the CAS above, this barrier is a nop on x86 / x86_64 */
		Atomic_instruction_barrier();
		return true;
	}

	/* next common case: recursive lock with small recursion count */
	/* We don't have to worry about stale values here, as any stale value  */
	/* will indicate another thread holding the lock (or an inflated lock) */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		/* we own this monitor               */
		/* check the current recursion count */

		/* The owner bits cancel in the XOR, leaving only the shifted
		   recursion count to compare against the maximum. */

		if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
		{
			/* the recursion count is low enough */

			lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);

			/* success. we locked it */
			return true;
		}
		else {
			/* recursion count overflow */

			/* The thin lock cannot count any higher: inflate to a fat
			   lock record and keep the recursion count there. */

			lr = lock_hashtable_get(t, o);
			lock_record_enter(t, lr);
			lock_inflate(t, o, lr);
			lr->count++;

			/* Wake threads registered as flat-lock-contention waiters
			   on this object so they re-read the now-fat lockword. */

			notify_flc_waiters(t, o);

			return true;
		}
	}

	/* the lock is either contended or fat */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		/* check for recursive entering */
		if (lr->owner == t) {
			lr->count++;
			return true;
		}

		/* acquire the mutex of the lock record */

		lock_record_enter(t, lr);

		/* NOTE(review): lock_record_enter is expected to block until it
		   acquires the record and to set lr->owner to t — confirm in its
		   definition.  A freshly acquired fat lock must carry no stale
		   recursion count. */

		assert(lr->count == 0);

		return true;
	}

	/****** inflation path ******/

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Enter */
	jvmti_MonitorContendedEntering(false, o);
#endif

	/* Another thread holds the thin lock: record the contention and
	   block until the holder notices it (see sable_flc_waiting), then
	   start over from the CAS. */

	sable_flc_waiting(lockword, t, o);

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Entered */
	jvmti_MonitorContendedEntering(true, o);
#endif
	goto retry;
}
1051
1052
1053 /* lock_monitor_exit ***********************************************************
1054
1055    Decrement the counter of a (currently owned) monitor. If the counter
1056    reaches zero, release the monitor.
1057
1058    If the current thread is not the owner of the monitor, an 
1059    IllegalMonitorState exception is thrown.
1060
   IN:
      o............the object of which to exit the monitor
                   (the current thread is obtained internally)
1064
1065    RETURN VALUE:
1066       true.........everything ok,
1067           false........an exception has been thrown
1068
1069 *******************************************************************************/
1070
bool lock_monitor_exit(java_handle_t *o)
{
	threadobject *t;
	uintptr_t     lockword;
	ptrint        thinlock;

	/* Unlocking a null reference raises a NullPointerException. */

	if (o == NULL) {
		exceptions_throw_nullpointerexception();
		return false;
	}

	t = THREADOBJECT;

	thinlock = t->thinlock;

	/* We don't have to worry about stale values here, as any stale value */
	/* will indicate that we don't own the lock.                          */

	lockword = lock_lockword_get(t, o);

	/* most common case: we release a thin lock that we hold once */

	if (lockword == thinlock) {
		/* memory barrier for Java Memory Model */
		Atomic_write_memory_barrier();
		lock_lockword_set(t, o, THIN_UNLOCKED);
		/* Memory barrier for thin locking. */
		Atomic_memory_barrier();

		/* check if there has been a flat lock contention on this object */

		if (t->flc_bit) {
			DEBUGLOCKS(("thread %d saw flc bit", t->index));

			/* there has been a contention on this thin lock */
			notify_flc_waiters(t, o);
		}

		return true;
	}

	/* next common case: we release a recursive lock, count > 0 */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		/* Just decrement the recursion count; we still hold the lock. */
		lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
		return true;
	}

	/* either the lock is fat, or we don't hold it at all */

	if (IS_FAT_LOCK(lockword)) {

		lock_record_t *lr;

		lr = GET_FAT_LOCK(lockword);

		/* check if we own this monitor */
		/* We don't have to worry about stale values here, as any stale value */
		/* will be != t and thus fail this check.                             */

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();
			return false;
		}

		/* { the current thread `t` owns the lock record `lr` on object `o` } */

		if (lr->count != 0) {
			/* we had locked this one recursively. just decrement, it will */
			/* still be locked. */
			lr->count--;
			return true;
		}

		/* unlock this lock record */

		/* Clear the owner before releasing the mutex, so a thread that
		   acquires the mutex next never sees us as a stale owner. */

		lr->owner = NULL;
		Mutex_unlock(lr->mutex);

		return true;
	}

	/* legal thin lock cases have been handled above, so this is an error */

	exceptions_throw_illegalmonitorstateexception();

	return false;
}
1159
1160
1161 /* lock_record_add_waiter ******************************************************
1162
1163    Add a thread to the list of waiting threads of a lock record.
1164
1165    IN:
1166       lr...........the lock record
1167       thread.......the thread to add
1168
1169 *******************************************************************************/
1170
1171 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
1172 {
1173         lock_waiter_t *w;
1174
1175         /* Allocate a waiter data structure. */
1176
1177         w = NEW(lock_waiter_t);
1178
1179 #if defined(ENABLE_STATISTICS)
1180         if (opt_stat)
1181                 size_lock_waiter += sizeof(lock_waiter_t);
1182 #endif
1183
1184         /* Store the thread in the waiter structure. */
1185
1186         w->thread = thread;
1187
1188         /* Add the waiter as last entry to waiters list. */
1189
1190         list_add_last(lr->waiters, w);
1191 }
1192
1193
1194 /* lock_record_remove_waiter ***************************************************
1195
1196    Remove a thread from the list of waiting threads of a lock record.
1197
1198    IN:
1199       lr...........the lock record
1200       t............the current thread
1201
1202    PRE-CONDITION:
1203       The current thread must be the owner of the lock record.
1204    
1205 *******************************************************************************/
1206
1207 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
1208 {
1209         list_t        *l;
1210         lock_waiter_t *w;
1211
1212         /* Get the waiters list. */
1213
1214         l = lr->waiters;
1215
1216         for (w = list_first(l); w != NULL; w = list_next(l, w)) {
1217                 if (w->thread == thread) {
1218                         /* Remove the waiter entry from the list. */
1219
1220                         list_remove(l, w);
1221
1222                         /* Free the waiter data structure. */
1223
1224                         FREE(w, lock_waiter_t);
1225
1226 #if defined(ENABLE_STATISTICS)
1227                         if (opt_stat)
1228                                 size_lock_waiter -= sizeof(lock_waiter_t);
1229 #endif
1230
1231                         return;
1232                 }
1233         }
1234
1235         /* This should never happen. */
1236
1237         vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
1238 }
1239
1240
1241 /* lock_record_wait ************************************************************
1242
1243    Wait on a lock record for a given (maximum) amount of time.
1244
1245    IN:
1246       t............the current thread
1247           lr...........the lock record
1248           millis.......milliseconds of timeout
1249           nanos........nanoseconds of timeout
1250
1251    RETURN VALUE:
1252       true.........we have been interrupted,
1253       false........everything ok
1254
1255    PRE-CONDITION:
1256       The current thread must be the owner of the lock record.
1257           This is NOT checked by this function!
1258    
1259 *******************************************************************************/
1260
static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
{
	s4   lockcount;
	bool wasinterrupted = false;

	DEBUGLOCKS(("[lock_record_wait  : lr=%p, t=%p, millis=%lld, nanos=%d]",
				lr, thread, millis, nanos));

	/* { the thread t owns the fat lock record lr on the object o } */

	/* register us as waiter for this object */

	lock_record_add_waiter(lr, thread);

	/* remember the old lock count */

	lockcount = lr->count;

	/* unlock this record */

	/* Release the monitor completely — even when it was entered
	   recursively — so other threads can enter it while we wait. */

	lr->count = 0;
	lock_record_exit(thread, lr);

	/* wait until notified/interrupted/timed out */

	threads_wait_with_timeout_relative(thread, millis, nanos);

	/* re-enter the monitor */

	lock_record_enter(thread, lr);

	/* remove us from the list of waiting threads */

	lock_record_remove_waiter(lr, thread);

	/* restore the old lock count */

	lr->count = lockcount;

	/* We can only be signaled OR interrupted, not both. If both flags
	   are set, reset only signaled and leave the thread in
	   interrupted state. Otherwise, clear both. */

	if (!thread->signaled) {
		wasinterrupted = thread->interrupted;
		thread->interrupted = false;
	}

	thread->signaled = false;

	/* return if we have been interrupted */

	return wasinterrupted;
}
1315
1316
1317 /* lock_monitor_wait ***********************************************************
1318
1319    Wait on an object for a given (maximum) amount of time.
1320
1321    IN:
1322       t............the current thread
1323           o............the object
1324           millis.......milliseconds of timeout
1325           nanos........nanoseconds of timeout
1326
1327    PRE-CONDITION:
1328       The current thread must be the owner of the object's monitor.
1329    
1330 *******************************************************************************/
1331
static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
{
	uintptr_t      lockword;
	lock_record_t *lr;

	lockword = lock_lockword_get(t, o);

	/* check if we own this monitor */
	/* We don't have to worry about stale values here, as any stale value */
	/* will fail this check.                                              */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();
			return;
		}
	}
	else {
		/* it's a thin lock */

		if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
			exceptions_throw_illegalmonitorstateexception();
			return;
		}

		/* inflate this lock */

		/* A thread can only wait on a fat lock (thin locks track no
		   waiters), so the lock must be inflated before waiting. */

		lr = lock_hashtable_get(t, o);
		lock_record_enter(t, lr);
		lock_inflate(t, o, lr);

		/* Wake threads that contended on the former thin lock so they
		   re-read the now-fat lockword. */

		notify_flc_waiters(t, o);
	}

	/* { the thread t owns the fat lock record lr on the object o } */

	if (lock_record_wait(t, lr, millis, nanos))
		exceptions_throw_interruptedexception();
}
1374
1375
1376 /* lock_record_notify **********************************************************
1377
1378    Notify one thread or all threads waiting on the given lock record.
1379
1380    IN:
1381       t............the current thread
1382           lr...........the lock record
1383           one..........if true, only notify one thread
1384
1385    PRE-CONDITION:
1386       The current thread must be the owner of the lock record.
1387           This is NOT checked by this function!
1388    
1389 *******************************************************************************/
1390
1391 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1392 {
1393         list_t        *l;
1394         lock_waiter_t *w;
1395         threadobject  *waitingthread;
1396
1397         /* { the thread t owns the fat lock record lr on the object o } */
1398
1399         /* Get the waiters list. */
1400
1401         l = lr->waiters;
1402
1403         for (w = list_first(l); w != NULL; w = list_next(l, w)) {
1404                 /* signal the waiting thread */
1405
1406                 waitingthread = w->thread;
1407
1408                 /* We must skip threads which have already been notified. They will
1409                    remove themselves from the list. */
1410
1411                 if (waitingthread->signaled)
1412                         continue;
1413
1414                 /* Enter the wait-mutex. */
1415
1416                 Mutex_lock(waitingthread->waitmutex);
1417
1418                 DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, one=%d]",
1419                                         lr, t, waitingthread, one));
1420
1421                 Condition_signal(waitingthread->waitcond);
1422
1423                 /* Mark the thread as signaled. */
1424
1425                 waitingthread->signaled = true;
1426
1427                 /* Leave the wait-mutex. */
1428
1429                 Mutex_unlock(waitingthread->waitmutex);
1430
1431                 /* if we should only wake one, we are done */
1432
1433                 if (one)
1434                         break;
1435         }
1436 }
1437
1438
1439 /* lock_monitor_notify *********************************************************
1440
1441    Notify one thread or all threads waiting on the given object.
1442
1443    IN:
1444       t............the current thread
1445           o............the object
1446           one..........if true, only notify one thread
1447
1448    PRE-CONDITION:
1449       The current thread must be the owner of the object's monitor.
1450    
1451 *******************************************************************************/
1452
1453 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1454 {
1455         uintptr_t      lockword;
1456         lock_record_t *lr;
1457
1458         lockword = lock_lockword_get(t, o);
1459
1460         /* check if we own this monitor */
1461         /* We don't have to worry about stale values here, as any stale value */
1462         /* will fail this check.                                              */
1463
1464         if (IS_FAT_LOCK(lockword)) {
1465
1466                 lr = GET_FAT_LOCK(lockword);
1467
1468                 if (lr->owner != t) {
1469                         exceptions_throw_illegalmonitorstateexception();
1470                         return;
1471                 }
1472         }
1473         else {
1474                 /* it's a thin lock */
1475
1476                 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1477                         exceptions_throw_illegalmonitorstateexception();
1478                         return;
1479                 }
1480
1481                 /* no thread can wait on a thin lock, so there's nothing to do. */
1482                 return;
1483         }
1484
1485         /* { the thread t owns the fat lock record lr on the object o } */
1486
1487         lock_record_notify(t, lr, one);
1488 }
1489
1490
1491
1492 /*============================================================================*/
/* INQUIRY FUNCTIONS                                                          */
1494 /*============================================================================*/
1495
1496
1497 /* lock_is_held_by_current_thread **********************************************
1498
1499    Return true if the current thread owns the monitor of the given object.
1500
1501    IN:
1502           o............the object
1503
1504    RETURN VALUE:
1505       true, if the current thread holds the lock of this object.
1506    
1507 *******************************************************************************/
1508
1509 bool lock_is_held_by_current_thread(java_handle_t *o)
1510 {
1511         threadobject  *t;
1512         uintptr_t      lockword;
1513         lock_record_t *lr;
1514
1515         t = THREADOBJECT;
1516
1517         /* check if we own this monitor */
1518         /* We don't have to worry about stale values here, as any stale value */
1519         /* will fail this check.                                              */
1520
1521         lockword = lock_lockword_get(t, o);
1522
1523         if (IS_FAT_LOCK(lockword)) {
1524                 /* it's a fat lock */
1525
1526                 lr = GET_FAT_LOCK(lockword);
1527
1528                 return (lr->owner == t);
1529         }
1530         else {
1531                 /* it's a thin lock */
1532
1533                 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1534         }
1535 }
1536
1537
1538
1539 /*============================================================================*/
1540 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD                              */
1541 /*============================================================================*/
1542
1543
1544 /* lock_wait_for_object ********************************************************
1545
1546    Wait for the given object.
1547
1548    IN:
1549           o............the object
1550           millis.......milliseconds to wait
1551           nanos........nanoseconds to wait
1552    
1553 *******************************************************************************/
1554
1555 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1556 {
1557         threadobject *thread;
1558
1559         thread = THREADOBJECT;
1560
1561         lock_monitor_wait(thread, o, millis, nanos);
1562 }
1563
1564
1565 /* lock_notify_object **********************************************************
1566
1567    Notify one thread waiting on the given object.
1568
1569    IN:
1570           o............the object
1571    
1572 *******************************************************************************/
1573
1574 void lock_notify_object(java_handle_t *o)
1575 {
1576         threadobject *thread;
1577
1578         thread = THREADOBJECT;
1579
1580         lock_monitor_notify(thread, o, true);
1581 }
1582
1583
1584 /* lock_notify_all_object ******************************************************
1585
1586    Notify all threads waiting on the given object.
1587
1588    IN:
1589           o............the object
1590    
1591 *******************************************************************************/
1592
1593 void lock_notify_all_object(java_handle_t *o)
1594 {
1595         threadobject *thread;
1596
1597         thread = THREADOBJECT;
1598
1599         lock_monitor_notify(thread, o, false);
1600 }
1601
1602
1603 /*
1604  * These are local overrides for various environment variables in Emacs.
1605  * Please do not remove this and leave it at the end of the file, where
1606  * Emacs will automagically detect them.
1607  * ---------------------------------------------------------------------
1608  * Local variables:
1609  * mode: c
1610  * indent-tabs-mode: t
1611  * c-basic-offset: 4
1612  * tab-width: 4
1613  * End:
1614  * vim:noexpandtab:sw=4:ts=4:
1615  */