* src/mm/cacao-gc/gc.c (gc_reflist_weak): Added list for weak references.
[cacao.git] / src / threads / native / lock.c
1 /* src/threads/native/lock.c - lock implementation
2
3    Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
4    C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5    E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6    J. Wenninger, Institut f. Computersprachen - TU Wien
7
8    This file is part of CACAO.
9
10    This program is free software; you can redistribute it and/or
11    modify it under the terms of the GNU General Public License as
12    published by the Free Software Foundation; either version 2, or (at
13    your option) any later version.
14
15    This program is distributed in the hope that it will be useful, but
16    WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18    General Public License for more details.
19
20    You should have received a copy of the GNU General Public License
21    along with this program; if not, write to the Free Software
22    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23    02110-1301, USA.
24
25 */
26
27
28 #include "config.h"
29
30 #include <assert.h>
31 #include <stdint.h>
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <sys/time.h>
35 #include <pthread.h>
36
37 #include "vm/types.h"
38
39 #include "mm/memory.h"
40
41 #include "native/llni.h"
42
43 #include "threads/lock-common.h"
44
45 #include "threads/native/lock.h"
46 #include "threads/native/threads.h"
47
48 #include "toolbox/list.h"
49
50 #include "vm/global.h"
51 #include "vm/exceptions.h"
52 #include "vm/finalizer.h"
53 #include "vm/stringlocal.h"
54 #include "vm/vm.h"
55
56 #include "vmcore/options.h"
57
58 #if defined(ENABLE_STATISTICS)
59 # include "vmcore/statistics.h"
60 #endif
61
62 #if defined(ENABLE_VMLOG)
63 #include <vmlog_cacao.h>
64 #endif
65
66 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
67
68 #include "arch.h"
69
70 /* includes for atomic instructions: */
71
72 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
73 #include "threads/native/generic-primitives.h"
74 #else
75 #include "machine-instr.h"
76 #endif
77
78 #if defined(ENABLE_JVMTI)
79 #include "native/jvmti/cacaodbg.h"
80 #endif
81
82 #if defined(ENABLE_GC_BOEHM)
83 # include "mm/boehm-gc/include/gc.h"
84 #endif
85
86
87 /* debug **********************************************************************/
88
89 #if !defined(NDEBUG)
90 # define DEBUGLOCKS(format) \
91     do { \
92         if (opt_DebugLocks) { \
93             log_println format; \
94         } \
95     } while (0)
96 #else
97 # define DEBUGLOCKS(format)
98 #endif
99
100
101 /******************************************************************************/
102 /* MACROS                                                                     */
103 /******************************************************************************/
104
105 /* number of lock records in the first pool allocated for a thread */
106 #define LOCK_INITIAL_LOCK_RECORDS 8
107
108 #define LOCK_INITIAL_HASHTABLE_SIZE  1613  /* a prime in the middle between 1024 and 2048 */
109
110 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
111         ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
112
113
114 /******************************************************************************/
115 /* MACROS FOR THE FLAT LOCK CONTENTION BIT                                    */
116 /******************************************************************************/
117
118 #define LOCK_SET_FLC_BIT(obj)    ((obj)->hdrflags |= HDRFLAG_FLC)
119 #define LOCK_CLEAR_FLC_BIT(obj)  ((obj)->hdrflags &= ~ HDRFLAG_FLC)
120 #define LOCK_TEST_FLC_BIT(obj)   ((obj)->hdrflags & HDRFLAG_FLC)
121
122
123 /******************************************************************************/
124 /* MACROS FOR THIN/FAT LOCKS                                                  */
125 /******************************************************************************/
126
127 /* We use a variant of the tasuki locks described in the paper
128  *     
129  *     Tamiya Onodera, Kiyokuni Kawachiya
130  *     A Study of Locking Objects with Bimodal Fields
131  *     Proceedings of the ACM OOPSLA '99, pp. 223-237
132  *     1999
133  *
134  * The underlying thin locks are a variant of the thin locks described in
135  * 
136  *     Bacon, Konuru, Murthy, Serrano
137  *     Thin Locks: Featherweight Synchronization for Java
138  *         Proceedings of the ACM Conference on Programming Language Design and 
139  *         Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
140  *         June 1998
141  *
142  * In thin lock mode the lockword looks like this:
143  *
144  *     ,----------------------,-----------,---,
145  *     |      thread ID       |   count   | 0 |
146  *     `----------------------'-----------'---´
147  *
148  *     thread ID......the 'index' of the owning thread, or 0
149  *     count..........number of times the lock has been entered minus 1
150  *     0..............the shape bit is 0 in thin lock mode
151  *
152  * In fat lock mode it is basically a lock_record_t *:
153  *
154  *     ,----------------------------------,---,
155  *     |    lock_record_t * (without LSB) | 1 |
156  *     `----------------------------------'---´
157  *
158  *     1..............the shape bit is 1 in fat lock mode
159  */
160
161 #if SIZEOF_VOID_P == 8
162 #define THIN_LOCK_WORD_SIZE    64
163 #else
164 #define THIN_LOCK_WORD_SIZE    32
165 #endif
166
167 #define THIN_LOCK_SHAPE_BIT    0x01
168
169 #define THIN_UNLOCKED          0
170
171 #define THIN_LOCK_COUNT_SHIFT  1
172 #define THIN_LOCK_COUNT_SIZE   8
173 #define THIN_LOCK_COUNT_INCR   (1 << THIN_LOCK_COUNT_SHIFT)
174 #define THIN_LOCK_COUNT_MAX    ((1 << THIN_LOCK_COUNT_SIZE) - 1)
175 #define THIN_LOCK_COUNT_MASK   (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
176
177 #define THIN_LOCK_TID_SHIFT    (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
178 #define THIN_LOCK_TID_SIZE     (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
179
180 #define IS_THIN_LOCK(lockword)  (!((lockword) & THIN_LOCK_SHAPE_BIT))
181 #define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)
182
183 #define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
184 #define MAKE_FAT_LOCK(ptr)      ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)
185
186 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
187
188
189 /* global variables ***********************************************************/
190
191 /* hashtable mapping objects to lock records */
192 static lock_hashtable_t lock_hashtable;
193
194
195 /******************************************************************************/
196 /* PROTOTYPES                                                                 */
197 /******************************************************************************/
198
199 static void lock_hashtable_init(void);
200
201 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
202 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
203 static void lock_record_enter(threadobject *t, lock_record_t *lr);
204 static void lock_record_exit(threadobject *t, lock_record_t *lr);
205 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
206 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
207
208
209 /*============================================================================*/
210 /* INITIALIZATION OF DATA STRUCTURES                                          */
211 /*============================================================================*/
212
213
214 /* lock_init *******************************************************************
215
216    Initialize global data for locking.
217
218 *******************************************************************************/
219
/* Set up the global locking state. Must run before any monitor
   operation: it creates the object-to-lock-record hashtable (and,
   when vmlog is enabled, its lock instrumentation). */
void lock_init(void)
{
	/* Create the hashtable that maps objects to fat lock records. */

	lock_hashtable_init();

#if defined(ENABLE_VMLOG)
	vmlog_cacao_init_lock();
#endif
}
230
231
232 /* lock_pre_compute_thinlock ***************************************************
233
234    Pre-compute the thin lock value for a thread index.
235
236    IN:
237       index........the thead index (>= 1)
238
239    RETURN VALUE:
240       the thin lock value for this thread index
241
242 *******************************************************************************/
243
244 ptrint lock_pre_compute_thinlock(s4 index)
245 {
246         return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
247 }
248
249
250 /* lock_record_new *************************************************************
251
252    Allocate a lock record.
253
254 *******************************************************************************/
255
256 static lock_record_t *lock_record_new(void)
257 {
258         lock_record_t *lr;
259
260         /* allocate the data structure on the C heap */
261
262         lr = NEW(lock_record_t);
263
264 #if defined(ENABLE_STATISTICS)
265         if (opt_stat)
266                 size_lock_record += sizeof(lock_record_t);
267 #endif
268
269         /* initialize the members */
270
271         lr->object  = NULL;
272         lr->owner   = NULL;
273         lr->count   = 0;
274         lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));
275
276 #if defined(ENABLE_GC_CACAO)
277         /* register the lock object as weak reference with the GC */
278
279         gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
280 #endif
281
282         /* initialize the mutex */
283
284         pthread_mutex_init(&(lr->mutex), NULL);
285
286         DEBUGLOCKS(("[lock_record_new   : lr=%p]", (void *) lr));
287
288         return lr;
289 }
290
291
292 /* lock_record_free ************************************************************
293
294    Free a lock record.
295
296    IN:
297        lr....lock record to free
298
299 *******************************************************************************/
300
301 static void lock_record_free(lock_record_t *lr)
302 {
303         DEBUGLOCKS(("[lock_record_free  : lr=%p]", (void *) lr));
304
305         /* Destroy the mutex. */
306
307         pthread_mutex_destroy(&(lr->mutex));
308
309 #if defined(ENABLE_GC_CACAO)
310         /* unregister the lock object reference with the GC */
311
312         gc_weakreference_unregister(&(lr->object));
313 #endif
314
315         /* Free the waiters list. */
316
317         list_free(lr->waiters);
318
319         /* Free the data structure. */
320
321         FREE(lr, lock_record_t);
322
323 #if defined(ENABLE_STATISTICS)
324         if (opt_stat)
325                 size_lock_record -= sizeof(lock_record_t);
326 #endif
327 }
328
329
330 /*============================================================================*/
331 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS                                  */
332 /*============================================================================*/
333
334 /* lock_hashtable_init *********************************************************
335
336    Initialize the global hashtable mapping objects to lock records.
337
338 *******************************************************************************/
339
340 static void lock_hashtable_init(void)
341 {
342         pthread_mutex_init(&(lock_hashtable.mutex), NULL);
343
344         lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
345         lock_hashtable.entries = 0;
346         lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);
347
348 #if defined(ENABLE_STATISTICS)
349         if (opt_stat)
350                 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
351 #endif
352
353         MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
354 }
355
356
357 /* lock_hashtable_grow *********************************************************
358
359    Grow the lock record hashtable to about twice its current size and
360    rehash the entries.
361
362 *******************************************************************************/
363
364 /* must be called with hashtable mutex locked */
365 static void lock_hashtable_grow(void)
366 {
367         u4 oldsize;
368         u4 newsize;
369         lock_record_t **oldtable;
370         lock_record_t **newtable;
371         lock_record_t *lr;
372         lock_record_t *next;
373         u4 i;
374         u4 h;
375         u4 newslot;
376
377         /* allocate a new table */
378
379         oldsize = lock_hashtable.size;
380         newsize = oldsize*2 + 1; /* XXX should use prime numbers */
381
382         DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
383
384         oldtable = lock_hashtable.ptr;
385         newtable = MNEW(lock_record_t *, newsize);
386
387 #if defined(ENABLE_STATISTICS)
388         if (opt_stat)
389                 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
390 #endif
391
392         MZERO(newtable, lock_record_t *, newsize);
393
394         /* rehash the entries */
395
396         for (i = 0; i < oldsize; i++) {
397                 lr = oldtable[i];
398                 while (lr) {
399                         next = lr->hashlink;
400
401                         h = heap_hashcode(lr->object);
402                         newslot = h % newsize;
403
404                         lr->hashlink = newtable[newslot];
405                         newtable[newslot] = lr;
406
407                         lr = next;
408                 }
409         }
410
411         /* replace the old table */
412
413         lock_hashtable.ptr  = newtable;
414         lock_hashtable.size = newsize;
415
416         MFREE(oldtable, lock_record_t *, oldsize);
417
418 #if defined(ENABLE_STATISTICS)
419         if (opt_stat)
420                 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
421 #endif
422 }
423
424
425 /* lock_hashtable_cleanup ******************************************************
426
427    Removes (and frees) lock records which have a cleared object reference
428    from the hashtable. The locked object was reclaimed by the GC.
429
430 *******************************************************************************/
431
#if defined(ENABLE_GC_CACAO)
/* Walk the whole hashtable and remove (and free) every lock record
   whose weak object reference the GC has cleared, i.e. whose locked
   object has been reclaimed. Takes and releases the hashtable mutex. */
void lock_hashtable_cleanup(void)
{
	lock_record_t *lr;
	lock_record_t *prev;
	lock_record_t *next;
	u4 i;                    /* u4, not int: lock_hashtable.size is unsigned */

	/* lock the hashtable */

	pthread_mutex_lock(&(lock_hashtable.mutex));

	/* search the hashtable for cleared references */

	for (i = 0; i < lock_hashtable.size; i++) {
		lr = lock_hashtable.ptr[i];
		prev = NULL;

		while (lr) {
			next = lr->hashlink;

			/* remove lock records with cleared references */

			if (lr->object == NULL) {

				/* unlink the lock record from the hashtable */

				if (prev == NULL)
					lock_hashtable.ptr[i] = next;
				else
					prev->hashlink = next;

				/* keep the entry count accurate, otherwise the
				   grow heuristic in lock_hashtable_get drifts and
				   triggers needless growth */

				lock_hashtable.entries--;

				/* free the lock record */

				lock_record_free(lr);

			} else {
				prev = lr;
			}

			lr = next;
		}
	}

	/* unlock the hashtable */

	pthread_mutex_unlock(&(lock_hashtable.mutex));
}
#endif
484
485
486 /* lock_hashtable_get **********************************************************
487
488    Find the lock record for the given object.  If it does not exists,
489    yet, create it and enter it in the hashtable.
490
491    IN:
492       t....the current thread
493           o....the object to look up
494
495    RETURN VALUE:
496       the lock record to use for this object
497
498 *******************************************************************************/
499
500 #if defined(ENABLE_GC_BOEHM)
501 static void lock_record_finalizer(void *object, void *p);
502 #endif
503
504 static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
505 {
506         uintptr_t      lockword;
507         u4             slot;
508         lock_record_t *lr;
509
510         lockword = lock_lockword_get(t, o);
511
512         if (IS_FAT_LOCK(lockword))
513                 return GET_FAT_LOCK(lockword);
514
515         /* lock the hashtable */
516
517         pthread_mutex_lock(&(lock_hashtable.mutex));
518
519         /* lookup the lock record in the hashtable */
520
521         LLNI_CRITICAL_START_THREAD(t);
522         slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
523         lr   = lock_hashtable.ptr[slot];
524
525         for (; lr != NULL; lr = lr->hashlink) {
526                 if (lr->object == LLNI_DIRECT(o))
527                         break;
528         }
529         LLNI_CRITICAL_END_THREAD(t);
530
531         if (lr == NULL) {
532                 /* not found, we must create a new one */
533
534                 lr = lock_record_new();
535
536                 LLNI_CRITICAL_START_THREAD(t);
537                 lr->object = LLNI_DIRECT(o);
538                 LLNI_CRITICAL_END_THREAD(t);
539
540 #if defined(ENABLE_GC_BOEHM)
541                 /* register new finalizer to clean up the lock record */
542
543                 GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
544 #endif
545
546                 /* enter it in the hashtable */
547
548                 lr->hashlink             = lock_hashtable.ptr[slot];
549                 lock_hashtable.ptr[slot] = lr;
550                 lock_hashtable.entries++;
551
552                 /* check whether the hash should grow */
553
554                 if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
555                         lock_hashtable_grow();
556                 }
557         }
558
559         /* unlock the hashtable */
560
561         pthread_mutex_unlock(&(lock_hashtable.mutex));
562
563         /* return the new lock record */
564
565         return lr;
566 }
567
568
569 /* lock_hashtable_remove *******************************************************
570
571    Remove the lock record for the given object from the hashtable
572    and free it afterwards.
573
574    IN:
575        t....the current thread
576        o....the object to look up
577
578 *******************************************************************************/
579
580 static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
581 {
582         uintptr_t      lockword;
583         lock_record_t *lr;
584         u4             slot;
585         lock_record_t *tmplr;
586
587         /* lock the hashtable */
588
589         pthread_mutex_lock(&(lock_hashtable.mutex));
590
591         /* get lock record */
592
593         lockword = lock_lockword_get(t, o);
594
595         assert(IS_FAT_LOCK(lockword));
596
597         lr = GET_FAT_LOCK(lockword);
598
599         /* remove the lock-record from the hashtable */
600
601         LLNI_CRITICAL_START_THREAD(t);
602         slot  = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
603         tmplr = lock_hashtable.ptr[slot];
604         LLNI_CRITICAL_END_THREAD(t);
605
606         if (tmplr == lr) {
607                 /* special handling if it's the first in the chain */
608
609                 lock_hashtable.ptr[slot] = lr->hashlink;
610         }
611         else {
612                 for (; tmplr != NULL; tmplr = tmplr->hashlink) {
613                         if (tmplr->hashlink == lr) {
614                                 tmplr->hashlink = lr->hashlink;
615                                 break;
616                         }
617                 }
618
619                 assert(tmplr != NULL);
620         }
621
622         /* decrease entry count */
623
624         lock_hashtable.entries--;
625
626         /* unlock the hashtable */
627
628         pthread_mutex_unlock(&(lock_hashtable.mutex));
629
630         /* free the lock record */
631
632         lock_record_free(lr);
633 }
634
635
636 /* lock_record_finalizer *******************************************************
637
638    XXX Remove me for exact GC.
639
640 *******************************************************************************/
641
642 static void lock_record_finalizer(void *object, void *p)
643 {
644         java_handle_t *o;
645         classinfo     *c;
646
647         o = (java_handle_t *) object;
648
649 #if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
650         /* XXX this is only a dirty hack to make Boehm work with handles */
651
652         o = LLNI_WRAP((java_object_t *) o);
653 #endif
654
655         LLNI_class_get(o, c);
656
657 #if !defined(NDEBUG)
658         if (opt_DebugFinalizer) {
659                 log_start();
660                 log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
661                 class_print(c);
662                 log_print("]");
663                 log_finish();
664         }
665 #endif
666
667         /* check for a finalizer function */
668
669         if (c->finalizer != NULL)
670                 finalizer_run(object, p);
671
672         /* remove the lock-record entry from the hashtable and free it */
673
674         lock_hashtable_remove(THREADOBJECT, o);
675 }
676
677
678 /*============================================================================*/
679 /* OBJECT LOCK INITIALIZATION                                                 */
680 /*============================================================================*/
681
682
683 /* lock_init_object_lock *******************************************************
684
685    Initialize the monitor pointer of the given object. The monitor gets
686    initialized to an unlocked state.
687
688 *******************************************************************************/
689
690 void lock_init_object_lock(java_object_t *o)
691 {
692         assert(o);
693
694         o->lockword = THIN_UNLOCKED;
695         LOCK_CLEAR_FLC_BIT(o);
696 }
697
698
699 /* lock_get_initial_lock_word **************************************************
700
701    Returns the initial (unlocked) lock word. The pointer is
702    required in the code generator to set up a virtual
703    java_objectheader for code patch locking.
704
705 *******************************************************************************/
706
707 lock_record_t *lock_get_initial_lock_word(void)
708 {
709         return (lock_record_t *) THIN_UNLOCKED;
710 }
711
712
713
714 /*============================================================================*/
715 /* LOCKING ALGORITHM                                                          */
716 /*============================================================================*/
717
718
719 /* lock_lockword_get ***********************************************************
720
721    Get the lockword for the given object.
722
723    IN:
724       t............the current thread
725       o............the object
726
727 *******************************************************************************/
728
729 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
730 {
731         uintptr_t lockword;
732
733         LLNI_CRITICAL_START_THREAD(t);
734         lockword = LLNI_DIRECT(o)->lockword;
735         LLNI_CRITICAL_END_THREAD(t);
736
737         return lockword;
738 }
739
740
741 /* lock_lockword_set ***********************************************************
742
743    Set the lockword for the given object.
744
745    IN:
746       t............the current thread
747       o............the object
748           lockword.....the new lockword value
749
750 *******************************************************************************/
751
752 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
753 {
754         LLNI_CRITICAL_START_THREAD(t);
755         LLNI_DIRECT(o)->lockword = lockword;
756         LLNI_CRITICAL_END_THREAD(t);
757 }
758
759
760 /* lock_record_enter ***********************************************************
761
762    Enter the lock represented by the given lock record.
763
764    IN:
765       t.................the current thread
766           lr................the lock record
767
768 *******************************************************************************/
769
770 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
771 {
772         pthread_mutex_lock(&(lr->mutex));
773         lr->owner = t;
774 }
775
776
777 /* lock_record_exit ************************************************************
778
779    Release the lock represented by the given lock record.
780
781    IN:
782       t.................the current thread
783           lr................the lock record
784
785    PRE-CONDITION:
786       The current thread must own the lock represented by this lock record.
787           This is NOT checked by this function!
788
789 *******************************************************************************/
790
791 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
792 {
793         lr->owner = NULL;
794         pthread_mutex_unlock(&(lr->mutex));
795 }
796
797
798 /* lock_inflate ****************************************************************
799
800    Inflate the lock of the given object. This may only be called by the
801    owner of the monitor of the object.
802
803    IN:
804       t............the current thread
805           o............the object of which to inflate the lock
806           lr...........the lock record to install. The current thread must
807                        own the lock of this lock record!
808
809    PRE-CONDITION:
810       The current thread must be the owner of this object's monitor AND
811           of the lock record's lock!
812
813 *******************************************************************************/
814
/* Turn o's thin lock into a fat lock backed by lr. The caller must
   own both o's monitor and lr's mutex. NOTE(review): the ordering
   below (transfer count, clear FLC bit, notify, THEN publish the fat
   lockword) appears deliberate — do not reorder. */
static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
{
	uintptr_t lockword;

	/* get the current lock count */

	lockword = lock_lockword_get(t, o);

	if (IS_FAT_LOCK(lockword)) {
		/* already inflated: it must be to exactly this record */
		assert(GET_FAT_LOCK(lockword) == lr);
	}
	else {
		/* still thin: we must be the thin-lock owner */
		assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);

		/* copy the count from the thin lock */

		lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
	}

	DEBUGLOCKS(("[lock_inflate      : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
				lr, t, o, lockword, lr->count));

	/* clear flat-lock-contention bit */

	LLNI_CRITICAL_START_THREAD(t);
	LOCK_CLEAR_FLC_BIT(LLNI_DIRECT(o));
	LLNI_CRITICAL_END_THREAD(t);

	/* notify waiting objects */
	/* wake ALL waiters (one=false) so threads blocked in the
	   inflation loop of lock_monitor_enter re-check the lockword */

	lock_record_notify(t, lr, false);

	/* install it */

	lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
}
851
852
853 /* lock_monitor_enter **********************************************************
854
855    Acquire the monitor of the given object. If the current thread already
856    owns the monitor, the lock counter is simply increased.
857
858    This function blocks until it can acquire the monitor.
859
860    IN:
861       t............the current thread
862           o............the object of which to enter the monitor
863
864    RETURN VALUE:
865       true.........the lock has been successfully acquired
866           false........an exception has been thrown
867
868 *******************************************************************************/
869
/* Acquire o's monitor for the current thread, blocking if necessary.
   Fast path: one CAS installs the pre-computed thin lock. Recursive
   entries bump the thin count until it overflows, at which point the
   lock is inflated. Contended thin locks go through the FLC-bit
   inflation loop at the bottom. Returns true on success, false only
   when o is NULL (NullPointerException thrown). */
bool lock_monitor_enter(java_handle_t *o)
{
	threadobject  *t;
	/* CAUTION: This code assumes that ptrint is unsigned! */
	ptrint         lockword;
	ptrint         thinlock;
	lock_record_t *lr;

	if (o == NULL) {
		exceptions_throw_nullpointerexception();
		return false;
	}

	t = THREADOBJECT;

	/* pre-computed thin-lock word for this thread (thread ID, count 0) */
	thinlock = t->thinlock;

	/* most common case: try to thin-lock an unlocked object */

	LLNI_CRITICAL_START_THREAD(t);
	lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
	LLNI_CRITICAL_END_THREAD(t);

	if (lockword == THIN_UNLOCKED) {
		/* success. we locked it */
		/* The Java Memory Model requires a memory barrier here: */
		MEMORY_BARRIER();
		return true;
	}

	/* next common case: recursive lock with small recursion count */
	/* We don't have to worry about stale values here, as any stale value  */
	/* will indicate another thread holding the lock (or an inflated lock) */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		/* we own this monitor               */
		/* check the current recursion count */
		/* XOR cancels the identical TID bits, leaving only the
		   count field for the comparison below */

		if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
		{
			/* the recursion count is low enough */

			lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);

			/* success. we locked it */
			return true;
		}
		else {
			/* recursion count overflow: inflate, then count the
			   current (overflowing) entry on the fat lock */

			lr = lock_hashtable_get(t, o);
			lock_record_enter(t, lr);
			lock_inflate(t, o, lr);
			lr->count++;

			return true;
		}
	}

	/* the lock is either contented or fat */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		/* check for recursive entering */
		if (lr->owner == t) {
			lr->count++;
			return true;
		}

		/* acquire the mutex of the lock record */
		/* blocks until the current owner exits the monitor */

		lock_record_enter(t, lr);

		/* a freshly acquired fat lock must show no recursion */
		assert(lr->count == 0);

		return true;
	}

	/****** inflation path ******/
	/* Another thread thin-holds the lock. Get (or create) the fat
	   lock record, take its mutex, then cooperate with the owner via
	   the FLC bit until the lock is inflated. */

	/* first obtain the lock record for this object */

	lr = lock_hashtable_get(t, o);

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Enter */
	jvmti_MonitorContendedEntering(false, o);
#endif

	/* enter the monitor */

	lock_record_enter(t, lr);

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Entered */
	jvmti_MonitorContendedEntering(true, o);
#endif

	/* inflation loop */

	while (IS_THIN_LOCK(lockword = lock_lockword_get(t, o))) {
		/* Set the flat lock contention bit to let the owning thread
		   know that we want to be notified of unlocking. */

		LLNI_CRITICAL_START_THREAD(t);
		LOCK_SET_FLC_BIT(LLNI_DIRECT(o));
		LLNI_CRITICAL_END_THREAD(t);

		DEBUGLOCKS(("thread %d set flc bit on %p lr %p",
					t->index, (void*) o, (void*) lr));

		/* try to lock the object */

		LLNI_CRITICAL_START_THREAD(t);
		lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
		LLNI_CRITICAL_END_THREAD(t);

		if (lockword == THIN_UNLOCKED) {
			/* we can inflate the lock ourselves */

			DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
						t->index, (void*) o, (void*) lr));

			lock_inflate(t, o, lr);
		}
		else {
			/* Wait until another thread sees the flc bit and notifies
			   us of unlocking. */
			/* return value deliberately ignored: interruption is
			   handled by re-checking the lockword in the loop */

			(void) lock_record_wait(t, lr, 0, 0);
		}
	}

	/* we own the inflated lock now */

	return true;
}
1009
1010
1011 /* lock_monitor_exit ***********************************************************
1012
1013    Decrement the counter of a (currently owned) monitor. If the counter
1014    reaches zero, release the monitor.
1015
   If the current thread is not the owner of the monitor, an
   IllegalMonitorStateException is thrown.
1018
1019    IN:
1020       t............the current thread
1021           o............the object of which to exit the monitor
1022
1023    RETURN VALUE:
1024       true.........everything ok,
1025           false........an exception has been thrown
1026
1027 *******************************************************************************/
1028
bool lock_monitor_exit(java_handle_t *o)
{
	threadobject *t;
	uintptr_t     lockword;
	ptrint        thinlock;

	/* Exiting the monitor of a null reference throws NPE. */

	if (o == NULL) {
		exceptions_throw_nullpointerexception();
		return false;
	}

	t = THREADOBJECT;

	/* The thin-lock word this thread would have stored on a
	   single, non-recursive lock of any object. */

	thinlock = t->thinlock;

	/* We don't have to worry about stale values here, as any stale value */
	/* will indicate that we don't own the lock.                          */

	lockword = lock_lockword_get(t, o);

	/* most common case: we release a thin lock that we hold once */

	if (lockword == thinlock) {
		/* memory barrier for Java Memory Model */
		MEMORY_BARRIER();
		lock_lockword_set(t, o, THIN_UNLOCKED);
		/* memory barrier for thin locking */
		MEMORY_BARRIER();

		/* check if there has been a flat lock contention on this object */

		if (LOCK_TEST_FLC_BIT(LLNI_DIRECT(o))) {
			lock_record_t *lr;

			DEBUGLOCKS(("thread %d saw flc bit on %p",
						t->index, (void*) o));

			/* there has been a contention on this thin lock */

			lr = lock_hashtable_get(t, o);

			DEBUGLOCKS(("thread %d for %p got lr %p",
						t->index, (void*) o, (void*) lr));

			lock_record_enter(t, lr);

			/* Re-test the FLC bit under the record mutex: a contender
			   sets it before waiting, so testing again here avoids a
			   lost notification if it was set between our first test
			   and acquiring the mutex. */

			if (LOCK_TEST_FLC_BIT(LLNI_DIRECT(o))) {
				/* notify a thread that it can try to inflate the lock now */

				lock_record_notify(t, lr, true);
			}

			lock_record_exit(t, lr);
		}

		return true;
	}

	/* next common case: we release a recursive lock, count > 0 */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		/* Drop one recursion level by decrementing the embedded count. */
		lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
		return true;
	}

	/* either the lock is fat, or we don't hold it at all */

	if (IS_FAT_LOCK(lockword)) {

		lock_record_t *lr;

		lr = GET_FAT_LOCK(lockword);

		/* check if we own this monitor */
		/* We don't have to worry about stale values here, as any stale value */
		/* will be != t and thus fail this check.                             */

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();
			return false;
		}

		/* { the current thread `t` owns the lock record `lr` on object `o` } */

		if (lr->count != 0) {
			/* we had locked this one recursively. just decrement, it will */
			/* still be locked. */
			lr->count--;
			return true;
		}

		/* unlock this lock record */

		lr->owner = NULL;
		pthread_mutex_unlock(&(lr->mutex));

		return true;
	}

	/* legal thin lock cases have been handled above, so this is an error */

	exceptions_throw_illegalmonitorstateexception();

	return false;
}
1134
1135
1136 /* lock_record_add_waiter ******************************************************
1137
1138    Add a thread to the list of waiting threads of a lock record.
1139
1140    IN:
1141       lr...........the lock record
1142       thread.......the thread to add
1143
1144 *******************************************************************************/
1145
1146 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
1147 {
1148         lock_waiter_t *w;
1149
1150         /* Allocate a waiter data structure. */
1151
1152         w = NEW(lock_waiter_t);
1153
1154 #if defined(ENABLE_STATISTICS)
1155         if (opt_stat)
1156                 size_lock_waiter += sizeof(lock_waiter_t);
1157 #endif
1158
1159         /* Store the thread in the waiter structure. */
1160
1161         w->thread = thread;
1162
1163         /* Add the waiter as last entry to waiters list. */
1164
1165         list_add_last(lr->waiters, w);
1166 }
1167
1168
1169 /* lock_record_remove_waiter ***************************************************
1170
1171    Remove a thread from the list of waiting threads of a lock record.
1172
1173    IN:
1174       lr...........the lock record
      thread.......the thread to remove
1176
1177    PRE-CONDITION:
1178       The current thread must be the owner of the lock record.
1179    
1180 *******************************************************************************/
1181
1182 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
1183 {
1184         list_t        *l;
1185         lock_waiter_t *w;
1186
1187         /* Get the waiters list. */
1188
1189         l = lr->waiters;
1190
1191         for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
1192                 if (w->thread == thread) {
1193                         /* Remove the waiter entry from the list. */
1194
1195                         list_remove_unsynced(l, w);
1196
1197                         /* Free the waiter data structure. */
1198
1199                         FREE(w, lock_waiter_t);
1200
1201 #if defined(ENABLE_STATISTICS)
1202                         if (opt_stat)
1203                                 size_lock_waiter -= sizeof(lock_waiter_t);
1204 #endif
1205
1206                         return;
1207                 }
1208         }
1209
1210         /* This should never happen. */
1211
1212         vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
1213 }
1214
1215
1216 /* lock_record_wait ************************************************************
1217
1218    Wait on a lock record for a given (maximum) amount of time.
1219
1220    IN:
1221       t............the current thread
1222           lr...........the lock record
1223           millis.......milliseconds of timeout
1224           nanos........nanoseconds of timeout
1225
1226    RETURN VALUE:
1227       true.........we have been interrupted,
1228       false........everything ok
1229
1230    PRE-CONDITION:
1231       The current thread must be the owner of the lock record.
1232           This is NOT checked by this function!
1233    
1234 *******************************************************************************/
1235
static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
{
	s4   lockcount;
	bool wasinterrupted;

	DEBUGLOCKS(("[lock_record_wait  : lr=%p, t=%p, millis=%lld, nanos=%d]",
				lr, thread, millis, nanos));

	/* { the thread t owns the fat lock record lr on the object o } */

	/* register us as waiter for this object */
	/* NOTE: this must happen BEFORE the record is released below, so
	   a notifier that acquires the record immediately afterwards
	   already sees us in the waiters list. */

	lock_record_add_waiter(lr, thread);

	/* remember the old lock count */

	lockcount = lr->count;

	/* unlock this record */
	/* The recursion count is zeroed so lock_record_exit() performs a
	   full release of the monitor while we wait. */

	lr->count = 0;
	lock_record_exit(thread, lr);

	/* wait until notified/interrupted/timed out */

	wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);

	/* re-enter the monitor */

	lock_record_enter(thread, lr);

	/* remove us from the list of waiting threads */
	/* Done while holding the record again, matching the
	   pre-condition of lock_record_remove_waiter(). */

	lock_record_remove_waiter(lr, thread);

	/* restore the old lock count */

	lr->count = lockcount;

	/* return if we have been interrupted */

	return wasinterrupted;
}
1279
1280
1281 /* lock_monitor_wait ***********************************************************
1282
1283    Wait on an object for a given (maximum) amount of time.
1284
1285    IN:
1286       t............the current thread
1287           o............the object
1288           millis.......milliseconds of timeout
1289           nanos........nanoseconds of timeout
1290
1291    PRE-CONDITION:
1292       The current thread must be the owner of the object's monitor.
1293    
1294 *******************************************************************************/
1295
static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
{
	uintptr_t      lockword;
	lock_record_t *lr;

	lockword = lock_lockword_get(t, o);

	/* check if we own this monitor */
	/* We don't have to worry about stale values here, as any stale value */
	/* will fail this check.                                              */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();
			return;
		}
	}
	else {
		/* it's a thin lock */

		/* Compare with the recursion count masked out: we own the
		   monitor at any recursion depth. */

		if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
			exceptions_throw_illegalmonitorstateexception();
			return;
		}

		/* inflate this lock */
		/* Waiting requires a fat lock (condition variable), so
		   convert the thin lock: fetch the record, acquire its
		   mutex, then publish it in the lockword. */

		lr = lock_hashtable_get(t, o);
		lock_record_enter(t, lr);
		lock_inflate(t, o, lr);
	}

	/* { the thread t owns the fat lock record lr on the object o } */

	if (lock_record_wait(t, lr, millis, nanos))
		exceptions_throw_interruptedexception();
}
1336
1337
1338 /* lock_record_notify **********************************************************
1339
1340    Notify one thread or all threads waiting on the given lock record.
1341
1342    IN:
1343       t............the current thread
1344           lr...........the lock record
1345           one..........if true, only notify one thread
1346
1347    PRE-CONDITION:
1348       The current thread must be the owner of the lock record.
1349           This is NOT checked by this function!
1350    
1351 *******************************************************************************/
1352
static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
{
	list_t        *l;
	lock_waiter_t *w;
	threadobject  *waitingthread;

	/* { the thread t owns the fat lock record lr on the object o } */

	/* Get the waiters list. */

	l = lr->waiters;

	for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
		/* signal the waiting thread */

		waitingthread = w->thread;

		/* If the thread was already signaled but hasn't removed
		   itself from the list yet, just ignore it. */

		if (waitingthread->signaled == true)
			continue;

		/* Enter the wait-mutex. */
		/* The waitmutex guards both the sleeping flag and the
		   condition variable of the target thread. */

		pthread_mutex_lock(&(waitingthread->waitmutex));

		DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
					lr, t, waitingthread, waitingthread->sleeping, one));

		/* Signal the thread if it's sleeping. */

		if (waitingthread->sleeping)
			pthread_cond_signal(&(waitingthread->waitcond));

		/* Mark the thread as signaled. */
		/* Set even when the thread was not yet sleeping, so it can
		   detect the pending notification instead of blocking. */

		waitingthread->signaled = true;

		/* Leave the wait-mutex. */

		pthread_mutex_unlock(&(waitingthread->waitmutex));

		/* if we should only wake one, we are done */

		if (one)
			break;
	}
}
1402
1403
1404 /* lock_monitor_notify *********************************************************
1405
1406    Notify one thread or all threads waiting on the given object.
1407
1408    IN:
1409       t............the current thread
1410           o............the object
1411           one..........if true, only notify one thread
1412
1413    PRE-CONDITION:
1414       The current thread must be the owner of the object's monitor.
1415    
1416 *******************************************************************************/
1417
static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
{
	uintptr_t      lockword;
	lock_record_t *lr;

	lockword = lock_lockword_get(t, o);

	/* check if we own this monitor */
	/* We don't have to worry about stale values here, as any stale value */
	/* will fail this check.                                              */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();
			return;
		}
	}
	else {
		/* it's a thin lock */

		/* Compare with the recursion count masked out: we own the
		   monitor at any recursion depth. */

		if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
			exceptions_throw_illegalmonitorstateexception();
			return;
		}

		/* inflate this lock */
		/* Notification works on the fat lock's waiters list, so the
		   thin lock must be inflated first (same sequence as in
		   lock_monitor_wait). */

		lr = lock_hashtable_get(t, o);
		lock_record_enter(t, lr);
		lock_inflate(t, o, lr);
	}

	/* { the thread t owns the fat lock record lr on the object o } */

	lock_record_notify(t, lr, one);
}
1457
1458
1459
1460 /*============================================================================*/
/* INQUIRY FUNCTIONS                                                          */
1462 /*============================================================================*/
1463
1464
1465 /* lock_is_held_by_current_thread **********************************************
1466
1467    Return true if the current thread owns the monitor of the given object.
1468
1469    IN:
1470           o............the object
1471
1472    RETURN VALUE:
1473       true, if the current thread holds the lock of this object.
1474    
1475 *******************************************************************************/
1476
1477 bool lock_is_held_by_current_thread(java_handle_t *o)
1478 {
1479         threadobject  *t;
1480         uintptr_t      lockword;
1481         lock_record_t *lr;
1482
1483         t = THREADOBJECT;
1484
1485         /* check if we own this monitor */
1486         /* We don't have to worry about stale values here, as any stale value */
1487         /* will fail this check.                                              */
1488
1489         lockword = lock_lockword_get(t, o);
1490
1491         if (IS_FAT_LOCK(lockword)) {
1492                 /* it's a fat lock */
1493
1494                 lr = GET_FAT_LOCK(lockword);
1495
1496                 return (lr->owner == t);
1497         }
1498         else {
1499                 /* it's a thin lock */
1500
1501                 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1502         }
1503 }
1504
1505
1506
1507 /*============================================================================*/
1508 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD                              */
1509 /*============================================================================*/
1510
1511
1512 /* lock_wait_for_object ********************************************************
1513
1514    Wait for the given object.
1515
1516    IN:
1517           o............the object
1518           millis.......milliseconds to wait
1519           nanos........nanoseconds to wait
1520    
1521 *******************************************************************************/
1522
1523 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1524 {
1525         threadobject *thread;
1526
1527         thread = THREADOBJECT;
1528
1529         lock_monitor_wait(thread, o, millis, nanos);
1530 }
1531
1532
1533 /* lock_notify_object **********************************************************
1534
1535    Notify one thread waiting on the given object.
1536
1537    IN:
1538           o............the object
1539    
1540 *******************************************************************************/
1541
1542 void lock_notify_object(java_handle_t *o)
1543 {
1544         threadobject *thread;
1545
1546         thread = THREADOBJECT;
1547
1548         lock_monitor_notify(thread, o, true);
1549 }
1550
1551
1552 /* lock_notify_all_object ******************************************************
1553
1554    Notify all threads waiting on the given object.
1555
1556    IN:
1557           o............the object
1558    
1559 *******************************************************************************/
1560
1561 void lock_notify_all_object(java_handle_t *o)
1562 {
1563         threadobject *thread;
1564
1565         thread = THREADOBJECT;
1566
1567         lock_monitor_notify(thread, o, false);
1568 }
1569
1570
1571 /*
1572  * These are local overrides for various environment variables in Emacs.
1573  * Please do not remove this and leave it at the end of the file, where
1574  * Emacs will automagically detect them.
1575  * ---------------------------------------------------------------------
1576  * Local variables:
1577  * mode: c
1578  * indent-tabs-mode: t
1579  * c-basic-offset: 4
1580  * tab-width: 4
1581  * End:
1582  * vim:noexpandtab:sw=4:ts=4:
1583  */