* src/vm/finalizer.c (finalizer_run): Fixed for handles and added boehm-hack.
[cacao.git] / src / threads / native / lock.c
1 /* src/threads/native/lock.c - lock implementation
2
3    Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
4    C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5    E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6    J. Wenninger, Institut f. Computersprachen - TU Wien
7
8    This file is part of CACAO.
9
10    This program is free software; you can redistribute it and/or
11    modify it under the terms of the GNU General Public License as
12    published by the Free Software Foundation; either version 2, or (at
13    your option) any later version.
14
15    This program is distributed in the hope that it will be useful, but
16    WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18    General Public License for more details.
19
20    You should have received a copy of the GNU General Public License
21    along with this program; if not, write to the Free Software
22    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23    02110-1301, USA.
24
25 */
26
27
28 #include "config.h"
29
30 #include <assert.h>
31 #include <stdint.h>
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <sys/time.h>
35 #include <pthread.h>
36
37 #include "vm/types.h"
38
39 #include "mm/memory.h"
40
41 #include "native/llni.h"
42
43 #include "threads/lock-common.h"
44
45 #include "threads/native/lock.h"
46 #include "threads/native/threads.h"
47
48 #include "toolbox/list.h"
49
50 #include "vm/global.h"
51 #include "vm/exceptions.h"
52 #include "vm/finalizer.h"
53 #include "vm/stringlocal.h"
54 #include "vm/vm.h"
55
56 #include "vmcore/options.h"
57
58 #if defined(ENABLE_STATISTICS)
59 # include "vmcore/statistics.h"
60 #endif
61
62 #if defined(ENABLE_VMLOG)
63 #include <vmlog_cacao.h>
64 #endif
65
66 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
67
68 #include "arch.h"
69
70 /* includes for atomic instructions: */
71
72 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
73 #include "threads/native/generic-primitives.h"
74 #else
75 #include "machine-instr.h"
76 #endif
77
78 #if defined(ENABLE_JVMTI)
79 #include "native/jvmti/cacaodbg.h"
80 #endif
81
82 #if defined(ENABLE_GC_BOEHM)
83 # include "mm/boehm-gc/include/gc.h"
84 #endif
85
86
87 /* debug **********************************************************************/
88
89 #if !defined(NDEBUG)
90 # define DEBUGLOCKS(format) \
91     do { \
92         if (opt_DebugLocks) { \
93             log_println format; \
94         } \
95     } while (0)
96 #else
97 # define DEBUGLOCKS(format)
98 #endif
99
100
101 /******************************************************************************/
102 /* MACROS                                                                     */
103 /******************************************************************************/
104
105 /* number of lock records in the first pool allocated for a thread */
106 #define LOCK_INITIAL_LOCK_RECORDS 8
107
108 #define LOCK_INITIAL_HASHTABLE_SIZE  1613  /* a prime in the middle between 1024 and 2048 */
109
110 #define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
111         ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))
112
113
114 /******************************************************************************/
115 /* MACROS FOR THE FLAT LOCK CONTENTION BIT                                    */
116 /******************************************************************************/
117
118 #define LOCK_SET_FLC_BIT(obj)    ((obj)->hdrflags |= HDRFLAG_FLC)
119 #define LOCK_CLEAR_FLC_BIT(obj)  ((obj)->hdrflags &= ~ HDRFLAG_FLC)
120 #define LOCK_TEST_FLC_BIT(obj)   ((obj)->hdrflags & HDRFLAG_FLC)
121
122
123 /******************************************************************************/
124 /* MACROS FOR THIN/FAT LOCKS                                                  */
125 /******************************************************************************/
126
127 /* We use a variant of the tasuki locks described in the paper
128  *     
129  *     Tamiya Onodera, Kiyokuni Kawachiya
130  *     A Study of Locking Objects with Bimodal Fields
131  *     Proceedings of the ACM OOPSLA '99, pp. 223-237
132  *     1999
133  *
134  * The underlying thin locks are a variant of the thin locks described in
135  * 
136  *     Bacon, Konuru, Murthy, Serrano
137  *     Thin Locks: Featherweight Synchronization for Java
138  *         Proceedings of the ACM Conference on Programming Language Design and 
139  *         Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
140  *         June 1998
141  *
142  * In thin lock mode the lockword looks like this:
143  *
144  *     ,----------------------,-----------,---,
145  *     |      thread ID       |   count   | 0 |
146  *     `----------------------'-----------'---´
147  *
148  *     thread ID......the 'index' of the owning thread, or 0
149  *     count..........number of times the lock has been entered minus 1
150  *     0..............the shape bit is 0 in thin lock mode
151  *
152  * In fat lock mode it is basically a lock_record_t *:
153  *
154  *     ,----------------------------------,---,
155  *     |    lock_record_t * (without LSB) | 1 |
156  *     `----------------------------------'---´
157  *
158  *     1..............the shape bit is 1 in fat lock mode
159  */
160
161 #if SIZEOF_VOID_P == 8
162 #define THIN_LOCK_WORD_SIZE    64
163 #else
164 #define THIN_LOCK_WORD_SIZE    32
165 #endif
166
167 #define THIN_LOCK_SHAPE_BIT    0x01
168
169 #define THIN_UNLOCKED          0
170
171 #define THIN_LOCK_COUNT_SHIFT  1
172 #define THIN_LOCK_COUNT_SIZE   8
173 #define THIN_LOCK_COUNT_INCR   (1 << THIN_LOCK_COUNT_SHIFT)
174 #define THIN_LOCK_COUNT_MAX    ((1 << THIN_LOCK_COUNT_SIZE) - 1)
175 #define THIN_LOCK_COUNT_MASK   (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
176
177 #define THIN_LOCK_TID_SHIFT    (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
178 #define THIN_LOCK_TID_SIZE     (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
179
180 #define IS_THIN_LOCK(lockword)  (!((lockword) & THIN_LOCK_SHAPE_BIT))
181 #define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)
182
183 #define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
184 #define MAKE_FAT_LOCK(ptr)      ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)
185
186 #define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
187
188
189 /* global variables ***********************************************************/
190
191 /* hashtable mapping objects to lock records */
192 static lock_hashtable_t lock_hashtable;
193
194
195 /******************************************************************************/
196 /* PROTOTYPES                                                                 */
197 /******************************************************************************/
198
199 static void lock_hashtable_init(void);
200
201 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
202 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
203 static void lock_record_enter(threadobject *t, lock_record_t *lr);
204 static void lock_record_exit(threadobject *t, lock_record_t *lr);
205 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
206 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
207
208
209 /*============================================================================*/
210 /* INITIALIZATION OF DATA STRUCTURES                                          */
211 /*============================================================================*/
212
213
/* lock_init *******************************************************************

   Initialize global data for locking.

*******************************************************************************/

void lock_init(void)
{
	/* Set up the global object-to-lock-record hashtable. */

	lock_hashtable_init();

#if defined(ENABLE_VMLOG)
	/* Let the vmlog tracing facility hook into the lock subsystem. */
	vmlog_cacao_init_lock();
#endif
}
230
231
232 /* lock_pre_compute_thinlock ***************************************************
233
234    Pre-compute the thin lock value for a thread index.
235
236    IN:
237       index........the thead index (>= 1)
238
239    RETURN VALUE:
240       the thin lock value for this thread index
241
242 *******************************************************************************/
243
244 ptrint lock_pre_compute_thinlock(s4 index)
245 {
246         return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
247 }
248
249
250 /* lock_record_new *************************************************************
251
252    Allocate a lock record.
253
254 *******************************************************************************/
255
256 static lock_record_t *lock_record_new(void)
257 {
258         lock_record_t *lr;
259
260         /* allocate the data structure on the C heap */
261
262         lr = NEW(lock_record_t);
263
264 #if defined(ENABLE_STATISTICS)
265         if (opt_stat)
266                 size_lock_record += sizeof(lock_record_t);
267 #endif
268
269         /* initialize the members */
270
271         lr->object  = NULL;
272         lr->owner   = NULL;
273         lr->count   = 0;
274         lr->waiters = list_create(OFFSET(lock_waiter_t, linkage));
275
276         /* initialize the mutex */
277
278         pthread_mutex_init(&(lr->mutex), NULL);
279
280         DEBUGLOCKS(("[lock_record_new   : lr=%p]", (void *) lr));
281
282         return lr;
283 }
284
285
286 /* lock_record_free ************************************************************
287
288    Free a lock record.
289
290    IN:
291        lr....lock record to free
292
293 *******************************************************************************/
294
295 static void lock_record_free(lock_record_t *lr)
296 {
297         DEBUGLOCKS(("[lock_record_free  : lr=%p]", (void *) lr));
298
299         /* Destroy the mutex. */
300
301         pthread_mutex_destroy(&(lr->mutex));
302
303         /* Free the waiters list. */
304
305         list_free(lr->waiters);
306
307         /* Free the data structure. */
308
309         FREE(lr, lock_record_t);
310
311 #if defined(ENABLE_STATISTICS)
312         if (opt_stat)
313                 size_lock_record -= sizeof(lock_record_t);
314 #endif
315 }
316
317
318 /*============================================================================*/
319 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS                                  */
320 /*============================================================================*/
321
322 /* lock_hashtable_init *********************************************************
323
324    Initialize the global hashtable mapping objects to lock records.
325
326 *******************************************************************************/
327
328 static void lock_hashtable_init(void)
329 {
330         pthread_mutex_init(&(lock_hashtable.mutex), NULL);
331
332         lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
333         lock_hashtable.entries = 0;
334         lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);
335
336 #if defined(ENABLE_STATISTICS)
337         if (opt_stat)
338                 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
339 #endif
340
341         MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
342 }
343
344
345 /* lock_hashtable_grow *********************************************************
346
347    Grow the lock record hashtable to about twice its current size and
348    rehash the entries.
349
350 *******************************************************************************/
351
352 /* must be called with hashtable mutex locked */
353 static void lock_hashtable_grow(void)
354 {
355         u4 oldsize;
356         u4 newsize;
357         lock_record_t **oldtable;
358         lock_record_t **newtable;
359         lock_record_t *lr;
360         lock_record_t *next;
361         u4 i;
362         u4 h;
363         u4 newslot;
364
365         /* allocate a new table */
366
367         oldsize = lock_hashtable.size;
368         newsize = oldsize*2 + 1; /* XXX should use prime numbers */
369
370         DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
371
372         oldtable = lock_hashtable.ptr;
373         newtable = MNEW(lock_record_t *, newsize);
374
375 #if defined(ENABLE_STATISTICS)
376         if (opt_stat)
377                 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
378 #endif
379
380         MZERO(newtable, lock_record_t *, newsize);
381
382         /* rehash the entries */
383
384         for (i = 0; i < oldsize; i++) {
385                 lr = oldtable[i];
386                 while (lr) {
387                         next = lr->hashlink;
388
389                         h = heap_hashcode(lr->object);
390                         newslot = h % newsize;
391
392                         lr->hashlink = newtable[newslot];
393                         newtable[newslot] = lr;
394
395                         lr = next;
396                 }
397         }
398
399         /* replace the old table */
400
401         lock_hashtable.ptr  = newtable;
402         lock_hashtable.size = newsize;
403
404         MFREE(oldtable, lock_record_t *, oldsize);
405
406 #if defined(ENABLE_STATISTICS)
407         if (opt_stat)
408                 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
409 #endif
410 }
411
412
413 /* lock_hashtable_get **********************************************************
414
415    Find the lock record for the given object.  If it does not exists,
416    yet, create it and enter it in the hashtable.
417
418    IN:
419       t....the current thread
420           o....the object to look up
421
422    RETURN VALUE:
423       the lock record to use for this object
424
425 *******************************************************************************/
426
427 #if defined(ENABLE_GC_BOEHM)
428 static void lock_record_finalizer(void *object, void *p);
429 #endif
430
static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
{
	uintptr_t      lockword;
	u4             slot;
	lock_record_t *lr;

	lockword = lock_lockword_get(t, o);

	/* Fast path: if the lock is already fat, the lockword itself
	   encodes the record pointer — no hashtable access needed. */

	if (IS_FAT_LOCK(lockword))
		return GET_FAT_LOCK(lockword);

	/* lock the hashtable */

	pthread_mutex_lock(&(lock_hashtable.mutex));

	/* lookup the lock record in the hashtable */

	/* The GC-critical section pins the object so heap_hashcode() and
	   the direct-pointer comparison below see a stable address. */

	LLNI_CRITICAL_START_THREAD(t);
	slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
	lr   = lock_hashtable.ptr[slot];

	for (; lr != NULL; lr = lr->hashlink) {
		if (lr->object == LLNI_DIRECT(o))
			break;
	}
	LLNI_CRITICAL_END_THREAD(t);

	if (lr == NULL) {
		/* not found, we must create a new one */

		lr = lock_record_new();

		LLNI_CRITICAL_START_THREAD(t);
		lr->object = LLNI_DIRECT(o);
		LLNI_CRITICAL_END_THREAD(t);

#if defined(ENABLE_GC_BOEHM)
		/* register new finalizer to clean up the lock record */

		GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
#endif

		/* enter it in the hashtable */

		/* Insert at the head of the bucket chain. */

		lr->hashlink             = lock_hashtable.ptr[slot];
		lock_hashtable.ptr[slot] = lr;
		lock_hashtable.entries++;

		/* check whether the hash should grow */

		/* Grow once the load factor exceeds 4/3. */

		if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
			lock_hashtable_grow();
		}
	}

	/* unlock the hashtable */

	pthread_mutex_unlock(&(lock_hashtable.mutex));

	/* return the new lock record */

	/* NOTE: the returned record is NOT entered/locked by this function. */

	return lr;
}
494
495
496 /* lock_hashtable_remove *******************************************************
497
498    Remove the lock record for the given object from the hashtable
499    and free it afterwards.
500
501    IN:
502        t....the current thread
503        o....the object to look up
504
505 *******************************************************************************/
506
static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
{
	uintptr_t      lockword;
	lock_record_t *lr;
	u4             slot;
	lock_record_t *tmplr;

	/* lock the hashtable */

	pthread_mutex_lock(&(lock_hashtable.mutex));

	/* get lock record */

	lockword = lock_lockword_get(t, o);

	/* Caller contract: the object's lock must already be inflated. */

	assert(IS_FAT_LOCK(lockword));

	lr = GET_FAT_LOCK(lockword);

	/* remove the lock-record from the hashtable */

	LLNI_CRITICAL_START_THREAD(t);
	slot  = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
	tmplr = lock_hashtable.ptr[slot];
	LLNI_CRITICAL_END_THREAD(t);

	if (tmplr == lr) {
		/* special handling if it's the first in the chain */

		lock_hashtable.ptr[slot] = lr->hashlink;
	}
	else {
		/* Walk the bucket chain and unlink lr from its predecessor. */

		for (; tmplr != NULL; tmplr = tmplr->hashlink) {
			if (tmplr->hashlink == lr) {
				tmplr->hashlink = lr->hashlink;
				break;
			}
		}

		/* The record must have been present in the chain. */

		assert(tmplr != NULL);
	}

	/* decrease entry count */

	lock_hashtable.entries--;

	/* unlock the hashtable */

	pthread_mutex_unlock(&(lock_hashtable.mutex));

	/* free the lock record */

	lock_record_free(lr);
}
561
562
563 /* lock_record_finalizer *******************************************************
564
565    XXX Remove me for exact GC.
566
567 *******************************************************************************/
568
/* Boehm-GC finalizer callback for objects that ever had a lock record:
   runs the object's Java finalizer (if any) and then removes and frees
   the associated lock record. */
static void lock_record_finalizer(void *object, void *p)
{
	java_handle_t *o;
	classinfo     *c;

	o = (java_handle_t *) object;

#if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
	/* XXX this is only a dirty hack to make Boehm work with handles */

	/* Boehm passes the raw object pointer; wrap it into a handle so
	   the LLNI accessors below work. */

	o = LLNI_WRAP((java_object_t *) o);
#endif

	LLNI_class_get(o, c);

#if !defined(NDEBUG)
	if (opt_DebugFinalizer) {
		log_start();
		log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
		class_print(c);
		log_print("]");
		log_finish();
	}
#endif

	/* check for a finalizer function */

	/* This callback replaced the regular finalizer registration, so the
	   Java-level finalizer must be dispatched from here as well. */

	if (c->finalizer != NULL)
		finalizer_run(object, p);

	/* remove the lock-record entry from the hashtable and free it */

	lock_hashtable_remove(THREADOBJECT, o);
}
603
604
605 /*============================================================================*/
606 /* OBJECT LOCK INITIALIZATION                                                 */
607 /*============================================================================*/
608
609
610 /* lock_init_object_lock *******************************************************
611
612    Initialize the monitor pointer of the given object. The monitor gets
613    initialized to an unlocked state.
614
615 *******************************************************************************/
616
617 void lock_init_object_lock(java_object_t *o)
618 {
619         assert(o);
620
621         o->lockword = THIN_UNLOCKED;
622         LOCK_CLEAR_FLC_BIT(o);
623 }
624
625
626 /* lock_get_initial_lock_word **************************************************
627
628    Returns the initial (unlocked) lock word. The pointer is
629    required in the code generator to set up a virtual
630    java_objectheader for code patch locking.
631
632 *******************************************************************************/
633
634 lock_record_t *lock_get_initial_lock_word(void)
635 {
636         return (lock_record_t *) THIN_UNLOCKED;
637 }
638
639
640
641 /*============================================================================*/
642 /* LOCKING ALGORITHM                                                          */
643 /*============================================================================*/
644
645
646 /* lock_lockword_get ***********************************************************
647
648    Get the lockword for the given object.
649
650    IN:
651       t............the current thread
652       o............the object
653
654 *******************************************************************************/
655
656 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
657 {
658         uintptr_t lockword;
659
660         LLNI_CRITICAL_START_THREAD(t);
661         lockword = LLNI_DIRECT(o)->lockword;
662         LLNI_CRITICAL_END_THREAD(t);
663
664         return lockword;
665 }
666
667
668 /* lock_lockword_set ***********************************************************
669
670    Set the lockword for the given object.
671
672    IN:
673       t............the current thread
674       o............the object
675           lockword.....the new lockword value
676
677 *******************************************************************************/
678
679 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
680 {
681         LLNI_CRITICAL_START_THREAD(t);
682         LLNI_DIRECT(o)->lockword = lockword;
683         LLNI_CRITICAL_END_THREAD(t);
684 }
685
686
687 /* lock_record_enter ***********************************************************
688
689    Enter the lock represented by the given lock record.
690
691    IN:
692       t.................the current thread
693           lr................the lock record
694
695 *******************************************************************************/
696
static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
{
	/* Block until the record's mutex is ours, then record the current
	   thread as owner.  Ordering matters: ownership is only claimed
	   after the mutex is held. */
	pthread_mutex_lock(&(lr->mutex));
	lr->owner = t;
}
702
703
704 /* lock_record_exit ************************************************************
705
706    Release the lock represented by the given lock record.
707
708    IN:
709       t.................the current thread
710           lr................the lock record
711
712    PRE-CONDITION:
713       The current thread must own the lock represented by this lock record.
714           This is NOT checked by this function!
715
716 *******************************************************************************/
717
static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
{
	/* Give up ownership before releasing the mutex (mirror image of
	   lock_record_enter).  Ownership is NOT verified here — see the
	   pre-condition in the block comment above. */
	lr->owner = NULL;
	pthread_mutex_unlock(&(lr->mutex));
}
723
724
725 /* lock_inflate ****************************************************************
726
727    Inflate the lock of the given object. This may only be called by the
728    owner of the monitor of the object.
729
730    IN:
731       t............the current thread
732           o............the object of which to inflate the lock
733           lr...........the lock record to install. The current thread must
734                        own the lock of this lock record!
735
736    PRE-CONDITION:
737       The current thread must be the owner of this object's monitor AND
738           of the lock record's lock!
739
740 *******************************************************************************/
741
static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
{
	uintptr_t lockword;

	/* get the current lock count */

	lockword = lock_lockword_get(t, o);

	if (IS_FAT_LOCK(lockword)) {
		/* Already inflated — it must be with exactly this record. */
		assert(GET_FAT_LOCK(lockword) == lr);
	}
	else {
		/* We must be the thin-lock owner (ignoring the count bits). */
		assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);

		/* copy the count from the thin lock */

		lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
	}

	DEBUGLOCKS(("[lock_inflate      : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
				lr, t, o, lockword, lr->count));

	/* clear flat-lock-contention bit */

	LLNI_CRITICAL_START_THREAD(t);
	LOCK_CLEAR_FLC_BIT(LLNI_DIRECT(o));
	LLNI_CRITICAL_END_THREAD(t);

	/* notify waiting objects */

	/* Wake all threads waiting on the record before publishing the fat
	   lockword — NOTE(review): this ordering appears deliberate; do not
	   reorder without understanding the FLC protocol. */

	lock_record_notify(t, lr, false);

	/* install it */

	lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
}
778
779
780 /* lock_monitor_enter **********************************************************
781
782    Acquire the monitor of the given object. If the current thread already
783    owns the monitor, the lock counter is simply increased.
784
785    This function blocks until it can acquire the monitor.
786
787    IN:
788       t............the current thread
789           o............the object of which to enter the monitor
790
791    RETURN VALUE:
792       true.........the lock has been successfully acquired
793           false........an exception has been thrown
794
795 *******************************************************************************/
796
bool lock_monitor_enter(java_handle_t *o)
{
	threadobject  *t;
	/* CAUTION: This code assumes that ptrint is unsigned! */
	ptrint         lockword;
	ptrint         thinlock;
	lock_record_t *lr;

	if (o == NULL) {
		exceptions_throw_nullpointerexception();
		return false;
	}

	t = THREADOBJECT;

	thinlock = t->thinlock;

	/* most common case: try to thin-lock an unlocked object */

	/* A single CAS replaces THIN_UNLOCKED with our pre-computed thin
	   lock word; the old value tells us whether we won. */

	LLNI_CRITICAL_START_THREAD(t);
	lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
	LLNI_CRITICAL_END_THREAD(t);

	if (lockword == THIN_UNLOCKED) {
		/* success. we locked it */
		/* The Java Memory Model requires a memory barrier here: */
		MEMORY_BARRIER();
		return true;
	}

	/* next common case: recursive lock with small recursion count */
	/* We don't have to worry about stale values here, as any stale value  */
	/* will indicate another thread holding the lock (or an inflated lock) */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		/* we own this monitor               */
		/* check the current recursion count */

		/* XOR with our thinlock isolates the count bits, since the TID
		   field is equal and the shape bit is 0 on both sides. */

		if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
		{
			/* the recursion count is low enough */

			lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);

			/* success. we locked it */
			return true;
		}
		else {
			/* recursion count overflow */

			/* Fall back to a fat lock; lock_inflate copies the thin
			   count into lr->count, then we add this entry. */

			lr = lock_hashtable_get(t, o);
			lock_record_enter(t, lr);
			lock_inflate(t, o, lr);
			lr->count++;

			return true;
		}
	}

	/* the lock is either contented or fat */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		/* check for recursive entering */
		if (lr->owner == t) {
			lr->count++;
			return true;
		}

		/* acquire the mutex of the lock record */

		lock_record_enter(t, lr);

		/* A freshly acquired fat lock must have no recursion. */

		assert(lr->count == 0);

		return true;
	}

	/****** inflation path ******/

	/* Another thread holds the thin lock; we must wait for it and
	   convert the lock to a fat lock. */

	/* first obtain the lock record for this object */

	lr = lock_hashtable_get(t, o);

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Enter */
	jvmti_MonitorContendedEntering(false, o);
#endif

	/* enter the monitor */

	lock_record_enter(t, lr);

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Entered */
	jvmti_MonitorContendedEntering(true, o);
#endif

	/* inflation loop */

	/* Loop until the lockword is fat.  Each iteration either wins the
	   thin lock via CAS and inflates it ourselves, or waits to be
	   notified by the owner (who sees the FLC bit on unlock). */

	while (IS_THIN_LOCK(lockword = lock_lockword_get(t, o))) {
		/* Set the flat lock contention bit to let the owning thread
		   know that we want to be notified of unlocking. */

		LLNI_CRITICAL_START_THREAD(t);
		LOCK_SET_FLC_BIT(LLNI_DIRECT(o));
		LLNI_CRITICAL_END_THREAD(t);

		DEBUGLOCKS(("thread %d set flc bit on %p lr %p",
					t->index, (void*) o, (void*) lr));

		/* try to lock the object */

		LLNI_CRITICAL_START_THREAD(t);
		lockword = COMPARE_AND_SWAP_OLD_VALUE(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
		LLNI_CRITICAL_END_THREAD(t);

		if (lockword == THIN_UNLOCKED) {
			/* we can inflate the lock ourselves */

			DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
						t->index, (void*) o, (void*) lr));

			lock_inflate(t, o, lr);
		}
		else {
			/* Wait until another thread sees the flc bit and notifies
			   us of unlocking. */

			(void) lock_record_wait(t, lr, 0, 0);
		}
	}

	/* we own the inflated lock now */

	return true;
}
936
937
938 /* lock_monitor_exit ***********************************************************
939
940    Decrement the counter of a (currently owned) monitor. If the counter
941    reaches zero, release the monitor.
942
943    If the current thread is not the owner of the monitor, an 
944    IllegalMonitorState exception is thrown.
945
946    IN:
947       t............the current thread
948           o............the object of which to exit the monitor
949
950    RETURN VALUE:
951       true.........everything ok,
952           false........an exception has been thrown
953
954 *******************************************************************************/
955
bool lock_monitor_exit(java_handle_t *o)
{
	threadobject *t;
	uintptr_t     lockword;
	ptrint        thinlock;

	if (o == NULL) {
		exceptions_throw_nullpointerexception();
		return false;
	}

	t = THREADOBJECT;

	/* The thin-lock word this thread installs when it holds an
	   uncontended lock exactly once (thread id, count == 0). */

	thinlock = t->thinlock;

	/* We don't have to worry about stale values here, as any stale value */
	/* will indicate that we don't own the lock.                          */

	lockword = lock_lockword_get(t, o);

	/* most common case: we release a thin lock that we hold once */

	if (lockword == thinlock) {
		/* memory barrier for Java Memory Model */
		MEMORY_BARRIER();
		lock_lockword_set(t, o, THIN_UNLOCKED);
		/* memory barrier for thin locking */
		MEMORY_BARRIER();

		/* check if there has been a flat lock contention on this object */

		if (LOCK_TEST_FLC_BIT(LLNI_DIRECT(o))) {
			lock_record_t *lr;

			DEBUGLOCKS(("thread %d saw flc bit on %p",
						t->index, (void*) o));

			/* there has been a contention on this thin lock */

			lr = lock_hashtable_get(t, o);

			DEBUGLOCKS(("thread %d for %p got lr %p",
						t->index, (void*) o, (void*) lr));

			lock_record_enter(t, lr);

			/* re-test under the record mutex: the contending thread may
			   already have cleared the bit and inflated the lock itself */

			if (LOCK_TEST_FLC_BIT(LLNI_DIRECT(o))) {
				/* notify a thread that it can try to inflate the lock now */

				lock_record_notify(t, lr, true);
			}

			lock_record_exit(t, lr);
		}

		return true;
	}

	/* next common case: we release a recursive lock, count > 0 */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
		return true;
	}

	/* either the lock is fat, or we don't hold it at all */

	if (IS_FAT_LOCK(lockword)) {

		lock_record_t *lr;

		lr = GET_FAT_LOCK(lockword);

		/* check if we own this monitor */
		/* We don't have to worry about stale values here, as any stale value */
		/* will be != t and thus fail this check.                             */

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();
			return false;
		}

		/* { the current thread `t` owns the lock record `lr` on object `o` } */

		if (lr->count != 0) {
			/* we had locked this one recursively. just decrement, it will */
			/* still be locked. */
			lr->count--;
			return true;
		}

		/* unlock this lock record */

		lr->owner = NULL;
		pthread_mutex_unlock(&(lr->mutex));

		return true;
	}

	/* legal thin lock cases have been handled above, so this is an error */

	exceptions_throw_illegalmonitorstateexception();

	return false;
}
1061
1062
1063 /* lock_record_add_waiter ******************************************************
1064
1065    Add a thread to the list of waiting threads of a lock record.
1066
1067    IN:
1068       lr...........the lock record
1069       thread.......the thread to add
1070
1071 *******************************************************************************/
1072
1073 static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
1074 {
1075         lock_waiter_t *w;
1076
1077         /* Allocate a waiter data structure. */
1078
1079         w = NEW(lock_waiter_t);
1080
1081 #if defined(ENABLE_STATISTICS)
1082         if (opt_stat)
1083                 size_lock_waiter += sizeof(lock_waiter_t);
1084 #endif
1085
1086         /* Store the thread in the waiter structure. */
1087
1088         w->thread = thread;
1089
1090         /* Add the waiter as last entry to waiters list. */
1091
1092         list_add_last(lr->waiters, w);
1093 }
1094
1095
1096 /* lock_record_remove_waiter ***************************************************
1097
1098    Remove a thread from the list of waiting threads of a lock record.
1099
1100    IN:
1101       lr...........the lock record
      thread.......the current thread (the thread to remove)
1103
1104    PRE-CONDITION:
1105       The current thread must be the owner of the lock record.
1106    
1107 *******************************************************************************/
1108
1109 static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
1110 {
1111         list_t        *l;
1112         lock_waiter_t *w;
1113
1114         /* Get the waiters list. */
1115
1116         l = lr->waiters;
1117
1118         for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
1119                 if (w->thread == thread) {
1120                         /* Remove the waiter entry from the list. */
1121
1122                         list_remove_unsynced(l, w);
1123
1124                         /* Free the waiter data structure. */
1125
1126                         FREE(w, lock_waiter_t);
1127
1128 #if defined(ENABLE_STATISTICS)
1129                         if (opt_stat)
1130                                 size_lock_waiter -= sizeof(lock_waiter_t);
1131 #endif
1132
1133                         return;
1134                 }
1135         }
1136
1137         /* This should never happen. */
1138
1139         vm_abort("lock_record_remove_waiter: thread not found in list of waiters\n");
1140 }
1141
1142
1143 /* lock_record_wait ************************************************************
1144
1145    Wait on a lock record for a given (maximum) amount of time.
1146
1147    IN:
1148       t............the current thread
1149           lr...........the lock record
1150           millis.......milliseconds of timeout
1151           nanos........nanoseconds of timeout
1152
1153    RETURN VALUE:
1154       true.........we have been interrupted,
1155       false........everything ok
1156
1157    PRE-CONDITION:
1158       The current thread must be the owner of the lock record.
1159           This is NOT checked by this function!
1160    
1161 *******************************************************************************/
1162
static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
{
	s4   lockcount;
	bool wasinterrupted;

	DEBUGLOCKS(("[lock_record_wait  : lr=%p, t=%p, millis=%lld, nanos=%d]",
				lr, thread, millis, nanos));

	/* { the thread t owns the fat lock record lr on the object o } */

	/* register us as waiter for this object */

	lock_record_add_waiter(lr, thread);

	/* remember the old lock count, because waiting must release the
	   monitor completely regardless of the recursion depth */

	lockcount = lr->count;

	/* unlock this record */

	lr->count = 0;
	lock_record_exit(thread, lr);

	/* wait until notified/interrupted/timed out */

	wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);

	/* re-enter the monitor */

	lock_record_enter(thread, lr);

	/* remove us from the list of waiting threads */

	lock_record_remove_waiter(lr, thread);

	/* restore the old lock count, so the caller again holds the monitor
	   at the same recursion depth as before the wait */

	lr->count = lockcount;

	/* return if we have been interrupted */

	return wasinterrupted;
}
1206
1207
1208 /* lock_monitor_wait ***********************************************************
1209
1210    Wait on an object for a given (maximum) amount of time.
1211
1212    IN:
1213       t............the current thread
1214           o............the object
1215           millis.......milliseconds of timeout
1216           nanos........nanoseconds of timeout
1217
1218    PRE-CONDITION:
1219       The current thread must be the owner of the object's monitor.
1220    
1221 *******************************************************************************/
1222
1223 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
1224 {
1225         uintptr_t      lockword;
1226         lock_record_t *lr;
1227
1228         lockword = lock_lockword_get(t, o);
1229
1230         /* check if we own this monitor */
1231         /* We don't have to worry about stale values here, as any stale value */
1232         /* will fail this check.                                              */
1233
1234         if (IS_FAT_LOCK(lockword)) {
1235
1236                 lr = GET_FAT_LOCK(lockword);
1237
1238                 if (lr->owner != t) {
1239                         exceptions_throw_illegalmonitorstateexception();
1240                         return;
1241                 }
1242         }
1243         else {
1244                 /* it's a thin lock */
1245
1246                 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1247                         exceptions_throw_illegalmonitorstateexception();
1248                         return;
1249                 }
1250
1251                 /* inflate this lock */
1252
1253                 lr = lock_hashtable_get(t, o);
1254                 lock_record_enter(t, lr);
1255                 lock_inflate(t, o, lr);
1256         }
1257
1258         /* { the thread t owns the fat lock record lr on the object o } */
1259
1260         if (lock_record_wait(t, lr, millis, nanos))
1261                 exceptions_throw_interruptedexception();
1262 }
1263
1264
1265 /* lock_record_notify **********************************************************
1266
1267    Notify one thread or all threads waiting on the given lock record.
1268
1269    IN:
1270       t............the current thread
1271           lr...........the lock record
1272           one..........if true, only notify one thread
1273
1274    PRE-CONDITION:
1275       The current thread must be the owner of the lock record.
1276           This is NOT checked by this function!
1277    
1278 *******************************************************************************/
1279
static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
{
	list_t        *l;
	lock_waiter_t *w;
	threadobject  *waitingthread;

	/* { the thread t owns the fat lock record lr on the object o } */

	/* Get the waiters list. */

	l = lr->waiters;

	for (w = list_first_unsynced(l); w != NULL; w = list_next_unsynced(l, w)) {
		/* signal the waiting thread */

		waitingthread = w->thread;

		/* If the thread was already signaled but hasn't removed
		   itself from the list yet, just ignore it. */

		if (waitingthread->signaled == true)
			continue;

		/* Enter the wait-mutex. */

		pthread_mutex_lock(&(waitingthread->waitmutex));

		DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, sleeping=%d, one=%d]",
					lr, t, waitingthread, waitingthread->sleeping, one));

		/* Signal the thread if it's sleeping. */
		/* If it is not sleeping yet, the signaled flag set below is
		   enough; it will be checked before the thread goes to sleep. */

		if (waitingthread->sleeping)
			pthread_cond_signal(&(waitingthread->waitcond));

		/* Mark the thread as signaled. */

		waitingthread->signaled = true;

		/* Leave the wait-mutex. */

		pthread_mutex_unlock(&(waitingthread->waitmutex));

		/* if we should only wake one, we are done */

		if (one)
			break;
	}
}
1329
1330
1331 /* lock_monitor_notify *********************************************************
1332
1333    Notify one thread or all threads waiting on the given object.
1334
1335    IN:
1336       t............the current thread
1337           o............the object
1338           one..........if true, only notify one thread
1339
1340    PRE-CONDITION:
1341       The current thread must be the owner of the object's monitor.
1342    
1343 *******************************************************************************/
1344
1345 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1346 {
1347         uintptr_t      lockword;
1348         lock_record_t *lr;
1349
1350         lockword = lock_lockword_get(t, o);
1351
1352         /* check if we own this monitor */
1353         /* We don't have to worry about stale values here, as any stale value */
1354         /* will fail this check.                                              */
1355
1356         if (IS_FAT_LOCK(lockword)) {
1357
1358                 lr = GET_FAT_LOCK(lockword);
1359
1360                 if (lr->owner != t) {
1361                         exceptions_throw_illegalmonitorstateexception();
1362                         return;
1363                 }
1364         }
1365         else {
1366                 /* it's a thin lock */
1367
1368                 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1369                         exceptions_throw_illegalmonitorstateexception();
1370                         return;
1371                 }
1372
1373                 /* inflate this lock */
1374
1375                 lr = lock_hashtable_get(t, o);
1376                 lock_record_enter(t, lr);
1377                 lock_inflate(t, o, lr);
1378         }
1379
1380         /* { the thread t owns the fat lock record lr on the object o } */
1381
1382         lock_record_notify(t, lr, one);
1383 }
1384
1385
1386
1387 /*============================================================================*/
/* INQUIRY FUNCTIONS                                                          */
1389 /*============================================================================*/
1390
1391
1392 /* lock_is_held_by_current_thread **********************************************
1393
1394    Return true if the current thread owns the monitor of the given object.
1395
1396    IN:
1397           o............the object
1398
1399    RETURN VALUE:
1400       true, if the current thread holds the lock of this object.
1401    
1402 *******************************************************************************/
1403
1404 bool lock_is_held_by_current_thread(java_handle_t *o)
1405 {
1406         threadobject  *t;
1407         uintptr_t      lockword;
1408         lock_record_t *lr;
1409
1410         t = THREADOBJECT;
1411
1412         /* check if we own this monitor */
1413         /* We don't have to worry about stale values here, as any stale value */
1414         /* will fail this check.                                              */
1415
1416         lockword = lock_lockword_get(t, o);
1417
1418         if (IS_FAT_LOCK(lockword)) {
1419                 /* it's a fat lock */
1420
1421                 lr = GET_FAT_LOCK(lockword);
1422
1423                 return (lr->owner == t);
1424         }
1425         else {
1426                 /* it's a thin lock */
1427
1428                 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1429         }
1430 }
1431
1432
1433
1434 /*============================================================================*/
1435 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD                              */
1436 /*============================================================================*/
1437
1438
1439 /* lock_wait_for_object ********************************************************
1440
1441    Wait for the given object.
1442
1443    IN:
1444           o............the object
1445           millis.......milliseconds to wait
1446           nanos........nanoseconds to wait
1447    
1448 *******************************************************************************/
1449
1450 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1451 {
1452         threadobject *thread;
1453
1454         thread = THREADOBJECT;
1455
1456         lock_monitor_wait(thread, o, millis, nanos);
1457 }
1458
1459
1460 /* lock_notify_object **********************************************************
1461
1462    Notify one thread waiting on the given object.
1463
1464    IN:
1465           o............the object
1466    
1467 *******************************************************************************/
1468
1469 void lock_notify_object(java_handle_t *o)
1470 {
1471         threadobject *thread;
1472
1473         thread = THREADOBJECT;
1474
1475         lock_monitor_notify(thread, o, true);
1476 }
1477
1478
1479 /* lock_notify_all_object ******************************************************
1480
1481    Notify all threads waiting on the given object.
1482
1483    IN:
1484           o............the object
1485    
1486 *******************************************************************************/
1487
1488 void lock_notify_all_object(java_handle_t *o)
1489 {
1490         threadobject *thread;
1491
1492         thread = THREADOBJECT;
1493
1494         lock_monitor_notify(thread, o, false);
1495 }
1496
1497
1498 /*
1499  * These are local overrides for various environment variables in Emacs.
1500  * Please do not remove this and leave it at the end of the file, where
1501  * Emacs will automagically detect them.
1502  * ---------------------------------------------------------------------
1503  * Local variables:
1504  * mode: c
1505  * indent-tabs-mode: t
1506  * c-basic-offset: 4
1507  * tab-width: 4
1508  * End:
1509  * vim:noexpandtab:sw=4:ts=4:
1510  */