/* src/threads/native/lock.c - lock implementation

   Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
   C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
   E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
   J. Wenninger, Institut f. Computersprachen - TU Wien

   This file is part of CACAO.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2, or (at
   your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   $Id: threads.c 4903 2006-05-11 12:48:43Z edwin $

*/


#include "config.h"

#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <sys/time.h>
#include <pthread.h>

#include "vm/types.h"

#include "mm/memory.h"

#include "threads/native/lock.h"
#include "threads/native/threads.h"

#include "vm/global.h"
#include "vm/exceptions.h"
#include "vm/stringlocal.h"
#include "vm/vm.h"

#include "vmcore/options.h"

#if defined(ENABLE_STATISTICS)
# include "vmcore/statistics.h"
#endif

#if defined(ENABLE_VMLOG)
#include <vmlog_cacao.h>
#endif

/* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */

#include "arch.h"

/* includes for atomic instructions: */

#if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
#include "threads/native/generic-primitives.h"
#else
#include "machine-instr.h"
#endif

#if defined(ENABLE_JVMTI)
#include "native/jvmti/cacaodbg.h"
#endif


/******************************************************************************/
/* DEBUGGING MACROS                                                           */
/******************************************************************************/

/* #define LOCK_VERBOSE */

#if defined(LOCK_VERBOSE)
#define LOCK_LOG(args)  do { printf args; fflush(stdout); } while (0)
#else
#define LOCK_LOG(args)
#endif


/******************************************************************************/
/* MACROS                                                                     */
/******************************************************************************/

/* number of lock records in the first pool allocated for a thread */
#define LOCK_INITIAL_LOCK_RECORDS 8

#define LOCK_INITIAL_HASHTABLE_SIZE  1613  /* a prime roughly halfway between 1024 and 2048 */

#define LOCK_HASH(obj)  ((ptrint)(obj))

#define COMPARE_AND_SWAP_OLD_VALUE(address, oldvalue, newvalue) \
        ((ptrint) compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)))

/* CAUTION: oldvalue is evaluated twice! */
#define COMPARE_AND_SWAP_SUCCEEDS(address, oldvalue, newvalue) \
        (compare_and_swap((long *)(address), (long)(oldvalue), (long)(newvalue)) == (long)(oldvalue))
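
/* Illustrative note (a sketch, not compiled): the intended semantics of
 * the two CAS macros above, assuming compare_and_swap() atomically stores
 * `newvalue` at `address` iff the current value equals `oldvalue`, and
 * returns the value it found there:
 *
 *     ptrint old = COMPARE_AND_SWAP_OLD_VALUE(&word, 0, 42);
 *     // old == 0  ->  the swap happened and word is now 42
 *     // old != 0  ->  the swap failed; `old` is the competing value
 *
 *     if (COMPARE_AND_SWAP_SUCCEEDS(&word, 0, 42)) {
 *         // same success test in one step; note that oldvalue is
 *         // evaluated twice here, so it must be side-effect free
 *     }
 */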


/******************************************************************************/
/* MACROS FOR THE FLAT LOCK CONTENTION BIT                                    */
/******************************************************************************/

#define LOCK_SET_FLC_BIT(obj)    ((obj)->hdrflags |= HDRFLAG_FLC)
#define LOCK_CLEAR_FLC_BIT(obj)  ((obj)->hdrflags &= ~ HDRFLAG_FLC)
#define LOCK_TEST_FLC_BIT(obj)   ((obj)->hdrflags & HDRFLAG_FLC)


/******************************************************************************/
/* MACROS FOR THIN/FAT LOCKS                                                  */
/******************************************************************************/

/* We use a variant of the tasuki locks described in the paper
 *
 *     Tamiya Onodera, Kiyokuni Kawachiya
 *     A Study of Locking Objects with Bimodal Fields
 *     Proceedings of the ACM OOPSLA '99, pp. 223-237
 *     1999
 *
 * The underlying thin locks are a variant of the thin locks described in
 *
 *     Bacon, Konuru, Murthy, Serrano
 *     Thin Locks: Featherweight Synchronization for Java
 *     Proceedings of the ACM Conference on Programming Language Design and
 *     Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
 *     June 1998
 *
 * In thin lock mode the lockword (monitorPtr) looks like this:
 *
 *     ,----------------------,-----------,---,
 *     |      thread ID       |   count   | 0 |
 *     `----------------------'-----------'---´
 *
 *     thread ID......the 'index' of the owning thread, or 0
 *     count..........number of times the lock has been entered minus 1
 *     0..............the shape bit is 0 in thin lock mode
 *
 * In fat lock mode it is basically a lock_record_t *:
 *
 *     ,----------------------------------,---,
 *     |    lock_record_t * (without LSB) | 1 |
 *     `----------------------------------'---´
 *
 *     1..............the shape bit is 1 in fat lock mode
 */

#if SIZEOF_VOID_P == 8
#define THIN_LOCK_WORD_SIZE    64
#else
#define THIN_LOCK_WORD_SIZE    32
#endif

#define THIN_LOCK_SHAPE_BIT    0x01

#define THIN_UNLOCKED          0

#define THIN_LOCK_COUNT_SHIFT  1
#define THIN_LOCK_COUNT_SIZE   8
#define THIN_LOCK_COUNT_INCR   (1 << THIN_LOCK_COUNT_SHIFT)
#define THIN_LOCK_COUNT_MAX    ((1 << THIN_LOCK_COUNT_SIZE) - 1)
#define THIN_LOCK_COUNT_MASK   (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)

#define THIN_LOCK_TID_SHIFT    (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
#define THIN_LOCK_TID_SIZE     (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)

#define IS_THIN_LOCK(lockword)  (!((lockword) & THIN_LOCK_SHAPE_BIT))
#define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)

#define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
#define MAKE_FAT_LOCK(ptr)      ((ptrint)(ptr) | THIN_LOCK_SHAPE_BIT)

#define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)

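/* Worked example (illustrative only): with THIN_LOCK_COUNT_SHIFT == 1 and
 * THIN_LOCK_COUNT_SIZE == 8, the thread ID field starts at bit 9
 * (THIN_LOCK_TID_SHIFT == 9). For the thread with index 3:
 *
 *     lock_pre_compute_thinlock(3)   = 3 << 9           = 0x600  (count 0)
 *     after a 2nd (recursive) enter  = 0x600 + (1 << 1) = 0x602  (count 1)
 *     after the 256th enter          = 0x600 | 0x1FE    = 0x7FE  (count 255)
 *
 * The count field is then saturated (THIN_LOCK_COUNT_MAX == 255), so the
 * next recursive enter must inflate the lock. The shape bit (bit 0) is 0
 * in all of these values, so IS_THIN_LOCK() holds and
 * LOCK_WORD_WITHOUT_COUNT() recovers 0x600 in each case.
 */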

/******************************************************************************/
/* GLOBAL VARIABLES                                                           */
/******************************************************************************/

/* global lock record pool list header */
lock_record_pool_t *lock_global_pool;

/* mutex for synchronizing access to the global pool */
pthread_mutex_t lock_global_pool_lock;

/* hashtable mapping objects to lock records */
static lock_hashtable_t lock_hashtable;


/******************************************************************************/
/* PROTOTYPES                                                                 */
/******************************************************************************/

static void lock_hashtable_init(void);
static lock_record_t * lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o);

static lock_record_t * lock_record_alloc(threadobject *t);

static void lock_record_enter(threadobject *t, lock_record_t *lr);
static void lock_record_exit(threadobject *t, lock_record_t *lr);
static void lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);


/*============================================================================*/
/* INITIALIZATION OF DATA STRUCTURES                                          */
/*============================================================================*/


/* lock_init *******************************************************************

   Initialize global data for locking.

*******************************************************************************/

void lock_init(void)
{
        pthread_mutex_init(&lock_global_pool_lock, NULL);

        lock_hashtable_init();

#if defined(ENABLE_VMLOG)
        vmlog_cacao_init_lock();
#endif
}


/* lock_record_init ************************************************************

   Initialize a lock record.

   IN:
      r............the lock record to initialize
      t............the thread the record is allocated for (the owner field
                   is initialized to NULL; it is only set when the record
                   is actually entered)

*******************************************************************************/

static void lock_record_init(lock_record_t *r, threadobject *t)
{
        r->owner = NULL;
        r->count = 0;
        r->waiters = NULL;

#if !defined(NDEBUG)
        r->nextfree = NULL;
#endif

        pthread_mutex_init(&(r->mutex), NULL);
}


/* lock_init_execution_env *****************************************************

   Initialize the execution environment for a thread.

   IN:
      thread.......the thread

*******************************************************************************/

void lock_init_execution_env(threadobject *thread)
{
        thread->ee.firstfree = NULL;
        thread->ee.lockrecordpools = NULL;
        thread->ee.lockrecordcount = 0;
}



/* lock_pre_compute_thinlock ***************************************************

   Pre-compute the thin lock value for a thread index.

   IN:
      index........the thread index (>= 1)

   RETURN VALUE:
      the thin lock value for this thread index

*******************************************************************************/

ptrint lock_pre_compute_thinlock(s4 index)
{
        return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
}


/*============================================================================*/
/* LOCK RECORD MANAGEMENT                                                     */
/*============================================================================*/


/* lock_record_alloc_new_pool **************************************************

   Get a new lock record pool from the memory allocator.

   IN:
      thread.......the thread that will own the lock records
      size.........number of lock records in the pool to allocate

   RETURN VALUE:
      the new lock record pool, with initialized lock records

*******************************************************************************/

static lock_record_pool_t *lock_record_alloc_new_pool(threadobject *thread, int size)
{
        lock_record_pool_t *pool;
        s4                  i;

        /* get the pool from the memory allocator */

        pool = mem_alloc(sizeof(lock_record_pool_header_t)
                         + sizeof(lock_record_t) * size);

#if defined(ENABLE_STATISTICS)
        if (opt_stat)
                size_lock_record_pool += sizeof(lock_record_pool_header_t) +
                        sizeof(lock_record_t) * size;
#endif

        /* initialize the pool header */

        pool->header.size = size;

        /* initialize the individual lock records and chain them into a freelist */

        for (i = 0; i < size; i++) {
                lock_record_init(&pool->lr[i], thread);

                pool->lr[i].nextfree = &pool->lr[i + 1];
        }

        /* terminate the freelist */

        pool->lr[i - 1].nextfree = NULL;

        return pool;
}


/* lock_record_alloc_pool ******************************************************

   Allocate a lock record pool. The pool is either taken from the global free
   list or requested from the memory allocator.

   IN:
      t............the thread that will own the lock records
      size.........number of lock records in the pool to allocate

   RETURN VALUE:
      the new lock record pool, with initialized lock records

*******************************************************************************/

static lock_record_pool_t *lock_record_alloc_pool(threadobject *t, int size)
{
        pthread_mutex_lock(&lock_global_pool_lock);

        if (lock_global_pool != NULL) {
                int i;
                lock_record_pool_t *pool;

                /* pop a pool from the global freelist */

                pool             = lock_global_pool;
                lock_global_pool = pool->header.next;

                pthread_mutex_unlock(&lock_global_pool_lock);

                /* re-initialize owner and freelist chaining */

                for (i = 0; i < pool->header.size; i++) {
                        pool->lr[i].owner    = NULL;
                        pool->lr[i].nextfree = &pool->lr[i + 1];
                }
                pool->lr[i - 1].nextfree = NULL;

                return pool;
        }

        pthread_mutex_unlock(&lock_global_pool_lock);

        /* we have to get a new pool from the allocator */

        return lock_record_alloc_new_pool(t, size);
}


/* lock_record_free_pools ******************************************************

   Free the lock record pools in the given linked list. The pools are inserted
   into the global freelist.

   IN:
      pool.........list header

*******************************************************************************/

void lock_record_free_pools(lock_record_pool_t *pool)
{
        lock_record_pool_header_t *last;

        assert(false); /* XXX this function does not match the new locking */
                       /*     algorithm. We must find another way to free  */
                       /*     unused lock records.                         */

        if (pool == NULL)
                return;

        pthread_mutex_lock(&lock_global_pool_lock);

        /* find the last pool in the list */

        last = &pool->header;

        while (last->next)
                last = &last->next->header;

        /* chain it to the lock_global_pool freelist */

        last->next = lock_global_pool;

        /* insert the freed pools into the freelist */

        lock_global_pool = pool;

        pthread_mutex_unlock(&lock_global_pool_lock);
}


/* lock_record_alloc ***********************************************************

   Allocate a lock record which is owned by the current thread.

   IN:
      t............the current thread

   RETURN VALUE:
      a fresh lock record for use by the current thread

*******************************************************************************/

static lock_record_t *lock_record_alloc(threadobject *t)
{
        lock_record_t *r;

        assert(t);
        r = t->ee.firstfree;

        if (!r) {
                int poolsize;
                lock_record_pool_t *pool;

                /* get a new pool */

                poolsize = t->ee.lockrecordcount ? t->ee.lockrecordcount * 2
                                                 : LOCK_INITIAL_LOCK_RECORDS;
                pool = lock_record_alloc_pool(t, poolsize);

                /* add it to our per-thread pool list */

                pool->header.next = t->ee.lockrecordpools;
                t->ee.lockrecordpools = pool;
                t->ee.lockrecordcount += pool->header.size;

                /* take the first record from the pool */
                r = &pool->lr[0];
        }

        /* pop the record from the freelist */

        t->ee.firstfree = r->nextfree;
#ifndef NDEBUG
        r->nextfree = NULL; /* in order to find invalid uses of nextfree */
#endif

        return r;
}


/* lock_record_recycle *********************************************************

   Recycle the given lock record. It will be inserted in the appropriate
   free list.

   IN:
      t............the owner
      r............lock record to recycle

*******************************************************************************/

static inline void lock_record_recycle(threadobject *t, lock_record_t *r)
{
        assert(t);
        assert(r);
        assert(r->owner == NULL);
        assert(r->nextfree == NULL);

        r->nextfree = t->ee.firstfree;
        t->ee.firstfree = r;
}
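
/* Worked example (illustrative only): the pool sizes requested by
 * lock_record_alloc() above grow with the number of records the thread
 * already owns (poolsize = 2 * lockrecordcount):
 *
 *     pool #1:   8 records (LOCK_INITIAL_LOCK_RECORDS), total  8
 *     pool #2:  16 records (2 *  8),                    total 24
 *     pool #3:  48 records (2 * 24),                    total 72
 *
 * The total record count triples with each new pool, so a thread reaches
 * any required number of records in O(log n) pool allocations.
 */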


/*============================================================================*/
/* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS                                  */
/*============================================================================*/


/* lock_hashtable_init *********************************************************

   Initialize the global hashtable mapping objects to lock records.

*******************************************************************************/

static void lock_hashtable_init(void)
{
        pthread_mutex_init(&(lock_hashtable.mutex), NULL);

        lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
        lock_hashtable.entries = 0;
        lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);

#if defined(ENABLE_STATISTICS)
        if (opt_stat)
                size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
#endif

        MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
}


/* lock_hashtable_grow *********************************************************

   Grow the lock record hashtable to about twice its current size and
   rehash the entries.

   PRE-CONDITION:
      The hashtable mutex must be locked by the caller.

*******************************************************************************/

static void lock_hashtable_grow(void)
{
        u4 oldsize;
        u4 newsize;
        lock_record_t **oldtable;
        lock_record_t **newtable;
        lock_record_t *lr;
        lock_record_t *next;
        u4 i;
        u4 h;
        u4 newslot;

        /* allocate a new table */

        oldsize = lock_hashtable.size;
        newsize = oldsize * 2 + 1; /* XXX should use prime numbers */

        LOCK_LOG(("growing lock hashtable to size %d\n", newsize));

        oldtable = lock_hashtable.ptr;
        newtable = MNEW(lock_record_t *, newsize);

#if defined(ENABLE_STATISTICS)
        if (opt_stat)
                size_lock_hashtable += sizeof(lock_record_t *) * newsize;
#endif

        MZERO(newtable, lock_record_t *, newsize);

        /* rehash the entries */

        for (i = 0; i < oldsize; i++) {
                lr = oldtable[i];
                while (lr) {
                        next = lr->hashlink;

                        h = LOCK_HASH(lr->obj);
                        newslot = h % newsize;

                        lr->hashlink = newtable[newslot];
                        newtable[newslot] = lr;

                        lr = next;
                }
        }

        /* replace the old table */

        lock_hashtable.ptr  = newtable;
        lock_hashtable.size = newsize;

        MFREE(oldtable, lock_record_t *, oldsize);

#if defined(ENABLE_STATISTICS)
        if (opt_stat)
                size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
#endif
}


/* lock_hashtable_get_lock_record **********************************************

   Find the lock record for the given object. If it does not exist yet,
   create it and enter it in the hashtable.

   IN:
      t.................the current thread
      o.................the object to look up

   RETURN VALUE:
      the lock record to use for this object

*******************************************************************************/

static lock_record_t *lock_hashtable_get_lock_record(threadobject *t, java_objectheader *o)
{
        ptrint lockword;
        u4 slot;
        lock_record_t *lr;

        lockword = (ptrint) o->monitorPtr;

        if (IS_FAT_LOCK(lockword)) {
                return GET_FAT_LOCK(lockword);
        }

        /* lock the hashtable */

        pthread_mutex_lock(&(lock_hashtable.mutex));

        /* lookup the lock record in the hashtable */

        slot = LOCK_HASH(o) % lock_hashtable.size;
        lr = lock_hashtable.ptr[slot];
        while (lr) {
                if (lr->obj == o) {
                        pthread_mutex_unlock(&(lock_hashtable.mutex));
                        return lr;
                }

                lr = lr->hashlink;
        }

        /* not found, we must create a new one */

        lr = lock_record_alloc(t);
        lr->obj = o;
        LOCK_LOG(("thread %d allocated for %p new lr %p\n",
                        t->index, (void*) o, (void*) lr));

        /* enter it in the hashtable */

        lr->hashlink = lock_hashtable.ptr[slot];
        lock_hashtable.ptr[slot] = lr;
        lock_hashtable.entries++;

        /* check whether the hashtable should grow */

        if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
                lock_hashtable_grow();
        }

        /* unlock the hashtable */

        pthread_mutex_unlock(&(lock_hashtable.mutex));

        /* return the new lock record */

        return lr;
}
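
/* Worked example (illustrative only): the growth test above fires when
 * entries / size exceeds 4/3, i.e. when hash chains hold more than ~1.33
 * records on average. With the initial size of 1613 slots the table first
 * grows when entry 2151 is inserted (2151 * 3 = 6453 > 1613 * 4 = 6452)
 * and is rehashed into 2 * 1613 + 1 = 3227 slots.
 */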


/*============================================================================*/
/* OBJECT LOCK INITIALIZATION                                                 */
/*============================================================================*/


/* lock_init_object_lock *******************************************************

   Initialize the monitor pointer of the given object. The monitor gets
   initialized to an unlocked state.

*******************************************************************************/

void lock_init_object_lock(java_objectheader *o)
{
        assert(o);

        o->monitorPtr = (lock_record_t *) THIN_UNLOCKED;
        LOCK_CLEAR_FLC_BIT(o);
}


/* lock_get_initial_lock_word **************************************************

   Returns the initial (unlocked) lock word. The pointer is
   required in the code generator to set up a virtual
   java_objectheader for code patch locking.

*******************************************************************************/

lock_record_t *lock_get_initial_lock_word(void)
{
        return (lock_record_t *) THIN_UNLOCKED;
}



/*============================================================================*/
/* LOCKING ALGORITHM                                                          */
/*============================================================================*/


/* lock_record_enter ***********************************************************

   Enter the lock represented by the given lock record.

   IN:
      t.................the current thread
      lr................the lock record

*******************************************************************************/

static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
{
        pthread_mutex_lock(&(lr->mutex));
        lr->owner = t;
}


/* lock_record_exit ************************************************************

   Release the lock represented by the given lock record.

   IN:
      t.................the current thread
      lr................the lock record

   PRE-CONDITION:
      The current thread must own the lock represented by this lock record.
      This is NOT checked by this function!

*******************************************************************************/

static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
{
        lr->owner = NULL;
        pthread_mutex_unlock(&(lr->mutex));
}


/* lock_inflate ****************************************************************

   Inflate the lock of the given object. This may only be called by the
   owner of the monitor of the object.

   IN:
      t............the current thread
      o............the object of which to inflate the lock
      lr...........the lock record to install. The current thread must
                   own the lock of this lock record!

   PRE-CONDITION:
      The current thread must be the owner of this object's monitor AND
      of the lock record's lock!

*******************************************************************************/

static void lock_inflate(threadobject *t, java_objectheader *o, lock_record_t *lr)
{
        ptrint lockword;

        /* get the current lock count */

        lockword = (ptrint) o->monitorPtr;

        if (IS_FAT_LOCK(lockword)) {
                assert(GET_FAT_LOCK(lockword) == lr);
        }
        else {
                assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);

                /* copy the count from the thin lock */

                lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
        }

        LOCK_LOG(("thread %3d: inflating lock of object %p current lockword %lx, count %d\n",
                        t->index, (void*) o, (long) o->monitorPtr, (int) lr->count));

        /* clear the flat-lock-contention bit */

        LOCK_CLEAR_FLC_BIT(o);

        /* notify the waiting threads */

        lock_record_notify(t, lr, false);

        /* install the fat lock */

        o->monitorPtr = (lock_record_t *) MAKE_FAT_LOCK(lr);
}
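
/* Worked example (illustrative only): suppose the thread with index 3
 * holds the thin lock of object o with two nested entries, so the
 * lockword is 0x602 (see the layout example above). lock_inflate() then
 *
 *     1. copies the count field into the record:  lr->count = 1;
 *     2. clears the FLC bit and notifies any threads waiting on lr;
 *     3. publishes the record:  o->monitorPtr = MAKE_FAT_LOCK(lr).
 *
 * From then on the shape bit is 1, so every other thread takes the
 * fat-lock path and blocks on lr->mutex instead of spinning on the CAS.
 */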


/* lock_monitor_enter **********************************************************

   Acquire the monitor of the given object. If the current thread already
   owns the monitor, the lock counter is simply increased.

   This function blocks until it can acquire the monitor.

   IN:
      o............the object of which to enter the monitor
                   (the current thread is obtained from THREADOBJECT)

   RETURN VALUE:
      true.........the lock has been successfully acquired
      false........an exception has been thrown

*******************************************************************************/

bool lock_monitor_enter(java_objectheader *o)
{
        threadobject *t;
        /* CAUTION: This code assumes that ptrint is unsigned! */
        ptrint        lockword;
        ptrint        thinlock;

        if (o == NULL) {
                exceptions_throw_nullpointerexception();
                return false;
        }

        t = THREADOBJECT;

        thinlock = t->thinlock;

        /* most common case: try to thin-lock an unlocked object */

        if ((lockword = COMPARE_AND_SWAP_OLD_VALUE(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) == THIN_UNLOCKED) {
                /* success. we locked it */
                /* The Java Memory Model requires a memory barrier here: */
                MEMORY_BARRIER();
                return true;
        }

        /* next common case: recursive lock with small recursion count */
        /* We don't have to worry about stale values here, as any stale value  */
        /* will indicate another thread holding the lock (or an inflated lock) */

        if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
                /* we own this monitor               */
                /* check the current recursion count */

                if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
                {
                        /* the recursion count is low enough */

                        o->monitorPtr = (lock_record_t *) (lockword + THIN_LOCK_COUNT_INCR);

                        /* success. we locked it */
                        return true;
                }
                else {
                        lock_record_t *lr;

                        /* recursion count overflow */

                        lr = lock_hashtable_get_lock_record(t, o);
                        lock_record_enter(t, lr);
                        lock_inflate(t, o, lr);
                        lr->count++;

                        return true;
                }
        }

        /* the lock is either contended or fat */

        {
                lock_record_t *lr;

                if (IS_FAT_LOCK(lockword)) {

                        lr = GET_FAT_LOCK(lockword);

                        /* check for recursive entering */
                        if (lr->owner == t) {
                                lr->count++;
                                return true;
                        }

                        /* acquire the mutex of the lock record */

                        lock_record_enter(t, lr);

                        assert(lr->count == 0);

                        return true;
                }

                /****** inflation path ******/

                /* first obtain the lock record for this object */

                lr = lock_hashtable_get_lock_record(t, o);

#if defined(ENABLE_JVMTI)
                /* Monitor Contended Enter */
                jvmti_MonitorContendedEntering(false, o);
#endif

                /* enter the monitor */

                lock_record_enter(t, lr);

#if defined(ENABLE_JVMTI)
                /* Monitor Contended Entered */
                jvmti_MonitorContendedEntering(true, o);
#endif

                /* inflation loop */

                while (IS_THIN_LOCK(lockword = (ptrint) o->monitorPtr)) {
                        /* Set the flat lock contention bit to let the owning thread */
                        /* know that we want to be notified of unlocking.            */

                        LOCK_SET_FLC_BIT(o);

                        LOCK_LOG(("thread %d set flc bit on %p lr %p\n",
                                        t->index, (void*) o, (void*) lr));

                        /* try to lock the object */

                        if (COMPARE_AND_SWAP_SUCCEEDS(&(o->monitorPtr), THIN_UNLOCKED, thinlock)) {
                                /* we can inflate the lock ourselves */
                                LOCK_LOG(("thread %d inflating lock of %p to lr %p\n",
                                                t->index, (void*) o, (void*) lr));
                                lock_inflate(t, o, lr);
                        }
                        else {
                                /* wait until another thread sees the flc bit
                                   and notifies us of unlocking */
                                LOCK_LOG(("thread %d waiting for notification on %p lr %p\n",
                                                t->index, (void*) o, (void*) lr));
                                lock_record_wait(t, lr, 0, 0);
                        }
                }

                /* we own the inflated lock now */

                return true;
        }
}
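
/* Illustrative summary (a sketch, not normative): the lockword transitions
 * lock_monitor_enter() can perform, writing T for the caller's thinlock
 * pattern and lr for the object's lock record:
 *
 *     0 (unlocked)          --CAS-->  T            (thin, count 0)
 *     T + n (n < max)       ------->  T + n + 2    (thin, count + 1)
 *     T + max               ------->  lr | 1       (inflated by the owner)
 *     foreign thin pattern  ------->  set FLC bit, wait, retry
 *     lr | 1 (fat)          ------->  block on lr->mutex, or lr->count++
 *                                     if we already own it
 *
 * The "+ 2" is THIN_LOCK_COUNT_INCR: the count field starts at bit 1,
 * just above the shape bit.
 */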


/* lock_monitor_exit ***********************************************************

   Decrement the counter of a (currently owned) monitor. If the counter
   reaches zero, release the monitor.

   If the current thread is not the owner of the monitor, an
   IllegalMonitorStateException is thrown.

   IN:
      o............the object of which to exit the monitor
                   (the current thread is obtained from THREADOBJECT)

   RETURN VALUE:
      true.........everything ok,
      false........an exception has been thrown

*******************************************************************************/

bool lock_monitor_exit(java_objectheader *o)
{
        threadobject *t;
        ptrint        lockword;
        ptrint        thinlock;

        if (o == NULL) {
                exceptions_throw_nullpointerexception();
                return false;
        }

        t = THREADOBJECT;

        /* We don't have to worry about stale values here, as any stale value */
        /* will indicate that we don't own the lock.                          */

        lockword = (ptrint) o->monitorPtr;
        thinlock = t->thinlock;

        /* most common case: we release a thin lock that we hold once */

        if (lockword == thinlock) {
                /* memory barrier for Java Memory Model */
                MEMORY_BARRIER();
                o->monitorPtr = THIN_UNLOCKED;
                /* memory barrier for thin locking */
                MEMORY_BARRIER();

                /* check if there has been a flat lock contention on this object */

                if (LOCK_TEST_FLC_BIT(o)) {
                        lock_record_t *lr;

                        LOCK_LOG(("thread %d saw flc bit on %p %s\n",
                                        t->index, (void*) o, o->vftbl->class->name->text));

                        /* there has been a contention on this thin lock */

                        lr = lock_hashtable_get_lock_record(t, o);

                        LOCK_LOG(("thread %d for %p got lr %p\n",
                                        t->index, (void*) o, (void*) lr));

                        lock_record_enter(t, lr);

                        if (LOCK_TEST_FLC_BIT(o)) {
                                /* notify a thread that it can try to inflate the lock now */

                                lock_record_notify(t, lr, true);
                        }

                        lock_record_exit(t, lr);
                }

                return true;
        }

        /* next common case: we release a recursive lock, count > 0 */

        if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
                o->monitorPtr = (lock_record_t *) (lockword - THIN_LOCK_COUNT_INCR);
                return true;
        }

        /* either the lock is fat, or we don't hold it at all */

        if (IS_FAT_LOCK(lockword)) {

                lock_record_t *lr;

                lr = GET_FAT_LOCK(lockword);

                /* check if we own this monitor */
                /* We don't have to worry about stale values here, as any stale value */
                /* will be != t and thus fail this check.                             */

                if (lr->owner != t) {
                        exceptions_throw_illegalmonitorstateexception();
                        return false;
                }

                /* { the current thread `t` owns the lock record `lr` on object `o` } */

                if (lr->count != 0) {
                        /* we had locked this one recursively. just decrement, it will */
                        /* still be locked. */
                        lr->count--;
                        return true;
                }

                /* unlock this lock record */

                lr->owner = NULL;
                pthread_mutex_unlock(&(lr->mutex));

                return true;
        }

        /* legal thin lock cases have been handled above, so this is an error */

        exceptions_throw_illegalmonitorstateexception();

        return false;
}
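
/* Illustrative summary (a sketch, not normative): the flat-lock-contention
 * handshake between a contender in lock_monitor_enter() and the owner in
 * lock_monitor_exit():
 *
 *     contender                             owner
 *     ---------                             -----
 *     lr = hashtable lookup for o
 *     lock lr->mutex
 *     set FLC bit on o
 *     CAS fails (o still thin-locked)
 *     wait on lr  . . . . . . . . . . . .   monitorPtr = THIN_UNLOCKED
 *                                           sees FLC bit set
 *                                           locks the same lr
 *                                           notifies one waiter
 *     wake up, retry the CAS
 *     CAS succeeds -> inflate o to lr
 *
 * The hashtable guarantees that both threads resolve o to the same lock
 * record, which is what makes this handshake race-free.
 */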


/* lock_record_add_waiter ******************************************************

   Add a thread to the list of waiting threads of a lock record.

   IN:
      lr...........the lock record
      thread.......the thread to add

*******************************************************************************/

static void lock_record_add_waiter(lock_record_t *lr, threadobject *thread)
{
        lock_waiter_t *waiter;

        /* allocate a waiter data structure */

        waiter = NEW(lock_waiter_t);

#if defined(ENABLE_STATISTICS)
        if (opt_stat)
                size_lock_waiter += sizeof(lock_waiter_t);
#endif

        waiter->waiter = thread;
        waiter->next   = lr->waiters;

        lr->waiters = waiter;
}


/* lock_record_remove_waiter ***************************************************

   Remove a thread from the list of waiting threads of a lock record.

   IN:
      lr...........the lock record
      thread.......the thread to remove

   PRE-CONDITION:
      The current thread must be the owner of the lock record.

*******************************************************************************/

static void lock_record_remove_waiter(lock_record_t *lr, threadobject *thread)
{
        lock_waiter_t **link;
        lock_waiter_t  *w;

        link = &(lr->waiters);

        while ((w = *link)) {
                if (w->waiter == thread) {
                        *link = w->next;

                        /* free the waiter data structure */

                        FREE(w, lock_waiter_t);

#if defined(ENABLE_STATISTICS)
                        if (opt_stat)
                                size_lock_waiter -= sizeof(lock_waiter_t);
#endif

                        return;
                }

                link = &(w->next);
        }

        /* this should never happen */

        vm_abort("lock_record_remove_waiter: waiting thread not found in list of waiters\n");
}


/* lock_record_wait ************************************************************

   Wait on a lock record for a given (maximum) amount of time.

   IN:
      thread.......the current thread
      lr...........the lock record
      millis.......milliseconds of timeout
      nanos........nanoseconds of timeout

   PRE-CONDITION:
      The current thread must be the owner of the lock record.
      This is NOT checked by this function!

*******************************************************************************/

static void lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
{
        s4   lockcount;
        bool wasinterrupted;

        /* { the current thread owns the fat lock record lr on the object o } */

        /* register us as waiter for this object */

        lock_record_add_waiter(lr, thread);

        /* remember the old lock count */

        lockcount = lr->count;

        /* unlock this record */

        lr->count = 0;
        lock_record_exit(thread, lr);

        /* wait until notified/interrupted/timed out */

        wasinterrupted = threads_wait_with_timeout_relative(thread, millis, nanos);

        /* re-enter the monitor */

        lock_record_enter(thread, lr);

        /* remove us from the list of waiting threads */

        lock_record_remove_waiter(lr, thread);

        /* restore the old lock count */

        lr->count = lockcount;

        /* if we have been interrupted, throw the appropriate exception */

        if (wasinterrupted)
                exceptions_throw_interruptedexception();
}
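
/* Worked example (illustrative only): a thread that entered the monitor
 * three times and then waits arrives here with lr->count == 2 (the count
 * stores entries - 1). The sequence is then
 *
 *     lockcount = 2;  lr->count = 0;  release lr->mutex;
 *     ... other threads may enter, exit and notify ...
 *     reacquire lr->mutex;  lr->count = 2;
 *
 * so the full recursion depth is restored on return, as required by
 * java.lang.Object.wait().
 */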


/* lock_monitor_wait ***********************************************************

   Wait on an object for a given (maximum) amount of time.

   IN:
      t............the current thread
      o............the object
      millis.......milliseconds of timeout
      nanos........nanoseconds of timeout

   PRE-CONDITION:
      The current thread must be the owner of the object's monitor.

*******************************************************************************/

static void lock_monitor_wait(threadobject *t, java_objectheader *o, s8 millis, s4 nanos)
{
        ptrint         lockword;
        lock_record_t *lr;

        lockword = (ptrint) o->monitorPtr;

        /* check if we own this monitor */
        /* We don't have to worry about stale values here, as any stale value */
        /* will fail this check.                                              */

        if (IS_FAT_LOCK(lockword)) {

                lr = GET_FAT_LOCK(lockword);

                if (lr->owner != t) {
                        exceptions_throw_illegalmonitorstateexception();
                        return;
                }
        }
        else {
                /* it's a thin lock */

                if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
                        exceptions_throw_illegalmonitorstateexception();
                        return;
                }

                /* inflate this lock */
                lr = lock_hashtable_get_lock_record(t, o);
                lock_record_enter(t, lr);
                lock_inflate(t, o, lr);
        }

        /* { the thread t owns the fat lock record lr on the object o } */

        lock_record_wait(t, lr, millis, nanos);
}


/* lock_record_notify **********************************************************

   Notify one thread or all threads waiting on the given lock record.

   IN:
      t............the current thread
      lr...........the lock record
      one..........if true, only notify one thread

   PRE-CONDITION:
      The current thread must be the owner of the lock record.
      This is NOT checked by this function!

*******************************************************************************/

static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
{
        lock_waiter_t *waiter;
        threadobject  *waitingthread;

        /* { the thread t owns the fat lock record lr on the object o } */

        /* for each waiter: */

        for (waiter = lr->waiters; waiter; waiter = waiter->next) {

                /* signal the waiting thread */

                waitingthread = waiter->waiter;

                pthread_mutex_lock(&waitingthread->waitmutex);
                if (waitingthread->sleeping)
                        pthread_cond_signal(&waitingthread->waitcond);
                waitingthread->signaled = true;
                pthread_mutex_unlock(&waitingthread->waitmutex);

                /* if we should only wake one, we are done */

                if (one)
                        break;
        }
}


/* lock_monitor_notify *********************************************************

   Notify one thread or all threads waiting on the given object.

   IN:
      t............the current thread
      o............the object
      one..........if true, only notify one thread

   PRE-CONDITION:
      The current thread must be the owner of the object's monitor.

*******************************************************************************/

static void lock_monitor_notify(threadobject *t, java_objectheader *o, bool one)
{
        ptrint lockword;
        lock_record_t *lr;

        lockword = (ptrint) o->monitorPtr;

        /* check if we own this monitor */
        /* We don't have to worry about stale values here, as any stale value */
        /* will fail this check.                                              */

        if (IS_FAT_LOCK(lockword)) {

                lr = GET_FAT_LOCK(lockword);

                if (lr->owner != t) {
                        exceptions_throw_illegalmonitorstateexception();
                        return;
                }
        }
        else {
                /* it's a thin lock */

                if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
                        exceptions_throw_illegalmonitorstateexception();
                        return;
                }

                /* inflate this lock */
                lr = lock_hashtable_get_lock_record(t, o);
                lock_record_enter(t, lr);
                lock_inflate(t, o, lr);
        }

        /* { the thread t owns the fat lock record lr on the object o } */

        lock_record_notify(t, lr, one);
}


/*============================================================================*/
/* INQUIRY FUNCTIONS                                                          */
/*============================================================================*/


/* lock_is_held_by_current_thread **********************************************

   Return true if the current thread owns the monitor of the given object.

   IN:
      o............the object

   RETURN VALUE:
      true, if the current thread holds the lock of this object.

*******************************************************************************/

bool lock_is_held_by_current_thread(java_objectheader *o)
{
        ptrint        lockword;
        threadobject *t;

        /* check if we own this monitor */
        /* We don't have to worry about stale values here, as any stale value */
        /* will fail this check.                                              */

        lockword = (ptrint) o->monitorPtr;
        t = THREADOBJECT;

        if (IS_FAT_LOCK(lockword)) {
                lock_record_t *lr;

                /* it's a fat lock */
                lr = GET_FAT_LOCK(lockword);

                return (lr->owner == t);
        }
        else {
                /* it's a thin lock */

                return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
        }
}



/*============================================================================*/
/* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD                              */
/*============================================================================*/


/* lock_wait_for_object ********************************************************

   Wait on the given object's monitor.

   IN:
      o............the object
      millis.......milliseconds to wait
      nanos........nanoseconds to wait

*******************************************************************************/

void lock_wait_for_object(java_objectheader *o, s8 millis, s4 nanos)
{
        threadobject *thread;

        thread = THREADOBJECT;

        lock_monitor_wait(thread, o, millis, nanos);
}


/* lock_notify_object **********************************************************

   Notify one thread waiting on the given object.

   IN:
      o............the object

*******************************************************************************/

void lock_notify_object(java_objectheader *o)
{
        threadobject *thread;

        thread = THREADOBJECT;

        lock_monitor_notify(thread, o, true);
}


/* lock_notify_all_object ******************************************************

   Notify all threads waiting on the given object.

   IN:
      o............the object

*******************************************************************************/

void lock_notify_all_object(java_objectheader *o)
{
        threadobject *thread;

        thread = THREADOBJECT;

        lock_monitor_notify(thread, o, false);
}
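
/* Illustrative sketch (not compiled, hence the #if 0): how these wrappers
 * map onto the java.lang.Object monitor operations. The function name and
 * the timeout value are hypothetical. */
#if 0
static void example_monitor_usage(java_objectheader *o)
{
        /* synchronized (o) { o.wait(100); o.notify(); o.notifyAll(); } */

        if (!lock_monitor_enter(o))          /* may throw NullPointerException */
                return;

        lock_wait_for_object(o, 100, 0);     /* Object.wait(100)   */
        lock_notify_object(o);               /* Object.notify()    */
        lock_notify_all_object(o);           /* Object.notifyAll() */

        lock_monitor_exit(o);
}
#endif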


/*
 * These are local overrides for various environment variables in Emacs.
 * Please do not remove this and leave it at the end of the file, where
 * Emacs will automagically detect them.
 * ---------------------------------------------------------------------
 * Local variables:
 * mode: c
 * indent-tabs-mode: t
 * c-basic-offset: 4
 * tab-width: 4
 * End:
 * vim:noexpandtab:sw=4:ts=4:
 */