* src/threads/posix/lock.c: Moved to .cpp.
[cacao.git] / src / threads / lock.cpp
1 /* src/threads/lock.cpp - lock implementation
2
3    Copyright (C) 1996-2005, 2006, 2007, 2008
4    CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5
6    This file is part of CACAO.
7
8    This program is free software; you can redistribute it and/or
9    modify it under the terms of the GNU General Public License as
10    published by the Free Software Foundation; either version 2, or (at
11    your option) any later version.
12
13    This program is distributed in the hope that it will be useful, but
14    WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16    General Public License for more details.
17
18    You should have received a copy of the GNU General Public License
19    along with this program; if not, write to the Free Software
20    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21    02110-1301, USA.
22
23 */
24
25
26 #include "config.h"
27
28 #include <assert.h>
29 #include <stdint.h>
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <sys/time.h>
33
34 #include "vm/types.h"
35
36 #include "mm/memory.h"
37
38 #include "native/llni.h"
39
40 #include "threads/lock.hpp"
41 #include "threads/mutex.hpp"
42 #include "threads/threadlist.hpp"
43 #include "threads/thread.hpp"
44
45 #include "toolbox/list.hpp"
46
47 #include "vm/exceptions.hpp"
48 #include "vm/finalizer.h"
49 #include "vm/global.h"
50 #include "vm/options.h"
51 #include "vm/string.hpp"
52 #include "vm/vm.hpp"
53
54 #if defined(ENABLE_STATISTICS)
55 # include "vm/statistics.h"
56 #endif
57
58 #if defined(ENABLE_VMLOG)
59 #include <vmlog_cacao.h>
60 #endif
61
62 /* arch.h must be here because it defines USE_FAKE_ATOMIC_INSTRUCTIONS */
63
64 #include "arch.h"
65
66 /* includes for atomic instructions: */
67
68 #if defined(USE_FAKE_ATOMIC_INSTRUCTIONS)
69 #include "threads/posix/generic-primitives.h"
70 #else
71 #include "threads/atomic.hpp"
72 #endif
73
74 #if defined(ENABLE_JVMTI)
75 #include "native/jvmti/cacaodbg.h"
76 #endif
77
78 #if defined(ENABLE_GC_BOEHM)
79 # include "mm/boehm-gc/include/gc.h"
80 #endif
81
82
83 /* debug **********************************************************************/
84
85 #if !defined(NDEBUG)
86 # define DEBUGLOCKS(format) \
87     do { \
88         if (opt_DebugLocks) { \
89             log_println format; \
90         } \
91     } while (0)
92 #else
93 # define DEBUGLOCKS(format)
94 #endif
95
96
97 /******************************************************************************/
98 /* MACROS                                                                     */
99 /******************************************************************************/
100
101 /* number of lock records in the first pool allocated for a thread */
102 #define LOCK_INITIAL_LOCK_RECORDS 8
103
104 #define LOCK_INITIAL_HASHTABLE_SIZE  1613  /* a prime in the middle between 1024 and 2048 */
105
106
107 /******************************************************************************/
108 /* MACROS FOR THIN/FAT LOCKS                                                  */
109 /******************************************************************************/
110
111 /* We use a variant of the tasuki locks described in the paper
112  *     
113  *     Tamiya Onodera, Kiyokuni Kawachiya
114  *     A Study of Locking Objects with Bimodal Fields
115  *     Proceedings of the ACM OOPSLA '99, pp. 223-237
116  *     1999
117  *
118  * The underlying thin locks are a variant of the thin locks described in
119  * 
120  *     Bacon, Konuru, Murthy, Serrano
121  *     Thin Locks: Featherweight Synchronization for Java
122  *         Proceedings of the ACM Conference on Programming Language Design and 
123  *         Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
124  *         June 1998
125  *
126  * In thin lock mode the lockword looks like this:
127  *
128  *     ,----------------------,-----------,---,
129  *     |      thread ID       |   count   | 0 |
130  *     `----------------------'-----------'---'
131  *
132  *     thread ID......the 'index' of the owning thread, or 0
133  *     count..........number of times the lock has been entered minus 1
134  *     0..............the shape bit is 0 in thin lock mode
135  *
136  * In fat lock mode it is basically a lock_record_t *:
137  *
138  *     ,----------------------------------,---,
139  *     |    lock_record_t * (without LSB) | 1 |
140  *     `----------------------------------'---'
141  *
142  *     1..............the shape bit is 1 in fat lock mode
143  */
144
145 #if SIZEOF_VOID_P == 8
146 #define THIN_LOCK_WORD_SIZE    64
147 #else
148 #define THIN_LOCK_WORD_SIZE    32
149 #endif
150
151 #define THIN_LOCK_SHAPE_BIT    0x01
152
153 #define THIN_UNLOCKED          0
154
155 #define THIN_LOCK_COUNT_SHIFT  1
156 #define THIN_LOCK_COUNT_SIZE   8
157 #define THIN_LOCK_COUNT_INCR   (1 << THIN_LOCK_COUNT_SHIFT)
158 #define THIN_LOCK_COUNT_MAX    ((1 << THIN_LOCK_COUNT_SIZE) - 1)
159 #define THIN_LOCK_COUNT_MASK   (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT)
160
161 #define THIN_LOCK_TID_SHIFT    (THIN_LOCK_COUNT_SIZE + THIN_LOCK_COUNT_SHIFT)
162 #define THIN_LOCK_TID_SIZE     (THIN_LOCK_WORD_SIZE - THIN_LOCK_TID_SHIFT)
163
/* predicates on the shape bit (LSB) of a lockword */
#define IS_THIN_LOCK(lockword)  (!((lockword) & THIN_LOCK_SHAPE_BIT))
#define IS_FAT_LOCK(lockword)     ((lockword) & THIN_LOCK_SHAPE_BIT)

/* convert between a fat lockword and the lock_record_t it encodes */
#define GET_FAT_LOCK(lockword)  ((lock_record_t *) ((lockword) & ~THIN_LOCK_SHAPE_BIT))
#define MAKE_FAT_LOCK(ptr)      ((uintptr_t) (ptr) | THIN_LOCK_SHAPE_BIT)

/* thin-lock field accessors; the argument is parenthesized so the
   macros stay hygienic for non-trivial expressions */
#define LOCK_WORD_WITHOUT_COUNT(lockword) ((lockword) & ~THIN_LOCK_COUNT_MASK)
#define GET_THREAD_INDEX(lockword) ((unsigned) (lockword) >> THIN_LOCK_TID_SHIFT)
172
173
174 /* global variables ***********************************************************/
175
176 /* hashtable mapping objects to lock records */
177 static lock_hashtable_t lock_hashtable;
178
179
180 /******************************************************************************/
181 /* PROTOTYPES                                                                 */
182 /******************************************************************************/
183
184 static void lock_hashtable_init(void);
185
186 static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o);
187 static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword);
188 static void lock_record_enter(threadobject *t, lock_record_t *lr);
189 static void lock_record_exit(threadobject *t, lock_record_t *lr);
190 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
191 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
192
193
194 /*============================================================================*/
195 /* INITIALIZATION OF DATA STRUCTURES                                          */
196 /*============================================================================*/
197
198
199 /* lock_init *******************************************************************
200
201    Initialize global data for locking.
202
203 *******************************************************************************/
204
void lock_init(void)
{
	/* initialize lock hashtable */

	lock_hashtable_init();

#if defined(ENABLE_VMLOG)
	/* let vmlog set up its lock-event logging support */
	vmlog_cacao_init_lock();
#endif
}
215
216
217 /* lock_pre_compute_thinlock ***************************************************
218
219    Pre-compute the thin lock value for a thread index.
220
221    IN:
      index........the thread index (>= 1)
223
224    RETURN VALUE:
225       the thin lock value for this thread index
226
227 *******************************************************************************/
228
229 ptrint lock_pre_compute_thinlock(s4 index)
230 {
231         return (index << THIN_LOCK_TID_SHIFT) | THIN_UNLOCKED;
232 }
233
234
235 /* lock_record_new *************************************************************
236
237    Allocate a lock record.
238
239 *******************************************************************************/
240
static lock_record_t *lock_record_new(void)
{
	lock_record_t *lr;

	/* allocate the data structure on the C heap */

	lr = NEW(lock_record_t);

#if defined(ENABLE_STATISTICS)
	if (opt_stat)
		size_lock_record += sizeof(lock_record_t);
#endif

	/* initialize the members */

	lr->object  = NULL;	/* attached to an object later (lock_hashtable_get) */
	lr->owner   = NULL;	/* no thread owns this record yet */
	lr->count   = 0;	/* recursion count starts at zero */
	lr->waiters = new List<threadobject*>();	/* threads waiting on this record */

#if defined(ENABLE_GC_CACAO)
	/* register the lock object as weak reference with the GC, so
	   lr->object is cleared when the object is reclaimed */

	gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
#endif

	// Initialize the mutex.
	lr->mutex = new Mutex();

	DEBUGLOCKS(("[lock_record_new   : lr=%p]", (void *) lr));

	return lr;
}
274
275
276 /* lock_record_free ************************************************************
277
278    Free a lock record.
279
280    IN:
281        lr....lock record to free
282
283 *******************************************************************************/
284
static void lock_record_free(lock_record_t *lr)
{
	/* NOTE(review): the caller must guarantee that no thread still
	   holds or waits on this record -- this is not checked here. */

	DEBUGLOCKS(("[lock_record_free  : lr=%p]", (void *) lr));

	// Destroy the mutex.
	delete lr->mutex;

#if defined(ENABLE_GC_CACAO)
	/* unregister the lock object reference with the GC */

	gc_weakreference_unregister(&(lr->object));
#endif

	// Free the waiters list.
	delete lr->waiters;

	/* Free the data structure. */

	FREE(lr, lock_record_t);

#if defined(ENABLE_STATISTICS)
	if (opt_stat)
		size_lock_record -= sizeof(lock_record_t);
#endif
}
310
311
312 /*============================================================================*/
313 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS                                  */
314 /*============================================================================*/
315
316 /* lock_hashtable_init *********************************************************
317
318    Initialize the global hashtable mapping objects to lock records.
319
320 *******************************************************************************/
321
322 static void lock_hashtable_init(void)
323 {
324         lock_hashtable.mutex   = new Mutex();
325
326         lock_hashtable.size    = LOCK_INITIAL_HASHTABLE_SIZE;
327         lock_hashtable.entries = 0;
328         lock_hashtable.ptr     = MNEW(lock_record_t *, lock_hashtable.size);
329
330 #if defined(ENABLE_STATISTICS)
331         if (opt_stat)
332                 size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size;
333 #endif
334
335         MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
336 }
337
338
339 /* lock_hashtable_grow *********************************************************
340
341    Grow the lock record hashtable to about twice its current size and
342    rehash the entries.
343
344 *******************************************************************************/
345
346 /* must be called with hashtable mutex locked */
347 static void lock_hashtable_grow(void)
348 {
349         u4 oldsize;
350         u4 newsize;
351         lock_record_t **oldtable;
352         lock_record_t **newtable;
353         lock_record_t *lr;
354         lock_record_t *next;
355         u4 i;
356         u4 h;
357         u4 newslot;
358
359         /* allocate a new table */
360
361         oldsize = lock_hashtable.size;
362         newsize = oldsize*2 + 1; /* XXX should use prime numbers */
363
364         DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
365
366         oldtable = lock_hashtable.ptr;
367         newtable = MNEW(lock_record_t *, newsize);
368
369 #if defined(ENABLE_STATISTICS)
370         if (opt_stat)
371                 size_lock_hashtable += sizeof(lock_record_t *) * newsize;
372 #endif
373
374         MZERO(newtable, lock_record_t *, newsize);
375
376         /* rehash the entries */
377
378         for (i = 0; i < oldsize; i++) {
379                 lr = oldtable[i];
380                 while (lr) {
381                         next = lr->hashlink;
382
383                         h = heap_hashcode(lr->object);
384                         newslot = h % newsize;
385
386                         lr->hashlink = newtable[newslot];
387                         newtable[newslot] = lr;
388
389                         lr = next;
390                 }
391         }
392
393         /* replace the old table */
394
395         lock_hashtable.ptr  = newtable;
396         lock_hashtable.size = newsize;
397
398         MFREE(oldtable, lock_record_t *, oldsize);
399
400 #if defined(ENABLE_STATISTICS)
401         if (opt_stat)
402                 size_lock_hashtable -= sizeof(lock_record_t *) * oldsize;
403 #endif
404 }
405
406
407 /* lock_hashtable_cleanup ******************************************************
408
409    Removes (and frees) lock records which have a cleared object reference
410    from the hashtable. The locked object was reclaimed by the GC.
411
412 *******************************************************************************/
413
#if defined(ENABLE_GC_CACAO)
/* Remove (and free) all lock records whose weakly-referenced object
   was reclaimed by the GC (lr->object cleared to NULL). */
void lock_hashtable_cleanup(void)
{
	lock_record_t *lr;
	lock_record_t *prev;
	lock_record_t *next;
	u4 i;				/* u4 to match lock_hashtable.size */

	/* lock the hashtable */
	/* Use the C++ Mutex API, consistent with the rest of this file
	   (the old Mutex_lock/Mutex_unlock C calls are gone). */

	lock_hashtable.mutex->lock();

	/* search the hashtable for cleared references */

	for (i = 0; i < lock_hashtable.size; i++) {
		lr = lock_hashtable.ptr[i];
		prev = NULL;

		while (lr) {
			next = lr->hashlink;

			/* remove lock records with cleared references */

			if (lr->object == NULL) {

				/* unlink the lock record from the hashtable */

				if (prev == NULL)
					lock_hashtable.ptr[i] = next;
				else
					prev->hashlink = next;

				/* keep the entry count in sync so the grow
				   heuristic in lock_hashtable_get stays accurate */

				lock_hashtable.entries--;

				/* free the lock record */

				lock_record_free(lr);

			} else {
				prev = lr;
			}

			lr = next;
		}
	}

	/* unlock the hashtable */

	lock_hashtable.mutex->unlock();
}
#endif
466
467
468 /* lock_hashtable_get **********************************************************
469
   Find the lock record for the given object.  If it does not exist
   yet, create it and enter it into the hashtable.
472
473    IN:
474       t....the current thread
475           o....the object to look up
476
477    RETURN VALUE:
478       the lock record to use for this object
479
480 *******************************************************************************/
481
#if defined(ENABLE_GC_BOEHM)
static void lock_record_finalizer(void *object, void *p);
#endif

static lock_record_t *lock_hashtable_get(threadobject *t, java_handle_t *o)
{
	uintptr_t      lockword;
	u4             slot;
	lock_record_t *lr;

	/* Fast path: if the lock is already fat, the lockword itself is
	   the pointer to the lock record -- no hashtable access needed. */

	lockword = lock_lockword_get(t, o);

	if (IS_FAT_LOCK(lockword))
		return GET_FAT_LOCK(lockword);

	// Lock the hashtable.
	lock_hashtable.mutex->lock();

	/* lookup the lock record in the hashtable */
	/* Direct object access (LLNI_DIRECT) must happen inside an LLNI
	   critical section. */

	LLNI_CRITICAL_START_THREAD(t);
	slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
	lr   = lock_hashtable.ptr[slot];

	for (; lr != NULL; lr = lr->hashlink) {
		if (lr->object == LLNI_DIRECT(o))
			break;
	}
	LLNI_CRITICAL_END_THREAD(t);

	if (lr == NULL) {
		/* not found, we must create a new one */

		lr = lock_record_new();

		LLNI_CRITICAL_START_THREAD(t);
		lr->object = LLNI_DIRECT(o);
		LLNI_CRITICAL_END_THREAD(t);

#if defined(ENABLE_GC_BOEHM)
		/* register new finalizer to clean up the lock record when
		   the object is collected */

		GC_REGISTER_FINALIZER(LLNI_DIRECT(o), lock_record_finalizer, 0, 0, 0);
#endif

		/* enter it in the hashtable */

		lr->hashlink             = lock_hashtable.ptr[slot];
		lock_hashtable.ptr[slot] = lr;
		lock_hashtable.entries++;

		/* check whether the hash should grow */
		/* grows once the load factor exceeds 4/3 */

		if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
			lock_hashtable_grow();
		}
	}

	// Unlock the hashtable.
	lock_hashtable.mutex->unlock();

	/* return the new lock record */

	return lr;
}
547
548
549 /* lock_hashtable_remove *******************************************************
550
551    Remove the lock record for the given object from the hashtable
552    and free it afterwards.
553
554    IN:
555        t....the current thread
556        o....the object to look up
557
558 *******************************************************************************/
559
static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
{
	uintptr_t      lockword;
	lock_record_t *lr;
	u4             slot;
	lock_record_t *tmplr;

	// Lock the hashtable.
	lock_hashtable.mutex->lock();

	/* get lock record */

	lockword = lock_lockword_get(t, o);

	/* only an inflated (fat) lock has a record to remove */
	assert(IS_FAT_LOCK(lockword));

	lr = GET_FAT_LOCK(lockword);

	/* remove the lock-record from the hashtable */

	LLNI_CRITICAL_START_THREAD(t);
	slot  = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
	tmplr = lock_hashtable.ptr[slot];
	LLNI_CRITICAL_END_THREAD(t);

	if (tmplr == lr) {
		/* special handling if it's the first in the chain */

		lock_hashtable.ptr[slot] = lr->hashlink;
	}
	else {
		/* walk the chain until the predecessor of lr is found */

		for (; tmplr != NULL; tmplr = tmplr->hashlink) {
			if (tmplr->hashlink == lr) {
				tmplr->hashlink = lr->hashlink;
				break;
			}
		}

		/* the record must have been in this chain */
		assert(tmplr != NULL);
	}

	/* decrease entry count */

	lock_hashtable.entries--;

	// Unlock the hashtable.
	lock_hashtable.mutex->unlock();

	/* free the lock record */

	lock_record_free(lr);
}
612
613
614 /* lock_record_finalizer *******************************************************
615
616    XXX Remove me for exact GC.
617
618 *******************************************************************************/
619
/* GC callback registered in lock_hashtable_get: runs the object's own
   finalizer (if any), then frees the object's lock record. */
static void lock_record_finalizer(void *object, void *p)
{
	java_handle_t *o;
	classinfo     *c;

	o = (java_handle_t *) object;

#if !defined(ENABLE_GC_CACAO) && defined(ENABLE_HANDLES)
	/* XXX this is only a dirty hack to make Boehm work with handles */

	o = LLNI_WRAP((java_object_t *) o);
#endif

	LLNI_class_get(o, c);

#if !defined(NDEBUG)
	if (opt_DebugFinalizer) {
		log_start();
		log_print("[finalizer lockrecord: o=%p p=%p class=", object, p);
		class_print(c);
		log_print("]");
		log_finish();
	}
#endif

	/* check for a finalizer function */

	if (c->finalizer != NULL)
		finalizer_run(object, p);

	/* remove the lock-record entry from the hashtable and free it */

	lock_hashtable_remove(THREADOBJECT, o);
}
654
655
656 /*============================================================================*/
657 /* OBJECT LOCK INITIALIZATION                                                 */
658 /*============================================================================*/
659
660
661 /* lock_init_object_lock *******************************************************
662
663    Initialize the monitor pointer of the given object. The monitor gets
664    initialized to an unlocked state.
665
666 *******************************************************************************/
667
668 void lock_init_object_lock(java_object_t *o)
669 {
670         assert(o);
671
672         o->lockword = THIN_UNLOCKED;
673 }
674
675
676 /*============================================================================*/
677 /* LOCKING ALGORITHM                                                          */
678 /*============================================================================*/
679
680
681 /* lock_lockword_get ***********************************************************
682
683    Get the lockword for the given object.
684
685    IN:
686       t............the current thread
687       o............the object
688
689 *******************************************************************************/
690
static inline uintptr_t lock_lockword_get(threadobject *t, java_handle_t *o)
{
	uintptr_t lockword;

	/* direct object access (LLNI_DIRECT) must be wrapped in an LLNI
	   critical section for thread t */
	LLNI_CRITICAL_START_THREAD(t);
	lockword = LLNI_DIRECT(o)->lockword;
	LLNI_CRITICAL_END_THREAD(t);

	return lockword;
}
701
702
703 /* lock_lockword_set ***********************************************************
704
705    Set the lockword for the given object.
706
707    IN:
708       t............the current thread
709       o............the object
710           lockword.....the new lockword value
711
712 *******************************************************************************/
713
static inline void lock_lockword_set(threadobject *t, java_handle_t *o, uintptr_t lockword)
{
	/* direct object access (LLNI_DIRECT) must be wrapped in an LLNI
	   critical section for thread t */
	LLNI_CRITICAL_START_THREAD(t);
	LLNI_DIRECT(o)->lockword = lockword;
	LLNI_CRITICAL_END_THREAD(t);
}
720
721
722 /* lock_record_enter ***********************************************************
723
724    Enter the lock represented by the given lock record.
725
726    IN:
727       t.................the current thread
728           lr................the lock record
729
730 *******************************************************************************/
731
static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
{
	/* Block until the record's mutex is ours; only then record t as
	   the owner, so owner is always written under the mutex. */
	lr->mutex->lock();
	lr->owner = t;
}
737
738
739 /* lock_record_exit ************************************************************
740
741    Release the lock represented by the given lock record.
742
743    IN:
744       t.................the current thread
745           lr................the lock record
746
747    PRE-CONDITION:
748       The current thread must own the lock represented by this lock record.
749           This is NOT checked by this function!
750
751 *******************************************************************************/
752
static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
{
	/* Clear the owner BEFORE releasing the mutex: right after
	   unlock() another thread may acquire the record and install
	   itself as owner.  (t is unused; kept for symmetry with
	   lock_record_enter.) */
	lr->owner = NULL;
	lr->mutex->unlock();
}
758
759
760 /* lock_inflate ****************************************************************
761
762    Inflate the lock of the given object. This may only be called by the
763    owner of the monitor of the object.
764
765    IN:
766       t............the current thread
767           o............the object of which to inflate the lock
768           lr...........the lock record to install. The current thread must
769                        own the lock of this lock record!
770
771    PRE-CONDITION:
772       The current thread must be the owner of this object's monitor AND
773           of the lock record's lock!
774
775 *******************************************************************************/
776
static void lock_inflate(threadobject *t, java_handle_t *o, lock_record_t *lr)
{
	uintptr_t lockword;

	/* get the current lock count */

	lockword = lock_lockword_get(t, o);

	if (IS_FAT_LOCK(lockword)) {
		/* already inflated -- per the pre-condition it must be
		   inflated with exactly this record */
		assert(GET_FAT_LOCK(lockword) == lr);
		return;
	}
	else {
		/* still thin -- per the pre-condition we must be the
		   thin-lock owner */
		assert(LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);

		/* copy the count from the thin lock */

		lr->count = (lockword & THIN_LOCK_COUNT_MASK) >> THIN_LOCK_COUNT_SHIFT;
	}

	DEBUGLOCKS(("[lock_inflate      : lr=%p, t=%p, o=%p, o->lockword=%lx, count=%d]",
				lr, t, o, lockword, lr->count));

	/* install it */
	/* writing the fat lockword publishes the inflation to all
	   other threads */

	lock_lockword_set(t, o, MAKE_FAT_LOCK(lr));
}
804
805
/* Contention path for a thin lock held by another thread: set the
   holder's FLC ("flat lock contention") bit, enqueue ourselves on the
   holder's FLC list and wait until the holder notices the bit on
   unlock and wakes us (see notify_flc_waiters). */
static void sable_flc_waiting(ptrint lockword, threadobject *t, java_handle_t *o)
{
	int index;
	threadobject *t_other;
	int old_flc;

	/* identify the thread currently holding the thin lock */
	index = GET_THREAD_INDEX(lockword);
	t_other = ThreadList::get_thread_by_index(index);

	if (!t_other)
/*              failure, TODO: add statistics */
		return;

	t_other->flc_lock->lock();
	old_flc = t_other->flc_bit;
	t_other->flc_bit = true;

	DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d",
				t->index, t_other->index));

	// Set FLC bit first, then read the lockword again.
	Atomic::memory_barrier();

	lockword = lock_lockword_get(t, o);

	/* Lockword is still the way it was seen before */
	if (IS_THIN_LOCK(lockword) && (GET_THREAD_INDEX(lockword) == index))
	{
		/* Add tuple (t, o) to the other thread's FLC list */
		t->flc_object = o;
		t->flc_next = t_other->flc_list;
		t_other->flc_list = t;

		for (;;)
		{
			threadobject *current;

			// Wait until another thread sees the flc bit and notifies
			// us of unlocking.
			t->flc_cond->wait(t_other->flc_lock);

			/* Traverse FLC list looking if we're still there */
			/* (guards against spurious wakeups of the condvar) */
			current = t_other->flc_list;
			while (current && current != t)
				current = current->flc_next;
			if (!current)
				/* not in list anymore, can stop waiting */
				break;

			/* We are still in the list -- the other thread cannot have seen
			   the FLC bit yet */
			assert(t_other->flc_bit);
		}

		t->flc_object = NULL;   /* for garbage collector? */
		t->flc_next = NULL;
	}
	else
		/* the lock changed under us -- restore the previous FLC bit
		   and let the caller retry */
		t_other->flc_bit = old_flc;

	t_other->flc_lock->unlock();
}
868
/* Counterpart of sable_flc_waiting: called by the lock holder t when
   it sees its FLC bit set.  Inflates the locks of the objects its
   waiters are blocked on (so they can park on the fat lock instead)
   and wakes all threads on t's FLC list. */
static void notify_flc_waiters(threadobject *t, java_handle_t *o)
{
	threadobject *current;

	t->flc_lock->lock();

	current = t->flc_list;
	while (current)
	{
		if (current->flc_object != o)
		{
			/* The object has to be inflated so the other threads can properly
			   block on it. */

			/* Only if not already inflated */
			ptrint lockword = lock_lockword_get(t, current->flc_object);
			if (IS_THIN_LOCK(lockword)) {
				lock_record_t *lr = lock_hashtable_get(t, current->flc_object);
				lock_record_enter(t, lr);

				DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
							t->index, (void*) current->flc_object, (void*) lr));

				lock_inflate(t, current->flc_object, lr);
			}
		}

		// Wake the waiting threads.
		current->flc_cond->broadcast();

		current = current->flc_next;
	}

	/* empty the list and clear the contention bit under flc_lock */
	t->flc_list = NULL;
	t->flc_bit = false;

	t->flc_lock->unlock();
}
907
908 /* lock_monitor_enter **********************************************************
909
910    Acquire the monitor of the given object. If the current thread already
911    owns the monitor, the lock counter is simply increased.
912
913    This function blocks until it can acquire the monitor.
914
915    IN:
916       t............the current thread
917           o............the object of which to enter the monitor
918
919    RETURN VALUE:
920       true.........the lock has been successfully acquired
921           false........an exception has been thrown
922
923 *******************************************************************************/
924
bool lock_monitor_enter(java_handle_t *o)
{
	threadobject  *t;
	/* CAUTION: This code assumes that ptrint is unsigned! */
	ptrint         lockword;
	ptrint         thinlock;
	lock_record_t *lr;

	/* monitorenter on null throws NullPointerException */
	if (o == NULL) {
		exceptions_throw_nullpointerexception();
		return false;
	}

	t = THREADOBJECT;

	/* pre-computed thin-lock word for this thread (TID field set,
	   count and shape bits zero) */
	thinlock = t->thinlock;

retry:
	/* most common case: try to thin-lock an unlocked object */

	LLNI_CRITICAL_START_THREAD(t);
	lockword = Atomic::compare_and_swap(&(LLNI_DIRECT(o)->lockword), THIN_UNLOCKED, thinlock);
	LLNI_CRITICAL_END_THREAD(t);

	if (lockword == THIN_UNLOCKED) {
		/* success. we locked it */
		// The Java Memory Model requires an instruction barrier here
		// (because of the CAS above).
		Atomic::instruction_barrier();
		return true;
	}

	/* next common case: recursive lock with small recursion count */
	/* We don't have to worry about stale values here, as any stale value  */
	/* will indicate another thread holding the lock (or an inflated lock) */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		/* we own this monitor               */
		/* check the current recursion count */

		/* lockword ^ thinlock isolates the count field, since the
		   two words agree in every other bit */
		if ((lockword ^ thinlock) < (THIN_LOCK_COUNT_MAX << THIN_LOCK_COUNT_SHIFT))
		{
			/* the recursion count is low enough */

			lock_lockword_set(t, o, lockword + THIN_LOCK_COUNT_INCR);

			/* success. we locked it */
			return true;
		}
		else {
			/* recursion count overflow -- inflate to a fat lock and
			   continue counting in the lock record */

			lr = lock_hashtable_get(t, o);
			lock_record_enter(t, lr);
			lock_inflate(t, o, lr);
			lr->count++;

			notify_flc_waiters(t, o);

			return true;
		}
	}

	/* the lock is either contented or fat */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		/* check for recursive entering */
		if (lr->owner == t) {
			lr->count++;
			return true;
		}

		/* acquire the mutex of the lock record */
		/* blocks until the current owner exits the monitor */

		lock_record_enter(t, lr);

		assert(lr->count == 0);

		return true;
	}

	/****** inflation path ******/
	/* thin lock held by another thread: wait via the FLC protocol,
	   then retry from the top */

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Enter */
	jvmti_MonitorContendedEntering(false, o);
#endif

	sable_flc_waiting(lockword, t, o);

#if defined(ENABLE_JVMTI)
	/* Monitor Contended Entered */
	jvmti_MonitorContendedEntering(true, o);
#endif
	goto retry;
}
1024
1025
1026 /* lock_monitor_exit ***********************************************************
1027
1028    Decrement the counter of a (currently owned) monitor. If the counter
1029    reaches zero, release the monitor.
1030
   If the current thread is not the owner of the monitor, an
   IllegalMonitorStateException is thrown.

   IN:
      o............the object of which to exit the monitor
         (the current thread is obtained internally via THREADOBJECT)
1037
1038    RETURN VALUE:
1039       true.........everything ok,
1040           false........an exception has been thrown
1041
1042 *******************************************************************************/
1043
bool lock_monitor_exit(java_handle_t *o)
{
	threadobject *t;
	uintptr_t     lockword;
	ptrint        thinlock;

	if (o == NULL) {
		exceptions_throw_nullpointerexception();
		return false;
	}

	t = THREADOBJECT;

	thinlock = t->thinlock;

	/* We don't have to worry about stale values here, as any stale value */
	/* will indicate that we don't own the lock.                          */

	lockword = lock_lockword_get(t, o);

	/* most common case: we release a thin lock that we hold once */

	if (lockword == thinlock) {
		// Memory barrier for Java Memory Model.
		Atomic::write_memory_barrier();
		lock_lockword_set(t, o, THIN_UNLOCKED);
		// Memory barrier for thin locking.
		Atomic::memory_barrier();

		/* check if there has been a flat lock contention on this object */

		if (t->flc_bit) {
			DEBUGLOCKS(("thread %d saw flc bit", t->index));

			/* there has been a contention on this thin lock */
			notify_flc_waiters(t, o);
		}

		return true;
	}

	/* next common case: we release a recursive lock, count > 0 */
	/* (the count bits are part of the lock word, so a plain store of
	   the decremented word suffices) */

	if (LOCK_WORD_WITHOUT_COUNT(lockword) == thinlock) {
		lock_lockword_set(t, o, lockword - THIN_LOCK_COUNT_INCR);
		return true;
	}

	/* either the lock is fat, or we don't hold it at all */

	if (IS_FAT_LOCK(lockword)) {

		lock_record_t *lr;

		lr = GET_FAT_LOCK(lockword);

		/* check if we own this monitor */
		/* We don't have to worry about stale values here, as any stale value */
		/* will be != t and thus fail this check.                             */

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();
			return false;
		}

		/* { the current thread `t` owns the lock record `lr` on object `o` } */

		if (lr->count != 0) {
			/* we had locked this one recursively. just decrement, it will */
			/* still be locked. */
			lr->count--;
			return true;
		}

		/* unlock this lock record: clear the owner BEFORE releasing the
		   mutex, while we still hold it exclusively */

		lr->owner = NULL;
		lr->mutex->unlock();

		return true;
	}

	/* legal thin lock cases have been handled above, so this is an error */

	exceptions_throw_illegalmonitorstateexception();

	return false;
}
1132
1133
1134 /* lock_record_add_waiter ******************************************************
1135
1136    Add a thread to the list of waiting threads of a lock record.
1137
1138    IN:
1139       lr...........the lock record
1140       thread.......the thread to add
1141
1142 *******************************************************************************/
1143
1144 static void lock_record_add_waiter(lock_record_t *lr, threadobject* t)
1145 {
1146         // Add the thread as last entry to waiters list.
1147         lr->waiters->push_back(t);
1148
1149 #if defined(ENABLE_STATISTICS)
1150         if (opt_stat)
1151                 size_lock_waiter += sizeof(threadobject*);
1152 #endif
1153 }
1154
1155
1156 /* lock_record_remove_waiter ***************************************************
1157
1158    Remove a thread from the list of waiting threads of a lock record.
1159
1160    IN:
1161       lr...........the lock record
1162       t............the current thread
1163
1164    PRE-CONDITION:
1165       The current thread must be the owner of the lock record.
1166    
1167 *******************************************************************************/
1168
1169 static void lock_record_remove_waiter(lock_record_t *lr, threadobject* t)
1170 {
1171         // Remove the thread from the waiters.
1172         lr->waiters->remove(t);
1173
1174 #if defined(ENABLE_STATISTICS)
1175         if (opt_stat)
1176                 size_lock_waiter -= sizeof(threadobject*);
1177 #endif
1178 }
1179
1180
1181 /* lock_record_wait ************************************************************
1182
1183    Wait on a lock record for a given (maximum) amount of time.
1184
1185    IN:
1186       t............the current thread
1187           lr...........the lock record
1188           millis.......milliseconds of timeout
1189           nanos........nanoseconds of timeout
1190
1191    RETURN VALUE:
1192       true.........we have been interrupted,
1193       false........everything ok
1194
1195    PRE-CONDITION:
1196       The current thread must be the owner of the lock record.
1197           This is NOT checked by this function!
1198    
1199 *******************************************************************************/
1200
static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
{
	s4   lockcount;
	bool wasinterrupted = false;

	DEBUGLOCKS(("[lock_record_wait  : lr=%p, t=%p, millis=%lld, nanos=%d]",
				lr, thread, millis, nanos));

	/* { the thread t owns the fat lock record lr on the object o } */

	/* register us as waiter for this object */
	/* (must happen BEFORE we release the record, so a notifier that
	   acquires it right after our exit can already see us) */

	lock_record_add_waiter(lr, thread);

	/* remember the old lock count */

	lockcount = lr->count;

	/* unlock this record completely (drop all recursive entries) */

	lr->count = 0;
	lock_record_exit(thread, lr);

	/* wait until notified/interrupted/timed out */

	threads_wait_with_timeout_relative(thread, millis, nanos);

	/* re-enter the monitor */

	lock_record_enter(thread, lr);

	/* remove us from the list of waiting threads */

	lock_record_remove_waiter(lr, thread);

	/* restore the old lock count */

	lr->count = lockcount;

	/* We can only be signaled OR interrupted, not both. If both flags
	   are set, reset only signaled and leave the thread in
	   interrupted state. Otherwise, clear both. */

	if (!thread->signaled) {
		wasinterrupted = thread->interrupted;
		thread->interrupted = false;
	}

	thread->signaled = false;

	/* return if we have been interrupted */

	return wasinterrupted;
}
1255
1256
1257 /* lock_monitor_wait ***********************************************************
1258
1259    Wait on an object for a given (maximum) amount of time.
1260
1261    IN:
1262       t............the current thread
1263           o............the object
1264           millis.......milliseconds of timeout
1265           nanos........nanoseconds of timeout
1266
1267    PRE-CONDITION:
1268       The current thread must be the owner of the object's monitor.
1269    
1270 *******************************************************************************/
1271
static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
{
	uintptr_t      lockword;
	lock_record_t *lr;

	lockword = lock_lockword_get(t, o);

	/* check if we own this monitor */
	/* We don't have to worry about stale values here, as any stale value */
	/* will fail this check.                                              */

	if (IS_FAT_LOCK(lockword)) {

		lr = GET_FAT_LOCK(lockword);

		if (lr->owner != t) {
			exceptions_throw_illegalmonitorstateexception();
			return;
		}
	}
	else {
		/* it's a thin lock */

		if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
			exceptions_throw_illegalmonitorstateexception();
			return;
		}

		/* inflate this lock: waiting requires a fat lock record with a
		   waiter list, which a thin lock does not have */

		lr = lock_hashtable_get(t, o);
		lock_record_enter(t, lr);
		lock_inflate(t, o, lr);

		/* wake any threads blocked by flat-lock contention (FLC) on
		   the old thin lock */

		notify_flc_waiters(t, o);
	}

	/* { the thread t owns the fat lock record lr on the object o } */

	if (lock_record_wait(t, lr, millis, nanos))
		exceptions_throw_interruptedexception();
}
1314
1315
1316 /* lock_record_notify **********************************************************
1317
1318    Notify one thread or all threads waiting on the given lock record.
1319
1320    IN:
1321       t............the current thread
1322           lr...........the lock record
1323           one..........if true, only notify one thread
1324
1325    PRE-CONDITION:
1326       The current thread must be the owner of the lock record.
1327           This is NOT checked by this function!
1328    
1329 *******************************************************************************/
1330
static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
{
	/* { the thread t owns the fat lock record lr on the object o } */

	for (List<threadobject*>::iterator it = lr->waiters->begin(); it != lr->waiters->end(); it++) {
		threadobject* waiter = *it;

		// We must skip threads which have already been notified. They
		// will remove themselves from the list.
		if (waiter->signaled)
			continue;

		// Enter the wait-mutex.
		waiter->waitmutex->lock();

		DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, one=%d]", lr, t, waiter, one));

		// Signal the waiter.
		waiter->waitcond->signal();

		// Mark the thread as signaled (done while holding its
		// wait-mutex, together with the signal above).
		waiter->signaled = true;

		// Leave the wait-mutex.
		waiter->waitmutex->unlock();

		// If we should only wake one thread, we are done.
		if (one)
			break;
	}
}
1362
1363
1364 /* lock_monitor_notify *********************************************************
1365
1366    Notify one thread or all threads waiting on the given object.
1367
1368    IN:
1369       t............the current thread
1370           o............the object
1371           one..........if true, only notify one thread
1372
1373    PRE-CONDITION:
1374       The current thread must be the owner of the object's monitor.
1375    
1376 *******************************************************************************/
1377
1378 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1379 {
1380         uintptr_t      lockword;
1381         lock_record_t *lr;
1382
1383         lockword = lock_lockword_get(t, o);
1384
1385         /* check if we own this monitor */
1386         /* We don't have to worry about stale values here, as any stale value */
1387         /* will fail this check.                                              */
1388
1389         if (IS_FAT_LOCK(lockword)) {
1390
1391                 lr = GET_FAT_LOCK(lockword);
1392
1393                 if (lr->owner != t) {
1394                         exceptions_throw_illegalmonitorstateexception();
1395                         return;
1396                 }
1397         }
1398         else {
1399                 /* it's a thin lock */
1400
1401                 if (LOCK_WORD_WITHOUT_COUNT(lockword) != t->thinlock) {
1402                         exceptions_throw_illegalmonitorstateexception();
1403                         return;
1404                 }
1405
1406                 /* no thread can wait on a thin lock, so there's nothing to do. */
1407                 return;
1408         }
1409
1410         /* { the thread t owns the fat lock record lr on the object o } */
1411
1412         lock_record_notify(t, lr, one);
1413 }
1414
1415
1416
1417 /*============================================================================*/
/* INQUIRY FUNCTIONS                                                          */
1419 /*============================================================================*/
1420
1421
1422 /* lock_is_held_by_current_thread **********************************************
1423
1424    Return true if the current thread owns the monitor of the given object.
1425
1426    IN:
1427           o............the object
1428
1429    RETURN VALUE:
1430       true, if the current thread holds the lock of this object.
1431    
1432 *******************************************************************************/
1433
1434 bool lock_is_held_by_current_thread(java_handle_t *o)
1435 {
1436         threadobject  *t;
1437         uintptr_t      lockword;
1438         lock_record_t *lr;
1439
1440         t = THREADOBJECT;
1441
1442         /* check if we own this monitor */
1443         /* We don't have to worry about stale values here, as any stale value */
1444         /* will fail this check.                                              */
1445
1446         lockword = lock_lockword_get(t, o);
1447
1448         if (IS_FAT_LOCK(lockword)) {
1449                 /* it's a fat lock */
1450
1451                 lr = GET_FAT_LOCK(lockword);
1452
1453                 return (lr->owner == t);
1454         }
1455         else {
1456                 /* it's a thin lock */
1457
1458                 return (LOCK_WORD_WITHOUT_COUNT(lockword) == t->thinlock);
1459         }
1460 }
1461
1462
1463
1464 /*============================================================================*/
1465 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD                              */
1466 /*============================================================================*/
1467
1468
1469 /* lock_wait_for_object ********************************************************
1470
1471    Wait for the given object.
1472
1473    IN:
1474           o............the object
1475           millis.......milliseconds to wait
1476           nanos........nanoseconds to wait
1477    
1478 *******************************************************************************/
1479
1480 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1481 {
1482         threadobject *thread;
1483
1484         thread = THREADOBJECT;
1485
1486         lock_monitor_wait(thread, o, millis, nanos);
1487 }
1488
1489
1490 /* lock_notify_object **********************************************************
1491
1492    Notify one thread waiting on the given object.
1493
1494    IN:
1495           o............the object
1496    
1497 *******************************************************************************/
1498
1499 void lock_notify_object(java_handle_t *o)
1500 {
1501         threadobject *thread;
1502
1503         thread = THREADOBJECT;
1504
1505         lock_monitor_notify(thread, o, true);
1506 }
1507
1508
1509 /* lock_notify_all_object ******************************************************
1510
1511    Notify all threads waiting on the given object.
1512
1513    IN:
1514           o............the object
1515    
1516 *******************************************************************************/
1517
1518 void lock_notify_all_object(java_handle_t *o)
1519 {
1520         threadobject *thread;
1521
1522         thread = THREADOBJECT;
1523
1524         lock_monitor_notify(thread, o, false);
1525 }
1526
1527
1528 /*
1529  * These are local overrides for various environment variables in Emacs.
1530  * Please do not remove this and leave it at the end of the file, where
1531  * Emacs will automagically detect them.
1532  * ---------------------------------------------------------------------
1533  * Local variables:
1534  * mode: c++
1535  * indent-tabs-mode: t
1536  * c-basic-offset: 4
1537  * tab-width: 4
1538  * End:
1539  * vim:noexpandtab:sw=4:ts=4:
1540  */