/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#ifndef GC_LOCKS_H
#define GC_LOCKS_H
/*
 * Mutual exclusion between allocator/collector routines.
 * Needed if there is more than one allocator thread.
 * FASTLOCK() is assumed to try to acquire the lock in a cheap and
 * dirty way that is acceptable for a few instructions, e.g. by
 * inhibiting preemption.  This is assumed to have succeeded only
 * if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
 * FASTUNLOCK() is called whether or not FASTLOCK_SUCCEEDED().
 * If signals cannot be tolerated with the FASTLOCK held, then
 * FASTLOCK should disable signals.  The code executed under
 * FASTLOCK is otherwise immune to interruption, provided it is
 * not restarted.
 * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
 * and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
 * (There is currently no equivalent for FASTLOCK.)
 *
 * In the PARALLEL_MARK case, we also need to define a number of
 * other inline functions here:
 *   GC_bool GC_compare_and_exchange( volatile GC_word *addr,
 *                                    GC_word old, GC_word new )
 *   GC_word GC_atomic_add( volatile GC_word *addr, GC_word how_much )
 *   void GC_memory_barrier( )
 */
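/* Illustrative sketch only (not part of this interface): the intended	*/
/* calling pattern for the fast-lock primitives described above.  The	*/
/* routine name is hypothetical.					*/
#if 0
void GC_example_routine(void)
{
    DCL_LOCK_STATE;

    FASTLOCK();
    if (!FASTLOCK_SUCCEEDED()) {
        /* The cheap attempt failed; FASTUNLOCK() is still required. */
        FASTUNLOCK();
        LOCK();
        /* ... critical section, slow path ... */
        UNLOCK();
        return;
    }
    /* ... a few instructions of critical section, fast path ... */
    FASTUNLOCK();
}
#endif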
# ifdef THREADS
   void GC_noop1 GC_PROTO((word));
#  ifdef PCR_OBSOLETE	/* Faster, but broken with multiple lwp's */
#    include "th/PCR_Th.h"
#    include "th/PCR_ThCrSec.h"
     extern struct PCR_Th_MLRep GC_allocate_ml;
#    define DCL_LOCK_STATE  PCR_sigset_t GC_old_sig_mask
#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#    define FASTLOCK() PCR_ThCrSec_EnterSys()
       /* Here we cheat (a lot): */
#    define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0)
	/* TRUE if nobody currently holds the lock */
#    define FASTUNLOCK() PCR_ThCrSec_ExitSys()
#  endif

#  ifdef PCR
#    include <base/PCR_Base.h>
#    include <th/PCR_Th.h>
     extern PCR_Th_ML GC_allocate_ml;
#    define DCL_LOCK_STATE \
	 PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#    define FASTLOCK() (GC_fastLockRes = PCR_Th_ML_Try(&GC_allocate_ml))
#    define FASTLOCK_SUCCEEDED() (GC_fastLockRes == PCR_ERes_okay)
#    define FASTUNLOCK() {\
	 if( FASTLOCK_SUCCEEDED() ) PCR_Th_ML_Release(&GC_allocate_ml); }
#  endif
#  ifdef SRC_M3
     extern GC_word RT0u__inCritical;
#    define LOCK() RT0u__inCritical++
#    define UNLOCK() RT0u__inCritical--
#  endif
#  ifdef GC_AIX_THREADS
#    include <pthread.h>
     extern pthread_mutex_t GC_allocate_ml;
#    define LOCK() pthread_mutex_lock(&GC_allocate_ml)
#    define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
#  endif
#  ifdef GC_SOLARIS_THREADS
#    include <thread.h>
#    include <signal.h>
     extern mutex_t GC_allocate_ml;
#    define LOCK() mutex_lock(&GC_allocate_ml);
#    define UNLOCK() mutex_unlock(&GC_allocate_ml);
#  endif
  /* Try to define GC_TEST_AND_SET and a matching GC_CLEAR for spin lock */
  /* acquisition and release.  We need this for correct operation of the */
  /* spin-lock-based allocation lock (see USE_SPIN_LOCK below).	  */
# if defined(__GNUC__)
#   if defined(I386)
     inline static int GC_test_and_set(volatile unsigned int *addr) {
	  int oldval;
	  /* Note: the "xchg" instruction does not need a "lock" prefix */
	  __asm__ __volatile__("xchgl %0, %1"
		: "=r"(oldval), "=m"(*(addr))
		: "0"(1), "m"(*(addr)) : "memory");
	  return oldval;
     }
#    define GC_TEST_AND_SET_DEFINED
#   endif
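  /* Illustrative sketch only: how a spin lock can be built from the	*/
  /* GC_test_and_set/GC_clear pair defined in this section.  The lock	*/
  /* word and function names are hypothetical, not part of the GC.	*/
#if 0
   static volatile unsigned int example_lock = 0;

   static void example_acquire(void)
   {
     while (GC_test_and_set(&example_lock)) {
	/* Spin until the holder calls GC_clear().  A real	*/
	/* implementation would back off; see GC_lock() below.	*/
     }
   }

   static void example_release(void)
   {
     GC_clear(&example_lock);	/* release store, or store after barrier */
   }
#endif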
#   ifdef IA64
#    if defined(__INTEL_COMPILER)
#      include <ia64intrin.h>
#    endif
     inline static int GC_test_and_set(volatile unsigned int *addr) {
	  long oldval, n = 1;
#	ifndef __INTEL_COMPILER
	  __asm__ __volatile__("xchg4 %0=%1,%2"
		: "=r"(oldval), "=m"(*addr)
		: "r"(n) : "memory");
#	else
	  oldval = _InterlockedExchange(addr, n);
#	endif
	  return oldval;
     }
#    define GC_TEST_AND_SET_DEFINED
     /* Should this handle post-increment addressing?? */
     inline static void GC_clear(volatile unsigned int *addr) {
#	ifndef __INTEL_COMPILER
	  __asm__ __volatile__("st4.rel %0=r0" : "=m" (*addr) : : "memory");
#	else
	  /* There is no st4 intrinsic; xchg should work here. */
	  _InterlockedExchange(addr, 0);
#	endif
     }
#    define GC_CLEAR_DEFINED
#   endif
#   ifdef SPARC
     inline static int GC_test_and_set(volatile unsigned int *addr) {
	 int oldval;

	 __asm__ __volatile__("ldstub %1,%0"
		: "=r"(oldval), "=m"(*addr)
		: "m"(*addr) : "memory");
	 return oldval;
     }
#    define GC_TEST_AND_SET_DEFINED
#   endif
#   ifdef M68K
     /* Contributed by Tony Mantler.  I'm not sure how well it was	*/
     /* tested.								*/
     inline static int GC_test_and_set(volatile unsigned int *addr) {
	  char oldval; /* this must be no longer than 8 bits */

	  /* The return value is semi-phony.	*/
	  /* 'tas' sets bit 7 while the return	*/
	  /* value pretends bit 0 was set.	*/
	  __asm__ __volatile__(
		 "tas %1@; sne %0; negb %0"
		 : "=d" (oldval)
		 : "a" (addr) : "memory");
	  return oldval;
     }
#    define GC_TEST_AND_SET_DEFINED
#   endif
#   if defined(POWERPC)
     inline static int GC_test_and_set(volatile unsigned int *addr) {
        int oldval;
        int temp = 1; /* locked value */

        __asm__ __volatile__(
               "1:\tlwarx %0,0,%1\n"	/* load and reserve		  */
               "\tcmpwi %0, 0\n"	/* if load is			  */
               "\tbne 2f\n"		/*   non-zero, return already set */
               "\tstwcx. %2,0,%1\n"	/* else store conditional	  */
               "\tbne- 1b\n"		/* retry if lost reservation	  */
               "\tsync\n"		/* import barrier		  */
               "2:\t\n"			/* oldval is zero if we set	  */
              : "=&r"(oldval)
              : "r"(addr), "r"(temp)
              : "cr0","memory");
        return oldval;
     }
#    define GC_TEST_AND_SET_DEFINED
     inline static void GC_clear(volatile unsigned int *addr) {
	__asm__ __volatile__("lwsync" : : : "memory");
	*(addr) = 0;
     }
#    define GC_CLEAR_DEFINED
#   endif
#   ifdef ALPHA
     inline static int GC_test_and_set(volatile unsigned int * addr)
     {
       unsigned long oldvalue;
       unsigned long temp;

       __asm__ __volatile__(
		"1:	ldl_l %0,%1\n"	/* load locked			*/
		"	and %0,%3,%2\n"
		"	bne %2,2f\n"	/* already set: done		*/
		"	xor %0,%3,%0\n"
		"	stl_c %0,%1\n"	/* store conditional		*/
		"	beq %0,3f\n"	/* branch to out-of-line retry	*/
		"	mb\n"
		"2:\n"
		".section .text2,\"ax\"\n"
		"3:	br 1b\n"	/* retry from the start		*/
		".previous"
		:"=&r" (temp), "=m" (*addr), "=&r" (oldvalue)
		:"Ir" (1), "m" (*addr)
		:"memory");

       return oldvalue;
     }
#    define GC_TEST_AND_SET_DEFINED
     inline static void GC_clear(volatile unsigned int *addr) {
       __asm__ __volatile__("mb" : : : "memory");
       *(addr) = 0;
     }
#    define GC_CLEAR_DEFINED
#   endif /* ALPHA */
#   ifdef ARM32
     inline static int GC_test_and_set(volatile unsigned int *addr) {
#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
      int ret, tmp;
      __asm__ __volatile__ (
		"1:\n"
		"ldrex %0, [%3]\n"	/* load-exclusive old value	*/
		"strex %1, %2, [%3]\n"	/* try to store 1		*/
		"teq %1, #0\n"
		"bne 1b\n"		/* retry if the store failed	*/
		: "=&r" (ret), "=&r" (tmp)
		: "r" (1), "r" (addr)
		: "memory", "cc");
      return ret;
#else
      int oldval;
      /* SWP on ARM is very similar to XCHG on x86.  Doesn't lock the
       * bus because there are no SMP ARM machines.  If/when there are,
       * this code will likely need to be updated. */
      /* See linuxthreads/sysdeps/arm/pt-machine.h in glibc-2.1 */
      __asm__ __volatile__("swp %0, %1, [%2]"
			 : "=&r"(oldval)
			 : "r"(1), "r"(addr)
			 : "memory");
      return oldval;
#endif
     }
#    define GC_TEST_AND_SET_DEFINED
     inline static void GC_clear(volatile unsigned int *addr) {
#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
	  /* Data memory barrier, via CP15 (the pre-ARMv7 "dmb" encoding). */
	  __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory");
#endif
	  *(addr) = 0;
     }
#    define GC_CLEAR_DEFINED
#   endif /* ARM32 */
#   ifdef CRIS
     inline static int GC_test_and_set(volatile unsigned int *addr) {
	/* Ripped from linuxthreads/sysdeps/cris/pt-machine.h.	*/
	/* Included with Hans-Peter Nilsson's permission.	*/
	register unsigned long int ret;

	/* Note the use of a dummy output of *addr to expose the write.
	 * The memory barrier is to stop *other* writes being moved past
	 * this code.
	 */
	__asm__ __volatile__("clearf\n"
			     "0:\n\t"
			     "movu.b [%2],%0\n\t"
			     "ax\n\t"
			     "move.b %3,[%2]\n\t"
			     "bwf 0b\n\t"
			     "clearf"
			     : "=&r" (ret), "=m" (*addr)
			     : "r" (addr), "r" ((int) 1), "m" (*addr)
			     : "memory");
	return ret;
     }
#    define GC_TEST_AND_SET_DEFINED
#   endif /* CRIS */
#   ifdef S390
     inline static int GC_test_and_set(volatile unsigned int *addr) {
      int ret;
      __asm__ __volatile__ (
       "   l    %0,0(%2)\n"	/* load the current value	*/
       "0: cs   %0,%1,0(%2)\n"	/* try to swap in 1		*/
       "   jl   0b"		/* retry if *addr changed	*/
	: "=&d" (ret)
	: "d" (1), "a" (addr)
	: "cc", "memory");
      return ret;
     }
#    define GC_TEST_AND_SET_DEFINED
#   endif /* S390 */
# endif /* __GNUC__ */
# if (defined(ALPHA) && !defined(__GNUC__))
#   ifndef OSF1
	--> We currently assume that if gcc is not used, we are
	--> running under Tru64.
#   endif
#   include <machine/builtins.h>
#   include <c_asm.h>
#   define GC_test_and_set(addr) __ATOMIC_EXCH_LONG(addr, 1)
#   define GC_TEST_AND_SET_DEFINED
#   define GC_clear(addr) { asm("mb"); *(volatile unsigned *)addr = 0; }
#   define GC_CLEAR_DEFINED
# endif
# if defined(MSWIN32)
#   define GC_test_and_set(addr) InterlockedExchange((LPLONG)addr,1)
#   define GC_TEST_AND_SET_DEFINED
# endif
# ifdef MIPS
#   ifdef LINUX
#     include <sys/tas.h>
#     define GC_test_and_set(addr) _test_and_set((int *) addr,1)
#     define GC_TEST_AND_SET_DEFINED
#   elif __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
	|| !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
#     ifdef __GNUC__
#       define GC_test_and_set(addr) _test_and_set((void *)addr,1)
#     else
#       define GC_test_and_set(addr) test_and_set((void *)addr,1)
#     endif
#   else
#     include <sgidefs.h>
#     include <mutex.h>
#     define GC_test_and_set(addr) __test_and_set32((void *)addr,1)
#     define GC_clear(addr) __lock_release(addr);
#     define GC_CLEAR_DEFINED
#   endif
#   define GC_TEST_AND_SET_DEFINED
# endif /* MIPS */
# if defined(_AIX)
#   include <sys/atomic_op.h>
#   if (defined(_POWER) || defined(_POWERPC))
#     if defined(__GNUC__)
       inline static void GC_memsync() {
	 __asm__ __volatile__ ("sync" : : : "memory");
       }
#     else
#       ifndef inline
#         define inline __inline
#       endif
#       pragma mc_func GC_memsync { \
	  "7c0004ac" /* sync (same opcode used for dcs) */ \
	}
#     endif
#   else
#     error dont know how to memsync
#   endif
     inline static int GC_test_and_set(volatile unsigned int * addr) {
       int oldvalue = 0;
       if (compare_and_swap((void *)addr, &oldvalue, 1)) {
	 GC_memsync();
	 return 0;
       } else return 1;
     }
#    define GC_TEST_AND_SET_DEFINED
     inline static void GC_clear(volatile unsigned int *addr) {
       GC_memsync();
       *(addr) = 0;
     }
#    define GC_CLEAR_DEFINED
# endif /* _AIX */
# if 0 /* defined(HP_PA) */
    /* The official recommendation seems to be to not use ldcw from	*/
    /* user mode.  Since multithreaded incremental collection doesn't	*/
    /* work anyway on HP_PA, this shouldn't be a major loss.		*/

    /* "set" means 0 and "clear" means 1 here.	*/
#   define GC_test_and_set(addr) !GC_test_and_clear(addr);
#   define GC_TEST_AND_SET_DEFINED
#   define GC_clear(addr) GC_noop1((word)(addr)); *(volatile unsigned int *)addr = 1;
	/* The above needs a memory barrier! */
#   define GC_CLEAR_DEFINED
# endif
# if defined(GC_TEST_AND_SET_DEFINED) && !defined(GC_CLEAR_DEFINED)
#   ifdef __GNUC__
      inline static void GC_clear(volatile unsigned int *addr) {
	/* Try to discourage gcc from moving anything past this. */
	__asm__ __volatile__(" " : : : "memory");
	*(addr) = 0;
      }
#   else
	    /* The function call in the following should prevent the	*/
	    /* compiler from moving assignments to below the UNLOCK.	*/
#     define GC_clear(addr) GC_noop1((word)(addr)); \
			    *((volatile unsigned int *)(addr)) = 0;
#   endif
#   define GC_CLEAR_DEFINED
# endif /* !GC_CLEAR_DEFINED */
# if !defined(GC_TEST_AND_SET_DEFINED)
#   define USE_PTHREAD_LOCKS
# endif
# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
    && !defined(GC_WIN32_THREADS)
#   define NO_THREAD (pthread_t)(-1)
#   include <pthread.h>
#   if defined(PARALLEL_MARK)
      /* We need compare-and-swap to update mark bits, where it's	*/
      /* performance critical.  If USE_MARK_BYTES is defined, it is	*/
      /* no longer needed for this purpose.  However we use it in	*/
      /* either case to implement atomic fetch-and-add, though that's	*/
      /* less performance critical, and could perhaps be done with	*/
      /* a lock.							*/
#     if defined(GENERIC_COMPARE_AND_SWAP)
	/* Probably not useful, except for debugging.			*/
	/* We do use GENERIC_COMPARE_AND_SWAP on PA_RISC, but we	*/
	/* minimize its use.						*/
	extern pthread_mutex_t GC_compare_and_swap_lock;

	/* Note that if GC_word updates are not atomic, a concurrent	*/
	/* reader should acquire GC_compare_and_swap_lock.  On		*/
	/* currently supported platforms, such updates are atomic.	*/
	extern GC_bool GC_compare_and_exchange(volatile GC_word *addr,
					       GC_word old, GC_word new_val);
#     endif /* GENERIC_COMPARE_AND_SWAP */
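	/* Illustrative sketch only: one plausible lock-based		*/
	/* implementation of the GENERIC_COMPARE_AND_SWAP variant	*/
	/* declared above.  The real definition lives in the threads	*/
	/* support code, not in this header.				*/
#if 0
	GC_bool GC_compare_and_exchange(volatile GC_word *addr,
					GC_word old, GC_word new_val)
	{
	  GC_bool result;

	  pthread_mutex_lock(&GC_compare_and_swap_lock);
	  if (*addr == old) {
	    *addr = new_val;	/* matched: install the new value */
	    result = TRUE;
	  } else {
	    result = FALSE;	/* *addr changed under us */
	  }
	  pthread_mutex_unlock(&GC_compare_and_swap_lock);
	  return result;
	}
#endif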
#     if defined(I386)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
	 /* Returns TRUE if the comparison succeeded. */
	 inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
						       GC_word old,
						       GC_word new_val)
	 {
	   char result;
	   __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
		: "+m"(*(addr)), "=q"(result)
		: "r" (new_val), "a"(old) : "memory");
	   return (GC_bool) result;
	 }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
       inline static void GC_memory_barrier()
       {
	 /* We believe the processor ensures at least processor		*/
	 /* consistent ordering.  Thus a compiler barrier		*/
	 /* should suffice.						*/
	 __asm__ __volatile__("" : : : "memory");
       }
#     endif /* I386 */

#     if defined(X86_64)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
	 /* Returns TRUE if the comparison succeeded. */
	 inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
						       GC_word old,
						       GC_word new_val)
	 {
	   char result;
	   __asm__ __volatile__("lock; cmpxchgq %2, %0; setz %1"
		: "+m"(*(addr)), "=r"(result)
		: "r" (new_val), "a"(old) : "memory");
	   return (GC_bool) result;
	 }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
       inline static void GC_memory_barrier()
       {
	 /* We believe the processor ensures at least processor		*/
	 /* consistent ordering.  Thus a compiler barrier		*/
	 /* should suffice.						*/
	 __asm__ __volatile__("" : : : "memory");
       }
#     endif /* X86_64 */
#     if defined(POWERPC)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
#       if CPP_WORDSZ == 64
	/* Returns TRUE if the comparison succeeded. */
	inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
	    GC_word old, GC_word new_val)
	{
#        if HAS___SYNC_BOOL_COMPARE_AND_SWAP
	  return __sync_bool_compare_and_swap(addr, old, new_val);
#        else
	  unsigned long result, dummy;
	  __asm__ __volatile__(
		"1:\tldarx %0,0,%5\n"	/* load and reserve		*/
		  "\tcmpd %0,%4\n"	/* compare with old		*/
		  "\tbne  2f\n"		/* mismatch: fail		*/
		  "\tstdcx. %3,0,%2\n"	/* store conditional		*/
		  "\tbne- 1b\n"		/* retry if lost reservation	*/
		  "\tsync\n"		/* import barrier		*/
		  "\tli %1, 1\n"
		  "\tb 3f\n"
		"2:\tli %1, 0\n"
		"3:\t\n"
		: "=&r" (dummy), "=r" (result), "=p" (addr)
		: "r" (new_val), "r" (old), "2"(addr)
		: "cr0","memory");
	  return (GC_bool) result;
#        endif
	}
#       else
	/* Returns TRUE if the comparison succeeded. */
	inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
	    GC_word old, GC_word new_val)
	{
#        if HAS___SYNC_BOOL_COMPARE_AND_SWAP
	  return __sync_bool_compare_and_swap(addr, old, new_val);
#        else
	  int result, dummy;
	  __asm__ __volatile__(
		"1:\tlwarx %0,0,%5\n"	/* load and reserve		*/
		  "\tcmpw %0,%4\n"	/* compare with old		*/
		  "\tbne  2f\n"		/* mismatch: fail		*/
		  "\tstwcx. %3,0,%2\n"	/* store conditional		*/
		  "\tbne- 1b\n"		/* retry if lost reservation	*/
		  "\tsync\n"		/* import barrier		*/
		  "\tli %1, 1\n"
		  "\tb 3f\n"
		"2:\tli %1, 0\n"
		"3:\t\n"
		: "=&r" (dummy), "=r" (result), "=p" (addr)
		: "r" (new_val), "r" (old), "2"(addr)
		: "cr0","memory");
	  return (GC_bool) result;
#        endif
	}
#       endif
#      endif /* !GENERIC_COMPARE_AND_SWAP */
       inline static void GC_memory_barrier()
       {
	 __asm__ __volatile__("sync" : : : "memory");
       }
#     endif /* POWERPC */
#     if defined(SPARC)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
#       if CPP_WORDSZ == 64
	/* Returns TRUE if the comparison succeeded. */
	inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
	    GC_word old, GC_word new_val)
	{
	  unsigned long result;
	  __asm__ __volatile__(
	       "casx [%2], %3, %0"
		: "=r" (result)
		: "0" (new_val), "r" (addr), "r" (old)
		: "memory");
	  return (GC_bool) (result == old);
	}
#       else
	/* Returns TRUE if the comparison succeeded. */
	inline static GC_bool GC_compare_and_exchange(volatile GC_word *_addr,
	    GC_word _old, GC_word _new_val)
	{
	   register unsigned long result asm("o0");
	   register unsigned long old asm("o1");
	   register volatile GC_word *addr asm("o2");
	   result = _new_val;
	   old = _old;
	   addr = _addr;
	   __asm__ __volatile__(
	       /* We encode the instruction directly so that it
		  doesn't taint the whole binary as v9-only. */
	       ".word 0xd1e29009" /* cas [%o2], %o1, %o0 */
		: "=r" (result)
		: "0" (result), "r" (addr), "r"(old)
		: "memory");
	   return (GC_bool) (result == old);
	}
#       endif
#      endif /* !GENERIC_COMPARE_AND_SWAP */
       inline static void GC_memory_barrier()
       {
	 /* All sparc v9 chips provide processor consistent ordering.	*/
	 /* Thus a compiler barrier should suffice.			*/
	 __asm__ __volatile__("" : : : "memory");
       }
#     endif /* SPARC */
#     if defined(IA64)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
	 inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
						       GC_word old,
						       GC_word new_val)
	 {
	  unsigned long oldval;
#	  if CPP_WORDSZ == 32
	    __asm__ __volatile__(
		  "addp4 %0=0,%1 ;;\n"
		  "mov ar.ccv=%3 ;; cmpxchg4.rel %0=[%0],%2,ar.ccv"
		  : "=&r"(oldval)
		  : "r"(addr), "r"(new_val), "r"(old) : "memory");
#	  else
	    __asm__ __volatile__(
		  "mov ar.ccv=%3 ;; cmpxchg8.rel %0=[%1],%2,ar.ccv"
		  : "=r"(oldval)
		  : "r"(addr), "r"(new_val), "r"(old) : "memory");
#	  endif
	  return (oldval == old);
	 }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
       /* Shouldn't be needed; we use volatile stores instead. */
       inline static void GC_memory_barrier()
       {
	 __asm__ __volatile__("mf" : : : "memory");
       }
#     endif /* IA64 */
#     if defined(ALPHA)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
#       if defined(__GNUC__)
	  inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
							GC_word old,
							GC_word new_val)
	  {
	    unsigned long was_equal;
	    unsigned long temp;

	    __asm__ __volatile__(
		"1:	ldq_l %0,%1\n"		/* load locked		*/
		"	cmpeq %0,%4,%2\n"	/* compare with old	*/
		"	mov %3,%0\n"
		"	beq %2,2f\n"		/* mismatch: give up	*/
		"	stq_c %0,%1\n"		/* store conditional	*/
		"	beq %0,1b\n"		/* retry on failure	*/
		"2:\n"
		"	mb\n"
		:"=&r" (temp), "=m" (*addr), "=&r" (was_equal)
		: "r" (new_val), "Ir" (old)
		:"memory");
	    return (GC_bool) was_equal;
	  }
#       else /* !__GNUC__ */
	  inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
							GC_word old,
							GC_word new_val)
	  {
	    return __CMP_STORE_QUAD(addr, old, new_val, addr);
	  }
#       endif /* !__GNUC__ */
#      endif /* !GENERIC_COMPARE_AND_SWAP */
#      ifdef __GNUC__
	 inline static void GC_memory_barrier()
	 {
	   __asm__ __volatile__("mb" : : : "memory");
	 }
#      else
#	 define GC_memory_barrier() asm("mb")
#      endif /* !__GNUC__ */
#     endif /* ALPHA */
#     if defined(S390)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
	 inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
						       GC_word old,
						       GC_word new_val)
	 {
	   int retval;
	   __asm__ __volatile__ (
#	    ifndef __s390x__
	     "     cs  %1,%2,0(%3)\n"	/* 32-bit compare-and-swap */
#	    else
	     "     csg %1,%2,0(%3)\n"	/* 64-bit compare-and-swap */
#	    endif
	     "     ipm %0\n"		/* extract condition code  */
	     "     srl %0,28\n"
	     : "=&d" (retval), "+d" (old)
	     : "d" (new_val), "a" (addr)
	     : "cc", "memory");
	   return retval == 0;
	 }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
#      define GC_memory_barrier()
#     endif /* S390 */
#     if !defined(GENERIC_COMPARE_AND_SWAP)
	/* Returns the original value of *addr.	*/
	inline static GC_word GC_atomic_add(volatile GC_word *addr,
					    GC_word how_much)
	{
	  GC_word old;
	  do {
	    old = *addr;
	  } while (!GC_compare_and_exchange(addr, old, old+how_much));
	  return old;
	}
#     else /* GENERIC_COMPARE_AND_SWAP */
	/* So long as a GC_word can be atomically updated, it should	*/
	/* be OK to read *addr without a lock.				*/
	extern GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much);
#     endif /* GENERIC_COMPARE_AND_SWAP */

#   endif /* PARALLEL_MARK */
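    /* Illustrative sketch only: the kind of retry loop the parallel	*/
    /* marker builds from GC_compare_and_exchange.  The mark-word	*/
    /* layout shown here is hypothetical, not the GC's actual one.	*/
#if 0
    static void example_set_mark_bit(volatile GC_word *mark_word,
				     unsigned bit_no)
    {
      GC_word old_val, new_val;

      do {
	old_val = *mark_word;
	new_val = old_val | ((GC_word)1 << bit_no);
	if (new_val == old_val) return;		/* already marked */
      } while (!GC_compare_and_exchange(mark_word, old_val, new_val));
    }
#endif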
#   if !defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_LOCKS)
      /* In the THREAD_LOCAL_ALLOC case, the allocation lock tends to	*/
      /* be held for long periods, if it is held at all.  Thus spinning */
      /* and sleeping for fixed periods are likely to result in	*/
      /* significant wasted time.  We thus rely mostly on queued locks. */
#     define USE_SPIN_LOCK
      extern volatile unsigned int GC_allocate_lock;
      extern void GC_lock(void);
	/* Allocation lock holder.  Only set if acquired by client through */
	/* GC_call_with_alloc_lock.					    */
#     ifdef GC_ASSERTIONS
#	 define LOCK() \
		{ if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); \
		  SET_LOCK_HOLDER(); }
#	 define UNLOCK() \
		{ GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
		  GC_clear(&GC_allocate_lock); }
#     else
#	 define LOCK() \
		{ if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
#	 define UNLOCK() \
		GC_clear(&GC_allocate_lock)
#     endif /* !GC_ASSERTIONS */
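      /* Illustrative sketch only: GC_lock() itself is defined in the	*/
      /* threads support code, not here.  A plausible spin-then-yield	*/
      /* shape is shown below; the real implementation is more		*/
      /* elaborate, since fixed spinning wastes time (see above).	*/
#if 0
#     include <sched.h>

      void GC_lock(void)
      {
	unsigned spins = 0;

	while (GC_test_and_set(&GC_allocate_lock)) {
	  if (++spins % 1000 != 0) continue;	/* brief busy wait	*/
	  sched_yield();			/* then yield and retry	*/
	}
      }
#endif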
	/* Another alternative for OSF1 might be:			*/
#     if 0
#	include <sys/mman.h>
	extern msemaphore GC_allocate_semaphore;
#	define LOCK() { if (msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) \
			    != 0) GC_lock(); else GC_allocate_lock = 1; }
	/* The following is INCORRECT, since the memory model is too weak. */
	/* Is this true?  Presumably msem_unlock has the right semantics?  */
#	define UNLOCK() { GC_allocate_lock = 0; \
			  msem_unlock(&GC_allocate_semaphore, 0); }
#     endif /* 0 */
#   else /* THREAD_LOCAL_ALLOC || USE_PTHREAD_LOCKS */
#     ifndef USE_PTHREAD_LOCKS
#	define USE_PTHREAD_LOCKS
#     endif
#   endif /* THREAD_LOCAL_ALLOC */
#   ifdef USE_PTHREAD_LOCKS
#     include <pthread.h>
      extern pthread_mutex_t GC_allocate_ml;
#     ifdef GC_ASSERTIONS
#	define LOCK() \
		{ GC_lock(); \
		  SET_LOCK_HOLDER(); }
#	define UNLOCK() \
		{ GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
		  pthread_mutex_unlock(&GC_allocate_ml); }
#     else /* !GC_ASSERTIONS */
#	if defined(NO_PTHREAD_TRYLOCK)
#	  define LOCK() GC_lock();
#	else /* !defined(NO_PTHREAD_TRYLOCK) */
#	  define LOCK() \
	   { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
#	endif
#	define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
#     endif /* !GC_ASSERTIONS */
#   endif /* USE_PTHREAD_LOCKS */
#   define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
#   define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
#   define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
    extern VOLATILE GC_bool GC_collecting;
#   define ENTER_GC() GC_collecting = 1;
#   define EXIT_GC() GC_collecting = 0;
    extern void GC_lock(void);
    extern pthread_t GC_lock_holder;
#   ifdef GC_ASSERTIONS
      extern pthread_t GC_mark_lock_holder;
#   endif
# endif /* GC_PTHREADS with linux_threads.c implementation */
# if defined(GC_WIN32_THREADS)
#   if defined(GC_PTHREADS)
#     include <pthread.h>
      extern pthread_mutex_t GC_allocate_ml;
#     define LOCK()   pthread_mutex_lock(&GC_allocate_ml)
#     define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
#   else
#     include <windows.h>
      GC_API CRITICAL_SECTION GC_allocate_ml;
#     define LOCK() EnterCriticalSection(&GC_allocate_ml);
#     define UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
#   endif
# endif /* GC_WIN32_THREADS */
# ifndef SET_LOCK_HOLDER
#   define SET_LOCK_HOLDER()
#   define UNSET_LOCK_HOLDER()
#   define I_HOLD_LOCK() FALSE
		/* Used on platforms where locks can be reacquired,	*/
		/* so it doesn't matter if we lie.			*/
# endif
# else /* !THREADS */
#   define LOCK()
#   define UNLOCK()
# endif /* !THREADS */
# ifndef SET_LOCK_HOLDER
#   define SET_LOCK_HOLDER()
#   define UNSET_LOCK_HOLDER()
#   define I_HOLD_LOCK() FALSE
		/* Used on platforms where locks can be reacquired,	*/
		/* so it doesn't matter if we lie.			*/
# endif

# ifndef ENTER_GC
#   define ENTER_GC()
#   define EXIT_GC()
# endif
# ifndef DCL_LOCK_STATE
#   define DCL_LOCK_STATE
# endif

# ifndef FASTLOCK
#   define FASTLOCK() LOCK()
#   define FASTLOCK_SUCCEEDED() TRUE
#   define FASTUNLOCK() UNLOCK()
# endif
#endif /* GC_LOCKS_H */