 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 * Mutual exclusion between allocator/collector routines.
 * Needed if there is more than one allocator thread.
 * FASTLOCK() is assumed to try to acquire the lock in a cheap and
 * dirty way that is acceptable for a few instructions, e.g. by
 * inhibiting preemption. This is assumed to have succeeded only
 * if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
 * FASTUNLOCK() is called whether or not FASTLOCK_SUCCEEDED().
 * If signals cannot be tolerated with the FASTLOCK held, then
 * FASTLOCK should disable signals. The code executed under
 * FASTLOCK is otherwise immune to interruption, provided it is
 * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
 * and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
 * (There is currently no equivalent for FASTLOCK.)
 * (An illustrative sketch of the intended FASTLOCK/FASTUNLOCK usage
 * appears below, after the declaration of GC_noop1.)
 * In the PARALLEL_MARK case, we also need to define a number of
 * other inline functions here:
 *   GC_bool GC_compare_and_exchange( volatile GC_word *addr,
 *                                    GC_word old, GC_word new )
 *   GC_word GC_atomic_add( volatile GC_word *addr, GC_word how_much )
 *   void GC_memory_barrier( )
void GC_noop1 GC_PROTO((word));
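/* Illustrative only, not part of this header: the intended usage pattern */
/* for FASTLOCK()/FASTLOCK_SUCCEEDED()/FASTUNLOCK(), with a fall-back to  */
/* the full LOCK()/UNLOCK() pair.  The helper and the do_quick_work()/    */
/* do_full_work() callees are hypothetical names, not GC functions.       */
#if 0
  extern void do_quick_work(void);    /* hypothetical */
  extern void do_full_work(void);     /* hypothetical */

  static void example_critical_section(void)
  {
    DCL_LOCK_STATE;

    FASTLOCK();
    if (FASTLOCK_SUCCEEDED()) {
        /* Only a few instructions should run here. */
        do_quick_work();
        FASTUNLOCK();
    } else {
        /* FASTUNLOCK() is called even though the fast path failed. */
        FASTUNLOCK();
        LOCK();
        do_full_work();
        UNLOCK();
    }
  }
#endif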
# ifdef PCR_OBSOLETE /* Faster, but broken with multiple lwp's */
# include "th/PCR_Th.h"
# include "th/PCR_ThCrSec.h"
  extern struct PCR_Th_MLRep GC_allocate_ml;
# define DCL_LOCK_STATE PCR_sigset_t GC_old_sig_mask
# define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
# define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
# define FASTLOCK() PCR_ThCrSec_EnterSys()
  /* Here we cheat (a lot): */
# define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0)
  /* TRUE if nobody currently holds the lock */
# define FASTUNLOCK() PCR_ThCrSec_ExitSys()
# include <base/PCR_Base.h>
# include <th/PCR_Th.h>
  extern PCR_Th_ML GC_allocate_ml;
# define DCL_LOCK_STATE \
    PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
# define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
# define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
# define FASTLOCK() (GC_fastLockRes = PCR_Th_ML_Try(&GC_allocate_ml))
# define FASTLOCK_SUCCEEDED() (GC_fastLockRes == PCR_ERes_okay)
# define FASTUNLOCK() {\
    if( FASTLOCK_SUCCEEDED() ) PCR_Th_ML_Release(&GC_allocate_ml); }
  extern GC_word RT0u__inCritical;
# define LOCK() RT0u__inCritical++
# define UNLOCK() RT0u__inCritical--
  extern pthread_mutex_t GC_allocate_ml;
# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
# ifdef GC_SOLARIS_THREADS
  extern mutex_t GC_allocate_ml;
# define LOCK() mutex_lock(&GC_allocate_ml);
# define UNLOCK() mutex_unlock(&GC_allocate_ml);
/* Try to define GC_TEST_AND_SET and a matching GC_CLEAR for spin lock */
/* acquisition and release. We need this for correct operation of the */
/* (An illustrative spin-lock usage sketch follows the first */
/* implementation below.) */
  inline static int GC_test_and_set(volatile unsigned int *addr) {
    /* Note: the "xchg" instruction does not need a "lock" prefix */
    __asm__ __volatile__("xchgl %0, %1"
          : "=r"(oldval), "=m"(*(addr))
          : "0"(1), "m"(*(addr)) : "memory");
# define GC_TEST_AND_SET_DEFINED
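/* Illustrative only, not part of this header: how the GC_test_and_set/    */
/* GC_clear pair is meant to be used as a spin lock.  GC_test_and_set      */
/* returns the previous value, so 0 means the lock was just acquired.      */
/* The helper names and the spin loop are a sketch, not the collector's    */
/* actual locking code (see the LOCK()/UNLOCK() definitions further down). */
#if 0
  static volatile unsigned int example_lock = 0;  /* hypothetical lock word */

  static void example_acquire(void)
  {
    while (GC_test_and_set(&example_lock)) {
        /* Busy-wait; a real implementation would back off or yield. */
    }
  }

  static void example_release(void)
  {
    GC_clear(&example_lock);    /* store 0 with release semantics */
  }
#endif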
# if defined(__INTEL_COMPILER)
# include <ia64intrin.h>
  inline static int GC_test_and_set(volatile unsigned int *addr) {
# ifndef __INTEL_COMPILER
    __asm__ __volatile__("xchg4 %0=%1,%2"
          : "=r"(oldval), "=m"(*addr)
          : "r"(n) : "memory");
    oldval = _InterlockedExchange(addr, n);
# define GC_TEST_AND_SET_DEFINED
  /* Should this handle post-increment addressing?? */
  inline static void GC_clear(volatile unsigned int *addr) {
# ifndef __INTEL_COMPILER
    __asm__ __volatile__("st4.rel %0=r0" : "=m" (*addr) : : "memory");
    /* There is no st4 intrinsic here, so fall back to an atomic exchange. */
    _InterlockedExchange(addr, 0);
# define GC_CLEAR_DEFINED
  inline static int GC_test_and_set(volatile unsigned int *addr) {
    __asm__ __volatile__("ldstub %1,%0"
          : "=r"(oldval), "=m"(*addr)
          : "m"(*addr) : "memory");
# define GC_TEST_AND_SET_DEFINED
  /* Contributed by Tony Mantler. I'm not sure how well it was */
  inline static int GC_test_and_set(volatile unsigned int *addr) {
    char oldval; /* this must be no longer than 8 bits */
    /* The return value is semi-phony. */
    /* 'tas' sets bit 7 while the return */
    /* value pretends bit 0 was set */
    __asm__ __volatile__(
          "tas %1@; sne %0; negb %0"
          : "a" (addr) : "memory");
# define GC_TEST_AND_SET_DEFINED
# if defined(POWERPC)
  inline static int GC_test_and_set(volatile unsigned int *addr) {
    int temp = 1; /* locked value */
    __asm__ __volatile__(
          "1:\tlwarx %0,0,%1\n"   /* load and reserve */
          "\tcmpwi %0, 0\n"       /* if load is */
          "\tbne 2f\n"            /* non-zero, return already set */
          "\tstwcx. %2,0,%1\n"    /* else store conditional */
          "\tbne- 1b\n"           /* retry if lost reservation */
          "\tsync\n"              /* import barrier */
          "2:\t\n"                /* oldval is zero if we set */
          : "r"(addr), "r"(temp)
# define GC_TEST_AND_SET_DEFINED
  inline static void GC_clear(volatile unsigned int *addr) {
    __asm__ __volatile__("lwsync" : : : "memory");
# define GC_CLEAR_DEFINED
  inline static int GC_test_and_set(volatile unsigned int * addr)
    unsigned long oldvalue;
    __asm__ __volatile__(
          ".section .text2,\"ax\"\n"
          :"=&r" (temp), "=m" (*addr), "=&r" (oldvalue)
          :"Ir" (1), "m" (*addr)
# define GC_TEST_AND_SET_DEFINED
  inline static void GC_clear(volatile unsigned int *addr) {
    __asm__ __volatile__("mb" : : : "memory");
# define GC_CLEAR_DEFINED
# ifdef __native_client__
# define MASK_REGISTER(reg, cond) "bic" cond " " reg ", " reg ", #0xc0000000\n"
# define NACL_ALIGN() ".align 4\n"
# define MASK_REGISTER(reg, cond)
# define NACL_ALIGN()
  inline static int GC_test_and_set(volatile unsigned int *addr) {
    return (int) __sync_lock_test_and_set (addr, 1);
# define GC_TEST_AND_SET_DEFINED
  inline static void GC_clear(volatile unsigned int *addr) {
    __sync_synchronize ();
# define GC_CLEAR_DEFINED
# undef MASK_REGISTER
  inline static int GC_test_and_set(volatile unsigned int *addr) {
    /* Ripped from linuxthreads/sysdeps/cris/pt-machine.h. */
    /* Included with Hans-Peter Nilsson's permission. */
    register unsigned long int ret;
    /* Note the use of a dummy output of *addr to expose the write.
     * The memory barrier is to stop *other* writes being moved past
    __asm__ __volatile__("clearf\n"
          : "=&r" (ret), "=m" (*addr)
          : "r" (addr), "r" ((int) 1), "m" (*addr)
# define GC_TEST_AND_SET_DEFINED
  inline static int GC_test_and_set(volatile unsigned int *addr) {
    __asm__ __volatile__ (
          "0: cs %0,%1,0(%2)\n"
          : "d" (1), "a" (addr)
# endif /* __GNUC__ */
# if (defined(ALPHA) && !defined(__GNUC__))
    --> We currently assume that if gcc is not used, we are
    --> running under Tru64.
# include <machine/builtins.h>
# define GC_test_and_set(addr) __ATOMIC_EXCH_LONG(addr, 1)
# define GC_TEST_AND_SET_DEFINED
# define GC_clear(addr) { asm("mb"); *(volatile unsigned *)addr = 0; }
# define GC_CLEAR_DEFINED
# if defined(MSWIN32)
# define GC_test_and_set(addr) InterlockedExchange((LPLONG)addr,1)
# define GC_TEST_AND_SET_DEFINED
# include <sys/tas.h>
# define GC_test_and_set(addr) _test_and_set((int *) addr,1)
# define GC_TEST_AND_SET_DEFINED
# elif __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
     || !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
# define GC_test_and_set(addr) _test_and_set((void *)addr,1)
# define GC_test_and_set(addr) test_and_set((void *)addr,1)
# include <sgidefs.h>
# define GC_test_and_set(addr) __test_and_set32((void *)addr,1)
# define GC_clear(addr) __lock_release(addr);
# define GC_CLEAR_DEFINED
# define GC_TEST_AND_SET_DEFINED
# include <sys/atomic_op.h>
# if (defined(_POWER) || defined(_POWERPC))
# if defined(__GNUC__)
  inline static void GC_memsync() {
    __asm__ __volatile__ ("sync" : : : "memory");
# define inline __inline
# pragma mc_func GC_memsync { \
    "7c0004ac" /* sync (same opcode used for dcs)*/ \
# error do not know how to memsync
  inline static int GC_test_and_set(volatile unsigned int * addr) {
    if (compare_and_swap((void *)addr, &oldvalue, 1)) {
# define GC_TEST_AND_SET_DEFINED
  inline static void GC_clear(volatile unsigned int *addr) {
# define GC_CLEAR_DEFINED
# if 0 /* defined(HP_PA) */
  /* The official recommendation seems to be to not use ldcw from */
  /* user mode. Since multithreaded incremental collection doesn't */
  /* work anyway on HP_PA, this shouldn't be a major loss. */
  /* "set" means 0 and "clear" means 1 here. */
# define GC_test_and_set(addr) !GC_test_and_clear(addr);
# define GC_TEST_AND_SET_DEFINED
# define GC_clear(addr) GC_noop1((word)(addr)); *(volatile unsigned int *)addr = 1;
  /* The above needs a memory barrier! */
# define GC_CLEAR_DEFINED
# if defined(GC_TEST_AND_SET_DEFINED) && !defined(GC_CLEAR_DEFINED)
  inline static void GC_clear(volatile unsigned int *addr) {
    /* Try to discourage gcc from moving anything past this. */
    __asm__ __volatile__(" " : : : "memory");
  /* The function call in the following should prevent the */
  /* compiler from moving assignments to below the UNLOCK. */
# define GC_clear(addr) GC_noop1((word)(addr)); \
    *((volatile unsigned int *)(addr)) = 0;
# define GC_CLEAR_DEFINED
# endif /* !GC_CLEAR_DEFINED */
# if !defined(GC_TEST_AND_SET_DEFINED)
# define USE_PTHREAD_LOCKS
# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
     && !defined(GC_WIN32_THREADS)
# define NO_THREAD (pthread_t)(-1)
# include <pthread.h>
# if defined(PARALLEL_MARK)
  /* We need compare-and-swap to update mark bits, where it's */
  /* performance critical. If USE_MARK_BYTES is defined, it is */
  /* no longer needed for this purpose. However we use it in */
  /* either case to implement atomic fetch-and-add, though that's */
  /* less performance critical, and could perhaps be done with */
# if defined(GENERIC_COMPARE_AND_SWAP)
  /* Probably not useful, except for debugging. */
  /* We do use GENERIC_COMPARE_AND_SWAP on PA_RISC, but we */
  /* minimize its use. */
  extern pthread_mutex_t GC_compare_and_swap_lock;
  /* Note that if GC_word updates are not atomic, a concurrent */
  /* reader should acquire GC_compare_and_swap_lock. On */
  /* currently supported platforms, such updates are atomic. */
  extern GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                         GC_word old, GC_word new_val);
# endif /* GENERIC_COMPARE_AND_SWAP */
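/* Illustrative only, not part of this header: the retry-loop idiom in    */
/* which GC_compare_and_exchange is typically used, here sketched as a    */
/* hypothetical helper that atomically sets one bit in a shared word      */
/* (the mark-bit style of update mentioned above).  It assumes only the   */
/* GC_compare_and_exchange interface declared in this file.               */
#if 0
  static void example_atomic_set_bit(volatile GC_word *addr, unsigned bit_no)
  {
    GC_word old_val;
    GC_word new_val;

    do {
        old_val = *addr;                          /* read current value  */
        new_val = old_val | ((GC_word)1 << bit_no);
        if (new_val == old_val) return;           /* bit was already set */
        /* Retry if another thread changed *addr in the meantime. */
    } while (!GC_compare_and_exchange(addr, old_val, new_val));
  }
#endif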
# if !defined(GENERIC_COMPARE_AND_SWAP)
  /* Returns TRUE if the comparison succeeded. */
  inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
    __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
          : "+m"(*(addr)), "=q"(result)
          : "r" (new_val), "a"(old) : "memory");
    return (GC_bool) result;
# endif /* !GENERIC_COMPARE_AND_SWAP */
  inline static void GC_memory_barrier()
    /* We believe the processor ensures at least processor */
    /* consistent ordering. Thus a compiler barrier */
    /* should suffice. */
    __asm__ __volatile__("" : : : "memory");
# if !defined(GENERIC_COMPARE_AND_SWAP)
  /* Returns TRUE if the comparison succeeded. */
  inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
    __asm__ __volatile__("lock; cmpxchgq %2, %0; setz %1"
          : "+m"(*(addr)), "=r"(result)
          : "r" (new_val), "a"(old) : "memory");
    return (GC_bool) result;
# endif /* !GENERIC_COMPARE_AND_SWAP */
  inline static void GC_memory_barrier()
    /* We believe the processor ensures at least processor */
    /* consistent ordering. Thus a compiler barrier */
    /* should suffice. */
    __asm__ __volatile__("" : : : "memory");
# if defined(POWERPC)
# if !defined(GENERIC_COMPARE_AND_SWAP)
# if CPP_WORDSZ == 64
  /* Returns TRUE if the comparison succeeded. */
  inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                GC_word old, GC_word new_val)
# if HAS___SYNC_BOOL_COMPARE_AND_SWAP
    return __sync_bool_compare_and_swap(addr, old, new_val);
    unsigned long result, dummy;
    __asm__ __volatile__(
          "1:\tldarx %0,0,%5\n"
          : "=&r" (dummy), "=r" (result), "=p" (addr)
          : "r" (new_val), "r" (old), "2"(addr)
    return (GC_bool) result;
  /* Returns TRUE if the comparison succeeded. */
  inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                GC_word old, GC_word new_val)
# if HAS___SYNC_BOOL_COMPARE_AND_SWAP
    return __sync_bool_compare_and_swap(addr, old, new_val);
    __asm__ __volatile__(
          "1:\tlwarx %0,0,%5\n"
          : "=&r" (dummy), "=r" (result), "=p" (addr)
          : "r" (new_val), "r" (old), "2"(addr)
    return (GC_bool) result;
# endif /* !GENERIC_COMPARE_AND_SWAP */
  inline static void GC_memory_barrier()
    __asm__ __volatile__("sync" : : : "memory");
# endif /* POWERPC */
# if !defined(GENERIC_COMPARE_AND_SWAP)
# if CPP_WORDSZ == 64
  /* Returns TRUE if the comparison succeeded. */
  inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                GC_word old, GC_word new_val)
    unsigned long result;
    __asm__ __volatile__(
          : "0" (new_val), "r" (addr), "r" (old)
    return (GC_bool) (result == old);
  /* Returns TRUE if the comparison succeeded. */
  inline static GC_bool GC_compare_and_exchange(volatile GC_word *_addr,
                                                GC_word _old, GC_word _new_val)
    register unsigned long result asm("o0");
    register unsigned long old asm("o1");
    register volatile GC_word *addr asm("o2");
    __asm__ __volatile__(
          /* We encode the instruction directly so that it
             doesn't taint the whole binary as v9-only. */
          ".word 0xd1e29009" /* cas [%o2], %o1, %o0 */
          : "0" (result), "r" (addr), "r"(old)
    return (GC_bool) (result == old);
# endif /* !GENERIC_COMPARE_AND_SWAP */
  inline static void GC_memory_barrier()
    /* All sparc v9 chips provide processor consistent ordering. */
    /* Thus a compiler barrier should suffice. */
    __asm__ __volatile__("" : : : "memory");
# if !defined(GENERIC_COMPARE_AND_SWAP)
  inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                GC_word old, GC_word new_val)
    unsigned long oldval;
# if CPP_WORDSZ == 32
    __asm__ __volatile__(
          "mov ar.ccv=%3 ;; cmpxchg4.rel %0=[%0],%2,ar.ccv"
          : "r"(addr), "r"(new_val), "r"(old) : "memory");
    __asm__ __volatile__(
          "mov ar.ccv=%3 ;; cmpxchg8.rel %0=[%1],%2,ar.ccv"
          : "r"(addr), "r"(new_val), "r"(old) : "memory");
    return (oldval == old);
# endif /* !GENERIC_COMPARE_AND_SWAP */
  /* Shouldn't be needed; we use volatile stores instead. */
  inline static void GC_memory_barrier()
    __asm__ __volatile__("mf" : : : "memory");
# if !defined(GENERIC_COMPARE_AND_SWAP)
# if defined(__GNUC__)
  inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                GC_word old, GC_word new_val)
    unsigned long was_equal;
    __asm__ __volatile__(
          :"=&r" (temp), "=m" (*addr), "=&r" (was_equal)
          : "r" (new_val), "Ir" (old)
# else /* !__GNUC__ */
  inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                GC_word old, GC_word new_val)
    return __CMP_STORE_QUAD(addr, old, new_val, addr);
# endif /* !__GNUC__ */
# endif /* !GENERIC_COMPARE_AND_SWAP */
  inline static void GC_memory_barrier()
    __asm__ __volatile__("mb" : : : "memory");
# define GC_memory_barrier() asm("mb")
# endif /* !__GNUC__ */
# if !defined(GENERIC_COMPARE_AND_SWAP)
  inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                GC_word old, GC_word new_val)
    __asm__ __volatile__ (
          : "=&d" (retval), "+d" (old)
          : "d" (new_val), "a" (addr)
# define GC_memory_barrier()
# if !defined(GENERIC_COMPARE_AND_SWAP)
  /* Returns the original value of *addr. */
  inline static GC_word GC_atomic_add(volatile GC_word *addr,
    } while (!GC_compare_and_exchange(addr, old, old+how_much));
# else /* GENERIC_COMPARE_AND_SWAP */
  /* So long as a GC_word can be atomically updated, it should */
  /* be OK to read *addr without a lock. */
  extern GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much);
# endif /* GENERIC_COMPARE_AND_SWAP */
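/* Illustrative only, not part of this header: GC_atomic_add implements  */
/* fetch-and-add, returning the value *addr held before the addition.    */
/* The counter name below is hypothetical; per the comment above, the    */
/* counter may be read without the lock as long as GC_word updates are   */
/* atomic on the platform.                                               */
#if 0
  static volatile GC_word example_bytes_marked = 0; /* hypothetical counter */

  static void example_note_marked(GC_word nbytes)
  {
    GC_word before = GC_atomic_add(&example_bytes_marked, nbytes);
    /* 'before' is the counter value prior to this thread's addition. */
    (void)before;
  }
#endif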
# endif /* PARALLEL_MARK */
# if !defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_LOCKS)
  /* In the THREAD_LOCAL_ALLOC case, the allocation lock tends to */
  /* be held for long periods, if it is held at all. Thus spinning */
  /* and sleeping for fixed periods are likely to result in */
  /* significant wasted time. We thus rely mostly on queued locks. */
# define USE_SPIN_LOCK
  extern volatile unsigned int GC_allocate_lock;
  extern void GC_lock(void);
  /* Allocation lock holder. Only set if acquired by client through */
  /* GC_call_with_alloc_lock. (An illustrative sketch of that usage */
  /* appears below, after the LOCK/UNLOCK definitions.) */
# ifdef GC_ASSERTIONS
    { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); \
    { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
      GC_clear(&GC_allocate_lock); }
    { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
    GC_clear(&GC_allocate_lock)
# endif /* !GC_ASSERTIONS */
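/* Illustrative only, not part of this header: roughly how a client call  */
/* ends up holding the allocation lock via GC_call_with_alloc_lock        */
/* (declared in gc.h).  This sketch glosses over signal disabling and the */
/* single-threaded case; it only shows the LOCK/SET_LOCK_HOLDER pairing   */
/* that the comment above refers to.                                      */
#if 0
  void * example_call_with_alloc_lock(GC_fn_type fn, void * client_data)
  {
    void * result;
    DCL_LOCK_STATE;

    LOCK();
    SET_LOCK_HOLDER();             /* record that a client holds the lock */
    result = (*fn)(client_data);
    UNSET_LOCK_HOLDER();
    UNLOCK();
    return result;
  }
#endif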
  /* Another alternative for OSF1 might be: */
# include <sys/mman.h>
  extern msemaphore GC_allocate_semaphore;
# define LOCK() { if (msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) \
                      != 0) GC_lock(); else GC_allocate_lock = 1; }
  /* The following is INCORRECT, since the memory model is too weak. */
  /* Is this true? Presumably msem_unlock has the right semantics? */
# define UNLOCK() { GC_allocate_lock = 0; \
                    msem_unlock(&GC_allocate_semaphore, 0); }
# else /* THREAD_LOCAL_ALLOC || USE_PTHREAD_LOCKS */
# ifndef USE_PTHREAD_LOCKS
# define USE_PTHREAD_LOCKS
# endif /* THREAD_LOCAL_ALLOC */
# ifdef USE_PTHREAD_LOCKS
# include <pthread.h>
  extern pthread_mutex_t GC_allocate_ml;
# ifdef GC_ASSERTIONS
    { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
      pthread_mutex_unlock(&GC_allocate_ml); }
# else /* !GC_ASSERTIONS */
# if defined(NO_PTHREAD_TRYLOCK)
# define LOCK() GC_lock();
# else /* !defined(NO_PTHREAD_TRYLOCK) */
    { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
# endif /* !GC_ASSERTIONS */
# endif /* USE_PTHREAD_LOCKS */
# define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
# define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
  extern VOLATILE GC_bool GC_collecting;
# define ENTER_GC() GC_collecting = 1;
# define EXIT_GC() GC_collecting = 0;
  extern void GC_lock(void);
  extern pthread_t GC_lock_holder;
# ifdef GC_ASSERTIONS
  extern pthread_t GC_mark_lock_holder;
# endif /* GC_PTHREADS with linux_threads.c implementation */
# if defined(GC_WIN32_THREADS)
# if defined(GC_PTHREADS)
# include <pthread.h>
  extern pthread_mutex_t GC_allocate_ml;
# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
# include <windows.h>
  GC_API CRITICAL_SECTION GC_allocate_ml;
# define LOCK() EnterCriticalSection(&GC_allocate_ml);
# define UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
# ifndef SET_LOCK_HOLDER
# define SET_LOCK_HOLDER()
# define UNSET_LOCK_HOLDER()
# define I_HOLD_LOCK() FALSE
  /* Used on platforms where locks can be reacquired, */
  /* so it doesn't matter if we lie. */
# else /* !THREADS */
# endif /* !THREADS */
# ifndef SET_LOCK_HOLDER
# define SET_LOCK_HOLDER()
# define UNSET_LOCK_HOLDER()
# define I_HOLD_LOCK() FALSE
  /* Used on platforms where locks can be reacquired, */
  /* so it doesn't matter if we lie. */
# ifndef DCL_LOCK_STATE
# define DCL_LOCK_STATE
# define FASTLOCK() LOCK()
# define FASTLOCK_SUCCEEDED() TRUE
# define FASTUNLOCK() UNLOCK()
#endif /* GC_LOCKS_H */