/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2001 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code relies on implementation details of LinuxThreads
 * (i.e., properties not guaranteed by the Pthreads standard),
 * though this version now does less of that than the other Pthreads
 * implementations.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and thread support for some of the other POSIX platforms; any changes
 * made here may need to be reflected there too.
 */
/* DG/UX ix86 support <takis@xfree86.org> */
/*
 * Linux_threads.c now also includes some code to support HPUX and
 * OSF1 (Compaq Tru64 Unix, really).  The OSF1 support is based on
 * Eric Benson's patch.
 *
 * Eric also suggested an alternate basis for a lock implementation in
 * his code:
 * + #elif defined(OSF1)
 * +    unsigned long GC_allocate_lock = 0;
 * +    msemaphore GC_allocate_semaphore;
 * + #  define GC_TRY_LOCK() \
 * +      ((msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) == 0) \
 * +       ? (GC_allocate_lock = 1) \
 * +       : 0)
 * + #  define GC_LOCK_TAKEN GC_allocate_lock
 */
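/* A self-contained sketch (illustration only, compiled out) of the    */
/* msem-based try-lock suggested above.  msem_init, msem_lock and      */
/* msem_unlock are the documented Tru64 <sys/mman.h> interfaces; the   */
/* demo_* names are hypothetical.                                      */
#if 0
#include <sys/mman.h>

static msemaphore demo_sem;     /* initialize once with msem_init(&demo_sem, MSEM_UNLOCKED) */
static unsigned long demo_lock_taken = 0;

static int demo_try_lock(void)
{
    /* MSEM_IF_NOWAIT makes msem_lock fail instead of blocking. */
    if (msem_lock(&demo_sem, MSEM_IF_NOWAIT) == 0) {
        demo_lock_taken = 1;
        return 1;
    }
    return 0;
}

static void demo_unlock(void)
{
    demo_lock_taken = 0;
    msem_unlock(&demo_sem, 0);
}
#endif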
/* #define DEBUG_THREADS 1 */
/* #define GC_ASSERTIONS */

# include "private/pthread_support.h"
# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
     && !defined(GC_IRIX_THREADS) && !defined(GC_WIN32_THREADS) \
     && !defined(GC_AIX_THREADS)
# if defined(GC_HPUX_THREADS) && !defined(USE_PTHREAD_SPECIFIC) \
     && !defined(USE_HPUX_TLS)
#   define USE_HPUX_TLS
# endif

# if (defined(GC_DGUX386_THREADS) || defined(GC_OSF1_THREADS) || \
      defined(GC_DARWIN_THREADS)) && !defined(USE_PTHREAD_SPECIFIC)
#   define USE_PTHREAD_SPECIFIC
# endif

# if defined(GC_DGUX386_THREADS) && !defined(_POSIX4A_DRAFT10_SOURCE)
#   define _POSIX4A_DRAFT10_SOURCE 1
# endif

# if defined(GC_DGUX386_THREADS) && !defined(_USING_POSIX4A_DRAFT10)
#   define _USING_POSIX4A_DRAFT10 1
# endif
# ifdef THREAD_LOCAL_ALLOC
#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_HPUX_TLS)
#     include "private/specific.h"
#   endif
#   if defined(USE_PTHREAD_SPECIFIC)
#     define GC_getspecific pthread_getspecific
#     define GC_setspecific pthread_setspecific
#     define GC_key_create pthread_key_create
      typedef pthread_key_t GC_key_t;
#   endif
#   if defined(USE_HPUX_TLS)
#     define GC_getspecific(x) (x)
#     define GC_setspecific(key, v) ((key) = (v), 0)
#     define GC_key_create(key, d) 0
      typedef void * GC_key_t;
#   endif
# endif /* THREAD_LOCAL_ALLOC */
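/* Illustration only (compiled out): all three GC_key_t backends above */
/* expose the same minimal interface, so a round-trip looks the same   */
/* regardless of which one is selected.  demo_key is hypothetical.     */
#if 0
static GC_key_t demo_key;

static void demo_tls_roundtrip(void * p)
{
    if (0 != GC_key_create(&demo_key, 0)) ABORT("GC_key_create failed");
    if (0 != GC_setspecific(demo_key, p)) ABORT("GC_setspecific failed");
    GC_ASSERT(GC_getspecific(demo_key) == p);
}
#endif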
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/types.h>
# include <sys/stat.h>

#if defined(GC_DARWIN_THREADS)
# include "private/darwin_semaphore.h"
#else
# include <semaphore.h>
#endif /* !GC_DARWIN_THREADS */

#if defined(GC_DARWIN_THREADS)
# include <sys/sysctl.h>
#endif /* GC_DARWIN_THREADS */

#if defined(GC_DGUX386_THREADS)
# include <sys/dg_sys_info.h>
# include <sys/_int_psem.h>
  /* sem_t is an unsigned int on DG/UX. */
  typedef unsigned int sem_t;
#endif /* GC_DGUX386_THREADS */
#ifdef GC_USE_LD_WRAP
#   define WRAP_FUNC(f) __wrap_##f
#   define REAL_FUNC(f) __real_##f
#else
#   define WRAP_FUNC(f) GC_##f
#   if !defined(GC_DGUX386_THREADS)
#     define REAL_FUNC(f) f
#   else /* GC_DGUX386_THREADS */
#     define REAL_FUNC(f) __d10_##f
#   endif /* GC_DGUX386_THREADS */
#   undef pthread_create
#   if !defined(GC_DARWIN_THREADS)
#     undef pthread_sigmask
#   endif
#   undef pthread_join
#   undef pthread_detach
#endif
static GC_bool parallel_initialized = FALSE;

void GC_init_parallel();

# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)

/* We don't really support thread-local allocation with DBG_HDRS_ALL. */

GC_key_t GC_thread_key;

static GC_bool keys_initialized;

/* Recover the contents of the freelist array fl into the global one gfl. */
/* Note that the indexing scheme differs, in that gfl has finer size      */
/* resolution, even if not all entries are used.                          */
/* We hold the allocator lock.                                            */
static void return_freelists(ptr_t *fl, ptr_t *gfl)
{
    int i;
    ptr_t q, *qptr;
    size_t nwords;

    for (i = 1; i < NFREELISTS; ++i) {
        nwords = i * (GRANULARITY/sizeof(word));
        qptr = fl + i;
        q = *qptr;
        if ((word)q >= HBLKSIZE) {
            if (gfl[nwords] == 0) {
                gfl[nwords] = q;
            } else {
                /* Concatenate. */
                for (; (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
                GC_ASSERT(0 == q);
                *qptr = gfl[nwords];
                gfl[nwords] = fl[i];
            }
        }
        /* Clear fl[i], since the thread structure may hang around. */
        /* Do it in a way that is likely to trap if we access it.   */
        fl[i] = (ptr_t)HBLKSIZE;
    }
}
/* We statically allocate a single "size 0" object.  It is linked to */
/* itself, and is thus repeatedly reused for all size 0 allocation   */
/* requests.  (Size 0 gcj allocation requests are incorrect, and     */
/* we arrange for those to fault asap.)                              */
static ptr_t size_zero_object = (ptr_t)(&size_zero_object);
/* Each thread structure must be initialized.  */
/* This call must be made from the new thread. */
/* Caller holds allocation lock.               */
void GC_init_thread_local(GC_thread p)
{
    int i;

    if (!keys_initialized) {
        if (0 != GC_key_create(&GC_thread_key, 0)) {
            ABORT("Failed to create key for local allocator");
        }
        keys_initialized = TRUE;
    }
    if (0 != GC_setspecific(GC_thread_key, p)) {
        ABORT("Failed to set thread specific allocation pointers");
    }
    for (i = 1; i < NFREELISTS; ++i) {
        p -> ptrfree_freelists[i] = (ptr_t)1;
        p -> normal_freelists[i] = (ptr_t)1;
#       ifdef GC_GCJ_SUPPORT
          p -> gcj_freelists[i] = (ptr_t)1;
#       endif
    }
    /* Set up the size 0 free lists. */
    p -> ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
    p -> normal_freelists[0] = (ptr_t)(&size_zero_object);
#   ifdef GC_GCJ_SUPPORT
      p -> gcj_freelists[0] = (ptr_t)(-1);
#   endif
}
#ifdef GC_GCJ_SUPPORT
  extern ptr_t * GC_gcjobjfreelist;
#endif
/* We hold the allocator lock. */
void GC_destroy_thread_local(GC_thread p)
{
    /* We currently only do this from the thread itself or from */
    /* the fork handler for a child process.                    */
#   ifndef HANDLE_FORK
      GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);
#   endif
    return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
    return_freelists(p -> normal_freelists, GC_objfreelist);
#   ifdef GC_GCJ_SUPPORT
      return_freelists(p -> gcj_freelists, GC_gcjobjfreelist);
#   endif
}
extern GC_PTR GC_generic_malloc_many();

GC_PTR GC_local_malloc(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl;
        ptr_t my_entry;
#       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
        GC_key_t k = GC_thread_key;
#       endif
        void * tsd;

#       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
            if (EXPECT(0 == k, 0)) {
                /* This can happen if we get called when the world is   */
                /* being initialized.  Whether we can actually complete */
                /* the initialization then is unclear.                  */
                GC_init_parallel();
                k = GC_thread_key;
            }
#       endif
        tsd = GC_getspecific(GC_thread_key);
#       ifdef GC_ASSERTIONS
          LOCK();
          GC_ASSERT(tsd == (void *)GC_lookup_thread(pthread_self()));
          UNLOCK();
#       endif
        my_fl = ((GC_thread)tsd) -> normal_freelists + index;
        my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            ptr_t next = obj_link(my_entry);
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = next;
            obj_link(my_entry) = 0;
            PREFETCH_FOR_WRITE(next);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc(bytes);
        }
    }
}
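/* Illustration only (compiled out): the free-list entry encoding the  */
/* fast path above depends on.  An entry value below HBLKSIZE is not a */
/* pointer: small values 1..DIRECT_GRANULES count objects handed out   */
/* directly by the global allocator; anything else below HBLKSIZE      */
/* (including 0) means it is time to refill with                       */
/* GC_generic_malloc_many.  demo_fl_state is a hypothetical name that  */
/* just restates those three cases.                                    */
#if 0
static int demo_fl_state(word entry)
{
    if (entry >= HBLKSIZE) return 0;            /* head of a real free list */
    if (entry - 1 < DIRECT_GRANULES) return 1;  /* still counting direct allocations */
    return 2;                                   /* empty or exhausted: refill */
}
#endif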
GC_PTR GC_local_malloc_atomic(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc_atomic(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> ptrfree_freelists + index;
        ptr_t my_entry = *my_fl;

        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = obj_link(my_entry);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc_atomic(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), PTRFREE, my_fl);
            /* *my_fl is updated while the collector is excluded;  */
            /* the free list is always visible to the collector as */
            /* such.                                               */
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc_atomic(bytes);
        }
    }
}
#ifdef GC_GCJ_SUPPORT

#include "include/gc_gcj.h"

#ifdef GC_ASSERTIONS
  extern GC_bool GC_gcj_malloc_initialized;
#endif

extern int GC_gcj_kind;

GC_PTR GC_local_gcj_malloc(size_t bytes,
                           void * ptr_to_struct_containing_descr)
{
    GC_ASSERT(GC_gcj_malloc_initialized);
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> gcj_freelists + index;
        ptr_t my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            GC_ASSERT(!GC_incremental);
            /* We assert that any concurrent marker will stop us.    */
            /* Thus it is impossible for a mark procedure to see the */
            /* allocation of the next object, but to see this object */
            /* still containing a free list pointer.  Otherwise the  */
            /* marker might find a random "mark descriptor".         */
            *(volatile ptr_t *)my_fl = obj_link(my_entry);
            /* We must update the freelist before we store the pointer. */
            /* Otherwise a GC at this point would see a corrupted       */
            /* free list.                                               */
            /* A memory barrier is probably never needed, since the     */
            /* action of stopping this thread will cause prior writes   */
            /* to complete.                                             */
            GC_ASSERT(((void * volatile *)result)[1] == 0);
            *(void * volatile *)result = ptr_to_struct_containing_descr;
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            if (!GC_incremental) *my_fl = my_entry + index + 1;
                /* In the incremental case, we always have to take this */
                /* path.  Thus we leave the counter alone.              */
            return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), GC_gcj_kind, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        }
    }
}

#endif /* GC_GCJ_SUPPORT */
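/* Illustration only: the object layout assumed by GC_local_gcj_malloc */
/* above.  A free object and an allocated one share word 0:            */
/*                                                                     */
/*   free object:      [ link to next free object | 0           ]      */
/*   allocated object: [ descriptor pointer       | client data  ]     */
/*                                                                     */
/* If the descriptor were stored while the object was still reachable  */
/* from the free list, a marker could misread a stale link as a mark   */
/* descriptor (or vice versa); hence the link is removed first.        */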
# else  /* !THREAD_LOCAL_ALLOC && !DBG_HDRS_ALL */

#   define GC_destroy_thread_local(t)

# endif /* !THREAD_LOCAL_ALLOC */
/*
   To make sure that we're using LinuxThreads and not some other thread
   package, we generate a dummy reference to `pthread_kill_other_threads_np'
   (was `__pthread_initial_thread_bos', but that disappeared),
   which is a symbol defined in LinuxThreads, but (hopefully) not in other
   thread packages.

   We no longer do this, since this code is now portable enough that it might
   actually work for something else.
*/
void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;
long GC_nprocs = 1;     /* Number of processors.  We may not have */
                        /* access to all of them, but this is as  */
                        /* good a guess as any ...                */
# ifdef PARALLEL_MARK

#   ifndef MAX_MARKERS
#     define MAX_MARKERS 16
#   endif

static ptr_t marker_sp[MAX_MARKERS] = {0};
void * GC_mark_thread(void * id)
{
  word my_mark_no = 0;

  marker_sp[(word)id] = GC_approx_sp();
  for (;; ++my_mark_no) {
    /* GC_mark_no is passed only to allow GC_help_marker to terminate   */
    /* promptly.  This would be important if it were called from the    */
    /* signal handler or from the GC lock acquisition code.  Under      */
    /* Linux, it's not safe to call it from a signal handler, since it  */
    /* uses mutexes and condition variables.  Since it is called only   */
    /* here, the argument is unnecessary.                               */
    if (my_mark_no < GC_mark_no || my_mark_no > GC_mark_no + 2) {
        /* Resynchronize if we get far off, e.g. because GC_mark_no */
        /* wrapped.                                                 */
        my_mark_no = GC_mark_no;
    }
#   ifdef DEBUG_THREADS
      GC_printf1("Starting mark helper for mark number %ld\n", my_mark_no);
#   endif
    GC_help_marker(my_mark_no);
  }
}
extern long GC_markers;         /* Number of mark threads we would */
                                /* like to have.  Includes the     */
                                /* initiating thread.              */

pthread_t GC_mark_threads[MAX_MARKERS];
444 static void start_mark_threads()
449 if (GC_markers > MAX_MARKERS) {
450 WARN("Limiting number of mark threads\n", 0);
451 GC_markers = MAX_MARKERS;
453 if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");
455 if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
456 ABORT("pthread_attr_setdetachstate failed");
458 # if defined(HPUX) || defined(GC_DGUX386_THREADS)
459 /* Default stack size is usually too small: fix it. */
460 /* Otherwise marker threads or GC may run out of */
462 # define MIN_STACK_SIZE (8*HBLKSIZE*sizeof(word))
467 if (pthread_attr_getstacksize(&attr, &old_size) != 0)
468 ABORT("pthread_attr_getstacksize failed\n");
469 if (old_size < MIN_STACK_SIZE) {
470 if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
471 ABORT("pthread_attr_setstacksize failed\n");
474 # endif /* HPUX || GC_DGUX386_THREADS */
476 if (GC_print_stats) {
477 GC_printf1("Starting %ld marker threads\n", GC_markers - 1);
480 for (i = 0; i < GC_markers - 1; ++i) {
481 if (0 != PTHREAD_CREATE(GC_mark_threads + i, &attr,
482 GC_mark_thread, (void *)(word)i)) {
483 WARN("Marker thread creation failed, errno = %ld.\n", errno);
#else  /* !PARALLEL_MARK */

static __inline__ void start_mark_threads()
{
}

#endif /* !PARALLEL_MARK */
/* Defining INSTALL_LOOPING_SEGV_HANDLER causes SIGSEGV and SIGBUS to */
/* result in an infinite loop in a signal handler.  This can be very  */
/* useful for debugging, since (as of RH7) gdb still seems to have    */
/* serious problems with threads.                                     */
#ifdef INSTALL_LOOPING_SEGV_HANDLER
void GC_looping_handler(int sig)
{
    GC_printf3("Signal %ld in thread %lx, pid %ld\n",
               sig, pthread_self(), getpid());
    for (;;);
}
#endif
GC_bool GC_thr_initialized = FALSE;

volatile GC_thread GC_threads[THREAD_TABLE_SZ];
void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      GC_push_all((ptr_t)(&GC_thread_key),
          (ptr_t)(&GC_thread_key)+sizeof(&GC_thread_key));
#   endif
}
#ifdef THREAD_LOCAL_ALLOC
/* We must explicitly mark ptrfree and gcj free lists, since the free */
/* list links wouldn't otherwise be found.  We also set the mark bits */
/* for the normal free lists, since that involves touching less      */
/* memory than if we scanned them normally.                          */
void GC_mark_thread_local_free_lists(void)
{
    int i, j;
    GC_thread p;
    ptr_t q;

    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
      for (p = GC_threads[i]; 0 != p; p = p -> next) {
        for (j = 1; j < NFREELISTS; ++j) {
          q = p -> ptrfree_freelists[j];
          if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
          q = p -> normal_freelists[j];
          if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#         ifdef GC_GCJ_SUPPORT
            q = p -> gcj_freelists[j];
            if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#         endif /* GC_GCJ_SUPPORT */
        }
      }
    }
}
#endif /* THREAD_LOCAL_ALLOC */
static struct GC_Thread_Rep first_thread;
/* Add a thread to GC_threads.  We assume it wasn't already there. */
/* Caller holds allocation lock.                                   */
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
    } else {
        result = (struct GC_Thread_Rep *)
                 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there. */
/* (The code intentionally traps if it wasn't.)             */
/* Caller holds allocation lock.                            */
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    GC_INTERNAL_FREE(p);
}
/* If a thread has been joined, but we have not yet        */
/* been notified, then there may be more than one thread   */
/* in the table with the same pthread id.                  */
/* This is OK, but we need a way to delete a specific one. */
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    GC_INTERNAL_FREE(p);
}
/* Return a GC_thread corresponding to a given pthread_t. */
/* Returns 0 if it's not there.                            */
/* Caller holds allocation lock or otherwise inhibits      */
/* updates.                                                */
/* If there is more than one thread with the given id we   */
/* return the most recent one.                             */
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}
#ifdef HANDLE_FORK
/* Remove all entries from the GC_threads table, except the     */
/* one for the current thread.  We need to do this in the child */
/* process after a fork(), since only the current thread        */
/* survives in the child.                                       */
void GC_remove_all_threads_but_me(void)
{
    pthread_t self = pthread_self();
    int hv;
    GC_thread p, next, me;

    for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
      me = 0;
      for (p = GC_threads[hv]; 0 != p; p = next) {
        next = p -> next;
        if (p -> id == self) {
          me = p;
          p -> next = 0;
        } else {
#         ifdef THREAD_LOCAL_ALLOC
            if (!(p -> flags & FINISHED)) {
              GC_destroy_thread_local(p);
            }
#         endif /* THREAD_LOCAL_ALLOC */
          if (p != &first_thread) GC_INTERNAL_FREE(p);
        }
      }
      GC_threads[hv] = me;
    }
}
#endif /* HANDLE_FORK */
#ifdef USE_PROC_FOR_LIBRARIES
int GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
{
    int i;
    GC_thread p;

#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers; ++i) {
        if (marker_sp[i] > lo && marker_sp[i] < hi) return 1;
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (0 != p -> stack_end) {
#         ifdef STACK_GROWS_UP
            if (p -> stack_end >= lo && p -> stack_end < hi) return 1;
#         else /* STACK_GROWS_DOWN */
            if (p -> stack_end > lo && p -> stack_end <= hi) return 1;
#         endif
        }
      }
    }
    return 0;
}
#endif /* USE_PROC_FOR_LIBRARIES */
#ifdef GC_LINUX_THREADS
/* Return the number of processors, or <= 0 if it can't be determined. */
int GC_get_nprocs()
{
    /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that */
    /* appears to be buggy in many cases.                         */
    /* We look for lines "cpu<n>" in /proc/stat.                  */
#   define STAT_BUF_SIZE 4096
#   define STAT_READ read
        /* If read is wrapped, this may need to be redefined to call */
        /* the real one.                                             */
    char stat_buf[STAT_BUF_SIZE];
    int f;
    word result = 1;
        /* Some old kernels only have a single "cpu nnnn ..." */
        /* entry in /proc/stat.  We identify those as         */
        /* uniprocessors.                                     */
    size_t i, len = 0;

    f = open("/proc/stat", O_RDONLY);
    if (f < 0 || (len = STAT_READ(f, stat_buf, STAT_BUF_SIZE)) < 100) {
        WARN("Couldn't read /proc/stat\n", 0);
        return -1;
    }
    for (i = 0; i < len - 100; ++i) {
        if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
            && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
            int cpu_no = atoi(stat_buf + i + 4);
            if (cpu_no >= result) result = cpu_no + 1;
        }
    }
    close(f);
    return result;
}
#endif /* GC_LINUX_THREADS */
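/* Illustration only (compiled out): the same "cpu<n>" scan as a     */
/* standalone program, handy for sanity-checking the logic against a */
/* live /proc/stat.  It is a sketch, not part of the collector.      */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
    char buf[4096];
    int f = open("/proc/stat", O_RDONLY);
    ssize_t len = (f < 0) ? -1 : read(f, buf, sizeof buf);
    ssize_t i;
    int ncpus = 1;

    if (len < 100) return 1;    /* mirror the collector's sanity check */
    for (i = 0; i < len - 100; ++i) {
        if (buf[i] == '\n' && buf[i+1] == 'c' && buf[i+2] == 'p'
            && buf[i+3] == 'u') {
            int cpu_no = atoi(buf + i + 4);
            if (cpu_no >= ncpus) ncpus = cpu_no + 1;
        }
    }
    printf("%d processors\n", ncpus);
    return 0;
}
#endif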
/* We hold the GC lock.  Wait until an in-progress GC has finished.   */
/* Repeatedly RELEASES GC LOCK in order to wait.                      */
/* If wait_for_all is true, then we exit with the GC lock held and no */
/* collection in progress; otherwise we just wait for the current GC  */
/* to finish.                                                         */
extern GC_bool GC_collection_in_progress();
void GC_wait_for_gc_completion(GC_bool wait_for_all)
{
    if (GC_incremental && GC_collection_in_progress()) {
        int old_gc_no = GC_gc_no;

        /* Make sure that no part of our stack is still on the mark stack, */
        /* since it's about to be unmapped.                                */
        while (GC_incremental && GC_collection_in_progress()
               && (wait_for_all || old_gc_no == GC_gc_no)) {
            ENTER_GC();
            GC_collect_a_little_inner(1);
            EXIT_GC();
            UNLOCK();
            sched_yield();
            LOCK();
        }
    }
}
#ifdef HANDLE_FORK
/* Procedures called before and after a fork.  The goal here is to make */
/* it safe to call GC_malloc() in a forked child.  It's unclear that is */
/* attainable, since the single UNIX spec seems to imply that one       */
/* should only call async-signal-safe functions, and we probably can't  */
/* quite guarantee that.  But we give it our best shot.  (That same     */
/* spec also implies that it's not safe to call the system malloc       */
/* between fork() and exec().  Thus we're doing no worse than it.)      */

/* Called before a fork() */
void GC_fork_prepare_proc(void)
{
    /* Acquire all relevant locks, so that after releasing the locks */
    /* the child will see a consistent state in which monitor        */
    /* invariants hold.  Unfortunately, we can't acquire libc locks  */
    /* we might need, and there seems to be no guarantee that libc   */
    /* must install a suitable fork handler.                         */
    /* Wait for an ongoing GC to finish, since we can't finish it in */
    /* the (one remaining thread in) the child.                      */
      LOCK();
#     if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
        GC_wait_for_reclaim();
#     endif
      GC_wait_for_gc_completion(TRUE);
#     if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
        GC_acquire_mark_lock();
#     endif
}

/* Called in parent after a fork() */
void GC_fork_parent_proc(void)
{
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      GC_release_mark_lock();
#   endif
    UNLOCK();
}

/* Called in child after a fork() */
void GC_fork_child_proc(void)
{
    /* Clean up the thread table, so that just our thread is left. */
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      GC_release_mark_lock();
#   endif
    GC_remove_all_threads_but_me();
#   ifdef PARALLEL_MARK
      /* Turn off parallel marking in the child, since we are probably  */
      /* just going to exec, and we would have to restart mark threads. */
      GC_markers = 1;
      GC_parallel = FALSE;
#   endif /* PARALLEL_MARK */
    UNLOCK();
}
#endif /* HANDLE_FORK */
#if defined(GC_DGUX386_THREADS)
/* Return the number of processors, or <= 0 if it can't be determined. */
int GC_get_nprocs()
{
    /* <takis@XFree86.Org> */
    int numCpus;
    struct dg_sys_info_pm_info pm_sysinfo;
    int status = 0;

    status = dg_sys_info((long int *) &pm_sysinfo,
        DG_SYS_INFO_PM_INFO_TYPE, DG_SYS_INFO_PM_CURRENT_VERSION);
    if (status < 0)
       /* set -1 for error */
       numCpus = -1;
    else
      /* Active CPUs */
      numCpus = pm_sysinfo.idle_vp_count;

#   ifdef DEBUG_THREADS
      GC_printf1("Number of active CPUs in this system: %d\n", numCpus);
#   endif
    return(numCpus);
}
#endif /* GC_DGUX386_THREADS */
/* We hold the allocation lock. */
void GC_thr_init()
{
#   ifndef GC_DARWIN_THREADS
      int dummy;
#   endif
    GC_thread t;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;

#   ifdef HANDLE_FORK
      /* Prepare for a possible fork. */
      pthread_atfork(GC_fork_prepare_proc, GC_fork_parent_proc,
                     GC_fork_child_proc);
#   endif /* HANDLE_FORK */
    /* Add the initial thread, so we can stop it. */
    t = GC_new_thread(pthread_self());
#   ifdef GC_DARWIN_THREADS
      t -> stop_info.mach_thread = mach_thread_self();
#   else
      t -> stop_info.stack_ptr = (ptr_t)(&dummy);
#   endif
    t -> flags = DETACHED | MAIN_THREAD;

    GC_stop_init();

    /* Set GC_nprocs. */
    {
      char * nprocs_string = GETENV("GC_NPROCS");
      GC_nprocs = -1;
      if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
    }
    if (GC_nprocs <= 0) {
#     if defined(GC_HPUX_THREADS)
        GC_nprocs = pthread_num_processors_np();
#     endif
#     if defined(GC_OSF1_THREADS)
        GC_nprocs = sysconf(_SC_NPROCESSORS_ONLN);
        if (GC_nprocs <= 0) GC_nprocs = 1;
#     endif
#     if defined(GC_FREEBSD_THREADS)
        GC_nprocs = 1;
#     endif
#     if defined(GC_DARWIN_THREADS)
        int ncpus = 1;
        size_t len = sizeof(ncpus);
        sysctl((int[2]) {CTL_HW, HW_NCPU}, 2, &ncpus, &len, NULL, 0);
        GC_nprocs = ncpus;
#     endif
#     if defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS)
        GC_nprocs = GC_get_nprocs();
#     endif
    }
    if (GC_nprocs <= 0) {
      WARN("GC_get_nprocs() returned %ld\n", GC_nprocs);
      GC_nprocs = 2;
#     ifdef PARALLEL_MARK
        GC_markers = 1;
#     endif
    } else {
#     ifdef PARALLEL_MARK
      {
        char * markers_string = GETENV("GC_MARKERS");
        if (markers_string != NULL) {
          GC_markers = atoi(markers_string);
        } else {
          GC_markers = GC_nprocs;
        }
      }
#     endif
    }
#   ifdef PARALLEL_MARK
#     ifdef CONDPRINT
        if (GC_print_stats) {
          GC_printf2("Number of processors = %ld, "
                     "number of marker threads = %ld\n", GC_nprocs, GC_markers);
        }
#     endif
      if (GC_markers == 1) {
        GC_parallel = FALSE;
#       ifdef CONDPRINT
          if (GC_print_stats) {
            GC_printf0("Single marker thread, turning off parallel marking\n");
          }
#       endif
      } else {
        GC_parallel = TRUE;
        /* Disable true incremental collection, but generational is OK. */
        GC_time_limit = GC_TIME_UNLIMITED;
      }
#   endif
}
/* Perform all initializations, including those that */
/* may require allocation.                           */
/* Called without allocation lock.                   */
/* Must be called before a second thread is created. */
void GC_init_parallel()
{
    if (parallel_initialized) return;
    parallel_initialized = TRUE;

    /* GC_init() calls us back, so set flag first. */
    if (!GC_is_initialized) GC_init();
    /* If we are using a parallel marker, start the helper threads. */
#   ifdef PARALLEL_MARK
      if (GC_parallel) start_mark_threads();
#   endif
    /* Initialize thread local free lists if used. */
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      LOCK();
      GC_init_thread_local(GC_lookup_thread(pthread_self()));
      UNLOCK();
#   endif
}
#if !defined(GC_DARWIN_THREADS)
int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(REAL_FUNC(pthread_sigmask)(how, set, oset));
}
#endif /* !GC_DARWIN_THREADS */
/* Wrappers for functions that are likely to block for an appreciable */
/* length of time.  Must be called in pairs, if at all.               */
/* Nothing much beyond the system call itself should be executed      */
/* between these.                                                     */

void GC_start_blocking(void) {
#   define SP_SLOP 128
    GC_thread me;
    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(!(me -> thread_blocked));
#   ifdef SPARC
      me -> stop_info.stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
#     ifndef GC_DARWIN_THREADS
        me -> stop_info.stack_ptr = (ptr_t)GC_approx_sp();
#     endif
#   endif
#   ifdef IA64
      me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack() + SP_SLOP;
#   endif
    /* Add some slop to the stack pointer, since the wrapped call may */
    /* end up pushing more callee-save registers.                     */
#   ifndef GC_DARWIN_THREADS
#     ifdef STACK_GROWS_UP
        me -> stop_info.stack_ptr += SP_SLOP;
#     else
        me -> stop_info.stack_ptr -= SP_SLOP;
#     endif
#   endif
    me -> thread_blocked = TRUE;
    UNLOCK();
}
void GC_end_blocking(void) {
    GC_thread me;
    LOCK();   /* This will block if the world is stopped. */
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(me -> thread_blocked);
    me -> thread_blocked = FALSE;
    UNLOCK();
}
#if defined(GC_DGUX386_THREADS)
#define __d10_sleep sleep
#endif /* GC_DGUX386_THREADS */
/* A wrapper for the standard C sleep function. */
int WRAP_FUNC(sleep) (unsigned int seconds)
{
    int result;

    GC_start_blocking();
    result = REAL_FUNC(sleep)(seconds);
    GC_end_blocking();
    return result;
}
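/* Illustration only (compiled out): the same pairing can wrap any */
/* other call that may block for a long time, e.g. read().  GC_read */
/* is a hypothetical name; this file only wraps sleep().           */
#if 0
ssize_t GC_read(int fd, void * buf, size_t nbyte)
{
    ssize_t result;

    GC_start_blocking();
    result = read(fd, buf, nbyte);
    GC_end_blocking();
    return result;
}
#endif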
struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;           /* 1 ==> in our thread table, but */
                                /* parent hasn't yet noticed.     */
};
/* Called at thread exit.                             */
/* Never called for main thread.  That's OK, since it */
/* results in at most a tiny one-time leak.  And      */
/* linuxthreads doesn't reclaim the main thread's     */
/* resources or id anyway.                            */
void GC_thread_exit_proc(void *arg)
{
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_destroy_thread_local(me);
    if (me -> flags & DETACHED) {
        GC_delete_thread(pthread_self());
    } else {
        me -> flags |= FINISHED;
    }
#   if defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_SPECIFIC) \
       && !defined(USE_HPUX_TLS) && !defined(DBG_HDRS_ALL)
      GC_remove_specific(GC_thread_key);
#   endif
    GC_wait_for_gc_completion(FALSE);
    UNLOCK();
}
int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id */
    /* can't have been recycled by pthreads.                          */
    UNLOCK();
    result = REAL_FUNC(pthread_join)(thread, retval);
#   if defined (GC_FREEBSD_THREADS)
      /* On FreeBSD, the wrapped pthread_join() sometimes returns (what
         appears to be) a spurious EINTR which caused the test and real code
         to gratuitously fail.  Having looked at system pthread library source
         code, I see how this return code may be generated.  In one path of
         code, pthread_join() just returns the errno setting of the thread
         being joined.  This does not match the POSIX specification or the
         local man pages, thus I have taken the liberty to catch this one
         spurious return value, properly conditionalized on GC_FREEBSD_THREADS. */
      if (result == EINTR) result = 0;
#   endif
    if (result == 0) {
        LOCK();
        /* Here the pthread thread id may have been recycled. */
        GC_delete_gc_thread(thread, thread_gc_id);
        UNLOCK();
    }
    return result;
}
int
WRAP_FUNC(pthread_detach)(pthread_t thread)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    UNLOCK();
    result = REAL_FUNC(pthread_detach)(thread);
    if (result == 0) {
      LOCK();
      thread_gc_id -> flags |= DETACHED;
      /* Here the pthread thread id may have been recycled. */
      if (thread_gc_id -> flags & FINISHED) {
        GC_delete_gc_thread(thread, thread_gc_id);
      }
      UNLOCK();
    }
    return result;
}
void * GC_start_routine(void * arg)
{
    int dummy;
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
#   ifdef DEBUG_THREADS
      GC_printf1("Starting thread 0x%lx\n", my_pthread);
      GC_printf1("pid = %ld\n", (long) getpid());
      GC_printf1("sp = 0x%lx\n", (long) &arg);
#   endif
    LOCK();
    me = GC_new_thread(my_pthread);
#   ifdef GC_DARWIN_THREADS
      me -> stop_info.mach_thread = mach_thread_self();
#   else
      me -> stop_info.stack_ptr = 0;
#   endif
    me -> flags = si -> flags;
    /* me -> stack_end = GC_linux_stack_base(); -- currently (11/99)     */
    /* doesn't work because the stack base in /proc/self/stat is the     */
    /* one for the main thread.  There is a strong argument that that's  */
    /* a kernel bug, but a pervasive one.                                */
#   ifdef STACK_GROWS_DOWN
      me -> stack_end = (ptr_t)(((word)(&dummy) + (GC_page_size - 1))
                                & ~(GC_page_size - 1));
#     ifndef GC_DARWIN_THREADS
        me -> stop_info.stack_ptr = me -> stack_end - 0x10;
#     endif
      /* Needs to be plausible, since an asynchronous stack mark */
      /* should not crash.                                       */
#   else
      me -> stack_end = (ptr_t)((word)(&dummy) & ~(GC_page_size - 1));
      me -> stop_info.stack_ptr = me -> stack_end + 0x10;
#   endif
    /* This is dubious, since we may be more than a page into the stack, */
    /* and hence skip some of it, though it's not clear that matters.    */
#   ifdef IA64
      me -> backing_store_end = (ptr_t)
            (GC_save_regs_in_stack() & ~(GC_page_size - 1));
      /* This is also < 100% convincing.  We should also read this */
      /* from /proc, but the hook to do so isn't there yet.        */
#   endif /* IA64 */
    UNLOCK();
    start = si -> start_routine;
#   ifdef DEBUG_THREADS
      GC_printf1("start_routine = 0x%lx\n", start);
#   endif
    start_arg = si -> arg;
    sem_post(&(si -> registered));      /* Last action on si. */
                                        /* OK to deallocate.  */
    pthread_cleanup_push(GC_thread_exit_proc, 0);
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      LOCK();
      GC_init_thread_local(me);
      UNLOCK();
#   endif
    result = (*start)(start_arg);
#   ifdef DEBUG_THREADS
      GC_printf1("Finishing thread 0x%x\n", pthread_self());
#   endif
    me -> status = result;
    pthread_cleanup_pop(1);
    /* Cleanup acquires lock, ensuring that we can't exit while   */
    /* a collection that thinks we're alive is trying to stop us. */
    return(result);
}
int
WRAP_FUNC(pthread_create)(pthread_t *new_thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *), void *arg)
{
    int result;
    int detachstate;
    word my_flags = 0;
    struct start_info * si;
        /* This is otherwise saved only in an area mmapped by the thread */
        /* library, which isn't visible to the collector.                */

    /* We resist the temptation to muck with the stack size here,      */
    /* even if the default is unreasonably small.  That's the client's */
    /* responsibility.                                                 */

    LOCK();
    si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info),
                                                 NORMAL);
    UNLOCK();
    if (!parallel_initialized) GC_init_parallel();
    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
#   ifdef GC_ASSERTIONS
      {
        size_t stack_size;
        if (NULL == attr) {
           pthread_attr_t my_attr;
           pthread_attr_init(&my_attr);
           pthread_attr_getstacksize(&my_attr, &stack_size);
        } else {
           pthread_attr_getstacksize(attr, &stack_size);
        }
        GC_ASSERT(stack_size >= (8*HBLKSIZE*sizeof(word)));
        /* Our threads may need to do some work for the GC. */
        /* Ridiculously small threads won't work, and they  */
        /* probably wouldn't work anyway.                   */
      }
#   endif
    if (NULL == attr) {
        detachstate = PTHREAD_CREATE_JOINABLE;
    } else {
        pthread_attr_getdetachstate(attr, &detachstate);
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    UNLOCK();
#   ifdef DEBUG_THREADS
      GC_printf1("About to start new thread from thread 0x%X\n",
                 pthread_self());
#   endif

    result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);

#   ifdef DEBUG_THREADS
      GC_printf1("Started thread 0x%X\n", *new_thread);
#   endif
    /* Wait until child has been added to the thread table.           */
    /* This also ensures that we hold onto si until the child is done */
    /* with it.  Thus it doesn't matter whether it is otherwise       */
    /* visible to the collector.                                      */
    while (0 != sem_wait(&(si -> registered))) {
        if (EINTR != errno) ABORT("sem_wait failed");
    }
    sem_destroy(&(si -> registered));
    LOCK();
    GC_INTERNAL_FREE(si);
    UNLOCK();

    return(result);
}
#ifdef GENERIC_COMPARE_AND_SWAP
  pthread_mutex_t GC_compare_and_swap_lock = PTHREAD_MUTEX_INITIALIZER;

  GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                  GC_word old, GC_word new_val)
  {
    GC_bool result;

    pthread_mutex_lock(&GC_compare_and_swap_lock);
    if (*addr == old) {
      *addr = new_val;
      result = TRUE;
    } else {
      result = FALSE;
    }
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return result;
  }

  GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much)
  {
    GC_word old;

    pthread_mutex_lock(&GC_compare_and_swap_lock);
    old = *addr;
    *addr = old + how_much;
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return old + how_much;
  }
#endif /* GENERIC_COMPARE_AND_SWAP */
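/* Illustration only (compiled out): typical use of the primitives */
/* above, e.g. atomically claiming a one-shot flag and bumping a   */
/* shared counter.  The demo_* names are hypothetical.             */
#if 0
static volatile GC_word demo_flag = 0;
static volatile GC_word demo_count = 0;

static void demo_use(void)
{
    if (GC_compare_and_exchange(&demo_flag, 0, 1)) {
        /* We won the race; only one thread ever sees TRUE here. */
    }
    (void)GC_atomic_add(&demo_count, 1);        /* returns the new value */
}
#endif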
/* Spend a few cycles in a way that can't introduce contention with */
/* other threads.                                                   */
void GC_pause()
{
    int i;
#   if !defined(__GNUC__) || defined(__INTEL_COMPILER)
      volatile word dummy = 0;
#   endif

    for (i = 0; i < 10; ++i) {
#     ifdef __GNUC__
        __asm__ __volatile__ (" " : : : "memory");
#     else
        /* Something that's unlikely to be optimized away. */
        GC_noop(++dummy);
#     endif
    }
}

#define SPIN_MAX 1024   /* Maximum number of calls to GC_pause before */
                        /* giving up.                                 */
VOLATILE GC_bool GC_collecting = 0;
                        /* A hint that we're in the collector and */
                        /* holding the allocation lock for an     */
                        /* extended period.                       */
#if !defined(USE_SPIN_LOCK) || defined(PARALLEL_MARK)
/* If we don't want to use the below spinlock implementation, either  */
/* because we don't have a GC_test_and_set implementation, or because */
/* we don't want to risk sleeping, we can still try spinning on       */
/* pthread_mutex_trylock for a while.  This appears to be very        */
/* beneficial in many cases.                                          */
/* I suspect that under high contention this is nearly always better  */
/* than the spin lock.  But it's a bit slower on a uniprocessor.      */
/* Hence we still default to the spin lock.                           */
/* This is also used to acquire the mark lock for the parallel        */
/* marker.                                                            */

/* Here we use a strict exponential backoff scheme.  I don't know */
/* whether that's better or worse than the above.  We eventually  */
/* yield by calling pthread_mutex_lock(); it never makes sense to */
/* explicitly sleep.                                              */

void GC_generic_lock(pthread_mutex_t * lock)
{
#ifndef NO_PTHREAD_TRYLOCK
    unsigned pause_length = 1;
    unsigned i;

    if (0 == pthread_mutex_trylock(lock)) return;
    for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
        for (i = 0; i < pause_length; ++i) {
            GC_pause();
        }
        switch(pthread_mutex_trylock(lock)) {
            case 0:
                return;
            case EBUSY:
                break;
            default:
                ABORT("Unexpected error from pthread_mutex_trylock");
        }
    }
#endif /* !NO_PTHREAD_TRYLOCK */
    pthread_mutex_lock(lock);
}

#endif /* !USE_SPIN_LOCK || PARALLEL_MARK */
#if defined(USE_SPIN_LOCK)

/* Reasonably fast spin locks.  Basically the same implementation */
/* as STL alloc.h.  This isn't really the right way to do this,   */
/* but until the POSIX scheduling mess gets straightened out ...  */

volatile unsigned int GC_allocate_lock = 0;

void GC_lock()
{
#   define low_spin_max 30  /* spin cycles if we suspect uniprocessor */
#   define high_spin_max SPIN_MAX  /* spin cycles for multiprocessor  */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    int i;

    if (!GC_test_and_set(&GC_allocate_lock)) {
        return;
    }
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting || GC_nprocs == 1) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            GC_pause();
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock)) {
            /*
             * Got it!
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock)) {
            return;
        }
#       define SLEEP_THRESHOLD 12
            /* Under Linux very short sleeps tend to wait until   */
            /* the current time quantum expires.  On old Linux    */
            /* kernels nanosleep(<= 2ms) just spins.  (Under 2.4, */
            /* this happens only for real-time processes.)  We    */
            /* want to minimize both behaviors here.              */
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 24) i = 24;
                /* Don't wait for more than about 15 msecs, even */
                /* under extreme contention.                     */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}
#else  /* !USE_SPINLOCK */
void GC_lock()
{
#ifndef NO_PTHREAD_TRYLOCK
    if (1 == GC_nprocs || GC_collecting) {
        pthread_mutex_lock(&GC_allocate_ml);
    } else {
        GC_generic_lock(&GC_allocate_ml);
    }
#else  /* !NO_PTHREAD_TRYLOCK */
    pthread_mutex_lock(&GC_allocate_ml);
#endif /* !NO_PTHREAD_TRYLOCK */
}

#endif /* !USE_SPINLOCK */
#if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)

#ifdef GC_ASSERTIONS
  pthread_t GC_mark_lock_holder = NO_THREAD;
#endif
#if 0
  /* Ugly workaround for a linux threads bug in the final versions    */
  /* of glibc 2.1.  pthread_mutex_trylock sets the mutex owner        */
  /* field even when it fails to acquire the mutex.  This causes      */
  /* pthread_cond_wait to die.  Remove for glibc 2.2.                 */
  /* According to the man page, we should use                         */
  /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually */
  /* defined.                                                         */
  static pthread_mutex_t mark_mutex =
        {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
#else
  static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
void GC_acquire_mark_lock()
{
/*
    if (pthread_mutex_lock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_lock failed");
    }
*/
    GC_generic_lock(&mark_mutex);
#   ifdef GC_ASSERTIONS
      GC_mark_lock_holder = pthread_self();
#   endif
}
void GC_release_mark_lock()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
      GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_mutex_unlock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_unlock failed");
    }
}
/* Collector must wait for freelist builders for 2 reasons:           */
/* 1) Mark bits may still be getting examined without the lock.       */
/* 2) Partial free lists referenced only by locals may not be scanned */
/*    correctly, e.g. if they contain "pointer-free" objects, since   */
/*    the free-list link may be ignored.                              */
void GC_wait_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
      GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
      GC_mark_lock_holder = pthread_self();
#   endif
}
void GC_wait_for_reclaim()
{
    GC_acquire_mark_lock();
    while (GC_fl_builder_count > 0) {
        GC_wait_builder();
    }
    GC_release_mark_lock();
}
void GC_notify_all_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
    if (pthread_cond_broadcast(&builder_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
#ifdef PARALLEL_MARK

static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;
void GC_wait_marker()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
      GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
      GC_mark_lock_holder = pthread_self();
#   endif
}
void GC_notify_all_marker()
{
    if (pthread_cond_broadcast(&mark_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK */
# endif /* GC_LINUX_THREADS and friends */