/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2005 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include "private/pthread_support.h"

/*
 * Support code originally for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code no doubt makes some assumptions beyond what is
 * guaranteed by the pthread standard, though it now does
 * very little of that.  It now also supports NPTL, and many
 * other POSIX thread implementations.  We are trying to merge
 * all flavors of pthread support code into this file.
 */
/* DG/UX ix86 support <takis@xfree86.org> */

/*
 * Linux_threads.c now also includes some code to support HPUX and
 * OSF1 (Compaq Tru64 UNIX, really).  The OSF1 support is based on
 * Eric Benson's patch.
 *
 * Eric also suggested an alternate basis for a lock implementation in
 * his code:
 * + #elif defined(OSF1)
 * +    unsigned long GC_allocate_lock = 0;
 * +    msemaphore GC_allocate_semaphore;
 * + #  define GC_TRY_LOCK() \
 * +       ((msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) == 0) \
 * +        ? (GC_allocate_lock = 1) \
 * +        : 0)
 * + #  define GC_LOCK_TAKEN GC_allocate_lock
 */
#if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS)

# include <stdlib.h>
# include <pthread.h>
# include <sched.h>
# include <time.h>
# include <errno.h>
# include <unistd.h>
# include <string.h>
# include <ctype.h>
# include <fcntl.h>
# include <signal.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/types.h>
# include <sys/stat.h>

# include "gc_inline.h"
#if defined(GC_DARWIN_THREADS)
# include "private/darwin_semaphore.h"
#else
# include <semaphore.h>
#endif /* !GC_DARWIN_THREADS */
#if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS)
# include <sys/sysctl.h>
#endif /* GC_DARWIN_THREADS || GC_FREEBSD_THREADS */
#if defined(GC_NETBSD_THREADS)
# include <sys/param.h>
# include <sys/sysctl.h>
#endif /* GC_NETBSD_THREADS */
/* Allocator lock definitions. */
#if !defined(USE_SPIN_LOCK)
  GC_INNER pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;
#endif

GC_INNER unsigned long GC_lock_holder = NO_THREAD;
                /* Used only for assertions, and to prevent      */
                /* recursive reentry in the system call wrapper. */
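/* For illustration: the GC_ASSERT(I_HOLD_LOCK()) checks scattered     */
/* through this file boil down to a comparison of roughly the form     */
/*   GC_lock_holder == NUMERIC_THREAD_ID(pthread_self())               */
/* (a sketch only; see the private lock headers for the exact macro).  */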
#if defined(GC_DGUX386_THREADS)
# include <sys/dg_sys_info.h>
# include <sys/_int_psem.h>
  /* sem_t is an unsigned int in DG/UX. */
  typedef unsigned int sem_t;
#endif /* GC_DGUX386_THREADS */
/* Undefine macros used to redirect pthread primitives. */
# undef pthread_create
# if !defined(GC_DARWIN_THREADS) && !defined(GC_OPENBSD_THREADS)
#   undef pthread_sigmask
# endif
# undef pthread_join
# undef pthread_detach
# if defined(GC_OSF1_THREADS) && defined(_PTHREAD_USE_MANGLED_NAMES_) \
     && !defined(_PTHREAD_USE_PTDNAM_)
    /* Restore the original mangled names on Tru64 UNIX. */
#   define pthread_create __pthread_create
#   define pthread_join __pthread_join
#   define pthread_detach __pthread_detach
# endif
#ifdef GC_USE_LD_WRAP
# define WRAP_FUNC(f) __wrap_##f
# define REAL_FUNC(f) __real_##f
#else
# ifdef GC_USE_DLOPEN_WRAP
#   include <dlfcn.h>
#   define WRAP_FUNC(f) f
#   define REAL_FUNC(f) GC_real_##f
    /* We define both GC_f and plain f to be the wrapped function. */
    /* In that way plain calls work, as do calls from files that   */
    /* included gc.h, which redefined f to GC_f.                   */
    /* FIXME: Needs work for DARWIN and Tru64 (OSF1). */
    typedef int (* GC_pthread_create_t)(pthread_t *, const pthread_attr_t *,
                                        void * (*)(void *), void *);
    static GC_pthread_create_t REAL_FUNC(pthread_create);
    typedef int (* GC_pthread_sigmask_t)(int, const sigset_t *, sigset_t *);
    static GC_pthread_sigmask_t REAL_FUNC(pthread_sigmask);
    typedef int (* GC_pthread_join_t)(pthread_t, void **);
    static GC_pthread_join_t REAL_FUNC(pthread_join);
    typedef int (* GC_pthread_detach_t)(pthread_t);
    static GC_pthread_detach_t REAL_FUNC(pthread_detach);
# else
#   define WRAP_FUNC(f) GC_##f
#   if !defined(GC_DGUX386_THREADS)
#     define REAL_FUNC(f) f
#   else /* GC_DGUX386_THREADS */
#     define REAL_FUNC(f) __d10_##f
#   endif /* GC_DGUX386_THREADS */
# endif
#endif
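/* To make the three schemes concrete: the wrapper defined below as    */
/* WRAP_FUNC(pthread_create) expands to, respectively,                 */
/*   GC_USE_LD_WRAP:     __wrap_pthread_create, which calls            */
/*                       __real_pthread_create for the real one;       */
/*   GC_USE_DLOPEN_WRAP: pthread_create itself, which calls the        */
/*                       dlsym'ed GC_real_pthread_create pointer;      */
/*   otherwise:          GC_pthread_create, which calls                */
/*                       pthread_create directly.                      */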
#if defined(GC_USE_LD_WRAP) || defined(GC_USE_DLOPEN_WRAP)
  /* Define GC_ functions as aliases for the plain ones, which will    */
  /* be intercepted.  This allows files which include gc.h, and hence  */
  /* generate references to the GC_ symbols, to see the right symbols. */
  GC_API int GC_pthread_create(pthread_t * t, const pthread_attr_t * a,
                               void * (* fn)(void *), void * arg)
  {
    return pthread_create(t, a, fn, arg);
  }

  GC_API int GC_pthread_sigmask(int how, const sigset_t *mask,
                                sigset_t *old)
  {
    return pthread_sigmask(how, mask, old);
  }

  GC_API int GC_pthread_join(pthread_t t, void **res)
  {
    return pthread_join(t, res);
  }

  GC_API int GC_pthread_detach(pthread_t t)
  {
    return pthread_detach(t);
  }
#endif /* Linker-based interception. */
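/* E.g., with the GNU linker the GC_USE_LD_WRAP scheme is enabled by   */
/* linking the client along the lines of (an illustrative sketch):     */
/*   cc ... -Wl,--wrap=pthread_create -Wl,--wrap=pthread_join \        */
/*          -Wl,--wrap=pthread_detach -Wl,--wrap=pthread_sigmask       */
/* so that client calls resolve to the __wrap_* definitions above.     */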
#ifdef GC_USE_DLOPEN_WRAP
  STATIC GC_bool GC_syms_initialized = FALSE;

  STATIC void GC_init_real_syms(void)
  {
    void *dl_handle;
#   define LIBPTHREAD_NAME "libpthread.so.0"
#   define LIBPTHREAD_NAME_LEN 16 /* incl. trailing 0 */
    size_t len = LIBPTHREAD_NAME_LEN - 1;
    char namebuf[LIBPTHREAD_NAME_LEN];
    static char *libpthread_name = LIBPTHREAD_NAME;

    if (GC_syms_initialized) return;
#   ifdef RTLD_NEXT
      dl_handle = RTLD_NEXT;
#   else
      dl_handle = dlopen(libpthread_name, RTLD_LAZY);
      if (NULL == dl_handle) {
        /* Retry without the trailing version suffix, */
        /* e.g. "libpthread.so" for "libpthread.so.0". */
        while (isdigit(libpthread_name[len-1])) --len;
        if (libpthread_name[len-1] == '.') --len;
        memcpy(namebuf, libpthread_name, len);
        namebuf[len] = '\0';
        dl_handle = dlopen(namebuf, RTLD_LAZY);
      }
      if (NULL == dl_handle) ABORT("Couldn't open libpthread\n");
#   endif
    REAL_FUNC(pthread_create) = (GC_pthread_create_t)
                                dlsym(dl_handle, "pthread_create");
    REAL_FUNC(pthread_sigmask) = (GC_pthread_sigmask_t)
                                dlsym(dl_handle, "pthread_sigmask");
    REAL_FUNC(pthread_join) = (GC_pthread_join_t)
                                dlsym(dl_handle, "pthread_join");
    REAL_FUNC(pthread_detach) = (GC_pthread_detach_t)
                                dlsym(dl_handle, "pthread_detach");
    GC_syms_initialized = TRUE;
  }

# define INIT_REAL_SYMS() if (!GC_syms_initialized) GC_init_real_syms();
#else
# define INIT_REAL_SYMS()
#endif
static GC_bool parallel_initialized = FALSE;

GC_INNER GC_bool GC_need_to_lock = FALSE;

STATIC long GC_nprocs = 1;
                        /* Number of processors.  We may not have     */
                        /* access to all of them, but this is as good */
                        /* a guess as any ...                         */
#ifdef THREAD_LOCAL_ALLOC
  /* We must explicitly mark ptrfree and gcj free lists, since the     */
  /* free-list links wouldn't otherwise be found.  We also set them    */
  /* in the normal free lists, since that involves touching less      */
  /* memory than if we scanned them normally.                         */
  GC_INNER void GC_mark_thread_local_free_lists(void)
  {
    int i;
    GC_thread p;

    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
      for (p = GC_threads[i]; 0 != p; p = p -> next) {
        GC_mark_thread_local_fls_for(&(p->tlfs));
      }
    }
  }

# if defined(GC_ASSERTIONS)
    void GC_check_tls_for(GC_tlfs p);
#   if defined(USE_CUSTOM_SPECIFIC)
      void GC_check_tsd_marks(tsd *key);
#   endif

    /* Check that all thread-local free-lists are completely marked. */
    /* Also check that thread-specific-data structures are marked.   */
    void GC_check_tls(void)
    {
      int i;
      GC_thread p;

      for (i = 0; i < THREAD_TABLE_SZ; ++i) {
        for (p = GC_threads[i]; 0 != p; p = p -> next) {
          GC_check_tls_for(&(p->tlfs));
        }
      }
#     if defined(USE_CUSTOM_SPECIFIC)
        if (GC_thread_key != 0)
          GC_check_tsd_marks(GC_thread_key);
#     endif
    }
# endif /* GC_ASSERTIONS */
#endif /* THREAD_LOCAL_ALLOC */
#ifdef PARALLEL_MARK

# ifndef MAX_MARKERS
#   define MAX_MARKERS 16
# endif

static ptr_t marker_sp[MAX_MARKERS - 1] = {0};
#ifdef IA64
  static ptr_t marker_bsp[MAX_MARKERS - 1] = {0};
#endif

STATIC void * GC_mark_thread(void * id)
{
  word my_mark_no = 0;
  IF_CANCEL(int cancel_state;)

  DISABLE_CANCEL(cancel_state);
                         /* Mark threads are not cancellable; they */
                         /* should be invisible to client.         */
  marker_sp[(word)id] = GC_approx_sp();
# ifdef IA64
    marker_bsp[(word)id] = GC_save_regs_in_stack();
# endif

  if ((word)id == (word)-1) return 0; /* to make compiler happy */

  for (;; ++my_mark_no) {
    /* GC_mark_no is passed only to allow GC_help_marker to terminate   */
    /* promptly.  This is important if it were called from the signal   */
    /* handler or from the GC lock acquisition code.  Under Linux, it's */
    /* not safe to call it from a signal handler, since it uses mutexes */
    /* and condition variables.  Since it is called only here, the      */
    /* argument is unnecessary.                                         */
    if (my_mark_no < GC_mark_no || my_mark_no > GC_mark_no + 2) {
        /* Resynchronize if we get far off, e.g. because GC_mark_no */
        /* wrapped.                                                 */
        my_mark_no = GC_mark_no;
    }
#   ifdef DEBUG_THREADS
      GC_printf("Starting mark helper for mark number %lu\n",
                (unsigned long)my_mark_no);
#   endif
    GC_help_marker(my_mark_no);
  }
}
STATIC pthread_t GC_mark_threads[MAX_MARKERS];

#define PTHREAD_CREATE REAL_FUNC(pthread_create)

static void start_mark_threads(void)
{
    int i;
    pthread_attr_t attr;

    if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");

    if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
      ABORT("pthread_attr_setdetachstate failed");

#   if defined(HPUX) || defined(GC_DGUX386_THREADS)
      /* Default stack size is usually too small: fix it. */
      /* Otherwise marker threads or GC may run out of    */
      /* space.                                           */
#     define MIN_STACK_SIZE (8*HBLKSIZE*sizeof(word))
      {
        size_t old_size;

        if (pthread_attr_getstacksize(&attr, &old_size) != 0)
          ABORT("pthread_attr_getstacksize failed\n");
        if (old_size < MIN_STACK_SIZE) {
          if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
            ABORT("pthread_attr_setstacksize failed\n");
        }
      }
#   endif /* HPUX || GC_DGUX386_THREADS */
    for (i = 0; i < GC_markers - 1; ++i) {
      if (0 != PTHREAD_CREATE(GC_mark_threads + i, &attr,
                              GC_mark_thread, (void *)(word)i)) {
        WARN("Marker thread creation failed, errno = %" GC_PRIdPTR "\n",
             errno);
        /* Don't try to create other marker threads. */
        if (i == 0) GC_parallel = FALSE;
        break;
      }
    }
    if (GC_print_stats) {
      GC_log_printf("Started %ld mark helper threads\n", GC_markers - 1);
    }
    pthread_attr_destroy(&attr);
}

#endif /* PARALLEL_MARK */
GC_INNER GC_bool GC_thr_initialized = FALSE;

GC_INNER volatile GC_thread GC_threads[THREAD_TABLE_SZ] = {0};

void GC_push_thread_structures(void)
{
    GC_ASSERT(I_HOLD_LOCK());
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
#   if defined(THREAD_LOCAL_ALLOC)
      GC_push_all((ptr_t)(&GC_thread_key),
                  (ptr_t)(&GC_thread_key) + sizeof(GC_thread_key));
#   endif
}
/* It may not be safe to allocate when we register the first thread. */
static struct GC_Thread_Rep first_thread;

/* Add a thread to GC_threads.  We assume it wasn't already there. */
/* Caller holds allocation lock.                                   */
STATIC GC_thread GC_new_thread(pthread_t id)
{
    int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
    GC_thread result;
    static GC_bool first_thread_used = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
    } else {
        result = (struct GC_Thread_Rep *)
                 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
        if (result == 0) return(0);
    }
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there. */
/* (The code intentionally traps if it wasn't.)             */
STATIC void GC_delete_thread(pthread_t id)
{
    int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
    GC_thread p = GC_threads[hv];
    GC_thread prev = 0;

    GC_ASSERT(I_HOLD_LOCK());
    while (!THREAD_EQUAL(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
#   ifdef GC_DARWIN_THREADS
      mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread);
#   endif
    if (p != &first_thread) GC_INTERNAL_FREE(p);
}
/* If a thread has been joined, but we have not yet         */
/* been notified, then there may be more than one thread    */
/* in the table with the same pthread id.                   */
/* This is OK, but we need a way to delete a specific one.  */
STATIC void GC_delete_gc_thread(GC_thread gc_id)
{
    pthread_t id = gc_id -> id;
    int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
    GC_thread p = GC_threads[hv];
    GC_thread prev = 0;

    GC_ASSERT(I_HOLD_LOCK());
    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
#   ifdef GC_DARWIN_THREADS
      mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread);
#   endif
    if (p != &first_thread) GC_INTERNAL_FREE(p);
}
/* Return a GC_thread corresponding to a given pthread_t. */
/* Returns 0 if it's not there.                           */
/* Caller holds allocation lock or otherwise inhibits     */
/* updates.                                               */
/* If there is more than one thread with the given id we  */
/* return the most recent one.                            */
GC_INNER GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
    GC_thread p = GC_threads[hv];

    while (p != 0 && !THREAD_EQUAL(p -> id, id)) p = p -> next;
    return(p);
}
/* Called by GC_finalize() (in case of an allocation failure observed). */
GC_INNER void GC_reset_finalizer_nested(void)
{
    GC_thread me = GC_lookup_thread(pthread_self());
    me->finalizer_nested = 0;
}
/* Checks and updates the thread-local level of finalizers recursion.  */
/* Returns NULL if GC_invoke_finalizers() should not be called by the  */
/* collector (to minimize the risk of a deep finalizers recursion),    */
/* otherwise returns a pointer to the thread-local finalizer_nested.   */
/* Called by GC_notify_or_invoke_finalizers() only (the lock is held). */
GC_INNER unsigned *GC_check_finalizer_nested(void)
{
    GC_thread me = GC_lookup_thread(pthread_self());
    unsigned nesting_level = me->finalizer_nested;
    if (nesting_level) {
        /* We are inside another GC_invoke_finalizers().      */
        /* Skip some implicitly-called GC_invoke_finalizers() */
        /* depending on the nesting (recursion) level.        */
        if (++me->finalizer_skipped < (1U << nesting_level)) return NULL;
        me->finalizer_skipped = 0;
    }
    me->finalizer_nested = nesting_level + 1;
    return &me->finalizer_nested;
}
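/* For example (a sketch of the schedule implemented above): at        */
/* nesting_level 1 every 2nd implicitly-triggered invocation proceeds, */
/* at level 2 every 4th, at level 3 every 8th, and so on, so deeply    */
/* nested finalization quickly stops re-invoking finalizers.           */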
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
  /* This is called from thread-local GC_malloc(). */
  GC_bool GC_is_thread_tsd_valid(void *tsd)
  {
    char *me;

    LOCK();
    me = (char *)GC_lookup_thread(pthread_self());
    UNLOCK();
    /* FIXME: We can check tsd more correctly (since now we have access */
    /* to the right declarations).  This old algorithm (moved from      */
    /* thread_local_alloc.c) checks only that it's close.               */
    return((char *)tsd > me && (char *)tsd < me + 1000);
  }
#endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */
#ifdef HANDLE_FORK
/* Remove all entries from the GC_threads table, except the  */
/* one for the current thread.  We need to do this in the    */
/* child process after a fork(), since only the current      */
/* thread survives in the child.                             */
STATIC void GC_remove_all_threads_but_me(void)
{
    pthread_t self = pthread_self();
    int hv;
    GC_thread p, next, me;

    for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
      me = 0;
      for (p = GC_threads[hv]; 0 != p; p = next) {
        next = p -> next;
        if (THREAD_EQUAL(p -> id, self)) {
          me = p;
          p -> next = 0;
        } else {
#         ifdef THREAD_LOCAL_ALLOC
            if (!(p -> flags & FINISHED)) {
              GC_destroy_thread_local(&(p->tlfs));
            }
#         endif /* THREAD_LOCAL_ALLOC */
          if (p != &first_thread) GC_INTERNAL_FREE(p);
        }
      }
      GC_threads[hv] = me;
    }
}
#endif /* HANDLE_FORK */
#ifdef USE_PROC_FOR_LIBRARIES
  GC_INNER GC_bool GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
  {
    int i;
    GC_thread p;

    GC_ASSERT(I_HOLD_LOCK());
#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers - 1; ++i) {
        if (marker_sp[i] > lo && marker_sp[i] < hi) return TRUE;
#       ifdef IA64
          if (marker_bsp[i] > lo && marker_bsp[i] < hi) return TRUE;
#       endif
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (0 != p -> stack_end) {
#         ifdef STACK_GROWS_UP
            if (p -> stack_end >= lo && p -> stack_end < hi) return TRUE;
#         else /* STACK_GROWS_DOWN */
            if (p -> stack_end > lo && p -> stack_end <= hi) return TRUE;
#         endif
        }
      }
    }
    return FALSE;
  }
#endif /* USE_PROC_FOR_LIBRARIES */
#ifdef IA64
  /* Find the largest stack_base smaller than bound.  May be used */
  /* to find the boundary between a register stack and adjacent   */
  /* immediately preceding memory stack.                          */
  GC_INNER ptr_t GC_greatest_stack_base_below(ptr_t bound)
  {
    int i;
    GC_thread p;
    ptr_t result = 0;

    GC_ASSERT(I_HOLD_LOCK());
#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers - 1; ++i) {
        if (marker_sp[i] > result && marker_sp[i] < bound)
          result = marker_sp[i];
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> stack_end > result && p -> stack_end < bound) {
          result = p -> stack_end;
        }
      }
    }
    return result;
  }
#endif /* IA64 */
#ifdef GC_LINUX_THREADS
  /* Return the number of processors, or a value <= 0 if it cannot be */
  /* determined.                                                      */
  STATIC int GC_get_nprocs(void)
  {
    /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that */
    /* appears to be buggy in many cases.                         */
    /* We look for lines "cpu<n>" in /proc/stat.                  */
#   define STAT_BUF_SIZE 4096
#   define STAT_READ read
        /* Also defined in os_dep.c. */
        /* If read is wrapped, this may need to be redefined to call */
        /* the real one.                                             */
    char stat_buf[STAT_BUF_SIZE];
    int f;
    int result = 1;
        /* Some old kernels only have a single "cpu nnnn ..." */
        /* entry in /proc/stat.  We identify those as         */
        /* uniprocessors.                                     */
    size_t i, len = 0;

    f = open("/proc/stat", O_RDONLY);
    if (f < 0 || (len = STAT_READ(f, stat_buf, STAT_BUF_SIZE)) < 100) {
      WARN("Couldn't read /proc/stat\n", 0);
      return -1; /* Report failure; the caller falls back to a guess. */
    }
    for (i = 0; i < len - 100; ++i) {
      if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
          && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
        int cpu_no = atoi(stat_buf + i + 4);
        if (cpu_no >= result) result = cpu_no + 1;
      }
    }
    close(f);
    return result;
  }
#endif /* GC_LINUX_THREADS */
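/* For reference, the /proc/stat lines scanned above look roughly like: */
/*   cpu  86098 ...                                                     */
/*   cpu0 42930 ...                                                     */
/*   cpu1 43168 ...                                                     */
/* so the highest "cpu<n>" index plus one gives the processor count.    */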
/* We hold the GC lock.  Wait until an in-progress GC has finished.    */
/* Repeatedly RELEASES GC LOCK in order to wait.                       */
/* If wait_for_all is true, then we exit with the GC lock held and no  */
/* collection in progress; otherwise we just wait for the current GC   */
/* to finish.                                                          */
STATIC void GC_wait_for_gc_completion(GC_bool wait_for_all)
{
    GC_ASSERT(I_HOLD_LOCK());
    ASSERT_CANCEL_DISABLED();
    if (GC_incremental && GC_collection_in_progress()) {
        word old_gc_no = GC_gc_no;

        /* Make sure that no part of our stack is still on the mark */
        /* stack, since it's about to be unmapped.                  */
        while (GC_incremental && GC_collection_in_progress()
               && (wait_for_all || old_gc_no == GC_gc_no)) {
            ENTER_GC();
            GC_in_thread_creation = TRUE;
            GC_collect_a_little_inner(1);
            GC_in_thread_creation = FALSE;
            EXIT_GC();
            UNLOCK(); sched_yield(); LOCK();
        }
    }
}
#ifdef HANDLE_FORK
/* Procedures called before and after a fork.  The goal here is to     */
/* make it safe to call GC_malloc() in a forked child.  It's unclear   */
/* that this is attainable, since the single UNIX spec seems to imply  */
/* that one should only call async-signal-safe functions, and we       */
/* probably can't quite guarantee that.  But we give it our best shot. */
/* (That same spec also implies that it's not safe to call the system  */
/* malloc between fork() and exec().  Thus we're doing no worse than   */
/* it.)                                                                */

IF_CANCEL(static int fork_cancel_state;)
                                /* protected by allocation lock. */
/* Called before a fork() */
STATIC void GC_fork_prepare_proc(void)
{
    /* Acquire all relevant locks, so that after releasing the locks */
    /* the child will see a consistent state in which monitor        */
    /* invariants hold.  Unfortunately, we can't acquire libc locks  */
    /* we might need, and there seems to be no guarantee that libc   */
    /* must install a suitable fork handler.                         */
    /* Wait for an ongoing GC to finish, since we can't finish it in */
    /* the (one remaining thread in) the child.                      */
      LOCK();
      DISABLE_CANCEL(fork_cancel_state);
                /* Following waits may include cancellation points. */
#     if defined(PARALLEL_MARK)
        if (GC_parallel)
          GC_wait_for_reclaim();
#     endif
      GC_wait_for_gc_completion(TRUE);
#     if defined(PARALLEL_MARK)
        if (GC_parallel)
          GC_acquire_mark_lock();
#     endif
}
/* Called in parent after a fork() */
STATIC void GC_fork_parent_proc(void)
{
#   if defined(PARALLEL_MARK)
      if (GC_parallel)
        GC_release_mark_lock();
#   endif
    RESTORE_CANCEL(fork_cancel_state);
    UNLOCK();
}
/* Called in child after a fork() */
STATIC void GC_fork_child_proc(void)
{
    /* Clean up the thread table, so that just our thread is left. */
#   if defined(PARALLEL_MARK)
      if (GC_parallel)
        GC_release_mark_lock();
#   endif
    GC_remove_all_threads_but_me();
#   ifdef PARALLEL_MARK
      /* Turn off parallel marking in the child, since we are probably  */
      /* just going to exec, and we would have to restart mark threads. */
        GC_markers = 1;
        GC_parallel = FALSE;
#   endif /* PARALLEL_MARK */
    RESTORE_CANCEL(fork_cancel_state);
    UNLOCK();
}
#endif /* HANDLE_FORK */
#if defined(GC_DGUX386_THREADS)
  /* Return the number of processors, or a value <= 0 if it cannot be */
  /* determined.                                                      */
  STATIC int GC_get_nprocs(void)
  {
    /* <takis@XFree86.Org> */
    int numCpus;
    struct dg_sys_info_pm_info pm_sysinfo;
    int status = 0;

    status = dg_sys_info((long int *) &pm_sysinfo,
                         DG_SYS_INFO_PM_INFO_TYPE, DG_SYS_INFO_PM_CURRENT_VERSION);
    if (status < 0) {
       /* set -1 for error */
       numCpus = -1;
    } else {
      /* Active CPUs */
      numCpus = pm_sysinfo.idle_vp_count;
    }

#   ifdef DEBUG_THREADS
      GC_printf("Number of active CPUs in this system: %d\n", numCpus);
#   endif
    return(numCpus);
  }
#endif /* GC_DGUX386_THREADS */
#if defined(GC_NETBSD_THREADS)
  static int get_ncpu(void)
  {
    int mib[] = {CTL_HW,HW_NCPU};
    int res;
    size_t len = sizeof(res);

    sysctl(mib, sizeof(mib)/sizeof(int), &res, &len, NULL, 0);
    return res;
  }
#endif /* GC_NETBSD_THREADS */
#if defined(GC_LINUX_THREADS) && defined(INCLUDE_LINUX_THREAD_DESCR)
  __thread int GC_dummy_thread_local;
  GC_INNER GC_bool GC_enclosing_mapping(ptr_t addr,
                                        ptr_t *startp, ptr_t *endp);
#endif
/* We hold the allocation lock. */
GC_INNER void GC_thr_init(void)
{
# ifndef GC_DARWIN_THREADS
    int dummy;
# endif
  GC_thread t;

  if (GC_thr_initialized) return;
  GC_thr_initialized = TRUE;

# ifdef HANDLE_FORK
    /* Prepare for a possible fork. */
    pthread_atfork(GC_fork_prepare_proc, GC_fork_parent_proc,
                   GC_fork_child_proc);
# endif /* HANDLE_FORK */
# if defined(INCLUDE_LINUX_THREAD_DESCR)
    /* Explicitly register the region including the address    */
    /* of a thread local variable.  This should include thread */
    /* locals for the main thread, except for those allocated  */
    /* in response to dlopen calls.                            */
    {
      ptr_t thread_local_addr = (ptr_t)(&GC_dummy_thread_local);
      ptr_t main_thread_start, main_thread_end;
      if (!GC_enclosing_mapping(thread_local_addr, &main_thread_start,
                                &main_thread_end))
        ABORT("Failed to find mapping for main thread thread locals");
      GC_add_roots_inner(main_thread_start, main_thread_end, FALSE);
    }
# endif
  /* Add the initial thread, so we can stop it. */
  t = GC_new_thread(pthread_self());
# ifdef GC_DARWIN_THREADS
    t -> stop_info.mach_thread = mach_thread_self();
# else
    t -> stop_info.stack_ptr = (ptr_t)(&dummy);
# endif
  t -> flags = DETACHED | MAIN_THREAD;
  GC_stop_init();

  /* Set GC_nprocs. */
  {
    char * nprocs_string = GETENV("GC_NPROCS");
    GC_nprocs = -1;
    if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
  }
  if (GC_nprocs <= 0) {
#   if defined(GC_HPUX_THREADS)
      GC_nprocs = pthread_num_processors_np();
#   endif
#   if defined(GC_OSF1_THREADS) || defined(GC_AIX_THREADS) \
       || defined(GC_SOLARIS_THREADS) || defined(GC_GNU_THREADS)
      GC_nprocs = sysconf(_SC_NPROCESSORS_ONLN);
      if (GC_nprocs <= 0) GC_nprocs = 1;
#   endif
#   if defined(GC_IRIX_THREADS)
      GC_nprocs = sysconf(_SC_NPROC_ONLN);
      if (GC_nprocs <= 0) GC_nprocs = 1;
#   endif
#   if defined(GC_NETBSD_THREADS)
      GC_nprocs = get_ncpu();
#   endif
#   if defined(GC_OPENBSD_THREADS)
      /* FIXME: Implement real "get_ncpu". */
      GC_nprocs = 2;
#   endif
#   if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS)
      int ncpus = 1;
      size_t len = sizeof(ncpus);
      sysctl((int[2]) {CTL_HW, HW_NCPU}, 2, &ncpus, &len, NULL, 0);
      GC_nprocs = ncpus;
#   endif
#   if defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS)
      GC_nprocs = GC_get_nprocs();
#   endif
  }
  if (GC_nprocs <= 0) {
    WARN("GC_get_nprocs() returned %" GC_PRIdPTR "\n", GC_nprocs);
    GC_nprocs = 2; /* assume dual-core */
#   ifdef PARALLEL_MARK
      GC_markers = 1;
#   endif
  } else {
#   ifdef PARALLEL_MARK
      {
        char * markers_string = GETENV("GC_MARKERS");
        if (markers_string != NULL) {
          GC_markers = atoi(markers_string);
          if (GC_markers > MAX_MARKERS) {
            WARN("Limiting number of mark threads\n", 0);
            GC_markers = MAX_MARKERS;
          }
        } else {
          GC_markers = GC_nprocs;
          if (GC_markers >= MAX_MARKERS)
            GC_markers = MAX_MARKERS; /* silently limit GC_markers value */
        }
      }
#   endif
  }
# ifdef PARALLEL_MARK
    if (GC_print_stats) {
      GC_log_printf("Number of processors = %ld, "
                    "number of marker threads = %ld\n", GC_nprocs, GC_markers);
    }
    if (GC_markers <= 1) {
      GC_parallel = FALSE;
      if (GC_print_stats) {
        GC_log_printf("Single marker thread, turning off parallel marking\n");
      }
    } else {
      GC_parallel = TRUE;
      /* Disable true incremental collection, but generational is OK. */
      GC_time_limit = GC_TIME_UNLIMITED;
    }
    /* If we are using a parallel marker, actually start helper threads. */
    if (GC_parallel) start_mark_threads();
# endif
}
/* Perform all initializations, including those that         */
/* may require allocation.                                   */
/* Called without allocation lock.                           */
/* Must be called before a second thread is created.         */
/* Did we say it's called without the allocation lock?       */
GC_INNER void GC_init_parallel(void)
{
    if (parallel_initialized) return;
    parallel_initialized = TRUE;

    /* GC_init() calls us back, so set flag first. */
    if (!GC_is_initialized) GC_init();
    /* Initialize thread local free lists if used. */
#   if defined(THREAD_LOCAL_ALLOC)
      LOCK();
      GC_init_thread_local(&(GC_lookup_thread(pthread_self())->tlfs));
      UNLOCK();
#   endif
}
#if !defined(GC_DARWIN_THREADS) && !defined(GC_OPENBSD_THREADS)
  GC_API int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set,
                                        sigset_t *oset)
  {
    sigset_t fudged_set;

    INIT_REAL_SYMS();
    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(REAL_FUNC(pthread_sigmask)(how, set, oset));
  }
#endif /* !GC_DARWIN_THREADS && !GC_OPENBSD_THREADS */
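/* The deletion of SIG_SUSPEND above matters in practice: a client      */
/* that blocks "all" signals around a critical section would otherwise  */
/* also block the signal used here to stop the world, and a subsequent  */
/* collection could then stall waiting for that thread to suspend.      */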
/* Wrapper for functions that are likely to block for an appreciable */
/* length of time.                                                   */

GC_INNER void GC_do_blocking_inner(ptr_t data, void * context)
{
    struct blocking_data * d = (struct blocking_data *) data;
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(!(me -> thread_blocked));
#   ifdef SPARC
        me -> stop_info.stack_ptr = GC_save_regs_in_stack();
#   elif !defined(GC_DARWIN_THREADS)
        me -> stop_info.stack_ptr = GC_approx_sp();
#   endif
#   ifdef IA64
        me -> backing_store_ptr = GC_save_regs_in_stack();
#   endif
    me -> thread_blocked = TRUE;
    /* Save context here if we want to support precise stack marking. */
    UNLOCK();
    d -> client_data = (d -> fn)(d -> client_data);
    LOCK();   /* This will block if the world is stopped. */
    me -> thread_blocked = FALSE;
    UNLOCK();
}
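/*
 * Typical client usage of the machinery above (a sketch; read_data
 * and my_args are illustrative names only):
 * +   static void * read_data(void * arg)
 * +   {
 * +     ... a long blocking read; no GC calls, no heap pointers ...
 * +     return arg;
 * +   }
 * +   result = GC_do_blocking(read_data, &my_args);
 * While inside GC_do_blocking(), the thread is flagged as blocked and
 * the collector does not wait for it when stopping the world.
 */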
/* GC_call_with_gc_active() has the opposite to GC_do_blocking()      */
/* functionality.  It might be called from a user function invoked by */
/* GC_do_blocking() to temporarily allow calling any GC function      */
/* and/or manipulating pointers to the garbage-collected heap.        */
GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn,
                                             void * client_data)
{
    struct GC_activation_frame_s frame;
    GC_thread me;
    LOCK();   /* This will block if the world is stopped. */
    me = GC_lookup_thread(pthread_self());

    /* Adjust our stack base value (this could happen unless    */
    /* GC_get_stack_base() was used which returned GC_SUCCESS). */
    if ((me -> flags & MAIN_THREAD) == 0) {
      GC_ASSERT(me -> stack_end != NULL);
      if (me -> stack_end HOTTER_THAN (ptr_t)(&frame))
        me -> stack_end = (ptr_t)(&frame);
    } else {
      /* The original stack. */
      if (GC_stackbottom HOTTER_THAN (ptr_t)(&frame))
        GC_stackbottom = (ptr_t)(&frame);
    }

    if (me -> thread_blocked == FALSE) {
      /* We are not inside GC_do_blocking() - do nothing more. */
      UNLOCK();
      return fn(client_data);
    }

    /* Setup new "frame". */
#   ifdef GC_DARWIN_THREADS
      /* FIXME: Implement it for Darwin ("frames" are ignored at present). */
#   else
      frame.saved_stack_ptr = me -> stop_info.stack_ptr;
#   endif
#   ifdef IA64
      /* This is the same as in GC_call_with_stack_base(). */
      frame.backing_store_end = GC_save_regs_in_stack();
                        /* Unnecessarily flushes register stack, */
                        /* but that probably doesn't hurt.       */
      frame.saved_backing_store_ptr = me -> backing_store_ptr;
#   endif
    frame.prev = me -> activation_frame;
    me -> thread_blocked = FALSE;
    me -> activation_frame = &frame;

    UNLOCK();
    client_data = fn(client_data);
    GC_ASSERT(me -> thread_blocked == FALSE);
    GC_ASSERT(me -> activation_frame == &frame);

    /* Restore original "frame". */
    LOCK();
    me -> activation_frame = frame.prev;
#   ifdef IA64
      me -> backing_store_ptr = frame.saved_backing_store_ptr;
#   endif
    me -> thread_blocked = TRUE;
#   ifndef GC_DARWIN_THREADS
      me -> stop_info.stack_ptr = frame.saved_stack_ptr;
#   endif
    UNLOCK();

    return client_data; /* result */
}
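/*
 * For illustration (a sketch; blocked_fn and do_gc_work are
 * illustrative names): a function running under GC_do_blocking() may
 * re-enter GC-safe territory with:
 * +   static void * blocked_fn(void * arg)
 * +   {
 * +     ... long blocking work ...
 * +     GC_call_with_gc_active(do_gc_work, arg);
 * +     ... more blocking work ...
 * +     return arg;
 * +   }
 */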
struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;           /* 1 ==> in our thread table, but */
                                /* parent hasn't yet noticed.     */
};
GC_API int GC_CALL GC_unregister_my_thread(void)
{
    GC_thread me;
    IF_CANCEL(int cancel_state;)

    LOCK();
    DISABLE_CANCEL(cancel_state);
    /* Wait for any GC that may be marking from our stack to */
    /* complete before we remove this thread.                */
    GC_wait_for_gc_completion(FALSE);
    me = GC_lookup_thread(pthread_self());
#   if defined(THREAD_LOCAL_ALLOC)
      GC_destroy_thread_local(&(me->tlfs));
#   endif
    if (me -> flags & DETACHED) {
      GC_delete_thread(pthread_self());
    } else {
      me -> flags |= FINISHED;
    }
#   if defined(THREAD_LOCAL_ALLOC)
      GC_remove_specific(GC_thread_key);
#   endif
    RESTORE_CANCEL(cancel_state);
    UNLOCK();
    return GC_SUCCESS;
}
/* Called at thread exit.                               */
/* Never called for main thread.  That's OK, since it   */
/* results in at most a tiny one-time leak.  And        */
/* linuxthreads doesn't reclaim the main thread's       */
/* resources or id anyway.                              */
STATIC void GC_thread_exit_proc(void *arg)
{
    GC_unregister_my_thread();
}
GC_API int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    INIT_REAL_SYMS();
    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id */
    /* can't have been recycled by pthreads.                          */
    UNLOCK();
    result = REAL_FUNC(pthread_join)(thread, retval);
# if defined (GC_FREEBSD_THREADS)
    /* On FreeBSD, the wrapped pthread_join() sometimes returns (what
       appears to be) a spurious EINTR which caused the test and real code
       to gratuitously fail.  Having looked at system pthread library source
       code, I see how this return code may be generated.  In one path of
       code, pthread_join() just returns the errno setting of the thread
       being joined.  This does not match the POSIX specification or the
       local man pages.  Thus, I have taken the liberty to catch this one
       spurious return value, properly conditionalized on
       GC_FREEBSD_THREADS. */
    if (result == EINTR) result = 0;
# endif
    if (result == 0) {
        LOCK();
        /* Here the pthread thread id may have been recycled. */
        GC_delete_gc_thread(thread_gc_id);
        UNLOCK();
    }
    return result;
}
GC_API int WRAP_FUNC(pthread_detach)(pthread_t thread)
{
    int result;
    GC_thread thread_gc_id;

    INIT_REAL_SYMS();
    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    UNLOCK();
    result = REAL_FUNC(pthread_detach)(thread);
    if (result == 0) {
      LOCK();
      thread_gc_id -> flags |= DETACHED;
      /* Here the pthread thread id may have been recycled. */
      if (thread_gc_id -> flags & FINISHED) {
        GC_delete_gc_thread(thread_gc_id);
      }
      UNLOCK();
    }
    return result;
}
GC_INNER GC_bool GC_in_thread_creation = FALSE;
                                /* Protected by allocation lock. */

STATIC GC_thread GC_register_my_thread_inner(const struct GC_stack_base *sb,
                                             pthread_t my_pthread)
{
    GC_thread me;

    GC_in_thread_creation = TRUE; /* OK to collect from unknown thread. */
    me = GC_new_thread(my_pthread);
    GC_in_thread_creation = FALSE;
    if (me == 0)
      ABORT("Failed to allocate memory for thread registering.");
#   ifdef GC_DARWIN_THREADS
      me -> stop_info.mach_thread = mach_thread_self();
#   else
      me -> stop_info.stack_ptr = sb -> mem_base;
#   endif
    me -> stack_end = sb -> mem_base;
    if (me -> stack_end == NULL)
      ABORT("Bad stack base in GC_register_my_thread");
#   ifdef IA64
      me -> backing_store_end = sb -> reg_base;
#   endif /* IA64 */
    return me;
}
GC_API void GC_CALL GC_allow_register_threads(void)
{
    /* Check GC is initialized and the current thread is registered. */
    GC_ASSERT(GC_lookup_thread(pthread_self()) != 0);

    GC_need_to_lock = TRUE; /* We are multi-threaded now. */
}
GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *sb)
{
    pthread_t my_pthread = pthread_self();
    GC_thread me;

    if (GC_need_to_lock == FALSE)
      ABORT("Threads explicit registering is not previously enabled");

    LOCK();
    me = GC_lookup_thread(my_pthread);
    if (0 == me) {
      me = GC_register_my_thread_inner(sb, my_pthread);
      me -> flags |= DETACHED;
          /* Treat as detached, since we do not need to worry about */
          /* pointer results.                                       */
#     if defined(THREAD_LOCAL_ALLOC)
        GC_init_thread_local(&(me->tlfs));
#     endif
      UNLOCK();
      return GC_SUCCESS;
    } else {
      UNLOCK();
      return GC_DUPLICATE;
    }
}
STATIC void * GC_CALLBACK GC_inner_start_routine(struct GC_stack_base *sb,
                                                 void * arg)
{
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
#   ifdef DEBUG_THREADS
      GC_printf("Starting thread 0x%x, pid = %ld, sp = %p\n",
                (unsigned)my_pthread, (long)getpid(), &arg);
#   endif
    LOCK();
    me = GC_register_my_thread_inner(sb, my_pthread);
    me -> flags = si -> flags;
#   if defined(THREAD_LOCAL_ALLOC)
      GC_init_thread_local(&(me->tlfs));
#   endif
    UNLOCK();
    start = si -> start_routine;
#   ifdef DEBUG_THREADS
      GC_printf("start_routine = %p\n", (void *)(signed_word)start);
#   endif
    start_arg = si -> arg;
    sem_post(&(si -> registered));      /* Last action on si.  */
                                        /* OK to deallocate.   */
    pthread_cleanup_push(GC_thread_exit_proc, 0);
    result = (*start)(start_arg);
#   ifdef DEBUG_THREADS
      GC_printf("Finishing thread 0x%x\n", (unsigned)pthread_self());
#   endif
    me -> status = result;
    pthread_cleanup_pop(1);
    /* Cleanup acquires lock, ensuring that we can't exit while      */
    /* a collection that thinks we're alive is trying to stop us.    */
    return result;
}
STATIC void * GC_start_routine(void * arg)
{
#   ifdef INCLUDE_LINUX_THREAD_DESCR
      struct GC_stack_base sb;

#     ifdef REDIRECT_MALLOC
        /* GC_get_stack_base may call pthread_getattr_np, which can */
        /* unfortunately call realloc, which may allocate from an   */
        /* unregistered thread.  This is unpleasant, since it might */
        /* force heap growth.                                       */
        GC_disable();
#     endif
      if (GC_get_stack_base(&sb) != GC_SUCCESS)
        ABORT("Failed to get thread stack base.");
#     ifdef REDIRECT_MALLOC
        GC_enable();
#     endif
      return GC_inner_start_routine(&sb, arg);
#   else
      return GC_call_with_stack_base(GC_inner_start_routine, arg);
#   endif
}
GC_API int WRAP_FUNC(pthread_create)(pthread_t *new_thread,
                                     const pthread_attr_t *attr,
                                     void *(*start_routine)(void *), void *arg)
{
    int result;
    int detachstate;
    word my_flags = 0;
    struct start_info * si;
        /* This is otherwise saved only in an area mmapped by the thread */
        /* library, which isn't visible to the collector.                */

    /* We resist the temptation to muck with the stack size here,      */
    /* even if the default is unreasonably small.  That's the client's */
    /* responsibility.                                                 */

    INIT_REAL_SYMS();
    LOCK();
    si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info),
                                                 NORMAL);
    UNLOCK();
    if (!parallel_initialized) GC_init_parallel();
    if (0 == si &&
        (si = (struct start_info *)
                (*GC_get_oom_fn())(sizeof(struct start_info))) == 0)
      return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
#   ifdef GC_ASSERTIONS
      {
        size_t stack_size = 0;
        if (NULL != attr) {
          pthread_attr_getstacksize(attr, &stack_size);
        }
        if (0 == stack_size) {
          pthread_attr_t my_attr;
          pthread_attr_init(&my_attr);
          pthread_attr_getstacksize(&my_attr, &stack_size);
        }
        /* On Solaris 10, with default attr initialization, */
        /* stack_size remains 0.  Fudge it.                 */
        if (0 == stack_size) {
            WARN("Failed to get stack size for assertion checking\n", 0);
            stack_size = 1000000;
        }
#       ifdef PARALLEL_MARK
          GC_ASSERT(stack_size >= (8*HBLKSIZE*sizeof(word)));
#       else
          /* FreeBSD-5.3/Alpha: default pthread stack is 64K, */
          /* HBLKSIZE=8192, sizeof(word)=8.                   */
          GC_ASSERT(stack_size >= 65536);
#       endif
        /* Our threads may need to do some work for the GC. */
        /* Ridiculously small threads won't work, and they  */
        /* probably wouldn't work anyway.                   */
      }
#   endif
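    /* A quick sanity check of the bound asserted above: with          */
    /* HBLKSIZE = 4096 and 8-byte words, 8*HBLKSIZE*sizeof(word) is    */
    /* 256 KB; with HBLKSIZE = 8192 it is 512 KB, which exceeds the    */
    /* 64 KB FreeBSD default mentioned above, hence the relaxed 65536  */
    /* assertion in the non-parallel case.                             */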
    if (NULL == attr) {
        detachstate = PTHREAD_CREATE_JOINABLE;
    } else {
        pthread_attr_getdetachstate(attr, &detachstate);
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    UNLOCK();
#   ifdef DEBUG_THREADS
      GC_printf("About to start new thread from thread 0x%x\n",
                (unsigned)pthread_self());
#   endif
    GC_need_to_lock = TRUE;

    result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);

#   ifdef DEBUG_THREADS
      GC_printf("Started thread 0x%x\n", (unsigned)(*new_thread));
#   endif
    /* Wait until child has been added to the thread table.           */
    /* This also ensures that we hold onto si until the child is done */
    /* with it.  Thus it doesn't matter whether it is otherwise       */
    /* visible to the collector.                                      */
    if (0 == result) {
        IF_CANCEL(int cancel_state;)
        DISABLE_CANCEL(cancel_state);
                /* pthread_create is not a cancellation point. */
        while (0 != sem_wait(&(si -> registered))) {
            if (EINTR != errno) ABORT("sem_wait failed");
        }
        RESTORE_CANCEL(cancel_state);
    }
    sem_destroy(&(si -> registered));
    LOCK();
    GC_INTERNAL_FREE(si);
    UNLOCK();

    return(result);
}
#if defined(USE_SPIN_LOCK) || !defined(NO_PTHREAD_TRYLOCK)
/* Spend a few cycles in a way that can't introduce contention with */
/* other threads.                                                   */
STATIC void GC_pause(void)
{
    int i;
#   if !defined(__GNUC__) || defined(__INTEL_COMPILER)
      volatile word dummy = 0;
#   endif

    for (i = 0; i < 10; ++i) {
#     if defined(__GNUC__) && !defined(__INTEL_COMPILER)
        __asm__ __volatile__ (" " : : : "memory");
#     else
        /* Something that's unlikely to be optimized away. */
        GC_noop(++dummy);
#     endif
    }
}
#endif /* USE_SPIN_LOCK || !NO_PTHREAD_TRYLOCK */
#define SPIN_MAX 128    /* Maximum number of calls to GC_pause before */
                        /* giving up.                                 */

GC_INNER volatile GC_bool GC_collecting = 0;
                        /* A hint that we're in the collector and     */
                        /* holding the allocation lock for an         */
                        /* extended period.                           */
#if (!defined(USE_SPIN_LOCK) && !defined(NO_PTHREAD_TRYLOCK)) \
        || defined(PARALLEL_MARK)
/* If we don't want to use the below spinlock implementation, either   */
/* because we don't have a GC_test_and_set implementation, or because  */
/* we don't want to risk sleeping, we can still try spinning on        */
/* pthread_mutex_trylock for a while.  This appears to be very         */
/* beneficial in many cases.                                           */
/* I suspect that under high contention this is nearly always better   */
/* than the spin lock.  But it's a bit slower on a uniprocessor.       */
/* Hence we still default to the spin lock.                            */
/* This is also used to acquire the mark lock for the parallel         */
/* marker.                                                             */

/* Here we use a strict exponential backoff scheme.  I don't know      */
/* whether that's better or worse than the above.  We eventually       */
/* yield by calling pthread_mutex_lock(); it never makes sense to      */
/* explicitly sleep.                                                   */
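/* For illustration: the loop in GC_generic_lock() below spins for     */
/* pause_length = 1, 2, 4, ... up to SPIN_MAX calls to GC_pause()      */
/* between trylock attempts, then falls back to a blocking             */
/* pthread_mutex_lock().                                               */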
/* #define LOCK_STATS */
#ifdef LOCK_STATS
  AO_t GC_spin_count = 0;
  AO_t GC_block_count = 0;
  AO_t GC_unlocked_count = 0;
#endif

STATIC void GC_generic_lock(pthread_mutex_t * lock)
{
#ifndef NO_PTHREAD_TRYLOCK
    unsigned pause_length = 1;
    unsigned i;

    if (0 == pthread_mutex_trylock(lock)) {
#       ifdef LOCK_STATS
            (void)AO_fetch_and_add1(&GC_unlocked_count);
#       endif
        return;
    }
    for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
        for (i = 0; i < pause_length; ++i) {
            GC_pause();
        }
        switch(pthread_mutex_trylock(lock)) {
            case 0:
#               ifdef LOCK_STATS
                    (void)AO_fetch_and_add1(&GC_spin_count);
#               endif
                return;
            case EBUSY:
                break;
            default:
                ABORT("Unexpected error from pthread_mutex_trylock");
        }
    }
#endif /* !NO_PTHREAD_TRYLOCK */
#   ifdef LOCK_STATS
        (void)AO_fetch_and_add1(&GC_block_count);
#   endif
    pthread_mutex_lock(lock);
}

#endif /* !USE_SPIN_LOCK || ... */
#if defined(USE_SPIN_LOCK)

/* Reasonably fast spin locks.  Basically the same implementation as */
/* STL alloc.h.  This isn't really the right way to do this, but     */
/* until the POSIX scheduling mess gets straightened out ...         */

GC_INNER volatile AO_TS_t GC_allocate_lock = AO_TS_INITIALIZER;
GC_INNER void GC_lock(void)
{
#   define low_spin_max 30 /* spin cycles if we suspect uniprocessor  */
#   define high_spin_max SPIN_MAX /* spin cycles for multiprocessor   */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    unsigned i;

    if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
        return;
    }
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting || GC_nprocs == 1) goto yield;
        if (i < my_last_spins/2) {
            GC_pause();
            continue;
        }
        if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
            /*
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
            return;
        }
#       define SLEEP_THRESHOLD 12
                /* Under Linux very short sleeps tend to wait until   */
                /* the current time quantum expires.  On old Linux    */
                /* kernels nanosleep(<= 2 ms) just spins.             */
                /* (Under 2.4, this happens only for real-time        */
                /* processes.)  We want to minimize both behaviors    */
                /* here.                                              */
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 24) i = 24;
                /* Don't wait for more than about 15 msecs, even */
                /* under extreme contention.                     */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}
#else /* !USE_SPIN_LOCK */

GC_INNER void GC_lock(void)
{
#ifndef NO_PTHREAD_TRYLOCK
    if (1 == GC_nprocs || GC_collecting) {
        pthread_mutex_lock(&GC_allocate_ml);
    } else {
        GC_generic_lock(&GC_allocate_ml);
    }
#else /* !NO_PTHREAD_TRYLOCK */
    pthread_mutex_lock(&GC_allocate_ml);
#endif /* !NO_PTHREAD_TRYLOCK */
}

#endif /* !USE_SPIN_LOCK */
#ifdef PARALLEL_MARK

#ifdef GC_ASSERTIONS
  GC_INNER unsigned long GC_mark_lock_holder = NO_THREAD;
#endif

#ifdef GLIBC_2_1_MUTEX_HACK
  /* Ugly workaround for a linux threads bug in the final versions    */
  /* of glibc2.1.  Pthread_mutex_trylock sets the mutex owner         */
  /* field even when it fails to acquire the mutex.  This causes      */
  /* pthread_cond_wait to die.  Remove for glibc2.2.                  */
  /* According to the man page, we should use                         */
  /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually */
  /* defined.                                                         */
  static pthread_mutex_t mark_mutex =
        {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
#else
  static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
GC_INNER void GC_acquire_mark_lock(void)
{
/*
    if (pthread_mutex_lock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_lock failed");
    }
*/
    GC_generic_lock(&mark_mutex);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
#   endif
}

GC_INNER void GC_release_mark_lock(void)
{
    GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_mutex_unlock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_unlock failed");
    }
}
/* Collector must wait for free-list builders for 2 reasons:           */
/* 1) Mark bits may still be getting examined without lock.            */
/* 2) Partial free lists referenced only by locals may not be scanned  */
/*    correctly, e.g. if they contain "pointer-free" objects, since    */
/*    the free-list link may be ignored.                               */
STATIC void GC_wait_builder(void)
{
    GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
    ASSERT_CANCEL_DISABLED();
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
#   endif
}

GC_INNER void GC_wait_for_reclaim(void)
{
    GC_acquire_mark_lock();
    while (GC_fl_builder_count > 0) {
        GC_wait_builder();
    }
    GC_release_mark_lock();
}

GC_INNER void GC_notify_all_builder(void)
{
    GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
    if (pthread_cond_broadcast(&builder_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}
static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;

GC_INNER void GC_wait_marker(void)
{
    GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
    ASSERT_CANCEL_DISABLED();
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
#   endif
}

GC_INNER void GC_notify_all_marker(void)
{
    if (pthread_cond_broadcast(&mark_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK */
#endif /* GC_LINUX_THREADS and friends */