diff --git a/src/mm/boehm-gc/pthread_support.c b/src/mm/boehm-gc/pthread_support.c
index 098a3c188..7ff0c6738 100644
--- a/src/mm/boehm-gc/pthread_support.c
+++ b/src/mm/boehm-gc/pthread_support.c
@@ -1,4 +1,4 @@
-/* 
+/*
 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
@@ -13,6 +13,9 @@
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
+
+#include "private/pthread_support.h"
+
 /*
 * Support code originally for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
@@ -21,7 +24,7 @@
 * guaranteed by the pthread standard, though it now does
 * very little of that. It now also supports NPTL, and many
 * other Posix thread implementations. We are trying to merge
- * all flavors of pthread dupport code into this file.
+ * all flavors of pthread support code into this file.
 */
 /* DG/UX ix86 support <takis@xfree86.org> */
 /*
@@ -41,21 +44,7 @@
 * + # define GC_LOCK_TAKEN GC_allocate_lock
 */

-#include "config.h"
-
-/*#define DEBUG_THREADS 1*/
-
-# include "private/pthread_support.h"
-
-# if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS)
-
-# if defined(GC_DGUX386_THREADS) && !defined(_POSIX4A_DRAFT10_SOURCE)
-# define _POSIX4A_DRAFT10_SOURCE 1
-# endif
-
-# if defined(GC_DGUX386_THREADS) && !defined(_USING_POSIX4A_DRAFT10)
-# define _USING_POSIX4A_DRAFT10 1
-# endif
+#if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS)

 # include <pthread.h>
 # include <sched.h>
@@ -85,30 +74,26 @@
 #if defined(GC_NETBSD_THREADS)
 # include <sys/param.h>
 # include <sys/sysctl.h>
-#endif /* GC_NETBSD_THREADS */
+#endif /* GC_NETBSD_THREADS */

-/* Allocator lock definitions. */
+/* Allocator lock definitions. */
 #if !defined(USE_SPIN_LOCK)
- pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;
+ GC_INNER pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;
 #endif

-unsigned long GC_lock_holder = NO_THREAD;
- /* Used only for assertions, and to prevent */
- /* recursive reentry in the system call wrapper. */
+GC_INNER unsigned long GC_lock_holder = NO_THREAD;
 /* Used only for assertions, and to prevent */
 /* recursive reentry in the system call wrapper. */

#if defined(GC_DGUX386_THREADS)
# include <sys/dg_sys_info.h>
# include <sys/_int_psem.h> /* sem_t is an uint in DG/UX */
- typedef unsigned int sem_t;
+ typedef unsigned int sem_t;
#endif /* GC_DGUX386_THREADS */

-#ifndef __GNUC__
-# define __inline__
-#endif
-
 /* Undefine macros used to redirect pthread primitives. */
# undef pthread_create
-# if !defined(GC_DARWIN_THREADS)
+# if !defined(GC_DARWIN_THREADS) && !defined(GC_OPENBSD_THREADS)
# undef pthread_sigmask
# endif
# undef pthread_join
@@ -129,19 +114,19 @@ unsigned long GC_lock_holder = NO_THREAD;
# include <dlfcn.h>
# define WRAP_FUNC(f) f
# define REAL_FUNC(f) GC_real_##f
- /* We define both GC_f and plain f to be the wrapped function. */
- /* In that way plain calls work, as do calls from files that */
- /* included gc.h, wich redefined f to GC_f. */
+ /* We define both GC_f and plain f to be the wrapped function. */
+ /* In that way plain calls work, as do calls from files that */
+ /* included gc.h, wich redefined f to GC_f. */
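The dlopen-based flavor configured here can be pictured with a short, self-contained sketch (illustrative only, not part of this patch: it resolves the real symbol with RTLD_NEXT for brevity, whereas GC_init_real_syms further below dlopens libpthread explicitly; the type name is hypothetical):

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <pthread.h>

    typedef int (* create_fn_t)(pthread_t *, const pthread_attr_t *,
                                void *(*)(void *), void *);

    /* Keep the plain name so existing calls land here; fetch the   */
    /* real symbol lazily from the next object in the lookup chain. */
    int pthread_create(pthread_t *t, const pthread_attr_t *a,
                       void *(*fn)(void *), void *arg)
    {
        static create_fn_t real_create;

        if (0 == real_create)
            real_create = (create_fn_t)dlsym(RTLD_NEXT, "pthread_create");
        /* A collector would register the new thread here. */
        return real_create(t, a, fn, arg);
    }

GC_init_real_syms below performs the same lookups once, guarded by GC_syms_initialized, rather than on every call.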
 /* FIXME: Needs work for DARWIN and True64 (OSF1) */
 typedef int (* GC_pthread_create_t)(pthread_t *, const pthread_attr_t *,
- void * (*)(void *), void *);
- static GC_pthread_create_t GC_real_pthread_create;
+ void * (*)(void *), void *);
+ static GC_pthread_create_t REAL_FUNC(pthread_create);
 typedef int (* GC_pthread_sigmask_t)(int, const sigset_t *, sigset_t *);
- static GC_pthread_sigmask_t GC_real_pthread_sigmask;
+ static GC_pthread_sigmask_t REAL_FUNC(pthread_sigmask);
 typedef int (* GC_pthread_join_t)(pthread_t, void **);
- static GC_pthread_join_t GC_real_pthread_join;
+ static GC_pthread_join_t REAL_FUNC(pthread_join);
 typedef int (* GC_pthread_detach_t)(pthread_t);
- static GC_pthread_detach_t GC_real_pthread_detach;
+ static GC_pthread_detach_t REAL_FUNC(pthread_detach);
# else
# define WRAP_FUNC(f) GC_##f
# if !defined(GC_DGUX386_THREADS)
@@ -152,29 +137,37 @@ unsigned long GC_lock_holder = NO_THREAD;
# endif
#endif

-#if defined(GC_USE_DL_WRAP) || defined(GC_USE_DLOPEN_WRAP)
-/* Define GC_ functions as aliases for the plain ones, which will */
-/* be intercepted. This allows files which include gc.h, and hence */
-/* generate referemces to the GC_ symbols, to see the right symbols. */
- int GC_pthread_create(pthread_t * t, const pthread_attr_t * a,
- void * (* fn)(void *), void * arg) {
- return pthread_create(t, a, fn, arg);
+#if defined(GC_USE_LD_WRAP) || defined(GC_USE_DLOPEN_WRAP)
+/* Define GC_ functions as aliases for the plain ones, which will */
+/* be intercepted. This allows files which include gc.h, and hence */
+/* generate references to the GC_ symbols, to see the right symbols. */
+ GC_API int GC_pthread_create(pthread_t * t, const pthread_attr_t * a,
+ void * (* fn)(void *), void * arg)
+ {
+ return pthread_create(t, a, fn, arg);
 }
- int GC_pthread_sigmask(int how, const sigset_t *mask, sigset_t *old) {
- return pthread_sigmask(how, mask, old);
+
+ GC_API int GC_pthread_sigmask(int how, const sigset_t *mask,
+ sigset_t *old)
+ {
+ return pthread_sigmask(how, mask, old);
 }
- int GC_pthread_join(pthread_t t, void **res) {
- return pthread_join(t, res);
+
+ GC_API int GC_pthread_join(pthread_t t, void **res)
+ {
+ return pthread_join(t, res);
 }
- int GC_pthread_detach(pthread_t t) {
- return pthread_detach(t);
+
+ GC_API int GC_pthread_detach(pthread_t t)
+ {
+ return pthread_detach(t);
 }
#endif /* Linker-based interception. */
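The GC_USE_LD_WRAP branch above instead leans on the GNU linker: building with -Wl,--wrap=pthread_join routes every call to pthread_join into __wrap_pthread_join and exposes the original under __real_pthread_join. A minimal sketch of that mechanism, assuming a GNU toolchain (illustrative, not part of this patch):

    #include <pthread.h>

    /* Bound by the linker to the real function when the client is */
    /* linked with -Wl,--wrap=pthread_join.                         */
    int __real_pthread_join(pthread_t thread, void **retval);

    int __wrap_pthread_join(pthread_t thread, void **retval)
    {
        /* A collector would look the thread up here and, after the */
        /* join, drop it from its thread table, as GC_pthread_join  */
        /* does later in this file.                                  */
        return __real_pthread_join(thread, retval);
    }

The GC_API aliases just above exist so that code compiled against gc.h, and therefore calling GC_pthread_join, still reaches the intercepted symbol when either interception scheme is active.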
#ifdef GC_USE_DLOPEN_WRAP
- static GC_bool GC_syms_initialized = FALSE;
+ STATIC GC_bool GC_syms_initialized = FALSE;

- void GC_init_real_syms(void)
+ STATIC void GC_init_real_syms(void)
 {
 void *dl_handle;
# define LIBPTHREAD_NAME "libpthread.so.0"
@@ -197,14 +190,14 @@ unsigned long GC_lock_holder = NO_THREAD;
 }
 if (NULL == dl_handle) ABORT("Couldn't open libpthread\n");
# endif
- GC_real_pthread_create = (GC_pthread_create_t)
- dlsym(dl_handle, "pthread_create");
- GC_real_pthread_sigmask = (GC_pthread_sigmask_t)
- dlsym(dl_handle, "pthread_sigmask");
- GC_real_pthread_join = (GC_pthread_join_t)
- dlsym(dl_handle, "pthread_join");
- GC_real_pthread_detach = (GC_pthread_detach_t)
- dlsym(dl_handle, "pthread_detach");
+ REAL_FUNC(pthread_create) = (GC_pthread_create_t)
+ dlsym(dl_handle, "pthread_create");
+ REAL_FUNC(pthread_sigmask) = (GC_pthread_sigmask_t)
+ dlsym(dl_handle, "pthread_sigmask");
+ REAL_FUNC(pthread_join) = (GC_pthread_join_t)
+ dlsym(dl_handle, "pthread_join");
+ REAL_FUNC(pthread_detach) = (GC_pthread_detach_t)
+ dlsym(dl_handle, "pthread_detach");
 GC_syms_initialized = TRUE;
 }

@@ -213,51 +206,53 @@ unsigned long GC_lock_holder = NO_THREAD;
# define INIT_REAL_SYMS()
#endif

-void GC_thr_init(void);
-
 static GC_bool parallel_initialized = FALSE;
-GC_bool GC_need_to_lock = FALSE;
-
-void GC_init_parallel(void);
+GC_INNER GC_bool GC_need_to_lock = FALSE;

-long GC_nprocs = 1; /* Number of processors. We may not have */
- /* access to all of them, but this is as good */
- /* a guess as any ... */
+STATIC long GC_nprocs = 1;
+ /* Number of processors. We may not have */
+ /* access to all of them, but this is as good */
+ /* a guess as any ... */

#ifdef THREAD_LOCAL_ALLOC
-/* We must explicitly mark ptrfree and gcj free lists, since the free */
-/* list links wouldn't otherwise be found. We also set them in the */
-/* normal free lists, since that involves touching less memory than if */
-/* we scanned them normally. */
-void GC_mark_thread_local_free_lists(void)
+/* We must explicitly mark ptrfree and gcj free lists, since the free */
+/* list links wouldn't otherwise be found. We also set them in the */
+/* normal free lists, since that involves touching less memory than if */
+/* we scanned them normally. */
+GC_INNER void GC_mark_thread_local_free_lists(void)
 {
 int i;
 GC_thread p;
-
+
 for (i = 0; i < THREAD_TABLE_SZ; ++i) {
 for (p = GC_threads[i]; 0 != p; p = p -> next) {
- GC_mark_thread_local_fls_for(&(p->tlfs));
+ GC_mark_thread_local_fls_for(&(p->tlfs));
 }
 }
 }

 #if defined(GC_ASSERTIONS)
- /* Check that all thread-local free-lists are completely marked. */
- /* also check that thread-specific-data structures are marked. */
- void GC_check_tls(void) {
- int i;
- GC_thread p;
-
- for (i = 0; i < THREAD_TABLE_SZ; ++i) {
- for (p = GC_threads[i]; 0 != p; p = p -> next) {
- GC_check_tls_for(&(p->tlfs));
- }
- }
+ void GC_check_tls_for(GC_tlfs p);
+# if defined(USE_CUSTOM_SPECIFIC)
+ void GC_check_tsd_marks(tsd *key);
+# endif
+ /* Check that all thread-local free-lists are completely marked. */
+ /* also check that thread-specific-data structures are marked. 
*/ + void GC_check_tls(void) + { + int i; + GC_thread p; + + for (i = 0; i < THREAD_TABLE_SZ; ++i) { + for (p = GC_threads[i]; 0 != p; p = p -> next) { + GC_check_tls_for(&(p->tlfs)); + } + } # if defined(USE_CUSTOM_SPECIFIC) - if (GC_thread_key != 0) - GC_check_tsd_marks(GC_thread_key); -# endif + if (GC_thread_key != 0) + GC_check_tsd_marks(GC_thread_key); +# endif } #endif /* GC_ASSERTIONS */ @@ -269,43 +264,47 @@ void GC_mark_thread_local_free_lists(void) # define MAX_MARKERS 16 # endif -static ptr_t marker_sp[MAX_MARKERS] = {0}; +static ptr_t marker_sp[MAX_MARKERS - 1] = {0}; #ifdef IA64 - static ptr_t marker_bsp[MAX_MARKERS] = {0}; + static ptr_t marker_bsp[MAX_MARKERS - 1] = {0}; #endif -void * GC_mark_thread(void * id) +STATIC void * GC_mark_thread(void * id) { word my_mark_no = 0; + IF_CANCEL(int cancel_state;) + DISABLE_CANCEL(cancel_state); + /* Mark threads are not cancellable; they */ + /* should be invisible to client. */ marker_sp[(word)id] = GC_approx_sp(); # ifdef IA64 marker_bsp[(word)id] = GC_save_regs_in_stack(); # endif + + if ((word)id == (word)-1) return 0; /* to make compiler happy */ + for (;; ++my_mark_no) { - /* GC_mark_no is passed only to allow GC_help_marker to terminate */ - /* promptly. This is important if it were called from the signal */ - /* handler or from the GC lock acquisition code. Under Linux, it's */ - /* not safe to call it from a signal handler, since it uses mutexes */ - /* and condition variables. Since it is called only here, the */ - /* argument is unnecessary. */ + /* GC_mark_no is passed only to allow GC_help_marker to terminate */ + /* promptly. This is important if it were called from the signal */ + /* handler or from the GC lock acquisition code. Under Linux, it's */ + /* not safe to call it from a signal handler, since it uses mutexes */ + /* and condition variables. Since it is called only here, the */ + /* argument is unnecessary. */ if (my_mark_no < GC_mark_no || my_mark_no > GC_mark_no + 2) { - /* resynchronize if we get far off, e.g. because GC_mark_no */ - /* wrapped. */ - my_mark_no = GC_mark_no; + /* resynchronize if we get far off, e.g. because GC_mark_no */ + /* wrapped. */ + my_mark_no = GC_mark_no; } # ifdef DEBUG_THREADS - GC_printf("Starting mark helper for mark number %lu\n", my_mark_no); + GC_printf("Starting mark helper for mark number %lu\n", + (unsigned long)my_mark_no); # endif GC_help_marker(my_mark_no); } } -extern long GC_markers; /* Number of mark threads we would */ - /* like to have. Includes the */ - /* initiating thread. */ - -pthread_t GC_mark_threads[MAX_MARKERS]; +STATIC pthread_t GC_mark_threads[MAX_MARKERS]; #define PTHREAD_CREATE REAL_FUNC(pthread_create) @@ -314,48 +313,50 @@ static void start_mark_threads(void) unsigned i; pthread_attr_t attr; - if (GC_markers > MAX_MARKERS) { - WARN("Limiting number of mark threads\n", 0); - GC_markers = MAX_MARKERS; - } if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed"); - + if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)) - ABORT("pthread_attr_setdetachstate failed"); + ABORT("pthread_attr_setdetachstate failed"); # if defined(HPUX) || defined(GC_DGUX386_THREADS) /* Default stack size is usually too small: fix it. */ - /* Otherwise marker threads or GC may run out of */ - /* space. */ + /* Otherwise marker threads or GC may run out of */ + /* space. 
*/ # define MIN_STACK_SIZE (8*HBLKSIZE*sizeof(word)) { - size_t old_size; - int code; + size_t old_size; + int code; if (pthread_attr_getstacksize(&attr, &old_size) != 0) - ABORT("pthread_attr_getstacksize failed\n"); - if (old_size < MIN_STACK_SIZE) { - if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0) - ABORT("pthread_attr_setstacksize failed\n"); - } + ABORT("pthread_attr_getstacksize failed\n"); + if (old_size < MIN_STACK_SIZE) { + if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0) + ABORT("pthread_attr_setstacksize failed\n"); + } } # endif /* HPUX || GC_DGUX386_THREADS */ - if (GC_print_stats) { - GC_log_printf("Starting %ld marker threads\n", GC_markers - 1); - } for (i = 0; i < GC_markers - 1; ++i) { if (0 != PTHREAD_CREATE(GC_mark_threads + i, &attr, - GC_mark_thread, (void *)(word)i)) { - WARN("Marker thread creation failed, errno = %ld.\n", errno); + GC_mark_thread, (void *)(word)i)) { + WARN("Marker thread creation failed, errno = %" GC_PRIdPTR "\n", + errno); + /* Don't try to create other marker threads. */ + GC_markers = i + 1; + if (i == 0) GC_parallel = FALSE; + break; } } + if (GC_print_stats) { + GC_log_printf("Started %ld mark helper threads\n", GC_markers - 1); + } + pthread_attr_destroy(&attr); } #endif /* PARALLEL_MARK */ -GC_bool GC_thr_initialized = FALSE; +GC_INNER GC_bool GC_thr_initialized = FALSE; -volatile GC_thread GC_threads[THREAD_TABLE_SZ]; +GC_INNER volatile GC_thread GC_threads[THREAD_TABLE_SZ] = {0}; void GC_push_thread_structures(void) { @@ -363,31 +364,30 @@ void GC_push_thread_structures(void) GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads)); # if defined(THREAD_LOCAL_ALLOC) GC_push_all((ptr_t)(&GC_thread_key), - (ptr_t)(&GC_thread_key)+sizeof(&GC_thread_key)); + (ptr_t)(&GC_thread_key)+sizeof(&GC_thread_key)); # endif } -/* It may not be safe to allocate when we register the first thread. */ +/* It may not be safe to allocate when we register the first thread. */ static struct GC_Thread_Rep first_thread; -/* Add a thread to GC_threads. We assume it wasn't already there. */ -/* Caller holds allocation lock. */ -GC_thread GC_new_thread(pthread_t id) +/* Add a thread to GC_threads. We assume it wasn't already there. */ +/* Caller holds allocation lock. */ +STATIC GC_thread GC_new_thread(pthread_t id) { int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ; GC_thread result; static GC_bool first_thread_used = FALSE; - + GC_ASSERT(I_HOLD_LOCK()); if (!first_thread_used) { - result = &first_thread; - first_thread_used = TRUE; + result = &first_thread; + first_thread_used = TRUE; } else { result = (struct GC_Thread_Rep *) - GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL); - GC_ASSERT(result -> flags == 0); + GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL); + if (result == 0) return(0); } - if (result == 0) return(0); result -> id = id; result -> next = GC_threads[hv]; GC_threads[hv] = result; @@ -395,14 +395,14 @@ GC_thread GC_new_thread(pthread_t id) return(result); } -/* Delete a thread from GC_threads. We assume it is there. */ -/* (The code intentionally traps if it wasn't.) */ -void GC_delete_thread(pthread_t id) +/* Delete a thread from GC_threads. We assume it is there. */ +/* (The code intentionally traps if it wasn't.) 
*/ +STATIC void GC_delete_thread(pthread_t id) { int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ; register GC_thread p = GC_threads[hv]; register GC_thread prev = 0; - + GC_ASSERT(I_HOLD_LOCK()); while (!THREAD_EQUAL(p -> id, id)) { prev = p; @@ -414,16 +414,16 @@ void GC_delete_thread(pthread_t id) prev -> next = p -> next; } # ifdef GC_DARWIN_THREADS - mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread); + mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread); # endif GC_INTERNAL_FREE(p); } -/* If a thread has been joined, but we have not yet */ -/* been notified, then there may be more than one thread */ -/* in the table with the same pthread id. */ -/* This is OK, but we need a way to delete a specific one. */ -void GC_delete_gc_thread(GC_thread gc_id) +/* If a thread has been joined, but we have not yet */ +/* been notified, then there may be more than one thread */ +/* in the table with the same pthread id. */ +/* This is OK, but we need a way to delete a specific one. */ +STATIC void GC_delete_gc_thread(GC_thread gc_id) { pthread_t id = gc_id -> id; int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ; @@ -441,32 +441,74 @@ void GC_delete_gc_thread(GC_thread gc_id) prev -> next = p -> next; } # ifdef GC_DARWIN_THREADS - mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread); + mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread); # endif GC_INTERNAL_FREE(p); } -/* Return a GC_thread corresponding to a given pthread_t. */ -/* Returns 0 if it's not there. */ -/* Caller holds allocation lock or otherwise inhibits */ -/* updates. */ -/* If there is more than one thread with the given id we */ -/* return the most recent one. */ -GC_thread GC_lookup_thread(pthread_t id) +/* Return a GC_thread corresponding to a given pthread_t. */ +/* Returns 0 if it's not there. */ +/* Caller holds allocation lock or otherwise inhibits */ +/* updates. */ +/* If there is more than one thread with the given id we */ +/* return the most recent one. */ +GC_INNER GC_thread GC_lookup_thread(pthread_t id) { int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ; register GC_thread p = GC_threads[hv]; - + while (p != 0 && !THREAD_EQUAL(p -> id, id)) p = p -> next; return(p); } +/* Called by GC_finalize() (in case of an allocation failure observed). */ +GC_INNER void GC_reset_finalizer_nested(void) +{ + GC_thread me = GC_lookup_thread(pthread_self()); + me->finalizer_nested = 0; +} + +/* Checks and updates the thread-local level of finalizers recursion. */ +/* Returns NULL if GC_invoke_finalizers() should not be called by the */ +/* collector (to minimize the risk of a deep finalizers recursion), */ +/* otherwise returns a pointer to the thread-local finalizer_nested. */ +/* Called by GC_notify_or_invoke_finalizers() only (the lock is held). */ +GC_INNER unsigned *GC_check_finalizer_nested(void) +{ + GC_thread me = GC_lookup_thread(pthread_self()); + unsigned nesting_level = me->finalizer_nested; + if (nesting_level) { + /* We are inside another GC_invoke_finalizers(). */ + /* Skip some implicitly-called GC_invoke_finalizers() */ + /* depending on the nesting (recursion) level. */ + if (++me->finalizer_skipped < (1U << nesting_level)) return NULL; + me->finalizer_skipped = 0; + } + me->finalizer_nested = nesting_level + 1; + return &me->finalizer_nested; +} + +#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC) + /* This is called from thread-local GC_malloc(). 
*/ + GC_bool GC_is_thread_tsd_valid(void *tsd) + { + char *me; + LOCK(); + me = (char *)GC_lookup_thread(pthread_self()); + UNLOCK(); + /* FIXME: We can check tsd more correctly (since now we have access */ + /* to the right declarations). This old algorithm (moved from */ + /* thread_local_alloc.c) checks only that it's close. */ + return((char *)tsd > me && (char *)tsd < me + 1000); + } +#endif + #ifdef HANDLE_FORK -/* Remove all entries from the GC_threads table, except the */ -/* one for the current thread. We need to do this in the child */ -/* process after a fork(), since only the current thread */ -/* survives in the child. */ -void GC_remove_all_threads_but_me(void) +/* Remove all entries from the GC_threads table, except the */ +/* one for the current thread. We need to do this in the child */ +/* process after a fork(), since only the current thread */ +/* survives in the child. */ +STATIC void GC_remove_all_threads_but_me(void) { pthread_t self = pthread_self(); int hv; @@ -475,18 +517,18 @@ void GC_remove_all_threads_but_me(void) for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) { me = 0; for (p = GC_threads[hv]; 0 != p; p = next) { - next = p -> next; - if (THREAD_EQUAL(p -> id, self)) { - me = p; - p -> next = 0; - } else { -# ifdef THREAD_LOCAL_ALLOC - if (!(p -> flags & FINISHED)) { - GC_destroy_thread_local(&(p->tlfs)); - } -# endif /* THREAD_LOCAL_ALLOC */ - if (p != &first_thread) GC_INTERNAL_FREE(p); - } + next = p -> next; + if (THREAD_EQUAL(p -> id, self)) { + me = p; + p -> next = 0; + } else { +# ifdef THREAD_LOCAL_ALLOC + if (!(p -> flags & FINISHED)) { + GC_destroy_thread_local(&(p->tlfs)); + } +# endif /* THREAD_LOCAL_ALLOC */ + if (p != &first_thread) GC_INTERNAL_FREE(p); + } } GC_threads[hv] = me; } @@ -494,194 +536,208 @@ void GC_remove_all_threads_but_me(void) #endif /* HANDLE_FORK */ #ifdef USE_PROC_FOR_LIBRARIES -GC_bool GC_segment_is_thread_stack(ptr_t lo, ptr_t hi) -{ + GC_INNER GC_bool GC_segment_is_thread_stack(ptr_t lo, ptr_t hi) + { int i; GC_thread p; - + GC_ASSERT(I_HOLD_LOCK()); # ifdef PARALLEL_MARK - for (i = 0; i < GC_markers; ++i) { - if (marker_sp[i] > lo & marker_sp[i] < hi) return TRUE; + for (i = 0; i < GC_markers - 1; ++i) { + if (marker_sp[i] > lo & marker_sp[i] < hi) return TRUE; # ifdef IA64 - if (marker_bsp[i] > lo & marker_bsp[i] < hi) return TRUE; -# endif + if (marker_bsp[i] > lo & marker_bsp[i] < hi) return TRUE; +# endif } # endif for (i = 0; i < THREAD_TABLE_SZ; i++) { for (p = GC_threads[i]; p != 0; p = p -> next) { - if (0 != p -> stack_end) { -# ifdef STACK_GROWS_UP + if (0 != p -> stack_end) { +# ifdef STACK_GROWS_UP if (p -> stack_end >= lo && p -> stack_end < hi) return TRUE; -# else /* STACK_GROWS_DOWN */ +# else /* STACK_GROWS_DOWN */ if (p -> stack_end > lo && p -> stack_end <= hi) return TRUE; -# endif - } +# endif + } } } return FALSE; -} + } #endif /* USE_PROC_FOR_LIBRARIES */ #ifdef IA64 -/* Find the largest stack_base smaller than bound. May be used */ -/* to find the boundary between a register stack and adjacent */ -/* immediately preceding memory stack. */ -ptr_t GC_greatest_stack_base_below(ptr_t bound) -{ + /* Find the largest stack_base smaller than bound. May be used */ + /* to find the boundary between a register stack and adjacent */ + /* immediately preceding memory stack. 
*/ + GC_INNER ptr_t GC_greatest_stack_base_below(ptr_t bound) + { int i; GC_thread p; ptr_t result = 0; - + GC_ASSERT(I_HOLD_LOCK()); # ifdef PARALLEL_MARK - for (i = 0; i < GC_markers; ++i) { - if (marker_sp[i] > result && marker_sp[i] < bound) - result = marker_sp[i]; + for (i = 0; i < GC_markers - 1; ++i) { + if (marker_sp[i] > result && marker_sp[i] < bound) + result = marker_sp[i]; } # endif for (i = 0; i < THREAD_TABLE_SZ; i++) { for (p = GC_threads[i]; p != 0; p = p -> next) { - if (p -> stack_end > result && p -> stack_end < bound) { - result = p -> stack_end; - } + if (p -> stack_end > result && p -> stack_end < bound) { + result = p -> stack_end; + } } } return result; -} + } #endif /* IA64 */ #ifdef GC_LINUX_THREADS -/* Return the number of processors, or i<= 0 if it can't be determined. */ -int GC_get_nprocs(void) -{ - /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that */ - /* appears to be buggy in many cases. */ - /* We look for lines "cpu" in /proc/stat. */ -# define STAT_BUF_SIZE 4096 -# define STAT_READ read - /* If read is wrapped, this may need to be redefined to call */ - /* the real one. */ + /* Return the number of processors, or i<= 0 if it can't be determined. */ + STATIC int GC_get_nprocs(void) + { + /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that */ + /* appears to be buggy in many cases. */ + /* We look for lines "cpu" in /proc/stat. */ +# ifndef STAT_READ + /* Also defined in os_dep.c. */ +# define STAT_BUF_SIZE 4096 +# define STAT_READ read +# endif + /* If read is wrapped, this may need to be redefined to call */ + /* the real one. */ char stat_buf[STAT_BUF_SIZE]; int f; word result = 1; - /* Some old kernels only have a single "cpu nnnn ..." */ - /* entry in /proc/stat. We identify those as */ - /* uniprocessors. */ + /* Some old kernels only have a single "cpu nnnn ..." */ + /* entry in /proc/stat. We identify those as */ + /* uniprocessors. */ size_t i, len = 0; f = open("/proc/stat", O_RDONLY); if (f < 0 || (len = STAT_READ(f, stat_buf, STAT_BUF_SIZE)) < 100) { - WARN("Couldn't read /proc/stat\n", 0); - return -1; + WARN("Couldn't read /proc/stat\n", 0); + return -1; } for (i = 0; i < len - 100; ++i) { if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c' - && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') { - int cpu_no = atoi(stat_buf + i + 4); - if (cpu_no >= result) result = cpu_no + 1; - } + && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') { + int cpu_no = atoi(stat_buf + i + 4); + if (cpu_no >= result) result = cpu_no + 1; + } } close(f); return result; -} + } #endif /* GC_LINUX_THREADS */ -/* We hold the GC lock. Wait until an in-progress GC has finished. */ -/* Repeatedly RELEASES GC LOCK in order to wait. */ -/* If wait_for_all is true, then we exit with the GC lock held and no */ -/* collection in progress; otherwise we just wait for the current GC */ -/* to finish. */ -extern GC_bool GC_collection_in_progress(void); -void GC_wait_for_gc_completion(GC_bool wait_for_all) +/* We hold the GC lock. Wait until an in-progress GC has finished. */ +/* Repeatedly RELEASES GC LOCK in order to wait. */ +/* If wait_for_all is true, then we exit with the GC lock held and no */ +/* collection in progress; otherwise we just wait for the current GC */ +/* to finish. 
*/ +STATIC void GC_wait_for_gc_completion(GC_bool wait_for_all) { GC_ASSERT(I_HOLD_LOCK()); + ASSERT_CANCEL_DISABLED(); if (GC_incremental && GC_collection_in_progress()) { - int old_gc_no = GC_gc_no; - - /* Make sure that no part of our stack is still on the mark stack, */ - /* since it's about to be unmapped. */ - while (GC_incremental && GC_collection_in_progress() - && (wait_for_all || old_gc_no == GC_gc_no)) { - ENTER_GC(); - GC_in_thread_creation = TRUE; + int old_gc_no = GC_gc_no; + + /* Make sure that no part of our stack is still on the mark stack, */ + /* since it's about to be unmapped. */ + while (GC_incremental && GC_collection_in_progress() + && (wait_for_all || old_gc_no == GC_gc_no)) { + ENTER_GC(); + GC_in_thread_creation = TRUE; GC_collect_a_little_inner(1); - GC_in_thread_creation = FALSE; - EXIT_GC(); - UNLOCK(); - sched_yield(); - LOCK(); - } + GC_in_thread_creation = FALSE; + EXIT_GC(); + UNLOCK(); + sched_yield(); + LOCK(); + } } } #ifdef HANDLE_FORK /* Procedures called before and after a fork. The goal here is to make */ -/* it safe to call GC_malloc() in a forked child. It's unclear that is */ -/* attainable, since the single UNIX spec seems to imply that one */ -/* should only call async-signal-safe functions, and we probably can't */ -/* quite guarantee that. But we give it our best shot. (That same */ -/* spec also implies that it's not safe to call the system malloc */ -/* between fork() and exec(). Thus we're doing no worse than it. */ - -/* Called before a fork() */ -void GC_fork_prepare_proc(void) +/* it safe to call GC_malloc() in a forked child. It's unclear that is */ +/* attainable, since the single UNIX spec seems to imply that one */ +/* should only call async-signal-safe functions, and we probably can't */ +/* quite guarantee that. But we give it our best shot. (That same */ +/* spec also implies that it's not safe to call the system malloc */ +/* between fork() and exec(). Thus we're doing no worse than it.) */ + +IF_CANCEL(static int fork_cancel_state;) + /* protected by allocation lock. */ + +/* Called before a fork() */ +STATIC void GC_fork_prepare_proc(void) { - /* Acquire all relevant locks, so that after releasing the locks */ - /* the child will see a consistent state in which monitor */ - /* invariants hold. Unfortunately, we can't acquire libc locks */ - /* we might need, and there seems to be no guarantee that libc */ - /* must install a suitable fork handler. */ - /* Wait for an ongoing GC to finish, since we can't finish it in */ - /* the (one remaining thread in) the child. */ + /* Acquire all relevant locks, so that after releasing the locks */ + /* the child will see a consistent state in which monitor */ + /* invariants hold. Unfortunately, we can't acquire libc locks */ + /* we might need, and there seems to be no guarantee that libc */ + /* must install a suitable fork handler. */ + /* Wait for an ongoing GC to finish, since we can't finish it in */ + /* the (one remaining thread in) the child. */ LOCK(); -# if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC) - GC_wait_for_reclaim(); + DISABLE_CANCEL(fork_cancel_state); + /* Following waits may include cancellation points. 
*/ +# if defined(PARALLEL_MARK) + if (GC_parallel) + GC_wait_for_reclaim(); # endif GC_wait_for_gc_completion(TRUE); -# if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC) - GC_acquire_mark_lock(); +# if defined(PARALLEL_MARK) + if (GC_parallel) + GC_acquire_mark_lock(); # endif } -/* Called in parent after a fork() */ -void GC_fork_parent_proc(void) +/* Called in parent after a fork() */ +STATIC void GC_fork_parent_proc(void) { -# if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC) - GC_release_mark_lock(); +# if defined(PARALLEL_MARK) + if (GC_parallel) + GC_release_mark_lock(); # endif + RESTORE_CANCEL(fork_cancel_state); UNLOCK(); } -/* Called in child after a fork() */ -void GC_fork_child_proc(void) +/* Called in child after a fork() */ +STATIC void GC_fork_child_proc(void) { /* Clean up the thread table, so that just our thread is left. */ -# if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC) - GC_release_mark_lock(); +# if defined(PARALLEL_MARK) + if (GC_parallel) + GC_release_mark_lock(); # endif GC_remove_all_threads_but_me(); # ifdef PARALLEL_MARK - /* Turn off parallel marking in the child, since we are probably */ - /* just going to exec, and we would have to restart mark threads. */ + /* Turn off parallel marking in the child, since we are probably */ + /* just going to exec, and we would have to restart mark threads. */ GC_markers = 1; GC_parallel = FALSE; # endif /* PARALLEL_MARK */ + RESTORE_CANCEL(fork_cancel_state); UNLOCK(); } #endif /* HANDLE_FORK */ #if defined(GC_DGUX386_THREADS) -/* Return the number of processors, or i<= 0 if it can't be determined. */ -int GC_get_nprocs(void) -{ + /* Return the number of processors, or i<= 0 if it can't be determined. */ + STATIC int GC_get_nprocs(void) + { /* */ int numCpus; struct dg_sys_info_pm_info pm_sysinfo; int status =0; status = dg_sys_info((long int *) &pm_sysinfo, - DG_SYS_INFO_PM_INFO_TYPE, DG_SYS_INFO_PM_CURRENT_VERSION); + DG_SYS_INFO_PM_INFO_TYPE, DG_SYS_INFO_PM_CURRENT_VERSION); if (status < 0) /* set -1 for error */ numCpus = -1; @@ -690,30 +746,30 @@ int GC_get_nprocs(void) numCpus = pm_sysinfo.idle_vp_count; # ifdef DEBUG_THREADS - GC_printf("Number of active CPUs in this system: %d\n", numCpus); + GC_printf("Number of active CPUs in this system: %d\n", numCpus); # endif return(numCpus); -} + } #endif /* GC_DGUX386_THREADS */ #if defined(GC_NETBSD_THREADS) -static int get_ncpu(void) -{ + static int get_ncpu(void) + { int mib[] = {CTL_HW,HW_NCPU}; int res; size_t len = sizeof(res); sysctl(mib, sizeof(mib)/sizeof(int), &res, &len, NULL, 0); return res; -} -#endif /* GC_NETBSD_THREADS */ + } +#endif /* GC_NETBSD_THREADS */ -# if defined(GC_LINUX_THREADS) && defined(INCLUDE_LINUX_THREAD_DESCR) -__thread int dummy_thread_local; -# endif +#if defined(GC_LINUX_THREADS) && defined(INCLUDE_LINUX_THREAD_DESCR) + __thread int GC_dummy_thread_local; +#endif -/* We hold the allocation lock. */ -void GC_thr_init(void) +/* We hold the allocation lock. */ +GC_INNER void GC_thr_init(void) { # ifndef GC_DARWIN_THREADS int dummy; @@ -722,28 +778,28 @@ void GC_thr_init(void) if (GC_thr_initialized) return; GC_thr_initialized = TRUE; - + # ifdef HANDLE_FORK - /* Prepare for a possible fork. */ + /* Prepare for a possible fork. */ pthread_atfork(GC_fork_prepare_proc, GC_fork_parent_proc, - GC_fork_child_proc); + GC_fork_child_proc); # endif /* HANDLE_FORK */ # if defined(INCLUDE_LINUX_THREAD_DESCR) - /* Explicitly register the region including the address */ - /* of a thread local variable. 
This should included thread */ - /* locals for the main thread, except for those allocated */ - /* in response to dlopen calls. */ - { - ptr_t thread_local_addr = (ptr_t)(&dummy_thread_local); - ptr_t main_thread_start, main_thread_end; + /* Explicitly register the region including the address */ + /* of a thread local variable. This should include thread */ + /* locals for the main thread, except for those allocated */ + /* in response to dlopen calls. */ + { + ptr_t thread_local_addr = (ptr_t)(&GC_dummy_thread_local); + ptr_t main_thread_start, main_thread_end; if (!GC_enclosing_mapping(thread_local_addr, &main_thread_start, - &main_thread_end)) { - ABORT("Failed to find mapping for main thread thread locals"); - } - GC_add_roots_inner(main_thread_start, main_thread_end, FALSE); - } + &main_thread_end)) { + ABORT("Failed to find mapping for main thread thread locals"); + } + GC_add_roots_inner(main_thread_start, main_thread_end, FALSE); + } # endif - /* Add the initial thread, so we can stop it. */ + /* Add the initial thread, so we can stop it. */ t = GC_new_thread(pthread_self()); # ifdef GC_DARWIN_THREADS t -> stop_info.mach_thread = mach_thread_self(); @@ -756,72 +812,79 @@ void GC_thr_init(void) /* Set GC_nprocs. */ { - char * nprocs_string = GETENV("GC_NPROCS"); - GC_nprocs = -1; - if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string); + char * nprocs_string = GETENV("GC_NPROCS"); + GC_nprocs = -1; + if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string); } if (GC_nprocs <= 0) { # if defined(GC_HPUX_THREADS) - GC_nprocs = pthread_num_processors_np(); + GC_nprocs = pthread_num_processors_np(); +# endif +# if defined(GC_OSF1_THREADS) || defined(GC_AIX_THREADS) \ + || defined(GC_SOLARIS_THREADS) || defined(GC_GNU_THREADS) + GC_nprocs = sysconf(_SC_NPROCESSORS_ONLN); + if (GC_nprocs <= 0) GC_nprocs = 1; # endif -# if defined(GC_OSF1_THREADS) || defined(GC_AIX_THREADS) \ - || defined(GC_SOLARIS_THREADS) - GC_nprocs = sysconf(_SC_NPROCESSORS_ONLN); - if (GC_nprocs <= 0) GC_nprocs = 1; -# endif # if defined(GC_IRIX_THREADS) - GC_nprocs = sysconf(_SC_NPROC_ONLN); - if (GC_nprocs <= 0) GC_nprocs = 1; + GC_nprocs = sysconf(_SC_NPROC_ONLN); + if (GC_nprocs <= 0) GC_nprocs = 1; # endif # if defined(GC_NETBSD_THREADS) - GC_nprocs = get_ncpu(); + GC_nprocs = get_ncpu(); +# endif +# if defined(GC_OPENBSD_THREADS) + /* FIXME: Implement real "get_ncpu". 
*/ + GC_nprocs = 2; # endif # if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS) - int ncpus = 1; - size_t len = sizeof(ncpus); - sysctl((int[2]) {CTL_HW, HW_NCPU}, 2, &ncpus, &len, NULL, 0); - GC_nprocs = ncpus; + int ncpus = 1; + size_t len = sizeof(ncpus); + sysctl((int[2]) {CTL_HW, HW_NCPU}, 2, &ncpus, &len, NULL, 0); + GC_nprocs = ncpus; # endif -# if defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS) +# if defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS) GC_nprocs = GC_get_nprocs(); -# endif -# if defined(GC_GNU_THREADS) - if (GC_nprocs <= 0) GC_nprocs = 1; # endif } if (GC_nprocs <= 0) { - WARN("GC_get_nprocs() returned %ld\n", GC_nprocs); - GC_nprocs = 2; -# ifdef PARALLEL_MARK - GC_markers = 1; -# endif + WARN("GC_get_nprocs() returned %" GC_PRIdPTR "\n", GC_nprocs); + GC_nprocs = 2; +# ifdef PARALLEL_MARK + GC_markers = 1; +# endif } else { -# ifdef PARALLEL_MARK +# ifdef PARALLEL_MARK { - char * markers_string = GETENV("GC_MARKERS"); - if (markers_string != NULL) { - GC_markers = atoi(markers_string); - } else { - GC_markers = GC_nprocs; - } + char * markers_string = GETENV("GC_MARKERS"); + if (markers_string != NULL) { + GC_markers = atoi(markers_string); + if (GC_markers > MAX_MARKERS) { + WARN("Limiting number of mark threads\n", 0); + GC_markers = MAX_MARKERS; + } + } else { + GC_markers = GC_nprocs; + if (GC_markers >= MAX_MARKERS) + GC_markers = MAX_MARKERS; /* silently limit GC_markers value */ + } } -# endif +# endif } # ifdef PARALLEL_MARK if (GC_print_stats) { GC_log_printf("Number of processors = %ld, " - "number of marker threads = %ld\n", GC_nprocs, GC_markers); + "number of marker threads = %ld\n", GC_nprocs, GC_markers); } - if (GC_markers == 1) { - GC_parallel = FALSE; - if (GC_print_stats) { - GC_log_printf( - "Single marker thread, turning off parallel marking\n"); - } + if (GC_markers <= 1) { + GC_parallel = FALSE; + if (GC_print_stats) { + GC_log_printf( + "Single marker thread, turning off parallel marking\n"); + } } else { - GC_parallel = TRUE; - /* Disable true incremental collection, but generational is OK. */ - GC_time_limit = GC_TIME_UNLIMITED; + GC_parallel = TRUE; + /* Disable true incremental collection, but generational is OK. */ + GC_time_limit = GC_TIME_UNLIMITED; } /* If we are using a parallel marker, actually start helper threads. */ if (GC_parallel) start_mark_threads(); @@ -829,19 +892,19 @@ void GC_thr_init(void) } -/* Perform all initializations, including those that */ -/* may require allocation. */ -/* Called without allocation lock. */ -/* Must be called before a second thread is created. */ -/* Did we say it's called without the allocation lock? */ -void GC_init_parallel(void) +/* Perform all initializations, including those that */ +/* may require allocation. */ +/* Called without allocation lock. */ +/* Must be called before a second thread is created. */ +/* Did we say it's called without the allocation lock? */ +GC_INNER void GC_init_parallel(void) { if (parallel_initialized) return; parallel_initialized = TRUE; - /* GC_init() calls us back, so set flag first. */ + /* GC_init() calls us back, so set flag first. */ if (!GC_is_initialized) GC_init(); - /* Initialize thread local free lists if used. */ + /* Initialize thread local free lists if used. 
*/ # if defined(THREAD_LOCAL_ALLOC) LOCK(); GC_init_thread_local(&(GC_lookup_thread(pthread_self())->tlfs)); @@ -849,12 +912,12 @@ void GC_init_parallel(void) # endif } - -#if !defined(GC_DARWIN_THREADS) -int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset) -{ +#if !defined(GC_DARWIN_THREADS) && !defined(GC_OPENBSD_THREADS) + GC_API int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, + sigset_t *oset) + { sigset_t fudged_set; - + INIT_REAL_SYMS(); if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) { fudged_set = *set; @@ -862,100 +925,159 @@ int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset) set = &fudged_set; } return(REAL_FUNC(pthread_sigmask)(how, set, oset)); -} + } #endif /* !GC_DARWIN_THREADS */ -/* Wrapper for functions that are likely to block for an appreciable */ -/* length of time. */ +/* Wrapper for functions that are likely to block for an appreciable */ +/* length of time. */ -struct blocking_data { - void (*fn)(void *); - void *arg; -}; - -static void GC_do_blocking_inner(ptr_t data, void * context) { +/*ARGSUSED*/ +GC_INNER void GC_do_blocking_inner(ptr_t data, void * context) +{ struct blocking_data * d = (struct blocking_data *) data; GC_thread me; LOCK(); me = GC_lookup_thread(pthread_self()); GC_ASSERT(!(me -> thread_blocked)); # ifdef SPARC - me -> stop_info.stack_ptr = GC_save_regs_in_stack(); + me -> stop_info.stack_ptr = GC_save_regs_in_stack(); # elif !defined(GC_DARWIN_THREADS) - me -> stop_info.stack_ptr = GC_approx_sp(); + me -> stop_info.stack_ptr = GC_approx_sp(); # endif # ifdef IA64 - me -> backing_store_ptr = GC_save_regs_in_stack(); + me -> backing_store_ptr = GC_save_regs_in_stack(); # endif me -> thread_blocked = TRUE; /* Save context here if we want to support precise stack marking */ UNLOCK(); - (d -> fn)(d -> arg); - LOCK(); /* This will block if the world is stopped. */ + d -> client_data = (d -> fn)(d -> client_data); + LOCK(); /* This will block if the world is stopped. */ me -> thread_blocked = FALSE; UNLOCK(); } -void GC_do_blocking(void (*fn)(void *), void *arg) { - struct blocking_data my_data; +/* GC_call_with_gc_active() has the opposite to GC_do_blocking() */ +/* functionality. It might be called from a user function invoked by */ +/* GC_do_blocking() to temporarily back allow calling any GC function */ +/* and/or manipulating pointers to the garbage collected heap. */ +GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn, + void * client_data) +{ + struct GC_activation_frame_s frame; + GC_thread me; + LOCK(); /* This will block if the world is stopped. */ + me = GC_lookup_thread(pthread_self()); + + /* Adjust our stack base value (this could happen unless */ + /* GC_get_stack_base() was used which returned GC_SUCCESS). */ + if ((me -> flags & MAIN_THREAD) == 0) { + GC_ASSERT(me -> stack_end != NULL); + if (me -> stack_end HOTTER_THAN (ptr_t)(&frame)) + me -> stack_end = (ptr_t)(&frame); + } else { + /* The original stack. */ + if (GC_stackbottom HOTTER_THAN (ptr_t)(&frame)) + GC_stackbottom = (ptr_t)(&frame); + } + + if (me -> thread_blocked == FALSE) { + /* We are not inside GC_do_blocking() - do nothing more. */ + UNLOCK(); + return fn(client_data); + } + + /* Setup new "frame". */ +# ifdef GC_DARWIN_THREADS + /* FIXME: Implement it for Darwin ("frames" are ignored at present). */ +# else + frame.saved_stack_ptr = me -> stop_info.stack_ptr; +# endif +# ifdef IA64 + /* This is the same as in GC_call_with_stack_base(). 
*/ + frame.backing_store_end = GC_save_regs_in_stack(); + /* Unnecessarily flushes register stack, */ + /* but that probably doesn't hurt. */ + frame.saved_backing_store_ptr = me -> backing_store_ptr; +# endif + frame.prev = me -> activation_frame; + me -> thread_blocked = FALSE; + me -> activation_frame = &frame; + + UNLOCK(); + client_data = fn(client_data); + GC_ASSERT(me -> thread_blocked == FALSE); + GC_ASSERT(me -> activation_frame == &frame); + + /* Restore original "frame". */ + LOCK(); + me -> activation_frame = frame.prev; +# ifdef IA64 + me -> backing_store_ptr = frame.saved_backing_store_ptr; +# endif + me -> thread_blocked = TRUE; +# ifndef GC_DARWIN_THREADS + me -> stop_info.stack_ptr = frame.saved_stack_ptr; +# endif + UNLOCK(); - my_data.fn = fn; - my_data.arg = arg; - GC_with_callee_saves_pushed(GC_do_blocking_inner, (ptr_t)(&my_data)); + return client_data; /* result */ } - + struct start_info { void *(*start_routine)(void *); void *arg; word flags; - sem_t registered; /* 1 ==> in our thread table, but */ - /* parent hasn't yet noticed. */ + sem_t registered; /* 1 ==> in our thread table, but */ + /* parent hasn't yet noticed. */ }; -int GC_unregister_my_thread(void) +GC_API int GC_CALL GC_unregister_my_thread(void) { GC_thread me; + IF_CANCEL(int cancel_state;) LOCK(); - /* Wait for any GC that may be marking from our stack to */ - /* complete before we remove this thread. */ + DISABLE_CANCEL(cancel_state); + /* Wait for any GC that may be marking from our stack to */ + /* complete before we remove this thread. */ GC_wait_for_gc_completion(FALSE); me = GC_lookup_thread(pthread_self()); # if defined(THREAD_LOCAL_ALLOC) GC_destroy_thread_local(&(me->tlfs)); # endif if (me -> flags & DETACHED) { - GC_delete_thread(pthread_self()); + GC_delete_thread(pthread_self()); } else { - me -> flags |= FINISHED; + me -> flags |= FINISHED; } # if defined(THREAD_LOCAL_ALLOC) GC_remove_specific(GC_thread_key); # endif + RESTORE_CANCEL(cancel_state); UNLOCK(); return GC_SUCCESS; } -/* Called at thread exit. */ -/* Never called for main thread. That's OK, since it */ -/* results in at most a tiny one-time leak. And */ -/* linuxthreads doesn't reclaim the main threads */ -/* resources or id anyway. */ -void GC_thread_exit_proc(void *arg) +/* Called at thread exit. */ +/* Never called for main thread. That's OK, since it */ +/* results in at most a tiny one-time leak. And */ +/* linuxthreads doesn't reclaim the main threads */ +/* resources or id anyway. */ +STATIC void GC_thread_exit_proc(void *arg) { GC_unregister_my_thread(); } -int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval) +GC_API int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval) { int result; GC_thread thread_gc_id; - + INIT_REAL_SYMS(); LOCK(); thread_gc_id = GC_lookup_thread(thread); - /* This is guaranteed to be the intended one, since the thread id */ - /* cant have been recycled by pthreads. */ + /* This is guaranteed to be the intended one, since the thread id */ + /* can't have been recycled by pthreads. 
*/ UNLOCK(); result = REAL_FUNC(pthread_join)(thread, retval); # if defined (GC_FREEBSD_THREADS) @@ -978,12 +1100,11 @@ int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval) return result; } -int -WRAP_FUNC(pthread_detach)(pthread_t thread) +GC_API int WRAP_FUNC(pthread_detach)(pthread_t thread) { int result; GC_thread thread_gc_id; - + INIT_REAL_SYMS(); LOCK(); thread_gc_id = GC_lookup_thread(thread); @@ -1001,49 +1122,69 @@ WRAP_FUNC(pthread_detach)(pthread_t thread) return result; } -GC_bool GC_in_thread_creation = FALSE; /* Protected by allocation lock. */ +GC_INNER GC_bool GC_in_thread_creation = FALSE; + /* Protected by allocation lock. */ -GC_thread GC_register_my_thread_inner(struct GC_stack_base *sb, - pthread_t my_pthread) +STATIC GC_thread GC_register_my_thread_inner(const struct GC_stack_base *sb, + pthread_t my_pthread) { GC_thread me; GC_in_thread_creation = TRUE; /* OK to collect from unknown thread. */ me = GC_new_thread(my_pthread); GC_in_thread_creation = FALSE; + if (me == 0) + ABORT("Failed to allocate memory for thread registering."); # ifdef GC_DARWIN_THREADS me -> stop_info.mach_thread = mach_thread_self(); # else me -> stop_info.stack_ptr = sb -> mem_base; # endif me -> stack_end = sb -> mem_base; + if (me -> stack_end == NULL) + ABORT("Bad stack base in GC_register_my_thread"); # ifdef IA64 me -> backing_store_end = sb -> reg_base; # endif /* IA64 */ return me; } -int GC_register_my_thread(struct GC_stack_base *sb) +GC_API void GC_CALL GC_allow_register_threads(void) +{ + /* Check GC is initialized and the current thread is registered. */ + GC_ASSERT(GC_lookup_thread(pthread_self()) != 0); + + GC_need_to_lock = TRUE; /* We are multi-threaded now. */ +} + +GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *sb) { pthread_t my_pthread = pthread_self(); GC_thread me; + if (GC_need_to_lock == FALSE) + ABORT("Threads explicit registering is not previously enabled"); + LOCK(); me = GC_lookup_thread(my_pthread); if (0 == me) { me = GC_register_my_thread_inner(sb, my_pthread); - me -> flags |= DETACHED; - /* Treat as detached, since we do not need to worry about */ - /* pointer results. */ - UNLOCK(); + me -> flags |= DETACHED; + /* Treat as detached, since we do not need to worry about */ + /* pointer results. */ +# if defined(THREAD_LOCAL_ALLOC) + GC_init_thread_local(&(me->tlfs)); +# endif + UNLOCK(); return GC_SUCCESS; } else { - UNLOCK(); - return GC_DUPLICATE; + UNLOCK(); + return GC_DUPLICATE; } } -void * GC_inner_start_routine(struct GC_stack_base *sb, void * arg) +STATIC void * GC_CALLBACK GC_inner_start_routine(struct GC_stack_base *sb, + void * arg) { struct start_info * si = arg; void * result; @@ -1054,53 +1195,50 @@ void * GC_inner_start_routine(struct GC_stack_base *sb, void * arg) my_pthread = pthread_self(); # ifdef DEBUG_THREADS - GC_printf("Starting thread 0x%x\n", (unsigned)my_pthread); - GC_printf("pid = %ld\n", (long) getpid()); - GC_printf("sp = 0x%lx\n", (long) &arg); + GC_printf("Starting thread 0x%x, pid = %ld, sp = %p\n", + (unsigned)my_pthread, (long) getpid(), &arg); # endif LOCK(); me = GC_register_my_thread_inner(sb, my_pthread); me -> flags = si -> flags; +# if defined(THREAD_LOCAL_ALLOC) + GC_init_thread_local(&(me->tlfs)); +# endif UNLOCK(); start = si -> start_routine; # ifdef DEBUG_THREADS - GC_printf("start_routine = %p\n", (void *)start); + GC_printf("start_routine = %p\n", (void *)(signed_word)start); # endif start_arg = si -> arg; - sem_post(&(si -> registered)); /* Last action on si. */ - /* OK to deallocate. 
*/ + sem_post(&(si -> registered)); /* Last action on si. */ + /* OK to deallocate. */ pthread_cleanup_push(GC_thread_exit_proc, 0); -# if defined(THREAD_LOCAL_ALLOC) - LOCK(); - GC_init_thread_local(&(me->tlfs)); - UNLOCK(); -# endif result = (*start)(start_arg); -# if DEBUG_THREADS +# ifdef DEBUG_THREADS GC_printf("Finishing thread 0x%x\n", (unsigned)pthread_self()); # endif me -> status = result; pthread_cleanup_pop(1); - /* Cleanup acquires lock, ensuring that we can't exit */ + /* Cleanup acquires lock, ensuring that we can't exit */ /* while a collection that thinks we're alive is trying to stop */ - /* us. */ + /* us. */ return(result); } -void * GC_start_routine(void * arg) +STATIC void * GC_start_routine(void * arg) { # ifdef INCLUDE_LINUX_THREAD_DESCR struct GC_stack_base sb; # ifdef REDIRECT_MALLOC - /* GC_get_stack_base may call pthread_getattr_np, which can */ - /* unfortunately call realloc, which may allocate from an */ - /* unregistered thread. This is unpleasant, since it might */ - /* force heap growth. */ + /* GC_get_stack_base may call pthread_getattr_np, which can */ + /* unfortunately call realloc, which may allocate from an */ + /* unregistered thread. This is unpleasant, since it might */ + /* force heap growth. */ GC_disable(); # endif if (GC_get_stack_base(&sb) != GC_SUCCESS) - ABORT("Failed to get thread stack base."); + ABORT("Failed to get thread stack base."); # ifdef REDIRECT_MALLOC GC_enable(); # endif @@ -1110,29 +1248,31 @@ void * GC_start_routine(void * arg) # endif } -int -WRAP_FUNC(pthread_create)(pthread_t *new_thread, - const pthread_attr_t *attr, +GC_API int WRAP_FUNC(pthread_create)(pthread_t *new_thread, + const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) { int result; int detachstate; word my_flags = 0; - struct start_info * si; - /* This is otherwise saved only in an area mmapped by the thread */ - /* library, which isn't visible to the collector. */ - - /* We resist the temptation to muck with the stack size here, */ - /* even if the default is unreasonably small. That's the client's */ - /* responsibility. */ + struct start_info * si; + /* This is otherwise saved only in an area mmapped by the thread */ + /* library, which isn't visible to the collector. */ + + /* We resist the temptation to muck with the stack size here, */ + /* even if the default is unreasonably small. That's the client's */ + /* responsibility. */ INIT_REAL_SYMS(); LOCK(); si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info), - NORMAL); + NORMAL); UNLOCK(); if (!parallel_initialized) GC_init_parallel(); - if (0 == si) return(ENOMEM); + if (0 == si && + (si = (struct start_info *) + (*GC_get_oom_fn())(sizeof(struct start_info))) == 0) + return(ENOMEM); sem_init(&(si -> registered), 0, 0); si -> start_routine = start_routine; si -> arg = arg; @@ -1140,38 +1280,38 @@ WRAP_FUNC(pthread_create)(pthread_t *new_thread, if (!GC_thr_initialized) GC_thr_init(); # ifdef GC_ASSERTIONS { - size_t stack_size = 0; - if (NULL != attr) { - pthread_attr_getstacksize(attr, &stack_size); - } - if (0 == stack_size) { - pthread_attr_t my_attr; - pthread_attr_init(&my_attr); - pthread_attr_getstacksize(&my_attr, &stack_size); - } - /* On Solaris 10, with default attr initialization, */ - /* stack_size remains 0. Fudge it. 
*/ - if (0 == stack_size) { -# ifndef SOLARIS - WARN("Failed to get stack size for assertion checking\n", 0); -# endif - stack_size = 1000000; - } + size_t stack_size = 0; + if (NULL != attr) { + pthread_attr_getstacksize(attr, &stack_size); + } + if (0 == stack_size) { + pthread_attr_t my_attr; + pthread_attr_init(&my_attr); + pthread_attr_getstacksize(&my_attr, &stack_size); + } + /* On Solaris 10, with default attr initialization, */ + /* stack_size remains 0. Fudge it. */ + if (0 == stack_size) { +# ifndef SOLARIS + WARN("Failed to get stack size for assertion checking\n", 0); +# endif + stack_size = 1000000; + } # ifdef PARALLEL_MARK - GC_ASSERT(stack_size >= (8*HBLKSIZE*sizeof(word))); + GC_ASSERT(stack_size >= (8*HBLKSIZE*sizeof(word))); # else - /* FreeBSD-5.3/Alpha: default pthread stack is 64K, */ - /* HBLKSIZE=8192, sizeof(word)=8 */ - GC_ASSERT(stack_size >= 65536); + /* FreeBSD-5.3/Alpha: default pthread stack is 64K, */ + /* HBLKSIZE=8192, sizeof(word)=8 */ + GC_ASSERT(stack_size >= 65536); # endif - /* Our threads may need to do some work for the GC. */ - /* Ridiculously small threads won't work, and they */ - /* probably wouldn't work anyway. */ + /* Our threads may need to do some work for the GC. */ + /* Ridiculously small threads won't work, and they */ + /* probably wouldn't work anyway. */ } # endif if (NULL == attr) { - detachstate = PTHREAD_CREATE_JOINABLE; - } else { + detachstate = PTHREAD_CREATE_JOINABLE; + } else { pthread_attr_getdetachstate(attr, &detachstate); } if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED; @@ -1179,7 +1319,7 @@ WRAP_FUNC(pthread_create)(pthread_t *new_thread, UNLOCK(); # ifdef DEBUG_THREADS GC_printf("About to start new thread from thread 0x%x\n", - (unsigned)pthread_self()); + (unsigned)pthread_self()); # endif GC_need_to_lock = TRUE; @@ -1188,14 +1328,18 @@ WRAP_FUNC(pthread_create)(pthread_t *new_thread, # ifdef DEBUG_THREADS GC_printf("Started thread 0x%x\n", (unsigned)(*new_thread)); # endif - /* Wait until child has been added to the thread table. */ - /* This also ensures that we hold onto si until the child is done */ - /* with it. Thus it doesn't matter whether it is otherwise */ - /* visible to the collector. */ + /* Wait until child has been added to the thread table. */ + /* This also ensures that we hold onto si until the child is done */ + /* with it. Thus it doesn't matter whether it is otherwise */ + /* visible to the collector. */ if (0 == result) { - while (0 != sem_wait(&(si -> registered))) { + IF_CANCEL(int cancel_state;) + DISABLE_CANCEL(cancel_state); + /* pthread_create is not a cancellation point. */ + while (0 != sem_wait(&(si -> registered))) { if (EINTR != errno) ABORT("sem_wait failed"); - } + } + RESTORE_CANCEL(cancel_state); } sem_destroy(&(si -> registered)); LOCK(); @@ -1205,93 +1349,96 @@ WRAP_FUNC(pthread_create)(pthread_t *new_thread, return(result); } -/* Spend a few cycles in a way that can't introduce contention with */ -/* othre threads. */ -void GC_pause(void) +#if defined(USE_SPIN_LOCK) || !defined(NO_PTHREAD_TRYLOCK) +/* Spend a few cycles in a way that can't introduce contention with */ +/* other threads. */ +STATIC void GC_pause(void) { int i; # if !defined(__GNUC__) || defined(__INTEL_COMPILER) volatile word dummy = 0; # endif - for (i = 0; i < 10; ++i) { + for (i = 0; i < 10; ++i) { # if defined(__GNUC__) && !defined(__INTEL_COMPILER) __asm__ __volatile__ (" " : : : "memory"); # else - /* Something that's unlikely to be optimized away. 
*/
- GC_noop(++dummy);
+ /* Something that's unlikely to be optimized away. */
+ GC_noop(++dummy);
# endif
 }
 }
-
-#define SPIN_MAX 128 /* Maximum number of calls to GC_pause before */
- /* give up. */
+#endif

-volatile GC_bool GC_collecting = 0;
- /* A hint that we're in the collector and */
+#define SPIN_MAX 128 /* Maximum number of calls to GC_pause before */
+ /* give up. */
+
+GC_INNER volatile GC_bool GC_collecting = 0;
+ /* A hint that we're in the collector and */
 /* holding the allocation lock for an */
 /* extended period. */

-#if !defined(USE_SPIN_LOCK) || defined(PARALLEL_MARK)
-/* If we don't want to use the below spinlock implementation, either */
-/* because we don't have a GC_test_and_set implementation, or because */
-/* we don't want to risk sleeping, we can still try spinning on */
-/* pthread_mutex_trylock for a while. This appears to be very */
-/* beneficial in many cases. */
-/* I suspect that under high contention this is nearly always better */
-/* than the spin lock. But it's a bit slower on a uniprocessor. */
-/* Hence we still default to the spin lock. */
-/* This is also used to acquire the mark lock for the parallel */
-/* marker. */
-
-/* Here we use a strict exponential backoff scheme. I don't know */
-/* whether that's better or worse than the above. We eventually */
-/* yield by calling pthread_mutex_lock(); it never makes sense to */
-/* explicitly sleep. */
-
-#define LOCK_STATS
+#if (!defined(USE_SPIN_LOCK) && !defined(NO_PTHREAD_TRYLOCK)) \
+ || defined(PARALLEL_MARK)
+/* If we don't want to use the below spinlock implementation, either */
+/* because we don't have a GC_test_and_set implementation, or because */
+/* we don't want to risk sleeping, we can still try spinning on */
+/* pthread_mutex_trylock for a while. This appears to be very */
+/* beneficial in many cases. */
+/* I suspect that under high contention this is nearly always better */
+/* than the spin lock. But it's a bit slower on a uniprocessor. */
+/* Hence we still default to the spin lock. */
+/* This is also used to acquire the mark lock for the parallel */
+/* marker. */
+
+/* Here we use a strict exponential backoff scheme. I don't know */
+/* whether that's better or worse than the above. We eventually */
+/* yield by calling pthread_mutex_lock(); it never makes sense to */
+/* explicitly sleep. */
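Stripped of the statistics counters and GC specifics, the trylock-with-exponential-backoff scheme described above reduces to roughly the following sketch (the function name is illustrative, a GCC-style compiler barrier stands in for GC_pause, and error handling is elided, unlike GC_generic_lock below):

    #include <pthread.h>

    #define SPIN_MAX 128

    static void backoff_lock(pthread_mutex_t *lock)
    {
        unsigned pause_length, i;

        for (pause_length = 1; pause_length <= SPIN_MAX; pause_length <<= 1) {
            if (0 == pthread_mutex_trylock(lock))
                return;                 /* acquired while spinning */
            for (i = 0; i < pause_length; ++i) {
                /* Stand-in for GC_pause(): burn a few cycles without */
                /* touching shared memory.                             */
                __asm__ __volatile__ ("" : : : "memory");
            }
        }
        /* Spinning did not pay off; block in the kernel rather than */
        /* sleep for a fixed interval.                                */
        pthread_mutex_lock(lock);
    }

GC_generic_lock below adds the explicit EBUSY check and the optional lock statistics on top of this skeleton.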
-volatile GC_bool GC_collecting = 0;
-                        /* A hint that we're in the collector and       */
+#define SPIN_MAX 128 /* Maximum number of calls to GC_pause before */
+                     /* giving up.                                  */
+
+GC_INNER volatile GC_bool GC_collecting = 0;
+                        /* A hint that we're in the collector and      */
                         /* holding the allocation lock for an          */
                         /* extended period.                            */
 
-#if !defined(USE_SPIN_LOCK) || defined(PARALLEL_MARK)
-/* If we don't want to use the below spinlock implementation, either */
-/* because we don't have a GC_test_and_set implementation, or because */
-/* we don't want to risk sleeping, we can still try spinning on */
-/* pthread_mutex_trylock for a while. This appears to be very */
-/* beneficial in many cases. */
-/* I suspect that under high contention this is nearly always better */
-/* than the spin lock. But it's a bit slower on a uniprocessor. */
-/* Hence we still default to the spin lock. */
-/* This is also used to acquire the mark lock for the parallel */
-/* marker. */
-
-/* Here we use a strict exponential backoff scheme. I don't know */
-/* whether that's better or worse than the above. We eventually */
-/* yield by calling pthread_mutex_lock(); it never makes sense to */
-/* explicitly sleep. */
-
-#define LOCK_STATS
+#if (!defined(USE_SPIN_LOCK) && !defined(NO_PTHREAD_TRYLOCK)) \
+    || defined(PARALLEL_MARK)
+/* If we don't want to use the below spinlock implementation, either  */
+/* because we don't have a GC_test_and_set implementation, or because */
+/* we don't want to risk sleeping, we can still try spinning on       */
+/* pthread_mutex_trylock for a while.  This appears to be very        */
+/* beneficial in many cases.                                          */
+/* I suspect that under high contention this is nearly always better  */
+/* than the spin lock.  But it's a bit slower on a uniprocessor.      */
+/* Hence we still default to the spin lock.                           */
+/* This is also used to acquire the mark lock for the parallel        */
+/* marker.                                                            */
+
+/* Here we use a strict exponential backoff scheme.  I don't know     */
+/* whether that's better or worse than the above.  We eventually      */
+/* yield by calling pthread_mutex_lock(); it never makes sense to     */
+/* explicitly sleep.                                                  */
+
+/* #define LOCK_STATS */
 #ifdef LOCK_STATS
-  unsigned long GC_spin_count = 0;
-  unsigned long GC_block_count = 0;
-  unsigned long GC_unlocked_count = 0;
+  AO_t GC_spin_count = 0;
+  AO_t GC_block_count = 0;
+  AO_t GC_unlocked_count = 0;
 #endif
 
-void GC_generic_lock(pthread_mutex_t * lock)
+STATIC void GC_generic_lock(pthread_mutex_t * lock)
 {
 #ifndef NO_PTHREAD_TRYLOCK
     unsigned pause_length = 1;
     unsigned i;
-    
+
     if (0 == pthread_mutex_trylock(lock)) {
 #       ifdef LOCK_STATS
-            ++GC_unlocked_count;
+            (void)AO_fetch_and_add1(&GC_unlocked_count);
 #       endif
-        return;
+        return;
     }
     for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
-        for (i = 0; i < pause_length; ++i) {
-            GC_pause();
-        }
+        for (i = 0; i < pause_length; ++i) {
+            GC_pause();
+        }
         switch(pthread_mutex_trylock(lock)) {
-            case 0:
-#               ifdef LOCK_STATS
-                    ++GC_spin_count;
-#               endif
-                return;
-            case EBUSY:
-                break;
-            default:
-                ABORT("Unexpected error from pthread_mutex_trylock");
+            case 0:
+#               ifdef LOCK_STATS
+                    (void)AO_fetch_and_add1(&GC_spin_count);
+#               endif
+                return;
+            case EBUSY:
+                break;
+            default:
+                ABORT("Unexpected error from pthread_mutex_trylock");
         }
     }
 #endif /* !NO_PTHREAD_TRYLOCK */
 #   ifdef LOCK_STATS
-        ++GC_block_count;
+        (void)AO_fetch_and_add1(&GC_block_count);
 #   endif
     pthread_mutex_lock(lock);
 }
 
-#endif /* !USE_SPIN_LOCK || PARALLEL_MARK */
+#endif /* !USE_SPIN_LOCK || ... */
 
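/* Distilled from GC_generic_lock above: try the mutex once, spin with
 * an exponentially growing number of pauses between retries, and give
 * up and block in pthread_mutex_lock after SPIN_MAX pauses have been
 * spent.  A simplified sketch, not the collector's code; spin_pause
 * is the delay loop sketched earlier, declared here for reference.
 */

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

#define MY_SPIN_MAX 128                 /* cf. SPIN_MAX above */

static void spin_pause(void);           /* delay loop from the earlier sketch */

static void backoff_lock(pthread_mutex_t *lock)
{
    unsigned pause_length, i;

    if (pthread_mutex_trylock(lock) == 0) return;   /* uncontended case */
    for (pause_length = 1; pause_length <= MY_SPIN_MAX; pause_length <<= 1) {
        for (i = 0; i < pause_length; ++i) spin_pause();
        switch (pthread_mutex_trylock(lock)) {
        case 0:     return;             /* spinning paid off */
        case EBUSY: break;              /* still held: back off further */
        default:    abort();            /* unexpected error */
        }
    }
    pthread_mutex_lock(lock);           /* give up spinning and block */
}
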
 #if defined(USE_SPIN_LOCK)
 
@@ -1299,10 +1446,9 @@ void GC_generic_lock(pthread_mutex_t * lock)
 /* as STL alloc.h.  This isn't really the right way to do this.   */
 /* but until the POSIX scheduling mess gets straightened out ... */
 
-volatile AO_TS_t GC_allocate_lock = 0;
+GC_INNER volatile AO_TS_t GC_allocate_lock = AO_TS_INITIALIZER;
 
-
-void GC_lock(void)
+GC_INNER void GC_lock(void)
 {
 #   define low_spin_max 30  /* spin cycles if we suspect uniprocessor */
 #   define high_spin_max SPIN_MAX /* spin cycles for multiprocessor */
@@ -1324,12 +1470,12 @@ void GC_lock(void)
             continue;
         }
         if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
-            /*
+            /*
              * got it!
              * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
-            */
+             */
             last_spins = i;
             spin_max = high_spin_max;
             return;
@@ -1343,33 +1489,33 @@ yield:
             return;
         }
 #       define SLEEP_THRESHOLD 12
-               /* Under Linux very short sleeps tend to wait until */
-               /* the current time quantum expires. On old Linux */
-               /* kernels nanosleep(<= 2ms) just spins under Linux. */
-               /* (Under 2.4, this happens only for real-time */
-               /* processes.) We want to minimize both behaviors */
-               /* here. */
+                /* Under Linux very short sleeps tend to wait until   */
+                /* the current time quantum expires.  On old Linux    */
+                /* kernels nanosleep(<= 2ms) just spins.              */
+                /* (Under 2.4, this happens only for real-time        */
+                /* processes.)  We want to minimize both behaviors    */
+                /* here.                                              */
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
-       } else {
-           struct timespec ts;
-
-           if (i > 24) i = 24;
-               /* Don't wait for more than about 15msecs, even */
-               /* under extreme contention. */
-           ts.tv_sec = 0;
-           ts.tv_nsec = 1 << i;
-           nanosleep(&ts, 0);
-       }
+        } else {
+            struct timespec ts;
+
+            if (i > 24) i = 24;
+                /* Don't wait for more than about 15msecs, even */
+                /* under extreme contention.                    */
+            ts.tv_sec = 0;
+            ts.tv_nsec = 1 << i;
+            nanosleep(&ts, 0);
+        }
     }
 }
 
 #else  /* !USE_SPINLOCK */
-void GC_lock(void)
+GC_INNER void GC_lock(void)
 {
 #ifndef NO_PTHREAD_TRYLOCK
     if (1 == GC_nprocs || GC_collecting) {
-        pthread_mutex_lock(&GC_allocate_ml);
+        pthread_mutex_lock(&GC_allocate_ml);
     } else {
         GC_generic_lock(&GC_allocate_ml);
     }
@@ -1380,10 +1526,10 @@ void GC_lock(void)
 
 #endif /* !USE_SPINLOCK */
 
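/* The spinlock path above escalates from spinning to sched_yield() to
 * nanosleep() with an exponentially growing duration: 2^i ns, with i
 * capped at 24, and 1 << 24 ns is roughly 16.8 ms, which is the
 * "about 15 msecs" of the comment.  A sketch of that escalation using
 * a C11 atomic_flag in place of AO_test_and_set_acquire; hypothetical
 * code, not the collector's.
 */

#include <sched.h>
#include <stdatomic.h>
#include <time.h>

#define MY_SLEEP_THRESHOLD 12           /* cf. SLEEP_THRESHOLD above */

static atomic_flag my_lock = ATOMIC_FLAG_INIT;

static void backoff_spinlock(void)
{
    int i;

    for (i = 0;
         atomic_flag_test_and_set_explicit(&my_lock, memory_order_acquire);
         i++) {
        if (i < MY_SLEEP_THRESHOLD) {
            sched_yield();                  /* cheap: give up the time slice */
        } else {
            struct timespec ts;
            int shift = i > 24 ? 24 : i;    /* cap the sleep near 15 ms */

            ts.tv_sec = 0;
            ts.tv_nsec = 1L << shift;
            nanosleep(&ts, NULL);
        }
    }
    /* Lock is held here; release it with:
       atomic_flag_clear_explicit(&my_lock, memory_order_release); */
}
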
-#if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
+#ifdef PARALLEL_MARK
 
 #ifdef GC_ASSERTIONS
-  unsigned long GC_mark_lock_holder = NO_THREAD;
+  GC_INNER unsigned long GC_mark_lock_holder = NO_THREAD;
 #endif
 
 #if 0
@@ -1402,96 +1548,93 @@ void GC_lock(void)
 
 static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
 
-void GC_acquire_mark_lock(void)
+GC_INNER void GC_acquire_mark_lock(void)
 {
 /*
     if (pthread_mutex_lock(&mark_mutex) != 0) {
-        ABORT("pthread_mutex_lock failed");
+        ABORT("pthread_mutex_lock failed");
     }
 */
     GC_generic_lock(&mark_mutex);
 #   ifdef GC_ASSERTIONS
-        GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
+        GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
 #   endif
 }
 
-void GC_release_mark_lock(void)
+GC_INNER void GC_release_mark_lock(void)
 {
     GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
 #   ifdef GC_ASSERTIONS
-        GC_mark_lock_holder = NO_THREAD;
+        GC_mark_lock_holder = NO_THREAD;
 #   endif
     if (pthread_mutex_unlock(&mark_mutex) != 0) {
-        ABORT("pthread_mutex_unlock failed");
+        ABORT("pthread_mutex_unlock failed");
     }
 }
 
-/* Collector must wait for a freelist builders for 2 reasons: */
-/* 1) Mark bits may still be getting examined without lock. */
-/* 2) Partial free lists referenced only by locals may not be scanned */
-/* correctly, e.g. if they contain "pointer-free" objects, since the */
-/* free-list link may be ignored. */
-void GC_wait_builder(void)
+/* Collector must wait for freelist builders for 2 reasons:            */
+/* 1) Mark bits may still be getting examined without lock.            */
+/* 2) Partial free lists referenced only by locals may not be scanned  */
+/*    correctly, e.g. if they contain "pointer-free" objects, since    */
+/*    the free-list link may be ignored.                               */
+STATIC void GC_wait_builder(void)
 {
     GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
+    ASSERT_CANCEL_DISABLED();
 #   ifdef GC_ASSERTIONS
-        GC_mark_lock_holder = NO_THREAD;
+        GC_mark_lock_holder = NO_THREAD;
 #   endif
     if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
-        ABORT("pthread_cond_wait failed");
+        ABORT("pthread_cond_wait failed");
     }
     GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
 #   ifdef GC_ASSERTIONS
-        GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
+        GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
 #   endif
 }
 
-void GC_wait_for_reclaim(void)
+GC_INNER void GC_wait_for_reclaim(void)
 {
     GC_acquire_mark_lock();
     while (GC_fl_builder_count > 0) {
-        GC_wait_builder();
+        GC_wait_builder();
     }
     GC_release_mark_lock();
 }
 
-void GC_notify_all_builder(void)
+GC_INNER void GC_notify_all_builder(void)
 {
     GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
     if (pthread_cond_broadcast(&builder_cv) != 0) {
-        ABORT("pthread_cond_broadcast failed");
+        ABORT("pthread_cond_broadcast failed");
     }
 }
 
-#endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
-
-#ifdef PARALLEL_MARK
-
 static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;
 
-void GC_wait_marker(void)
+GC_INNER void GC_wait_marker(void)
 {
     GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
+    ASSERT_CANCEL_DISABLED();
 #   ifdef GC_ASSERTIONS
-        GC_mark_lock_holder = NO_THREAD;
+        GC_mark_lock_holder = NO_THREAD;
 #   endif
     if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
-        ABORT("pthread_cond_wait failed");
+        ABORT("pthread_cond_wait failed");
     }
     GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
 #   ifdef GC_ASSERTIONS
-        GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
+        GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
 #   endif
 }
 
-void GC_notify_all_marker(void)
+GC_INNER void GC_notify_all_marker(void)
 {
     if (pthread_cond_broadcast(&mark_cv) != 0) {
-        ABORT("pthread_cond_broadcast failed");
+        ABORT("pthread_cond_broadcast failed");
     }
 }
 
 #endif /* PARALLEL_MARK */
 
-# endif /* GC_LINUX_THREADS and friends */
-
+#endif /* GC_LINUX_THREADS and friends */
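
/* GC_wait_for_reclaim and GC_notify_all_builder above follow the
 * standard condition-variable recipe: waiters re-test the predicate in
 * a loop around pthread_cond_wait (wakeups may be spurious), and the
 * thread that makes the predicate true broadcasts.  A minimal sketch
 * with hypothetical names (builder_count stands in for
 * GC_fl_builder_count):
 */

#include <pthread.h>

static pthread_mutex_t build_mu   = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  build_done = PTHREAD_COND_INITIALIZER;
static int builder_count = 0;

static void builder_start(void)
{
    pthread_mutex_lock(&build_mu);
    ++builder_count;
    pthread_mutex_unlock(&build_mu);
}

static void builder_finish(void)
{
    pthread_mutex_lock(&build_mu);
    if (--builder_count == 0)
        pthread_cond_broadcast(&build_done);    /* cf. GC_notify_all_builder */
    pthread_mutex_unlock(&build_mu);
}

static void wait_for_builders(void)             /* cf. GC_wait_for_reclaim */
{
    pthread_mutex_lock(&build_mu);
    while (builder_count > 0)                   /* re-test: spurious wakeups */
        pthread_cond_wait(&build_done, &build_mu);
    pthread_mutex_unlock(&build_mu);
}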