--> Macro replaced by USE_COMPILER_TLS
# endif
+#ifndef USE_COMPILER_TLS
# if (defined(GC_DGUX386_THREADS) || defined(GC_OSF1_THREADS) || \
- defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS)) \
- && !defined(USE_PTHREAD_SPECIFIC)
+ defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS)) || \
+ defined(GC_NETBSD_THREADS) && !defined(USE_PTHREAD_SPECIFIC) || \
+ defined(GC_FREEBSD_THREADS) && !defined(USE_PTHREAD_SPECIFIC) || \
+ defined(GC_OPENBSD_THREADS)
# define USE_PTHREAD_SPECIFIC
# endif
+#endif
# if defined(GC_DGUX386_THREADS) && !defined(_POSIX4A_DRAFT10_SOURCE)
# define _POSIX4A_DRAFT10_SOURCE 1
typedef pthread_key_t GC_key_t;
# endif
# if defined(USE_COMPILER_TLS)
+/* Note: sles9 gcc on powerpc gets confused by the macro that sets both GC_thread_tls
+ * and the pthread-specific value, so we actually use a static inline function declared
+ * below that is equivalent to:
+ * #define GC_setspecific(key, v) (GC_thread_tls = (v), pthread_setspecific ((key), (v)))
+ */
# define GC_getspecific(x) (GC_thread_tls)
-# define GC_setspecific(key, v) (GC_thread_tls = (v), pthread_setspecific ((key), (v)))
# define GC_key_create pthread_key_create
typedef pthread_key_t GC_key_t;
# endif
# include <sys/sysctl.h>
#endif /* GC_DARWIN_THREADS */
+#if defined(GC_NETBSD_THREADS) || defined(GC_OPENBSD_THREADS)
+# include <sys/param.h>
+# include <sys/sysctl.h>
+#endif
+
#if defined(GC_DGUX386_THREADS)
# endif
# undef pthread_join
# undef pthread_detach
+# if defined(NACL)
+# undef pthread_exit
+# endif
# if defined(GC_OSF1_THREADS) && defined(_PTHREAD_USE_MANGLED_NAMES_) \
&& !defined(_PTHREAD_USE_PTDNAM_)
/* Restore the original mangled names on Tru64 UNIX. */
void GC_init_parallel();
+static pthread_t main_pthread_self;
+static void *main_stack, *main_altstack;
+static int main_stack_size, main_altstack_size;
+
# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
/* We don't really support thread-local allocation with DBG_HDRS_ALL */
GC_key_t GC_thread_key;
#ifdef USE_COMPILER_TLS
-static __thread MONO_TLS_FAST void* GC_thread_tls;
+__thread MONO_TLS_FAST void* GC_thread_tls;
+
+/*
+ * gcc errors out with /tmp/ccdPMFuq.s:2994: Error: symbol `.LTLS4' is already defined
+ * if the inline is added on powerpc
+ */
+#if !defined(__ppc__) && !defined(__powerpc__)
+inline
+#endif
+/* Keep the compiler-TLS cache (GC_thread_tls) and the pthread TSD slot */
+/* in sync.  Returns pthread_setspecific()'s result (0 on success).     */
+/* A function rather than a macro — see the sles9/powerpc note earlier. */
+static int GC_setspecific (GC_key_t key, void *value) {
+	GC_thread_tls = value;
+	return pthread_setspecific (key, value);
+}
#endif
static GC_bool keys_initialized;
ABORT("Failed to set thread specific allocation pointers");
}
for (i = 1; i < NFREELISTS; ++i) {
- p -> ptrfree_freelists[i] = (ptr_t)1;
- p -> normal_freelists[i] = (ptr_t)1;
+ p -> tlfs.ptrfree_freelists[i] = (ptr_t)1;
+ p -> tlfs.normal_freelists[i] = (ptr_t)1;
# ifdef GC_GCJ_SUPPORT
- p -> gcj_freelists[i] = (ptr_t)1;
+ p -> tlfs.gcj_freelists[i] = (ptr_t)1;
# endif
}
/* Set up the size 0 free lists. */
- p -> ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
- p -> normal_freelists[0] = (ptr_t)(&size_zero_object);
+ p -> tlfs.ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
+ p -> tlfs.normal_freelists[0] = (ptr_t)(&size_zero_object);
# ifdef GC_GCJ_SUPPORT
- p -> gcj_freelists[0] = (ptr_t)(-1);
+ p -> tlfs.gcj_freelists[0] = (ptr_t)(-1);
# endif
}
# ifndef HANDLE_FORK
GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);
# endif
- return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
- return_freelists(p -> normal_freelists, GC_objfreelist);
+ return_freelists(p -> tlfs.ptrfree_freelists, GC_aobjfreelist);
+ return_freelists(p -> tlfs.normal_freelists, GC_objfreelist);
# ifdef GC_GCJ_SUPPORT
- return_freelists(p -> gcj_freelists, GC_gcjobjfreelist);
+ return_freelists(p -> tlfs.gcj_freelists, GC_gcjobjfreelist);
# endif
}
GC_ASSERT(tsd == (void *)GC_lookup_thread(pthread_self()));
UNLOCK();
# endif
- my_fl = ((GC_thread)tsd) -> normal_freelists + index;
+ my_fl = ((GC_thread)tsd) -> tlfs.normal_freelists + index;
my_entry = *my_fl;
if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
ptr_t next = obj_link(my_entry);
} else {
int index = INDEX_FROM_BYTES(bytes);
ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
- -> ptrfree_freelists + index;
+ -> tlfs.ptrfree_freelists + index;
ptr_t my_entry = *my_fl;
if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
} else {
int index = INDEX_FROM_BYTES(bytes);
ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
- -> gcj_freelists + index;
+ -> tlfs.gcj_freelists + index;
ptr_t my_entry = *my_fl;
if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
GC_PTR result = (GC_PTR)my_entry;
void * GC_local_gcj_fast_malloc(size_t lw, void * ptr_to_struct_containing_descr)
{
ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
- -> gcj_freelists + lw;
+ -> tlfs.gcj_freelists + lw;
ptr_t my_entry = *my_fl;
GC_ASSERT(GC_gcj_malloc_initialized);
for (i = 0; i < THREAD_TABLE_SZ; ++i) {
for (p = GC_threads[i]; 0 != p; p = p -> next) {
for (j = 1; j < NFREELISTS; ++j) {
- q = p -> ptrfree_freelists[j];
+ q = p -> tlfs.ptrfree_freelists[j];
if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
- q = p -> normal_freelists[j];
+ q = p -> tlfs.normal_freelists[j];
if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
# ifdef GC_GCJ_SUPPORT
- q = p -> gcj_freelists[j];
+ q = p -> tlfs.gcj_freelists[j];
if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
# endif /* GC_GCJ_SUPPORT */
}
static struct GC_Thread_Rep first_thread;
+#ifdef NACL
+extern volatile int nacl_thread_parked[MAX_NACL_GC_THREADS];
+extern volatile int nacl_thread_used[MAX_NACL_GC_THREADS];
+extern volatile int nacl_thread_parking_inited;
+extern volatile int nacl_num_gc_threads;
+extern pthread_mutex_t nacl_thread_alloc_lock;
+extern __thread int nacl_thread_idx;
+extern __thread GC_thread nacl_gc_thread_self;
+
+extern void nacl_pre_syscall_hook();
+extern void nacl_post_syscall_hook();
+extern void nacl_register_gc_hooks(void (*pre)(), void (*post)());
+
+#include <stdio.h>
+
+struct nacl_irt_blockhook {
+ int (*register_block_hooks)(void (*pre)(void), void (*post)(void));
+};
+
+extern size_t nacl_interface_query(const char *interface_ident,
+ void *table, size_t tablesize);
+
+/* Claim a per-thread parking slot for the calling thread and, on the   */
+/* first call only, install the NaCl IRT block hooks so the collector   */
+/* is notified around blocking syscalls.  Stores the slot number in the */
+/* thread-local nacl_thread_idx; must be paired with a later call to    */
+/* nacl_shutdown_gc_thread().  Serialized by nacl_thread_alloc_lock.    */
+void nacl_initialize_gc_thread()
+{
+  int i;
+  static struct nacl_irt_blockhook gc_hook;
+
+  pthread_mutex_lock(&nacl_thread_alloc_lock);
+  if (!nacl_thread_parking_inited)
+  {
+    /* One-time setup: clear the parking tables, then register the      */
+    /* pre/post syscall hooks with the IRT.                             */
+    for (i = 0; i < MAX_NACL_GC_THREADS; i++) {
+      nacl_thread_used[i] = 0;
+      nacl_thread_parked[i] = 0;
+    }
+    // TODO: replace with public 'register hook' function when
+    // available from glibc
+    nacl_interface_query("nacl-irt-blockhook-0.1", &gc_hook, sizeof(gc_hook));
+    gc_hook.register_block_hooks(nacl_pre_syscall_hook, nacl_post_syscall_hook);
+    nacl_thread_parking_inited = 1;
+  }
+  GC_ASSERT(nacl_num_gc_threads <= MAX_NACL_GC_THREADS);
+  /* Grab the first free slot.  NOTE(review): if the table is full the  */
+  /* loop falls through without setting nacl_thread_idx — the assert    */
+  /* above is the only guard; confirm callers never exceed the limit.   */
+  for (i = 0; i < MAX_NACL_GC_THREADS; i++) {
+    if (nacl_thread_used[i] == 0) {
+      nacl_thread_used[i] = 1;
+      nacl_thread_idx = i;
+      nacl_num_gc_threads++;
+      break;
+    }
+  }
+  pthread_mutex_unlock(&nacl_thread_alloc_lock);
+}
+
+/* Release the parking slot claimed by nacl_initialize_gc_thread() and  */
+/* invalidate this thread's nacl_thread_idx.  Serialized by             */
+/* nacl_thread_alloc_lock.                                              */
+void nacl_shutdown_gc_thread()
+{
+  pthread_mutex_lock(&nacl_thread_alloc_lock);
+  GC_ASSERT(nacl_thread_idx >= 0 && nacl_thread_idx < MAX_NACL_GC_THREADS);
+  GC_ASSERT(nacl_thread_used[nacl_thread_idx] != 0);
+  nacl_thread_used[nacl_thread_idx] = 0;
+  nacl_thread_idx = -1;
+  nacl_num_gc_threads--;
+  pthread_mutex_unlock(&nacl_thread_alloc_lock);
+}
+
+#endif /* NACL */
+
/* Add a thread to GC_threads. We assume it wasn't already there. */
/* Caller holds allocation lock. */
GC_thread GC_new_thread(pthread_t id)
{
- int hv = ((word)id) % THREAD_TABLE_SZ;
+ int hv = ((unsigned long)id) % THREAD_TABLE_SZ;
GC_thread result;
static GC_bool first_thread_used = FALSE;
}
if (result == 0) return(0);
result -> id = id;
+#ifdef PLATFORM_ANDROID
+ result -> kernel_id = gettid();
+#endif
result -> next = GC_threads[hv];
GC_threads[hv] = result;
+#ifdef NACL
+ nacl_gc_thread_self = result;
+ nacl_initialize_gc_thread();
+#endif
GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
return(result);
}
/* Caller holds allocation lock. */
void GC_delete_thread(pthread_t id)
{
- int hv = ((word)id) % THREAD_TABLE_SZ;
+ int hv = ((unsigned long)id) % THREAD_TABLE_SZ;
register GC_thread p = GC_threads[hv];
register GC_thread prev = 0;
+#ifdef NACL
+ nacl_shutdown_gc_thread();
+ nacl_gc_thread_self = NULL;
+#endif
+
while (!pthread_equal(p -> id, id)) {
prev = p;
p = p -> next;
} else {
prev -> next = p -> next;
}
+
+#ifdef GC_DARWIN_THREADS
+ mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread);
+#endif
+
GC_INTERNAL_FREE(p);
}
/* This is OK, but we need a way to delete a specific one. */
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
- int hv = ((word)id) % THREAD_TABLE_SZ;
+ int hv = ((unsigned long)id) % THREAD_TABLE_SZ;
register GC_thread p = GC_threads[hv];
register GC_thread prev = 0;
} else {
prev -> next = p -> next;
}
+
+#ifdef GC_DARWIN_THREADS
+ mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread);
+#endif
+
GC_INTERNAL_FREE(p);
}
/* return the most recent one. */
GC_thread GC_lookup_thread(pthread_t id)
{
- int hv = ((word)id) % THREAD_TABLE_SZ;
+ int hv = ((unsigned long)id) % THREAD_TABLE_SZ;
register GC_thread p = GC_threads[hv];
while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
return ptr ? 1 : 0;
}
+/* Record the stack and (signal) altstack bounds for the calling thread */
+/* in its GC_thread descriptor.  If the thread is not yet known to the  */
+/* collector (i.e. we are called before GC_thr_init()), stash the       */
+/* values in the main_* statics; GC_thr_init() later copies them into   */
+/* the main thread's descriptor.                                        */
+void GC_register_altstack (void *stack, int stack_size, void *altstack, int altstack_size)
+{
+	GC_thread thread;
+
+	LOCK();
+	thread = (void *)GC_lookup_thread(pthread_self());
+	if (thread) {
+		thread->stack = stack;
+		thread->stack_size = stack_size;
+		thread->altstack = altstack;
+		thread->altstack_size = altstack_size;
+	} else {
+		/*
+		 * This happens if we are called before GC_thr_init ().
+		 */
+		main_pthread_self = pthread_self ();
+		main_stack = stack;
+		main_stack_size = stack_size;
+		main_altstack = altstack;
+		main_altstack_size = altstack_size;
+	}
+	UNLOCK();
+}
+
#ifdef HANDLE_FORK
/* Remove all entries from the GC_threads table, except the */
/* one for the current thread. We need to do this in the child */
GC_destroy_thread_local(p);
}
# endif /* THREAD_LOCAL_ALLOC */
- if (p != &first_thread) GC_INTERNAL_FREE(p);
+ if (p != &first_thread) GC_INTERNAL_FREE(p);
}
}
GC_threads[hv] = me;
}
+ GC_INTERNAL_FREE(p);
}
#endif /* HANDLE_FORK */
/* Return the number of processors, or i<= 0 if it can't be determined. */
int GC_get_nprocs()
{
+#ifndef NACL
/* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that */
/* appears to be buggy in many cases. */
/* We look for lines "cpu<n>" in /proc/stat. */
}
close(f);
return result;
+#else /* NACL */
+ return sysconf(_SC_NPROCESSORS_ONLN);
+#endif
}
#endif /* GC_LINUX_THREADS */
t -> stop_info.stack_ptr = (ptr_t)(&dummy);
# endif
t -> flags = DETACHED | MAIN_THREAD;
+ if (pthread_self () == main_pthread_self) {
+ t->stack = main_stack;
+ t->stack_size = main_stack_size;
+ t->altstack = main_altstack;
+ t->altstack_size = main_altstack_size;
+ }
GC_stop_init();
GC_nprocs = sysconf(_SC_NPROC_ONLN);
if (GC_nprocs <= 0) GC_nprocs = 1;
# endif
-# if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS)
+# if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS) || defined(GC_NETBSD_THREADS) || defined(GC_OPENBSD_THREADS)
int ncpus = 1;
size_t len = sizeof(ncpus);
sysctl((int[2]) {CTL_HW, HW_NCPU}, 2, &ncpus, &len, NULL, 0);
GC_markers = atoi(markers_string);
} else {
GC_markers = GC_nprocs;
+ if (GC_markers > MAX_MARKERS)
+ GC_markers = MAX_MARKERS;
}
}
# endif
}
-#if !defined(GC_DARWIN_THREADS)
+#if !defined(GC_DARWIN_THREADS) && !defined(GC_OPENBSD_THREADS)
+#ifndef NACL
int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset)
{
sigset_t fudged_set;
}
return(REAL_FUNC(pthread_sigmask)(how, set, oset));
}
+#endif
#endif /* !GC_DARWIN_THREADS */
/* Wrappers for functions that are likely to block for an appreciable */
me = GC_lookup_thread(pthread_self());
GC_destroy_thread_local(me);
if (me -> flags & DETACHED) {
+# ifdef THREAD_LOCAL_ALLOC
+ /* NULL out the tls key to prevent the dtor function from being called */
+ if (0 != GC_setspecific(GC_thread_key, NULL))
+ ABORT("Failed to set thread specific allocation pointers");
+#endif
GC_delete_thread(pthread_self());
} else {
me -> flags |= FINISHED;
return result;
}
+#ifdef NACL
+/* Thin pass-through wrapper so pthread_exit can be interposed on NaCl; */
+/* it adds no GC bookkeeping of its own.                                */
+/* TODO: remove, NaCl glibc now supports pthread cleanup functions. */
+void
+WRAP_FUNC(pthread_exit)(void *status)
+{
+    REAL_FUNC(pthread_exit)(status);
+}
+#endif
+
int
WRAP_FUNC(pthread_detach)(pthread_t thread)
{
if (start) *start = si -> start_routine;
if (start_arg) *start_arg = si -> arg;
- sem_post(&(si -> registered)); /* Last action on si. */
+ if (!(si->flags & FOREIGN_THREAD))
+ sem_post(&(si -> registered)); /* Last action on si. */
/* OK to deallocate. */
# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
LOCK();
return me;
}
-int GC_thread_register_foreign (void *base_addr)
+/* GC >= 7 API shim: in v7 this must be called before threads created   */
+/* outside the GC wrappers may register themselves.  This pre-v7        */
+/* collector always permits registration, so it does nothing.           */
+void GC_allow_register_threads (void)
+{
+    /* No-op for GC pre-v7. */
+}
+
+/* GC >= 7 compatible entry point: register the calling (foreign)       */
+/* thread with the collector using the caller-supplied stack base.      */
+/* Returns GC_DUPLICATE if the thread is already registered,            */
+/* GC_SUCCESS otherwise.                                                */
+int GC_register_my_thread (struct GC_stack_base *sb)
{
    struct start_info si = { 0, }; /* stacked for legibility & locking */
    GC_thread me;
#   ifdef DEBUG_THREADS
-    GC_printf1( "GC_thread_register_foreign %p\n", &si );
+    GC_printf1( "GC_register_my_thread %p\n", &si );
#   endif
    si.flags = FOREIGN_THREAD;
    if (!parallel_initialized) GC_init_parallel();
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
-
+    /* Check for an existing registration before creating a new one.    */
+    me = GC_lookup_thread(pthread_self());
    UNLOCK();
+    if (me != NULL)
+      return GC_DUPLICATE;
-    me = GC_start_routine_head(&si, base_addr, NULL, NULL);
-
-    return me != NULL;
+    (void)GC_start_routine_head(&si, sb -> mem_base, NULL, NULL);
+    return GC_SUCCESS;
}
void * GC_start_routine(void * arg)