--> Macro replaced by USE_COMPILER_TLS
# endif
+#ifndef USE_COMPILER_TLS
# if (defined(GC_DGUX386_THREADS) || defined(GC_OSF1_THREADS) || \
- defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS)) \
- && !defined(USE_PTHREAD_SPECIFIC)
+ defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS)) || \
+ defined(GC_NETBSD_THREADS) && !defined(USE_PTHREAD_SPECIFIC) || \
+ defined(GC_FREEBSD_THREADS) && !defined(USE_PTHREAD_SPECIFIC) || \
+ defined(GC_OPENBSD_THREADS)
# define USE_PTHREAD_SPECIFIC
# endif
+#endif
# if defined(GC_DGUX386_THREADS) && !defined(_POSIX4A_DRAFT10_SOURCE)
# define _POSIX4A_DRAFT10_SOURCE 1
typedef pthread_key_t GC_key_t;
# endif
# if defined(USE_COMPILER_TLS)
+/* Note: sles9 gcc on powerpc gets confused by the macro that both sets GC_thread_tls
+ * and calls pthread_setspecific, so we actually use a static inline function declared
+ * below that is equivalent to:
+ * #define GC_setspecific(key, v) (GC_thread_tls = (v), pthread_setspecific ((key), (v)))
+ */
# define GC_getspecific(x) (GC_thread_tls)
-# define GC_setspecific(key, v) (GC_thread_tls = (v), pthread_setspecific ((key), (v)))
# define GC_key_create pthread_key_create
typedef pthread_key_t GC_key_t;
# endif
# include <sys/sysctl.h>
#endif /* GC_DARWIN_THREADS */
+#if defined(GC_NETBSD_THREADS) || defined(GC_OPENBSD_THREADS)
+# include <sys/param.h>
+# include <sys/sysctl.h>
+#endif
+
#if defined(GC_DGUX386_THREADS)
# endif
# undef pthread_join
# undef pthread_detach
+# if defined(NACL)
+# undef pthread_exit
+# endif
# if defined(GC_OSF1_THREADS) && defined(_PTHREAD_USE_MANGLED_NAMES_) \
&& !defined(_PTHREAD_USE_PTDNAM_)
/* Restore the original mangled names on Tru64 UNIX. */
void GC_init_parallel();
+static pthread_t main_pthread_self;
+static void *main_stack, *main_altstack;
+static int main_stack_size, main_altstack_size;
+
# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
/* We don't really support thread-local allocation with DBG_HDRS_ALL */
GC_key_t GC_thread_key;
#ifdef USE_COMPILER_TLS
-static __thread MONO_TLS_FAST void* GC_thread_tls;
+__thread MONO_TLS_FAST void* GC_thread_tls;
+
+/*
+ * gcc errors out with /tmp/ccdPMFuq.s:2994: Error: symbol `.LTLS4' is already defined
+ * if the inline is added on powerpc
+ */
+/* Update the compiler-TLS cache and the real pthread-specific slot in     */
+/* one step.  Returns pthread_setspecific()'s result (0 on success).      */
+/* Not inlined on powerpc: gcc there duplicates the TLS label symbol.     */
+#if !defined(__ppc__) && !defined(__powerpc__)
+inline
+#endif
+static int GC_setspecific (GC_key_t key, void *value) {
+  GC_thread_tls = value;
+  return pthread_setspecific (key, value);
+}
#endif
static GC_bool keys_initialized;
static struct GC_Thread_Rep first_thread;
+#ifdef NACL
+extern volatile int nacl_thread_parked[MAX_NACL_GC_THREADS];
+extern volatile int nacl_thread_used[MAX_NACL_GC_THREADS];
+extern volatile int nacl_thread_parking_inited;
+extern volatile int nacl_num_gc_threads;
+extern pthread_mutex_t nacl_thread_alloc_lock;
+extern __thread int nacl_thread_idx;
+extern __thread GC_thread nacl_gc_thread_self;
+
+extern void nacl_pre_syscall_hook();
+extern void nacl_post_syscall_hook();
+extern void nacl_register_gc_hooks(void (*pre)(), void (*post)());
+
+#include <stdio.h>
+
+/* Minimal mirror of the NaCl IRT "blockhook" interface table; populated  */
+/* by nacl_interface_query("nacl-irt-blockhook-0.1", ...) below.          */
+struct nacl_irt_blockhook {
+  int (*register_block_hooks)(void (*pre)(void), void (*post)(void));
+};
+
+extern size_t nacl_interface_query(const char *interface_ident,
+ void *table, size_t tablesize);
+
+/* Claim a per-thread parking slot for the calling thread and, on first    */
+/* call, install the NaCl IRT block hooks used to park threads around      */
+/* blocking syscalls during collection.  Serialized by                     */
+/* nacl_thread_alloc_lock; the claimed index is stored in the thread-local */
+/* nacl_thread_idx.  Paired with nacl_shutdown_gc_thread() at thread exit. */
+void nacl_initialize_gc_thread()
+{
+    int i;
+    static struct nacl_irt_blockhook gc_hook;
+
+    pthread_mutex_lock(&nacl_thread_alloc_lock);
+    if (!nacl_thread_parking_inited)
+    {
+        for (i = 0; i < MAX_NACL_GC_THREADS; i++) {
+            nacl_thread_used[i] = 0;
+            nacl_thread_parked[i] = 0;
+        }
+        // TODO: replace with public 'register hook' function when
+        // available from glibc
+        nacl_interface_query("nacl-irt-blockhook-0.1", &gc_hook, sizeof(gc_hook));
+        gc_hook.register_block_hooks(nacl_pre_syscall_hook, nacl_post_syscall_hook);
+        nacl_thread_parking_inited = 1;
+    }
+    /* The assert guarantees the slot search below finds a free entry.     */
+    GC_ASSERT(nacl_num_gc_threads <= MAX_NACL_GC_THREADS);
+    for (i = 0; i < MAX_NACL_GC_THREADS; i++) {
+        if (nacl_thread_used[i] == 0) {
+            nacl_thread_used[i] = 1;
+            nacl_thread_idx = i;
+            nacl_num_gc_threads++;
+            break;
+        }
+    }
+    pthread_mutex_unlock(&nacl_thread_alloc_lock);
+}
+
+/* Release the calling thread's parking slot (inverse of                   */
+/* nacl_initialize_gc_thread).  Serialized by nacl_thread_alloc_lock.      */
+void nacl_shutdown_gc_thread()
+{
+    pthread_mutex_lock(&nacl_thread_alloc_lock);
+    GC_ASSERT(nacl_thread_idx >= 0 && nacl_thread_idx < MAX_NACL_GC_THREADS);
+    GC_ASSERT(nacl_thread_used[nacl_thread_idx] != 0);
+    nacl_thread_used[nacl_thread_idx] = 0;
+    nacl_thread_idx = -1;
+    nacl_num_gc_threads--;
+    pthread_mutex_unlock(&nacl_thread_alloc_lock);
+}
+
+#endif /* NACL */
+
/* Add a thread to GC_threads. We assume it wasn't already there. */
/* Caller holds allocation lock. */
GC_thread GC_new_thread(pthread_t id)
{
- int hv = ((word)id) % THREAD_TABLE_SZ;
+ int hv = ((unsigned long)id) % THREAD_TABLE_SZ;
GC_thread result;
static GC_bool first_thread_used = FALSE;
}
if (result == 0) return(0);
result -> id = id;
+#ifdef PLATFORM_ANDROID
+ result -> kernel_id = gettid();
+#endif
result -> next = GC_threads[hv];
GC_threads[hv] = result;
+#ifdef NACL
+ nacl_gc_thread_self = result;
+ nacl_initialize_gc_thread();
+#endif
GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
return(result);
}
/* Caller holds allocation lock. */
void GC_delete_thread(pthread_t id)
{
- int hv = ((word)id) % THREAD_TABLE_SZ;
+ int hv = ((unsigned long)id) % THREAD_TABLE_SZ;
register GC_thread p = GC_threads[hv];
register GC_thread prev = 0;
+#ifdef NACL
+ nacl_shutdown_gc_thread();
+ nacl_gc_thread_self = NULL;
+#endif
+
while (!pthread_equal(p -> id, id)) {
prev = p;
p = p -> next;
/* This is OK, but we need a way to delete a specific one. */
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
- int hv = ((word)id) % THREAD_TABLE_SZ;
+ int hv = ((unsigned long)id) % THREAD_TABLE_SZ;
register GC_thread p = GC_threads[hv];
register GC_thread prev = 0;
/* return the most recent one. */
GC_thread GC_lookup_thread(pthread_t id)
{
- int hv = ((word)id) % THREAD_TABLE_SZ;
+ int hv = ((unsigned long)id) % THREAD_TABLE_SZ;
register GC_thread p = GC_threads[hv];
while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
return ptr ? 1 : 0;
}
+/* Record the calling thread's normal stack and signal altstack bounds so  */
+/* the collector can scan the correct ranges.  If no GC_thread entry       */
+/* exists yet (we were called before GC_thr_init()), the values are        */
+/* stashed in the main_* statics and copied onto the main thread's entry   */
+/* by GC_thr_init() later.  Takes the allocation lock.                     */
+void GC_register_altstack (void *stack, int stack_size, void *altstack, int altstack_size)
+{
+	GC_thread thread;
+
+	LOCK();
+	thread = (void *)GC_lookup_thread(pthread_self());
+	if (thread) {
+		thread->stack = stack;
+		thread->stack_size = stack_size;
+		thread->altstack = altstack;
+		thread->altstack_size = altstack_size;
+	} else {
+		/*
+		 * This happens if we are called before GC_thr_init ().
+		 */
+		main_pthread_self = pthread_self ();
+		main_stack = stack;
+		main_stack_size = stack_size;
+		main_altstack = altstack;
+		main_altstack_size = altstack_size;
+	}
+	UNLOCK();
+}
+
#ifdef HANDLE_FORK
/* Remove all entries from the GC_threads table, except the */
/* one for the current thread. We need to do this in the child */
/* Return the number of processors, or i<= 0 if it can't be determined. */
int GC_get_nprocs()
{
+#ifndef NACL
/* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that */
/* appears to be buggy in many cases. */
/* We look for lines "cpu<n>" in /proc/stat. */
}
close(f);
return result;
+#else /* NACL */
+ return sysconf(_SC_NPROCESSORS_ONLN);
+#endif
}
#endif /* GC_LINUX_THREADS */
t -> flags = DETACHED | MAIN_THREAD;
#ifdef MONO_DEBUGGER_SUPPORTED
if (gc_thread_vtable && gc_thread_vtable->thread_created)
- gc_thread_vtable->thread_created (pthread_self (), &t->stop_info.stack_ptr);
+# ifdef GC_DARWIN_THREADS
+ gc_thread_vtable->thread_created (mach_thread_self (), &t->stop_info.stack_ptr);
+# else
+ gc_thread_vtable->thread_created (pthread_self (), &t->stop_info.stack_ptr);
+# endif
#endif
+ if (pthread_self () == main_pthread_self) {
+ t->stack = main_stack;
+ t->stack_size = main_stack_size;
+ t->altstack = main_altstack;
+ t->altstack_size = main_altstack_size;
+ }
GC_stop_init();
GC_nprocs = sysconf(_SC_NPROC_ONLN);
if (GC_nprocs <= 0) GC_nprocs = 1;
# endif
-# if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS)
+# if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS) || defined(GC_NETBSD_THREADS) || defined(GC_OPENBSD_THREADS)
int ncpus = 1;
size_t len = sizeof(ncpus);
sysctl((int[2]) {CTL_HW, HW_NCPU}, 2, &ncpus, &len, NULL, 0);
GC_markers = atoi(markers_string);
} else {
GC_markers = GC_nprocs;
+ if (GC_markers > MAX_MARKERS)
+ GC_markers = MAX_MARKERS;
}
}
# endif
}
-#if !defined(GC_DARWIN_THREADS)
+#if !defined(GC_DARWIN_THREADS) && !defined(GC_OPENBSD_THREADS)
+#ifndef NACL
int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset)
{
sigset_t fudged_set;
}
return(REAL_FUNC(pthread_sigmask)(how, set, oset));
}
+#endif
#endif /* !GC_DARWIN_THREADS */
/* Wrappers for functions that are likely to block for an appreciable */
me = GC_lookup_thread(pthread_self());
GC_destroy_thread_local(me);
if (me -> flags & DETACHED) {
+# ifdef THREAD_LOCAL_ALLOC
+ /* NULL out the tls key to prevent the dtor function from being called */
+ if (0 != GC_setspecific(GC_thread_key, NULL))
+ ABORT("Failed to set thread specific allocation pointers");
+#endif
GC_delete_thread(pthread_self());
} else {
me -> flags |= FINISHED;
return result;
}
+#ifdef NACL
+/* TODO: remove, NaCl glibc now supports pthread cleanup functions. */
+/* Pass-through interception point for pthread_exit on NaCl; simply        */
+/* forwards to the real pthread_exit (pthread_exit is #undef'd under      */
+/* NACL above so the wrapper can be installed).                           */
+void
+WRAP_FUNC(pthread_exit)(void *status)
+{
+    REAL_FUNC(pthread_exit)(status);
+}
+#endif
+
int
WRAP_FUNC(pthread_detach)(pthread_t thread)
{
# endif /* IA64 */
#ifdef MONO_DEBUGGER_SUPPORTED
if (gc_thread_vtable && gc_thread_vtable->thread_created)
- gc_thread_vtable->thread_created (my_pthread, &me->stop_info.stack_ptr);
+# ifdef GC_DARWIN_THREADS
+ gc_thread_vtable->thread_created (mach_thread_self(), &me->stop_info.stack_ptr);
+# else
+ gc_thread_vtable->thread_created (my_pthread, &me->stop_info.stack_ptr);
+# endif
#endif
UNLOCK();
if (start) *start = si -> start_routine;
if (start_arg) *start_arg = si -> arg;
- sem_post(&(si -> registered)); /* Last action on si. */
+ if (!(si->flags & FOREIGN_THREAD))
+ sem_post(&(si -> registered)); /* Last action on si. */
/* OK to deallocate. */
# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
LOCK();