2 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
3 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
4 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
5 * Copyright (c) 2000-2009 by Hewlett-Packard Development Company.
8 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
11 * Permission is hereby granted to use or copy this program
12 * for any purpose, provided the above notices are retained on all copies.
13 * Permission to modify the code and to distribute modified code is granted,
14 * provided the above notices are retained, and a notice that the code was
15 * modified is included with the above copyright notice.
18 #include "private/pthread_support.h"
20 #if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS) && \
21 !defined(GC_DARWIN_THREADS)
/* State for the NaCl (Native Client) cooperative thread-parking       */
/* scheme, which replaces signal-based suspension on that platform.    */
28 STATIC int GC_nacl_num_gc_threads = 0;
                        /* Count of threads currently registered with the GC. */
29 STATIC __thread int GC_nacl_thread_idx = -1;
                        /* This thread's slot in the parked/used arrays; -1 if */
                        /* the thread is not registered.                       */
30 STATIC int GC_nacl_park_threads_now = 0;
                        /* Nonzero while the collector wants all other threads */
                        /* parked (set in GC_stop_world, cleared on restart).  */
31 STATIC pthread_t GC_nacl_thread_parker = -1;
                        /* Identity of the thread doing the stopping, so it    */
                        /* never tries to park itself.                         */
                        /* NOTE(review): initializing a pthread_t with -1 is   */
                        /* non-portable (pthread_t may be a struct) -- confirm */
                        /* this file is only built where pthread_t is integral. */
33 GC_INNER __thread GC_thread GC_nacl_gc_thread_self = NULL;
                        /* Cached GC_thread descriptor for the current thread. */
35 int GC_nacl_thread_parked[MAX_NACL_GC_THREADS];
                        /* Per-slot flag: 1 while that thread is parked.       */
36 int GC_nacl_thread_used[MAX_NACL_GC_THREADS];
                        /* Per-slot flag: 1 if the slot is allocated.          */
38 #elif !defined(GC_OPENBSD_THREADS)
41 #include <semaphore.h>
44 #include "atomic_ops.h"
46 /* It's safe to call original pthread_sigmask() here. */
47 #undef pthread_sigmask
/* Provide NSIG (one past the highest signal number) on systems whose  */
/* headers do not define it, derived from whichever limit macro is     */
/* available.  NOTE(review): the surrounding #if/#elif chain is only   */
/* partially visible in this excerpt.                                  */
52 # define NSIG (MAXSIG+1)
55 # elif defined(__SIGRTMAX)
56 # define NSIG (__SIGRTMAX+1)
/* Debug aid (DEBUG_THREADS builds): print the set of signals blocked  */
/* in the calling thread.  Reads the current mask via a no-op          */
/* pthread_sigmask(SIG_BLOCK, NULL, ...) query; aborts if that fails.  */
/* NOTE(review): several interior lines of this function (local        */
/* declarations, braces, the per-signal print) are missing from this   */
/* excerpt -- do not edit without the full file.                       */
62 void GC_print_sig_mask(void)
67 if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
68 ABORT("pthread_sigmask failed");
69 GC_printf("Blocked: ");
/* Signal numbers start at 1; NSIG is one past the largest. */
70 for (i = 1; i < NSIG; i++) {
71 if (sigismember(&blocked, i))
76 #endif /* DEBUG_THREADS */
78 /* Remove the signals that we want to allow in thread stopping */
79 /* handler from a set. */
/* Used both for the suspend handler's sa_mask and for the mask        */
/* passed to sigsuspend, so that user-visible termination signals      */
/* (and, conditionally, fault signals needed by incremental GC page    */
/* protection) keep being delivered while a thread is stopped.         */
/* Aborts if any sigdelset() call fails.                               */
80 STATIC void GC_remove_allowed_signals(sigset_t *set)
82 if (sigdelset(set, SIGINT) != 0
83 || sigdelset(set, SIGQUIT) != 0
84 || sigdelset(set, SIGABRT) != 0
85 || sigdelset(set, SIGTERM) != 0) {
86 ABORT("sigdelset() failed");
/* NOTE(review): the conditional (#if) guarding this second group is   */
/* missing from this excerpt; upstream allows SIGSEGV/SIGBUS only when */
/* MPROTECT_VDB is in use -- verify against the full file.             */
90 /* Handlers write to the thread structure, which is in the heap, */
91 /* and hence can trigger a protection fault. */
92 if (sigdelset(set, SIGSEGV) != 0
94 || sigdelset(set, SIGBUS) != 0
97 ABORT("sigdelset() failed");
/* Signal mask a suspended thread waits on inside sigsuspend();        */
/* built in GC_stop_init (everything blocked except SIG_THR_RESTART    */
/* and the "allowed" signals).                                         */
102 static sigset_t suspend_handler_mask;
104 STATIC volatile AO_t GC_stop_count = 0;
105 /* Incremented at the beginning of GC_stop_world. */
                        /* Per-thread last_stop_count is compared against this */
                        /* to detect duplicate suspend signals.                */
107 STATIC volatile AO_t GC_world_is_stopped = FALSE;
108 /* FALSE ==> it is safe for threads to restart, i.e. */
109 /* they will see another suspend signal before they */
110 /* are expected to stop (unless they have voluntarily */
/* Whether GC_stop_world resends suspend signals that appear lost.     */
/* Default is on only for Tru64 (OSF1), which is known to drop them;   */
/* overridable at runtime via GC_RETRY_SIGNALS / GC_NO_RETRY_SIGNALS.  */
/* NOTE(review): the #else separating the two definitions is missing   */
/* from this excerpt.                                                  */
113 #ifdef GC_OSF1_THREADS
114 STATIC GC_bool GC_retry_signals = TRUE;
116 STATIC GC_bool GC_retry_signals = FALSE;
120 * We use signals to stop threads during GC.
122 * Suspended threads wait in signal handler for SIG_THR_RESTART.
123 * That's more portable than semaphores or condition variables.
124 * (We do use sem_post from a signal handler, but that should be portable.)
126 * The thread suspension signal SIG_SUSPEND is now defined in gc_priv.h.
127 * Note that we can't just stop a thread; we need it to save its stack
128 * pointer(s) and acknowledge.
/* Pick a default restart signal when the build did not define one:    */
/* a real-time signal where available (HP-UX/OSF1/NetBSD vs. others),  */
/* falling back to SIGXCPU.  NOTE(review): the #else/#endif lines of   */
/* this chain are missing from this excerpt.                           */
131 #ifndef SIG_THR_RESTART
132 # if defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS) \
133 || defined(GC_NETBSD_THREADS)
135 # define SIG_THR_RESTART _SIGRTMIN + 5
137 # define SIG_THR_RESTART SIGRTMIN + 5
140 # define SIG_THR_RESTART SIGXCPU
144 #ifdef GC_EXPLICIT_SIGNALS_UNBLOCK
145 /* Some targets (eg., Solaris) might require this to be called when */
146 /* doing thread registering from the thread destructor. */
/* Unblock the two GC control signals (SIG_SUSPEND, SIG_THR_RESTART)   */
/* in the calling thread's mask so a late-registered thread can still  */
/* be stopped.  Aborts if pthread_sigmask fails.  NOTE(review): the    */
/* sigemptyset(&set) initialization is missing from this excerpt.      */
147 GC_INNER void GC_unblock_gc_signals(void)
151 sigaddset(&set, SIG_SUSPEND);
152 sigaddset(&set, SIG_THR_RESTART);
153 if (pthread_sigmask(SIG_UNBLOCK, &set, NULL) != 0)
154 ABORT("pthread_sigmask failed");
156 #endif /* GC_EXPLICIT_SIGNALS_UNBLOCK */
/* Posted once by each thread's suspend handler after it has saved     */
/* its stack pointer; GC_stop_world waits on it n_live_threads times.  */
/* sem_post is async-signal-safe, which is why a semaphore is used.    */
158 STATIC sem_t GC_suspend_ack_sem;
160 #ifdef GC_NETBSD_THREADS
161 # define GC_NETBSD_THREADS_WORKAROUND
162 /* It seems to be necessary to wait until threads have restarted. */
163 /* But it is unclear why that is the case. */
/* Posted by GC_restart_handler; GC_start_world waits on it (NetBSD). */
164 STATIC sem_t GC_restart_ack_sem;
167 STATIC void GC_suspend_handler_inner(ptr_t sig_arg, void *context);
/* SIG_SUSPEND handler: thin wrapper that forwards to                  */
/* GC_suspend_handler_inner, preserving errno across the handler.      */
/* On IA64/HP_PA/M68K the callee-save registers are first pushed onto  */
/* the stack (GC_with_callee_saves_pushed) so they get scanned; on     */
/* other targets the signal frame is assumed to hold the full context. */
/* NOTE(review): the SA_SIGINFO vs. plain-handler #if/#else around the */
/* two prototypes, and the errno-restore lines, are missing from this  */
/* excerpt.                                                            */
171 STATIC void GC_suspend_handler(int sig, siginfo_t *info, void *context)
173 STATIC void GC_suspend_handler(int sig)
176 # if defined(IA64) || defined(HP_PA) || defined(M68K)
177 int old_errno = errno;
178 GC_with_callee_saves_pushed(GC_suspend_handler_inner, (ptr_t)(word)sig);
181 /* We believe that in all other cases the full context is already */
182 /* in the signal handler frame. */
183 int old_errno = errno;
187 GC_suspend_handler_inner((ptr_t)(word)sig, context);
/* Core of the SIG_SUSPEND handler.  Records this thread's stack       */
/* pointer (and IA64 backing store pointer) in its GC_thread entry,    */
/* acknowledges via sem_post(&GC_suspend_ack_sem), then blocks in      */
/* sigsuspend() until GC_start_world flips GC_world_is_stopped (or     */
/* bumps GC_stop_count) and sends SIG_THR_RESTART.  Runs entirely in   */
/* async-signal context: only async-signal-safe calls are permitted.   */
/* NOTE(review): numerous interior lines (braces, #else branches, the  */
/* local 'me' declaration, the do { ... } opening) are missing from    */
/* this excerpt.                                                       */
193 STATIC void GC_suspend_handler_inner(ptr_t sig_arg, void *context)
195 pthread_t self = pthread_self();
197 IF_CANCEL(int cancel_state;)
/* Snapshot the global stop epoch; used below to detect duplicate */
/* signals and stale restarts.                                    */
198 AO_t my_stop_count = AO_load(&GC_stop_count);
200 if ((signed_word)sig_arg != SIG_SUSPEND)
201 ABORT("Bad signal in suspend_handler");
203 DISABLE_CANCEL(cancel_state);
204 /* pthread_setcancelstate is not defined to be async-signal-safe. */
205 /* But the glibc version appears to be safe in the absence of */
206 /* asynchronous cancellation. And since this signal handler needs */
207 /* to block on sigsuspend, which is both async-signal-safe */
208 /* and a cancellation point, there seems to be no obvious way */
209 /* out of it. In fact, it looks to me like an async-signal-safe */
210 /* cancellation point is inherently a problem, unless there is */
211 /* some way to disable cancellation in the handler. */
212 # ifdef DEBUG_THREADS
213 GC_log_printf("Suspending 0x%x\n", (unsigned)self);
216 me = GC_lookup_thread(self);
217 /* The lookup here is safe, since I'm doing this on behalf */
218 /* of a thread which holds the allocation lock in order */
219 /* to stop the world. Thus concurrent modification of the */
220 /* data structure is impossible. */
221 if (me -> stop_info.last_stop_count == my_stop_count) {
222 /* Duplicate signal. OK if we are retrying. */
223 if (!GC_retry_signals) {
224 WARN("Duplicate suspend signal in thread %p\n", self);
/* Duplicate: do not ack or park again; just unwind. */
226 RESTORE_CANCEL(cancel_state);
/* Save the hot end of the stack.  NOTE(review): the SPARC/IA64 vs. */
/* generic #if/#else around these two assignments is missing here.  */
230 me -> stop_info.stack_ptr = GC_save_regs_in_stack();
232 me -> stop_info.stack_ptr = GC_approx_sp();
235 me -> backing_store_ptr = GC_save_regs_in_stack();
238 /* Tell the thread that wants to stop the world that this */
239 /* thread has been stopped. Note that sem_post() is */
240 /* the only async-signal-safe primitive in LinuxThreads. */
241 sem_post(&GC_suspend_ack_sem);
242 me -> stop_info.last_stop_count = my_stop_count;
244 /* Wait until that thread tells us to restart by sending */
245 /* this thread a SIG_THR_RESTART signal. */
246 /* SIG_THR_RESTART should be masked at this point. Thus */
247 /* there is no race. */
248 /* We do not continue until we receive a SIG_THR_RESTART, */
249 /* but we do not take that as authoritative. (We may be */
250 /* accidentally restarted by one of the user signals we */
251 /* don't block.) After we receive the signal, we use a */
252 /* primitive and expensive mechanism to wait until it's */
253 /* really safe to proceed. Under normal circumstances, */
254 /* this code should not be executed. */
256 sigsuspend (&suspend_handler_mask);
257 } while (AO_load_acquire(&GC_world_is_stopped)
258 && AO_load(&GC_stop_count) == my_stop_count);
259 /* If the RESTART signal gets lost, we can still lose. That should */
260 /* be less likely than losing the SUSPEND signal, since we don't do */
261 /* much between the sem_post and sigsuspend. */
262 /* We'd need more handshaking to work around that. */
263 /* Simply dropping the sigsuspend call should be safe, but is */
264 /* unlikely to be efficient. */
266 # ifdef DEBUG_THREADS
267 GC_log_printf("Continuing 0x%x\n", (unsigned)self);
269 RESTORE_CANCEL(cancel_state);
/* SIG_THR_RESTART handler.  Deliberately (almost) a no-op: its only   */
/* job is to interrupt the sigsuspend() in the suspend handler.  On    */
/* NetBSD it additionally posts GC_restart_ack_sem so GC_start_world   */
/* can wait for all threads to resume.  errno is preserved where the   */
/* handler does observable work.  NOTE(review): braces and the errno   */
/* restore are missing from this excerpt.                              */
272 STATIC void GC_restart_handler(int sig)
274 # if defined(DEBUG_THREADS) || defined(GC_NETBSD_THREADS_WORKAROUND)
275 int old_errno = errno; /* Preserve errno value. */
278 if (sig != SIG_THR_RESTART) ABORT("Bad signal in suspend_handler");
280 # ifdef GC_NETBSD_THREADS_WORKAROUND
281 sem_post(&GC_restart_ack_sem);
285 ** Note: even if we don't do anything useful here,
286 ** it would still be necessary to have a signal handler,
287 ** rather than ignoring the signals, otherwise
288 ** the signals will not be delivered at all, and
289 ** will thus not interrupt the sigsuspend() above.
292 # ifdef DEBUG_THREADS
293 GC_log_printf("In GC_restart_handler for 0x%x\n",
294 (unsigned)pthread_self());
296 # if defined(DEBUG_THREADS) || defined(GC_NETBSD_THREADS_WORKAROUND)
301 #endif /* !GC_OPENBSD_THREADS && !NACL */
/* IF_IA64(x) expands to x only on IA64 (the empty #else arm is        */
/* missing from this excerpt).                                         */
304 # define IF_IA64(x) x
308 /* We hold allocation lock. Should do exactly the right thing if the */
309 /* world is stopped. Should not fail if it isn't. */
/* Walks every GC_thread in the hash table and pushes its stack        */
/* ([lo,hi) per stack growth direction) -- plus the IA64 register      */
/* backing store and the NaCl saved-register block -- as GC roots.     */
/* For the calling thread the live sp is taken directly; for stopped   */
/* threads the sp recorded by the suspend handler is used.  Updates    */
/* GC_total_stacksize and aborts if the current thread is unknown or   */
/* a stopped thread never recorded its sp.  NOTE(review): many         */
/* interior lines (locals lo/hi/i/nthreads/total_size, #else arms,     */
/* found_me/nthreads updates, closing braces) are missing from this    */
/* excerpt.                                                            */
310 GC_INNER void GC_push_all_stacks(void)
312 GC_bool found_me = FALSE;
317 /* On IA64, we also need to scan the register backing store. */
318 IF_IA64(ptr_t bs_lo; ptr_t bs_hi;)
319 pthread_t self = pthread_self();
322 if (!GC_thr_initialized) GC_thr_init();
323 # ifdef DEBUG_THREADS
324 GC_log_printf("Pushing stacks from thread 0x%x\n", (unsigned)self);
326 for (i = 0; i < THREAD_TABLE_SZ; i++) {
327 for (p = GC_threads[i]; p != 0; p = p -> next) {
328 if (p -> flags & FINISHED) continue;
330 if (THREAD_EQUAL(p -> id, self)) {
331 GC_ASSERT(!p->thread_blocked);
/* Our own thread: flush registers to the stack and read sp live. */
333 lo = (ptr_t)GC_save_regs_in_stack();
338 IF_IA64(bs_hi = (ptr_t)GC_save_regs_in_stack();)
/* Other threads: use the sp saved when they were suspended. */
340 lo = p -> stop_info.stack_ptr;
341 IF_IA64(bs_hi = p -> backing_store_ptr;)
343 if ((p -> flags & MAIN_THREAD) == 0) {
345 IF_IA64(bs_lo = p -> backing_store_end);
347 /* The original stack. */
349 IF_IA64(bs_lo = BACKING_STORE_BASE;)
351 # ifdef DEBUG_THREADS
352 GC_log_printf("Stack for thread 0x%x = [%p,%p)\n",
353 (unsigned)(p -> id), lo, hi);
355 if (0 == lo) ABORT("GC_push_all_stacks: sp not set!");
356 GC_push_all_stack_sections(lo, hi, p -> traced_stack_sect);
357 # ifdef STACK_GROWS_UP
358 total_size += lo - hi;
360 total_size += hi - lo; /* lo <= hi */
/* NaCl-only: registers were BCOPY'd into reg_storage at park time. */
363 /* Push reg_storage as roots, this will cover the reg context. */
364 GC_push_all_stack((ptr_t)p -> stop_info.reg_storage,
365 (ptr_t)(p -> stop_info.reg_storage + NACL_GC_REG_STORAGE_SIZE));
366 total_size += NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t);
369 # ifdef DEBUG_THREADS
370 GC_log_printf("Reg stack for thread 0x%x = [%p,%p)\n",
371 (unsigned)p -> id, bs_lo, bs_hi);
373 /* FIXME: This (if p->id==self) may add an unbounded number of */
374 /* entries, and hence overflow the mark stack, which is bad. */
375 GC_push_all_register_sections(bs_lo, bs_hi,
376 THREAD_EQUAL(p -> id, self),
377 p -> traced_stack_sect);
378 total_size += bs_hi - bs_lo; /* bs_lo <= bs_hi */
382 if (GC_print_stats == VERBOSE) {
383 GC_log_printf("Pushed %d thread stacks\n", (int)nthreads);
385 if (!found_me && !GC_in_thread_creation)
386 ABORT("Collecting from unknown thread");
387 GC_total_stacksize = total_size;
391 /* There seems to be a very rare thread stopping problem. To help us */
392 /* debug that, we save the ids of the stopping thread. */
393 pthread_t GC_stopping_thread;
394 int GC_stopping_pid = 0;
397 #ifdef PLATFORM_ANDROID
398 extern int tkill(pid_t tid, int sig); /* from sys/linux-unistd.h */
/* Android (bionic) lacks pthread_kill; emulate it with the tkill      */
/* syscall on the kernel thread id, preserving errno and returning     */
/* the error number (0 on success) like pthread_kill would.            */
/* NOTE(review): the ret/errno handling lines are missing from this    */
/* excerpt.                                                            */
400 static int android_thread_kill(pid_t tid, int sig)
403 int old_errno = errno;
405 ret = tkill(tid, sig);
413 #endif /* PLATFORM_ANDROID */
415 /* We hold the allocation lock. Suspend all threads that might */
416 /* still be running. Return the number of suspend signals that */
/* ... were sent (OpenBSD: number of threads suspended).  Skips the    */
/* current thread, FINISHED threads, voluntarily blocked threads, and  */
/* (signal-based path) threads already acknowledged for this stop      */
/* epoch.  OpenBSD uses pthread_suspend_np + pthread_stackseg_np; the  */
/* NaCl build replaces all of this with cooperative parking: raise     */
/* GC_nacl_park_threads_now and poll the parked flags, sleeping        */
/* NACL_PARK_WAIT_NANOSECONDS between scans.  NOTE(review): the        */
/* #else/#endif structure, several locals (p, i, result, stack, ts,    */
/* num_used), the n_live_threads increments, and the nanosleep call    */
/* are missing from this excerpt.                                      */
418 STATIC int GC_suspend_all(void)
420 int n_live_threads = 0;
425 # ifndef GC_OPENBSD_THREADS
428 pthread_t self = pthread_self();
430 # ifdef DEBUG_THREADS
431 GC_stopping_thread = self;
432 GC_stopping_pid = getpid();
434 for (i = 0; i < THREAD_TABLE_SZ; i++) {
435 for (p = GC_threads[i]; p != 0; p = p -> next) {
436 if (!THREAD_EQUAL(p -> id, self)) {
437 if (p -> flags & FINISHED) continue;
438 if (p -> thread_blocked) /* Will wait */ continue;
439 # ifndef GC_OPENBSD_THREADS
/* Already stopped in this epoch (possible when retrying). */
440 if (p -> stop_info.last_stop_count == GC_stop_count) continue;
443 # ifdef DEBUG_THREADS
444 GC_log_printf("Sending suspend signal to 0x%x\n",
445 (unsigned)(p -> id));
448 # ifdef GC_OPENBSD_THREADS
451 if (pthread_suspend_np(p -> id) != 0)
452 ABORT("pthread_suspend_np failed");
453 if (pthread_stackseg_np(p->id, &stack))
454 ABORT("pthread_stackseg_np failed");
/* ss_sp is the stack top on OpenBSD; the base is top - size. */
455 p -> stop_info.stack_ptr = (ptr_t)stack.ss_sp - stack.ss_size;
458 # ifndef PLATFORM_ANDROID
459 result = pthread_kill(p -> id, SIG_SUSPEND);
461 result = android_thread_kill(p -> kernel_id, SIG_SUSPEND);
465 /* Not really there anymore. Possible? */
471 ABORT("pthread_kill failed");
479 # ifndef NACL_PARK_WAIT_NANOSECONDS
480 # define NACL_PARK_WAIT_NANOSECONDS (100 * 1000)
482 # ifdef DEBUG_THREADS
483 GC_log_printf("pthread_stop_world: num_threads %d\n",
484 GC_nacl_num_gc_threads - 1);
/* Publish the park request; syscall hooks and the suspend check */
/* in running threads will observe it and park themselves.       */
486 GC_nacl_thread_parker = pthread_self();
487 GC_nacl_park_threads_now = 1;
488 # ifdef DEBUG_THREADS
489 GC_stopping_thread = GC_nacl_thread_parker;
490 GC_stopping_pid = getpid();
494 int num_threads_parked = 0;
498 /* Check the 'parked' flag for each thread the GC knows about. */
499 for (i = 0; i < MAX_NACL_GC_THREADS
500 && num_used < GC_nacl_num_gc_threads; i++) {
501 if (GC_nacl_thread_used[i] == 1) {
503 if (GC_nacl_thread_parked[i] == 1) {
504 num_threads_parked++;
508 /* -1 for the current thread. */
509 if (num_threads_parked >= GC_nacl_num_gc_threads - 1)
512 ts.tv_nsec = NACL_PARK_WAIT_NANOSECONDS;
513 # ifdef DEBUG_THREADS
514 GC_log_printf("Sleep waiting for %d threads to park...\n",
515 GC_nacl_num_gc_threads - num_threads_parked - 1);
517 /* This requires _POSIX_TIMERS feature. */
521 return n_live_threads;
/* Stop every thread except the caller (which must hold the GC lock).  */
/* Signal-based path: bump GC_stop_count, publish GC_world_is_stopped  */
/* with release semantics, send SIG_SUSPEND via GC_suspend_all, and    */
/* wait on GC_suspend_ack_sem once per signalled thread.  With         */
/* GC_retry_signals, resends after ~RETRY_INTERVAL usecs of silence.   */
/* OpenBSD/NaCl: GC_suspend_all does all the work synchronously.       */
/* NOTE(review): several locals (n_live_threads, i, code, ack_count),  */
/* #else arms, the retry loop's usleep, and closing braces are         */
/* missing from this excerpt.                                          */
524 GC_INNER void GC_stop_world(void)
526 # if !defined(GC_OPENBSD_THREADS) && !defined(NACL)
531 GC_ASSERT(I_HOLD_LOCK());
532 # ifdef DEBUG_THREADS
533 GC_log_printf("Stopping the world from 0x%x\n", (unsigned)pthread_self());
536 /* Make sure all free list construction has stopped before we start. */
537 /* No new construction can start, since free list construction is */
538 /* required to acquire and release the GC lock before it starts, */
539 /* and we have the lock. */
540 # ifdef PARALLEL_MARK
542 GC_acquire_mark_lock();
543 GC_ASSERT(GC_fl_builder_count == 0);
544 /* We should have previously waited for it to become zero. */
546 # endif /* PARALLEL_MARK */
548 # if defined(GC_OPENBSD_THREADS) || defined(NACL)
549 (void)GC_suspend_all();
/* New stop epoch; handlers compare their saved count against it. */
551 AO_store(&GC_stop_count, GC_stop_count+1);
552 /* Only concurrent reads are possible. */
553 AO_store_release(&GC_world_is_stopped, TRUE);
554 n_live_threads = GC_suspend_all();
556 if (GC_retry_signals) {
557 unsigned long wait_usecs = 0; /* Total wait since retry. */
558 # define WAIT_UNIT 3000
559 # define RETRY_INTERVAL 100000
/* Poll the ack semaphore's value instead of blocking, so we can */
/* notice lost signals and resend them.                          */
563 sem_getvalue(&GC_suspend_ack_sem, &ack_count);
564 if (ack_count == n_live_threads) break;
565 if (wait_usecs > RETRY_INTERVAL) {
566 int newly_sent = GC_suspend_all();
568 if (GC_print_stats) {
569 GC_log_printf("Resent %d signals after timeout\n", newly_sent);
571 sem_getvalue(&GC_suspend_ack_sem, &ack_count);
572 if (newly_sent < n_live_threads - ack_count) {
573 WARN("Lost some threads during GC_stop_world?!\n",0);
574 n_live_threads = ack_count + newly_sent;
579 wait_usecs += WAIT_UNIT;
/* Consume one ack per live thread; all must have parked by now. */
583 for (i = 0; i < n_live_threads; i++) {
585 if (0 != (code = sem_wait(&GC_suspend_ack_sem))) {
586 /* On Linux, sem_wait is documented to always return zero. */
587 /* But the documentation appears to be incorrect. */
588 if (errno == EINTR) {
589 /* Seems to happen with some versions of gdb. */
592 ABORT("sem_wait for handler failed");
597 # ifdef PARALLEL_MARK
599 GC_release_mark_lock();
601 # ifdef DEBUG_THREADS
602 GC_log_printf("World stopped from 0x%x\n", (unsigned)pthread_self());
603 GC_stopping_thread = 0;
/* NACL_STORE_REGS(): spill the callee-saved registers onto the        */
/* current stack, record the resulting stack pointer in this thread's  */
/* stop_info.stack_ptr, BCOPY the spilled words into                   */
/* stop_info.reg_storage (scanned later by GC_push_all_stacks), then   */
/* pop the temporaries (NaCl 'naclasp' sandboxed add-to-rsp on x86-64, */
/* plain 'add' on x86).  NOTE(review): '%esp' in the x86-64 variant    */
/* appears deliberate for the NaCl 32-bit-address sandbox -- confirm   */
/* before "fixing".  The do { ... } while (0) wrapper lines are        */
/* missing from this excerpt; no comments are inserted between the     */
/* backslash-continued macro lines below.                              */
608 # if defined(__x86_64__)
609 # define NACL_STORE_REGS() \
611 __asm__ __volatile__ ("push %rbx"); \
612 __asm__ __volatile__ ("push %rbp"); \
613 __asm__ __volatile__ ("push %r12"); \
614 __asm__ __volatile__ ("push %r13"); \
615 __asm__ __volatile__ ("push %r14"); \
616 __asm__ __volatile__ ("push %r15"); \
617 __asm__ __volatile__ ("mov %%esp, %0" \
618 : "=m" (GC_nacl_gc_thread_self->stop_info.stack_ptr)); \
619 BCOPY(GC_nacl_gc_thread_self->stop_info.stack_ptr, \
620 GC_nacl_gc_thread_self->stop_info.reg_storage, \
621 NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t)); \
622 __asm__ __volatile__ ("naclasp $48, %r15"); \
624 # elif defined(__i386__)
625 # define NACL_STORE_REGS() \
627 __asm__ __volatile__ ("push %ebx"); \
628 __asm__ __volatile__ ("push %ebp"); \
629 __asm__ __volatile__ ("push %esi"); \
630 __asm__ __volatile__ ("push %edi"); \
631 __asm__ __volatile__ ("mov %%esp, %0" \
632 : "=m" (GC_nacl_gc_thread_self->stop_info.stack_ptr)); \
633 BCOPY(GC_nacl_gc_thread_self->stop_info.stack_ptr, \
634 GC_nacl_gc_thread_self->stop_info.reg_storage, \
635 NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t));\
636 __asm__ __volatile__ ("add $16, %esp"); \
639 # error FIXME for non-amd64/x86 NaCl
/* Called by the NaCl runtime before a (possibly blocking) syscall:    */
/* record the stack pointer and mark this thread parked, so a          */
/* concurrent GC need not wait for the syscall to return.  Threads     */
/* never registered with the GC (idx == -1) are skipped.               */
/* NOTE(review): the NACL_STORE_REGS() invocation between these lines  */
/* is missing from this excerpt.                                       */
642 GC_API_OSCALL void nacl_pre_syscall_hook(void)
644 if (GC_nacl_thread_idx != -1) {
646 GC_nacl_gc_thread_self->stop_info.stack_ptr = GC_approx_sp();
647 GC_nacl_thread_parked[GC_nacl_thread_idx] = 1;
/* Cooperative suspension point, called from the NaCl runtime and from */
/* nacl_post_syscall_hook.  If a park has been requested, save this    */
/* thread's registers/sp (unless already saved by the pre-syscall      */
/* hook), set its parked flag, and spin until the parker clears        */
/* GC_nacl_park_threads_now; then clear the flag and zero reg_storage  */
/* for the next cycle.  NOTE(review): the early 'return' statements    */
/* after the two guard ifs, the NACL_STORE_REGS() call, the spin-loop  */
/* body, and closing braces are missing from this excerpt.             */
651 GC_API_OSCALL void __nacl_suspend_thread_if_needed(void)
653 if (GC_nacl_park_threads_now) {
654 pthread_t self = pthread_self();
656 /* Don't try to park the thread parker. */
657 if (GC_nacl_thread_parker == self)
660 /* This can happen when a thread is created outside of the GC */
661 /* system (wthread mostly). */
662 if (GC_nacl_thread_idx < 0)
665 /* If it was already 'parked', we're returning from a syscall, */
666 /* so don't bother storing registers again, the GC has a set. */
667 if (!GC_nacl_thread_parked[GC_nacl_thread_idx]) {
669 GC_nacl_gc_thread_self->stop_info.stack_ptr = GC_approx_sp();
671 GC_nacl_thread_parked[GC_nacl_thread_idx] = 1;
672 while (GC_nacl_park_threads_now) {
675 GC_nacl_thread_parked[GC_nacl_thread_idx] = 0;
677 /* Clear out the reg storage for next suspend. */
678 BZERO(GC_nacl_gc_thread_self->stop_info.reg_storage,
679 NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t));
/* Called by the NaCl runtime after a syscall returns: first park (and */
/* wait out) any in-progress GC via __nacl_suspend_thread_if_needed,   */
/* then clear this thread's parked flag so it counts as running again. */
683 GC_API_OSCALL void nacl_post_syscall_hook(void)
685 /* Calling __nacl_suspend_thread_if_needed right away should */
686 /* guarantee we don't mutate the GC set. */
687 __nacl_suspend_thread_if_needed();
688 if (GC_nacl_thread_idx != -1) {
689 GC_nacl_thread_parked[GC_nacl_thread_idx] = 0;
693 STATIC GC_bool GC_nacl_thread_parking_inited = FALSE;
694 STATIC pthread_mutex_t GC_nacl_thread_alloc_lock = PTHREAD_MUTEX_INITIALIZER;
/* Register the calling thread with the NaCl parking machinery:        */
/* lazily zero the parked/used tables (once, under the alloc lock),    */
/* then claim the first free slot, storing its index in the            */
/* thread-local GC_nacl_thread_idx and bumping the thread count.       */
/* NOTE(review): the 'int i' declaration, the break after claiming a   */
/* slot, and closing braces are missing from this excerpt; behavior    */
/* when no slot is free is not visible here.                           */
696 GC_INNER void GC_nacl_initialize_gc_thread(void)
699 pthread_mutex_lock(&GC_nacl_thread_alloc_lock);
700 if (!GC_nacl_thread_parking_inited) {
701 BZERO(GC_nacl_thread_parked, sizeof(GC_nacl_thread_parked));
702 BZERO(GC_nacl_thread_used, sizeof(GC_nacl_thread_used));
703 GC_nacl_thread_parking_inited = TRUE;
705 GC_ASSERT(GC_nacl_num_gc_threads <= MAX_NACL_GC_THREADS);
706 for (i = 0; i < MAX_NACL_GC_THREADS; i++) {
707 if (GC_nacl_thread_used[i] == 0) {
708 GC_nacl_thread_used[i] = 1;
709 GC_nacl_thread_idx = i;
710 GC_nacl_num_gc_threads++;
714 pthread_mutex_unlock(&GC_nacl_thread_alloc_lock);
/* Unregister the calling thread: release its slot in the used table,  */
/* reset its thread-local index, and decrement the thread count, all   */
/* under the alloc lock.  Inverse of GC_nacl_initialize_gc_thread.     */
717 GC_INNER void GC_nacl_shutdown_gc_thread(void)
719 pthread_mutex_lock(&GC_nacl_thread_alloc_lock);
720 GC_ASSERT(GC_nacl_thread_idx >= 0);
721 GC_ASSERT(GC_nacl_thread_idx < MAX_NACL_GC_THREADS);
722 GC_ASSERT(GC_nacl_thread_used[GC_nacl_thread_idx] != 0);
723 GC_nacl_thread_used[GC_nacl_thread_idx] = 0;
724 GC_nacl_thread_idx = -1;
725 GC_nacl_num_gc_threads--;
726 pthread_mutex_unlock(&GC_nacl_thread_alloc_lock);
730 /* Caller holds allocation lock, and has held it continuously since */
731 /* the world stopped. */
/* Restart all stopped threads.  Signal-based path: clear              */
/* GC_world_is_stopped, then send SIG_THR_RESTART to every eligible    */
/* thread (the suspend handlers' sigsuspend loop exits once the flag   */
/* is down); on NetBSD additionally wait for one restart ack per       */
/* thread.  OpenBSD uses pthread_resume_np; NaCl just clears the park  */
/* flag.  NOTE(review): locals (i, code, result), n_live_threads       */
/* increments, #else arms, and closing braces are missing from this    */
/* excerpt.                                                            */
732 GC_INNER void GC_start_world(void)
735 pthread_t self = pthread_self();
737 register GC_thread p;
738 # ifndef GC_OPENBSD_THREADS
739 register int n_live_threads = 0;
742 # ifdef GC_NETBSD_THREADS_WORKAROUND
746 # ifdef DEBUG_THREADS
747 GC_log_printf("World starting\n");
750 # ifndef GC_OPENBSD_THREADS
/* Handlers test this with acquire semantics before re-suspending. */
751 AO_store(&GC_world_is_stopped, FALSE);
753 for (i = 0; i < THREAD_TABLE_SZ; i++) {
754 for (p = GC_threads[i]; p != 0; p = p -> next) {
755 if (!THREAD_EQUAL(p -> id, self)) {
756 if (p -> flags & FINISHED) continue;
757 if (p -> thread_blocked) continue;
758 # ifndef GC_OPENBSD_THREADS
761 # ifdef DEBUG_THREADS
762 GC_log_printf("Sending restart signal to 0x%x\n",
763 (unsigned)(p -> id));
766 # ifdef GC_OPENBSD_THREADS
767 if (pthread_resume_np(p -> id) != 0)
768 ABORT("pthread_resume_np failed");
770 # ifndef PLATFORM_ANDROID
771 result = pthread_kill(p -> id, SIG_THR_RESTART);
773 result = android_thread_kill(p -> kernel_id, SIG_THR_RESTART);
777 /* Not really there anymore. Possible? */
783 ABORT("pthread_kill failed");
789 # ifdef GC_NETBSD_THREADS_WORKAROUND
790 for (i = 0; i < n_live_threads; i++) {
791 while (0 != (code = sem_wait(&GC_restart_ack_sem))) {
792 if (errno != EINTR) {
794 GC_log_printf("sem_wait() returned %d\n", code);
795 ABORT("sem_wait() for restart handler failed");
800 # ifdef DEBUG_THREADS
801 GC_log_printf("World started\n");
804 # ifdef DEBUG_THREADS
805 GC_log_printf("World starting...\n");
/* NaCl: releasing this flag lets all spinning parked threads go. */
807 GC_nacl_park_threads_now = 0;
/* One-time initialization of the stop-world machinery (no-op body on  */
/* OpenBSD/NaCl): create the ack semaphore(s), install the             */
/* SIG_SUSPEND handler (SA_RESTART, everything blocked in sa_mask      */
/* except the "allowed" signals) and the SIG_THR_RESTART handler       */
/* (without SA_SIGINFO), build suspend_handler_mask, and honor the     */
/* GC_RETRY_SIGNALS / GC_NO_RETRY_SIGNALS environment overrides.       */
/* NOTE(review): the SA_SIGINFO #if/#else around the two handler       */
/* assignments, closing braces, and a few conditionals are missing     */
/* from this excerpt.                                                  */
811 GC_INNER void GC_stop_init(void)
813 # if !defined(GC_OPENBSD_THREADS) && !defined(NACL)
814 struct sigaction act;
816 if (sem_init(&GC_suspend_ack_sem, GC_SEM_INIT_PSHARED, 0) != 0)
817 ABORT("sem_init failed");
818 # ifdef GC_NETBSD_THREADS_WORKAROUND
819 if (sem_init(&GC_restart_ack_sem, GC_SEM_INIT_PSHARED, 0) != 0)
820 ABORT("sem_init failed");
/* SA_RESTART so interrupted syscalls in mutators resume transparently. */
824 act.sa_flags = SA_RESTART
832 if (sigfillset(&act.sa_mask) != 0) {
833 ABORT("sigfillset() failed");
835 # ifdef GC_RTEMS_PTHREADS
836 if(sigprocmask(SIG_UNBLOCK, &act.sa_mask, NULL) != 0) {
837 ABORT("rtems sigprocmask() failed");
840 GC_remove_allowed_signals(&act.sa_mask);
841 /* SIG_THR_RESTART is set in the resulting mask. */
842 /* It is unmasked by the handler when necessary. */
844 act.sa_sigaction = GC_suspend_handler;
846 act.sa_handler = GC_suspend_handler;
848 if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
849 ABORT("Cannot set SIG_SUSPEND handler");
/* Restart handler takes the plain (non-siginfo) form. */
853 act.sa_flags &= ~ SA_SIGINFO;
855 act.sa_handler = GC_restart_handler;
856 if (sigaction(SIG_THR_RESTART, &act, NULL) != 0) {
857 ABORT("Cannot set SIG_THR_RESTART handler");
860 /* Initialize suspend_handler_mask. It excludes SIG_THR_RESTART. */
861 if (sigfillset(&suspend_handler_mask) != 0) ABORT("sigfillset() failed");
862 GC_remove_allowed_signals(&suspend_handler_mask);
863 if (sigdelset(&suspend_handler_mask, SIG_THR_RESTART) != 0)
864 ABORT("sigdelset() failed");
866 /* Check for GC_RETRY_SIGNALS. */
867 if (0 != GETENV("GC_RETRY_SIGNALS")) {
868 GC_retry_signals = TRUE;
870 if (0 != GETENV("GC_NO_RETRY_SIGNALS")) {
871 GC_retry_signals = FALSE;
873 if (GC_print_stats && GC_retry_signals) {
874 GC_log_printf("Will retry suspend signal if necessary\n");
876 # endif /* !GC_OPENBSD_THREADS && !NACL */