/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Support code for Solaris threads.  Provides functionality we wish Sun
 * had provided.  Relies on some information we probably shouldn't rely on.
 */
/* Boehm, September 14, 1994 4:44 pm PDT */
# if defined(GC_SOLARIS_THREADS) || defined(GC_SOLARIS_PTHREADS) \
     || defined(GC_THREADS)

# include "private/gc_priv.h"

# if defined(GC_SOLARIS_THREADS) || defined(GC_SOLARIS_PTHREADS)
# include "private/solaris_threads.h"
# include <thread.h>
# include <synch.h>
# include <signal.h>
# include <fcntl.h>
# include <errno.h>
# include <sys/types.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/resource.h>
# include <sys/stat.h>
# include <sys/syscall.h>
# include <sys/procfs.h>
# include <sys/lwp.h>
# define _CLASSIC_XOPEN_TYPES
# include <unistd.h>
# undef _CLASSIC_XOPEN_TYPES
# ifdef HANDLE_FORK
  --> Not yet supported.  Try porting the code from linux_threads.c.
# endif
/*
 * This is the default size of the LWP arrays.  If there are more LWPs
 * than this when a stop-the-world GC happens, set_max_lwps will be
 * called to increase the size of the arrays.
 * This must be higher than the number of LWPs at startup time.
 * The threads library creates a thread early on, so the minimum is 3.
 */
# define DEFAULT_MAX_LWPS	4

cond_t GC_prom_join_cv;		/* Broadcast when any thread terminates	*/
cond_t GC_create_cv;		/* Signalled when a new undetached	*/
				/* thread starts.			*/

#ifdef MMAP_STACKS
static int GC_zfd;
#endif /* MMAP_STACKS */
/* We use the allocation lock to protect thread-related data structures. */

/* We stop the world using /proc primitives.  This makes some	*/
/* minimal assumptions about the threads implementation.	*/
/* We don't play by the rules, since the rules make this	*/
/* impossible (as of Solaris 2.3).  Also note that as of	*/
/* Solaris 2.3 the various thread and lwp suspension		*/
/* primitives failed to stop threads by the time the request	*/
/* completes.							*/

static sigset_t old_mask;
/* Sleep for n milliseconds, n < 1000	*/
void GC_msec_sleep(int n)
{
    struct timespec ts;

    ts.tv_sec = 0;
    ts.tv_nsec = 1000000*n;
    if (syscall(SYS_nanosleep, &ts, 0) < 0) {
	ABORT("nanosleep failed");
    }
}
/* Turn off preemption; gross but effective.		*/
/* Caller has allocation lock.				*/
/* Actually this is not needed under Solaris 2.3 and	*/
/* 2.4, but hopefully that'll change.			*/
void preemption_off()
{
    sigset_t set;

    (void)sigfillset(&set);
    sigdelset(&set, SIGABRT);
    syscall(SYS_sigprocmask, SIG_SETMASK, &set, &old_mask);
}

void preemption_on()
{
    syscall(SYS_sigprocmask, SIG_SETMASK, &old_mask, NULL);
}
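
/* The lwp cache below maps lwp ids to open /proc lwp descriptors.	*/
/* It is filled in round-robin fashion by open_lwp and flushed by	*/
/* uncache_lwp when an lwp appears to have exited.  Cached descriptors	*/
/* may be closed under fd pressure (EMFILE) and reopened on demand.	*/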
int GC_main_proc_fd = -1;

struct lwp_cache_entry {
    lwpid_t lc_id;
    int lc_descr;	/* /proc file descriptor.	*/
} GC_lwp_cache_default[DEFAULT_MAX_LWPS];

static int max_lwps = DEFAULT_MAX_LWPS;
static struct lwp_cache_entry *GC_lwp_cache = GC_lwp_cache_default;

static prgregset_t GC_lwp_registers_default[DEFAULT_MAX_LWPS];
static prgregset_t *GC_lwp_registers = GC_lwp_registers_default;
/* Return a file descriptor for the /proc entry corresponding	*/
/* to the given lwp.  The file descriptor may be stale if the	*/
/* lwp exited and a new one was forked.				*/
static int open_lwp(lwpid_t id)
{
    int result;
    static int next_victim = 0;
    register int i;

    for (i = 0; i < max_lwps; i++) {
	if (GC_lwp_cache[i].lc_id == id) return(GC_lwp_cache[i].lc_descr);
    }
    result = syscall(SYS_ioctl, GC_main_proc_fd, PIOCOPENLWP, &id);
    /*
     * If PIOCOPENLWP fails, try closing fds in the cache until it succeeds.
     */
    if (result < 0 && errno == EMFILE) {
	for (i = 0; i < max_lwps; i++) {
	    if (GC_lwp_cache[i].lc_id != 0) {
		GC_lwp_cache[i].lc_id = 0;
		(void)syscall(SYS_close, GC_lwp_cache[i].lc_descr);
		result = syscall(SYS_ioctl, GC_main_proc_fd, PIOCOPENLWP, &id);
		if (result >= 0 || (result < 0 && errno != EMFILE))
		    break;
	    }
	}
    }
    if (result < 0) {
	if (errno == EMFILE) {
	    ABORT("Too many open files");
	}
	return(-1) /* exited? */;
    }
    if (GC_lwp_cache[next_victim].lc_id != 0)
	(void)syscall(SYS_close, GC_lwp_cache[next_victim].lc_descr);
    GC_lwp_cache[next_victim].lc_id = id;
    GC_lwp_cache[next_victim].lc_descr = result;
    if (++next_victim >= max_lwps)
	next_victim = 0;
    return(result);
}
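
/* Close and forget any cached descriptor for the given lwp.	*/
/* Called when the lwp appears to have exited.			*/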
static void uncache_lwp(lwpid_t id)
{
    register int i;

    for (i = 0; i < max_lwps; i++) {
	if (GC_lwp_cache[i].lc_id == id) {
	    (void)syscall(SYS_close, GC_lwp_cache[i].lc_descr);
	    GC_lwp_cache[i].lc_id = 0;
	    break;
	}
    }
}
/* Sequence of current lwp ids	*/
static lwpid_t GC_current_ids_default[DEFAULT_MAX_LWPS + 1];
static lwpid_t *GC_current_ids = GC_current_ids_default;

/* Temporary array used below (can be big if there are many LWPs) */
static lwpid_t last_ids_default[DEFAULT_MAX_LWPS + 1];
static lwpid_t *last_ids = last_ids_default;
#define ROUNDUP(n) WORDS_TO_BYTES(ROUNDED_UP_WORDS(n))
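
/* ROUNDUP rounds a byte count up to a whole number of words, so that	*/
/* the arrays carved out of the single scratch block below all start	*/
/* word-aligned.							*/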
static void set_max_lwps(GC_word n)
{
    char *mem;
    char *oldmem;
    int required_bytes = ROUNDUP(n * sizeof(struct lwp_cache_entry))
	+ ROUNDUP(n * sizeof(prgregset_t))
	+ ROUNDUP((n + 1) * sizeof(lwpid_t))
	+ ROUNDUP((n + 1) * sizeof(lwpid_t));

    GC_expand_hp_inner(divHBLKSZ((word)required_bytes));
    oldmem = mem = GC_scratch_alloc(required_bytes);
    if (0 == mem) ABORT("No space for lwp data structures");

    /*
     * We can either flush the old lwp cache or copy it over. Do the latter.
     */
    memcpy(mem, GC_lwp_cache, max_lwps * sizeof(struct lwp_cache_entry));
    GC_lwp_cache = (struct lwp_cache_entry*)mem;
    mem += ROUNDUP(n * sizeof(struct lwp_cache_entry));

    BZERO(GC_lwp_registers, max_lwps * sizeof(GC_lwp_registers[0]));
    GC_lwp_registers = (prgregset_t *)mem;
    mem += ROUNDUP(n * sizeof(prgregset_t));

    GC_current_ids = (lwpid_t *)mem;
    mem += ROUNDUP((n + 1) * sizeof(lwpid_t));

    last_ids = (lwpid_t *)mem;
    mem += ROUNDUP((n + 1) * sizeof(lwpid_t));

    if (mem > oldmem + required_bytes)
	ABORT("set_max_lwps buffer overflow");

    max_lwps = n;
}
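
/* The stop-the-world protocol: read the process status and lwp id	*/
/* list from /proc (PIOCSTATUS, PIOCLWPIDS), _lwp_suspend every lwp	*/
/* other than the caller, then poll each suspended lwp's /proc status	*/
/* until PR_STOPPED is observed and its registers can be recorded.	*/
/* The whole sequence repeats until the id set stops changing, since	*/
/* new lwps can be created while we are suspending others.		*/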
/* Stop all lwps in process.  Assumes preemption is off.	*/
/* Caller has allocation lock (and any other locks he may	*/
/* hold).							*/
static void stop_all_lwps()
{
    int lwp_fd;
    char buf[30];
    prstatus_t status;
    register int i;
    GC_bool changed;
    lwpid_t me = _lwp_self();

    if (GC_main_proc_fd == -1) {
	sprintf(buf, "/proc/%d", getpid());
	GC_main_proc_fd = syscall(SYS_open, buf, O_RDONLY);
	if (GC_main_proc_fd < 0) {
	    if (errno == EMFILE)
		ABORT("/proc open failed: too many open files");
	    GC_printf1("/proc open failed: errno %d", errno);
	    ABORT("/proc open failed");
	}
    }
    BZERO(GC_lwp_registers, sizeof (prgregset_t) * max_lwps);
    for (i = 0; i < max_lwps; i++)
	last_ids[i] = 0;
    for (;;) {
	if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCSTATUS, &status) < 0)
	    ABORT("Main PIOCSTATUS failed");
	if (status.pr_nlwp < 1)
	    ABORT("Invalid number of lwps returned by PIOCSTATUS");
	if (status.pr_nlwp >= max_lwps) {
	    set_max_lwps(status.pr_nlwp*2 + 10);
	    /*
	     * The data in the old GC_current_ids and
	     * GC_lwp_registers has been trashed.  Cleaning out last_ids
	     * will make sure every LWP gets re-examined.
	     */
	    for (i = 0; i < max_lwps; i++)
		last_ids[i] = 0;
	    continue;
	}
	if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCLWPIDS, GC_current_ids) < 0)
	    ABORT("PIOCLWPIDS failed");
	changed = FALSE;
	for (i = 0; i < max_lwps && GC_current_ids[i] != 0; i++) {
	    if (GC_current_ids[i] != last_ids[i]) {
		changed = TRUE;
		if (GC_current_ids[i] != me) {
		    /* PIOCSTOP doesn't work without a writable	*/
		    /* descriptor.  And that makes the process	*/
		    /* undebuggable.				*/
		    if (_lwp_suspend(GC_current_ids[i]) < 0) {
			/* Could happen if the lwp exited */
			uncache_lwp(GC_current_ids[i]);
			GC_current_ids[i] = me;	/* ignore */
		    }
		}
	    }
	}
	/*
	 * Retry in the unlikely event something does a fork between the
	 * PIOCSTATUS and the PIOCLWPIDS.
	 */
	if (i >= max_lwps) continue;

	/* All lwps in GC_current_ids != me have been suspended.  Note	*/
	/* that _lwp_suspend is idempotent.				*/
	for (i = 0; GC_current_ids[i] != 0; i++) {
	    if (GC_current_ids[i] != last_ids[i]
		&& GC_current_ids[i] != me) {
		lwp_fd = open_lwp(GC_current_ids[i]);
		if (lwp_fd == -1) {
		    GC_current_ids[i] = me;	/* exited; ignore */
		    continue;
		}
		/* LWP should be stopped.  Empirically it sometimes	*/
		/* isn't, and more frequently the PR_STOPPED flag	*/
		/* is not set.  Wait for PR_STOPPED.			*/
		if (syscall(SYS_ioctl, lwp_fd,
			    PIOCSTATUS, &status) < 0) {
		    /* Possible if the descriptor was stale, or	*/
		    /* we encountered the 2.3 _lwp_suspend bug.	*/
		    uncache_lwp(GC_current_ids[i]);
		    GC_current_ids[i] = me; /* handle next time. */
		    continue;
		}
		while (!(status.pr_flags & PR_STOPPED)) {
		    GC_msec_sleep(1);
		    if (syscall(SYS_ioctl, lwp_fd,
				PIOCSTATUS, &status) < 0) {
			ABORT("Repeated PIOCSTATUS failed");
		    }
		    if (status.pr_flags & PR_STOPPED) break;

		    GC_msec_sleep(20);
		    if (syscall(SYS_ioctl, lwp_fd,
				PIOCSTATUS, &status) < 0) {
			ABORT("Repeated PIOCSTATUS failed");
		    }
		}
		if (status.pr_who != GC_current_ids[i]) {
		    /* can happen if thread was on death row */
		    uncache_lwp(GC_current_ids[i]);
		    GC_current_ids[i] = me; /* handle next time. */
		    continue;
		}
		/* Save registers where collector can	*/
		/* find them.				*/
		BCOPY(status.pr_reg, GC_lwp_registers[i],
		      sizeof (prgregset_t));
	    }
	}
	if (!changed) break;
	for (i = 0; i < max_lwps; i++) last_ids[i] = GC_current_ids[i];
    }
}
/* Restart all lwps in process.  Assumes preemption is off.	*/
static void restart_all_lwps()
{
    register int i;
    lwpid_t me = _lwp_self();

    for (i = 0; GC_current_ids[i] != 0; i++) {
#	ifdef PARANOID
	    if (GC_current_ids[i] != me) {
		int lwp_fd = open_lwp(GC_current_ids[i]);
		prstatus_t status;

		if (lwp_fd < 0) ABORT("open_lwp failed");
		if (syscall(SYS_ioctl, lwp_fd,
			    PIOCSTATUS, &status) < 0) {
		    ABORT("PIOCSTATUS failed in restart_all_lwps");
		}
		if (memcmp(status.pr_reg, GC_lwp_registers[i],
			   sizeof (prgregset_t)) != 0) {
		    int j;

		    for (j = 0; j < NPRGREG; j++) {
			GC_printf3("%i: %x -> %x\n", j,
				   GC_lwp_registers[i][j],
				   status.pr_reg[j]);
		    }
		    ABORT("Register contents changed");
		}
		if (!(status.pr_flags & PR_STOPPED)) {
		    ABORT("lwp no longer stopped");
		}
		{
		    gwindows_t windows;

		    if (syscall(SYS_ioctl, lwp_fd,
				PIOCGWIN, &windows) < 0) {
			ABORT("PIOCGWIN failed in restart_all_lwps");
		    }
		    if (windows.wbcnt > 0) ABORT("unsaved register windows");
		}
	    }
#	endif /* PARANOID */
	if (GC_current_ids[i] == me) continue;
	if (_lwp_continue(GC_current_ids[i]) < 0) {
	    ABORT("Failed to restart lwp");
	}
    }
    if (i >= max_lwps) ABORT("Too many lwps");
}
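
/* Collector-facing wrappers.  Preemption is turned off for the	*/
/* duration of the stop, since stop_all_lwps and restart_all_lwps	*/
/* assume it is off, and nothing needs to happen at all until a	*/
/* second thread exists.						*/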
GC_bool GC_multithreaded = 0;

void GC_stop_world()
{
    preemption_off();
    if (GC_multithreaded)
	stop_all_lwps();
}

void GC_start_world()
{
    if (GC_multithreaded)
	restart_all_lwps();
    preemption_on();
}
void GC_thr_init(void);

GC_bool GC_thr_initialized = FALSE;

size_t GC_min_stack_sz;

/*
 * stack_head is stored at the top of free stacks
 */
struct stack_head {
    struct stack_head *next;
    ptr_t base;
};

# define N_FREE_LISTS 25
struct stack_head *GC_stack_free_lists[N_FREE_LISTS] = { 0 };
		/* GC_stack_free_lists[i] is free list for stacks of	*/
		/* size GC_min_stack_sz*2**i.				*/
		/* Free lists are linked through stack_head stored	*/
		/* at top of stack.					*/
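		/* For example, if GC_min_stack_sz happened to be 64K,	*/
		/* list 0 would hold 64K stacks, list 1 128K stacks,	*/
		/* and so on; a 100K request would be rounded up to	*/
		/* 128K and served from list 1.				*/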
/* Return a stack of size at least *stack_size.  *stack_size is	*/
/* replaced by the actual stack size.					*/
/* Caller holds allocation lock.					*/
ptr_t GC_stack_alloc(size_t * stack_size)
{
    register size_t requested_sz = *stack_size;
    register size_t search_sz = GC_min_stack_sz;
    register int index = 0;	/* = log2(search_sz/GC_min_stack_sz) */
    register ptr_t base;
    register struct stack_head *result;

    while (search_sz < requested_sz) {
	search_sz *= 2;
	index++;
    }
    if ((result = GC_stack_free_lists[index]) == 0
	&& (result = GC_stack_free_lists[index+1]) != 0) {
	/* Try next size up. */
	search_sz *= 2; index++;
    }
    if (result != 0) {
	base = GC_stack_free_lists[index]->base;
	GC_stack_free_lists[index] = GC_stack_free_lists[index]->next;
    } else {
#	ifdef MMAP_STACKS
	    base = (ptr_t)mmap(0, search_sz + GC_page_size,
			       PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_NORESERVE,
			       GC_zfd, 0);
	    if (base == (ptr_t)-1) {
		*stack_size = 0;
		return(NULL);
	    }
	    mprotect(base, GC_page_size, PROT_NONE);
	    /* Should this use divHBLKSZ(search_sz + GC_page_size) ? -- cf */
	    GC_is_fresh((struct hblk *)base, divHBLKSZ(search_sz));
	    base += GC_page_size;
#	else
	    base = (ptr_t)GC_scratch_alloc(search_sz + 2*GC_page_size);
	    if (base == 0) {
		*stack_size = 0;
		return(NULL);
	    }
	    base = (ptr_t)(((word)base + GC_page_size) & ~(GC_page_size - 1));
	    /* Protect hottest page to detect overflow. */
#	    ifdef SOLARIS23_MPROTECT_BUG_FIXED
		mprotect(base, GC_page_size, PROT_NONE);
#	    endif
	    GC_is_fresh((struct hblk *)base, divHBLKSZ(search_sz));
	    base += GC_page_size;
#	endif
    }
    *stack_size = search_sz;
    return(base);
}
/* Caller holds allocation lock.					*/
void GC_stack_free(ptr_t stack, size_t size)
{
    register int index = 0;
    register size_t search_sz = GC_min_stack_sz;
    register struct stack_head *head;

#   ifdef MMAP_STACKS
	mmap(stack, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_NORESERVE|MAP_FIXED,
	     GC_zfd, 0);
#   endif
    while (search_sz < size) {
	search_sz *= 2;
	index++;
    }
    if (search_sz != size) ABORT("Bad stack size");

    head = (struct stack_head *)(stack + search_sz - sizeof(struct stack_head));
    head->next = GC_stack_free_lists[index];
    head->base = stack;
    GC_stack_free_lists[index] = head;
}
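
/* Remapping the freed stack from /dev/zero (MMAP_STACKS) zeroes its	*/
/* pages, so stale pointers in a cached free stack should not cause	*/
/* spurious retention; the non-mmap case relies on			*/
/* GC_old_stacks_are_fresh below instead.				*/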
void GC_my_stack_limits();

/* Notify virtual dirty bit implementation that known empty parts of	*/
/* stacks do not contain useful data.					*/
/* Caller holds allocation lock.					*/
void GC_old_stacks_are_fresh()
{
/* No point in doing this for MMAP stacks - and pointers are zero'd out */
/* by the mmap in GC_stack_free */
#ifndef MMAP_STACKS
    register int i;
    register struct stack_head *s;
    register ptr_t p;
    register size_t sz;
    register struct hblk * h;

    for (i = 0, sz = GC_min_stack_sz; i < N_FREE_LISTS;
	 i++, sz *= 2) {
	for (s = GC_stack_free_lists[i]; s != 0; s = s->next) {
	    p = s->base;
	    h = (struct hblk *)(((word)p + HBLKSIZE-1) & ~(HBLKSIZE-1));
	    if ((ptr_t)h == p) {
		GC_is_fresh((struct hblk *)p, divHBLKSZ(sz));
	    } else {
		GC_is_fresh((struct hblk *)p, divHBLKSZ(sz) - 1);
		BZERO(p, (ptr_t)h - p);
	    }
	}
    }
#endif /* MMAP_STACKS */
    GC_my_stack_limits();
}
/* The set of all known threads.  We intercept thread creation and	*/
/* joins.  We never actually create detached threads.  We allocate all	*/
/* new thread stacks ourselves.  These allow us to maintain this	*/
/* data structure.							*/

# define THREAD_TABLE_SZ 128	/* Must be power of 2 */
volatile GC_thread GC_threads[THREAD_TABLE_SZ];
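
/* Chained hashing: each bucket holds a list of GC_Thread_Rep		*/
/* structures linked through their next fields; the bucket index is	*/
/* the thread id reduced modulo THREAD_TABLE_SZ.			*/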
void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
}
/* Add a thread to GC_threads.  We assume it wasn't already there.	*/
/* Caller holds allocation lock.					*/
GC_thread GC_new_thread(thread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
	result = &first_thread;
	first_thread_used = TRUE;
	/* Don't acquire allocation lock, since we may already hold it. */
    } else {
	result = (struct GC_Thread_Rep *)
		 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    /* result -> finished = 0; */
    (void) cond_init(&(result->join_cv), USYNC_THREAD, 0);
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there.	*/
/* (The code intentionally traps if it wasn't.)			*/
/* Caller holds allocation lock.				*/
void GC_delete_thread(thread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p -> id != id) {
	prev = p;
	p = p -> next;
    }
    if (prev == 0) {
	GC_threads[hv] = p -> next;
    } else {
	prev -> next = p -> next;
    }
}
/* Return the GC_thread corresponding to a given thread_t.	*/
/* Returns 0 if it's not there.					*/
/* Caller holds allocation lock.				*/
GC_thread GC_lookup_thread(thread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && p -> id != id) p = p -> next;
    return(p);
}
/* Solaris 2/Intel uses an initial stack size limit slightly bigger than the
   SPARC default of 8 MB.  Account for this to warn only if the user has
   raised the limit beyond the default.

   This is identical to DFLSSIZ defined in <sys/vm_machparam.h>.  This file
   is installed in /usr/platform/`uname -m`/include, which is not in the
   default include directory list, so copy the definition here.  */
#ifdef I386
# define MAX_ORIG_STACK_SIZE (8 * 1024 * 1024 + ((USRSTACK) & 0x3FFFFF))
#else
# define MAX_ORIG_STACK_SIZE (8 * 1024 * 1024)
#endif
word GC_get_orig_stack_size() {
    struct rlimit rl;
    static int warned = 0;
    word result;

    if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
    result = (word)rl.rlim_cur & ~(HBLKSIZE-1);
    if (result > MAX_ORIG_STACK_SIZE) {
	if (!warned) {
	    WARN("Large stack limit (%ld): only scanning 8 MB\n", result);
	    warned = 1;
	}
	result = MAX_ORIG_STACK_SIZE;
    }
    return(result);
}
/* Notify dirty bit implementation of unused parts of my stack.	*/
/* Caller holds allocation lock.				*/
void GC_my_stack_limits()
{
    int dummy;
    register ptr_t hottest = (ptr_t)((word)(&dummy) & ~(HBLKSIZE-1));
    register GC_thread me = GC_lookup_thread(thr_self());
    register size_t stack_size = me -> stack_size;
    register ptr_t stack;

    if (stack_size == 0) {
	/* original thread */
	/* Empirically, what should be the stack page with lowest	*/
	/* address is actually inaccessible.				*/
	stack_size = GC_get_orig_stack_size() - GC_page_size;
	stack = GC_stackbottom - stack_size + GC_page_size;
    } else {
	stack = me -> stack;
    }
    if (stack > hottest || stack + stack_size < hottest) {
	ABORT("sp out of bounds");
    }
    GC_is_fresh((struct hblk *)stack, divHBLKSZ(hottest - stack));
}
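
/* Roots in thread state: the register sets saved by stop_all_lwps	*/
/* are pushed unconditionally; each thread stack (or the original	*/
/* stack) is then pushed, starting at sp for the running thread.	*/
/* With dirty-bit maintenance, only pages that were ever dirty are	*/
/* examined (see the PUSH macro below).					*/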
/* We hold allocation lock.  Should do exactly the right thing if the	*/
/* world is stopped.  Should not fail if it isn't.			*/
void GC_push_all_stacks()
{
    register int i;
    register GC_thread p;
    register ptr_t sp = GC_approx_sp();
    register ptr_t bottom, top;

#   define PUSH(bottom,top) \
      if (GC_dirty_maintained) { \
	GC_push_selected((bottom), (top), GC_page_was_ever_dirty, \
			 GC_push_all_stack); \
      } else { \
	GC_push_all_stack((bottom), (top)); \
      }
    GC_push_all_stack((ptr_t)GC_lwp_registers,
		      (ptr_t)GC_lwp_registers
		      + max_lwps * sizeof(GC_lwp_registers[0]));
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
	for (p = GC_threads[i]; p != 0; p = p -> next) {
	    if (p -> stack_size != 0) {
		bottom = p -> stack;
		top = p -> stack + p -> stack_size;
	    } else {
		/* The original stack. */
		bottom = GC_stackbottom - GC_get_orig_stack_size() + GC_page_size;
		top = GC_stackbottom;
	    }
	    if ((word)sp > (word)bottom && (word)sp < (word)top) bottom = sp;
	    PUSH(bottom, top);
	}
    }
}
int GC_is_thread_stack(ptr_t addr)
{
    register int i;
    register GC_thread p;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
	for (p = GC_threads[i]; p != 0; p = p -> next) {
	    if (p -> stack_size != 0) {
		if (p -> stack <= addr &&
		    addr < p -> stack + p -> stack_size)
		    return 1;
	    }
	}
    }
    return 0;
}
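
/* Join protocol: clients never call thr_join directly.  Threads are	*/
/* created undetached; the daemon below joins any terminated thread,	*/
/* reclaims GC-allocated stacks, marks the entry FINISHED, and wakes	*/
/* waiters.  GC_thr_join then only waits on condition variables.	*/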
/* The only thread that ever really performs a thr_join.	*/
void * GC_thr_daemon(void * dummy)
{
    void *status;
    thread_t departed;
    register GC_thread t;
    register int i;
    register int result;

    for (;;) {
      start:
	result = thr_join((thread_t)0, &departed, &status);
	LOCK();
	if (result != 0) {
	    /* No more threads; wait for create. */
	    for (i = 0; i < THREAD_TABLE_SZ; i++) {
		for (t = GC_threads[i]; t != 0; t = t -> next) {
		    if (!(t -> flags & (DETACHED | FINISHED))) {
			UNLOCK();
			goto start; /* Thread started just before we	*/
				    /* acquired the lock.		*/
		    }
		}
	    }
	    cond_wait(&GC_create_cv, &GC_allocate_ml);
	    UNLOCK();
	} else {
	    t = GC_lookup_thread(departed);
	    GC_multithreaded--;
	    if (!(t -> flags & CLIENT_OWNS_STACK)) {
		GC_stack_free(t -> stack, t -> stack_size);
	    }
	    if (t -> flags & DETACHED) {
		GC_delete_thread(departed);
	    } else {
		t -> status = status;
		t -> flags |= FINISHED;
		cond_signal(&(t -> join_cv));
		cond_broadcast(&GC_prom_join_cv);
	    }
	    UNLOCK();
	}
    }
}
/* We hold the allocation lock, or caller ensures that 2 instances	*/
/* cannot be invoked concurrently.					*/
void GC_thr_init(void)
{
    GC_thread t;
    thread_t tid;
    int ret;

    if (GC_thr_initialized)
	return;
    GC_thr_initialized = TRUE;
    GC_min_stack_sz = ((thr_min_stack() + 32*1024 + HBLKSIZE-1)
		       & ~(HBLKSIZE - 1));
#ifdef MMAP_STACKS
    GC_zfd = open("/dev/zero", O_RDONLY);
    if (GC_zfd == -1)
	ABORT("Can't open /dev/zero");
#endif /* MMAP_STACKS */
    cond_init(&GC_prom_join_cv, USYNC_THREAD, 0);
    cond_init(&GC_create_cv, USYNC_THREAD, 0);
    /* Add the initial thread, so we can stop it. */
    t = GC_new_thread(thr_self());
    t -> stack_size = 0;
    t -> flags = DETACHED | CLIENT_OWNS_STACK;
    ret = thr_create(0 /* stack */, 0 /* stack_size */, GC_thr_daemon,
		     0 /* arg */, THR_DETACHED | THR_DAEMON,
		     &tid /* thread_id */);
    if (ret != 0) {
	GC_err_printf1("Thr_create returned %ld\n", ret);
	ABORT("Can't fork daemon");
    }
    thr_setprio(tid, 126);
}
/* We acquire the allocation lock to prevent races with	*/
/* stopping/starting world.					*/
/* This is no more correct than the underlying Solaris 2.X	*/
/* implementation.  Under 2.3 THIS IS BROKEN.			*/
int GC_thr_suspend(thread_t target_thread)
{
    GC_thread t;
    int result;

    LOCK();
    result = thr_suspend(target_thread);
    if (result == 0) {
	t = GC_lookup_thread(target_thread);
	if (t == 0) ABORT("thread unknown to GC");
	t -> flags |= SUSPNDED;
    }
    UNLOCK();
    return(result);
}
int GC_thr_continue(thread_t target_thread)
{
    GC_thread t;
    int result;

    LOCK();
    result = thr_continue(target_thread);
    if (result == 0) {
	t = GC_lookup_thread(target_thread);
	if (t == 0) ABORT("thread unknown to GC");
	t -> flags &= ~SUSPNDED;
    }
    UNLOCK();
    return(result);
}
int GC_thr_join(thread_t wait_for, thread_t *departed, void **status)
{
    register GC_thread t;
    register int i;
    register int result = 0;

    LOCK();
    if (wait_for == 0) {
	/* Promiscuous join: wait for any undetached thread. */
	register GC_bool thread_exists;

	for (;;) {
	    thread_exists = FALSE;
	    for (i = 0; i < THREAD_TABLE_SZ; i++) {
		for (t = GC_threads[i]; t != 0; t = t -> next) {
		    if (!(t -> flags & DETACHED)) {
			if (t -> flags & FINISHED) goto found;
			thread_exists = TRUE;
		    }
		}
	    }
	    if (!thread_exists) {
		result = ESRCH; goto out;
	    }
	    cond_wait(&GC_prom_join_cv, &GC_allocate_ml);
	}
    } else {
	t = GC_lookup_thread(wait_for);
	if (t == 0 || t -> flags & DETACHED) {
	    result = ESRCH; goto out;
	}
	if (wait_for == thr_self()) {
	    result = EDEADLK; goto out;
	}
	while (!(t -> flags & FINISHED)) {
	    cond_wait(&(t -> join_cv), &GC_allocate_ml);
	}
    }
  found:
    if (status) *status = t -> status;
    if (departed) *departed = t -> id;
    cond_destroy(&(t -> join_cv));
    GC_delete_thread(t -> id);
  out:
    UNLOCK();
    return(result);
}
int
GC_thr_create(void *stack_base, size_t stack_size,
	      void *(*start_routine)(void *), void *arg, long flags,
	      thread_t *new_thread)
{
    int result;
    GC_thread t;
    thread_t my_new_thread;
    word my_flags = 0;
    void * stack = stack_base;

    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    GC_multithreaded++;
    if (stack == 0) {
	if (stack_size == 0) stack_size = 1024*1024;
	stack = (void *)GC_stack_alloc(&stack_size);
	if (stack == 0) {
	    GC_multithreaded--;
	    UNLOCK();
	    return(ENOMEM);
	}
    } else {
	my_flags |= CLIENT_OWNS_STACK;
    }
    if (flags & THR_DETACHED) my_flags |= DETACHED;
    if (flags & THR_SUSPENDED) my_flags |= SUSPNDED;
    /* Create the thread undetached, so GC_thr_daemon can join it	*/
    /* to learn when it terminates.					*/
    result = thr_create(stack, stack_size, start_routine,
			arg, flags & ~THR_DETACHED, &my_new_thread);
    if (result == 0) {
	t = GC_new_thread(my_new_thread);
	t -> flags = my_flags;
	if (!(my_flags & DETACHED)) cond_init(&(t -> join_cv), USYNC_THREAD, 0);
	t -> stack = stack;
	t -> stack_size = stack_size;
	if (new_thread != 0) *new_thread = my_new_thread;
	cond_signal(&GC_create_cv);
    } else {
	GC_multithreaded--;
	if (!(my_flags & CLIENT_OWNS_STACK)) {
	    GC_stack_free(stack, stack_size);
	}
    }
    UNLOCK();
    return(result);
}
# else /* !GC_SOLARIS_THREADS */

#ifndef LINT
  int GC_no_sunOS_threads;
#endif

# endif /* GC_SOLARIS_THREADS || GC_SOLARIS_PTHREADS */
# endif /* GC_SOLARIS_THREADS || GC_SOLARIS_PTHREADS || GC_THREADS */