/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2004 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code relies on implementation details of LinuxThreads,
 * (i.e. properties not guaranteed by the Pthread standard),
 * though this version now does less of that than the other Pthreads
 * implementations.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and thread support for some of the other Posix platforms; any changes
 * made here may need to be reflected there too.
 */
/* DG/UX ix86 support <takis@xfree86.org> */
/*
 * Linux_threads.c now also includes some code to support HPUX and
 * OSF1 (Compaq Tru64 Unix, really).  The OSF1 support is based on
 * Eric Benson's patch.
 *
 * Eric also suggested an alternate basis for a lock implementation in
 * his code:
 * + #elif defined(OSF1)
 * +    unsigned long GC_allocate_lock = 0;
 * +    msemaphore GC_allocate_semaphore;
 * + #  define GC_TRY_LOCK() \
 * +    ((msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) == 0) \
 * +     ? (GC_allocate_lock = 1) \
 * +     : 0)
 * + #  define GC_LOCK_TAKEN GC_allocate_lock
 */
/*#define DEBUG_THREADS 1*/
/*#define GC_ASSERTIONS*/
# include "private/pthread_support.h"

# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
     && !defined(GC_WIN32_THREADS)
# if defined(GC_HPUX_THREADS) && !defined(USE_PTHREAD_SPECIFIC) \
     && !defined(USE_COMPILER_TLS)
#   ifdef __GNUC__
#     define USE_PTHREAD_SPECIFIC
      /* Empirically, as of gcc 3.3, USE_COMPILER_TLS doesn't work. */
#   else
#     define USE_COMPILER_TLS
#   endif
# endif

# if defined USE_HPUX_TLS
    --> Macro replaced by USE_COMPILER_TLS
# endif
# if (defined(GC_DGUX386_THREADS) || defined(GC_OSF1_THREADS) || \
      defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS)) || \
      defined(GC_NETBSD_THREADS) && !defined(USE_PTHREAD_SPECIFIC) || \
      defined(GC_OPENBSD_THREADS)
#   define USE_PTHREAD_SPECIFIC
# endif
# if defined(GC_DGUX386_THREADS) && !defined(_POSIX4A_DRAFT10_SOURCE)
#   define _POSIX4A_DRAFT10_SOURCE 1
# endif

# if defined(GC_DGUX386_THREADS) && !defined(_USING_POSIX4A_DRAFT10)
#   define _USING_POSIX4A_DRAFT10 1
# endif
# ifdef THREAD_LOCAL_ALLOC
#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_COMPILER_TLS)
#     include "private/specific.h"
#   endif

/* Note that these macros should be used only to get/set the GC_thread pointer.
 * We need to use both tls and pthread because we use the pthread_create function hook
 * to free the data for foreign threads.  When that doesn't happen, libgc could hold on
 * to old pthread_t values that get reused...
 */
#   if defined(USE_PTHREAD_SPECIFIC)
#     define GC_getspecific pthread_getspecific
#     define GC_setspecific pthread_setspecific
#     define GC_key_create pthread_key_create
      typedef pthread_key_t GC_key_t;
#   endif
#   if defined(USE_COMPILER_TLS)
      /* Note: sles9 gcc on powerpc gets confused by the define to set		*/
      /* GC_thread_tls and pthread_setspecific, so we actually use a static	*/
      /* inline function declared below that is equivalent to:			*/
      /*   #define GC_setspecific(key, v) \					*/
      /*     (GC_thread_tls = (v), pthread_setspecific((key), (v)))		*/
#     define GC_getspecific(x) (GC_thread_tls)
#     define GC_key_create pthread_key_create
      typedef pthread_key_t GC_key_t;
#   endif
# endif /* THREAD_LOCAL_ALLOC */
# include <pthread.h>
# include <sched.h>
# include <time.h>
# include <errno.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/types.h>
# include <sys/stat.h>
# include <fcntl.h>
# include <signal.h>
#if defined(GC_DARWIN_THREADS)
# include "private/darwin_semaphore.h"
#else
# include <semaphore.h>
#endif /* !GC_DARWIN_THREADS */

#if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS)
# include <sys/sysctl.h>
#endif /* GC_DARWIN_THREADS || GC_FREEBSD_THREADS */

#if defined(GC_NETBSD_THREADS) || defined(GC_OPENBSD_THREADS)
# include <sys/param.h>
# include <sys/sysctl.h>
#endif /* GC_NETBSD_THREADS || GC_OPENBSD_THREADS */
#if defined(GC_DGUX386_THREADS)
# include <sys/dg_sys_info.h>
# include <sys/_int_psem.h>
  /* sem_t is an uint in DG/UX */
  typedef unsigned int sem_t;
#endif /* GC_DGUX386_THREADS */
#ifdef GC_USE_LD_WRAP
#   define WRAP_FUNC(f) __wrap_##f
#   define REAL_FUNC(f) __real_##f
#else
#   define WRAP_FUNC(f) GC_##f
#   if !defined(GC_DGUX386_THREADS)
#     define REAL_FUNC(f) f
#   else /* GC_DGUX386_THREADS */
#     define REAL_FUNC(f) __d10_##f
#   endif /* GC_DGUX386_THREADS */
#   undef pthread_create
#   if !defined(GC_DARWIN_THREADS)
#     undef pthread_sigmask
#   endif
#   undef pthread_join
#   undef pthread_detach
#   if defined(GC_OSF1_THREADS) && defined(_PTHREAD_USE_MANGLED_NAMES_) \
       && !defined(_PTHREAD_USE_PTDNAM_)
      /* Restore the original mangled names on Tru64 UNIX. */
#     define pthread_create __pthread_create
#     define pthread_join __pthread_join
#     define pthread_detach __pthread_detach
#   endif
#endif
static GC_bool parallel_initialized = FALSE;

void GC_init_parallel();
# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)

/* We don't really support thread-local allocation with DBG_HDRS_ALL. */

/* Work around a dlopen issue (bug #75390); undef these to avoid	*/
/* redefinition warnings.						*/
#undef PACKAGE_BUGREPORT
#undef PACKAGE_NAME
#undef PACKAGE_STRING
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
#include "mono/utils/mono-compiler.h"
GC_key_t GC_thread_key;

#ifdef USE_COMPILER_TLS
__thread MONO_TLS_FAST void* GC_thread_tls;

/*
 * gcc errors out with /tmp/ccdPMFuq.s:2994: Error: symbol `.LTLS4' is already defined
 * if the inline is added on powerpc.
 */
#if !defined(__ppc__) && !defined(__powerpc__)
inline
#endif
static int GC_setspecific (GC_key_t key, void *value) {
    GC_thread_tls = value;
    return pthread_setspecific (key, value);
}
#endif
static GC_bool keys_initialized;

#ifdef MONO_DEBUGGER_SUPPORTED
#include "include/libgc-mono-debugger.h"
#endif
/* Recover the contents of the freelist array fl into the global one gfl. */
/* Note that the indexing scheme differs, in that gfl has finer size	   */
/* resolution, even if not all entries are used.			   */
/* We hold the allocator lock.						   */
static void return_freelists(ptr_t *fl, ptr_t *gfl)
{
    int i;
    ptr_t q, *qptr;
    size_t nwords;

    for (i = 1; i < NFREELISTS; ++i) {
	nwords = i * (GRANULARITY/sizeof(word));
	qptr = fl + i;
	q = *qptr;
	if ((word)q >= HBLKSIZE) {
	    if (gfl[nwords] == 0) {
		gfl[nwords] = q;
	    } else {
		/* Concatenate: walk to the end of fl[i], then append gfl. */
		for (; (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
		*qptr = gfl[nwords];
		gfl[nwords] = fl[i];
	    }
	}
	/* Clear fl[i], since the thread structure may hang around. */
	/* Do it in a way that is likely to trap if we access it.   */
	fl[i] = (ptr_t)HBLKSIZE;
    }
}
/* We statically allocate a single "size 0" object.  It is linked to	*/
/* itself, and is thus repeatedly reused for all size 0 allocation	*/
/* requests.  (Size 0 gcj allocation requests are incorrect, and	*/
/* we arrange for those to fault asap.)					*/
static ptr_t size_zero_object = (ptr_t)(&size_zero_object);
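
/* A minimal illustration of why this works (sketch only, assuming the	*/
/* usual obj_link freelist representation): popping the "size 0" free	*/
/* list yields the object itself and leaves the list head unchanged.	*/
#if 0
    ptr_t *fl0 = &size_zero_object;	/* hypothetical freelist slot	*/
    ptr_t result = *fl0;		/* pop the head...		*/
    *fl0 = obj_link(result);		/* ...which links to itself, so	*/
					/* *fl0 == result on every call	*/
#endif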
void GC_delete_gc_thread(pthread_t id, GC_thread gct);
void GC_destroy_thread_local(GC_thread p);

void GC_thread_deregister_foreign (void *data)
{
    GC_thread me = (GC_thread)data;
 /* GC_fprintf1( "\n\n\n\n --- Deregister %x ---\n\n\n\n\n", me->flags ); */
    if (me -> flags & FOREIGN_THREAD) {
	LOCK();
 /*	GC_fprintf0( "\n\n\n\n --- FOO ---\n\n\n\n\n" ); */
#if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
	GC_destroy_thread_local (me);
#endif
	GC_delete_gc_thread(me->id, me);
	UNLOCK();
    }
}
/* Each thread structure must be initialized.	*/
/* This call must be made from the new thread.	*/
/* Caller holds allocation lock.		*/
void GC_init_thread_local(GC_thread p)
{
    int i;

    if (!keys_initialized) {
	if (0 != GC_key_create(&GC_thread_key, GC_thread_deregister_foreign)) {
	    ABORT("Failed to create key for local allocator");
	}
	keys_initialized = TRUE;
    }
    if (0 != GC_setspecific(GC_thread_key, p)) {
	ABORT("Failed to set thread specific allocation pointers");
    }
    for (i = 1; i < NFREELISTS; ++i) {
	p -> ptrfree_freelists[i] = (ptr_t)1;
	p -> normal_freelists[i] = (ptr_t)1;
#	ifdef GC_GCJ_SUPPORT
	  p -> gcj_freelists[i] = (ptr_t)1;
#	endif
    }
    /* Set up the size 0 free lists.	*/
    p -> ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
    p -> normal_freelists[0] = (ptr_t)(&size_zero_object);
#   ifdef GC_GCJ_SUPPORT
      p -> gcj_freelists[0] = (ptr_t)(-1);
#   endif
}
#ifdef GC_GCJ_SUPPORT
  extern ptr_t * GC_gcjobjfreelist;
#endif
/* We hold the allocator lock.	*/
void GC_destroy_thread_local(GC_thread p)
{
    /* We currently only do this from the thread itself or from	*/
    /* the fork handler for a child process.				*/
#   ifndef HANDLE_FORK
      GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);
#   endif
    return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
    return_freelists(p -> normal_freelists, GC_objfreelist);
#   ifdef GC_GCJ_SUPPORT
      return_freelists(p -> gcj_freelists, GC_gcjobjfreelist);
#   endif
}
extern GC_PTR GC_generic_malloc_many();

GC_PTR GC_local_malloc(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes),0)) {
        return(GC_malloc(bytes));
    } else {
	int index = INDEX_FROM_BYTES(bytes);
	ptr_t * my_fl;
	ptr_t my_entry;
#	if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
	GC_key_t k = GC_thread_key;
#	endif
	void * tsd;

#	if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
	if (EXPECT(0 == k, 0)) {
	    /* This can happen if we get called when the world is	*/
	    /* being initialized.  Whether we can actually complete	*/
	    /* the initialization then is unclear.			*/
	    GC_init_parallel();
	    k = GC_thread_key;
	}
#	endif
	tsd = GC_getspecific(GC_thread_key);
#	ifdef GC_ASSERTIONS
	  LOCK();
	  GC_ASSERT(tsd == (void *)GC_lookup_thread(pthread_self()));
	  UNLOCK();
#	endif
	my_fl = ((GC_thread)tsd) -> normal_freelists + index;
	my_entry = *my_fl;
	if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
	    ptr_t next = obj_link(my_entry);
	    GC_PTR result = (GC_PTR)my_entry;
	    *my_fl = next;
	    obj_link(my_entry) = 0;
	    PREFETCH_FOR_WRITE(next);
	    return result;
	} else if ((word)my_entry - 1 < DIRECT_GRANULES) {
	    *my_fl = my_entry + index + 1;
	    return GC_malloc(bytes);
	} else {
	    GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
	    if (*my_fl == 0) return GC_oom_fn(bytes);
	    return GC_local_malloc(bytes);
	}
    }
}
GC_PTR GC_local_malloc_atomic(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc_atomic(bytes));
    } else {
	int index = INDEX_FROM_BYTES(bytes);
	ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
	                -> ptrfree_freelists + index;
	ptr_t my_entry = *my_fl;

	if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
	    GC_PTR result = (GC_PTR)my_entry;
	    *my_fl = obj_link(my_entry);
	    return result;
	} else if ((word)my_entry - 1 < DIRECT_GRANULES) {
	    *my_fl = my_entry + index + 1;
	    return GC_malloc_atomic(bytes);
	} else {
	    GC_generic_malloc_many(BYTES_FROM_INDEX(index), PTRFREE, my_fl);
	    /* *my_fl is updated while the collector is excluded;	*/
	    /* the free list is always visible to the collector as	*/
	    /* such.							*/
	    if (*my_fl == 0) return GC_oom_fn(bytes);
	    return GC_local_malloc_atomic(bytes);
	}
    }
}
#ifdef GC_GCJ_SUPPORT

#include "include/gc_gcj.h"

#ifdef GC_ASSERTIONS
  extern GC_bool GC_gcj_malloc_initialized;
#endif

extern int GC_gcj_kind;
GC_PTR GC_local_gcj_malloc(size_t bytes,
			   void * ptr_to_struct_containing_descr)
{
    GC_ASSERT(GC_gcj_malloc_initialized);
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
    } else {
	int index = INDEX_FROM_BYTES(bytes);
	ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
	                -> gcj_freelists + index;
	ptr_t my_entry = *my_fl;
	if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
	    GC_PTR result = (GC_PTR)my_entry;
	    GC_ASSERT(!GC_incremental);
	    /* We assert that any concurrent marker will stop us.	*/
	    /* Thus it is impossible for a mark procedure to see the	*/
	    /* allocation of the next object, but to see this object	*/
	    /* still containing a free list pointer.  Otherwise the	*/
	    /* marker might find a random "mark descriptor".		*/
	    *(volatile ptr_t *)my_fl = obj_link(my_entry);
	    /* We must update the freelist before we store the pointer.	*/
	    /* Otherwise a GC at this point would see a corrupted	*/
	    /* free list.						*/
	    /* A memory barrier is probably never needed, since the	*/
	    /* action of stopping this thread will cause prior writes	*/
	    /* to complete.						*/
	    GC_ASSERT(((void * volatile *)result)[1] == 0);
	    *(void * volatile *)result = ptr_to_struct_containing_descr;
	    return result;
	} else if ((word)my_entry - 1 < DIRECT_GRANULES) {
	    if (!GC_incremental) *my_fl = my_entry + index + 1;
		/* In the incremental case, we always have to take this */
		/* path.  Thus we leave the counter alone.		*/
	    return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
	} else {
	    GC_generic_malloc_many(BYTES_FROM_INDEX(index), GC_gcj_kind, my_fl);
	    if (*my_fl == 0) return GC_oom_fn(bytes);
	    return GC_local_gcj_malloc(bytes, ptr_to_struct_containing_descr);
	}
    }
}
/* Similar to GC_local_gcj_malloc, but the size is in words, and we don't */
/* adjust it.  The size is assumed to be such that it can be		   */
/* allocated as a small object.						   */
void * GC_local_gcj_fast_malloc(size_t lw, void * ptr_to_struct_containing_descr)
{
    ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
	            -> gcj_freelists + lw;
    ptr_t my_entry = *my_fl;

    GC_ASSERT(GC_gcj_malloc_initialized);

    if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
	GC_PTR result = (GC_PTR)my_entry;
	GC_ASSERT(!GC_incremental);
	/* We assert that any concurrent marker will stop us.		*/
	/* Thus it is impossible for a mark procedure to see the	*/
	/* allocation of the next object, but to see this object	*/
	/* still containing a free list pointer.  Otherwise the	*/
	/* marker might find a random "mark descriptor".		*/
	*(volatile ptr_t *)my_fl = obj_link(my_entry);
	/* We must update the freelist before we store the pointer.	*/
	/* Otherwise a GC at this point would see a corrupted		*/
	/* free list.							*/
	/* A memory barrier is probably never needed, since the	*/
	/* action of stopping this thread will cause prior writes	*/
	/* to complete.							*/
	GC_ASSERT(((void * volatile *)result)[1] == 0);
	*(void * volatile *)result = ptr_to_struct_containing_descr;
	return result;
    } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
	if (!GC_incremental) *my_fl = my_entry + lw + 1;
	    /* In the incremental case, we always have to take this	*/
	    /* path.  Thus we leave the counter alone.			*/
	return GC_gcj_fast_malloc(lw, ptr_to_struct_containing_descr);
    } else {
	GC_generic_malloc_many(BYTES_FROM_INDEX(lw), GC_gcj_kind, my_fl);
	if (*my_fl == 0) return GC_oom_fn(BYTES_FROM_INDEX(lw));
	return GC_local_gcj_fast_malloc(lw, ptr_to_struct_containing_descr);
    }
}
#endif /* GC_GCJ_SUPPORT */
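
/* A summary sketch of the freelist-slot encoding the fast paths above	*/
/* rely on (illustrative only; names as in the code above):		*/
#if 0
    my_entry = *my_fl;
    if ((word)my_entry >= HBLKSIZE) {
	/* A real pointer: pop one object off the local free list.	  */
    } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
	/* A small counter: bump it and defer to the global allocator.	  */
	*my_fl = my_entry + index + 1;
    } else {
	/* Counter exhausted: refill the list via GC_generic_malloc_many. */
    }
#endif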
# else  /* !THREAD_LOCAL_ALLOC && !DBG_HDRS_ALL */

#   define GC_destroy_thread_local(t)

# endif /* !THREAD_LOCAL_ALLOC */
/*
To make sure that we're using LinuxThreads and not some other thread
package, we generate a dummy reference to `pthread_kill_other_threads_np'
(was `__pthread_initial_thread_bos' but that disappeared),
which is a symbol defined in LinuxThreads, but (hopefully) not in other
thread packages.

We no longer do this, since this code is now portable enough that it might
actually work for something else.
*/
void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;
long GC_nprocs = 1;	/* Number of processors.  We may not have	*/
			/* access to all of them, but this is as good	*/
			/* a guess as any ...				*/
#ifdef PARALLEL_MARK

# ifndef MAX_MARKERS
#   define MAX_MARKERS 16
# endif

static ptr_t marker_sp[MAX_MARKERS] = {0};

void * GC_mark_thread(void * id)
{
  word my_mark_no = 0;

  marker_sp[(word)id] = GC_approx_sp();
  for (;; ++my_mark_no) {
    /* GC_mark_no is passed only to allow GC_help_marker to terminate	*/
    /* promptly.  This is important if it were called from the signal	*/
    /* handler or from the GC lock acquisition code.  Under Linux, it's	*/
    /* not safe to call it from a signal handler, since it uses mutexes	*/
    /* and condition variables.  Since it is called only here, the	*/
    /* argument is unnecessary.						*/
    if (my_mark_no < GC_mark_no || my_mark_no > GC_mark_no + 2) {
	/* Resynchronize if we get far off, e.g. because GC_mark_no	*/
	/* wrapped.							*/
	my_mark_no = GC_mark_no;
    }
#   ifdef DEBUG_THREADS
      GC_printf1("Starting mark helper for mark number %ld\n", my_mark_no);
#   endif
    GC_help_marker(my_mark_no);
  }
}
extern long GC_markers;		/* Number of mark threads we would	*/
				/* like to have.  Includes the		*/
				/* initiating thread.			*/

pthread_t GC_mark_threads[MAX_MARKERS];
#define PTHREAD_CREATE REAL_FUNC(pthread_create)

static void start_mark_threads()
{
    unsigned i;
    pthread_attr_t attr;

    if (GC_markers > MAX_MARKERS) {
	WARN("Limiting number of mark threads\n", 0);
	GC_markers = MAX_MARKERS;
    }
    if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");

    if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
	ABORT("pthread_attr_setdetachstate failed");

#   if defined(HPUX) || defined(GC_DGUX386_THREADS)
      /* Default stack size is usually too small: fix it. */
      /* Otherwise marker threads or GC may run out of	   */
      /* space.						   */
#     define MIN_STACK_SIZE (8*HBLKSIZE*sizeof(word))
      {
	size_t old_size;

	if (pthread_attr_getstacksize(&attr, &old_size) != 0)
	    ABORT("pthread_attr_getstacksize failed\n");
	if (old_size < MIN_STACK_SIZE) {
	    if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
		ABORT("pthread_attr_setstacksize failed\n");
	}
      }
#   endif /* HPUX || GC_DGUX386_THREADS */
#   ifdef CONDPRINT
      if (GC_print_stats) {
	GC_printf1("Starting %ld marker threads\n", GC_markers - 1);
      }
#   endif
    for (i = 0; i < GC_markers - 1; ++i) {
      if (0 != PTHREAD_CREATE(GC_mark_threads + i, &attr,
			      GC_mark_thread, (void *)(word)i)) {
	WARN("Marker thread creation failed, errno = %ld.\n", errno);
      }
    }
}
#else  /* !PARALLEL_MARK */

static __inline__ void start_mark_threads()
{
}

#endif /* !PARALLEL_MARK */
GC_bool GC_thr_initialized = FALSE;

volatile GC_thread GC_threads[THREAD_TABLE_SZ];
/*
 * gcc-3.3.6 miscompiles the &GC_thread_key+sizeof(&GC_thread_key) expression so
 * put it into a separate function.
 */
# if defined(__GNUC__) && defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
static __attribute__((noinline)) unsigned char* get_gc_thread_key_addr GC_PROTO((void))
{
    return (unsigned char*)&GC_thread_key;
}

void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      GC_push_all((ptr_t)get_gc_thread_key_addr(),
	  (ptr_t)(get_gc_thread_key_addr())+sizeof(&GC_thread_key));
#   endif
}

#else

void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      GC_push_all((ptr_t)(&GC_thread_key),
	  (ptr_t)(&GC_thread_key)+sizeof(&GC_thread_key));
#   endif
}

#endif
#ifdef THREAD_LOCAL_ALLOC
/* We must explicitly mark ptrfree and gcj free lists, since the free	*/
/* list links wouldn't otherwise be found.  We also set them in the	*/
/* normal free lists, since that involves touching less memory than if	*/
/* we scanned them normally.						*/
void GC_mark_thread_local_free_lists(void)
{
    int i, j;
    GC_thread p;
    ptr_t q;

    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
      for (p = GC_threads[i]; 0 != p; p = p -> next) {
	for (j = 1; j < NFREELISTS; ++j) {
	  q = p -> ptrfree_freelists[j];
	  if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
	  q = p -> normal_freelists[j];
	  if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#	  ifdef GC_GCJ_SUPPORT
	    q = p -> gcj_freelists[j];
	    if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#	  endif /* GC_GCJ_SUPPORT */
	}
      }
    }
}
#endif /* THREAD_LOCAL_ALLOC */
static struct GC_Thread_Rep first_thread;

/* Add a thread to GC_threads.  We assume it wasn't already there.	*/
/* Caller holds allocation lock.					*/
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
	result = &first_thread;
	first_thread_used = TRUE;
    } else {
        result = (struct GC_Thread_Rep *)
		 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
#ifdef PLATFORM_ANDROID
    result -> kernel_id = gettid();
#endif
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there.	*/
/* (The code intentionally traps if it wasn't.)			*/
/* Caller holds allocation lock.				*/
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
	prev = p;
	p = p -> next;
    }
    if (prev == 0) {
	GC_threads[hv] = p -> next;
    } else {
	prev -> next = p -> next;
    }

#ifdef MONO_DEBUGGER_SUPPORTED
    if (gc_thread_vtable && gc_thread_vtable->thread_exited)
	gc_thread_vtable->thread_exited (id, &p->stop_info.stack_ptr);
#endif

#ifdef GC_DARWIN_THREADS
    mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread);
#endif

    GC_INTERNAL_FREE(p);
}
/* If a thread has been joined, but we have not yet		*/
/* been notified, then there may be more than one thread	*/
/* in the table with the same pthread id.			*/
/* This is OK, but we need a way to delete a specific one.	*/
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
	prev = p;
	p = p -> next;
    }
    if (prev == 0) {
	GC_threads[hv] = p -> next;
    } else {
	prev -> next = p -> next;
    }

#ifdef GC_DARWIN_THREADS
    mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread);
#endif

    GC_INTERNAL_FREE(p);
}
/* Return a GC_thread corresponding to a given pthread_t.	*/
/* Returns 0 if it's not there.					*/
/* Caller holds allocation lock or otherwise inhibits		*/
/* updates.							*/
/* If there is more than one thread with the given id we	*/
/* return the most recent one.					*/
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}
int GC_thread_is_registered (void)
{
    void *ptr;

    LOCK();
    ptr = (void *)GC_lookup_thread(pthread_self());
    UNLOCK();

    return ptr ? 1 : 0;
}
#ifdef HANDLE_FORK
/* Remove all entries from the GC_threads table, except the	*/
/* one for the current thread.  We need to do this in the child	*/
/* process after a fork(), since only the current thread	*/
/* survives in the child.					*/
void GC_remove_all_threads_but_me(void)
{
    pthread_t self = pthread_self();
    int hv;
    GC_thread p, next, me;

    for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
      me = 0;
      for (p = GC_threads[hv]; 0 != p; p = next) {
	next = p -> next;
	if (p -> id == self) {
	  me = p;
	  p -> next = 0;
	} else {
#	  ifdef THREAD_LOCAL_ALLOC
	    if (!(p -> flags & FINISHED)) {
	      GC_destroy_thread_local(p);
	    }
#	  endif /* THREAD_LOCAL_ALLOC */
	  if (p != &first_thread) GC_INTERNAL_FREE(p);
	}
      }
      GC_threads[hv] = me;
    }
}
#endif /* HANDLE_FORK */
#ifdef USE_PROC_FOR_LIBRARIES
int GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
{
    int i;
    GC_thread p;

#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers; ++i) {
	if (marker_sp[i] > lo && marker_sp[i] < hi) return 1;
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
	if (0 != p -> stack_end) {
#	  ifdef STACK_GROWS_UP
	    if (p -> stack_end >= lo && p -> stack_end < hi) return 1;
#	  else /* STACK_GROWS_DOWN */
	    if (p -> stack_end > lo && p -> stack_end <= hi) return 1;
#	  endif
	}
      }
    }
    return 0;
}
#endif /* USE_PROC_FOR_LIBRARIES */
#ifdef GC_LINUX_THREADS
/* Return the number of processors, or a value <= 0 if it can't be	*/
/* determined.								*/
int GC_get_nprocs()
{
    /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that	*/
    /* appears to be buggy in many cases.				*/
    /* We look for lines "cpu<n>" in /proc/stat.			*/
#   define STAT_BUF_SIZE 4096
#   define STAT_READ read
	/* If read is wrapped, this may need to be redefined to call	*/
	/* the real one.						*/
    char stat_buf[STAT_BUF_SIZE];
    int f;
    word result = 1;
	/* Some old kernels only have a single "cpu nnnn ..."	*/
	/* entry in /proc/stat.  We identify those as		*/
	/* uniprocessors.					*/
    size_t i, len = 0;

    f = open("/proc/stat", O_RDONLY);
    if (f < 0 || (len = STAT_READ(f, stat_buf, STAT_BUF_SIZE)) < 100) {
	WARN("Couldn't read /proc/stat\n", 0);
	return -1;
    }
    for (i = 0; i < len - 100; ++i) {
	if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
	    && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
	    int cpu_no = atoi(stat_buf + i + 4);
	    if (cpu_no >= result) result = cpu_no + 1;
	}
    }
    close(f);
    return result;
}
#endif /* GC_LINUX_THREADS */
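
/* Note: on current kernels the sysconf approach dismissed above is	*/
/* usually adequate.  A minimal sketch, assuming _SC_NPROCESSORS_ONLN	*/
/* is available:							*/
#if 0
    long nprocs = sysconf(_SC_NPROCESSORS_ONLN);
    if (nprocs < 1) nprocs = 1;		/* fall back to one CPU on error */
#endif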
/* We hold the GC lock.  Wait until an in-progress GC has finished.	*/
/* Repeatedly RELEASES GC LOCK in order to wait.			*/
/* If wait_for_all is true, then we exit with the GC lock held and no	*/
/* collection in progress; otherwise we just wait for the current GC	*/
/* to finish.								*/
extern GC_bool GC_collection_in_progress();
void GC_wait_for_gc_completion(GC_bool wait_for_all)
{
    if (GC_incremental && GC_collection_in_progress()) {
	int old_gc_no = GC_gc_no;

	/* Make sure that no part of our stack is still on the mark stack, */
	/* since it's about to be unmapped.				    */
	while (GC_incremental && GC_collection_in_progress()
	       && (wait_for_all || old_gc_no == GC_gc_no)) {
	    ENTER_GC();
	    GC_in_thread_creation = TRUE;
	    GC_collect_a_little_inner(1);
	    GC_in_thread_creation = FALSE;
	    EXIT_GC();
	    UNLOCK();
	    sched_yield();
	    LOCK();
	}
    }
}
#ifdef HANDLE_FORK
/* Procedures called before and after a fork.  The goal here is to make */
/* it safe to call GC_malloc() in a forked child.  It's unclear that is */
/* attainable, since the single UNIX spec seems to imply that one	 */
/* should only call async-signal-safe functions, and we probably can't	 */
/* quite guarantee that.  But we give it our best shot.  (That same	 */
/* spec also implies that it's not safe to call the system malloc	 */
/* between fork() and exec().  Thus we're doing no worse than it.)	 */
/* Called before a fork() */
void GC_fork_prepare_proc(void)
{
    /* Acquire all relevant locks, so that after releasing the locks	*/
    /* the child will see a consistent state in which monitor		*/
    /* invariants hold.  Unfortunately, we can't acquire libc locks	*/
    /* we might need, and there seems to be no guarantee that libc	*/
    /* must install a suitable fork handler.				*/
    /* Wait for an ongoing GC to finish, since we can't finish it in	*/
    /* the (one remaining thread in) the child.				*/
    LOCK();
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      GC_wait_for_reclaim();
#   endif
    GC_wait_for_gc_completion(TRUE);
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      GC_acquire_mark_lock();
#   endif
}
/* Called in parent after a fork() */
void GC_fork_parent_proc(void)
{
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      GC_release_mark_lock();
#   endif
    UNLOCK();
}
/* Called in child after a fork() */
void GC_fork_child_proc(void)
{
    /* Clean up the thread table, so that just our thread is left. */
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      GC_release_mark_lock();
#   endif
    GC_remove_all_threads_but_me();
#   ifdef PARALLEL_MARK
      /* Turn off parallel marking in the child, since we are probably	 */
      /* just going to exec, and we would have to restart mark threads. */
      GC_markers = 1;
      GC_parallel = FALSE;
#   endif /* PARALLEL_MARK */
    UNLOCK();
}
#endif /* HANDLE_FORK */
#if defined(GC_DGUX386_THREADS)
/* Return the number of processors, or a value <= 0 if it can't be	*/
/* determined.  <takis@XFree86.Org>					*/
int GC_get_nprocs()
{
    int numCpus;
    struct dg_sys_info_pm_info pm_sysinfo;
    int status = 0;

    status = dg_sys_info((long int *) &pm_sysinfo,
	DG_SYS_INFO_PM_INFO_TYPE, DG_SYS_INFO_PM_CURRENT_VERSION);
    if (status < 0)
	/* set -1 for error */
	numCpus = -1;
    else
	/* Active CPUs */
	numCpus = pm_sysinfo.idle_vp_count;

#   ifdef DEBUG_THREADS
      GC_printf1("Number of active CPUs in this system: %d\n", numCpus);
#   endif
    return(numCpus);
}
#endif /* GC_DGUX386_THREADS */
/* We hold the allocation lock.	*/
void GC_thr_init()
{
#   ifndef GC_DARWIN_THREADS
      int dummy;
#   endif
    GC_thread t;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;

#   ifdef HANDLE_FORK
      /* Prepare for a possible fork.	*/
      pthread_atfork(GC_fork_prepare_proc, GC_fork_parent_proc,
		     GC_fork_child_proc);
#   endif /* HANDLE_FORK */
    /* Add the initial thread, so we can stop it.	*/
    t = GC_new_thread(pthread_self());
#   ifdef GC_DARWIN_THREADS
      t -> stop_info.mach_thread = mach_thread_self();
#   else
      t -> stop_info.stack_ptr = (ptr_t)(&dummy);
#   endif
    t -> flags = DETACHED | MAIN_THREAD;
#ifdef MONO_DEBUGGER_SUPPORTED
    if (gc_thread_vtable && gc_thread_vtable->thread_created)
#   ifdef GC_DARWIN_THREADS
      gc_thread_vtable->thread_created (mach_thread_self (), &t->stop_info.stack_ptr);
#   else
      gc_thread_vtable->thread_created (pthread_self (), &t->stop_info.stack_ptr);
#   endif
#endif
    /* Set GC_nprocs.	*/
    {
	char * nprocs_string = GETENV("GC_NPROCS");
	GC_nprocs = -1;
	if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
    }
    if (GC_nprocs <= 0) {
#     if defined(GC_HPUX_THREADS)
	GC_nprocs = pthread_num_processors_np();
#     endif
#     if defined(GC_OSF1_THREADS) || defined(GC_AIX_THREADS)
	GC_nprocs = sysconf(_SC_NPROCESSORS_ONLN);
	if (GC_nprocs <= 0) GC_nprocs = 1;
#     endif
#     if defined(GC_IRIX_THREADS)
	GC_nprocs = sysconf(_SC_NPROC_ONLN);
	if (GC_nprocs <= 0) GC_nprocs = 1;
#     endif
#     if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS) || defined(GC_NETBSD_THREADS) || defined(GC_OPENBSD_THREADS)
	int ncpus = 1;
	size_t len = sizeof(ncpus);
	sysctl((int[2]) {CTL_HW, HW_NCPU}, 2, &ncpus, &len, NULL, 0);
	GC_nprocs = ncpus;
#     endif
#     if defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS)
	GC_nprocs = GC_get_nprocs();
#     endif
    }
    if (GC_nprocs <= 0) {
	WARN("GC_get_nprocs() returned %ld\n", GC_nprocs);
	GC_nprocs = 2;
#     ifdef PARALLEL_MARK
	GC_markers = 1;
#     endif
    } else {
#     ifdef PARALLEL_MARK
      {
	char * markers_string = GETENV("GC_MARKERS");
	if (markers_string != NULL) {
	    GC_markers = atoi(markers_string);
	} else {
	    GC_markers = GC_nprocs;
	    if (GC_markers > MAX_MARKERS)
		GC_markers = MAX_MARKERS;
	}
      }
#     endif
    }
#   ifdef PARALLEL_MARK
#     ifdef CONDPRINT
	if (GC_print_stats) {
	    GC_printf2("Number of processors = %ld, "
		       "number of marker threads = %ld\n", GC_nprocs, GC_markers);
	}
#     endif
      if (GC_markers == 1) {
	GC_parallel = FALSE;
#	ifdef CONDPRINT
	  if (GC_print_stats) {
	    GC_printf0("Single marker thread, turning off parallel marking\n");
	  }
#	endif
      } else {
	GC_parallel = TRUE;
	/* Disable true incremental collection, but generational is OK. */
	GC_time_limit = GC_TIME_UNLIMITED;
      }
      /* If we are using a parallel marker, actually start helper threads. */
      if (GC_parallel) start_mark_threads();
#   endif
}
/* Perform all initializations, including those that	*/
/* may require allocation.				*/
/* Called without allocation lock.			*/
/* Must be called before a second thread is created.	*/
void GC_init_parallel()
{
    if (parallel_initialized) return;
    parallel_initialized = TRUE;

    /* GC_init() calls us back, so set flag first.	*/
    if (!GC_is_initialized) GC_init();
    /* Initialize thread local free lists if used.	*/
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      LOCK();
      GC_init_thread_local(GC_lookup_thread(pthread_self()));
      UNLOCK();
#   endif
}
#if !defined(GC_DARWIN_THREADS) && !defined(GC_OPENBSD_THREADS)
int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
	fudged_set = *set;
	sigdelset(&fudged_set, SIG_SUSPEND);
	set = &fudged_set;
    }
    return(REAL_FUNC(pthread_sigmask)(how, set, oset));
}
#endif /* !GC_DARWIN_THREADS && !GC_OPENBSD_THREADS */
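
/* Illustration (sketch only) of why the fudging above matters: a	*/
/* client that blocks "all" signals would otherwise also block		*/
/* SIG_SUSPEND and become unstoppable by the collector.		*/
#if 0
    sigset_t all;
    sigfillset(&all);
    pthread_sigmask(SIG_BLOCK, &all, NULL);	/* wrapped: SIG_SUSPEND	*/
						/* stays deliverable	*/
#endif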
/* Wrappers for functions that are likely to block for an appreciable	*/
/* length of time.  Must be called in pairs, if at all.			*/
/* Nothing much beyond the system call itself should be executed	*/
/* between these.							*/

void GC_start_blocking(void) {
#   define SP_SLOP 128
    GC_thread me;
    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(!(me -> thread_blocked));
#   ifdef SPARC
      me -> stop_info.stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
#     ifndef GC_DARWIN_THREADS
	me -> stop_info.stack_ptr = (ptr_t)GC_approx_sp();
#     endif
#   endif
#   ifdef IA64
      me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack() + SP_SLOP;
#   endif
    /* Add some slop to the stack pointer, since the wrapped call may	*/
    /* end up pushing more callee-save registers.			*/
#   ifndef GC_DARWIN_THREADS
#     ifdef STACK_GROWS_UP
	me -> stop_info.stack_ptr += SP_SLOP;
#     else
	me -> stop_info.stack_ptr -= SP_SLOP;
#     endif
#   endif
    me -> thread_blocked = TRUE;
    UNLOCK();
}
void GC_end_blocking(void) {
    GC_thread me;
    LOCK();	/* This will block if the world is stopped.	*/
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(me -> thread_blocked);
    me -> thread_blocked = FALSE;
    UNLOCK();
}
#if defined(GC_DGUX386_THREADS)
#define __d10_sleep sleep
#endif /* GC_DGUX386_THREADS */

/* A wrapper for the standard C sleep function	*/
int WRAP_FUNC(sleep) (unsigned int seconds)
{
    int result;

    GC_start_blocking();
    result = REAL_FUNC(sleep)(seconds);
    GC_end_blocking();
    return result;
}
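
/* The same pairing discipline applies to any blocking call a client	*/
/* wraps itself.  A hypothetical wrapper (sketch, not part of the	*/
/* exported interface):							*/
#if 0
ssize_t GC_wrapped_read(int fd, void *buf, size_t len)
{
    ssize_t result;

    GC_start_blocking();	/* publish a conservative stack pointer	*/
    result = read(fd, buf, len);
    GC_end_blocking();		/* rejoin the stoppable world		*/
    return result;
}
#endif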
struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;		/* 1 ==> in our thread table, but	*/
				/* parent hasn't yet noticed.		*/
};
/* Called at thread exit.				*/
/* Never called for main thread.  That's OK, since it	*/
/* results in at most a tiny one-time leak.  And	*/
/* linuxthreads doesn't reclaim the main threads	*/
/* resources or id anyway.				*/
void GC_thread_exit_proc(void *arg)
{
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_destroy_thread_local(me);
    if (me -> flags & DETACHED) {
#	ifdef THREAD_LOCAL_ALLOC
	  /* NULL out the tls key to prevent the dtor function from being called */
	  if (0 != GC_setspecific(GC_thread_key, NULL))
	    ABORT("Failed to set thread specific allocation pointers");
#	endif
	GC_delete_thread(pthread_self());
    } else {
	me -> flags |= FINISHED;
    }
#   if defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_SPECIFIC) \
       && !defined(USE_COMPILER_TLS) && !defined(DBG_HDRS_ALL)
      GC_remove_specific(GC_thread_key);
#   endif
    /* The following may run the GC from "nonexistent" thread.	*/
    GC_wait_for_gc_completion(FALSE);
    UNLOCK();
}
int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id	*/
    /* can't have been recycled by pthreads.				*/
    UNLOCK();
    result = REAL_FUNC(pthread_join)(thread, retval);
# if defined (GC_FREEBSD_THREADS)
    /* On FreeBSD, the wrapped pthread_join() sometimes returns (what
       appears to be) a spurious EINTR which caused the test and real code
       to gratuitously fail.  Having looked at system pthread library source
       code, I see how this return code may be generated.  In one path of
       code, pthread_join() just returns the errno setting of the thread
       being joined.  This does not match the POSIX specification or the
       local man pages, thus I have taken the liberty to catch this one
       spurious return value, properly conditionalized on GC_FREEBSD_THREADS. */
    if (result == EINTR) result = 0;
# endif
    if (result == 0) {
	LOCK();
	/* Here the pthread thread id may have been recycled. */
	GC_delete_gc_thread(thread, thread_gc_id);
	UNLOCK();
    }
    return result;
}
int
WRAP_FUNC(pthread_detach)(pthread_t thread)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    UNLOCK();
    result = REAL_FUNC(pthread_detach)(thread);
    if (result == 0) {
	LOCK();
	thread_gc_id -> flags |= DETACHED;
	/* Here the pthread thread id may have been recycled. */
	if (thread_gc_id -> flags & FINISHED) {
	    GC_delete_gc_thread(thread, thread_gc_id);
	}
	UNLOCK();
    }
    return result;
}
GC_bool GC_in_thread_creation = FALSE;

typedef void *(*ThreadStartFn)(void *);
void * GC_start_routine_head(void * arg, void *base_addr,
			     ThreadStartFn *start, void **start_arg )
{
    struct start_info * si = arg;
    GC_thread me;
    pthread_t my_pthread;

    my_pthread = pthread_self();
#   ifdef DEBUG_THREADS
      GC_printf1("Starting thread 0x%lx\n", my_pthread);
      GC_printf1("pid = %ld\n", (long) getpid());
      GC_printf1("sp = 0x%lx\n", (long) &arg);
#   endif
    LOCK();
    GC_in_thread_creation = TRUE;
    me = GC_new_thread(my_pthread);
    GC_in_thread_creation = FALSE;
#ifdef GC_DARWIN_THREADS
    me -> stop_info.mach_thread = mach_thread_self();
#else
    me -> stop_info.stack_ptr = 0;
#endif
    me -> flags = si -> flags;
    /* me -> stack_end = GC_linux_stack_base(); -- currently (11/99)	*/
    /* doesn't work because the stack base in /proc/self/stat is the	*/
    /* one for the main thread.  There is a strong argument that that's	*/
    /* a kernel bug, but a pervasive one.				*/
#   ifdef STACK_GROWS_DOWN
      me -> stack_end = (ptr_t)(((word)(base_addr) + (GC_page_size - 1))
			        & ~(GC_page_size - 1));
#     ifndef GC_DARWIN_THREADS
	me -> stop_info.stack_ptr = me -> stack_end - 0x10;
#     endif
      /* Needs to be plausible, since an asynchronous stack mark	*/
      /* should not crash.						*/
#   else
      me -> stack_end = (ptr_t)((word)(base_addr) & ~(GC_page_size - 1));
      me -> stop_info.stack_ptr = me -> stack_end + 0x10;
#   endif
    /* This is dubious, since we may be more than a page into the stack, */
    /* and hence skip some of it, though it's not clear that matters.	 */
#   ifdef IA64
      me -> backing_store_end = (ptr_t)
			(GC_save_regs_in_stack() & ~(GC_page_size - 1));
      /* This is also < 100% convincing.  We should also read this	*/
      /* from /proc, but the hook to do so isn't there yet.		*/
#   endif /* IA64 */
#ifdef MONO_DEBUGGER_SUPPORTED
    if (gc_thread_vtable && gc_thread_vtable->thread_created)
#   ifdef GC_DARWIN_THREADS
      gc_thread_vtable->thread_created (mach_thread_self(), &me->stop_info.stack_ptr);
#   else
      gc_thread_vtable->thread_created (my_pthread, &me->stop_info.stack_ptr);
#   endif
#endif
    UNLOCK();

    if (start) *start = si -> start_routine;
    if (start_arg) *start_arg = si -> arg;

    if (!(si->flags & FOREIGN_THREAD))
	sem_post(&(si -> registered));	/* Last action on si.	*/
					/* OK to deallocate.	*/
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      LOCK();
      GC_init_thread_local(me);
      UNLOCK();
#   endif

    return me;
}
int GC_thread_register_foreign (void *base_addr)
{
    struct start_info si = { 0, }; /* stacked for legibility & locking */
    GC_thread me;

#   ifdef DEBUG_THREADS
      GC_printf1( "GC_thread_register_foreign %p\n", &si );
#   endif

    si.flags = FOREIGN_THREAD;

    if (!parallel_initialized) GC_init_parallel();

    if (!GC_thr_initialized) GC_thr_init();

    me = GC_start_routine_head(&si, base_addr, NULL, NULL);

    return me != NULL;
}
void * GC_start_routine(void * arg)
{
    int dummy;
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    ThreadStartFn start;
    void *start_arg;

    me = GC_start_routine_head (arg, &dummy, &start, &start_arg);

    pthread_cleanup_push(GC_thread_exit_proc, 0);
#   ifdef DEBUG_THREADS
      GC_printf1("start_routine = 0x%lx\n", start);
#   endif
    result = (*start)(start_arg);
#   ifdef DEBUG_THREADS
      GC_printf1("Finishing thread 0x%x\n", pthread_self());
#   endif
    me -> status = result;
    pthread_cleanup_pop(1);
    /* Cleanup acquires lock, ensuring that we can't exit		*/
    /* while a collection that thinks we're alive is trying to stop	*/
    /* us.								*/
    return(result);
}
int
WRAP_FUNC(pthread_create)(pthread_t *new_thread,
			  const pthread_attr_t *attr,
			  void *(*start_routine)(void *), void *arg)
{
    int result;
    int detachstate;
    word my_flags = 0;
    struct start_info * si;
	/* This is otherwise saved only in an area mmapped by the thread */
	/* library, which isn't visible to the collector.		  */

    /* We resist the temptation to muck with the stack size here,	*/
    /* even if the default is unreasonably small.  That's the client's	*/
    /* responsibility.							*/

    LOCK();
    si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info),
						 NORMAL);
    UNLOCK();
    if (!parallel_initialized) GC_init_parallel();
    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
#   ifdef GC_ASSERTIONS
      {
	size_t stack_size;
	if (NULL == attr) {
	    pthread_attr_t my_attr;
	    pthread_attr_init(&my_attr);
	    pthread_attr_getstacksize(&my_attr, &stack_size);
	} else {
	    pthread_attr_getstacksize(attr, &stack_size);
	}
#       ifdef PARALLEL_MARK
	  GC_ASSERT(stack_size >= (8*HBLKSIZE*sizeof(word)));
#       else
	  /* FreeBSD-5.3/Alpha: default pthread stack is 64K,	*/
	  /* HBLKSIZE=8192, sizeof(word)=8			*/
	  GC_ASSERT(stack_size >= 65536);
#       endif
	/* Our threads may need to do some work for the GC.	*/
	/* Ridiculously small threads won't work, and they	*/
	/* probably wouldn't work anyway.			*/
      }
#   endif
    if (NULL == attr) {
	detachstate = PTHREAD_CREATE_JOINABLE;
    } else {
	pthread_attr_getdetachstate(attr, &detachstate);
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    UNLOCK();
#   ifdef DEBUG_THREADS
      GC_printf1("About to start new thread from thread 0x%X\n",
		 pthread_self());
#   endif
    result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);
#   ifdef DEBUG_THREADS
      GC_printf1("Started thread 0x%X\n", *new_thread);
#   endif
    /* Wait until child has been added to the thread table.		*/
    /* This also ensures that we hold onto si until the child is done	*/
    /* with it.  Thus it doesn't matter whether it is otherwise		*/
    /* visible to the collector.					*/
    if (0 == result) {
	while (0 != sem_wait(&(si -> registered))) {
	    if (EINTR != errno) ABORT("sem_wait failed");
	}
    }
    sem_destroy(&(si -> registered));
    LOCK();
    GC_INTERNAL_FREE(si);
    UNLOCK();

    return(result);
}
#ifdef GENERIC_COMPARE_AND_SWAP
  pthread_mutex_t GC_compare_and_swap_lock = PTHREAD_MUTEX_INITIALIZER;

  GC_bool GC_compare_and_exchange(volatile GC_word *addr,
				  GC_word old, GC_word new_val)
  {
    GC_bool result;

    pthread_mutex_lock(&GC_compare_and_swap_lock);
    if (*addr == old) {
      *addr = new_val;
      result = TRUE;
    } else {
      result = FALSE;
    }
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return result;
  }

  GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much)
  {
    GC_word old;

    pthread_mutex_lock(&GC_compare_and_swap_lock);
    old = *addr;
    *addr = old + how_much;
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return old;
  }
#endif /* GENERIC_COMPARE_AND_SWAP */
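
/* Usage sketch (illustrative only; `counter' is hypothetical): the	*/
/* classic retry loop built on the primitive above.			*/
#if 0
    volatile GC_word counter = 0;
    GC_word old;
    do {
	old = counter;
    } while (!GC_compare_and_exchange(&counter, old, old + 1));
    /* Equivalent single call: GC_atomic_add(&counter, 1); */
#endif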
/* Spend a few cycles in a way that can't introduce contention with	*/
/* other threads.							*/
void GC_pause()
{
    int i;
#   if !defined(__GNUC__) || defined(__INTEL_COMPILER)
      volatile word dummy = 0;
#   endif

    for (i = 0; i < 10; ++i) {
#     if defined(__GNUC__) && !defined(__INTEL_COMPILER)
	__asm__ __volatile__ (" " : : : "memory");
#     else
	/* Something that's unlikely to be optimized away. */
	GC_noop(++dummy);
#     endif
    }
}
#define SPIN_MAX 128	/* Maximum number of calls to GC_pause before	*/
			/* giving up.					*/

VOLATILE GC_bool GC_collecting = 0;
			/* A hint that we're in the collector and	*/
			/* holding the allocation lock for an		*/
			/* extended period.				*/
#if !defined(USE_SPIN_LOCK) || defined(PARALLEL_MARK)
/* If we don't want to use the below spinlock implementation, either	*/
/* because we don't have a GC_test_and_set implementation, or because	*/
/* we don't want to risk sleeping, we can still try spinning on	*/
/* pthread_mutex_trylock for a while.  This appears to be very		*/
/* beneficial in many cases.						*/
/* I suspect that under high contention this is nearly always better	*/
/* than the spin lock.  But it's a bit slower on a uniprocessor.	*/
/* Hence we still default to the spin lock.				*/
/* This is also used to acquire the mark lock for the parallel		*/
/* marker.								*/

/* Here we use a strict exponential backoff scheme.  I don't know	*/
/* whether that's better or worse than the above.  We eventually	*/
/* yield by calling pthread_mutex_lock(); it never makes sense to	*/
/* explicitly sleep.							*/
#ifdef LOCK_STATS
  unsigned long GC_spin_count = 0;
  unsigned long GC_block_count = 0;
  unsigned long GC_unlocked_count = 0;
#endif

void GC_generic_lock(pthread_mutex_t * lock)
{
#ifndef NO_PTHREAD_TRYLOCK
    unsigned pause_length = 1;
    unsigned i;

    if (0 == pthread_mutex_trylock(lock)) {
#	ifdef LOCK_STATS
	    ++GC_unlocked_count;
#	endif
	return;
    }
    for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
	for (i = 0; i < pause_length; ++i) {
	    GC_pause();
	}
	switch(pthread_mutex_trylock(lock)) {
	    case 0:
#		ifdef LOCK_STATS
		    ++GC_spin_count;
#		endif
		return;
	    case EBUSY:
		break;
	    default:
		ABORT("Unexpected error from pthread_mutex_trylock");
	}
    }
#endif /* !NO_PTHREAD_TRYLOCK */
#   ifdef LOCK_STATS
	++GC_block_count;
#   endif
    pthread_mutex_lock(lock);
}
#endif /* !USE_SPIN_LOCK || PARALLEL_MARK */
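
/* Worked example: with SPIN_MAX = 128, GC_generic_lock retries the	*/
/* trylock after 1, 2, 4, ..., 128 calls to GC_pause, i.e. at most	*/
/* 255 pauses (roughly 2550 no-op iterations) before it finally		*/
/* blocks in pthread_mutex_lock().					*/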
#if defined(USE_SPIN_LOCK)

/* Reasonably fast spin locks.  Basically the same implementation	*/
/* as STL alloc.h.  This isn't really the right way to do this,		*/
/* but until the POSIX scheduling mess gets straightened out ...	*/

volatile unsigned int GC_allocate_lock = 0;
void GC_lock()
{
#   define low_spin_max 30		/* spin cycles if we suspect uniprocessor */
#   define high_spin_max SPIN_MAX	/* spin cycles for multiprocessor	  */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    int i;

    if (!GC_test_and_set(&GC_allocate_lock)) {
	return;
    }
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
	if (GC_collecting || GC_nprocs == 1) goto yield;
	if (i < my_last_spins/2 || GC_allocate_lock) {
	    GC_pause();
	    continue;
	}
	if (!GC_test_and_set(&GC_allocate_lock)) {
	    /*
	     * Got it!
	     * Spinning worked.  Thus we're probably not being scheduled
	     * against the other process with which we were contending.
	     * Thus it makes sense to spin longer the next time.
	     */
	    last_spins = i;
	    spin_max = high_spin_max;
	    return;
	}
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
	if (!GC_test_and_set(&GC_allocate_lock)) {
	    return;
	}
#	define SLEEP_THRESHOLD 12
	    /* Under Linux very short sleeps tend to wait until		*/
	    /* the current time quantum expires.  On old Linux		*/
	    /* kernels nanosleep(<= 2ms) just spins under Linux.	*/
	    /* (Under 2.4, this happens only for real-time		*/
	    /* processes.)  We want to minimize both behaviors		*/
	    /* here.							*/
	if (i < SLEEP_THRESHOLD) {
	    sched_yield();
	} else {
	    struct timespec ts;

	    if (i > 24) i = 24;
		/* Don't wait for more than about 15msecs, even	*/
		/* under extreme contention.			*/
	    ts.tv_sec = 0;
	    ts.tv_nsec = 1 << i;
	    nanosleep(&ts, 0);
	}
    }
}
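
/* Worked example: past SLEEP_THRESHOLD, iteration i sleeps for 1 << i	*/
/* nanoseconds, doubling each time; the cap at i == 24 bounds each	*/
/* sleep by 1 << 24 ns, i.e. roughly 16.8 msecs.			*/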
#else  /* !USE_SPINLOCK */
void GC_lock()
{
#ifndef NO_PTHREAD_TRYLOCK
    if (1 == GC_nprocs || GC_collecting) {
	pthread_mutex_lock(&GC_allocate_ml);
    } else {
	GC_generic_lock(&GC_allocate_ml);
    }
#else  /* !NO_PTHREAD_TRYLOCK */
    pthread_mutex_lock(&GC_allocate_ml);
#endif /* !NO_PTHREAD_TRYLOCK */
}

#endif /* !USE_SPINLOCK */
#if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)

#ifdef GC_ASSERTIONS
  pthread_t GC_mark_lock_holder = NO_THREAD;
#endif

#if 0
  /* Ugly workaround for a linux threads bug in the final versions	*/
  /* of glibc2.1.  Pthread_mutex_trylock sets the mutex owner		*/
  /* field even when it fails to acquire the mutex.  This causes	*/
  /* pthread_cond_wait to die.  Remove for glibc2.2.			*/
  /* According to the man page, we should use				*/
  /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually	*/
  /* defined.								*/
  static pthread_mutex_t mark_mutex =
	{0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
#else
  static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif
static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
void GC_acquire_mark_lock()
{
/*
    if (pthread_mutex_lock(&mark_mutex) != 0) {
	ABORT("pthread_mutex_lock failed");
    }
*/
    GC_generic_lock(&mark_mutex);
#   ifdef GC_ASSERTIONS
	GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_release_mark_lock()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
	GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_mutex_unlock(&mark_mutex) != 0) {
	ABORT("pthread_mutex_unlock failed");
    }
}
/* Collector must wait for freelist builders for 2 reasons:		*/
/* 1) Mark bits may still be getting examined without lock.		*/
/* 2) Partial free lists referenced only by locals may not be scanned	*/
/*    correctly, e.g. if they contain "pointer-free" objects, since the	*/
/*    free-list link may be ignored.					*/
void GC_wait_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
	GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
	ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
	GC_mark_lock_holder = pthread_self();
#   endif
}
void GC_wait_for_reclaim()
{
    GC_acquire_mark_lock();
    while (GC_fl_builder_count > 0) {
	GC_wait_builder();
    }
    GC_release_mark_lock();
}

void GC_notify_all_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
    if (pthread_cond_broadcast(&builder_cv) != 0) {
	ABORT("pthread_cond_broadcast failed");
    }
}
#endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
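
/* Illustrative builder protocol (a sketch of the handshake, not the	*/
/* actual call sites): a freelist builder brackets its work with the	*/
/* mark lock and the GC_fl_builder_count that GC_wait_for_reclaim	*/
/* polls:								*/
#if 0
    GC_acquire_mark_lock();
    ++GC_fl_builder_count;	/* announce an active builder		*/
    GC_release_mark_lock();
    /* ... build thread-local free lists ... */
    GC_acquire_mark_lock();
    if (--GC_fl_builder_count == 0) GC_notify_all_builder();
    GC_release_mark_lock();
#endif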
#ifdef PARALLEL_MARK

static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;

void GC_wait_marker()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
	GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
	ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
	GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_notify_all_marker()
{
    if (pthread_cond_broadcast(&mark_cv) != 0) {
	ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK */
# endif /* GC_LINUX_THREADS and friends */