/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2001 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code relies on implementation details of LinuxThreads
 * (i.e. properties not guaranteed by the Pthread standard),
 * though this version now does less of that than the other Pthreads
 * implementations.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and thread support for some of the other Posix platforms; any changes
 * made here may need to be reflected there too.
 */
/* DG/UX ix86 support <takis@xfree86.org> */
/*
 * Linux_threads.c now also includes some code to support HPUX and
 * OSF1 (Compaq Tru64 Unix, really).  The OSF1 support is based on
 * Eric Benson's work.
 *
 * Eric also suggested an alternate basis for a lock implementation:
 * + #elif defined(OSF1)
 * +   unsigned long GC_allocate_lock = 0;
 * +   msemaphore GC_allocate_semaphore;
 * + # define GC_TRY_LOCK() \
 * +   ((msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) == 0) \
 * +    ? (GC_allocate_lock = 1) \
 * +    : 0)
 * + # define GC_LOCK_TAKEN GC_allocate_lock
 */
/*#define DEBUG_THREADS 1*/
/*#define GC_ASSERTIONS*/

# include "private/pthread_support.h"

# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
     && !defined(GC_IRIX_THREADS) && !defined(GC_WIN32_THREADS) \
     && !defined(GC_AIX_THREADS)
# if defined(GC_HPUX_THREADS) && !defined(USE_PTHREAD_SPECIFIC) \
     && !defined(USE_COMPILER_TLS)
#   ifdef __GNUC__
#     define USE_PTHREAD_SPECIFIC
      /* Empirically, as of gcc 3.3, USE_COMPILER_TLS doesn't work. */
#   else
#     define USE_COMPILER_TLS
#   endif
# endif

# if defined(USE_HPUX_TLS)
    --> Macro replaced by USE_COMPILER_TLS
# endif
# if (defined(GC_DGUX386_THREADS) || defined(GC_OSF1_THREADS) || \
      defined(GC_DARWIN_THREADS)) && !defined(USE_PTHREAD_SPECIFIC)
#   define USE_PTHREAD_SPECIFIC
# endif

# if defined(GC_DGUX386_THREADS) && !defined(_POSIX4A_DRAFT10_SOURCE)
#   define _POSIX4A_DRAFT10_SOURCE 1
# endif

# if defined(GC_DGUX386_THREADS) && !defined(_USING_POSIX4A_DRAFT10)
#   define _USING_POSIX4A_DRAFT10 1
# endif
# ifdef THREAD_LOCAL_ALLOC
#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_COMPILER_TLS)
#     include "private/specific.h"
#   endif
#   if defined(USE_PTHREAD_SPECIFIC)
#     define GC_getspecific pthread_getspecific
#     define GC_setspecific pthread_setspecific
#     define GC_key_create pthread_key_create
      typedef pthread_key_t GC_key_t;
#   endif
#   if defined(USE_COMPILER_TLS)
#     define GC_getspecific(x) (x)
#     define GC_setspecific(key, v) ((key) = (v), 0)
#     define GC_key_create(key, d) 0
      typedef void * GC_key_t;
#   endif
# endif
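/* A minimal sketch of what the two configurations above amount to	*/
/* (illustrative only; the names dtor, me and tsd are placeholders):	*/
#if 0
  /* USE_PTHREAD_SPECIFIC: GC_key_t is a real pthread_key_t and every	*/
  /* access is a pthread TLS library call:				*/
  GC_key_t k;
  GC_key_create(&k, dtor);      /* pthread_key_create(&k, dtor) */
  GC_setspecific(k, me);        /* pthread_setspecific(k, me)   */
  tsd = GC_getspecific(k);      /* pthread_getspecific(k)       */
  /* USE_COMPILER_TLS: the "key" is itself a __thread pointer, so	*/
  /* GC_getspecific(k) expands to just k, GC_setspecific(k, v) to a	*/
  /* plain assignment that always returns 0, and GC_key_create is a	*/
  /* no-op.								*/
#endif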
# include <pthread.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/types.h>
# include <sys/stat.h>
#if defined(GC_DARWIN_THREADS)
# include "private/darwin_semaphore.h"
#else
# include <semaphore.h>
#endif /* !GC_DARWIN_THREADS */

#if defined(GC_DARWIN_THREADS)
# include <sys/sysctl.h>
#endif /* GC_DARWIN_THREADS */
#if defined(GC_DGUX386_THREADS)
# include <sys/dg_sys_info.h>
# include <sys/_int_psem.h>
  /* sem_t is an unsigned int in DG/UX. */
  typedef unsigned int sem_t;
#endif /* GC_DGUX386_THREADS */
#ifdef GC_USE_LD_WRAP
# define WRAP_FUNC(f) __wrap_##f
# define REAL_FUNC(f) __real_##f
#else
# define WRAP_FUNC(f) GC_##f
# if !defined(GC_DGUX386_THREADS)
#   define REAL_FUNC(f) f
# else /* GC_DGUX386_THREADS */
#   define REAL_FUNC(f) __d10_##f
# endif /* GC_DGUX386_THREADS */
# undef pthread_create
# if !defined(GC_DARWIN_THREADS)
#   undef pthread_sigmask
# endif
# undef pthread_join
# undef pthread_detach
# if defined(GC_OSF1_THREADS) && defined(_PTHREAD_USE_MANGLED_NAMES_) \
     && !defined(_PTHREAD_USE_PTDNAM_)
    /* Restore the original mangled names on Tru64 UNIX. */
#   define pthread_create __pthread_create
#   define pthread_join __pthread_join
#   define pthread_detach __pthread_detach
# endif
#endif
static GC_bool parallel_initialized = FALSE;

void GC_init_parallel();

# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)

/* We don't really support thread-local allocation with DBG_HDRS_ALL. */

/* Work around a dlopen issue (bug #75390); the undefs avoid warnings	*/
/* about redefinitions.							*/
#undef PACKAGE_BUGREPORT
#undef PACKAGE_NAME
#undef PACKAGE_STRING
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
#include "mono/utils/mono-compiler.h"
#ifdef USE_COMPILER_TLS
__thread MONO_TLS_FAST
#endif
GC_key_t GC_thread_key;

static GC_bool keys_initialized;
/* Recover the contents of the freelist array fl into the global one gfl. */
/* Note that the indexing scheme differs, in that gfl has finer size	*/
/* resolution, even if not all entries are used.			*/
/* We hold the allocator lock.						*/
static void return_freelists(ptr_t *fl, ptr_t *gfl)
{
    int i;
    ptr_t q, *qptr;
    size_t nwords;

    for (i = 1; i < NFREELISTS; ++i) {
        nwords = i * (GRANULARITY/sizeof(word));
        qptr = fl + i;
        q = *qptr;
        if ((word)q >= HBLKSIZE) {
          if (gfl[nwords] == 0) {
            gfl[nwords] = q;
          } else {
            /* Concatenate: */
            for (; (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
            GC_ASSERT(0 == q);
            *qptr = gfl[nwords];
            gfl[nwords] = fl[i];
          }
        }
        /* Clear fl[i], since the thread structure may hang around.	*/
        /* Do it in a way that is likely to trap if we access it.	*/
        fl[i] = (ptr_t)HBLKSIZE;
    }
}
/* We statically allocate a single "size 0" object.  It is linked to	*/
/* itself, and is thus repeatedly reused for all size 0 allocation	*/
/* requests.  (Size 0 gcj allocation requests are incorrect, and	*/
/* we arrange for those to fault asap.)					*/
static ptr_t size_zero_object = (ptr_t)(&size_zero_object);
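/* Why the self-link works: obj_link(size_zero_object) is		*/
/* size_zero_object itself, so the usual free-list pop			*/
/* (result = *my_fl; *my_fl = obj_link(result);) hands out the same	*/
/* object every time and leaves the list head unchanged.		*/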
void GC_delete_thread(pthread_t id);

void GC_thread_deregister_foreign (void *data)
{
    GC_thread me = (GC_thread)data;
    /* GC_fprintf1( "\n\n\n\n --- Deregister %x ---\n\n\n\n\n", me->flags ); */
    if (me -> flags & FOREIGN_THREAD) {
        LOCK();
        /* GC_fprintf0( "\n\n\n\n --- FOO ---\n\n\n\n\n" ); */
        GC_delete_thread(me->id);
        UNLOCK();
    }
}
/* Each thread structure must be initialized.	*/
/* This call must be made from the new thread.	*/
/* Caller holds allocation lock.		*/
void GC_init_thread_local(GC_thread p)
{
    int i;

    if (!keys_initialized) {
        if (0 != GC_key_create(&GC_thread_key, GC_thread_deregister_foreign)) {
            ABORT("Failed to create key for local allocator");
        }
        keys_initialized = TRUE;
    }
    if (0 != GC_setspecific(GC_thread_key, p)) {
        ABORT("Failed to set thread specific allocation pointers");
    }
    for (i = 1; i < NFREELISTS; ++i) {
        p -> ptrfree_freelists[i] = (ptr_t)1;
        p -> normal_freelists[i] = (ptr_t)1;
#       ifdef GC_GCJ_SUPPORT
          p -> gcj_freelists[i] = (ptr_t)1;
#       endif
    }
    /* Set up the size 0 free lists.	*/
    p -> ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
    p -> normal_freelists[0] = (ptr_t)(&size_zero_object);
#   ifdef GC_GCJ_SUPPORT
      p -> gcj_freelists[0] = (ptr_t)(-1);
#   endif
}
#ifdef GC_GCJ_SUPPORT
  extern ptr_t * GC_gcjobjfreelist;
#endif

/* We hold the allocator lock.	*/
void GC_destroy_thread_local(GC_thread p)
{
    /* We currently only do this from the thread itself or from	*/
    /* the fork handler for a child process.				*/
#   ifndef HANDLE_FORK
      GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);
#   endif
    return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
    return_freelists(p -> normal_freelists, GC_objfreelist);
#   ifdef GC_GCJ_SUPPORT
      return_freelists(p -> gcj_freelists, GC_gcjobjfreelist);
#   endif
}
extern GC_PTR GC_generic_malloc_many();

GC_PTR GC_local_malloc(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes),0)) {
        return(GC_malloc(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl;
        ptr_t my_entry;
#       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
        GC_key_t k = GC_thread_key;
#       endif
        void * tsd;

#       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
        if (EXPECT(0 == k, 0)) {
            /* This can happen if we get called when the world is	*/
            /* being initialized.  Whether we can actually complete	*/
            /* the initialization then is unclear.			*/
            GC_init_parallel();
            k = GC_thread_key;
        }
#       endif
        tsd = GC_getspecific(GC_thread_key);
#       ifdef GC_ASSERTIONS
          LOCK();
          GC_ASSERT(tsd == (void *)GC_lookup_thread(pthread_self()));
          UNLOCK();
#       endif
        my_fl = ((GC_thread)tsd) -> normal_freelists + index;
        my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            ptr_t next = obj_link(my_entry);
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = next;
            obj_link(my_entry) = 0;
            PREFETCH_FOR_WRITE(next);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc(bytes);
        }
    }
}
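/* The three branches above dispatch on the free-list head encoding;	*/
/* in sketch form (same tests as the code, illustrative only):		*/
#if 0
  ptr_t head = *my_fl;
  if ((word)head >= HBLKSIZE) {
      /* A real object pointer: pop it off the list.			*/
  } else if ((word)head - 1 < DIRECT_GRANULES) {
      /* A small use count (1..DIRECT_GRANULES): bump it and defer to	*/
      /* the global allocator until this size class is "warm" enough.	*/
  } else {
      /* 0 (or an overflowed count): refill the list with		*/
      /* GC_generic_malloc_many() and retry.				*/
  }
#endif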
GC_PTR GC_local_malloc_atomic(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc_atomic(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> ptrfree_freelists + index;
        ptr_t my_entry = *my_fl;

        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = obj_link(my_entry);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc_atomic(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), PTRFREE, my_fl);
            /* *my_fl is updated while the collector is excluded;	*/
            /* the free list is always visible to the collector as	*/
            /* such.							*/
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc_atomic(bytes);
        }
    }
}
#ifdef GC_GCJ_SUPPORT

#include "include/gc_gcj.h"

#ifdef GC_ASSERTIONS
  extern GC_bool GC_gcj_malloc_initialized;
#endif

extern int GC_gcj_kind;

GC_PTR GC_local_gcj_malloc(size_t bytes,
                           void * ptr_to_struct_containing_descr)
{
    GC_ASSERT(GC_gcj_malloc_initialized);
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> gcj_freelists + index;
        ptr_t my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            GC_ASSERT(!GC_incremental);
            /* We assert that any concurrent marker will stop us.	*/
            /* Thus it is impossible for a mark procedure to see the	*/
            /* allocation of the next object, but to see this object	*/
            /* still containing a free list pointer.  Otherwise the	*/
            /* marker might find a random "mark descriptor".		*/
            *(volatile ptr_t *)my_fl = obj_link(my_entry);
            /* We must update the freelist before we store the pointer. */
            /* Otherwise a GC at this point would see a corrupted	*/
            /* free list.						*/
            /* A memory barrier is probably never needed, since the	*/
            /* action of stopping this thread will cause prior writes	*/
            /* to complete.						*/
            GC_ASSERT(((void * volatile *)result)[1] == 0);
            *(void * volatile *)result = ptr_to_struct_containing_descr;
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            if (!GC_incremental) *my_fl = my_entry + index + 1;
                /* In the incremental case, we always have to take this */
                /* path.  Thus we leave the counter alone.		*/
            return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), GC_gcj_kind, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        }
    }
}
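/* The ordering constraint above, spelled out: the first word of a	*/
/* free-list entry holds its link, and the same word holds the gcj	*/
/* descriptor pointer once the object is allocated.  If the descriptor	*/
/* were stored before *my_fl was advanced, a collector scanning the	*/
/* free list at that instant would read the descriptor as a link and	*/
/* follow it, corrupting the list.  Hence the link word is rewritten	*/
/* first, and only then the descriptor.					*/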
/* Similar to GC_local_gcj_malloc, but the size is in words, and we don't */
/* adjust it.  The size is assumed to be such that it can be		*/
/* allocated as a small object.						*/
void * GC_local_gcj_fast_malloc(size_t lw, void * ptr_to_struct_containing_descr)
{
    ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                    -> gcj_freelists + lw;
    ptr_t my_entry = *my_fl;

    GC_ASSERT(GC_gcj_malloc_initialized);

    if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
        GC_PTR result = (GC_PTR)my_entry;
        GC_ASSERT(!GC_incremental);
        /* We assert that any concurrent marker will stop us.		*/
        /* Thus it is impossible for a mark procedure to see the	*/
        /* allocation of the next object, but to see this object	*/
        /* still containing a free list pointer.  Otherwise the		*/
        /* marker might find a random "mark descriptor".		*/
        *(volatile ptr_t *)my_fl = obj_link(my_entry);
        /* We must update the freelist before we store the pointer.	*/
        /* Otherwise a GC at this point would see a corrupted		*/
        /* free list.							*/
        /* A memory barrier is probably never needed, since the		*/
        /* action of stopping this thread will cause prior writes	*/
        /* to complete.							*/
        GC_ASSERT(((void * volatile *)result)[1] == 0);
        *(void * volatile *)result = ptr_to_struct_containing_descr;
        return result;
    } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
        if (!GC_incremental) *my_fl = my_entry + lw + 1;
            /* In the incremental case, we always have to take this	*/
            /* path.  Thus we leave the counter alone.			*/
        return GC_gcj_fast_malloc(lw, ptr_to_struct_containing_descr);
    } else {
        GC_generic_malloc_many(BYTES_FROM_INDEX(lw), GC_gcj_kind, my_fl);
        if (*my_fl == 0) return GC_oom_fn(BYTES_FROM_INDEX(lw));
        return GC_local_gcj_fast_malloc(lw, ptr_to_struct_containing_descr);
    }
}

#endif /* GC_GCJ_SUPPORT */
# else  /* !THREAD_LOCAL_ALLOC && !DBG_HDRS_ALL */

#   define GC_destroy_thread_local(t)

# endif /* !THREAD_LOCAL_ALLOC */
/* To make sure that we're using LinuxThreads and not some other thread
   package, we generate a dummy reference to `pthread_kill_other_threads_np'
   (was `__pthread_initial_thread_bos' but that disappeared),
   which is a symbol defined in LinuxThreads, but (hopefully) not in other
   thread packages.

   We no longer do this, since this code is now portable enough that it might
   actually work for something else.
*/
void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;

long GC_nprocs = 1;	/* Number of processors.  We may not have	*/
			/* access to all of them, but this is as good	*/
			/* a guess as any ...				*/
#ifdef PARALLEL_MARK

# ifndef MAX_MARKERS
#   define MAX_MARKERS 16
# endif

static ptr_t marker_sp[MAX_MARKERS] = {0};

void * GC_mark_thread(void * id)
{
  word my_mark_no = 0;

  marker_sp[(word)id] = GC_approx_sp();
  for (;; ++my_mark_no) {
    /* GC_mark_no is passed only to allow GC_help_marker to terminate	*/
    /* promptly.  This is important if it were called from the signal	*/
    /* handler or from the GC lock acquisition code.  Under Linux, it's	*/
    /* not safe to call it from a signal handler, since it uses mutexes	*/
    /* and condition variables.  Since it is called only here, the	*/
    /* argument is unnecessary.						*/
    if (my_mark_no < GC_mark_no || my_mark_no > GC_mark_no + 2) {
        /* resynchronize if we get far off, e.g. because GC_mark_no	*/
        /* wrapped.							*/
        my_mark_no = GC_mark_no;
    }
#   ifdef DEBUG_THREADS
      GC_printf1("Starting mark helper for mark number %ld\n", my_mark_no);
#   endif
    GC_help_marker(my_mark_no);
  }
}
extern long GC_markers;		/* Number of mark threads we would	*/
				/* like to have.  Includes the		*/
				/* initiating thread.			*/

pthread_t GC_mark_threads[MAX_MARKERS];

#define PTHREAD_CREATE REAL_FUNC(pthread_create)

static void start_mark_threads()
{
    unsigned i;
    pthread_attr_t attr;

    if (GC_markers > MAX_MARKERS) {
        WARN("Limiting number of mark threads\n", 0);
        GC_markers = MAX_MARKERS;
    }
    if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");

    if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
        ABORT("pthread_attr_setdetachstate failed");

#   if defined(HPUX) || defined(GC_DGUX386_THREADS)
      /* Default stack size is usually too small: fix it. */
      /* Otherwise marker threads or GC may run out of	   */
      /* space.						   */
#     define MIN_STACK_SIZE (8*HBLKSIZE*sizeof(word))
      {
        size_t old_size;

        if (pthread_attr_getstacksize(&attr, &old_size) != 0)
          ABORT("pthread_attr_getstacksize failed\n");
        if (old_size < MIN_STACK_SIZE) {
          if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
            ABORT("pthread_attr_setstacksize failed\n");
        }
      }
#   endif /* HPUX || GC_DGUX386_THREADS */
#   ifdef CONDPRINT
      if (GC_print_stats) {
        GC_printf1("Starting %ld marker threads\n", GC_markers - 1);
      }
#   endif
    for (i = 0; i < GC_markers - 1; ++i) {
      if (0 != PTHREAD_CREATE(GC_mark_threads + i, &attr,
                              GC_mark_thread, (void *)(word)i)) {
        WARN("Marker thread creation failed, errno = %ld.\n", errno);
      }
    }
}
#else  /* !PARALLEL_MARK */

static __inline__ void start_mark_threads()
{
}

#endif /* !PARALLEL_MARK */

GC_bool GC_thr_initialized = FALSE;

volatile GC_thread GC_threads[THREAD_TABLE_SZ];
/*
 * gcc-3.3.6 miscompiles the &GC_thread_key+sizeof(&GC_thread_key) expression,
 * so we put it into a separate function.
 */
# if defined(__GNUC__) && defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
static __attribute__((noinline)) unsigned char* get_gc_thread_key_addr GC_PROTO((void))
{
    return (unsigned char*)&GC_thread_key;
}

void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      GC_push_all((ptr_t)get_gc_thread_key_addr(),
          (ptr_t)(get_gc_thread_key_addr())+sizeof(&GC_thread_key));
#   endif
}

# else

void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      GC_push_all((ptr_t)(&GC_thread_key),
          (ptr_t)(&GC_thread_key)+sizeof(&GC_thread_key));
#   endif
}

# endif
#ifdef THREAD_LOCAL_ALLOC
/* We must explicitly mark ptrfree and gcj free lists, since the free	*/
/* list links wouldn't otherwise be found.  We also set them in the	*/
/* normal free lists, since that involves touching less memory than if	*/
/* we scanned them normally.						*/
void GC_mark_thread_local_free_lists(void)
{
    int i, j;
    GC_thread p;
    ptr_t q;

    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
      for (p = GC_threads[i]; 0 != p; p = p -> next) {
        for (j = 1; j < NFREELISTS; ++j) {
          q = p -> ptrfree_freelists[j];
          if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
          q = p -> normal_freelists[j];
          if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#         ifdef GC_GCJ_SUPPORT
            q = p -> gcj_freelists[j];
            if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#         endif /* GC_GCJ_SUPPORT */
        }
      }
    }
}
#endif /* THREAD_LOCAL_ALLOC */
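/* Note the (word)q > HBLKSIZE filter above: it skips the two		*/
/* non-pointer encodings a free-list head may hold, i.e. a small	*/
/* "use count" (at most DIRECT_GRANULES) and the (ptr_t)HBLKSIZE	*/
/* poison value stored by return_freelists().				*/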
static struct GC_Thread_Rep first_thread;

/* Add a thread to GC_threads.  We assume it wasn't already there.	*/
/* Caller holds allocation lock.					*/
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
    } else {
        result = (struct GC_Thread_Rep *)
                 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
    return(result);
}
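/* For reference, GC_threads is a simple chained hash table: each of	*/
/* the THREAD_TABLE_SZ buckets holds a list of GC_Thread_Rep entries	*/
/* pushed at the head, keyed by ((word)id) % THREAD_TABLE_SZ.  Because	*/
/* new entries go in front, the most recently registered thread with a	*/
/* given id is always found first (see GC_lookup_thread below).		*/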
/* Delete a thread from GC_threads.  We assume it is there.	*/
/* (The code intentionally traps if it wasn't.)			*/
/* Caller holds allocation lock.				*/
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    GC_INTERNAL_FREE(p);
}

/* If a thread has been joined, but we have not yet		*/
/* been notified, then there may be more than one thread	*/
/* in the table with the same pthread id.			*/
/* This is OK, but we need a way to delete a specific one.	*/
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    GC_INTERNAL_FREE(p);
}

/* Return a GC_thread corresponding to a given pthread_t.	*/
/* Returns 0 if it's not there.					*/
/* Caller holds allocation lock or otherwise inhibits		*/
/* updates.							*/
/* If there is more than one thread with the given id we	*/
/* return the most recent one.					*/
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}
int GC_thread_is_registered (void)
{
    void *ptr;

    LOCK();
    ptr = (void *)GC_lookup_thread(pthread_self());
    UNLOCK();

    return ptr ? 1 : 0;
}
#ifdef HANDLE_FORK
/* Remove all entries from the GC_threads table, except the	*/
/* one for the current thread.  We need to do this in the child	*/
/* process after a fork(), since only the current thread	*/
/* survives in the child.					*/
void GC_remove_all_threads_but_me(void)
{
    pthread_t self = pthread_self();
    int hv;
    GC_thread p, next, me;

    for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
      me = 0;
      for (p = GC_threads[hv]; 0 != p; p = next) {
        next = p -> next;
        if (p -> id == self) {
          me = p;
          p -> next = 0;
        } else {
#         ifdef THREAD_LOCAL_ALLOC
            if (!(p -> flags & FINISHED)) {
              GC_destroy_thread_local(p);
            }
#         endif /* THREAD_LOCAL_ALLOC */
          if (p != &first_thread) GC_INTERNAL_FREE(p);
        }
      }
      GC_threads[hv] = me;
    }
}
#endif /* HANDLE_FORK */
#ifdef USE_PROC_FOR_LIBRARIES
int GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
{
    int i;
    GC_thread p;

#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers; ++i) {
        if (marker_sp[i] > lo && marker_sp[i] < hi) return 1;
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (0 != p -> stack_end) {
#         ifdef STACK_GROWS_UP
            if (p -> stack_end >= lo && p -> stack_end < hi) return 1;
#         else /* STACK_GROWS_DOWN */
            if (p -> stack_end > lo && p -> stack_end <= hi) return 1;
#         endif
        }
      }
    }
    return 0;
}
#endif /* USE_PROC_FOR_LIBRARIES */
#ifdef GC_LINUX_THREADS
/* Return the number of processors, or <= 0 if it can't be determined.	*/
int GC_get_nprocs()
{
    /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that	*/
    /* appears to be buggy in many cases.				*/
    /* We look for lines "cpu<n>" in /proc/stat.			*/
#   define STAT_BUF_SIZE 4096
#   define STAT_READ read
        /* If read is wrapped, this may need to be redefined to call	*/
        /* the real one.						*/
    char stat_buf[STAT_BUF_SIZE];
    int f;
    word result = 1;
        /* Some old kernels only have a single "cpu nnnn ..."	*/
        /* entry in /proc/stat.  We identify those as		*/
        /* uniprocessors.					*/
    size_t i, len = 0;

    f = open("/proc/stat", O_RDONLY);
    if (f < 0 || (len = STAT_READ(f, stat_buf, STAT_BUF_SIZE)) < 100) {
        WARN("Couldn't read /proc/stat\n", 0);
        return -1;
    }
    for (i = 0; i < len - 100; ++i) {
        if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
            && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
            int cpu_no = atoi(stat_buf + i + 4);
            if (cpu_no >= result) result = cpu_no + 1;
        }
    }
    close(f);
    return result;
}
#endif /* GC_LINUX_THREADS */
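/* For reference, the /proc/stat lines being matched look roughly like:	*/
/*									*/
/*	cpu  ...	(aggregate totals; always the first line)	*/
/*	cpu0 ...							*/
/*	cpu1 ...							*/
/*									*/
/* The scan above only matches "cpu" immediately after a newline, so	*/
/* the aggregate first line is never counted; the result is the	*/
/* largest <n> seen in any "cpu<n>" line, plus one.			*/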
/* We hold the GC lock.  Wait until an in-progress GC has finished.	*/
/* Repeatedly RELEASES GC LOCK in order to wait.			*/
/* If wait_for_all is true, then we exit with the GC lock held and no	*/
/* collection in progress; otherwise we just wait for the current GC	*/
/* to finish.								*/
extern GC_bool GC_collection_in_progress();
void GC_wait_for_gc_completion(GC_bool wait_for_all)
{
    if (GC_incremental && GC_collection_in_progress()) {
        int old_gc_no = GC_gc_no;

        /* Make sure that no part of our stack is still on the mark stack, */
        /* since it's about to be unmapped.				    */
        while (GC_incremental && GC_collection_in_progress()
               && (wait_for_all || old_gc_no == GC_gc_no)) {
            ENTER_GC();
            GC_in_thread_creation = TRUE;
            GC_collect_a_little_inner(1);
            GC_in_thread_creation = FALSE;
            EXIT_GC();
            UNLOCK();
            sched_yield();
            LOCK();
        }
    }
}
#ifdef HANDLE_FORK
/* Procedures called before and after a fork.  The goal here is to make */
/* it safe to call GC_malloc() in a forked child.  It's unclear that is	*/
/* attainable, since the single UNIX spec seems to imply that one	*/
/* should only call async-signal-safe functions, and we probably can't	*/
/* quite guarantee that.  But we give it our best shot.  (That same	*/
/* spec also implies that it's not safe to call the system malloc	*/
/* between fork() and exec().  Thus we're doing no worse than it.)	*/

/* Called before a fork()	*/
void GC_fork_prepare_proc(void)
{
    /* Acquire all relevant locks, so that after releasing the locks	*/
    /* the child will see a consistent state in which monitor		*/
    /* invariants hold.  Unfortunately, we can't acquire libc locks	*/
    /* we might need, and there seems to be no guarantee that libc	*/
    /* must install a suitable fork handler.				*/
    /* Wait for an ongoing GC to finish, since we can't finish it in	*/
    /* the (one remaining thread in) the child.				*/
      LOCK();
#     if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
        GC_wait_for_reclaim();
#     endif
      GC_wait_for_gc_completion(TRUE);
#     if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
        GC_acquire_mark_lock();
#     endif
}

/* Called in parent after a fork() */
void GC_fork_parent_proc(void)
{
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      GC_release_mark_lock();
#   endif
    UNLOCK();
}

/* Called in child after a fork() */
void GC_fork_child_proc(void)
{
    /* Clean up the thread table, so that just our thread is left. */
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      GC_release_mark_lock();
#   endif
    GC_remove_all_threads_but_me();
#   ifdef PARALLEL_MARK
      /* Turn off parallel marking in the child, since we are probably	*/
      /* just going to exec, and we would have to restart mark threads. */
        GC_markers = 1;
        GC_parallel = FALSE;
#   endif /* PARALLEL_MARK */
    UNLOCK();
}
#endif /* HANDLE_FORK */
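/* The three handlers above are registered in GC_thr_init() below, in	*/
/* the usual pthread_atfork order (prepare, parent, child):		*/
#if 0
  pthread_atfork(GC_fork_prepare_proc, GC_fork_parent_proc,
                 GC_fork_child_proc);
#endif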
#if defined(GC_DGUX386_THREADS)
/* Return the number of processors, or <= 0 if it can't be determined. */
int GC_get_nprocs()
{
    /* <takis@XFree86.Org> */
    int numCpus;
    struct dg_sys_info_pm_info pm_sysinfo;
    int status = 0;

    status = dg_sys_info((long int *) &pm_sysinfo,
        DG_SYS_INFO_PM_INFO_TYPE, DG_SYS_INFO_PM_CURRENT_VERSION);
    if (status < 0)
       /* set -1 for error */
       numCpus = -1;
    else
      /* Active CPUs */
      numCpus = pm_sysinfo.idle_vp_count;

#   ifdef DEBUG_THREADS
      GC_printf1("Number of active CPUs in this system: %d\n", numCpus);
#   endif
    return(numCpus);
}
#endif /* GC_DGUX386_THREADS */
/* We hold the allocation lock.	*/
void GC_thr_init()
{
#   ifndef GC_DARWIN_THREADS
      int dummy;
#   endif
    GC_thread t;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;

#   ifdef HANDLE_FORK
      /* Prepare for a possible fork.	*/
        pthread_atfork(GC_fork_prepare_proc, GC_fork_parent_proc,
                       GC_fork_child_proc);
#   endif /* HANDLE_FORK */
    /* Add the initial thread, so we can stop it.	*/
      t = GC_new_thread(pthread_self());
#     ifdef GC_DARWIN_THREADS
         t -> stop_info.mach_thread = mach_thread_self();
#     else
         t -> stop_info.stack_ptr = (ptr_t)(&dummy);
#     endif
      t -> flags = DETACHED | MAIN_THREAD;

    GC_stop_init();

    /* Set GC_nprocs.	*/
      {
        char * nprocs_string = GETENV("GC_NPROCS");
        GC_nprocs = -1;
        if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
      }
    if (GC_nprocs <= 0) {
#     if defined(GC_HPUX_THREADS)
        GC_nprocs = pthread_num_processors_np();
#     endif
#     if defined(GC_OSF1_THREADS)
        GC_nprocs = sysconf(_SC_NPROCESSORS_ONLN);
        if (GC_nprocs <= 0) GC_nprocs = 1;
#     endif
#     if defined(GC_FREEBSD_THREADS)
        GC_nprocs = 1;
#     endif
#     if defined(GC_DARWIN_THREADS)
        int ncpus = 1;
        size_t len = sizeof(ncpus);
        sysctl((int[2]) {CTL_HW, HW_NCPU}, 2, &ncpus, &len, NULL, 0);
        GC_nprocs = ncpus;
#     endif
#     if defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS)
        GC_nprocs = GC_get_nprocs();
#     endif
    }
    if (GC_nprocs <= 0) {
      WARN("GC_get_nprocs() returned %ld\n", GC_nprocs);
      GC_nprocs = 2;
#     ifdef PARALLEL_MARK
        GC_markers = 1;
#     endif
    } else {
#     ifdef PARALLEL_MARK
      {
        char * markers_string = GETENV("GC_MARKERS");
        if (markers_string != NULL) {
          GC_markers = atoi(markers_string);
        } else {
          GC_markers = GC_nprocs;
        }
      }
#     endif
    }
#   ifdef PARALLEL_MARK
#     ifdef CONDPRINT
        if (GC_print_stats) {
          GC_printf2("Number of processors = %ld, "
                     "number of marker threads = %ld\n", GC_nprocs, GC_markers);
        }
#     endif
      if (GC_markers == 1) {
        GC_parallel = FALSE;
#       ifdef CONDPRINT
          if (GC_print_stats) {
            GC_printf0("Single marker thread, turning off parallel marking\n");
          }
#       endif
      } else {
        GC_parallel = TRUE;
        /* Disable true incremental collection, but generational is OK. */
        GC_time_limit = GC_TIME_UNLIMITED;
      }
#   endif
}
/* Perform all initializations, including those that	*/
/* may require allocation.				*/
/* Called without allocation lock.			*/
/* Must be called before a second thread is created.	*/
void GC_init_parallel()
{
    if (parallel_initialized) return;
    parallel_initialized = TRUE;

    /* GC_init() calls us back, so set flag first.	*/
    if (!GC_is_initialized) GC_init();
    /* If we are using a parallel marker, start the helper threads.  */
#   ifdef PARALLEL_MARK
      if (GC_parallel) start_mark_threads();
#   endif
    /* Initialize thread local free lists if used.	*/
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      LOCK();
      GC_init_thread_local(GC_lookup_thread(pthread_self()));
      UNLOCK();
#   endif
}
#if !defined(GC_DARWIN_THREADS)
int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(REAL_FUNC(pthread_sigmask)(how, set, oset));
}
#endif /* !GC_DARWIN_THREADS */
/* Wrappers for functions that are likely to block for an appreciable	*/
/* length of time.  Must be called in pairs, if at all.			*/
/* Nothing much beyond the system call itself should be executed	*/
/* between these.							*/

void GC_start_blocking(void) {
#   define SP_SLOP 128
    GC_thread me;
    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(!(me -> thread_blocked));
#   ifdef SPARC
        me -> stop_info.stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
#   ifndef GC_DARWIN_THREADS
        me -> stop_info.stack_ptr = (ptr_t)GC_approx_sp();
#   endif
#   endif
#   ifdef IA64
        me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack() + SP_SLOP;
#   endif
    /* Add some slop to the stack pointer, since the wrapped call may	*/
    /* end up pushing more callee-save registers.			*/
#   ifndef GC_DARWIN_THREADS
#   ifdef STACK_GROWS_UP
        me -> stop_info.stack_ptr += SP_SLOP;
#   else
        me -> stop_info.stack_ptr -= SP_SLOP;
#   endif
#   endif
    me -> thread_blocked = TRUE;
    UNLOCK();
}

void GC_end_blocking(void) {
    GC_thread me;
    LOCK();   /* This will block if the world is stopped.	*/
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(me -> thread_blocked);
    me -> thread_blocked = FALSE;
    UNLOCK();
}
#if defined(GC_DGUX386_THREADS)
#define __d10_sleep sleep
#endif /* GC_DGUX386_THREADS */

/* A wrapper for the standard C sleep function	*/
int WRAP_FUNC(sleep) (unsigned int seconds)
{
    int result;

    GC_start_blocking();
    result = REAL_FUNC(sleep)(seconds);
    GC_end_blocking();
    return result;
}

struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;		/* 1 ==> in our thread table, but	*/
				/* parent hasn't yet noticed.		*/
};
/* Called at thread exit.				*/
/* Never called for main thread.  That's OK, since it	*/
/* results in at most a tiny one-time leak.  And	*/
/* linuxthreads doesn't reclaim the main thread's	*/
/* resources or id anyway.				*/
void GC_thread_exit_proc(void *arg)
{
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_destroy_thread_local(me);
    if (me -> flags & DETACHED) {
        GC_delete_thread(pthread_self());
    } else {
        me -> flags |= FINISHED;
    }
#   if defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_SPECIFIC) \
       && !defined(USE_COMPILER_TLS) && !defined(DBG_HDRS_ALL)
      GC_remove_specific(GC_thread_key);
#   endif
    /* The following may run the GC from "nonexistent" thread.	*/
    GC_wait_for_gc_completion(FALSE);
    UNLOCK();
}
int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id	*/
    /* can't have been recycled by pthreads.				*/
    UNLOCK();
    result = REAL_FUNC(pthread_join)(thread, retval);
# if defined (GC_FREEBSD_THREADS)
    /* On FreeBSD, the wrapped pthread_join() sometimes returns (what
       appears to be) a spurious EINTR which caused the test and real code
       to gratuitously fail.  Having looked at system pthread library source
       code, I see how this return code may be generated.  In one path of
       code, pthread_join() just returns the errno setting of the thread
       being joined.  This does not match the POSIX specification or the
       local man pages, so I have taken the liberty to catch this one
       spurious return value, properly conditionalized on GC_FREEBSD_THREADS. */
    if (result == EINTR) result = 0;
# endif
    if (result == 0) {
        LOCK();
        /* Here the pthread thread id may have been recycled. */
        GC_delete_gc_thread(thread, thread_gc_id);
        UNLOCK();
    }
    return result;
}
int
WRAP_FUNC(pthread_detach)(pthread_t thread)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    UNLOCK();
    result = REAL_FUNC(pthread_detach)(thread);
    if (result == 0) {
      LOCK();
      thread_gc_id -> flags |= DETACHED;
      /* Here the pthread thread id may have been recycled. */
      if (thread_gc_id -> flags & FINISHED) {
        GC_delete_gc_thread(thread, thread_gc_id);
      }
      UNLOCK();
    }
    return result;
}
GC_bool GC_in_thread_creation = FALSE;

typedef void *(*ThreadStartFn)(void *);
void * GC_start_routine_head(void * arg, void *base_addr,
                             ThreadStartFn *start, void **start_arg )
{
    struct start_info * si = arg;
    GC_thread me;
    pthread_t my_pthread;

    my_pthread = pthread_self();
#   ifdef DEBUG_THREADS
        GC_printf1("Starting thread 0x%lx\n", my_pthread);
        GC_printf1("pid = %ld\n", (long) getpid());
        GC_printf1("sp = 0x%lx\n", (long) &arg);
#   endif
    LOCK();
    GC_in_thread_creation = TRUE;
    me = GC_new_thread(my_pthread);
    GC_in_thread_creation = FALSE;
#ifdef GC_DARWIN_THREADS
    me -> stop_info.mach_thread = mach_thread_self();
#else
    me -> stop_info.stack_ptr = 0;
#endif
    me -> flags = si -> flags;
    /* me -> stack_end = GC_linux_stack_base(); -- currently (11/99)	*/
    /* doesn't work because the stack base in /proc/self/stat is the	*/
    /* one for the main thread.  There is a strong argument that that's	*/
    /* a kernel bug, but a pervasive one.				*/
#   ifdef STACK_GROWS_DOWN
      me -> stack_end = (ptr_t)(((word)(base_addr) + (GC_page_size - 1))
                                & ~(GC_page_size - 1));
#     ifndef GC_DARWIN_THREADS
        me -> stop_info.stack_ptr = me -> stack_end - 0x10;
#     endif
        /* Needs to be plausible, since an asynchronous stack mark	*/
        /* should not crash.						*/
#   else
      me -> stack_end = (ptr_t)((word)(base_addr) & ~(GC_page_size - 1));
      me -> stop_info.stack_ptr = me -> stack_end + 0x10;
#   endif
    /* This is dubious, since we may be more than a page into the stack, */
    /* and hence skip some of it, though it's not clear that matters.	*/
#   ifdef IA64
      me -> backing_store_end = (ptr_t)
                        (GC_save_regs_in_stack() & ~(GC_page_size - 1));
      /* This is also < 100% convincing.  We should also read this	*/
      /* from /proc, but the hook to do so isn't there yet.		*/
#   endif /* IA64 */
    UNLOCK();

    if (start) *start = si -> start_routine;
    if (start_arg) *start_arg = si -> arg;

    sem_post(&(si -> registered));	/* Last action on si.	*/
					/* OK to deallocate.	*/
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      LOCK();
      GC_init_thread_local(me);
      UNLOCK();
#   endif

    return me;
}
int GC_thread_register_foreign (void *base_addr)
{
    struct start_info si = { 0, };	/* stacked for legibility & locking */
    GC_thread me;

#   ifdef DEBUG_THREADS
        GC_printf1( "GC_thread_register_foreign %p\n", &si );
#   endif

    si.flags = FOREIGN_THREAD;

    if (!parallel_initialized) GC_init_parallel();
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
    UNLOCK();

    me = GC_start_routine_head(&si, base_addr, NULL, NULL);

    return me != NULL;
}
void * GC_start_routine(void * arg)
{
    int dummy;
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    ThreadStartFn start;
    void *start_arg;

    me = GC_start_routine_head (arg, &dummy, &start, &start_arg);

    pthread_cleanup_push(GC_thread_exit_proc, 0);
#   ifdef DEBUG_THREADS
        GC_printf1("start_routine = 0x%lx\n", start);
#   endif
    result = (*start)(start_arg);
#   ifdef DEBUG_THREADS
        GC_printf1("Finishing thread 0x%x\n", pthread_self());
#   endif
    me -> status = result;
    pthread_cleanup_pop(1);
    /* Cleanup acquires lock, ensuring that we can't exit		*/
    /* while a collection that thinks we're alive is trying to stop	*/
    /* us.								*/
    return(result);
}
int
WRAP_FUNC(pthread_create)(pthread_t *new_thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *), void *arg)
{
    int result;
    int detachstate;
    word my_flags = 0;
    struct start_info * si;
        /* This is otherwise saved only in an area mmapped by the thread */
        /* library, which isn't visible to the collector.		  */

    /* We resist the temptation to muck with the stack size here,	*/
    /* even if the default is unreasonably small.  That's the client's	*/
    /* responsibility.							*/

    LOCK();
    si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info),
                                                 NORMAL);
    UNLOCK();
    if (!parallel_initialized) GC_init_parallel();
    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
#   ifdef GC_ASSERTIONS
      {
        size_t stack_size;
        if (NULL == attr) {
           pthread_attr_t my_attr;
           pthread_attr_init(&my_attr);
           pthread_attr_getstacksize(&my_attr, &stack_size);
        } else {
           pthread_attr_getstacksize(attr, &stack_size);
        }
        GC_ASSERT(stack_size >= (8*HBLKSIZE*sizeof(word)));
        /* Our threads may need to do some work for the GC.	*/
        /* Ridiculously small threads won't work, and they	*/
        /* probably wouldn't work anyway.			*/
      }
#   endif
    if (NULL == attr) {
        detachstate = PTHREAD_CREATE_JOINABLE;
    } else {
        pthread_attr_getdetachstate(attr, &detachstate);
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    UNLOCK();
#   ifdef DEBUG_THREADS
        GC_printf1("About to start new thread from thread 0x%X\n",
                   pthread_self());
#   endif

    result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);

#   ifdef DEBUG_THREADS
        GC_printf1("Started thread 0x%X\n", *new_thread);
#   endif
    /* Wait until child has been added to the thread table.		*/
    /* This also ensures that we hold onto si until the child is done	*/
    /* with it.  Thus it doesn't matter whether it is otherwise		*/
    /* visible to the collector.					*/
    if (0 == result) {
        while (0 != sem_wait(&(si -> registered))) {
            if (EINTR != errno) ABORT("sem_wait failed");
        }
    }
    sem_destroy(&(si -> registered));
    LOCK();
    GC_INTERNAL_FREE(si);
    UNLOCK();

    return(result);
}
#ifdef GENERIC_COMPARE_AND_SWAP
  pthread_mutex_t GC_compare_and_swap_lock = PTHREAD_MUTEX_INITIALIZER;

  GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                  GC_word old, GC_word new_val)
  {
    GC_bool result;

    pthread_mutex_lock(&GC_compare_and_swap_lock);
    if (*addr == old) {
      *addr = new_val;
      result = TRUE;
    } else {
      result = FALSE;
    }
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return result;
  }

  GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much)
  {
    GC_word old;

    pthread_mutex_lock(&GC_compare_and_swap_lock);
    old = *addr;
    *addr = old + how_much;
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return old;
  }
#endif /* GENERIC_COMPARE_AND_SWAP */
/* Spend a few cycles in a way that can't introduce contention with	*/
/* other threads.							*/
void GC_pause()
{
    int i;
#   if !defined(__GNUC__) || defined(__INTEL_COMPILER)
      volatile word dummy = 0;
#   endif

    for (i = 0; i < 10; ++i) {
#     if defined(__GNUC__) && !defined(__INTEL_COMPILER)
        __asm__ __volatile__ (" " : : : "memory");
#     else
        /* Something that's unlikely to be optimized away. */
        GC_noop(++dummy);
#     endif
    }
}
#define SPIN_MAX 128	/* Maximum number of calls to GC_pause before	*/
			/* give up.					*/

VOLATILE GC_bool GC_collecting = 0;
			/* A hint that we're in the collector and	*/
			/* holding the allocation lock for an		*/
			/* extended period.				*/
#if !defined(USE_SPIN_LOCK) || defined(PARALLEL_MARK)
/* If we don't want to use the below spinlock implementation, either	*/
/* because we don't have a GC_test_and_set implementation, or because	*/
/* we don't want to risk sleeping, we can still try spinning on	*/
/* pthread_mutex_trylock for a while.  This appears to be very		*/
/* beneficial in many cases.						*/
/* I suspect that under high contention this is nearly always better	*/
/* than the spin lock.  But it's a bit slower on a uniprocessor.	*/
/* Hence we still default to the spin lock.				*/
/* This is also used to acquire the mark lock for the parallel		*/
/* marker.								*/

/* Here we use a strict exponential backoff scheme.  I don't know	*/
/* whether that's better or worse than the above.  We eventually	*/
/* yield by calling pthread_mutex_lock(); it never makes sense to	*/
/* explicitly sleep.							*/

#ifdef LOCK_STATS
  unsigned long GC_spin_count = 0;
  unsigned long GC_block_count = 0;
  unsigned long GC_unlocked_count = 0;
#endif

void GC_generic_lock(pthread_mutex_t * lock)
{
#ifndef NO_PTHREAD_TRYLOCK
    unsigned pause_length = 1;
    unsigned i;

    if (0 == pthread_mutex_trylock(lock)) {
#       ifdef LOCK_STATS
            ++GC_unlocked_count;
#       endif
        return;
    }
    for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
        for (i = 0; i < pause_length; ++i) {
            GC_pause();
        }
        switch(pthread_mutex_trylock(lock)) {
            case 0:
#               ifdef LOCK_STATS
                    ++GC_spin_count;
#               endif
                return;
            case EBUSY:
                break;
            default:
                ABORT("Unexpected error from pthread_mutex_trylock");
        }
    }
#endif /* !NO_PTHREAD_TRYLOCK */
#   ifdef LOCK_STATS
        ++GC_block_count;
#   endif
    pthread_mutex_lock(lock);
}

#endif /* !USE_SPIN_LOCK || PARALLEL_MARK */
#if defined(USE_SPIN_LOCK)

/* Reasonably fast spin locks.  Basically the same implementation	*/
/* as STL alloc.h.  This isn't really the right way to do this,	*/
/* but until the POSIX scheduling mess gets straightened out ...	*/

volatile unsigned int GC_allocate_lock = 0;

void GC_lock()
{
#   define low_spin_max 30	/* spin cycles if we suspect uniprocessor */
#   define high_spin_max SPIN_MAX	/* spin cycles for multiprocessor */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    int i;

    if (!GC_test_and_set(&GC_allocate_lock)) {
        return;
    }
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting || GC_nprocs == 1) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            GC_pause();
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock)) {
            /*
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock)) {
            return;
        }
#       define SLEEP_THRESHOLD 12
            /* Under Linux very short sleeps tend to wait until		*/
            /* the current time quantum expires.  On old Linux kernels	*/
            /* nanosleep(<= 2ms) just spins.  (Under 2.4, this happens	*/
            /* only for real-time processes.)  We want to minimize	*/
            /* both behaviors here.					*/
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 24) i = 24;
                /* Don't wait for more than about 15 msecs, even	*/
                /* under extreme contention.				*/
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}
#else  /* !USE_SPIN_LOCK */

void GC_lock()
{
#ifndef NO_PTHREAD_TRYLOCK
    if (1 == GC_nprocs || GC_collecting) {
        pthread_mutex_lock(&GC_allocate_ml);
    } else {
        GC_generic_lock(&GC_allocate_ml);
    }
#else  /* !NO_PTHREAD_TRYLOCK */
    pthread_mutex_lock(&GC_allocate_ml);
#endif /* !NO_PTHREAD_TRYLOCK */
}

#endif /* !USE_SPIN_LOCK */
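/* For reference, the resulting backoff in the spin-lock path above:	*/
/* the i-th unsuccessful yield iteration sleeps for 2^i ns		*/
/* (ts.tv_nsec = 1 << i), so waits grow geometrically up to the	*/
/* roughly 15 msec cap noted there, while the first SLEEP_THRESHOLD	*/
/* iterations just call sched_yield().					*/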
#if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)

#ifdef GC_ASSERTIONS
  pthread_t GC_mark_lock_holder = NO_THREAD;
#endif

#if 0
  /* Ugly workaround for a linux threads bug in the final versions	*/
  /* of glibc2.1.  Pthread_mutex_trylock sets the mutex owner		*/
  /* field even when it fails to acquire the mutex.  This causes	*/
  /* pthread_cond_wait to die.  Remove for glibc2.2.			*/
  /* According to the man page, we should use				*/
  /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually	*/
  /* defined.								*/
  static pthread_mutex_t mark_mutex =
        {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
#else
  static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif
static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;

void GC_acquire_mark_lock()
{
/*
    if (pthread_mutex_lock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_lock failed");
    }
*/
    GC_generic_lock(&mark_mutex);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_release_mark_lock()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_mutex_unlock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_unlock failed");
    }
}
/* Collector must wait for a freelist builder for 2 reasons:		*/
/* 1) Mark bits may still be getting examined without lock.		*/
/* 2) Partial free lists referenced only by locals may not be scanned	*/
/*    correctly, e.g. if they contain "pointer-free" objects, since the */
/*    free-list link may be ignored.					*/
void GC_wait_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_wait_for_reclaim()
{
    GC_acquire_mark_lock();
    while (GC_fl_builder_count > 0) {
        GC_wait_builder();
    }
    GC_release_mark_lock();
}

void GC_notify_all_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
    if (pthread_cond_broadcast(&builder_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
#ifdef PARALLEL_MARK

static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;

void GC_wait_marker()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_notify_all_marker()
{
    if (pthread_cond_broadcast(&mark_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK */

# endif /* GC_LINUX_THREADS and friends */