X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;f=src%2Fmm%2Fboehm-gc%2Fmalloc.c;h=62fc64cfa38f66d5a385c0a4cdd1c1af0551716d;hb=c83bff94e40ee0a218a39931af17814d1a42cb5c;hp=5e25fe460144a7d1cfbbc291dfc0b33143235ed9;hpb=29c5375943f6b0d6c2e2f77c07c13ae04dae5565;p=cacao.git

diff --git a/src/mm/boehm-gc/malloc.c b/src/mm/boehm-gc/malloc.c
index 5e25fe460..62fc64cfa 100644
--- a/src/mm/boehm-gc/malloc.c
+++ b/src/mm/boehm-gc/malloc.c
@@ -1,7 +1,7 @@
 /*
  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
  * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
  *
  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
@@ -12,69 +12,65 @@
  * provided the above notices are retained, and a notice that the code was
  * modified is included with the above copyright notice.
  */
-/* Boehm, February 7, 1996 4:32 pm PST */
-
-#include "config.h"
 #include <stdio.h>
 #include <string.h>
 #include "private/gc_priv.h"
 
-extern ptr_t GC_clear_stack();    /* in misc.c, behaves like identity */
-void GC_extend_size_map();        /* in misc.c. */
+extern void * GC_clear_stack(void *);    /* in misc.c, behaves like identity */
+void GC_extend_size_map(size_t);         /* in misc.c. */
 
 /* Allocate reclaim list for kind:    */
 /* Return TRUE on success             */
-GC_bool GC_alloc_reclaim_list(kind)
-register struct obj_kind * kind;
+STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
 {
     struct hblk ** result = (struct hblk **)
-		GC_scratch_alloc((MAXOBJSZ+1) * sizeof(struct hblk *));
+		GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));
     if (result == 0) return(FALSE);
-    BZERO(result, (MAXOBJSZ+1)*sizeof(struct hblk *));
+    BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
     kind -> ok_reclaim_list = result;
     return(TRUE);
 }
 
-/* Allocate a large block of size lw words.    */
+/* Allocate a large block of size lb bytes.    */
 /* The block is not cleared.                   */
 /* Flags is 0 or IGNORE_OFF_PAGE.              */
 /* We hold the allocation lock.                */
-ptr_t GC_alloc_large(lw, k, flags)
-word lw;
-int k;
-unsigned flags;
+/* EXTRA_BYTES were already added to lb.       */
+ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
 {
     struct hblk * h;
-    word n_blocks = OBJ_SZ_TO_BLOCKS(lw);
+    word n_blocks;
     ptr_t result;
 
+    /* Round up to a multiple of a granule. */
+    lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
+    n_blocks = OBJ_SZ_TO_BLOCKS(lb);
     if (!GC_is_initialized) GC_init_inner();
     /* Do our share of marking work */
         if(GC_incremental && !GC_dont_gc)
 	    GC_collect_a_little_inner((int)n_blocks);
-    h = GC_allochblk(lw, k, flags);
+    h = GC_allochblk(lb, k, flags);
 #   ifdef USE_MUNMAP
 	if (0 == h) {
 	    GC_merge_unmapped();
-	    h = GC_allochblk(lw, k, flags);
+	    h = GC_allochblk(lb, k, flags);
 	}
 #   endif
     while (0 == h && GC_collect_or_expand(n_blocks, (flags != 0))) {
-	h = GC_allochblk(lw, k, flags);
+	h = GC_allochblk(lb, k, flags);
     }
     if (h == 0) {
 	result = 0;
     } else {
-	int total_bytes = n_blocks * HBLKSIZE;
+	size_t total_bytes = n_blocks * HBLKSIZE;
 	if (n_blocks > 1) {
 	    GC_large_allocd_bytes += total_bytes;
 	    if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
 		GC_max_large_allocd_bytes = GC_large_allocd_bytes;
 	}
-	result = (ptr_t) (h -> hb_body);
-	GC_words_wasted += BYTES_TO_WORDS(total_bytes) - lw;
+	result = h -> hb_body;
     }
     return result;
 }
@@ -82,13 +78,11 @@ unsigned flags;
 
 /* Allocate a large block of size lb bytes.  Clear if appropriate.    */
 /* We hold the allocation lock.                                       */
-ptr_t GC_alloc_large_and_clear(lw, k, flags)
-word lw;
-int k;
-unsigned flags;
+/* EXTRA_BYTES were already added to lb.                              */
+ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
 {
-    ptr_t result = GC_alloc_large(lw, k, flags);
-    word n_blocks = OBJ_SZ_TO_BLOCKS(lw);
+    ptr_t result = GC_alloc_large(lb, k, flags);
+    word n_blocks = OBJ_SZ_TO_BLOCKS(lb);
 
     if (0 == result) return 0;
     if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
@@ -104,60 +98,34 @@ unsigned flags;
 /* require special handling on allocation.    */
 /* First a version that assumes we already    */
 /* hold lock:                                 */
-ptr_t GC_generic_malloc_inner(lb, k)
-register word lb;
-register int k;
+void * GC_generic_malloc_inner(size_t lb, int k)
 {
-register word lw;
-register ptr_t op;
-register ptr_t *opp;
-
-    if( SMALL_OBJ(lb) ) {
-        register struct obj_kind * kind = GC_obj_kinds + k;
-#       ifdef MERGE_SIZES
-	  lw = GC_size_map[lb];
-#	else
-	  lw = ALIGNED_WORDS(lb);
-	  if (lw == 0) lw = MIN_WORDS;
-#       endif
-	opp = &(kind -> ok_freelist[lw]);
+    void *op;
+
+    if(SMALL_OBJ(lb)) {
+        struct obj_kind * kind = GC_obj_kinds + k;
+        size_t lg = GC_size_map[lb];
+	void ** opp = &(kind -> ok_freelist[lg]);
+
        if( (op = *opp) == 0 ) {
-#	    ifdef MERGE_SIZES
-	      if (GC_size_map[lb] == 0) {
-	        if (!GC_is_initialized)  GC_init_inner();
-	        if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
-	        return(GC_generic_malloc_inner(lb, k));
-	      }
-#	    else
-	      if (!GC_is_initialized) {
-	        GC_init_inner();
-	        return(GC_generic_malloc_inner(lb, k));
-	      }
-#	    endif
+	    if (GC_size_map[lb] == 0) {
+	        if (!GC_is_initialized) GC_init_inner();
+	        if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
+	        return(GC_generic_malloc_inner(lb, k));
+	    }
 	    if (kind -> ok_reclaim_list == 0) {
 	    	if (!GC_alloc_reclaim_list(kind)) goto out;
 	    }
-	    op = GC_allocobj(lw, k);
+	    op = GC_allocobj(lg, k);
 	    if (op == 0) goto out;
        }
-        /* Here everything is in a consistent state.    */
-	/* We assume the following assignment is         */
-	/* atomic.  If we get aborted                    */
-	/* after the assignment, we lose an object,      */
-	/* but that's benign.                            */
-	/* Volatile declarations may need to be added    */
-	/* to prevent the compiler from breaking things. */
-	/* If we only execute the second of the          */
-	/* following assignments, we lose the free       */
-	/* list, but that should still be OK, at least   */
-	/* for garbage collected memory.                 */
        *opp = obj_link(op);
        obj_link(op) = 0;
+        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
    } else {
-	lw = ROUNDED_UP_WORDS(lb);
-	op = (ptr_t)GC_alloc_large_and_clear(lw, k, 0);
+	op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
+	GC_bytes_allocd += lb;
    }
-    GC_words_allocd += lw;
-
 out:
    return op;
@@ -166,46 +134,41 @@ out:
 
 /* Allocate a composite object of size n bytes.  The caller guarantees */
 /* that pointers past the first page are not relevant.  Caller holds   */
 /* allocation lock.                                                    */
-ptr_t GC_generic_malloc_inner_ignore_off_page(lb, k)
-register size_t lb;
-register int k;
+void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
 {
-    register word lw;
-    ptr_t op;
+    word lb_adjusted;
+    void * op;
 
    if (lb <= HBLKSIZE)
-        return(GC_generic_malloc_inner((word)lb, k));
-    lw = ROUNDED_UP_WORDS(lb);
-    op = (ptr_t)GC_alloc_large_and_clear(lw, k, IGNORE_OFF_PAGE);
-    GC_words_allocd += lw;
+        return(GC_generic_malloc_inner(lb, k));
+    lb_adjusted = ADD_SLOP(lb);
+    op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
+    GC_bytes_allocd += lb_adjusted;
    return op;
 }
 
-ptr_t GC_generic_malloc(lb, k)
-register word lb;
-register int k;
+void * GC_generic_malloc(size_t lb, int k)
 {
-    ptr_t result;
+    void * result;
    DCL_LOCK_STATE;
 
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    if (SMALL_OBJ(lb)) {
-	DISABLE_SIGNALS();
	LOCK();
        result = GC_generic_malloc_inner((word)lb, k);
	UNLOCK();
-	ENABLE_SIGNALS();
    } else {
-	word lw;
+	size_t lg;
+	size_t lb_rounded;
	word n_blocks;
	GC_bool init;
-	lw = ROUNDED_UP_WORDS(lb);
-	n_blocks = OBJ_SZ_TO_BLOCKS(lw);
+	lg = ROUNDED_UP_GRANULES(lb);
+	lb_rounded = GRANULES_TO_BYTES(lg);
+	n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
	init = GC_obj_kinds[k].ok_init;
-	DISABLE_SIGNALS();
	LOCK();
-	result = (ptr_t)GC_alloc_large(lw, k, 0);
+	result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
	if (0 != result) {
	  if (GC_debugging_started) {
	    BZERO(result, n_blocks * HBLKSIZE);
@@ -215,14 +178,13 @@ register int k;
		/* before we release the lock.  */
	    ((word *)result)[0] = 0;
	    ((word *)result)[1] = 0;
-	    ((word *)result)[lw-1] = 0;
-	    ((word *)result)[lw-2] = 0;
+	    ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
+	    ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
#	  endif
	  }
	}
-	GC_words_allocd += lw;
+	GC_bytes_allocd += lb_rounded;
	UNLOCK();
-	ENABLE_SIGNALS();
        if (init && !GC_debugging_started && 0 != result) {
	    BZERO(result, n_blocks * HBLKSIZE);
        }
@@ -236,40 +198,34 @@ register int k;
 
 #define GENERAL_MALLOC(lb,k) \
-    (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
+    GC_clear_stack(GC_generic_malloc(lb, k))
 /* We make the GC_clear_stack_call a tail call, hoping to get more of */
 /* the stack.                                                         */
 
 /* Allocate lb bytes of atomic (pointerfree) data */
-# ifdef __STDC__
-    GC_PTR GC_malloc_atomic(size_t lb)
-# else
-    GC_PTR GC_malloc_atomic(lb)
-    size_t lb;
-# endif
+#ifdef THREAD_LOCAL_ALLOC
+  void * GC_core_malloc_atomic(size_t lb)
+#else
+  GC_API void * GC_CALL GC_malloc_atomic(size_t lb)
+#endif
 {
-register ptr_t op;
-register ptr_t * opp;
-register word lw;
-DCL_LOCK_STATE;
-
-    if( EXPECT(SMALL_OBJ(lb), 1) ) {
-#       ifdef MERGE_SIZES
-	  lw = GC_size_map[lb];
-#	else
-	  lw = ALIGNED_WORDS(lb);
-#       endif
-	opp = &(GC_aobjfreelist[lw]);
-	FASTLOCK();
-        if( EXPECT(!FASTLOCK_SUCCEEDED() || (op = *opp) == 0, 0) ) {
-            FASTUNLOCK();
+    void *op;
+    void ** opp;
+    size_t lg;
+    DCL_LOCK_STATE;
+
+    if(SMALL_OBJ(lb)) {
+	lg = GC_size_map[lb];
+	opp = &(GC_aobjfreelist[lg]);
+	LOCK();
+        if( EXPECT((op = *opp) == 0, 0) ) {
+            UNLOCK();
            return(GENERAL_MALLOC((word)lb, PTRFREE));
        }
-        /* See above comment on signals.    */
        *opp = obj_link(op);
-        GC_words_allocd += lw;
-        FASTUNLOCK();
-        return((GC_PTR) op);
+        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
+        UNLOCK();
+        return((void *) op);
    } else {
        return(GENERAL_MALLOC((word)lb, PTRFREE));
    }
@@ -277,12 +233,7 @@ DCL_LOCK_STATE;
 
 /* provide a version of strdup() that uses the collector to allocate the
   copy of the string */
-# ifdef __STDC__
-  char *GC_strdup(const char *s)
-# else
-  char *GC_strdup(s)
-  char *s;
-#endif
+GC_API char * GC_CALL GC_strdup(const char *s)
 {
  char *copy;
 
@@ -296,43 +247,38 @@ DCL_LOCK_STATE;
 }
 
 /* Allocate lb bytes of composite (pointerful) data */
-# ifdef __STDC__
-    GC_PTR GC_malloc(size_t lb)
-# else
-    GC_PTR GC_malloc(lb)
-    size_t lb;
-# endif
+#ifdef THREAD_LOCAL_ALLOC
+  void * GC_core_malloc(size_t lb)
+#else
+  GC_API void * GC_CALL GC_malloc(size_t lb)
+#endif
 {
-register ptr_t op;
-register ptr_t *opp;
-register word lw;
-DCL_LOCK_STATE;
-
-    if( EXPECT(SMALL_OBJ(lb), 1) ) {
-#       ifdef MERGE_SIZES
-	  lw = GC_size_map[lb];
-#	else
-	  lw = ALIGNED_WORDS(lb);
-#       endif
-	opp = &(GC_objfreelist[lw]);
-	FASTLOCK();
-        if( EXPECT(!FASTLOCK_SUCCEEDED() || (op = *opp) == 0, 0) ) {
-            FASTUNLOCK();
+    void *op;
+    void **opp;
+    size_t lg;
+    DCL_LOCK_STATE;
+
+    if(SMALL_OBJ(lb)) {
+	lg = GC_size_map[lb];
+	opp = (void **)&(GC_objfreelist[lg]);
+	LOCK();
+        if( EXPECT((op = *opp) == 0, 0) ) {
+            UNLOCK();
            return(GENERAL_MALLOC((word)lb, NORMAL));
        }
        /* See above comment on signals. */
        GC_ASSERT(0 == obj_link(op)
-		  || (word)obj_link(op)
+		  || ((word)obj_link(op)
		  	<= (word)GC_greatest_plausible_heap_addr
		     && (word)obj_link(op)
-		  	>= (word)GC_least_plausible_heap_addr);
+		  	>= (word)GC_least_plausible_heap_addr));
        *opp = obj_link(op);
        obj_link(op) = 0;
-        GC_words_allocd += lw;
-        FASTUNLOCK();
-        return((GC_PTR) op);
+        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
+        UNLOCK();
+        return op;
    } else {
-        return(GENERAL_MALLOC((word)lb, NORMAL));
+        return(GENERAL_MALLOC(lb, NORMAL));
    }
 }
 
@@ -350,19 +296,8 @@ DCL_LOCK_STATE;
 # define GC_debug_malloc_replacement(lb) \
	GC_debug_malloc(lb, RA "unknown", 0)
 
-# ifdef __STDC__
-    GC_PTR malloc(size_t lb)
-# else
-    GC_PTR malloc(lb)
-    size_t lb;
-# endif
+void * malloc(size_t lb)
  {
-#   if defined(GC_WIN32_THREADS) && defined(__GNUC__)
-	/* According to Gerard Allen, this helps with MINGW. */
-    /* When using threads need to initalised before use, but GCC uses a malloc
-       in __w32_sharedptr_initialize (w32-shared-ptr.c) */
-    if (!GC_is_initialized) GC_init();
-#   endif
    /* It might help to manually inline the GC_malloc call here. */
    /* But any decent compiler should reduce the extra procedure call */
    /* to at most a jump instruction in this case. */
@@ -376,27 +311,61 @@ DCL_LOCK_STATE;
      */
    if (!GC_is_initialized) return sbrk(lb);
#   endif /* I386 && GC_SOLARIS_THREADS */
-    return((GC_PTR)REDIRECT_MALLOC(lb));
+    return((void *)REDIRECT_MALLOC(lb));
  }
 
-# ifdef __STDC__
-    GC_PTR calloc(size_t n, size_t lb)
-# else
-    GC_PTR calloc(n, lb)
-    size_t n, lb;
-# endif
+#if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
+  static ptr_t GC_libpthread_start = 0;
+  static ptr_t GC_libpthread_end = 0;
+  static ptr_t GC_libld_start = 0;
+  static ptr_t GC_libld_end = 0;
+  extern GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp);
+	/* From os_dep.c */
+
+  STATIC void GC_init_lib_bounds(void)
  {
-    return((GC_PTR)REDIRECT_MALLOC(n*lb));
+    if (GC_libpthread_start != 0) return;
+    if (!GC_text_mapping("libpthread-",
+	    		 &GC_libpthread_start, &GC_libpthread_end)) {
+	WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
+	/* This might still work with some versions of libpthread, */
+	/* so we don't abort.  Perhaps we should.                  */
+        /* Generate message only once:                             */
+	  GC_libpthread_start = (ptr_t)1;
+    }
+    if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
+	WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
+    }
  }
+#endif
+
+void * calloc(size_t n, size_t lb)
+{
+#   if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
+	/* libpthread allocated some memory that is only pointed to by */
+	/* mmapped thread stacks.  Make sure it's not collectable.     */
+	{
+	  static GC_bool lib_bounds_set = FALSE;
+	  ptr_t caller = (ptr_t)__builtin_return_address(0);
+	  /* This test does not need to ensure memory visibility, since */
+	  /* the bounds will be set when/if we create another thread.   */
+	  if (!lib_bounds_set) {
+	      GC_init_lib_bounds();
+	      lib_bounds_set = TRUE;
+	  }
+	  if (caller >= GC_libpthread_start && caller < GC_libpthread_end
+	      || (caller >= GC_libld_start && caller < GC_libld_end))
+	    return GC_malloc_uncollectable(n*lb);
+	  /* The two ranges are actually usually adjacent, so there may */
+	  /* be a way to speed this up.                                 */
+	}
+#   endif
+    return((void *)REDIRECT_MALLOC(n*lb));
+}
 
 #ifndef strdup
 # include <string.h>
-# ifdef __STDC__
-    char *strdup(const char *s)
-# else
-    char *strdup(s)
-    char *s;
-# endif
+  char *strdup(const char *s)
  {
    size_t len = strlen(s) + 1;
    char * result = ((char *)REDIRECT_MALLOC(len+1));
@@ -417,68 +386,62 @@ DCL_LOCK_STATE;
 # endif /* REDIRECT_MALLOC */
 
 /* Explicitly deallocate an object p.                        */
-# ifdef __STDC__
-    void GC_free(GC_PTR p)
-# else
-    void GC_free(p)
-    GC_PTR p;
-# endif
+GC_API void GC_CALL GC_free(void * p)
 {
-    register struct hblk *h;
-    register hdr *hhdr;
-    register signed_word sz;
-    register ptr_t * flh;
-    register int knd;
-    register struct obj_kind * ok;
+    struct hblk *h;
+    hdr *hhdr;
+    size_t sz; /* In bytes */
+    size_t ngranules;	/* sz in granules */
+    void **flh;
+    int knd;
+    struct obj_kind * ok;
    DCL_LOCK_STATE;
 
    if (p == 0) return;
    	/* Required by ANSI.  It's not my fault ...	*/
+#   ifdef LOG_ALLOCS
+      GC_err_printf("GC_free(%p): %lu\n", p, (unsigned long)GC_gc_no);
+#   endif
    h = HBLKPTR(p);
    hhdr = HDR(h);
-    GC_ASSERT(GC_base(p) == p);
#   if defined(REDIRECT_MALLOC) && \
	(defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
-	 || defined(__MINGW32__)) /* Should this be MSWIN32 in general? */
+	 || defined(MSWIN32))
	/* For Solaris, we have to redirect malloc calls during  */
	/* initialization.  For the others, this seems to happen */
 	/* implicitly.                                           */
	/* Don't try to deallocate that memory.                  */
	if (0 == hhdr) return;
#   endif
-    knd = hhdr -> hb_obj_kind;
+    GC_ASSERT(GC_base(p) == p);
    sz = hhdr -> hb_sz;
+    ngranules = BYTES_TO_GRANULES(sz);
+    knd = hhdr -> hb_obj_kind;
    ok = &GC_obj_kinds[knd];
-    if (EXPECT((sz <= MAXOBJSZ), 1)) {
-#	ifdef THREADS
-	    DISABLE_SIGNALS();
-	    LOCK();
-#	endif
-	GC_mem_freed += sz;
-	/* A signal here can make GC_mem_freed and GC_non_gc_bytes */
-	/* inconsistent.  We claim this is benign.                 */
-	if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
+    if (EXPECT((ngranules <= MAXOBJGRANULES), 1)) {
+	LOCK();
+	GC_bytes_freed += sz;
+	if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
		/* Its unnecessary to clear the mark bit.  If the      */
		/* object is reallocated, it doesn't matter.  O.w. the */
		/* collector will do it, since it's on a free list.    */
	if (ok -> ok_init) {
-	    BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
+	    BZERO((word *)p + 1, sz-sizeof(word));
	}
-	flh = &(ok -> ok_freelist[sz]);
+	flh = &(ok -> ok_freelist[ngranules]);
	obj_link(p) = *flh;
	*flh = (ptr_t)p;
-#	ifdef THREADS
-	    UNLOCK();
-	    ENABLE_SIGNALS();
-#	endif
+	UNLOCK();
    } else {
-        DISABLE_SIGNALS();
+	size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
        LOCK();
-        GC_mem_freed += sz;
-	if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
+        GC_bytes_freed += sz;
+	if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
+	if (nblocks > 1) {
+	  GC_large_allocd_bytes -= nblocks * HBLKSIZE;
+	}
        GC_freehblk(h);
        UNLOCK();
-        ENABLE_SIGNALS();
    }
 }
 
@@ -486,33 +449,39 @@
 /* Only used for internally allocated objects, so we can take some */
 /* shortcuts.                                                      */
 #ifdef THREADS
-void GC_free_inner(GC_PTR p)
+void GC_free_inner(void * p)
 {
-    register struct hblk *h;
-    register hdr *hhdr;
-    register signed_word sz;
-    register ptr_t * flh;
-    register int knd;
-    register struct obj_kind * ok;
+    struct hblk *h;
+    hdr *hhdr;
+    size_t sz; /* bytes */
+    size_t ngranules;	/* sz in granules */
+    void ** flh;
+    int knd;
+    struct obj_kind * ok;
    DCL_LOCK_STATE;
 
    h = HBLKPTR(p);
    hhdr = HDR(h);
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
+    ngranules = BYTES_TO_GRANULES(sz);
    ok = &GC_obj_kinds[knd];
-    if (sz <= MAXOBJSZ) {
-	GC_mem_freed += sz;
-	if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
+    if (ngranules <= MAXOBJGRANULES) {
+	GC_bytes_freed += sz;
+	if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
	if (ok -> ok_init) {
-	    BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
+	    BZERO((word *)p + 1, sz-sizeof(word));
	}
-	flh = &(ok -> ok_freelist[sz]);
+	flh = &(ok -> ok_freelist[ngranules]);
	obj_link(p) = *flh;
	*flh = (ptr_t)p;
    } else {
-        GC_mem_freed += sz;
-	if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
+	size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
+        GC_bytes_freed += sz;
+	if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
+	if (nblocks > 1) {
+	  GC_large_allocd_bytes -= nblocks * HBLKSIZE;
+	}
        GC_freehblk(h);
    }
 }
@@ -522,13 +491,23 @@ void GC_free_inner(GC_PTR p)
#   define REDIRECT_FREE GC_free
# endif
# ifdef REDIRECT_FREE
-#   ifdef __STDC__
-      void free(GC_PTR p)
-#   else
-      void free(p)
-      GC_PTR p;
-#   endif
+  void free(void * p)
  {
+#   if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
+	{
+	  /* Don't bother with initialization checks.  If nothing     */
+	  /* has been initialized, the check fails, and that's safe,  */
+	  /* since we haven't allocated uncollectable objects either. */
+	  ptr_t caller = (ptr_t)__builtin_return_address(0);
+	  /* This test does not need to ensure memory visibility, since */
+	  /* the bounds will be set when/if we create another thread.   */
+	  if (caller >= GC_libpthread_start && caller < GC_libpthread_end
+	      || (caller >= GC_libld_start && caller < GC_libld_end)) {
+	    GC_free(p);
+	    return;
+	  }
+	}
+#   endif
#   ifndef IGNORE_FREE
      REDIRECT_FREE(p);
#   endif
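
The thread running through this patch is the move from word-based to granule-based size accounting: GC_words_allocd and GC_mem_freed become GC_bytes_allocd and GC_bytes_freed, free lists are indexed by granules (MAXOBJGRANULES) rather than words (MAXOBJSZ), and GC_alloc_large now rounds its byte request up to a granule boundary on entry. The following is a minimal standalone sketch of that arithmetic, assuming a 16-byte granule; GRANULE_BYTES is derived from the platform word size in the real collector, and these helper names merely mirror its ROUNDED_UP_GRANULES and GRANULES_TO_BYTES macros.

    #include <stdio.h>
    #include <stddef.h>

    #define GRANULE_BYTES 16   /* assumed here; platform-derived in the GC */

    /* The same rounding GC_alloc_large performs on entry:       */
    /* lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);     */
    static size_t round_up_to_granule(size_t lb)
    {
        return (lb + GRANULE_BYTES - 1) & ~(size_t)(GRANULE_BYTES - 1);
    }

    /* Stand-ins for ROUNDED_UP_GRANULES and GRANULES_TO_BYTES.  */
    static size_t rounded_up_granules(size_t lb)
    {
        return (lb + GRANULE_BYTES - 1) / GRANULE_BYTES;
    }

    static size_t granules_to_bytes(size_t lg)
    {
        return lg * GRANULE_BYTES;
    }

    int main(void)
    {
        size_t lb = 20;                        /* a 20-byte request...   */
        size_t lg = rounded_up_granules(lb);   /* ...occupies 2 granules */
        printf("%zu bytes -> %zu granules -> %zu bytes charged\n",
               lb, lg, granules_to_bytes(lg)); /* 20 -> 2 -> 32          */
        printf("rounded request: %zu bytes\n", round_up_to_granule(lb));
        return 0;
    }

This is also why GC_free can recover the free-list index straight from the block header: hb_sz is now kept in bytes, and BYTES_TO_GRANULES(sz) yields the index directly.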
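
The small-object fast path keeps its shape across the rewrite: map the request to a size class via GC_size_map, pop the head of that class's free list, and fall back to GENERAL_MALLOC when the list is empty; GC_free pushes the object back the same way. In miniature, with the link stored in the first word of a free object as obj_link() does (a sketch under those assumptions, not the collector's own code):

    #include <stdio.h>
    #include <stdlib.h>

    /* One free list per size class; the link lives in the object itself. */
    struct freelist { void *head; };

    /* Pop: the fast path of GC_malloc in miniature. */
    static void *fl_pop(struct freelist *fl)
    {
        void *op = fl->head;
        if (op != NULL) {
            fl->head = *(void **)op;   /* *opp = obj_link(op);  */
            *(void **)op = NULL;       /* obj_link(op) = 0;     */
        }
        return op;                     /* NULL: refill via the slow path */
    }

    /* Push: what GC_free does with a small object. */
    static void fl_push(struct freelist *fl, void *p)
    {
        *(void **)p = fl->head;        /* obj_link(p) = *flh;   */
        fl->head = p;                  /* *flh = p;             */
    }

    int main(void)
    {
        struct freelist fl = { NULL };
        void *a = malloc(32), *b = malloc(32);

        fl_push(&fl, a);               /* like GC_free refilling a class */
        fl_push(&fl, b);
        printf("%p %p %p\n", fl_pop(&fl), fl_pop(&fl), fl_pop(&fl));
        free(a);
        free(b);
        return 0;
    }

Storing the link inside the dead object is what makes the clearing above necessary: GC_malloc zeroes obj_link(op) after unlinking so a collectable object never starts life with a stale heap pointer in its first word, while GC_malloc_atomic can skip it because pointer-free memory is never scanned.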
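
The new calloc() and free() wrappers rely on one non-obvious trick: with REDIRECT_MALLOC on Linux, memory that libpthread or ld.so allocates may be reachable only from mmapped thread stacks, which the collector does not scan, so the wrappers classify each call by where its caller's code lives, comparing __builtin_return_address(0) against the text-segment bounds recorded by GC_init_lib_bounds(). The pattern, compressed into a sketch with the bounds lookup stubbed out (the names below are placeholders; the patch obtains the real bounds from GC_text_mapping() in os_dep.c):

    #include <stdlib.h>

    /* Hypothetical text-segment bounds of the "special" library;      */
    /* the patch fills its equivalents in once via GC_text_mapping().  */
    static char *lib_start;
    static char *lib_end;

    /* Stand-ins for GC_malloc and GC_malloc_uncollectable. */
    static void *alloc_collectable(size_t lb)   { return malloc(lb); }
    static void *alloc_uncollectable(size_t lb) { return malloc(lb); }

    void *redirected_malloc(size_t lb)
    {
        /* GCC builtin: the return address of this frame, i.e. an   */
        /* address inside whichever mapped object called us.        */
        char *caller = (char *)__builtin_return_address(0);

        /* Callers inside the watched library get memory the        */
        /* collector will never reclaim; everyone else takes the    */
        /* normal collectable path.                                 */
        if (caller >= lib_start && caller < lib_end)
            return alloc_uncollectable(lb);
        return alloc_collectable(lb);
    }

free() applies the same classification above so that blocks handed out on this path are released with GC_free() rather than a redirected or ignored free.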