/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

#include "private/gc_priv.h"
extern void * GC_clear_stack(void *);  /* in misc.c, behaves like identity */
void GC_extend_size_map(size_t);       /* in misc.c */
/* Allocate reclaim list for kind:      */
/* Return TRUE on success.              */
GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
{
    struct hblk ** result = (struct hblk **)
                GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));
    if (result == 0) return(FALSE);
    BZERO(result, (MAXOBJGRANULES+1) * sizeof(struct hblk *));
    kind -> ok_reclaim_list = result;
    return(TRUE);
}
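
/* Usage sketch (illustrative only; "ngranules" is a hypothetical      */
/* variable, not defined here): the list is indexed by object size in  */
/* granules, one bucket per small-object size class:                   */
/*   struct hblk *blocks = kind -> ok_reclaim_list[ngranules];         */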
/* Allocate a large block of size lb bytes.     */
/* The block is not cleared.                    */
/* Flags is 0 or IGNORE_OFF_PAGE.               */
/* We hold the allocation lock.                 */
/* EXTRA_BYTES were already added to lb.        */
ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
{
    struct hblk * h;
    word n_blocks;
    ptr_t result;

    /* Round up to a multiple of a granule. */
    lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
    n_blocks = OBJ_SZ_TO_BLOCKS(lb);
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work. */
    if (GC_incremental && !GC_dont_gc)
        GC_collect_a_little_inner((int)n_blocks);
    h = GC_allochblk(lb, k, flags);
#   ifdef USE_MUNMAP
        if (0 == h) {
            GC_merge_unmapped();
            h = GC_allochblk(lb, k, flags);
        }
#   endif
    while (0 == h && GC_collect_or_expand(n_blocks, (flags != 0))) {
        h = GC_allochblk(lb, k, flags);
    }
    if (h == 0) {
        result = 0;
    } else {
        size_t total_bytes = n_blocks * HBLKSIZE;
        if (n_blocks > 1) {
            GC_large_allocd_bytes += total_bytes;
            if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
                GC_max_large_allocd_bytes = GC_large_allocd_bytes;
        }
        result = h -> hb_body;
    }
    return result;
}
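
/* Worked example of the round-up idiom above (GRANULE_BYTES is        */
/* configuration-dependent; 16 is assumed here purely for              */
/* illustration):                                                      */
/*   lb = 20:  (20 + 15) & ~15  ==  35 & ~15  ==  32                   */
/* i.e. the request is padded up to the next granule boundary.         */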
/* Allocate a large block of size lb bytes.  Clear if appropriate.     */
/* We hold the allocation lock.                                        */
/* EXTRA_BYTES were already added to lb.                               */
ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
{
    ptr_t result = GC_alloc_large(lb, k, flags);
    word n_blocks = OBJ_SZ_TO_BLOCKS(lb);

    if (0 == result) return 0;
    if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
        /* Clear the whole block, in case of GC_realloc call. */
        BZERO(result, n_blocks * HBLKSIZE);
    }
    return result;
}
/* Allocate lb bytes for an object of kind k.   */
/* Should not be used directly to allocate      */
/* objects such as STUBBORN objects that        */
/* require special handling on allocation.      */
/* First a version that assumes we already      */
/* hold the allocation lock:                    */
void * GC_generic_malloc_inner(size_t lb, int k)
{
    void *op;

    if (SMALL_OBJ(lb)) {
        struct obj_kind * kind = GC_obj_kinds + k;
        size_t lg = GC_size_map[lb];
        void ** opp = &(kind -> ok_freelist[lg]);

        if( (op = *opp) == 0 ) {
            if (GC_size_map[lb] == 0) {
                if (!GC_is_initialized) GC_init_inner();
                if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
                return(GC_generic_malloc_inner(lb, k));
            }
            if (kind -> ok_reclaim_list == 0) {
                if (!GC_alloc_reclaim_list(kind)) goto out;
            }
            op = GC_allocobj(lg, k);
            if (op == 0) goto out;
        }
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
    } else {
        op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
        GC_bytes_allocd += lb;
    }

out:
    return op;
}
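
/* The small-object path above pops the first object from the free     */
/* list for size class lg; schematically (illustration only):          */
/*   before:  opp --> op --> next --> ...                               */
/*   after:   opp --> next --> ...     with obj_link(op) == 0           */
/* Clearing obj_link(op) keeps the handed-out object from retaining     */
/* a spurious pointer into the free list.                               */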
/* Allocate a composite object of size n bytes.  The caller guarantees */
/* that pointers past the first page are not relevant.  Caller holds   */
/* the allocation lock.                                                */
void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
{
    word lb_adjusted;
    void * op;

    if (lb <= HBLKSIZE)
        return(GC_generic_malloc_inner(lb, k));
    lb_adjusted = ADD_SLOP(lb);
    op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
    GC_bytes_allocd += lb_adjusted;
    return op;
}
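
/* The corresponding public entry point is GC_malloc_ignore_off_page() */
/* (declared in gc.h); e.g. for a large buffer whose client always     */
/* keeps a pointer to its beginning:                                   */
/*   char *buf = GC_malloc_ignore_off_page(1 << 20);                   */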
void * GC_generic_malloc(size_t lb, int k)
{
    void * result;
    DCL_LOCK_STATE;

    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    if (SMALL_OBJ(lb)) {
        LOCK();
        result = GC_generic_malloc_inner((word)lb, k);
        UNLOCK();
    } else {
        size_t lw, lb_rounded;
        word n_blocks;
        GC_bool init;

        lw = ROUNDED_UP_WORDS(lb);
        lb_rounded = WORDS_TO_BYTES(lw);
        n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
        init = GC_obj_kinds[k].ok_init;
        LOCK();
        result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
        if (0 != result) {
            if (GC_debugging_started) {
                BZERO(result, n_blocks * HBLKSIZE);
            } else {
#               ifdef THREADS
                  /* Clear any memory that might be used for GC        */
                  /* descriptors before we release the lock.           */
                  ((word *)result)[0] = 0;
                  ((word *)result)[1] = 0;
                  ((word *)result)[lw-1] = 0;
                  ((word *)result)[lw-2] = 0;
#               endif
            }
        }
        GC_bytes_allocd += lb_rounded;
        UNLOCK();
        if (init && !GC_debugging_started && 0 != result) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
    }
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        return(result);
    }
}
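
/* Usage sketch (hypothetical caller; NORMAL and PTRFREE are the       */
/* standard object kinds):                                             */
/*   void *p = GC_generic_malloc(sz, NORMAL);   -- scanned for ptrs    */
/*   void *q = GC_generic_malloc(sz, PTRFREE);  -- never scanned       */
/* On exhaustion the registered GC_oom_fn decides whether to return    */
/* NULL or abort.                                                      */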
#define GENERAL_MALLOC(lb,k) \
    GC_clear_stack(GC_generic_malloc(lb, k))
/* We make the GC_clear_stack call a tail call, hoping to get more of  */
/* the stack cleared.                                                  */
/* Allocate lb bytes of atomic (pointer-free) data. */
#ifdef THREAD_LOCAL_ALLOC
  void * GC_core_malloc_atomic(size_t lb)
#else
  void * GC_malloc_atomic(size_t lb)
#endif
{
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb)) {
        lg = GC_size_map[lb];
        opp = &(GC_aobjfreelist[lg]);
        LOCK();
        if( EXPECT((op = *opp) == 0, 0) ) {
            UNLOCK();
            return(GENERAL_MALLOC((word)lb, PTRFREE));
        }
        *opp = obj_link(op);
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        UNLOCK();
        return((void *) op);
    } else {
        return(GENERAL_MALLOC((word)lb, PTRFREE));
    }
}
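
/* Example (client code): atomic objects are never scanned for         */
/* pointers, so they must not hold the only reference to a             */
/* collectable object:                                                 */
/*   double *vec = (double *)GC_malloc_atomic(n * sizeof(double));     */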
/* Provide a version of strdup() that uses the collector to allocate   */
/* the copy of the string.                                             */
char *GC_strdup(const char *s)
{
  char *copy;

  if (s == NULL) return NULL;
  if ((copy = GC_malloc_atomic(strlen(s) + 1)) == NULL) {
    errno = ENOMEM;
    return NULL;
  }
  strcpy(copy, s);
  return copy;
}
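
/* Example (client code): the copy is atomic and collectable, so no    */
/* explicit free is required:                                          */
/*   char *name = GC_strdup(argv[0]);                                  */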
/* Allocate lb bytes of composite (pointerful) data. */
#ifdef THREAD_LOCAL_ALLOC
  void * GC_core_malloc(size_t lb)
#else
  void * GC_malloc(size_t lb)
#endif
{
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb)) {
        lg = GC_size_map[lb];
        opp = (void **)&(GC_objfreelist[lg]);
        LOCK();
        if( EXPECT((op = *opp) == 0, 0) ) {
            UNLOCK();
            return(GENERAL_MALLOC((word)lb, NORMAL));
        }
        GC_ASSERT(0 == obj_link(op)
                  || ((word)obj_link(op)
                        <= (word)GC_greatest_plausible_heap_addr
                      && (word)obj_link(op)
                        >= (word)GC_least_plausible_heap_addr));
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        UNLOCK();
        return((void *) op);
    } else {
        return(GENERAL_MALLOC(lb, NORMAL));
    }
}
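
/* Example (client code): NORMAL objects are scanned conservatively,   */
/* so pointer fields keep their referents alive:                       */
/*   struct node { struct node *next; int v; };                        */
/*   struct node *n = (struct node *)GC_malloc(sizeof(struct node));   */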
# ifdef REDIRECT_MALLOC

/* Avoid unnecessary nested procedure calls here, by #defining some    */
/* malloc replacements.  Otherwise we end up saving a meaningless      */
/* return address in the object.  It also speeds things up, but it     */
/* is admittedly quite ugly.                                           */
# ifdef GC_ADD_CALLER
#   define RA GC_RETURN_ADDR,
# else
#   define RA
# endif
# define GC_debug_malloc_replacement(lb) \
    GC_debug_malloc(lb, RA "unknown", 0)
void * malloc(size_t lb)
{
    /* It might help to manually inline the GC_malloc call here.       */
    /* But any decent compiler should reduce the extra procedure call  */
    /* to at most a jump instruction in this case.                     */
#   if defined(I386) && defined(GC_SOLARIS_THREADS)
      /*
       * Thread initialization can call malloc before
       * we're ready for it.
       * It's not clear that this is enough to help matters.
       * The thread implementation may well call malloc at other
       * inopportune times.
       */
      if (!GC_is_initialized) return sbrk(lb);
#   endif /* I386 && GC_SOLARIS_THREADS */
    return((void *)REDIRECT_MALLOC(lb));
}
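
/* Redirection is normally enabled on the compiler command line, e.g.  */
/* -DREDIRECT_MALLOC=GC_malloc (see the collector's macro              */
/* documentation); all of the replacements below are compiled only in  */
/* that configuration.                                                 */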
#if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
  static ptr_t GC_libpthread_start = 0;
  static ptr_t GC_libpthread_end = 0;
  static ptr_t GC_libld_start = 0;
  static ptr_t GC_libld_end = 0;
  extern GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp);
        /* From os_dep.c */

  void GC_init_lib_bounds(void)
  {
    if (GC_libpthread_start != 0) return;
    if (!GC_text_mapping("libpthread-",
                         &GC_libpthread_start, &GC_libpthread_end)) {
        WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
        /* This might still work with some versions of libpthread,     */
        /* so we don't abort.  Perhaps we should.                      */
        /* Generate the message only once:                             */
        GC_libpthread_start = (ptr_t)1;
    }
    if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
        WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
    }
  }
#endif /* GC_LINUX_THREADS */
void * calloc(size_t n, size_t lb)
{
#   if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
        /* libpthread allocated some memory that is only pointed to by */
        /* mmapped thread stacks.  Make sure it's not collectable.     */
        {
          static GC_bool lib_bounds_set = FALSE;
          ptr_t caller = (ptr_t)__builtin_return_address(0);
          /* This test does not need to ensure memory visibility,      */
          /* since the bounds will be set when/if we create another    */
          /* thread.                                                   */
          if (!lib_bounds_set) {
            GC_init_lib_bounds();
            lib_bounds_set = TRUE;
          }
          if ((caller >= GC_libpthread_start && caller < GC_libpthread_end)
              || (caller >= GC_libld_start && caller < GC_libld_end))
            return GC_malloc_uncollectable(n*lb);
          /* The two ranges are usually adjacent, so there may be a    */
          /* way to speed this up.                                     */
        }
#   endif
    return((void *)REDIRECT_MALLOC(n*lb));
}
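
/* Note: n*lb can overflow size_t for huge arguments; this version     */
/* does not guard against that.  A minimal sketch of such a guard      */
/* (not present in this file):                                         */
/*   if (lb != 0 && n > (size_t)-1 / lb) return NULL;                  */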
#ifndef strdup
  char *strdup(const char *s)
  {
    size_t len = strlen(s) + 1;        /* already includes the '\0' */
    char * result = ((char *)REDIRECT_MALLOC(len));
    if (result == 0) {
      errno = ENOMEM;
      return 0;
    }
    BCOPY(s, result, len);
    return result;
  }
#endif /* !defined(strdup) */
/* If strdup is macro defined, we assume that it actually calls malloc, */
/* and thus the right thing will happen even without overriding it.     */
/* This seems to be true on most Linux systems.                         */
#undef GC_debug_malloc_replacement

# endif /* REDIRECT_MALLOC */
/* Explicitly deallocate an object p. */
void GC_free(void * p)
{
    struct hblk *h;
    hdr *hhdr;
    size_t sz;          /* in bytes */
    size_t ngranules;   /* sz in granules */
    void **flh;
    int knd;
    struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p == 0) return;
        /* Required by ANSI.  It's not my fault ... */
#   ifdef LOG_ALLOCS
      GC_err_printf("GC_free(%p): %lu\n", p, (unsigned long)GC_gc_no);
#   endif
    h = HBLKPTR(p);
    hhdr = HDR(h);
#   if defined(REDIRECT_MALLOC) && \
        (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
         || defined(MSWIN32))
        /* For Solaris, we have to redirect malloc calls during        */
        /* initialization.  For the others, this seems to happen       */
        /* implicitly.                                                 */
        /* Don't try to deallocate that memory.                        */
        if (0 == hhdr) return;
#   endif
    GC_ASSERT(GC_base(p) == p);
    sz = hhdr -> hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    knd = hhdr -> hb_obj_kind;
    ok = &GC_obj_kinds[knd];
    if (EXPECT((ngranules <= MAXOBJGRANULES), 1)) {
        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        /* It's unnecessary to clear the mark bit.  If the object is   */
        /* reallocated, it doesn't matter.  Otherwise the collector    */
        /* will do it, since it's on a free list.                      */
        if (ok -> ok_init) {
            BZERO((word *)p + 1, sz - sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
        UNLOCK();
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
            GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
        UNLOCK();
    }
}
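
/* Example (client code): GC_free is optional, and is only safe when   */
/* p is the last reference to an object returned by the collector:     */
/*   void *p = GC_malloc(64);                                          */
/*   GC_free(p);    (eagerly returns the object to its free list)      */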
/* Explicitly deallocate an object p when we already hold the lock.    */
/* Only used for internally allocated objects, so we can take some     */
/* shortcuts.                                                          */
#ifdef THREADS
void GC_free_inner(void * p)
{
    struct hblk *h;
    hdr *hhdr;
    size_t sz;          /* in bytes */
    size_t ngranules;   /* sz in granules */
    void ** flh;
    int knd;
    struct obj_kind * ok;

    h = HBLKPTR(p);
    hhdr = HDR(h);
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    ok = &GC_obj_kinds[knd];
    if (ngranules <= MAXOBJGRANULES) {
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (ok -> ok_init) {
            BZERO((word *)p + 1, sz - sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
            GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
    }
}
#endif /* THREADS */
# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
#   define REDIRECT_FREE GC_free
# endif

# ifdef REDIRECT_FREE
  void free(void * p)
  {
#   if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
      {
        /* Don't bother with initialization checks.  If nothing        */
        /* has been initialized, the check fails, and that's safe,     */
        /* since we haven't allocated uncollectable objects either.    */
        ptr_t caller = (ptr_t)__builtin_return_address(0);
        /* This test does not need to ensure memory visibility, since  */
        /* the bounds will be set when/if we create another thread.    */
        if ((caller >= GC_libpthread_start && caller < GC_libpthread_end)
            || (caller >= GC_libld_start && caller < GC_libld_end)) {
          GC_free(p);
          return;
        }
      }
#   endif
#   ifndef IGNORE_FREE
      REDIRECT_FREE(p);
#   endif
  }
# endif /* REDIRECT_FREE */