 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
#include "private/gc_priv.h"

/* Allocate reclaim list for kind:      */
/* Return TRUE on success               */
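/* (The reclaim list is indexed by object granule count: entry i lists */
/* heap blocks of this kind that still contain unswept free objects of */
/* size i granules.)                                                    */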
STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
    struct hblk ** result = (struct hblk **)
                GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));
    if (result == 0) return(FALSE);
    BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
    kind -> ok_reclaim_list = result;
GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
                                      GC_bool ignore_off_page,
                                      GC_bool retry); /* from alloc.c */

/* Allocate a large block of size lb bytes.     */
/* The block is not cleared.                    */
/* Flags is 0 or IGNORE_OFF_PAGE.               */
/* We hold the allocation lock.                 */
/* EXTRA_BYTES were already added to lb.        */
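/* (EXTRA_BYTES is the slop the collector adds to each request,        */
/* e.g. so that a pointer just past the end of an object still keeps   */
/* the object reachable.)                                               */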
GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
    GC_bool retry = FALSE;

    /* Round up to a multiple of a granule. */
    lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
    n_blocks = OBJ_SZ_TO_BLOCKS(lb);
    if (!GC_is_initialized) GC_init();
    /* Do our share of marking work */
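    /* (GC_collect_a_little_inner(n) performs roughly n units of       */
    /* marking, so the work done here scales with the allocation.)     */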
    if (GC_incremental && !GC_dont_gc)
        GC_collect_a_little_inner((int)n_blocks);
    h = GC_allochblk(lb, k, flags);
        h = GC_allochblk(lb, k, flags);
    while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) {
        h = GC_allochblk(lb, k, flags);
        size_t total_bytes = n_blocks * HBLKSIZE;
        GC_large_allocd_bytes += total_bytes;
        if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
            GC_max_large_allocd_bytes = GC_large_allocd_bytes;
        result = h -> hb_body;

/* Allocate a large block of size lb bytes.  Clear if appropriate.     */
/* We hold the allocation lock.                                         */
/* EXTRA_BYTES were already added to lb.                                */
STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
    ptr_t result = GC_alloc_large(lb, k, flags);
    word n_blocks = OBJ_SZ_TO_BLOCKS(lb);

    if (0 == result) return 0;
    if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
        /* Clear the whole block, in case of GC_realloc call. */
        BZERO(result, n_blocks * HBLKSIZE);
/* Allocate lb bytes for an object of kind k.   */
/* Should not be used directly to allocate      */
/* objects such as STUBBORN objects that        */
/* require special handling on allocation.      */
/* First a version that assumes we already      */
/* hold the allocation lock:                    */
GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
        struct obj_kind * kind = GC_obj_kinds + k;
        size_t lg = GC_size_map[lb];
        void ** opp = &(kind -> ok_freelist[lg]);

        if( (op = *opp) == 0 ) {
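            /* GC_size_map[lb] gives the number of granules actually   */
            /* allocated for a request of lb bytes; a zero entry means */
            /* the map has not yet been extended to cover lb, so we    */
            /* build it and retry.                                     */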
            if (GC_size_map[lb] == 0) {
                if (!GC_is_initialized) GC_init();
                if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
                return(GC_generic_malloc_inner(lb, k));
            if (kind -> ok_reclaim_list == 0) {
                if (!GC_alloc_reclaim_list(kind)) goto out;
            op = GC_allocobj(lg, k);
            if (op == 0) goto out;
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
        GC_bytes_allocd += lb;
/* Allocate a composite object of size lb bytes.  The caller guarantees */
/* that pointers past the first page are not relevant.  Caller holds    */
/* allocation lock.                                                      */
GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
        return(GC_generic_malloc_inner(lb, k));
    lb_adjusted = ADD_SLOP(lb);
    op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
    GC_bytes_allocd += lb_adjusted;

GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k)
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
        result = GC_generic_malloc_inner((word)lb, k);
        lg = ROUNDED_UP_GRANULES(lb);
        lb_rounded = GRANULES_TO_BYTES(lg);
            return((*GC_get_oom_fn())(lb));
        n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
        init = GC_obj_kinds[k].ok_init;
        result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
            if (GC_debugging_started) {
                BZERO(result, n_blocks * HBLKSIZE);
            /* Clear any memory that might be used for GC descriptors  */
            /* before we release the lock.                             */
            ((word *)result)[0] = 0;
            ((word *)result)[1] = 0;
            ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
            ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
        GC_bytes_allocd += lb_rounded;
    if (init && !GC_debugging_started && 0 != result) {
        BZERO(result, n_blocks * HBLKSIZE);
        return((*GC_get_oom_fn())(lb));
/* Allocate lb bytes of atomic (pointer-free) data */
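/* (Atomic objects are never scanned for pointers by the collector,    */
/* so they must not hold the only reference to any collectable object.) */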
#ifdef THREAD_LOCAL_ALLOC
  GC_INNER void * GC_core_malloc_atomic(size_t lb)
  GC_API void * GC_CALL GC_malloc_atomic(size_t lb)
        lg = GC_size_map[lb];
        opp = &(GC_aobjfreelist[lg]);
        if (EXPECT((op = *opp) == 0, FALSE)) {
            return(GENERAL_MALLOC((word)lb, PTRFREE));
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
    return(GENERAL_MALLOC((word)lb, PTRFREE));

/* Allocate lb bytes of composite (pointerful) data */
#ifdef THREAD_LOCAL_ALLOC
  GC_INNER void * GC_core_malloc(size_t lb)
  GC_API void * GC_CALL GC_malloc(size_t lb)
        lg = GC_size_map[lb];
        opp = (void **)&(GC_objfreelist[lg]);
        if (EXPECT((op = *opp) == 0, FALSE)) {
            return (GENERAL_MALLOC((word)lb, NORMAL));
        GC_ASSERT(0 == obj_link(op)
                  || ((word)obj_link(op)
                         <= (word)GC_greatest_plausible_heap_addr
                      && (word)obj_link(op)
                         >= (word)GC_least_plausible_heap_addr));
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
    return(GENERAL_MALLOC(lb, NORMAL));

/* Allocate lb bytes of pointerful, traced, but not collectable data */
GC_API void * GC_CALL GC_malloc_uncollectable(size_t lb)
    if( SMALL_OBJ(lb) ) {
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
            /* We don't need the extra byte, since this won't be       */
            /* collected anyway.                                       */
        lg = GC_size_map[lb];
        opp = &(GC_uobjfreelist[lg]);
        if( (op = *opp) != 0 ) {
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set on free list.  It will be      */
            /* cleared only temporarily during a collection, as a      */
            /* result of the normal free list mark bit clearing.       */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
        /* For small objects, the free lists are completely marked. */
        GC_ASSERT(0 == op || GC_is_marked(op));
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
        if (0 == op) return(0);
        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
        /* We don't need the lock here, since we have an undisguised   */
        /* pointer.  We do need to hold the lock while we adjust       */
        /* mark bits.                                                  */
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
        GC_ASSERT(hhdr -> hb_n_marks == 0);
            /* This is not guaranteed in the multi-threaded case       */
            /* because the counter could be updated before locking.    */
        hhdr -> hb_n_marks = 1;
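/* A minimal client-side sketch of the allocation entry points above    */
/* (not part of this file; assumes the public gc.h interface and        */
/* linking against the collector):                                       */
/*                                                                       */
/*      struct node { struct node *next; int value; };                   */
/*      struct node *n = GC_malloc(sizeof(struct node));                 */
/*                      -- scanned for pointers, cleared, collectable    */
/*      char *buf = GC_malloc_atomic(4096);                              */
/*                      -- contents never scanned for pointers           */
/*      void *tab = GC_malloc_uncollectable(64);                         */
/*                      -- traced, but reclaimed only via GC_free        */
/*      GC_free(tab);                                                    */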
#ifdef REDIRECT_MALLOC

  /* Avoid unnecessary nested procedure calls here, by #defining some  */
  /* malloc replacements.  Otherwise we end up saving a                */
  /* meaningless return address in the object.  It also speeds things up, */
  /* but it is admittedly quite ugly.                                  */
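  /* (REDIRECT_MALLOC is typically defined to GC_malloc or             */
  /* GC_malloc_uncollectable; the wrappers below then route the C      */
  /* library's malloc, calloc, strdup, etc. through the collector.)    */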
# define GC_debug_malloc_replacement(lb) \
        GC_debug_malloc(lb, GC_DBG_RA "unknown", 0)

  void * malloc(size_t lb)
    /* It might help to manually inline the GC_malloc call here.       */
    /* But any decent compiler should reduce the extra procedure call  */
    /* to at most a jump instruction in this case.                     */
#   if defined(I386) && defined(GC_SOLARIS_THREADS)
      /*
       * Thread initialization can call malloc before
       * we're ready for it.
       * It's not clear that this is enough to help matters.
       * The thread implementation may well call malloc at other
       * inopportune times.
       */
      if (!GC_is_initialized) return sbrk(lb);
#   endif /* I386 && GC_SOLARIS_THREADS */
    return((void *)REDIRECT_MALLOC(lb));

#if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
  STATIC ptr_t GC_libpthread_start = 0;
  STATIC ptr_t GC_libpthread_end = 0;
  STATIC ptr_t GC_libld_start = 0;
  STATIC ptr_t GC_libld_end = 0;

  STATIC void GC_init_lib_bounds(void)
    if (GC_libpthread_start != 0) return;
    GC_init(); /* if not called yet */
    if (!GC_text_mapping("libpthread-",
                         &GC_libpthread_start, &GC_libpthread_end)) {
        WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
        /* This might still work with some versions of libpthread,     */
        /* so we don't abort.  Perhaps we should.                      */
        /* Generate message only once:                                 */
        GC_libpthread_start = (ptr_t)1;
    if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
        WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
#endif /* GC_LINUX_THREADS */

# define GC_SIZE_MAX SIZE_MAX
# define GC_SIZE_MAX (~(size_t)0)

#define GC_SQRT_SIZE_MAX ((1U << (WORDSZ / 2)) - 1)
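/* (If both n and lb are <= GC_SQRT_SIZE_MAX, then n*lb <=              */
/* GC_SQRT_SIZE_MAX*GC_SQRT_SIZE_MAX <= GC_SIZE_MAX, so the product     */
/* cannot overflow; the division below is only needed on the rare       */
/* path where one of the operands is large.)                            */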
  void * calloc(size_t n, size_t lb)
    if ((lb | n) > GC_SQRT_SIZE_MAX /* fast initial test */
        && lb && n > GC_SIZE_MAX / lb)
#   if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
      /* libpthread allocated some memory that is only pointed to by   */
      /* mmapped thread stacks.  Make sure it's not collectable.       */
        static GC_bool lib_bounds_set = FALSE;
        ptr_t caller = (ptr_t)__builtin_return_address(0);
        /* This test does not need to ensure memory visibility, since  */
        /* the bounds will be set when/if we create another thread.    */
        if (!lib_bounds_set) {
            GC_init_lib_bounds();
            lib_bounds_set = TRUE;
        if ((caller >= GC_libpthread_start && caller < GC_libpthread_end)
            || (caller >= GC_libld_start && caller < GC_libld_end))
          return GC_malloc_uncollectable(n*lb);
          /* The two ranges are actually usually adjacent, so there may */
          /* be a way to speed this up.                                 */
    return((void *)REDIRECT_MALLOC(n*lb));

  char *strdup(const char *s)
    size_t lb = strlen(s) + 1;
    char *result = (char *)REDIRECT_MALLOC(lb);
    BCOPY(s, result, lb);
#endif /* !defined(strdup) */
  /* If strdup is macro defined, we assume that it actually calls malloc, */
  /* and thus the right thing will happen even without overriding it.     */
  /* This seems to be true on most Linux systems.                         */
  /* This is similar to strdup().       */
  char *strndup(const char *str, size_t size)
    size_t len = strlen(str);
    copy = (char *)REDIRECT_MALLOC(len + 1);
    BCOPY(str, copy, len);
#endif /* !strndup */

# undef GC_debug_malloc_replacement

#endif /* REDIRECT_MALLOC */

/* Explicitly deallocate an object p. */
GC_API void GC_CALL GC_free(void * p)
    size_t sz; /* In bytes */
    size_t ngranules;  /* sz in granules */
    struct obj_kind * ok;
        /* Required by ANSI.  It's not my fault ... */
        GC_err_printf("GC_free(%p): %lu\n", p, (unsigned long)GC_gc_no);
#   if defined(REDIRECT_MALLOC) && \
        (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
      /* For Solaris, we have to redirect malloc calls during          */
      /* initialization.  For the others, this seems to happen         */
      /* implicitly.                                                   */
      /* Don't try to deallocate that memory.                          */
      if (0 == hhdr) return;
    GC_ASSERT(GC_base(p) == p);
    ngranules = BYTES_TO_GRANULES(sz);
    knd = hhdr -> hb_obj_kind;
    ok = &GC_obj_kinds[knd];
    if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        /* It's unnecessary to clear the mark bit.  If the object      */
        /* is reallocated, it doesn't matter.  Otherwise, the          */
        /* collector will do it, since it's on a free list.            */
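        /* (Clear all but the first word: the first word becomes the   */
        /* free-list link when the object is pushed onto ok_freelist.) */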
        BZERO((word *)p + 1, sz-sizeof(word));
        flh = &(ok -> ok_freelist[ngranules]);
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        GC_large_allocd_bytes -= nblocks * HBLKSIZE;
/* Explicitly deallocate an object p when we already hold the lock.    */
/* Only used for internally allocated objects, so we can take some     */
/* shortcuts.                                                           */
GC_INNER void GC_free_inner(void * p)
    size_t sz; /* bytes */
    size_t ngranules;  /* sz in granules */
    struct obj_kind * ok;

    knd = hhdr -> hb_obj_kind;
    ngranules = BYTES_TO_GRANULES(sz);
    ok = &GC_obj_kinds[knd];
    if (ngranules <= MAXOBJGRANULES) {
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        BZERO((word *)p + 1, sz-sizeof(word));
        flh = &(ok -> ok_freelist[ngranules]);
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        GC_large_allocd_bytes -= nblocks * HBLKSIZE;

#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
# define REDIRECT_FREE GC_free
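  /* (The free() replacement below checks whether the caller lies in   */
  /* the libpthread or ld.so text ranges recorded above, mirroring     */
  /* the calloc() path above, before handing the object back to the    */
  /* collector.)                                                       */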
# if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
      /* Don't bother with initialization checks.  If nothing          */
      /* has been initialized, the check fails, and that's safe,       */
      /* since we haven't allocated uncollectable objects either.      */
      ptr_t caller = (ptr_t)__builtin_return_address(0);
      /* This test does not need to ensure memory visibility, since    */
      /* the bounds will be set when/if we create another thread.      */
      if ((caller >= GC_libpthread_start && caller < GC_libpthread_end)
          || (caller >= GC_libld_start && caller < GC_libld_end)) {
#endif /* REDIRECT_FREE */