/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
void ** const GC_objfreelist_ptr = GC_objfreelist;
void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif /* ATOMIC_UNCOLLECTABLE */
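
/* Illustrative sketch (not compiled): a client-side inline allocator  */
/* could pop objects off the normal-object free list through           */
/* GC_objfreelist_ptr, falling back to GC_malloc when the list is      */
/* empty.  The granule arithmetic and locking requirements are         */
/* simplified assumptions here; see gc_inline.h for the real pattern.  */
#if 0
static void *quick_malloc(size_t lb)
{
    size_t lg = (lb + GRANULE_BYTES - 1) / GRANULE_BYTES;
    void *op = GC_objfreelist_ptr[lg];

    if (0 == op) return GC_malloc(lb);      /* slow path refills the list */
    GC_objfreelist_ptr[lg] = *(void **)op;  /* unlink the head object     */
    *(void **)op = 0;                       /* clear the link word        */
    return op;
}
#endif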

STATIC void * GC_generic_or_special_malloc(size_t lb, int knd)
{
    switch(knd) {
#     ifdef STUBBORN_ALLOC
        case STUBBORN: return(GC_malloc_stubborn((size_t)lb));
#     endif
        case PTRFREE: return(GC_malloc_atomic((size_t)lb));
        case NORMAL: return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE: return(GC_malloc_uncollectable((size_t)lb));
#     ifdef ATOMIC_UNCOLLECTABLE
        case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#     endif /* ATOMIC_UNCOLLECTABLE */
        default: return(GC_generic_malloc(lb, knd));
    }
}

/* Change the size of the block pointed to by p to contain at least   */
/* lb bytes.  The object may be (and quite likely will be) moved.     */
/* The kind (e.g. atomic) is the same as that of the old object.      */
/* Shrinking of large blocks is not implemented well.                 */
GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
{
    struct hblk * h;
    hdr * hhdr;
    size_t sz;       /* Current size in bytes    */
    size_t orig_sz;  /* Original size in bytes   */
    int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
        word descr;

        sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
        hhdr -> hb_sz = sz;
        descr = GC_obj_kinds[obj_kind].ok_descriptor;
        if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
        hhdr -> hb_descr = descr;
#       ifdef MARK_BIT_PER_OBJ
          GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
#       else
          GC_ASSERT(hhdr -> hb_large_block &&
                    hhdr -> hb_map[ANY_INDEX] == 1);
#       endif
        if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
        /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
              if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
              /* Clear unneeded part of object to avoid bogus pointer */
              /* tracing.  Safe for stubborn objects.                 */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
            void * result =
                    GC_generic_or_special_malloc((word)lb, obj_kind);

            if (result == 0) return(0);
                /* Could also return original object.  But this       */
                /* gives the client warning of imminent disaster.     */
            BCOPY(p, result, lb);
#           ifndef IGNORE_FREE
              GC_free(p);
#           endif
            return(result);
        }
    } else {
        /* grow */
        void * result =
                GC_generic_or_special_malloc((word)lb, obj_kind);

        if (result == 0) return(0);
        BCOPY(p, result, sz);
#       ifndef IGNORE_FREE
          GC_free(p);
#       endif
        return(result);
    }
}
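
/* Usage sketch (illustrative sizes): reallocation may move the object; */
/* the contents are copied, the kind (atomic, normal, ...) is preserved */
/* automatically, and on failure the old object is left untouched.      */
#if 0
  {
    int *a = (int *)GC_malloc(10 * sizeof(int));

    a = (int *)GC_realloc(a, 100 * sizeof(int));  /* may move */
    if (a == 0) { /* out of memory; the original object is unchanged */ }
  }
#endif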

# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC
    /* As with malloc, avoid two levels of extra calls here. */
#   ifdef GC_ADD_CALLER
#     define RA GC_RETURN_ADDR,
#   else
#     define RA
#   endif
#   define GC_debug_realloc_replacement(p, lb) \
        GC_debug_realloc(p, lb, RA "unknown", 0)

    void * realloc(void * p, size_t lb)
    {
        return(REDIRECT_REALLOC(p, lb));
    }

#   undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */
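
/* An illustrative build sketch (assumed flags, not part of this file): */
/* a client build could route the C library realloc through the         */
/* collector by compiling the library with something like               */
/*   CFLAGS += -DREDIRECT_MALLOC=GC_malloc -DREDIRECT_REALLOC=GC_realloc */
/* in which case the realloc definition above becomes live.             */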

/* Allocate memory such that only pointers to near the          */
/* beginning of the object are considered.                      */
/* We avoid holding the allocation lock while we clear memory.  */
GC_INNER void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
    void *result;
    size_t lg;
    size_t lb_rounded;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lg = ROUNDED_UP_GRANULES(lb);
    lb_rounded = GRANULES_TO_BYTES(lg);
    n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    LOCK();
    result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
              ((word *)result)[0] = 0;
              ((word *)result)[1] = 0;
              ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
              ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
#           endif
        }
    }
    GC_bytes_allocd += lb_rounded;
    if (0 == result) {
        GC_oom_func oom_fn = GC_oom_fn;
        UNLOCK();
        return((*oom_fn)(lb));
    } else {
        UNLOCK();
        if (init && !GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}

GC_API void * GC_CALL GC_malloc_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

GC_API void * GC_CALL GC_malloc_atomic_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
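
/* Usage sketch (illustrative size): for a large object that the   */
/* client only references through (near) its beginning, the        */
/* ignore-off-page variants reduce false retention from stray      */
/* interior pointers further into the object.                      */
#if 0
  {
    char *big = (char *)GC_malloc_ignore_off_page((size_t)1 << 20);

    /* The client must keep a pointer to (near) the start alive. */
    if (big != 0) big[0] = 'x';
  }
#endif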

/* Increment GC_bytes_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                       */
GC_API void GC_CALL GC_incr_bytes_allocd(size_t n)
{
    GC_bytes_allocd += n;
}

/* The same for GC_bytes_freed. */
GC_API void GC_CALL GC_incr_bytes_freed(size_t n)
{
    GC_bytes_freed += n;
}
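
/* Illustrative sketch (assumed client code, not compiled): a wrapper  */
/* that carves objects from a private list obtained via GC_malloc_many */
/* below should credit the collector for what it hands out, so that    */
/* collections are still triggered on schedule.                        */
#if 0
static void *take_from_list(void **fl, size_t lb)
{
    void *op = *fl;

    if (op != 0) {
        *fl = *(void **)op;      /* advance the private free list */
        *(void **)op = 0;        /* client clears the link word   */
        GC_incr_bytes_allocd(lb);
    }
    return op;
}
#endif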

# ifdef PARALLEL_MARK
    STATIC volatile signed_word GC_bytes_allocd_tmp = 0;
                        /* Number of bytes of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                  */
# endif /* PARALLEL_MARK */

/* Return a list of 1 or more objects of the indicated size, linked    */
/* through the first word in the object.  This has the advantage that  */
/* it acquires the allocation lock only once, and may greatly reduce   */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would */
/* keep its own free list in thread-local storage, and call            */
/* GC_malloc_many or friends to replenish it.  (We do not round up     */
/* object sizes, since a call indicates the intention to consume many  */
/* objects of exactly this size.)                                      */
/* We assume that the size is a multiple of GRANULE_BYTES.             */
/* We return the free list by assigning it to *result, since it is     */
/* not safe to return, e.g. a linked list of pointer-free objects,     */
/* since the collector would not retain the entire list if it were     */
/* invoked just as we were returning.                                  */
/* Note that the client should usually clear the link field.           */
GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result)
{
    void *op;
    void *p;
    void **opp;
    size_t lw;  /* Length in words.     */
    size_t lg;  /* Length in granules.  */
    signed_word my_bytes_allocd = 0;
    struct obj_kind * ok = &(GC_obj_kinds[k]);
    DCL_LOCK_STATE;

    GC_ASSERT(lb != 0 && (lb & (GRANULE_BYTES-1)) == 0);
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if (0 != op) obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = BYTES_TO_WORDS(lb);
    lg = BYTES_TO_GRANULES(lb);
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    LOCK();
    if (!GC_is_initialized) GC_init();

    /* Do our share of marking work */
    if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
    }

    /* First see if we can reclaim a page of objects waiting to be */
    /* reclaimed.                                                  */
    {
        struct hblk ** rlh = ok -> ok_reclaim_list;
        struct hblk * hbp;
        hdr * hhdr;

        rlh += lg;
        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
            GC_ASSERT(hhdr -> hb_sz == lb);
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                signed_word my_bytes_allocd_tmp = GC_bytes_allocd_tmp;

                GC_ASSERT(my_bytes_allocd_tmp >= 0);
                /* We only decrement it while holding the GC lock.   */
                /* Thus we can't accidentally adjust it down in more */
                /* than one thread simultaneously.                   */
                if (my_bytes_allocd_tmp != 0) {
                  (void)AO_fetch_and_add(
                              (volatile AO_t *)(&GC_bytes_allocd_tmp),
                              (AO_t)(-my_bytes_allocd_tmp));
                  GC_bytes_allocd += my_bytes_allocd_tmp;
                }
                GC_acquire_mark_lock();
                ++ GC_fl_builder_count;
                UNLOCK();
                GC_release_mark_lock();
              }
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lb,
                                    ok -> ok_init, 0, &my_bytes_allocd);
            if (op != 0) {
              /* We also reclaimed memory, so we need to adjust     */
              /* that count.                                        */
              /* This should be atomic, so the results may be       */
              /* inaccurate.                                        */
              GC_bytes_found += my_bytes_allocd;
#             ifdef PARALLEL_MARK
                if (GC_parallel) {
                  *result = op;
                  (void)AO_fetch_and_add(
                              (volatile AO_t *)(&GC_bytes_allocd_tmp),
                              (AO_t)(my_bytes_allocd));
                  GC_acquire_mark_lock();
                  -- GC_fl_builder_count;
                  if (GC_fl_builder_count == 0) GC_notify_all_builder();
                  GC_release_mark_lock();
                  (void) GC_clear_stack(0);
                  return;
                }
#             endif
              GC_bytes_allocd += my_bytes_allocd;
              goto out;
            }
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                LOCK();
                /* The GC lock is needed for reclaim list access.  We  */
                /* must decrement fl_builder_count before reacquiring  */
                /* the GC lock.  Hopefully this path is rare.          */
              }
#           endif
        }
    }
    /* Next try to use prefix of global free list if there is one.    */
    /* We don't refill it, but we need to use it up before allocating */
    /* a new block ourselves.                                         */
    opp = &(GC_obj_kinds[k].ok_freelist[lg]);
    if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_bytes_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
            my_bytes_allocd += lb;
            if (my_bytes_allocd >= HBLKSIZE) {
                *opp = obj_link(p);
                obj_link(p) = 0;
                break;
            }
        }
        GC_bytes_allocd += my_bytes_allocd;
        goto out;
    }
    /* Next try to allocate a new block worth of objects of this size. */
    {
        struct hblk *h = GC_allochblk(lb, k, 0);

        if (h != 0) {
            if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
            GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                GC_acquire_mark_lock();
                ++ GC_fl_builder_count;
                UNLOCK();
                GC_release_mark_lock();

                op = GC_build_fl(h, lw,
                                 (ok -> ok_init || GC_debugging_started), 0);

                *result = op;
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                (void) GC_clear_stack(0);
                return;
              }
#           endif
            op = GC_build_fl(h, lw, (ok -> ok_init || GC_debugging_started), 0);
            goto out;
        }
    }

    /* As a last attempt, try allocating a single object.  Note that */
    /* this may trigger a collection or expand the heap.             */
    op = GC_generic_malloc_inner(lb, k);
    if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    (void) GC_clear_stack(0);
}

GC_API void * GC_CALL GC_malloc_many(size_t lb)
{
    void *result;

    GC_generic_malloc_many(((lb + EXTRA_BYTES + GRANULE_BYTES-1)
                            & ~(GRANULE_BYTES-1)),
                           NORMAL, &result);
    return result;
}

/* Note that the "atomic" version of this would be unsafe, since the */
/* links would not be seen by the collector.                         */
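
/* Typical (client-side, illustrative) usage of GC_malloc_many: a   */
/* thread refills a private list in one lock acquisition, then pops */
/* objects locally with GC_NEXT.  Thread-local storage for my_fl is */
/* assumed but omitted here.                                        */
#if 0
static void *my_fl = 0;

static void *alloc_one(size_t lb)  /* lb should stay constant */
{
    void *op = my_fl;

    if (0 == op) {
        op = GC_malloc_many(lb);  /* one lock acquisition, many objects */
        if (0 == op) return 0;    /* out of memory                      */
    }
    my_fl = GC_NEXT(op);  /* advance to the next linked object */
    GC_NEXT(op) = 0;      /* client should clear the link      */
    return op;
}
#endif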

/* Allocate lb bytes of pointerful, traced, but not collectable data. */
GC_API void * GC_CALL GC_malloc_uncollectable(size_t lb)
{
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
            /* We don't need the extra byte, since this won't be */
            /* collected anyway.                                 */
        lg = GC_size_map[lb];
        opp = &(GC_uobjfreelist[lg]);
        LOCK();
        if( (op = *opp) != 0 ) {
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set on free list.  It will be */
            /* cleared only temporarily during a collection, as a */
            /* result of the normal free list mark bit clearing.  */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
            UNLOCK();
        } else {
            UNLOCK();
            op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
            /* For small objects, the free lists are completely marked. */
        }
        GC_ASSERT(0 == op || GC_is_marked(op));
        return((void *) op);
    } else {
        hdr * hhdr;

        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
        if (0 == op) return(0);

        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
        hhdr = HDR(op);
        /* We don't need the lock here, since we have an undisguised */
        /* pointer.  We do need to hold the lock while we adjust     */
        /* mark bits.                                                */
        LOCK();
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
        GC_ASSERT(hhdr -> hb_n_marks == 0);
        hhdr -> hb_n_marks = 1;
        UNLOCK();
        return((void *) op);
    }
}
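
/* Usage sketch (illustrative): uncollectable objects act as roots   */
/* (their pointer contents keep other objects alive) but are never   */
/* reclaimed implicitly; the client must free them explicitly.       */
#if 0
  {
    void **handle_table =
        (void **)GC_malloc_uncollectable(64 * sizeof(void *));

    /* ... pointers stored here keep their targets alive ... */
    GC_free(handle_table);  /* explicit release is required */
  }
#endif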

/* Not well tested nor integrated.                 */
/* Debug version is tricky and currently missing.  */
#include <limits.h>

GC_API void * GC_CALL GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

    if (align <= GRANULE_BYTES) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) {
            return (*GC_get_oom_fn())(LONG_MAX-1024); /* Fail */
        }
        return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
            /* Will be HBLKSIZE aligned. */
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
    offset = (word)result % align;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
            GC_register_displacement(offset);
        }
    }
    result = (void *) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}
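
/* Worked example (illustrative sizes): GC_memalign(64, 1000)        */
/* over-allocates 1000 + 63 bytes, advances the result to the next   */
/* 64-byte boundary, and registers that displacement so the interior */
/* pointer still keeps the object alive.                             */
#if 0
  {
    void *p = GC_memalign(64, 1000);

    GC_ASSERT((word)p % 64 == 0);  /* aligned, still traced */
  }
#endif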

#ifdef ATOMIC_UNCOLLECTABLE
  /* Allocate lb bytes of pointer-free, untraced, uncollectable data. */
  /* This is normally roughly equivalent to the system malloc.        */
  /* But it may be useful if malloc is redefined.                     */
  GC_API void * GC_CALL GC_malloc_atomic_uncollectable(size_t lb)
  {
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
            /* We don't need the extra byte, since this won't be */
            /* collected anyway.                                 */
        lg = GC_size_map[lb];
        opp = &(GC_auobjfreelist[lg]);
        LOCK();
        if( (op = *opp) != 0 ) {
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
            UNLOCK();
        } else {
            UNLOCK();
            op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
        }
        GC_ASSERT(0 == op || GC_is_marked(op));
        return((void *) op);
    } else {
        hdr * hhdr;

        op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
        if (0 == op) return(0);

        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0);
        hhdr = HDR(op);

        LOCK();
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
        GC_ASSERT(hhdr -> hb_n_marks == 0);
        hhdr -> hb_n_marks = 1;
        UNLOCK();
        return((void *) op);
    }
  }
#endif /* ATOMIC_UNCOLLECTABLE */