/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */
#include "private/gc_priv.h"

void * GC_clear_stack(void *);  /* in misc.c, behaves like identity */

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
void ** const GC_objfreelist_ptr = GC_objfreelist;
void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
STATIC void * GC_generic_or_special_malloc(size_t lb, int knd)
{
    switch(knd) {
#     ifdef STUBBORN_ALLOC
        case STUBBORN: return(GC_malloc_stubborn((size_t)lb));
#     endif
        case PTRFREE: return(GC_malloc_atomic((size_t)lb));
        case NORMAL: return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE: return(GC_malloc_uncollectable((size_t)lb));
#     ifdef ATOMIC_UNCOLLECTABLE
        case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#     endif /* ATOMIC_UNCOLLECTABLE */
        default: return(GC_generic_malloc(lb, knd));
    }
}
/* Change the size of the block pointed to by p to contain at least    */
/* lb bytes.  The object may be (and quite likely will be) moved.      */
/* The kind (e.g. atomic) is the same as that of the old object.       */
/* Shrinking of large blocks is not implemented well.                  */
GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
{
    struct hblk * h;
    hdr * hhdr;
    size_t sz;      /* Current size in bytes    */
    size_t orig_sz; /* Original sz in bytes     */
    int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
          word descr;
          sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
          hhdr -> hb_sz = sz;
          descr = GC_obj_kinds[obj_kind].ok_descriptor;
          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
          hhdr -> hb_descr = descr;
#         ifdef MARK_BIT_PER_OBJ
            GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
#         else
            GC_ASSERT(hhdr -> hb_large_block &&
                      hhdr -> hb_map[ANY_INDEX] == 1);
#         endif
          if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
          /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
              if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
              /* Clear unneeded part of object to avoid bogus pointer   */
              /* tracing.                                               */
              /* Safe for stubborn objects.                             */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
              void * result = GC_generic_or_special_malloc((word)lb, obj_kind);
              if (result == 0) return(0);
                  /* Could also return original object.  But this      */
                  /* gives the client warning of imminent disaster.    */
              BCOPY(p, result, lb);
#             ifndef IGNORE_FREE
                GC_free(p);
#             endif
              return(result);
        }
    } else {
        /* grow */
          void * result = GC_generic_or_special_malloc((word)lb, obj_kind);
          if (result == 0) return(0);
          BCOPY(p, result, sz);
#         ifndef IGNORE_FREE
            GC_free(p);
#         endif
          return(result);
    }
}
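/*
 * Illustrative usage sketch (not taken from this file; buf, n and
 * handle_oom are hypothetical names).  GC_realloc keeps the kind of the
 * original object and may move it, so only the returned pointer should
 * be used afterwards:
 *
 *     int *buf = (int *)GC_MALLOC(n * sizeof(int));
 *     ...
 *     buf = (int *)GC_REALLOC(buf, 2 * n * sizeof(int));
 *     if (0 == buf) handle_oom();
 *
 * GC_MALLOC and GC_REALLOC are the gc.h wrapper macros; GC_malloc and
 * GC_realloc may equally be called directly.
 */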
# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC

/* As with malloc, avoid two levels of extra calls here. */
# ifdef GC_ADD_CALLER
#   define RA GC_RETURN_ADDR,
# else
#   define RA
# endif
# define GC_debug_realloc_replacement(p, lb) \
        GC_debug_realloc(p, lb, RA "unknown", 0)

void * realloc(void * p, size_t lb)
  {
    return(REDIRECT_REALLOC(p, lb));
  }

# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */
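/*
 * Illustrative build note (a sketch, not a statement of this package's
 * official options): when the collector is built with REDIRECT_MALLOC,
 * e.g.
 *
 *     -DREDIRECT_MALLOC=GC_malloc
 *
 * the block above makes realloc() call GC_realloc by default; defining
 * REDIRECT_REALLOC explicitly (for instance to
 * GC_debug_realloc_replacement, as defined above) selects a different
 * replacement.  Consult the collector's macro documentation for the
 * supported combinations.
 */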
/* Allocate memory such that only pointers to near the         */
/* beginning of the object are considered.                     */
/* We avoid holding allocation lock while we clear memory.     */
void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
    void *result;
    size_t lg;
    size_t lb_rounded;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lg = ROUNDED_UP_GRANULES(lb);
    lb_rounded = GRANULES_TO_BYTES(lg);
    n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    LOCK();
    result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        } else {
            /* Clear any memory that might be used for GC descriptors */
            /* before we release the lock.                            */
            ((word *)result)[0] = 0;
            ((word *)result)[1] = 0;
            ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
            ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
        }
    }
    GC_bytes_allocd += lb_rounded;
    if (0 == result) {
        UNLOCK();
        return((*GC_oom_fn)(lb));
    } else {
        UNLOCK();
        if (init && !GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}
GC_API void * GC_CALL GC_malloc_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

GC_API void * GC_CALL GC_malloc_atomic_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
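/*
 * Illustrative usage sketch (not part of this file): these entry points
 * suit large objects for which the client always keeps a pointer to (or
 * near) the beginning of the object, e.g. a big pointer-free buffer:
 *
 *     char *buf = (char *)GC_malloc_atomic_ignore_off_page(1 << 20);
 *     if (0 == buf) return;   // out of memory
 *
 * Pointers far past the start may not keep such an object alive, so the
 * base pointer itself must remain visible to the collector while the
 * data is in use.
 */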
/* Increment GC_bytes_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                        */
void GC_incr_bytes_allocd(size_t n)
{
    GC_bytes_allocd += n;
}

/* The same for GC_bytes_freed. */
void GC_incr_bytes_freed(size_t n) { GC_bytes_freed += n; }
extern signed_word GC_bytes_found;  /* Protected by GC lock. */

#ifdef PARALLEL_MARK
  volatile signed_word GC_bytes_allocd_tmp = 0;
                        /* Number of bytes of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                   */
#endif /* PARALLEL_MARK */
/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                        */
/* We assume that the size is a multiple of GRANULE_BYTES.               */
/* We return the free-list by assigning it to *result, since it is      */
/* not safe to return, e.g. a linked list of pointer-free objects,      */
/* since the collector would not retain the entire list if it were      */
/* invoked just as we were returning.                                    */
/* Note that the client should usually clear the link field.            */
/* (An illustrative usage sketch appears after GC_malloc_many below.)   */
void GC_generic_malloc_many(size_t lb, int k, void **result)
    size_t lw;      /* Length in words.     */
    size_t lg;      /* Length in granules.  */
    signed_word my_bytes_allocd = 0;
    struct obj_kind * ok = &(GC_obj_kinds[k]);

    GC_ASSERT(lb != 0 && (lb & (GRANULE_BYTES-1)) == 0);
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if (0 != op) obj_link(op) = 0;
    lw = BYTES_TO_WORDS(lb);
    lg = BYTES_TO_GRANULES(lb);
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
    if (GC_incremental && !GC_dont_gc) {
        GC_collect_a_little_inner(1);
    /* First see if we can reclaim a page of objects waiting to be     */
    /* reclaimed.                                                       */
        struct hblk ** rlh = ok -> ok_reclaim_list;
        while ((hbp = *rlh) != 0) {
            *rlh = hhdr -> hb_next;
            GC_ASSERT(hhdr -> hb_sz == lb);
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
              signed_word my_bytes_allocd_tmp = GC_bytes_allocd_tmp;
              GC_ASSERT(my_bytes_allocd_tmp >= 0);
              /* We only decrement it while holding the GC lock.       */
              /* Thus we can't accidentally adjust it down in more     */
              /* than one thread simultaneously.                       */
              if (my_bytes_allocd_tmp != 0) {
                (void)AO_fetch_and_add(
                        (volatile void *)(&GC_bytes_allocd_tmp),
                        (AO_t)(-my_bytes_allocd_tmp));
                GC_bytes_allocd += my_bytes_allocd_tmp;
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              GC_release_mark_lock();
            op = GC_reclaim_generic(hbp, hhdr, lb,
                                    ok -> ok_init, 0, &my_bytes_allocd);
              /* We also reclaimed memory, so we need to adjust        */
              /* that count.                                           */
              /* This should be atomic, so the results may be          */
              /* inaccurate.                                           */
              GC_bytes_found += my_bytes_allocd;
#             ifdef PARALLEL_MARK
                (void)AO_fetch_and_add(
                        (volatile AO_t *)(&GC_bytes_allocd_tmp),
                        (AO_t)(my_bytes_allocd));
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                (void) GC_clear_stack(0);
            GC_bytes_allocd += my_bytes_allocd;
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
            /* GC lock is needed for reclaim list access.  We          */
            /* must decrement fl_builder_count before reacquiring GC   */
            /* lock.  Hopefully this path is rare.                     */
    /* Next try to use prefix of global free list if there is one.     */
    /* We don't refill it, but we need to use it up before allocating  */
    /* a new block ourselves.                                           */
      opp = &(GC_obj_kinds[k].ok_freelist[lg]);
      if ( (op = *opp) != 0 ) {
        for (p = op; p != 0; p = obj_link(p)) {
          my_bytes_allocd += lb;
          if (my_bytes_allocd >= HBLKSIZE) {
        GC_bytes_allocd += my_bytes_allocd;
    /* Next try to allocate a new block worth of objects of this size.  */
        struct hblk *h = GC_allochblk(lb, k, 0);
          if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
          GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
#         ifdef PARALLEL_MARK
            GC_acquire_mark_lock();
            ++ GC_fl_builder_count;
            GC_release_mark_lock();
            op = GC_build_fl(h, lw,
                             (ok -> ok_init || GC_debugging_started), 0);
            GC_acquire_mark_lock();
            -- GC_fl_builder_count;
            if (GC_fl_builder_count == 0) GC_notify_all_builder();
            GC_release_mark_lock();
            (void) GC_clear_stack(0);
          op = GC_build_fl(h, lw, (ok -> ok_init || GC_debugging_started), 0);

    /* As a last attempt, try allocating a single object.  Note that    */
    /* this may trigger a collection or expand the heap.                */
      op = GC_generic_malloc_inner(lb, k);
      if (0 != op) obj_link(op) = 0;
    (void) GC_clear_stack(0);
GC_API void * GC_CALL GC_malloc_many(size_t lb)
{
    void *result;
    GC_generic_malloc_many(((lb + EXTRA_BYTES + GRANULE_BYTES-1)
                            & ~(GRANULE_BYTES-1)),
                           NORMAL, &result);
    return result;
}
/* Note that the "atomic" version of this would be unsafe, since the   */
/* links would not be seen by the collector.                           */
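/*
 * Illustrative usage sketch (not part of the collector; my_freelist and
 * my_alloc_16 are hypothetical names).  A client thread keeps a private
 * free list, typically in thread-local storage, and refills it in bulk:
 *
 *     static void *my_freelist = 0;      // thread-local in real code
 *
 *     void * my_alloc_16(void)           // 16-byte collected objects
 *     {
 *         void *op = my_freelist;
 *         if (0 == op) {
 *             op = GC_malloc_many(16);   // linked through first word
 *             if (0 == op) return 0;     // out of memory
 *         }
 *         my_freelist = GC_NEXT(op);     // GC_NEXT is the gc.h link macro
 *         GC_NEXT(op) = 0;               // clear link, as advised above
 *         return op;
 *     }
 *
 * The private list must stay reachable (here via a static root) so the
 * collector retains the unconsumed objects.
 */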
/* Allocate lb bytes of pointerful, traced, but not collectable data.  */
GC_API void * GC_CALL GC_malloc_uncollectable(size_t lb)
{
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be  */
                  /* collected anyway.                                  */
        lg = GC_size_map[lb];
        opp = &(GC_uobjfreelist[lg]);
        LOCK();
        if( (op = *opp) != 0 ) {
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set on free list.  It will be      */
            /* cleared only temporarily during a collection, as a      */
            /* result of the normal free list mark bit clearing.       */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
            UNLOCK();
        } else {
            UNLOCK();
            op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
            /* For small objects, the free lists are completely marked. */
        }
        GC_ASSERT(0 == op || GC_is_marked(op));
        return((void *) op);
    } else {
        hdr * hhdr;

        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
        if (0 == op) return(0);

        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
        hhdr = HDR(op);
        /* We don't need the lock here, since we have an undisguised   */
        /* pointer.  We do need to hold the lock while we adjust       */
        /* mark bits.                                                  */
        LOCK();
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
        GC_ASSERT(hhdr -> hb_n_marks == 0);
        hhdr -> hb_n_marks = 1;
        UNLOCK();
        return((void *) op);
    }
}
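/*
 * Illustrative usage sketch (root_table and N_ROOTS are hypothetical
 * names): uncollectable objects are never reclaimed automatically, which
 * makes them suitable for long-lived roots that are released explicitly:
 *
 *     struct node **root_table = (struct node **)
 *         GC_malloc_uncollectable(N_ROOTS * sizeof(struct node *));
 *     ...
 *     GC_free(root_table);    // the only way this memory is released
 */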
/* Not well tested nor integrated.                  */
/* Debug version is tricky and currently missing.   */
#include <limits.h>

GC_API void * GC_CALL GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    void *result;

    if (align <= GRANULE_BYTES) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
        return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
            /* Will be HBLKSIZE aligned. */
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
    offset = (word)result % align;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
            GC_register_displacement(offset);
        }
    }
    result = (void *) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}
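/*
 * Illustrative usage sketch (not part of this file): request collected
 * storage whose address is a multiple of the given alignment, e.g. for
 * a cache-line-aligned buffer:
 *
 *     void *p = GC_memalign(64, 1024);   // 1 KB, 64-byte aligned
 *
 * As the comments above note, this routine is not well tested, and an
 * alignment larger than HBLKSIZE simply fails through the out-of-memory
 * handler.
 */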
# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointer-free, untraced, uncollectable data.    */
/* This is normally roughly equivalent to the system malloc.           */
/* But it may be useful if malloc is redefined.                        */
GC_API void * GC_CALL GC_malloc_atomic_uncollectable(size_t lb)
{
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be  */
                  /* collected anyway.                                  */
        lg = GC_size_map[lb];
        opp = &(GC_auobjfreelist[lg]);
        LOCK();
        if( (op = *opp) != 0 ) {
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
            UNLOCK();
        } else {
            UNLOCK();
            op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
        }
        GC_ASSERT(0 == op || GC_is_marked(op));
        return((void *) op);
    } else {
        hdr * hhdr;

        op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
        if (0 == op) return(0);

        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0);
        hhdr = HDR(op);
        LOCK();
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
        GC_ASSERT(hhdr -> hb_n_marks == 0);
        hhdr -> hb_n_marks = 1;
        UNLOCK();
        return((void *) op);
    }
}
#endif /* ATOMIC_UNCOLLECTABLE */
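/*
 * Illustrative usage sketch (iobuf is a hypothetical name; requires a
 * collector built with ATOMIC_UNCOLLECTABLE): this behaves much like
 * system malloc for pointer-free data that the collector should neither
 * trace nor reclaim:
 *
 *     char *iobuf = (char *)GC_malloc_atomic_uncollectable(4096);
 *     ...
 *     GC_free(iobuf);   // must be released explicitly
 */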