/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */
#include "private/gc_priv.h"

extern ptr_t GC_clear_stack();        /* in misc.c, behaves like identity */
void GC_extend_size_map();            /* in misc.c. */
GC_bool GC_alloc_reclaim_list();      /* in malloc.c */
/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
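/* Illustrative sketch (not part of the original mallocx.c): an inline  */
/* allocator indexes these lists by object size in words; objects are   */
/* linked through their first word.  Actually popping an entry also     */
/* requires the allocation lock and allocation accounting (see          */
/* gc_inline.h); the hypothetical helper below only peeks at the head   */
/* of the NORMAL free list.                                             */
#if 0
  static int GC_example_normal_list_empty(size_t lw)
  {
      /* Nonzero iff no free object of exactly lw words is cached. */
      return GC_objfreelist_ptr[lw] == 0;
  }
#endif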
GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
    switch(knd) {
#       ifdef STUBBORN_ALLOC
        case STUBBORN: return(GC_malloc_stubborn((size_t)lb));
#       endif
        case PTRFREE: return(GC_malloc_atomic((size_t)lb));
        case NORMAL: return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE: return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
        case AUNCOLLECTABLE: return(GC_malloc_atomic_uncollectable((size_t)lb));
#       endif /* ATOMIC_UNCOLLECTABLE */
        default: return(GC_generic_malloc(lb,knd));
    }
}
/* Change the size of the block pointed to by p to contain at least   */
/* lb bytes.  The object may be (and quite likely will be) moved.     */
/* The kind (e.g. atomic) is the same as that of the old.             */
/* Shrinking of large blocks is not implemented well.                 */
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p; size_t lb;
# endif
{
    register struct hblk * h;
    register word sz;        /* Current size in bytes  */
    register word orig_sz;   /* Original sz in bytes   */
    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
        sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
        hhdr -> hb_sz = BYTES_TO_WORDS(sz);
        descr = GC_obj_kinds[obj_kind].ok_descriptor;
        if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
        hhdr -> hb_descr = descr;
        if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
        /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
                if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
                /* Clear unneeded part of object to avoid bogus pointer */
                /* tracing.  Safe for stubborn objects.                 */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
            GC_PTR result =
                    GC_generic_or_special_malloc((word)lb, obj_kind);
            if (result == 0) return(0);
                /* Could also return original object.  But this     */
                /* gives the client warning of imminent disaster.   */
            BCOPY(p, result, lb);
        }
    } else {
        /* grow */
        GC_PTR result =
                GC_generic_or_special_malloc((word)lb, obj_kind);
        if (result == 0) return(0);
        BCOPY(p, result, sz);
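/* Illustrative sketch (not part of the original mallocx.c): GC_realloc */
/* may move the object, so only the returned pointer may be used        */
/* afterwards.  The helper below is hypothetical.                       */
#if 0
  static int *grow_int_array(int *arr, size_t new_count)
  {
      /* The new object keeps the kind of the old one, so an array      */
      /* first allocated with GC_malloc_atomic stays pointer-free.      */
      return (int *)GC_realloc(arr, new_count * sizeof(int));
  }
#endif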
# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC
/* As with malloc, avoid two levels of extra calls here. */
# ifdef GC_ADD_CALLER
#   define RA GC_RETURN_ADDR,
# else
#   define RA
# endif
# define GC_debug_realloc_replacement(p, lb) \
        GC_debug_realloc(p, lb, RA "unknown", 0)

GC_PTR realloc(GC_PTR p, size_t lb)
{
    return(REDIRECT_REALLOC(p, lb));
}

# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */
/* The same thing, except caller does not hold allocation lock.  */
/* We avoid holding allocation lock while we clear memory.       */
ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register ptr_t result;
    word lw;
    word n_blocks;
    GC_bool init;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        } else {
            /* Clear any memory that might be used for GC descriptors */
            /* before we release the lock.                            */
            ((word *)result)[0] = 0;
            ((word *)result)[1] = 0;
            ((word *)result)[lw-1] = 0;
            ((word *)result)[lw-2] = 0;
        }
    }
    GC_words_allocd += lw;
    if (0 == result) return((*GC_oom_fn)(lb));
    if (init && !GC_debugging_started) {
        BZERO(result, n_blocks * HBLKSIZE);
    }
    return(result);
}
# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_ignore_off_page(size_t lb)
# else
  char * GC_malloc_ignore_off_page(lb)
  size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_atomic_ignore_off_page(size_t lb)
# else
  char * GC_malloc_atomic_ignore_off_page(lb)
  size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
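/* Illustrative sketch (not part of the original mallocx.c): the        */
/* *_ignore_off_page variants are intended for large objects for which  */
/* the client promises to keep a pointer to (or near) the beginning     */
/* visible to the collector; interior pointers far into the object need */
/* not be considered.  The struct and helper below are hypothetical.    */
#if 0
  struct big_buffer {
      size_t len;
      double data[100000];
  };

  static struct big_buffer * new_big_buffer(void)
  {
      /* Keep the returned (base) pointer reachable, e.g. in a global   */
      /* or a local variable, for as long as the buffer is in use.      */
      return (struct big_buffer *)
                 GC_malloc_ignore_off_page(sizeof(struct big_buffer));
  }
#endif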
# ifdef __STDC__
/* Increment GC_words_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                        */
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}
/* The same for GC_mem_freed. */
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */
/* Analogous to the above, but assumes a small object size, and */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.        */
ptr_t GC_generic_malloc_words_small_inner(lw, k)
register word lw;
register int k;
{
    register ptr_t op;
    register ptr_t *opp;
    register struct obj_kind * kind = GC_obj_kinds + k;

    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
            op = GC_clear_stack(GC_allocobj((word)lw, k));
        }
        if (0 == op) return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
    }
    GC_words_allocd += lw;
/* Analogous to the above, but assumes a small object size, and */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.        */
#ifdef __STDC__
    ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
    ptr_t GC_generic_malloc_words_small(lw, k)
    register word lw;
    register int k;
#endif
{
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    op = GC_generic_malloc_words_small_inner(lw, k);
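/* Illustrative sketch (not part of the original mallocx.c): unlike     */
/* GC_malloc, the size argument here is an exact object size in words,  */
/* not bytes, and no size-class rounding via GC_size_map is applied.    */
/* The struct and helper below are hypothetical.                        */
#if 0
  struct pair { void *car; void *cdr; };

  static struct pair * new_pair(void)
  {
      /* NORMAL kind: the contents are scanned for pointers. */
      return (struct pair *)GC_generic_malloc_words_small(
                                ALIGNED_WORDS(sizeof(struct pair)), NORMAL);
  }
#endif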
#if defined(THREADS) && !defined(SRC_M3)

extern signed_word GC_mem_found;   /* Protected by GC lock. */

#ifdef PARALLEL_MARK
volatile signed_word GC_words_allocd_tmp = 0;
                        /* Number of words of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                   */
#endif /* PARALLEL_MARK */
extern ptr_t GC_reclaim_generic();

/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                       */
/* We return the free list by assigning it to *result, since it is      */
/* not safe to return, e.g. a linked list of pointer-free objects,      */
/* since the collector would not retain the entire list if it were      */
/* invoked just as we were returning.                                   */
/* Note that the client should usually clear the link field.            */
void GC_generic_malloc_many(lb, k, result)
register word lb;
register int k;
ptr_t *result;
{
    ptr_t op, p;
    ptr_t *opp;
    word lw;
    word my_words_allocd = 0;
    struct obj_kind * ok = &(GC_obj_kinds[k]);

#   if defined(GATHERSTATS) || defined(PARALLEL_MARK)
#     define COUNT_ARG , &my_words_allocd
#   else
#     define COUNT_ARG
#     define NEED_TO_COUNT
#   endif
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = ALIGNED_WORDS(lb);
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
    if (GC_incremental && !GC_dont_gc) {
        GC_collect_a_little_inner(1);
    }
    /* First see if we can reclaim a page of objects waiting to be */
    /* reclaimed.                                                  */
    {
        struct hblk ** rlh = ok -> ok_reclaim_list;
        struct hblk * hbp;
        hdr * hhdr;

        rlh += lw;
        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
              {
                signed_word my_words_allocd_tmp = GC_words_allocd_tmp;

                GC_ASSERT(my_words_allocd_tmp >= 0);
                /* We only decrement it while holding the GC lock.   */
                /* Thus we can't accidentally adjust it down in more */
                /* than one thread simultaneously.                   */
                if (my_words_allocd_tmp != 0) {
                  (void)GC_atomic_add(
                          (volatile GC_word *)(&GC_words_allocd_tmp),
                          (GC_word)(-my_words_allocd_tmp));
                  GC_words_allocd += my_words_allocd_tmp;
                }
              }
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              GC_release_mark_lock();
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lw,
                                    ok -> ok_init, 0 COUNT_ARG);
#           ifdef NEED_TO_COUNT
              /* We are neither gathering statistics, nor marking in */
              /* parallel.  Thus GC_reclaim_generic doesn't count    */
              /* for us.                                              */
              for (p = op; p != 0; p = obj_link(p)) {
                my_words_allocd += lw;
              }
#           endif
#           if defined(GATHERSTATS)
              /* We also reclaimed memory, so we need to adjust      */
              /* that count.                                          */
              /* This should be atomic, so the results may be        */
              /* inaccurate.                                          */
              GC_mem_found += my_words_allocd;
#           endif
#           ifdef PARALLEL_MARK
              (void)GC_atomic_add(
                      (volatile GC_word *)(&GC_words_allocd_tmp),
                      (GC_word)(my_words_allocd));
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              (void) GC_clear_stack(0);
#           endif
            GC_words_allocd += my_words_allocd;
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              /* GC lock is needed for reclaim list access.  We        */
              /* must decrement fl_builder_count before reacquiring GC */
              /* lock.  Hopefully this path is rare.                   */
#           endif
    /* Next try to use prefix of global free list if there is one.    */
    /* We don't refill it, but we need to use it up before allocating */
    /* a new block ourselves.                                         */
    opp = &(GC_obj_kinds[k].ok_freelist[lw]);
    if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_words_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
            my_words_allocd += lw;
            if (my_words_allocd >= BODY_SZ) {
                *opp = obj_link(p);
                obj_link(p) = 0;
                break;
            }
        }
        GC_words_allocd += my_words_allocd;
    /* Next try to allocate a new block worth of objects of this size. */
    {
        struct hblk *h = GC_allochblk(lw, k, 0);

        if (h != 0) {
            if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
            GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
                                 - BYTES_TO_WORDS(HBLKSIZE) % lw;
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              GC_release_mark_lock();
#           endif
            op = GC_build_fl(h, lw, ok -> ok_init, 0);
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              (void) GC_clear_stack(0);
#           endif
    /* As a last attempt, try allocating a single object.  Note that */
    /* this may trigger a collection or expand the heap.             */
    op = GC_generic_malloc_inner(lb, k);
    if (0 != op) obj_link(op) = 0;

    *result = op;
    (void) GC_clear_stack(0);
}
GC_PTR GC_malloc_many(size_t lb)
{
    ptr_t result;
    GC_generic_malloc_many(lb, NORMAL, &result);
    return result;
}

/* Note that the "atomic" version of this would be unsafe, since the */
/* links would not be seen by the collector.                         */
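/* Illustrative sketch (not part of the original mallocx.c): a typical  */
/* client keeps a per-thread free list that it replenishes with         */
/* GC_malloc_many and pops objects off it, clearing the link word       */
/* before use.  The variable and helper names below are hypothetical.   */
#if 0
  static GC_PTR my_free_list = 0;   /* ideally in thread-local storage */

  static void * my_alloc(size_t lb)
  {
      void *op;

      if (my_free_list == 0) {
          my_free_list = GC_malloc_many(lb);
          if (my_free_list == 0) return 0;      /* out of memory */
      }
      op = my_free_list;
      my_free_list = obj_link(op);      /* next object, linked through */
                                        /* the first word              */
      obj_link(op) = 0;                 /* clear the link field        */
      return op;
  }
#endif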
/* Allocate lb bytes of pointerful, traced, but not collectable data */
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
    register ptr_t op;
    register ptr_t *opp;
    register word lw;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
              /* We don't need the extra byte, since this won't be */
              /* collected anyway.                                 */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_uobjfreelist[lw]);
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals. */
            GC_words_allocd += lw;
            /* Mark bit was already set on free list.  It will be  */
            /* cleared only temporarily during a collection, as a  */
            /* result of the normal free list mark bit clearing.   */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            return((GC_PTR) op);
        }
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised */
    /* pointer.  We do need to hold the lock while we adjust     */
    /* mark bits.                                                */
    {
        register struct hblk * h;
        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
    }
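/* Illustrative sketch (not part of the original mallocx.c):            */
/* uncollectable objects are traced for pointers but never reclaimed    */
/* automatically, so they must be released with GC_free when no longer  */
/* needed.  The helper names below are hypothetical.                    */
#if 0
  static void ** make_root_table(size_t n)
  {
      /* The table itself is never collected, but pointers stored in it */
      /* still keep their targets alive.                                */
      return (void **)GC_malloc_uncollectable(n * sizeof(void *));
  }

  static void drop_root_table(void **tbl)
  {
      GC_free(tbl);     /* explicit deallocation is required */
  }
#endif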
/* Not well tested nor integrated. */
/* Debug version is tricky and currently missing. */
#include <limits.h>

GC_PTR GC_memalign(size_t align, size_t lb)
{
#   ifdef ALIGN_DOUBLE
        if (align <= WORDS_TO_BYTES(2) && lb > align) return GC_malloc(lb);
#   endif
    if (align <= WORDS_TO_BYTES(1)) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
        return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
            /* Will be HBLKSIZE aligned. */
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
    offset = (word)result % align;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
            GC_register_displacement(offset);
        }
    }
    result = (GC_PTR) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
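/* Illustrative sketch (not part of the original mallocx.c): requesting */
/* collectable storage whose address is a multiple of a given power of  */
/* two, e.g. for cache-line-aligned data.  The helper is hypothetical.  */
#if 0
  static void * new_aligned_buffer(void)
  {
      /* 64-byte aligned, traced storage. */
      void *buf = GC_memalign(64, 4096);

      GC_ASSERT(((word)buf & 63) == 0);
      return buf;
  }
#endif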
# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data */
/* This is normally roughly equivalent to the system malloc.      */
/* But it may be useful if malloc is redefined.                   */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
    register ptr_t op;
    register ptr_t *opp;
    register word lw;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
              /* We don't need the extra byte, since this won't be */
              /* collected anyway.                                 */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_auobjfreelist[lw]);
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals. */
            GC_words_allocd += lw;
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            return((GC_PTR) op);
        }
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised */
    /* pointer.  We do need to hold the lock while we adjust     */
    /* mark bits.                                                */
    {
        register struct hblk * h;
        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
    }
    return((GC_PTR) op);
}

#endif /* ATOMIC_UNCOLLECTABLE */
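/* Illustrative sketch (not part of the original mallocx.c): atomic     */
/* uncollectable storage is neither scanned for pointers nor ever       */
/* reclaimed by the collector, so it behaves much like system malloc    */
/* storage and must be released explicitly with GC_free.  The helper    */
/* below is hypothetical.                                               */
#if 0
# ifdef ATOMIC_UNCOLLECTABLE
  static void * new_io_buffer(size_t n)
  {
      void *buf = GC_malloc_atomic_uncollectable(n);

      /* Pointers stored in this buffer are invisible to the collector; */
      /* keep only non-pointer data here.                               */
      return buf;       /* call GC_free(buf) when done */
  }
# endif
#endif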