/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include "private/gc_priv.h"

GC_bool GC_use_entire_heap = 0;
/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */
# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
        /* largest block we will allocate starting on a black   */
        /* listed block.  Must be >= HBLKSIZE.                  */

# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list. */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a    */
        /* single free list.                                          */
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single  */
        /* free list.                                                 */

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                                + UNIQUE_THRESHOLD
struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

#ifndef USE_MUNMAP

  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
        /* Number of free bytes on each list. */

  /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS */
  /* > GC_max_large_allocd_bytes?                                 */
  static GC_bool GC_enough_large_bytes_left(word bytes, int n)
  {
    int i;

    for (i = N_HBLK_FLS; i >= n; --i) {
        bytes += GC_free_bytes[i];
        if (bytes > GC_max_large_allocd_bytes) return TRUE;
    }
    return FALSE;
  }
# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);
# define FREE_ASSERT(e) GC_ASSERT(e)

#else /* USE_MUNMAP */

# define INCR_FREE_BYTES(n, b)
# define FREE_ASSERT(e)

#endif /* USE_MUNMAP */
/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(word blocks_needed)
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (int)(blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                                + UNIQUE_THRESHOLD;
}
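
/*
 * Illustrative arithmetic (editorial example, not from the original
 * source): with UNIQUE_THRESHOLD 32, HUGE_THRESHOLD 256 and
 * FL_COMPRESSION 8, a request for 40 blocks maps to index
 * (40 - 32)/8 + 32 = 33, while any request of 256 blocks or more maps
 * to N_HBLK_FLS = (256 - 32)/8 + 32 = 60.  Requests of 1..32 blocks
 * each get a list of their own.
 */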
# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */
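
/*
 * (Editorial note) Under USE_MUNMAP a block may remain on a free list
 * after its pages have been returned to the OS; only the header
 * survives, with WAS_UNMAPPED recording the state.  Any code below
 * that writes into a free block must therefore check IS_MAPPED() and
 * GC_remap() it first.
 */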
# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    word total_free = 0;
    hdr * hhdr;
    word sz;
    unsigned i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
#     ifdef USE_MUNMAP
        if (0 != h) GC_printf("Free list %lu:\n",
                              (unsigned long)i);
#     else
        if (0 != h) GC_printf("Free list %lu (Total size %lu):\n",
                              (unsigned long)i,
                              (unsigned long)GC_free_bytes[i]);
#     endif
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
        GC_printf("\t%p size %lu ", h, (unsigned long)sz);
        total_free += sz;
        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
            GC_printf("start black listed\n");
        } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
            GC_printf("partially black listed\n");
        } else {
            GC_printf("not black listed\n");
        }
        h = hhdr -> hb_next;
      }
    }
    if (total_free != GC_large_free_bytes) {
        GC_printf("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                  (unsigned long) GC_large_free_bytes);
    }
    GC_printf("Total of %lu bytes on free list\n", (unsigned long)total_free);
}
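
/*
 * (Editorial note) This is a debugging aid; e.g., one can invoke
 * "call GC_print_hblkfreelist()" from gdb while stopped inside the
 * collector to inspect the current free-list population.
 */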
/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                 */
int free_list_index_of(hdr *wanted)
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
        for (h = GC_hblkfreelist[i]; h != 0; h = hhdr -> hb_next) {
            hhdr = HDR(h);
            if (hhdr == wanted) return i;
        }
    }
    return -1;
}
void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;

    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections. */
        while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
        }
        GC_printf("***Section from %p to %p\n", start, end);
        for (p = start; p < end;) {
            hhdr = HDR(p);
            GC_printf("\t%p ", p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf("Missing header!!(%p)\n", (void *)hhdr);
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf("\tfree block of size 0x%lx bytes",
                          (unsigned long)(hhdr -> hb_sz));
                if (IS_MAPPED(hhdr)) {
                    GC_printf("\n");
                } else {
                    GC_printf("(unmapped)\n");
                }
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf("\t\tBlock not on free list %d!!\n",
                              correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf("\t\tBlock on list %d, should be on %d!!\n",
                              actual_index, correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf("\tused for blocks of size 0x%lx bytes\n",
                          (unsigned long)(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}

# endif /* NO_DEBUGGING */
/* Initialize hdr for a block containing the indicated size and         */
/* kind of objects.                                                     */
/* Return FALSE on failure.                                             */
static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
                            int kind, unsigned flags)
{
    word descr;
    size_t granules;

    /* Set size, kind and mark proc fields. */
      hhdr -> hb_sz = byte_sz;
      hhdr -> hb_obj_kind = (unsigned char)kind;
      hhdr -> hb_flags = (unsigned char)flags;
      hhdr -> hb_block = block;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += byte_sz;
      hhdr -> hb_descr = descr;

#   ifdef MARK_BIT_PER_OBJ
      /* Set hb_inv_sz as portably as possible.                         */
      /* We set it to the smallest value such that sz * inv_sz > 2**32. */
      /* This may be more precision than necessary.                     */
      if (byte_sz > MAXOBJBYTES) {
        hhdr -> hb_inv_sz = LARGE_INV_SZ;
      } else {
        word inv_sz;

#       if CPP_WORDSZ == 64
          inv_sz = ((word)1 << 32)/byte_sz;
          if (((inv_sz*byte_sz) >> 32) == 0) ++inv_sz;
#       else  /* 32 bit words */
          GC_ASSERT(byte_sz >= 4);
          inv_sz = ((unsigned)1 << 31)/byte_sz;
          inv_sz *= 2;
          while (inv_sz*byte_sz > byte_sz) ++inv_sz;
#       endif
        hhdr -> hb_inv_sz = inv_sz;
      }
#   else /* MARK_BIT_PER_GRANULE */
      hhdr -> hb_large_block = (unsigned char)(byte_sz > MAXOBJBYTES);
      granules = BYTES_TO_GRANULES(byte_sz);
      if (EXPECT(!GC_add_map_entry(granules), FALSE)) {
        /* Make it look like a valid block. */
        hhdr -> hb_sz = HBLKSIZE;
        hhdr -> hb_descr = 0;
        hhdr -> hb_large_block = TRUE;
        hhdr -> hb_map = 0;
        return FALSE;
      } else {
        size_t index = (hhdr -> hb_large_block? 0 : granules);
        hhdr -> hb_map = GC_obj_map[index];
      }
#   endif /* MARK_BIT_PER_GRANULE */

    /* Clear mark bits. */
      GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return TRUE;
}
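
/*
 * Illustrative arithmetic for hb_inv_sz (editorial example, not from
 * the original source): on a 64-bit target with byte_sz = 24,
 * inv_sz = 2**32/24 + 1 = 178956971, the smallest value for which
 * 24 * inv_sz (= 2**32 + 8) exceeds 2**32.  A byte offset off within
 * the block can then be converted to an object index as
 * (off * inv_sz) >> 32 in place of off / 24, trading a division for a
 * multiply and a shift.
 */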
#define FL_UNKNOWN -1

/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hdr *hhdr, int n)
{
    int index;

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
#   ifndef USE_MUNMAP
      /* We always need index to maintain free counts. */
      if (FL_UNKNOWN == n) {
          index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
      } else {
          index = n;
      }
#   endif
    if (hhdr -> hb_prev == 0) {
#       ifdef USE_MUNMAP
          if (FL_UNKNOWN == n) {
              index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
          } else {
              index = n;
          }
#       endif
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        hdr *phdr;

        GET_HDR(hhdr -> hb_prev, phdr);
        phdr -> hb_next = hhdr -> hb_next;
    }
    FREE_ASSERT(GC_free_bytes[index] >= hhdr -> hb_sz);
    INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
    if (0 != hhdr -> hb_next) {
        hdr * nhdr;

        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        GET_HDR(hhdr -> hb_next, nhdr);
        nhdr -> hb_prev = hhdr -> hb_prev;
    }
}
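
/*
 * (Editorial note) The index is computed unconditionally when free-byte
 * counts are maintained (no USE_MUNMAP), since every removal must
 * decrement GC_free_bytes[index]; with USE_MUNMAP it is needed only in
 * the list-head case, to update GC_hblkfreelist[index] itself.
 */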
/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(struct hblk *h)
{
    struct hblk * p = h - 1;
    hdr * phdr;

    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr) return HBLK_IS_FREE(phdr) ? p : 0;
    p = GC_prev_block(h - 1);
    if (0 != p) {
        phdr = HDR(p);
        if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h)
            return p;
    }
    return 0;
}
/*
 * Add hhdr to the appropriate free list.
 * We maintain individual free lists sorted by address.
 */
void GC_add_to_fl(struct hblk *h, hdr *hhdr)
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];

#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    INCR_FREE_BYTES(index, hhdr -> hb_sz);
    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes);
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
        hdr * second_hdr;

        GET_HDR(second, second_hdr);
        second_hdr -> hb_prev = h;
    }
    hhdr -> hb_flags |= FREE_BLK;
}
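
/*
 * (Editorial note) The assertions above check the coalescing invariant:
 * a mapped free block should never end up adjacent to another mapped
 * free block, since GC_freehblk and GC_merge_unmapped merge such
 * neighbors as soon as they arise.
 */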
#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only  */
/* way blocks are ever unmapped.                                       */
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;
        threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
        last_rec = hhdr -> hb_last_reclaimed;
        if ((last_rec > GC_gc_no || last_rec < threshold)
            && threshold < GC_gc_no /* not recently wrapped */) {
            sz = hhdr -> hb_sz;
            GC_unmap((ptr_t)h, sz);
            hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}
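
/*
 * Illustrative reading of the age test (editorial example): with
 * GC_gc_no = 1000 and UNMAP_THRESHOLD 6, threshold = 994, so a block
 * not reclaimed since collection 994 (i.e., untouched for at least 6
 * collections) is unmapped.  Because hb_last_reclaimed is only an
 * unsigned short, the "last_rec > GC_gc_no" test treats values that
 * can only stem from a previous 16-bit wrap as old, and
 * "threshold < GC_gc_no" skips the test entirely while the subtraction
 * itself would wrap.
 */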
/* Merge all unmapped blocks that are adjacent to other free           */
/* blocks.  This may involve remapping, since all blocks are either    */
/* fully mapped or fully unmapped.                                     */
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        GET_HDR(h, hhdr);
        size = hhdr->hb_sz;
        next = (struct hblk *)((word)h + size);
        GET_HDR(next, nexthdr);
        /* Coalesce with successor, if possible */
          if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
            nextsize = nexthdr -> hb_sz;
            if (IS_MAPPED(hhdr)) {
              GC_ASSERT(!IS_MAPPED(nexthdr));
              /* make both consistent, so that we can merge */
                if (size > nextsize) {
                  GC_remap((ptr_t)next, nextsize);
                } else {
                  GC_unmap((ptr_t)h, size);
                  hhdr -> hb_flags |= WAS_UNMAPPED;
                }
            } else if (IS_MAPPED(nexthdr)) {
              GC_ASSERT(!IS_MAPPED(hhdr));
              if (size > nextsize) {
                GC_unmap((ptr_t)next, nextsize);
              } else {
                GC_remap((ptr_t)h, size);
                hhdr -> hb_flags &= ~WAS_UNMAPPED;
                hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
              }
            } else {
              /* Unmap any gap in the middle */
                GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
            }
            /* If they are both unmapped, we merge, but leave unmapped. */
            GC_remove_from_fl(hhdr, i);
            GC_remove_from_fl(nexthdr, FL_UNKNOWN);
            hhdr -> hb_sz += nexthdr -> hb_sz;
            GC_remove_header(next);
            GC_add_to_fl(h, hhdr);
            /* Start over at beginning of list */
            h = GC_hblkfreelist[i];
          } else /* not mergeable with successor */ {
            h = hhdr -> hb_next;
          }
      } /* while (h != 0) ... */
    }
}
#endif /* USE_MUNMAP */
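
/*
 * (Editorial note) After each successful merge, GC_merge_unmapped
 * restarts at the head of the current list, since the merged block may
 * now belong on a different list and the iteration pointers are stale.
 * This can be quadratic in list length in the worst case, but the
 * lists are expected to stay short.
 */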
/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
                                size_t bytes, int index)
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) {
        /* FIXME: This is likely to be very bad news ... */
        WARN("Header allocation failed: Dropping block.\n", 0);
        return(0);
    }
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
        hhdr -> hb_flags &= ~FREE_BLK;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}
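
/*
 * Illustrative arithmetic (editorial example): if h heads a free run of
 * 5 blocks (5*HBLKSIZE bytes) and bytes = 2*HBLKSIZE, the caller gets h
 * back, while rest = h + 2 blocks receives a fresh header with
 * hb_sz = 3*HBLKSIZE and is pushed onto free list index
 * GC_hblk_fl_from_blocks(3) = 3.
 */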
/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, and move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
                    hdr *nhdr, int index /* Index of free list */)
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
      nhdr -> hb_prev = prev;
      nhdr -> hb_next = next;
      nhdr -> hb_sz = total_size - h_size;
      nhdr -> hb_flags = 0;
      if (0 != prev) {
        HDR(prev) -> hb_next = n;
      } else {
        GC_hblkfreelist[index] = n;
      }
      if (0 != next) {
        HDR(next) -> hb_prev = n;
      }
      INCR_FREE_BYTES(index, -(signed_word)h_size);
      FREE_ASSERT(GC_free_bytes[index] > 0);
#     ifdef GC_ASSERTIONS
        nhdr -> hb_flags &= ~FREE_BLK;
                                /* Don't fail test for consecutive      */
                                /* free blocks in GC_add_to_fl.         */
#     endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    nhdr -> hb_flags |= FREE_BLK;
}
static struct hblk *
GC_allochblk_nth(size_t sz/* bytes */, int kind, unsigned flags, int n);

/*
 * Allocate (and return pointer to) a heap block
 * for objects of size sz bytes, searching the free lists for the
 * requested size and larger.
 *
 * NOTE: We set obj_map field in header correctly.
 * Caller is responsible for building an object freelist in block.
 *
 * The client is responsible for clearing the block, if necessary.
 */
struct hblk *
GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
{
    word blocks;
    int start_list;
    int i;

    GC_ASSERT((sz & (GRANULE_BYTES - 1)) == 0);
    blocks = OBJ_SZ_TO_BLOCKS(sz);
    start_list = GC_hblk_fl_from_blocks(blocks);
    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
        if (0 != result) return result;
    }
    return 0;
}
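
/*
 * Hypothetical caller (editorial sketch; the real callers live in the
 * small-object allocation and reclaim layers):
 *
 *     struct hblk *h = GC_allochblk(64, NORMAL, 0);
 *     if (0 == h) { ... trigger a collection or expand the heap, retry ... }
 *
 * sz must already be a multiple of GRANULE_BYTES, per the assertion
 * above.
 */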
/*
 * The same, but with the search restricted to the nth free list.
 * Flags is IGNORE_OFF_PAGE or zero.
 * Unlike the above, sz is in bytes.
 */
static struct hblk *
GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n)
{
    struct hblk *hbp;
    hdr * hhdr;                 /* Header corr. to hbp.  Initialized    */
                                /* after the loop if hbp != 0.  Gcc     */
                                /* uninitialized use warning is bogus.  */
    struct hblk *thishbp;
    hdr * thishdr;              /* Header corr. to thishbp              */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block        */

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* Search for a big enough block in free list. */
        hbp = GC_hblkfreelist[n];
        for (; 0 != hbp; hbp = hhdr -> hb_next) {
            GET_HDR(hbp, hhdr);
            size_avail = hhdr->hb_sz;
            if (size_avail < size_needed) continue;
            if (size_avail != size_needed
                && !GC_use_entire_heap
                && !GC_dont_gc
                && USED_HEAP_SIZE >= GC_requested_heapsize
                && !TRUE_INCREMENTAL && GC_should_collect()) {
#               ifdef USE_MUNMAP
                    continue;
#               else
                    /* If we have enough large blocks left to cover any */
                    /* previous request for large blocks, we go ahead   */
                    /* and split.  Assuming a steady state, that should */
                    /* be safe.  It means that we can use the full      */
                    /* heap if we allocate only small objects.          */
                    if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
                        continue;
                    }
                    /* If we are deallocating lots of memory from       */
                    /* finalizers, fail and collect sooner rather       */
                    /* than later.                                      */
                    if (GC_finalizer_bytes_freed > (GC_heapsize >> 4)) {
                        continue;
                    }
#               endif /* !USE_MUNMAP */
            }
            /* If the next heap block is obviously better, go on.       */
            /* This prevents us from disassembling a single large block */
            /* to get tiny blocks.                                      */
            {
              signed_word next_size;

              thishbp = hhdr -> hb_next;
              if (thishbp != 0) {
                GET_HDR(thishbp, thishdr);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {
                    continue;
                }
              }
            }
            if (!IS_UNCOLLECTABLE(kind) &&
                (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
              struct hblk * lasthbp = hbp;
              ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
              signed_word orig_avail = size_avail;
              signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
                                              HBLKSIZE
                                              : size_needed);

              /* Skip over any black listed prefix of the block. */
              while ((ptr_t)lasthbp <= search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
                                                      (word)eff_size_needed))
                        != 0) {
                lasthbp = thishbp;
              }
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
              thishbp = lasthbp;
              if (size_avail >= size_needed) {
                if (thishbp != hbp &&
                    0 != (thishdr = GC_install_header(thishbp))) {
                  /* Make sure it's mapped before we mangle it. */
#                 ifdef USE_MUNMAP
                    if (!IS_MAPPED(hhdr)) {
                      GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                      hhdr -> hb_flags &= ~WAS_UNMAPPED;
                    }
#                 endif
                  /* Split the block at thishbp */
                    GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                  /* Advance to thishbp */
                    hbp = thishbp;
                    hhdr = thishdr;
                    /* We must now allocate thishbp, since it may       */
                    /* be on the wrong free list.                       */
                }
              } else if (size_needed > (signed_word)BL_LIMIT
                         && orig_avail - size_needed
                            > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                if (++GC_large_alloc_warn_suppressed
                    >= GC_large_alloc_warn_interval) {
                  WARN("Repeated allocation of very large block "
                       "(appr. size %ld):\n"
                       "\tMay lead to memory leak and poor performance.\n",
                       size_needed);
                  GC_large_alloc_warn_suppressed = 0;
                }
                size_avail = orig_avail;
              } else if (size_avail == 0 && size_needed == HBLKSIZE
                         && IS_MAPPED(hhdr)) {
                if (!GC_find_leak) {
                  static unsigned count = 0;

                  /* The block is completely blacklisted.  We need      */
                  /* to drop some such blocks, since otherwise we spend */
                  /* all our time traversing them if pointerfree        */
                  /* blocks are unpopular.                              */
                  /* A dropped block will be reconsidered at next GC.   */
                  if ((++count & 3) == 0) {
                    /* Allocate and drop the block in small chunks, to  */
                    /* maximize the chance that we will recover some    */
                    /* later.                                           */
                      word total_size = hhdr -> hb_sz;
                      struct hblk * limit = hbp + divHBLKSZ(total_size);
                      struct hblk * h;
                      struct hblk * prev = hhdr -> hb_prev;

                      GC_large_free_bytes -= total_size;
                      GC_remove_from_fl(hhdr, n);
                      for (h = hbp; h < limit; h++) {
                        if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                          (void) setup_header(hhdr, h, HBLKSIZE,
                                              PTRFREE, 0); /* Can't fail. */
                          if (GC_debugging_started) {
                            BZERO(h, HBLKSIZE);
                          }
                        }
                      }
                    /* Restore hbp to point at free block. */
                      hbp = prev;
                      if (0 == hbp) {
                        return GC_allochblk_nth(sz, kind, flags, n);
                      }
                      hhdr = HDR(hbp);
                  }
                }
              }
            }
            if (size_avail >= size_needed) {
#               ifdef USE_MUNMAP
                  if (!IS_MAPPED(hhdr)) {
                      GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                      hhdr -> hb_flags &= ~WAS_UNMAPPED;
                  }
#               endif
                /* hbp may be on the wrong freelist; the parameter n    */
                /* is important.                                        */
                hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
                break;
            }
        }

    if (0 == hbp) return 0;
    /* Add it to map of valid blocks. */
        if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

    /* Set up the header. */
        if (!setup_header(hhdr, hbp, sz, kind, flags)) {
            GC_remove_counts(hbp, (word)size_needed);
            return(0); /* ditto */
        }

    /* Notify virtual dirty bit implementation that we are about to write.  */
    /* Ensure that pointerfree objects are not protected if it's avoidable. */
        GC_remove_protection(hbp, divHBLKSZ(size_needed),
                             (hhdr -> hb_descr == 0) /* pointer-free */);

    /* We just successfully allocated a block.  Restart count of        */
    /* consecutive failures.                                            */
        {
            extern unsigned GC_fail_count;

            GC_fail_count = 0;
        }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return hbp;
}
struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(struct hblk *hbp)
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;

    GET_HDR(hbp, hhdr);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif

    /* Check for duplicate deallocation in the easy case. */
      if (HBLK_IS_FREE(hhdr)) {
        GC_printf("Duplicate large block deallocation of %p\n", hbp);
        ABORT("Duplicate large block deallocation");
      }

    GC_ASSERT(IS_MAPPED(hhdr));
    hhdr -> hb_flags |= FREE_BLK;
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
      if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
      }
    /* Coalesce with predecessor, if possible. */
      if (0 != prev) {
        prevhdr = HDR(prev);
        if (IS_MAPPED(prevhdr)) {
          GC_remove_from_fl(prevhdr, FL_UNKNOWN);
          prevhdr -> hb_sz += hhdr -> hb_sz;
#         ifdef USE_MUNMAP
            prevhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#         endif
          GC_remove_header(hbp);
          hbp = prev;
          hhdr = prevhdr;
        }
      }
    /* FIXME: It is not clear we really always want to do these merges  */
    /* with -DUSE_MUNMAP, since it updates ages and hence prevents      */
    /* unmapping.                                                       */

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}
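
/*
 * Round-trip sketch (editorial, assuming only the functions above): a
 * block obtained via GC_allochblk(sz, kind, 0) and later found fully
 * empty by the reclaim phase is returned with GC_freehblk(h), which
 * coalesces it with any free neighbors and makes it available to
 * subsequent GC_allochblk calls.
 */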