/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include "private/gc_priv.h"
#ifdef GC_USE_ENTIRE_HEAP
  int GC_use_entire_heap = TRUE;
#else
  int GC_use_entire_heap = FALSE;
#endif
/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */
# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
        /* Largest block we will allocate starting on a black-listed   */
        /* block.  Must be >= HBLKSIZE.                                 */
# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list.  */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a     */
        /* single free list.                                            */
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single   */
        /* free list.                                                   */

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                                + UNIQUE_THRESHOLD + 1
#ifndef GC_GCJ_SUPPORT
  STATIC
#endif
  struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
                                /* List of completely empty heap blocks */
                                /* Linked through hb_next field of      */
                                /* header structure associated with     */
                                /* block.  Remains externally visible   */
                                /* as used by GNU GCJ currently.        */
#ifndef GC_GCJ_SUPPORT
  STATIC
#endif
  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
        /* Number of free bytes on each list.  Remains visible to GCJ. */
/* Return the largest n such that GC_large_allocd_bytes plus the number */
/* of free bytes on lists n .. N_HBLK_FLS is at least                   */
/* GC_max_large_allocd_bytes.  If there is no such n, return 0.         */
GC_INLINE int GC_enough_large_bytes_left(void)
{
    int n;
    word bytes = GC_large_allocd_bytes;

    GC_ASSERT(GC_max_large_allocd_bytes <= GC_heapsize);
    for (n = N_HBLK_FLS; n >= 0; --n) {
        bytes += GC_free_bytes[n];
        if (bytes >= GC_max_large_allocd_bytes) return n;
    }
    return 0;
}
# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);

# define FREE_ASSERT(e) GC_ASSERT(e)

#else /* USE_MUNMAP */

# define INCR_FREE_BYTES(n, b)
# define FREE_ASSERT(e)

#endif /* USE_MUNMAP */
/* Map a number of blocks to the appropriate large block free list index. */
STATIC int GC_hblk_fl_from_blocks(word blocks_needed)
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (int)(blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                                        + UNIQUE_THRESHOLD + 1;
}
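
/* Worked example (illustrative, derived from the definitions above):  */
/* with UNIQUE_THRESHOLD = 32, HUGE_THRESHOLD = 256 and                */
/* FL_COMPRESSION = 8, a 5-block request uses its own exact list       */
/* (index 5); a 100-block request maps to index (100-32)/8 + 32 + 1 =  */
/* 41, shared with all 96..103-block sizes; and anything of 256 blocks */
/* or more goes to the single list at index N_HBLK_FLS.                */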
# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */
# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    word total_free = 0;
    unsigned i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
#     ifdef USE_MUNMAP
        if (0 != h) GC_printf("Free list %u:\n", i);
#     else
        if (0 != h) GC_printf("Free list %u (total size %lu):\n",
                              i, (unsigned long)GC_free_bytes[i]);
#     endif
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
        total_free += sz;
        GC_printf("\t%p size %lu %s black listed\n", h, (unsigned long)sz,
                  GC_is_black_listed(h, HBLKSIZE) != 0 ? "start" :
                  GC_is_black_listed(h, hhdr -> hb_sz) != 0 ? "partially" :
                                                              "not");
        h = hhdr -> hb_next;
      }
    }
    if (total_free != GC_large_free_bytes) {
      GC_printf("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                (unsigned long)GC_large_free_bytes);
    }
    GC_printf("Total of %lu bytes on free list\n", (unsigned long)total_free);
}
/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                  */
static int free_list_index_of(hdr *wanted)
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
        if (hhdr == wanted) return i;
        h = hhdr -> hb_next;
      }
    }
    return -1;
}
void GC_dump_regions(void)
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;

    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections. */
        while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
        }
        GC_printf("***Section from %p to %p\n", start, end);
        for (p = start; p < end; ) {
            hhdr = HDR(p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf("\t%p Missing header!!(%p)\n", p, hhdr);
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf("\t%p\tfree block of size 0x%lx bytes%s\n", p,
                          (unsigned long)(hhdr -> hb_sz),
                          IS_MAPPED(hhdr) ? "" : " (unmapped)");
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf("\t\tBlock not on free list %d!!\n",
                              correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf("\t\tBlock on list %d, should be on %d!!\n",
                              actual_index, correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf("\t%p\tused for blocks of size 0x%lx bytes\n", p,
                          (unsigned long)(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}

# endif /* NO_DEBUGGING */
/* Initialize hdr for a block containing the indicated size and        */
/* kind of objects.                                                    */
/* Return FALSE on failure.                                            */
static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
                            int kind, unsigned flags)
{
    word descr;
#   ifndef MARK_BIT_PER_OBJ
      size_t granules;
#   endif

    /* Set size, kind and mark proc fields. */
      hhdr -> hb_sz = byte_sz;
      hhdr -> hb_obj_kind = (unsigned char)kind;
      hhdr -> hb_flags = (unsigned char)flags;
      hhdr -> hb_block = block;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += byte_sz;
      hhdr -> hb_descr = descr;

#   ifdef MARK_BIT_PER_OBJ
      /* Set hb_inv_sz as portably as possible.                         */
      /* We set it to the smallest value such that sz * inv_sz > 2**32. */
      /* This may be more precision than necessary.                     */
      if (byte_sz > MAXOBJBYTES) {
        hhdr -> hb_inv_sz = LARGE_INV_SZ;
      } else {
        word inv_sz;

#       if CPP_WORDSZ == 64
          inv_sz = ((word)1 << 32)/byte_sz;
          if (((inv_sz*byte_sz) >> 32) == 0) ++inv_sz;
#       else  /* 32 bit words */
          GC_ASSERT(byte_sz >= 4);
          inv_sz = ((unsigned)1 << 31)/byte_sz;
          inv_sz *= 2;
          while (inv_sz*byte_sz > byte_sz) ++inv_sz;
#       endif
        hhdr -> hb_inv_sz = inv_sz;
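        /* Illustrative arithmetic (not part of the original code):     */
        /* for byte_sz = 24 on a 64-bit target, 2**32/24 = 178956970,   */
        /* and (178956970*24) >> 32 == 0, so inv_sz becomes 178956971;  */
        /* 178956971*24 = 2**32 + 8 is the smallest multiple exceeding  */
        /* 2**32.  Object indices can then be derived from a byte       */
        /* displacement by multiply-and-shift rather than division.     */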
      }
#   else /* MARK_BIT_PER_GRANULE */
      hhdr -> hb_large_block = (unsigned char)(byte_sz > MAXOBJBYTES);
      granules = BYTES_TO_GRANULES(byte_sz);
      if (EXPECT(!GC_add_map_entry(granules), FALSE)) {
        /* Make it look like a valid block. */
        hhdr -> hb_sz = HBLKSIZE;
        hhdr -> hb_descr = 0;
        hhdr -> hb_large_block = TRUE;
        hhdr -> hb_map = 0;
        return FALSE;
      } else {
        size_t index = (hhdr -> hb_large_block? 0 : granules);
        hhdr -> hb_map = GC_obj_map[index];
      }
#   endif /* MARK_BIT_PER_GRANULE */

    /* Clear mark bits */
      GC_clear_hdr_marks(hhdr);

      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
      return TRUE;
}
#define FL_UNKNOWN -1

/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
STATIC void GC_remove_from_fl(hdr *hhdr, int n)
{
    int index;

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
#   ifndef USE_MUNMAP
      /* We always need index to maintain free counts. */
      if (FL_UNKNOWN == n) {
          index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
      } else {
          index = n;
      }
#   endif
    if (hhdr -> hb_prev == 0) {
#       ifdef USE_MUNMAP
          if (FL_UNKNOWN == n) {
            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
          } else {
            index = n;
          }
#       endif
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        hdr *phdr;
        GET_HDR(hhdr -> hb_prev, phdr);
        phdr -> hb_next = hhdr -> hb_next;
    }
    FREE_ASSERT(GC_free_bytes[index] >= hhdr -> hb_sz);
    INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
    if (0 != hhdr -> hb_next) {
        hdr * nhdr;
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        GET_HDR(hhdr -> hb_next, nhdr);
        nhdr -> hb_prev = hhdr -> hb_prev;
    }
}
/* Return a pointer to the free block ending just before h, if any. */
STATIC struct hblk * GC_free_block_ending_at(struct hblk *h)
{
    struct hblk * p = h - 1;
    hdr * phdr;

    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr) {
        return HBLK_IS_FREE(phdr) ? p : 0;
    }
    p = GC_prev_block(h - 1);
    if (0 != p) {
        phdr = HDR(p);
        if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h)
            return p;
    }
    return 0;
}
/*
 * Add hhdr to the appropriate free list.
 * We maintain individual free lists sorted by address.
 */
STATIC void GC_add_to_fl(struct hblk *h, hdr *hhdr)
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
    hdr * second_hdr;
#   if defined(GC_ASSERTIONS) && !defined(USE_MUNMAP)
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);

      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr)
                || (signed_word)GC_heapsize < 0);
                /* In the last case, blocks may be too large to merge. */
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr)
                || (signed_word)GC_heapsize < 0);
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    INCR_FREE_BYTES(index, hhdr -> hb_sz);
    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes);
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
      GET_HDR(second, second_hdr);
      second_hdr -> hb_prev = h;
    }
    hhdr -> hb_flags |= FREE_BLK;
}
#ifdef USE_MUNMAP

# ifndef MUNMAP_THRESHOLD
#   define MUNMAP_THRESHOLD 6
# endif

GC_INNER int GC_unmap_threshold = MUNMAP_THRESHOLD;
/* Unmap blocks that haven't been recently touched.  This is the only */
/* way blocks are ever unmapped.                                       */
GC_INNER void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    if (GC_unmap_threshold == 0)
      return; /* unmapping disabled */

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;

        if ((unsigned short)GC_gc_no - hhdr -> hb_last_reclaimed >
            (unsigned short)GC_unmap_threshold) {
          GC_unmap((ptr_t)h, hhdr -> hb_sz);
          hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}
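
/* Note on the age test above: hb_last_reclaimed holds the low bits of */
/* GC_gc_no (an unsigned short) from when the block was last swept or  */
/* freed, so the unsigned short subtraction measures age in completed  */
/* collections, modulo the range of the type.  With the default        */
/* GC_unmap_threshold of 6, a block must therefore sit on a free list  */
/* through more than 6 collections before it is returned to the OS.    */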
/* Merge all unmapped blocks that are adjacent to other free           */
/* blocks.  This may involve remapping, since all blocks are either    */
/* fully mapped or fully unmapped.                                     */
GC_INNER void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        GET_HDR(h, hhdr);
        size = hhdr -> hb_sz;
        next = (struct hblk *)((word)h + size);
        GET_HDR(next, nexthdr);
        /* Coalesce with successor, if possible. */
        if (0 != nexthdr && HBLK_IS_FREE(nexthdr)
            && (signed_word) (size + (nextsize = nexthdr->hb_sz)) > 0
               /* no pot. overflow */) {
          /* Note that we usually try to avoid adjacent free blocks   */
          /* that are either both mapped or both unmapped.  But that  */
          /* isn't guaranteed to hold since we remap blocks when we   */
          /* split them, and don't merge at that point.  It may also  */
          /* not hold if the merged block would be too big.           */
          if (IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
            /* Make both consistent, so that we can merge. */
            if (size > nextsize) {
              GC_remap((ptr_t)next, nextsize);
            } else {
              GC_unmap((ptr_t)h, size);
              GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
              hhdr -> hb_flags |= WAS_UNMAPPED;
            }
          } else if (IS_MAPPED(nexthdr) && !IS_MAPPED(hhdr)) {
            if (size > nextsize) {
              GC_unmap((ptr_t)next, nextsize);
              GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
            } else {
              GC_remap((ptr_t)h, size);
              hhdr -> hb_flags &= ~WAS_UNMAPPED;
              hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
            }
          } else if (!IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
            /* Unmap any gap in the middle. */
            GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
          }
          /* If they are both unmapped, we merge, but leave unmapped. */
          GC_remove_from_fl(hhdr, i);
          GC_remove_from_fl(nexthdr, FL_UNKNOWN);
          hhdr -> hb_sz += nexthdr -> hb_sz;
          GC_remove_header(next);
          GC_add_to_fl(h, hhdr);
          /* Start over at beginning of list. */
          h = GC_hblkfreelist[i];
        } else /* not mergable with successor */ {
          h = hhdr -> hb_next;
        }
      } /* while (h != 0) ... */
    }
}

#endif /* USE_MUNMAP */
/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
STATIC struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
                                       size_t bytes, int index)
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) {
        /* FIXME: This is likely to be very bad news ... */
        WARN("Header allocation failed: Dropping block.\n", 0);
        return 0;
    }
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_flags &= ~FREE_BLK;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}
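
/* Example (illustrative): for a 10-block free block at h and a request */
/* of bytes == 3*HBLKSIZE, the first 3 blocks are returned to the       */
/* caller, and the remaining 7-block tail starting at h + 3 is given a  */
/* fresh header and put back on the free list for 7-block chunks.       */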
/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, and move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * That's not a disaster, since n is about to be allocated
 * by our caller.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
STATIC void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
                           hdr *nhdr, int index /* Index of free list */)
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist. */
      nhdr -> hb_prev = prev;
      nhdr -> hb_next = next;
      nhdr -> hb_sz = total_size - h_size;
      nhdr -> hb_flags = 0;
      if (0 != prev) {
        HDR(prev) -> hb_next = n;
      } else {
        GC_hblkfreelist[index] = n;
      }
      if (0 != next) {
        HDR(next) -> hb_prev = n;
      }
      INCR_FREE_BYTES(index, -(signed_word)h_size);
      FREE_ASSERT(GC_free_bytes[index] > 0);
#     ifdef USE_MUNMAP
        hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#     endif
      hhdr -> hb_sz = h_size;
      GC_add_to_fl(h, hhdr);
      nhdr -> hb_flags |= FREE_BLK;
}
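
/* Illustration (not from the original sources): if h is a free 8-block */
/* chunk and n = h + 5, then after the call h keeps the first 5 blocks  */
/* (hb_sz = 5*HBLKSIZE) and is moved to the free list for its new size, */
/* while n describes the trailing 3 blocks that the caller is about to  */
/* allocate; n takes h's place in the original doubly-linked free list. */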
STATIC struct hblk *
GC_allochblk_nth(size_t sz/* bytes */, int kind, unsigned flags, int n,
                 GC_bool may_split);

/*
 * Allocate (and return pointer to) a heap block
 * for objects of size sz bytes, searching the nth free list.
 *
 * NOTE: We set obj_map field in header correctly.
 *       Caller is responsible for building an object freelist in block.
 *
 * The client is responsible for clearing the block, if necessary.
 */
GC_INNER struct hblk *
GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
{
    word blocks;
    int start_list;
    struct hblk *result;
    int i;
    int split_limit; /* Highest index of free list whose blocks we     */
                     /* split.                                          */

    GC_ASSERT((sz & (GRANULE_BYTES - 1)) == 0);
    blocks = OBJ_SZ_TO_BLOCKS(sz);
    if ((signed_word)(blocks * HBLKSIZE) < 0) {
      return 0;
    }
    start_list = GC_hblk_fl_from_blocks(blocks);
    /* Try for an exact match first. */
    result = GC_allochblk_nth(sz, kind, flags, start_list, FALSE);
    if (0 != result) return result;
    if (GC_use_entire_heap || GC_dont_gc
        || USED_HEAP_SIZE < GC_requested_heapsize
        || GC_incremental || !GC_should_collect()) {
      /* Should use more of the heap, even if it requires splitting. */
      split_limit = N_HBLK_FLS;
    } else {
#     ifdef USE_MUNMAP
        /* Avoid splitting, since that might require remapping. */
        split_limit = 0;
#     else
        if (GC_finalizer_bytes_freed > (GC_heapsize >> 4)) {
          /* If we are deallocating lots of memory from        */
          /* finalizers, fail and collect sooner rather        */
          /* than later.                                       */
          split_limit = 0;
        } else {
          /* If we have enough large blocks left to cover any  */
          /* previous request for large blocks, we go ahead    */
          /* and split.  Assuming a steady state, that should  */
          /* be safe.  It means that we can use the full       */
          /* heap if we allocate only small objects.           */
          split_limit = GC_enough_large_bytes_left();
        }
#     endif
    }
    if (start_list < UNIQUE_THRESHOLD) {
      /* No reason to try start_list again, since all blocks are exact */
      /* matches.                                                      */
      ++start_list;
    }
    for (i = start_list; i <= split_limit; ++i) {
      struct hblk * result = GC_allochblk_nth(sz, kind, flags, i, TRUE);
      if (0 != result) return result;
    }
    return 0;
}
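
/* Example of the search order (illustrative): a request of 5*HBLKSIZE  */
/* bytes gives blocks = 5 and start_list = 5, so an exact-fit search of */
/* list 5 is tried first without splitting; if that fails, lists        */
/* 6 .. split_limit are searched with splitting permitted, where        */
/* split_limit depends on the heuristics above (N_HBLK_FLS when we want */
/* to use more of the heap, possibly 0 when a collection is preferred). */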
STATIC long GC_large_alloc_warn_suppressed = 0;
                        /* Number of warnings suppressed so far. */
/*
 * The same, but with search restricted to nth free list.
 * Flags is IGNORE_OFF_PAGE or zero.
 * Unlike the above, sz is in bytes.
 * The may_split flag indicates whether it's OK to split larger blocks.
 */
STATIC struct hblk *
GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n,
                 GC_bool may_split)
{
    struct hblk *hbp;
    hdr * hhdr;                 /* Header corr. to hbp.  Initialized   */
                                /* after loop if hbp != 0.  Gcc        */
                                /* uninitialized use warning is bogus. */
    struct hblk *thishbp;
    hdr * thishdr;              /* Header corr. to thishbp              */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block        */

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
    /* Search for a big enough block in free list. */
    hbp = GC_hblkfreelist[n];
    for (; 0 != hbp; hbp = hhdr -> hb_next) {
        GET_HDR(hbp, hhdr);
        size_avail = hhdr -> hb_sz;
        if (size_avail < size_needed) continue;
        if (size_avail != size_needed) {
            signed_word next_size;

            if (!may_split) continue;
            /* If the next heap block is obviously better, go on.       */
            /* This prevents us from disassembling a single large block */
            /* to get tiny blocks.                                      */
            thishbp = hhdr -> hb_next;
            if (thishbp != 0) {
                GET_HDR(thishbp, thishdr);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {
                    continue;
                }
            }
        }
        if (!IS_UNCOLLECTABLE(kind) && (kind != PTRFREE
                || size_needed > (signed_word)MAX_BLACK_LIST_ALLOC)) {
            struct hblk * lasthbp = hbp;
            ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
            signed_word orig_avail = size_avail;
            signed_word eff_size_needed = (flags & IGNORE_OFF_PAGE) != 0 ?
                                                (signed_word)HBLKSIZE
                                                : size_needed;

            while ((ptr_t)lasthbp <= search_end
                   && (thishbp = GC_is_black_listed(lasthbp,
                                    (word)eff_size_needed)) != 0) {
                lasthbp = thishbp;
            }
            size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
            thishbp = lasthbp;
            if (size_avail >= size_needed) {
                if (thishbp != hbp &&
                    0 != (thishdr = GC_install_header(thishbp))) {
                    /* Make sure it's mapped before we mangle it. */
#                   ifdef USE_MUNMAP
                      if (!IS_MAPPED(hhdr)) {
                          GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                          hhdr -> hb_flags &= ~WAS_UNMAPPED;
                      }
#                   endif
                    /* Split the block at thishbp. */
                    GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                    /* Advance to thishbp. */
                    hbp = thishbp;
                    hhdr = thishdr;
                    /* We must now allocate thishbp, since it may    */
                    /* be on the wrong free list.                    */
                }
            } else if (size_needed > (signed_word)BL_LIMIT
                       && orig_avail - size_needed
                          > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                if (++GC_large_alloc_warn_suppressed
                    >= GC_large_alloc_warn_interval) {
                    WARN("Repeated allocation of very large block "
                         "(appr. size %" GC_PRIdPTR "):\n"
                         "\tMay lead to memory leak and poor performance.\n",
                         size_needed);
                    GC_large_alloc_warn_suppressed = 0;
                }
                size_avail = orig_avail;
            } else if (size_avail == 0 && size_needed == HBLKSIZE
                       && IS_MAPPED(hhdr)) {
                if (!GC_find_leak) {
                    static unsigned count = 0;

                    /* The block is completely blacklisted.  We need      */
                    /* to drop some such blocks, since otherwise we spend */
                    /* all our time traversing them if pointerfree        */
                    /* blocks are unpopular.                              */
                    /* A dropped block will be reconsidered at next GC.   */
                    if ((++count & 3) == 0) {
                        /* Allocate and drop the block in small chunks, to */
                        /* maximize the chance that we will recover some   */
                        /* of it later.                                    */
                        word total_size = hhdr -> hb_sz;
                        struct hblk * limit = hbp + divHBLKSZ(total_size);
                        struct hblk * h;
                        struct hblk * prev = hhdr -> hb_prev;

                        GC_large_free_bytes -= total_size;
                        GC_bytes_dropped += total_size;
                        GC_remove_from_fl(hhdr, n);
                        for (h = hbp; h < limit; h++) {
                          if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                            (void) setup_header(hhdr, h, HBLKSIZE,
                                                PTRFREE, 0); /* Can't fail */
                            if (GC_debugging_started) {
                                BZERO(h, HBLKSIZE);
                            }
                          }
                        }
                        /* Restore hbp to point at free block. */
                        hbp = prev;
                        if (0 == hbp) {
                            return GC_allochblk_nth(sz, kind, flags, n,
                                                    may_split);
                        }
                        hhdr = HDR(hbp);
                    }
                }
            }
        }
        if (size_avail >= size_needed) {
#           ifdef USE_MUNMAP
              if (!IS_MAPPED(hhdr)) {
                  GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                  hhdr -> hb_flags &= ~WAS_UNMAPPED;
                  /* Note: This may leave adjacent, mapped free blocks. */
              }
#           endif
            /* hbp may be on the wrong freelist; the parameter n */
            /* is important.                                     */
            hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
            break;
        }
    }
    if (0 == hbp) return 0;

    /* Add it to map of valid blocks. */
    if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

    if (!setup_header(hhdr, hbp, sz, kind, flags)) {
        GC_remove_counts(hbp, (word)size_needed);
        return(0); /* ditto */
    }
#   ifndef GC_DISABLE_INCREMENTAL
      /* Notify virtual dirty bit implementation that we are about to */
      /* write.  Ensure that pointer-free objects are not protected   */
      /* if it's avoidable.  This also ensures that newly allocated   */
      /* blocks are treated as dirty.  Necessary since we don't       */
      /* protect free blocks.                                         */
      GC_ASSERT((size_needed & (HBLKSIZE-1)) == 0);
      GC_remove_protection(hbp, divHBLKSZ(size_needed),
                           (hhdr -> hb_descr == 0) /* pointer-free */);
#   endif
    /* We just successfully allocated a block.  Restart count of */
    /* consecutive failures.                                     */
    GC_fail_count = 0;

    GC_large_free_bytes -= size_needed;
    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}
/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
GC_INNER void GC_freehblk(struct hblk *hbp)
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;

    GET_HDR(hbp, hhdr);
    size = hhdr -> hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    if (size <= 0)
      ABORT("Deallocating excessively large block.  Too large an allocation?");
      /* Probably possible if we try to allocate more than half the address */
      /* space at once.  If we don't catch it here, strange things happen   */
      /* later.                                                              */
    GC_remove_counts(hbp, (word)size);
    hhdr -> hb_sz = size;
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif

    /* Check for duplicate deallocation in the easy case. */
    if (HBLK_IS_FREE(hhdr)) {
        if (GC_print_stats)
            GC_log_printf("Duplicate large block deallocation of %p\n", hbp);
        ABORT("Duplicate large block deallocation");
    }

    GC_ASSERT(IS_MAPPED(hhdr));
    hhdr -> hb_flags |= FREE_BLK;
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible. */
    if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)
        && (signed_word)(hhdr -> hb_sz + nexthdr -> hb_sz) > 0
           /* no overflow */) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
    }
    /* Coalesce with predecessor, if possible. */
    if (0 != prev) {
        prevhdr = HDR(prev);
        if (IS_MAPPED(prevhdr)
            && (signed_word)(hhdr -> hb_sz + prevhdr -> hb_sz) > 0) {
            GC_remove_from_fl(prevhdr, FL_UNKNOWN);
            prevhdr -> hb_sz += hhdr -> hb_sz;
#           ifdef USE_MUNMAP
              prevhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#           endif
            GC_remove_header(hbp);
            hbp = prev;
            hhdr = prevhdr;
        }
    }
    /* FIXME: It is not clear we really always want to do these merges */
    /* with USE_MUNMAP, since it updates ages and hence prevents       */
    /* unmapping.                                                      */

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}
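
/* Coalescing example (illustrative): freeing a 2-block chunk whose    */
/* immediate successor is already a free, mapped 3-block chunk yields  */
/* one 5-block free block: the successor is unlinked from its free     */
/* list, its header is discarded, hb_sz grows to 5*HBLKSIZE, and the   */
/* combined block is added to the free list for 5-block chunks.        */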