- hbp = GC_hblkfreelist[n];
- for(; 0 != hbp; hbp = hhdr -> hb_next) {
- GET_HDR(hbp, hhdr);
- size_avail = hhdr->hb_sz;
- if (size_avail < size_needed) continue;
- if (size_avail != size_needed
- && !GC_use_entire_heap
- && !GC_dont_gc
- && USED_HEAP_SIZE >= GC_requested_heapsize
- && !TRUE_INCREMENTAL && GC_should_collect()) {
-# ifdef USE_MUNMAP
- continue;
-# else
- /* If we have enough large blocks left to cover any */
- /* previous request for large blocks, we go ahead */
- /* and split. Assuming a steady state, that should */
- /* be safe. It means that we can use the full */
- /* heap if we allocate only small objects. */
- if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
- continue;
- }
- /* If we are deallocating lots of memory from */
- /* finalizers, fail and collect sooner rather */
- /* than later. */
- if (GC_finalizer_bytes_freed > (GC_heapsize >> 4)) {
- continue;
- }
-# endif /* !USE_MUNMAP */
- }
- /* If the next heap block is obviously better, go on. */
- /* This prevents us from disassembling a single large block */
- /* to get tiny blocks. */
- {
- signed_word next_size;
-
- thishbp = hhdr -> hb_next;
- if (thishbp != 0) {
- GET_HDR(thishbp, thishdr);
- next_size = (signed_word)(thishdr -> hb_sz);
- if (next_size < size_avail
- && next_size >= size_needed
- && !GC_is_black_listed(thishbp, (word)size_needed)) {
- continue;
- }
- }
- }
- if ( !IS_UNCOLLECTABLE(kind) &&
- (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
- struct hblk * lasthbp = hbp;
- ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
- signed_word orig_avail = size_avail;
- signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
- HBLKSIZE
- : size_needed);
-
-
- while ((ptr_t)lasthbp <= search_end
- && (thishbp = GC_is_black_listed(lasthbp,
- (word)eff_size_needed))
- != 0) {
- lasthbp = thishbp;
- }
- size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
- thishbp = lasthbp;
- if (size_avail >= size_needed) {
- if (thishbp != hbp &&
- 0 != (thishdr = GC_install_header(thishbp))) {
- /* Make sure it's mapped before we mangle it. */
-# ifdef USE_MUNMAP
- if (!IS_MAPPED(hhdr)) {
- GC_remap((ptr_t)hbp, hhdr -> hb_sz);
- hhdr -> hb_flags &= ~WAS_UNMAPPED;
- }
-# endif
- /* Split the block at thishbp */
- GC_split_block(hbp, hhdr, thishbp, thishdr, n);
- /* Advance to thishbp */
- hbp = thishbp;
- hhdr = thishdr;
- /* We must now allocate thishbp, since it may */
- /* be on the wrong free list. */
- }
- } else if (size_needed > (signed_word)BL_LIMIT
- && orig_avail - size_needed
- > (signed_word)BL_LIMIT) {
- /* Punt, since anything else risks unreasonable heap growth. */
- if (++GC_large_alloc_warn_suppressed
- >= GC_large_alloc_warn_interval) {
- WARN("Repeated allocation of very large block "
- "(appr. size %ld):\n"
- "\tMay lead to memory leak and poor performance.\n",
- size_needed);
- GC_large_alloc_warn_suppressed = 0;
- }
- size_avail = orig_avail;
- } else if (size_avail == 0 && size_needed == HBLKSIZE
- && IS_MAPPED(hhdr)) {
- if (!GC_find_leak) {
- static unsigned count = 0;
-
- /* The block is completely blacklisted. We need */
- /* to drop some such blocks, since otherwise we spend */
- /* all our time traversing them if pointerfree */
- /* blocks are unpopular. */
- /* A dropped block will be reconsidered at next GC. */
- if ((++count & 3) == 0) {
- /* Allocate and drop the block in small chunks, to */
- /* maximize the chance that we will recover some */
- /* later. */
- word total_size = hhdr -> hb_sz;
- struct hblk * limit = hbp + divHBLKSZ(total_size);
- struct hblk * h;
- struct hblk * prev = hhdr -> hb_prev;
-
- GC_large_free_bytes -= total_size;
- GC_remove_from_fl(hhdr, n);
- for (h = hbp; h < limit; h++) {
- if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
- (void) setup_header(
- hhdr, h,
- HBLKSIZE,
- PTRFREE, 0); /* Cant fail */
- if (GC_debugging_started) {
- BZERO(h, HBLKSIZE);
- }
- }
- }
- /* Restore hbp to point at free block */
- hbp = prev;
- if (0 == hbp) {
- return GC_allochblk_nth(sz, kind, flags, n);
- }
- hhdr = HDR(hbp);
- }
- }
- }
- }
- if( size_avail >= size_needed ) {
-# ifdef USE_MUNMAP
- if (!IS_MAPPED(hhdr)) {
- GC_remap((ptr_t)hbp, hhdr -> hb_sz);
- hhdr -> hb_flags &= ~WAS_UNMAPPED;
- }
-# endif
- /* hbp may be on the wrong freelist; the parameter n */
- /* is important. */
- hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
- break;
- }
- }
+ hbp = GC_hblkfreelist[n];
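+ /* Search the nth free list for the first block that can */
+ /* satisfy this request. */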
+ for (; 0 != hbp; hbp = hhdr -> hb_next) {
+ GET_HDR(hbp, hhdr);
+ size_avail = hhdr->hb_sz;
+ if (size_avail < size_needed) continue;
+ if (size_avail != size_needed) {
+ signed_word next_size;
+
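+ /* A block of the wrong size can be used only by splitting */
+ /* it; if the caller disallowed that, keep searching. */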
+ if (!may_split) continue;
+ /* If the next heap block is obviously better, go on. */
+ /* This prevents us from disassembling a single large block */
+ /* to get tiny blocks. */
+ thishbp = hhdr -> hb_next;
+ if (thishbp != 0) {
+ GET_HDR(thishbp, thishdr);
+ next_size = (signed_word)(thishdr -> hb_sz);
+ if (next_size < size_avail
+ && next_size >= size_needed
+ && !GC_is_black_listed(thishbp, (word)size_needed)) {
+ continue;
+ }
+ }
+ }
+ if (!IS_UNCOLLECTABLE(kind) &&
+ (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
+ struct hblk * lasthbp = hbp;
+ ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
+ signed_word orig_avail = size_avail;
+ signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
+ HBLKSIZE
+ : size_needed);
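+ /* With IGNORE_OFF_PAGE the client promises to retain a */
+ /* pointer to the first page of the object, so presumably */
+ /* only the first block must be clear of the black list. */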
+
+ while ((ptr_t)lasthbp <= search_end
+ && (thishbp = GC_is_black_listed(lasthbp,
+ (word)eff_size_needed))
+ != 0) {
+ lasthbp = thishbp;
+ }
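+ /* lasthbp is now the first address in this block at which */
+ /* eff_size_needed bytes are not black listed, or it lies */
+ /* beyond search_end if there is no such address. */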
+ size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
+ thishbp = lasthbp;
+ if (size_avail >= size_needed) {
+ if (thishbp != hbp &&
+ 0 != (thishdr = GC_install_header(thishbp))) {
+ /* Make sure it's mapped before we mangle it. */
+# ifdef USE_MUNMAP
+ if (!IS_MAPPED(hhdr)) {
+ GC_remap((ptr_t)hbp, hhdr -> hb_sz);
+ hhdr -> hb_flags &= ~WAS_UNMAPPED;
+ }
+# endif
+ /* Split the block at thishbp */
+ GC_split_block(hbp, hhdr, thishbp, thishdr, n);
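+ /* The leading, black-listed portion stays behind as a */
+ /* free block; we continue with the clean tail. */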
+ /* Advance to thishbp */
+ hbp = thishbp;
+ hhdr = thishdr;
+ /* We must now allocate thishbp, since it may */
+ /* be on the wrong free list. */
+ }
+ } else if (size_needed > (signed_word)BL_LIMIT
+ && orig_avail - size_needed
+ > (signed_word)BL_LIMIT) {
+ /* Punt, since anything else risks unreasonable heap growth. */
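+ /* Punting here means allocating from the block anyway: */
+ /* for very large requests, repeatedly skipping blocks */
+ /* because of the black list could grow the heap without */
+ /* bound, so warn (with rate limiting) and restore the */
+ /* full size below. */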
+ if (++GC_large_alloc_warn_suppressed
+ >= GC_large_alloc_warn_interval) {
+ WARN("Repeated allocation of very large block "
+ "(appr. size %" GC_PRIdPTR "):\n"
+ "\tMay lead to memory leak and poor performance.\n",
+ size_needed);
+ GC_large_alloc_warn_suppressed = 0;
+ }
+ size_avail = orig_avail;
+ } else if (size_avail == 0 && size_needed == HBLKSIZE
+ && IS_MAPPED(hhdr)) {
+ if (!GC_find_leak) {
+ static unsigned count = 0;
+
+ /* The block is completely blacklisted. We need */
+ /* to drop some such blocks, since otherwise we spend */
+ /* all our time traversing them if pointer-free */
+ /* blocks are unpopular. */
+ /* A dropped block will be reconsidered at next GC. */
+ if ((++count & 3) == 0) {
+ /* Allocate and drop the block in small chunks, to */
+ /* maximize the chance that we will recover some */
+ /* later. */
+ word total_size = hhdr -> hb_sz;
+ struct hblk * limit = hbp + divHBLKSZ(total_size);
+ struct hblk * h;
+ struct hblk * prev = hhdr -> hb_prev;
+
+ GC_large_free_bytes -= total_size;
+ GC_bytes_dropped += total_size;
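+ /* GC_bytes_dropped records pages discarded because of */
+ /* black listing; presumably the collection trigger counts */
+ /* them as allocated, so dropping still hastens the next GC. */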
+ GC_remove_from_fl(hhdr, n);
+ for (h = hbp; h < limit; h++) {
+ if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
+ (void) setup_header(hhdr, h, HBLKSIZE, PTRFREE, 0); /* Can't fail */
+ if (GC_debugging_started) {
+ BZERO(h, HBLKSIZE);
+ }
+ }
+ }
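+ /* Each page is now a separate one-page pointer-free */
+ /* object that is never handed to the client, so the next */
+ /* collection can reclaim any pages that are no longer */
+ /* black listed. */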
+ /* Restore hbp to point at free block */
+ hbp = prev;
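+ /* A null prev means the dropped block headed the free */
+ /* list; retry the search of list n from the beginning. */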
+ if (0 == hbp) {
+ return GC_allochblk_nth(sz, kind, flags, n, may_split);
+ }
+ hhdr = HDR(hbp);
+ }
+ }
+ }
+ }
+ if (size_avail >= size_needed) {
+# ifdef USE_MUNMAP
+ if (!IS_MAPPED(hhdr)) {
+ GC_remap((ptr_t)hbp, hhdr -> hb_sz);
+ hhdr -> hb_flags &= ~WAS_UNMAPPED;
+ /* Note: This may leave adjacent, mapped free blocks. */
+ }
+# endif
+ /* hbp may be on the wrong freelist; the parameter n */
+ /* is important. */
+ hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
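+ /* GC_get_first_part detaches size_needed bytes from the */
+ /* front of hbp (removing it from list n) and presumably */
+ /* returns any remainder to the free list for its new size. */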
+ break;
+ }
+ }