X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;ds=sidebyside;f=src%2Fmm%2Fboehm-gc%2Fallchblk.c;h=627ef904a199e4ad4f08fb78dd4e4487fb474585;hb=c83bff94e40ee0a218a39931af17814d1a42cb5c;hp=995d6585f7b5c76190f30d72e4cb290bb384b363;hpb=db5750e3eaf39a47dbccccfe7cdd180fb686aac6;p=cacao.git

diff --git a/src/mm/boehm-gc/allchblk.c b/src/mm/boehm-gc/allchblk.c
index 995d6585f..627ef904a 100644
--- a/src/mm/boehm-gc/allchblk.c
+++ b/src/mm/boehm-gc/allchblk.c
@@ -14,8 +14,6 @@
  * modified is included with the above copyright notice.
  */
 
-#include "config.h"
-
 /* #define DEBUG */
 #include <stdio.h>
 #include "private/gc_priv.h"
@@ -50,7 +48,7 @@ struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
 
 #ifndef USE_MUNMAP
 
-  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
+  STATIC word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
     /* Number of free bytes on each list. */
 
  /* Return the largest n such that */
@@ -85,7 +83,7 @@ struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
 #endif /* USE_MUNMAP */
 
 /* Map a number of blocks to the appropriate large block free list index. */
-int GC_hblk_fl_from_blocks(word blocks_needed)
+STATIC int GC_hblk_fl_from_blocks(word blocks_needed)
 {
     if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
     if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
@@ -99,12 +97,12 @@ int GC_hblk_fl_from_blocks(word blocks_needed)
 
 # ifdef USE_MUNMAP
 #   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
-# else  /* !USE_MMAP */
+# else  /* !USE_MUNMAP */
 #   define IS_MAPPED(hhdr) 1
 # endif /* USE_MUNMAP */
 
 # if !defined(NO_DEBUGGING)
-void GC_print_hblkfreelist()
+void GC_print_hblkfreelist(void)
 {
     struct hblk * h;
     word total_free = 0;
@@ -115,24 +113,19 @@ void GC_print_hblkfreelist()
     for (i = 0; i <= N_HBLK_FLS; ++i) {
       h = GC_hblkfreelist[i];
 #     ifdef USE_MUNMAP
-      if (0 != h) GC_printf("Free list %ld:\n",
-                            (unsigned long)i);
+      if (0 != h) GC_printf("Free list %u:\n", i);
 #     else
-      if (0 != h) GC_printf("Free list %lu (Total size %lu):\n",
+      if (0 != h) GC_printf("Free list %u (Total size %lu):\n",
                             i, (unsigned long)GC_free_bytes[i]);
 #     endif
       while (h != 0) {
         hhdr = HDR(h);
         sz = hhdr -> hb_sz;
-        GC_printf("\t%p size %lu ", h, (unsigned long)sz);
         total_free += sz;
-        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
-             GC_printf("start black listed\n");
-        } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
-             GC_printf("partially black listed\n");
-        } else {
-             GC_printf("not black listed\n");
-        }
+        GC_printf("\t%p size %lu %s black listed\n", h, (unsigned long)sz,
+                  GC_is_black_listed(h, HBLKSIZE) != 0 ? "start" :
+                  GC_is_black_listed(h, hhdr -> hb_sz) != 0 ? "partially" :
+                  "not");
         h = hhdr -> hb_next;
       }
     }
@@ -147,7 +140,7 @@ void GC_print_hblkfreelist()
 
 /* Return the free list index on which the block described by the header */
 /* appears, or -1 if it appears nowhere.                                  */
-int free_list_index_of(hdr *wanted)
+static int free_list_index_of(hdr *wanted)
 {
     struct hblk * h;
     hdr * hhdr;
@@ -164,7 +157,7 @@ int free_list_index_of(hdr *wanted)
     return -1;
 }
 
-void GC_dump_regions()
+void GC_dump_regions(void)
 {
     unsigned i;
     ptr_t start, end;
@@ -183,9 +176,8 @@ void GC_dump_regions()
         GC_printf("***Section from %p to %p\n", start, end);
         for (p = start; p < end;) {
             hhdr = HDR(p);
-            GC_printf("\t%p ", p);
             if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
-                GC_printf("Missing header!!(%d)\n", hhdr);
+                GC_printf("\t%p Missing header!!(%p)\n", p, hhdr);
                 p += HBLKSIZE;
                 continue;
             }
@@ -194,13 +186,9 @@ void GC_dump_regions()
                                divHBLKSZ(hhdr -> hb_sz));
                 int actual_index;
 
-                GC_printf("\tfree block of size 0x%lx bytes",
-                          (unsigned long)(hhdr -> hb_sz));
-                if (IS_MAPPED(hhdr)) {
-                    GC_printf("\n");
-                } else {
-                    GC_printf("(unmapped)\n");
-                }
+                GC_printf("\t%p\tfree block of size 0x%lx bytes%s\n", p,
+                          (unsigned long)(hhdr -> hb_sz),
+                          IS_MAPPED(hhdr) ? "" : " (unmapped)");
                 actual_index = free_list_index_of(hhdr);
                 if (-1 == actual_index) {
                     GC_printf("\t\tBlock not on free list %d!!\n",
@@ -211,7 +199,7 @@ void GC_dump_regions()
                 }
                 p += hhdr -> hb_sz;
             } else {
-                GC_printf("\tused for blocks of size 0x%lx bytes\n",
+                GC_printf("\t%p\tused for blocks of size 0x%lx bytes\n", p,
                           (unsigned long)(hhdr -> hb_sz));
                 p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
             }
@@ -228,7 +216,9 @@ static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
                             int kind, unsigned flags)
 {
     word descr;
-    size_t granules;
+#   ifndef MARK_BIT_PER_OBJ
+      size_t granules;
+#   endif
 
     /* Set size, kind and mark proc fields */
       hhdr -> hb_sz = byte_sz;
@@ -288,7 +278,7 @@ static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
  * We assume it is on the nth free list, or on the size
  * appropriate free list if n is FL_UNKNOWN.
  */
-void GC_remove_from_fl(hdr *hhdr, int n)
+STATIC void GC_remove_from_fl(hdr *hhdr, int n)
 {
     int index;
 
@@ -329,7 +319,7 @@ void GC_remove_from_fl(hdr *hhdr, int n)
 /*
  * Return a pointer to the free block ending just before h, if any.
  */
-struct hblk * GC_free_block_ending_at(struct hblk *h)
+STATIC struct hblk * GC_free_block_ending_at(struct hblk *h)
 {
     struct hblk * p = h - 1;
     hdr * phdr;
@@ -360,7 +350,7 @@ struct hblk * GC_free_block_ending_at(struct hblk *h)
  * Add hhdr to the appropriate free list.
 * We maintain individual free lists sorted by address.
 */
-void GC_add_to_fl(struct hblk *h, hdr *hhdr)
+STATIC void GC_add_to_fl(struct hblk *h, hdr *hhdr)
 {
     int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
     struct hblk *second = GC_hblkfreelist[index];
@@ -441,27 +431,32 @@ void GC_merge_unmapped(void)
             if (0 != nexthdr && HBLK_IS_FREE(nexthdr)
                 && (signed_word) (size + (nextsize = nexthdr->hb_sz)) > 0
                         /* no pot. overflow */) {
-                if (IS_MAPPED(hhdr)) {
-                  GC_ASSERT(!IS_MAPPED(nexthdr));
+                /* Note that we usually try to avoid adjacent free blocks   */
+                /* that are either both mapped or both unmapped.  But that  */
+                /* isn't guaranteed to hold since we remap blocks when we   */
+                /* split them, and don't merge at that point.  It may also  */
+                /* not hold if the merged block would be too big.           */
+                if (IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
                   /* make both consistent, so that we can merge */
                     if (size > nextsize) {
                       GC_remap((ptr_t)next, nextsize);
                     } else {
                       GC_unmap((ptr_t)h, size);
+                      GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
                       hhdr -> hb_flags |= WAS_UNMAPPED;
                     }
-                } else if (IS_MAPPED(nexthdr)) {
-                  GC_ASSERT(!IS_MAPPED(hhdr));
+                } else if (IS_MAPPED(nexthdr) && !IS_MAPPED(hhdr)) {
                   if (size > nextsize) {
                     GC_unmap((ptr_t)next, nextsize);
+                    GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
                   } else {
                     GC_remap((ptr_t)h, size);
                     hhdr -> hb_flags &= ~WAS_UNMAPPED;
                     hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
                   }
-                } else {
+                } else if (!IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
                   /* Unmap any gap in the middle */
-                  GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
+                  GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
                 }
                 /* If they are both unmapped, we merge, but leave unmapped. */
                 GC_remove_from_fl(hhdr, i);
@@ -489,8 +484,8 @@ void GC_merge_unmapped(void)
  * The header for the returned block must be set up by the caller.
  * If the return value is not 0, then hhdr is the header for it.
  */
-struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
-                                size_t bytes, int index)
+STATIC struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
+                                       size_t bytes, int index)
 {
     word total_size = hhdr -> hb_sz;
     struct hblk * rest;
@@ -525,11 +520,13 @@ struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
  *
  * Nhdr is not completely filled in, since it is about to allocated.
  * It may in fact end up on the wrong free list for its size.
+ * That's not a disaster, since n is about to be allocated
+ * by our caller.
 * (Hence adding it to a free list is silly. But this path is hopefully
 * rare enough that it doesn't matter. The code is cleaner this way.)
 */
-void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
-                    hdr *nhdr, int index /* Index of free list */)
+STATIC void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
+                           hdr *nhdr, int index /* Index of free list */)
 {
     word total_size = hhdr -> hb_sz;
     word h_size = (word)n - (word)h;
@@ -551,11 +548,6 @@ void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
     }
     INCR_FREE_BYTES(index, -(signed_word)h_size);
     FREE_ASSERT(GC_free_bytes[index] > 0);
-#   ifdef GC_ASSERTIONS
-      nhdr -> hb_flags &= ~FREE_BLK;
-                /* Don't fail test for consecutive      */
-                /* free blocks in GC_add_to_fl.         */
-#   endif
 #   ifdef USE_MUNMAP
       hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
 #   endif
@@ -564,7 +556,7 @@ void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
     nhdr -> hb_flags |= FREE_BLK;
 }
 
-struct hblk *
+STATIC struct hblk *
 GC_allochblk_nth(size_t sz/* bytes */, int kind, unsigned flags, int n,
                  GC_bool may_split);
 
@@ -598,7 +590,7 @@ GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
     if (0 != result) return result;
     if (GC_use_entire_heap || GC_dont_gc
         || USED_HEAP_SIZE < GC_requested_heapsize
-        || TRUE_INCREMENTAL || !GC_should_collect()) {
+        || GC_incremental || !GC_should_collect()) {
         /* Should use more of the heap, even if it requires splitting. */
         split_limit = N_HBLK_FLS;
     } else {
@@ -638,7 +630,7 @@ GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
 * Unlike the above, sz is in bytes.
 * The may_split flag indicates whether it's OK to split larger blocks.
 */
-struct hblk *
+STATIC struct hblk *
 GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, GC_bool may_split)
 {
     struct hblk *hbp;
@@ -797,6 +789,9 @@ GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, GC_bool may_split)
 
               /* Notify virtual dirty bit implementation that we are about to write.  */
               /* Ensure that pointerfree objects are not protected if it's avoidable. */
+              /* This also ensures that newly allocated blocks are treated as dirty.  */
+              /* Necessary since we don't protect free blocks.                        */
+              GC_ASSERT((size_needed & (HBLKSIZE-1)) == 0);
               GC_remove_protection(hbp, divHBLKSZ(size_needed),
                                    (hhdr -> hb_descr == 0) /* pointer-free */);
 
@@ -814,8 +809,6 @@ GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, GC_bool may_split)
     return( hbp );
 }
 
-struct hblk * GC_freehblk_ptr = 0; /* Search position hint for GC_freehblk */
-
 /*
  * Free a heap block.
 *
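
For reference, the free-list bucketing that this patch touches (GC_hblk_fl_from_blocks is merely marked STATIC above; its logic is unchanged) can be exercised standalone. The sketch below is a minimal approximation: the threshold values and the mid-range formula are assumptions taken from stock bdwgc defaults, since this diff elides them.

/* Standalone sketch of the size-to-free-list mapping. The constants    */
/* below are assumed stock-bdwgc defaults, not taken from this diff.    */
#include <stdio.h>

#define UNIQUE_THRESHOLD 32   /* sizes up to this many blocks get their own list */
#define HUGE_THRESHOLD  256   /* sizes of at least this many blocks share one    */
#define FL_COMPRESSION    8   /* in-between: this many sizes map to a single bin */
#define N_HBLK_FLS ((HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                    + UNIQUE_THRESHOLD)

/* Mirrors GC_hblk_fl_from_blocks: small sizes map 1:1, mid-range sizes */
/* are compressed FL_COMPRESSION:1, and huge sizes share the last list. */
static int hblk_fl_from_blocks(unsigned long blocks_needed)
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (int)((blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION)
           + UNIQUE_THRESHOLD;
}

int main(void)
{
    printf("%d %d %d %d\n",
           hblk_fl_from_blocks(10),     /* 10: dedicated list     */
           hblk_fl_from_blocks(40),     /* 33: first shared bin   */
           hblk_fl_from_blocks(100),    /* 40: compressed range   */
           hblk_fl_from_blocks(1000));  /* 60: the single huge bin */
    return 0;
}

Under these assumed thresholds, requests of 1..32 blocks each get a dedicated list, 33..255 blocks are compressed eight sizes per bin, and anything at or above 256 blocks lands on the final list; this keeps the first-fit search in GC_allochblk_nth short without maintaining one list per possible size.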