* modified is included with the above copyright notice.
*/
-#include "config.h"
-
/* #define DEBUG */
#include <stdio.h>
#include "private/gc_priv.h"
#ifndef USE_MUNMAP
- word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
+ STATIC word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
/* Number of free bytes on each list. */
/* Return the largest n such that */
#endif /* USE_MUNMAP */
/* Map a number of blocks to the appropriate large block free list index. */
-int GC_hblk_fl_from_blocks(word blocks_needed)
+STATIC int GC_hblk_fl_from_blocks(word blocks_needed)
{
if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
# ifdef USE_MUNMAP
# define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
-# else /* !USE_MMAP */
+# else /* !USE_MUNMAP */
# define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */
# if !defined(NO_DEBUGGING)
-void GC_print_hblkfreelist()
+void GC_print_hblkfreelist(void)
{
struct hblk * h;
word total_free = 0;
for (i = 0; i <= N_HBLK_FLS; ++i) {
h = GC_hblkfreelist[i];
# ifdef USE_MUNMAP
- if (0 != h) GC_printf("Free list %ld:\n",
- (unsigned long)i);
+ if (0 != h) GC_printf("Free list %u:\n", i);
# else
- if (0 != h) GC_printf("Free list %lu (Total size %lu):\n",
+ if (0 != h) GC_printf("Free list %u (Total size %lu):\n",
i, (unsigned long)GC_free_bytes[i]);
# endif
while (h != 0) {
hhdr = HDR(h);
sz = hhdr -> hb_sz;
- GC_printf("\t%p size %lu ", h, (unsigned long)sz);
total_free += sz;
- if (GC_is_black_listed(h, HBLKSIZE) != 0) {
- GC_printf("start black listed\n");
- } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
- GC_printf("partially black listed\n");
- } else {
- GC_printf("not black listed\n");
- }
+ GC_printf("\t%p size %lu %s black listed\n", h, (unsigned long)sz,
+ GC_is_black_listed(h, HBLKSIZE) != 0 ? "start" :
+ GC_is_black_listed(h, hhdr -> hb_sz) != 0 ? "partially" :
+ "not");
h = hhdr -> hb_next;
}
}
/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere. */
-int free_list_index_of(hdr *wanted)
+static int free_list_index_of(hdr *wanted)
{
struct hblk * h;
hdr * hhdr;
return -1;
}
-void GC_dump_regions()
+void GC_dump_regions(void)
{
unsigned i;
ptr_t start, end;
GC_printf("***Section from %p to %p\n", start, end);
for (p = start; p < end;) {
hhdr = HDR(p);
- GC_printf("\t%p ", p);
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
- GC_printf("Missing header!!(%d)\n", hhdr);
+ GC_printf("\t%p Missing header!!(%p)\n", p, hhdr);
p += HBLKSIZE;
continue;
}
divHBLKSZ(hhdr -> hb_sz));
int actual_index;
- GC_printf("\tfree block of size 0x%lx bytes",
- (unsigned long)(hhdr -> hb_sz));
- if (IS_MAPPED(hhdr)) {
- GC_printf("\n");
- } else {
- GC_printf("(unmapped)\n");
- }
+ GC_printf("\t%p\tfree block of size 0x%lx bytes%s\n", p,
+ (unsigned long)(hhdr -> hb_sz),
+ IS_MAPPED(hhdr) ? "" : " (unmapped)");
actual_index = free_list_index_of(hhdr);
if (-1 == actual_index) {
GC_printf("\t\tBlock not on free list %d!!\n",
}
p += hhdr -> hb_sz;
} else {
- GC_printf("\tused for blocks of size 0x%lx bytes\n",
+ GC_printf("\t%p\tused for blocks of size 0x%lx bytes\n", p,
(unsigned long)(hhdr -> hb_sz));
p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
}
int kind, unsigned flags)
{
word descr;
- size_t granules;
+# ifndef MARK_BIT_PER_OBJ
+ size_t granules;
+# endif
/* Set size, kind and mark proc fields */
hhdr -> hb_sz = byte_sz;
* We assume it is on the nth free list, or on the size
* appropriate free list if n is FL_UNKNOWN.
*/
-void GC_remove_from_fl(hdr *hhdr, int n)
+STATIC void GC_remove_from_fl(hdr *hhdr, int n)
{
int index;
/*
* Return a pointer to the free block ending just before h, if any.
*/
-struct hblk * GC_free_block_ending_at(struct hblk *h)
+STATIC struct hblk * GC_free_block_ending_at(struct hblk *h)
{
struct hblk * p = h - 1;
hdr * phdr;
* Add hhdr to the appropriate free list.
* We maintain individual free lists sorted by address.
*/
-void GC_add_to_fl(struct hblk *h, hdr *hhdr)
+STATIC void GC_add_to_fl(struct hblk *h, hdr *hhdr)
{
int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
struct hblk *second = GC_hblkfreelist[index];
if (0 != nexthdr && HBLK_IS_FREE(nexthdr)
&& (signed_word) (size + (nextsize = nexthdr->hb_sz)) > 0
/* no pot. overflow */) {
- if (IS_MAPPED(hhdr)) {
- GC_ASSERT(!IS_MAPPED(nexthdr));
+ /* Note that we usually try to avoid adjacent free blocks */
+ /* that are either both mapped or both unmapped. But that */
+ /* isn't guaranteed to hold since we remap blocks when we */
+ /* split them, and don't merge at that point. It may also */
+ /* not hold if the merged block would be too big. */
+ if (IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
/* make both consistent, so that we can merge */
if (size > nextsize) {
GC_remap((ptr_t)next, nextsize);
} else {
GC_unmap((ptr_t)h, size);
+ GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
hhdr -> hb_flags |= WAS_UNMAPPED;
}
- } else if (IS_MAPPED(nexthdr)) {
- GC_ASSERT(!IS_MAPPED(hhdr));
+ } else if (IS_MAPPED(nexthdr) && !IS_MAPPED(hhdr)) {
if (size > nextsize) {
GC_unmap((ptr_t)next, nextsize);
+ GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
} else {
GC_remap((ptr_t)h, size);
hhdr -> hb_flags &= ~WAS_UNMAPPED;
hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
}
- } else {
+ } else if (!IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
/* Unmap any gap in the middle */
- GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
+ GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
}
/* If they are both unmapped, we merge, but leave unmapped. */
GC_remove_from_fl(hhdr, i);
* The header for the returned block must be set up by the caller.
* If the return value is not 0, then hhdr is the header for it.
*/
-struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
- size_t bytes, int index)
+STATIC struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
+ size_t bytes, int index)
{
word total_size = hhdr -> hb_sz;
struct hblk * rest;
*
 * Nhdr is not completely filled in, since it is about to be allocated.
* It may in fact end up on the wrong free list for its size.
+ * That's not a disaster, since n is about to be allocated
+ * by our caller.
* (Hence adding it to a free list is silly. But this path is hopefully
* rare enough that it doesn't matter. The code is cleaner this way.)
*/
-void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
- hdr *nhdr, int index /* Index of free list */)
+STATIC void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
+ hdr *nhdr, int index /* Index of free list */)
{
word total_size = hhdr -> hb_sz;
word h_size = (word)n - (word)h;
}
INCR_FREE_BYTES(index, -(signed_word)h_size);
FREE_ASSERT(GC_free_bytes[index] > 0);
-# ifdef GC_ASSERTIONS
- nhdr -> hb_flags &= ~FREE_BLK;
- /* Don't fail test for consecutive */
- /* free blocks in GC_add_to_fl. */
-# endif
# ifdef USE_MUNMAP
hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
# endif
nhdr -> hb_flags |= FREE_BLK;
}
-struct hblk *
+STATIC struct hblk *
GC_allochblk_nth(size_t sz/* bytes */, int kind, unsigned flags, int n,
GC_bool may_split);
if (0 != result) return result;
if (GC_use_entire_heap || GC_dont_gc
|| USED_HEAP_SIZE < GC_requested_heapsize
- || TRUE_INCREMENTAL || !GC_should_collect()) {
+ || GC_incremental || !GC_should_collect()) {
/* Should use more of the heap, even if it requires splitting. */
split_limit = N_HBLK_FLS;
} else {
* Unlike the above, sz is in bytes.
* The may_split flag indicates whether it's OK to split larger blocks.
*/
-struct hblk *
+STATIC struct hblk *
GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, GC_bool may_split)
{
struct hblk *hbp;
/* Notify virtual dirty bit implementation that we are about to write. */
/* Ensure that pointerfree objects are not protected if it's avoidable. */
+ /* This also ensures that newly allocated blocks are treated as dirty. */
+ /* Necessary since we don't protect free blocks. */
+ GC_ASSERT((size_needed & (HBLKSIZE-1)) == 0);
GC_remove_protection(hbp, divHBLKSZ(size_needed),
(hhdr -> hb_descr == 0) /* pointer-free */);
return( hbp );
}
-struct hblk * GC_freehblk_ptr = 0; /* Search position hint for GC_freehblk */
-
/*
* Free a heap block.
*