#include "../callargs.h"
#include "../threads/thread.h"
#include "../threads/locks.h"
+#include "../sysdep/threads.h"
#include <assert.h>
#include "lifespan.h"
#undef ALIGN
#undef OFFSET
+#define HEURISTIC_SEL 0
+#define HEURISTIC_PARAM 2UL
+
+
+#define next_collection_heuristic_init() \
+ (void*)((long)heap_top + (((long)heap_limit - (long)heap_top) >> 4))
+
+#if HEURISTIC_SEL == 0
+#define next_collection_heuristic() \
+ (void*)((long)heap_top + (((long)heap_limit - (long)heap_top) >> HEURISTIC_PARAM))
+#elif HEURISTIC_SEL == 1
+#define next_collection_heuristic() \
+ (void*)((long)heap_top + (((long)heap_top - (long)heap_base) << HEURISTIC_PARAM))
+#elif HEURISTIC_SEL == 2
+#define next_collection_heuristic() \
+ (void*)((long)heap_top + HEURISTIC_PARAM)
+#endif
+
//#define PSEUDO_GENERATIONAL
//#define COLLECT_LIFESPAN
//#define NEW_COLLECT_LIFESPAN
//#define COLLECT_FRAGMENTATION
-
//#define GC_COLLECT_STATISTICS
//#define FINALIZER_COUNTING
/* 6. calculate a useful first collection limit */
/* This is extremly primitive at this point...
we should replace it with something more useful -- phil. */
- heap_next_collection = (void*)((long)heap_base + (heap_size / 4));
+ heap_next_collection = next_collection_heuristic_init();
/* 7. Init the global reference lists & finalizer addresses */
references = NULL;
}
success:
- /* 3.a. mark all necessary bits, store the finalizer & return the newly allocate block */
+ /* 3.a. mark all necessary bits, store the finalizer & return the newly allocated block */
/* I don't mark the object-start anymore, as it always is at the beginning of a free-block,
which already is marked (Note: The first free-block gets marked in heap_init). -- phil. */
bitmap_setbit(start_bits, free_chunk); /* mark the new object */
-#if 1 /* FIXME: will become unecessary soon */
+#ifndef SIZE_FROM_CLASSINFO
bitmap_setbit(start_bits, (void*)((long)free_chunk + (long)length)); /* mark the freespace behind the new object */
#endif
{
#ifdef PSEUDO_GENERATIONAL
static void* generation_start = 0;
- staitc int generation_num = 0;
+ static int generation_num = 0;
+ void* addr = heap_base;
#endif
void* free_start;
void* free_end = heap_base;
#endif
#ifdef PSEUDO_GENERATIONAL
- if (!generation_start || !(generation_start % 5))
+ if (!generation_start || !(generation_num % 5))
generation_start = heap_base;
+
+ ++generation_num;
#endif
/* 1. reset the freelists */
-#if 1
+#if 0
+ allocator_mark_free_kludge(start_bits); /* this line will be kicked out, when
+ the SIZE_FROM_CLASSINFO reclaim
+ is implemented (very soon!!) */
+#endif
+
+#ifdef PSEUDO_GENERATIONAL
+ for (addr = heap_base; addr <= generation_start; addr = (void*)((char*)addr + 1)) {
+ if (bitmap_testbit(start_bits, addr))
+ bitmap_setbit(mark_bits, addr);
+ }
+
allocator_mark_free_kludge(start_bits); /* this line will be kicked out, when
the SIZE_FROM_CLASSINFO reclaim
is implemented (very soon!!) */
#endif
}
+#if 0
if (heap_top < heap_limit)
bitmap_setbit(start_bits, heap_top);
+#endif
/* 3.4. emit fragmentation info */
#ifdef COLLECT_FRAGMENTATION
- fprintf(fragfile,
- "%ld\t%ld\t%ld\t%ld\n",
- (unsigned long)heap_top - (unsigned long)heap_base,
- (unsigned long)heap_top - (unsigned long)heap_base - free_size,
- free_size,
- free_fragments);
+ {
+ unsigned long heap_full = (unsigned long)heap_top - (unsigned long)heap_base;
+ unsigned long heap_life = (unsigned long)heap_top - (unsigned long)heap_base - free_size;
+
+ fprintf(fragfile,
+ "%ld\t%ld\t%ld\t%ld\t%f\t%f\t%f\n",
+ heap_full,
+ heap_life,
+ free_size,
+ free_fragments,
+ free_fragments ? 100*(float)free_size/free_fragments : 0.0,
+ heap_full ? 100*(float)heap_life/heap_full : 0.0,
+ heap_full ? 100*(float)free_size/heap_full : 0.0
+ );
+ }
fflush(fragfile);
allocator_dump_to_file(fragsizefile);
#endif
/* 4. adjust the collection threshold */
- heap_next_collection = (void*)((long)heap_top + ((long)heap_limit - (long)heap_top) / 8);
+ heap_next_collection = next_collection_heuristic();
if (heap_next_collection > heap_limit)
heap_next_collection = heap_limit;
/* 1.a. if addr doesn't point into the heap, return. */
if ((unsigned long)addr - (unsigned long)heap_base >=
- (heap_top - heap_base)) {
((unsigned long)heap_top - (unsigned long)heap_base)) {
#ifdef GC_COLLECT_STATISTICS
++gc_mark_not_inheap;
#endif
for (aThread = liveThreads; aThread != 0;
aThread = CONTEXT(aThread).nextlive) {
gc_mark_object_at((void*)aThread);
- if (aThread == currentThread) {
- void **top_of_stack = &dummy;
-
- if (top_of_stack > (void**)CONTEXT(aThread).stackEnd)
- markreferences((void**)CONTEXT(aThread).stackEnd, top_of_stack);
- else
- markreferences(top_of_stack, (void**)CONTEXT(aThread).stackEnd);
- }
- else {
- if (CONTEXT(aThread).usedStackTop > CONTEXT(aThread).stackEnd)
- markreferences((void**)CONTEXT(aThread).stackEnd,
- (void**)CONTEXT(aThread).usedStackTop);
- else
- markreferences((void**)CONTEXT(aThread).usedStackTop,
- (void**)CONTEXT(aThread).stackEnd);
- }
+ if (CONTEXT(aThread).usedStackTop > CONTEXT(aThread).stackEnd)
+ markreferences((void**)CONTEXT(aThread).stackEnd,
+ (void**)CONTEXT(aThread).usedStackTop);
+ else
+ markreferences((void**)CONTEXT(aThread).usedStackTop,
+ (void**)CONTEXT(aThread).stackEnd);
}
markreferences((void**)&threadQhead[0],
gc_call (void)
{
#ifdef USE_THREADS
+ u1 dummy;
+
assert(blockInts == 0);
intsDisable();
- if (currentThread == NULL || currentThread == mainThread)
+ if (currentThread == NULL || currentThread == mainThread) {
+ CONTEXT(mainThread).usedStackTop = &dummy;
gc_run();
+ }
else
- asm_switchstackandcall(CONTEXT(mainThread).usedStackTop, gc_run);
+ asm_switchstackandcall(CONTEXT(mainThread).usedStackTop, gc_run,
+ (void**)&(CONTEXT(currentThread).usedStackTop));
intsRestore();
#else
gc_run();