# include "version.h"
+#if defined(SAVE_CALL_CHAIN) && \
+ !(defined(REDIRECT_MALLOC) && defined(GC_HAVE_BUILTIN_BACKTRACE))
+# define SAVE_CALL_CHAIN_IN_GC
+ /* This is only safe if the call chain save mechanism won't end up */
+ /* calling GC_malloc. The GNU C library documentation suggests */
+ /* that backtrace doesn't use malloc, but at least the initial */
+ /* call in some versions does seem to invoke the dynamic linker, */
+ /* which uses malloc. */
+#endif
+
/* some more variables */
extern signed_word GC_mem_found; /* Number of reclaimed longwords */
extern GC_bool GC_collection_in_progress();
/* Collection is in progress, or was abandoned. */
-extern GC_bool GC_print_back_height;
-
int GC_never_stop_func GC_PROTO((void)) { return(0); }
unsigned long GC_time_limit = TIME_LIMIT;
if (GC_print_stats) {
GC_printf0("Abandoning stopped marking after ");
GC_printf1("%lu msecs", (unsigned long)time_diff);
- GC_printf1("(attempt %d)\n", (unsigned long) GC_n_attempts);
+	GC_printf1(" (attempt %lu)\n", (unsigned long) GC_n_attempts);
}
# endif
return(1);
/* had been reallocated this round. Finalization is user */
/* visible progress. And if we don't count this, we have */
/* stability problems for programs that finalize all objects. */
- result += GC_words_wasted;
+ if ((GC_words_wasted >> 3) < result)
+ result += GC_words_wasted;
/* This doesn't reflect useful work. But if there is lots of */
/* new fragmentation, the same is probably true of the heap, */
/* and the collection will be correspondingly cheaper. */
{
# define NWORDS 64
word frames[NWORDS];
+ /* Some compilers will warn that frames was set but never used. */
+ /* That's the whole idea ... */
register int i;
for (i = 0; i < NWORDS; i++) frames[i] = 0;
# endif
if (GC_stopped_mark(GC_time_limit == GC_TIME_UNLIMITED?
GC_never_stop_func : GC_timeout_stop_func)) {
-# ifdef SAVE_CALL_CHAIN
+# ifdef SAVE_CALL_CHAIN_IN_GC
GC_save_callers(GC_last_stack);
# endif
GC_finish_collection();
}
GC_invalidate_mark_state(); /* Flush mark stack. */
GC_clear_marks();
-# ifdef SAVE_CALL_CHAIN
+# ifdef SAVE_CALL_CHAIN_IN_GC
GC_save_callers(GC_last_stack);
# endif
GC_is_full_gc = TRUE;
for (i = GC_deficit; i < GC_RATE*n; i++) {
if (GC_mark_some((ptr_t)0)) {
/* Need to finish a collection */
-# ifdef SAVE_CALL_CHAIN
+# ifdef SAVE_CALL_CHAIN_IN_GC
GC_save_callers(GC_last_stack);
# endif
# ifdef PARALLEL_MARK
# endif
expansion_slop = WORDS_TO_BYTES(min_words_allocd()) + 4*MAXHINCR*HBLKSIZE;
if (GC_last_heap_addr == 0 && !((word)space & SIGNB)
- || GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space) {
+ || (GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space)) {
/* Assume the heap is growing up */
GC_greatest_plausible_heap_addr =
(GC_PTR)GC_max((ptr_t)GC_greatest_plausible_heap_addr,
/* How many consecutive GC/expansion failures? */
/* Reset by GC_allochblk. */
+static word last_fo_entries = 0;
+static word last_words_finalized = 0;
+
GC_bool GC_collect_or_expand(needed_blocks, ignore_off_page)
word needed_blocks;
GC_bool ignore_off_page;
{
if (!GC_incremental && !GC_dont_gc &&
- (GC_dont_expand && GC_words_allocd > 0 || GC_should_collect())) {
+ ((GC_dont_expand && GC_words_allocd > 0)
+ || (GC_fo_entries > (last_fo_entries + 500) && (last_words_finalized || GC_words_finalized))
+ || GC_should_collect())) {
GC_gcollect_inner();
+ last_fo_entries = GC_fo_entries;
+ last_words_finalized = GC_words_finalized;
} else {
word blocks_to_get = GC_heapsize/(HBLKSIZE*GC_free_space_divisor)
+ needed_blocks;
if (blocks_to_get > MAXHINCR) {
word slop;
+ /* Get the minimum required to make it likely that we */
+ /* can satisfy the current request in the presence of black- */
+ /* listing. This will probably be more than MAXHINCR. */
if (ignore_off_page) {
slop = 4;
} else {