X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;f=libgc%2Falloc.c;h=2ffafd441d7ed48dde9265fb787ffb92c6ba806c;hb=45d6da52ce69cbd24f5930e1cad88d425e706186;hp=99a61f6f287cfc70e04f608a7d26caa27216afd7;hpb=6b6435d1b3206b0162c37e5ecce8d9a699fe6467;p=mono.git diff --git a/libgc/alloc.c b/libgc/alloc.c index 99a61f6f287..2ffafd441d7 100644 --- a/libgc/alloc.c +++ b/libgc/alloc.c @@ -92,6 +92,16 @@ char * GC_copyright[] = # include "version.h" +#if defined(SAVE_CALL_CHAIN) && \ + !(defined(REDIRECT_MALLOC) && defined(GC_HAVE_BUILTIN_BACKTRACE)) +# define SAVE_CALL_CHAIN_IN_GC + /* This is only safe if the call chain save mechanism won't end up */ + /* calling GC_malloc. The GNU C library documentation suggests */ + /* that backtrace doesn't use malloc, but at least the initial */ + /* call in some versions does seem to invoke the dynamic linker, */ + /* which uses malloc. */ +#endif + /* some more variables */ extern signed_word GC_mem_found; /* Number of reclaimed longwords */ @@ -104,8 +114,6 @@ word GC_free_space_divisor = 3; extern GC_bool GC_collection_in_progress(); /* Collection is in progress, or was abandoned. */ -extern GC_bool GC_print_back_height; - int GC_never_stop_func GC_PROTO((void)) { return(0); } unsigned long GC_time_limit = TIME_LIMIT; @@ -133,7 +141,7 @@ int GC_n_attempts = 0; /* Number of attempts at finishing */ if (GC_print_stats) { GC_printf0("Abandoning stopped marking after "); GC_printf1("%lu msecs", (unsigned long)time_diff); - GC_printf1("(attempt %d)\n", (unsigned long) GC_n_attempts); + GC_printf1("(attempt %ld)\n", (unsigned long) GC_n_attempts); } # endif return(1); @@ -198,7 +206,8 @@ word GC_adj_words_allocd() /* had been reallocated this round. Finalization is user */ /* visible progress. And if we don't count this, we have */ /* stability problems for programs that finalize all objects. */ - result += GC_words_wasted; + if ((GC_words_wasted >> 3) < result) + result += GC_words_wasted; /* This doesn't reflect useful work. But if there is lots of */ /* new fragmentation, the same is probably true of the heap, */ /* and the collection will be correspondingly cheaper. */ @@ -223,6 +232,8 @@ void GC_clear_a_few_frames() { # define NWORDS 64 word frames[NWORDS]; + /* Some compilers will warn that frames was set but never used. */ + /* That's the whole idea ... */ register int i; for (i = 0; i < NWORDS; i++) frames[i] = 0; @@ -260,10 +271,6 @@ void GC_maybe_gc() static int n_partial_gcs = 0; if (GC_should_collect()) { - if (GC_notify_event) - GC_notify_event (GC_EVENT_START); - - if (!GC_incremental) { GC_gcollect_inner(); n_partial_gcs = 0; @@ -299,7 +306,7 @@ void GC_maybe_gc() # endif if (GC_stopped_mark(GC_time_limit == GC_TIME_UNLIMITED? GC_never_stop_func : GC_timeout_stop_func)) { -# ifdef SAVE_CALL_CHAIN +# ifdef SAVE_CALL_CHAIN_IN_GC GC_save_callers(GC_last_stack); # endif GC_finish_collection(); @@ -309,10 +316,6 @@ void GC_maybe_gc() GC_n_attempts++; } } - - - if (GC_notify_event) - GC_notify_event (GC_EVENT_END); } } @@ -329,6 +332,10 @@ GC_stop_func stop_func; CLOCK_TYPE start_time, current_time; # endif if (GC_dont_gc) return FALSE; + + if (GC_notify_event) + GC_notify_event (GC_EVENT_START); + if (GC_incremental && GC_collection_in_progress()) { # ifdef CONDPRINT if (GC_print_stats) { @@ -368,7 +375,7 @@ GC_stop_func stop_func; } GC_invalidate_mark_state(); /* Flush mark stack. 
*/
     GC_clear_marks();
-#   ifdef SAVE_CALL_CHAIN
+#   ifdef SAVE_CALL_CHAIN_IN_GC
         GC_save_callers(GC_last_stack);
 #   endif
     GC_is_full_gc = TRUE;
@@ -391,6 +398,9 @@ GC_stop_func stop_func;
 	           MS_TIME_DIFF(current_time,start_time));
       }
 #   endif
+    if (GC_notify_event)
+	GC_notify_event (GC_EVENT_END);
+
     return(TRUE);
 }
 
@@ -423,7 +433,7 @@ int n;
 	for (i = GC_deficit; i < GC_RATE*n; i++) {
 	    if (GC_mark_some((ptr_t)0)) {
 	        /* Need to finish a collection */
-#   		ifdef SAVE_CALL_CHAIN
+#   		ifdef SAVE_CALL_CHAIN_IN_GC
 		    GC_save_callers(GC_last_stack);
 #   		endif
 #   		ifdef PARALLEL_MARK
@@ -634,9 +644,17 @@ GC_stop_func stop_func;
     }
 }
 
-void (*GC_notify_event) GC_PROTO((GCEventType e));
+void (*GC_notify_event) GC_PROTO((GC_EventType e));
 void (*GC_on_heap_resize) GC_PROTO((size_t new_size));
 
+GC_API void GC_set_on_collection_event (void (*fn) (GC_EventType))
+{
+	DCL_LOCK_STATE;
+	LOCK();
+	GC_notify_event = fn;
+	UNLOCK();
+}
+
 /* Finish up a collection.  Assumes lock is held, signals are disabled, */
 /* but the world is otherwise running.					 */
 void GC_finish_collection()
@@ -959,8 +977,8 @@ word n;
     }
 #   endif
     expansion_slop = WORDS_TO_BYTES(min_words_allocd()) + 4*MAXHINCR*HBLKSIZE;
-    if (GC_last_heap_addr == 0 && !((word)space & SIGNB)
-        || GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space) {
+    if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))
+        || (GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space)) {
         /* Assume the heap is growing up */
 	GC_greatest_plausible_heap_addr =
             (GC_PTR)GC_max((ptr_t)GC_greatest_plausible_heap_addr,
@@ -1029,9 +1047,9 @@ word needed_blocks;
 GC_bool ignore_off_page;
 {
     if (!GC_incremental && !GC_dont_gc &&
-	(GC_dont_expand && GC_words_allocd > 0
-	 || (GC_fo_entries > (last_fo_entries + 500) && (last_words_finalized || GC_words_finalized))
-	 || GC_should_collect())) {
+	((GC_dont_expand && GC_words_allocd > 0)
+	 || (GC_fo_entries > (last_fo_entries + 500) && (last_words_finalized || GC_words_finalized))
+	 || GC_should_collect())) {
       GC_gcollect_inner();
       last_fo_entries = GC_fo_entries;
       last_words_finalized = GC_words_finalized;
@@ -1042,6 +1060,9 @@ GC_bool ignore_off_page;
     if (blocks_to_get > MAXHINCR) {
 	word slop;
 
+	/* Get the minimum required to make it likely that we		*/
+	/* can satisfy the current request in the presence of black-	*/
+	/* listing. This will probably be more than MAXHINCR.		*/
 	if (ignore_off_page) {
 	    slop = 4;
 	} else {
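
Worked example for the GC_adj_words_allocd() guard added above (the numbers are illustrative, not taken from the patch): with result = 1000 words of apparent allocation and GC_words_wasted = 50000 words lost to fragmentation, GC_words_wasted >> 3 is 6250, which is not below result, so the wasted words are no longer added and heavy fragmentation cannot inflate the apparent allocation rate. With GC_words_wasted = 4000 the shifted value is 500, which is below result, so the wasted words are still counted as before.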
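
The @@ -634 hunk above adds a public setter, GC_set_on_collection_event(), for the collection-event callback, and the earlier hunks move the GC_EVENT_START/GC_EVENT_END notifications into GC_try_to_collect_inner(). The following client code is a minimal sketch, not part of the patch; it assumes the new entry point and the GC_EventType values are visible through the installed gc.h (only their definitions in alloc.c are shown here), and the stderr messages are illustrative only. Because the hook is invoked from inside the collector with the GC lock held, it should not allocate from the GC or re-enter it.

#include <stdio.h>
#include "gc.h"

static void on_gc_event (GC_EventType e)
{
    /* Runs inside the collector; keep it short and GC-free. */
    if (e == GC_EVENT_START)
        fputs ("GC: collection starting\n", stderr);
    else if (e == GC_EVENT_END)
        fputs ("GC: collection finished\n", stderr);
}

int main (void)
{
    void *p;

    GC_init ();
    GC_set_on_collection_event (on_gc_event);
    p = GC_MALLOC (4096);   /* some allocation work */
    (void) p;
    GC_gcollect ();         /* force a collection so the hook fires */
    return 0;
}

Since the setter itself takes the allocation lock (LOCK/UNLOCK in the patch), it can be called from any thread once the collector is initialized.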