/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

#if !defined(MACOS) && !defined(MSWINCE)
# include <sys/types.h>
#endif

/*
 * Separate free lists are maintained for different sized objects
 * and kinds of objects.
 *
 * The call GC_allocobj(i,k) ensures that the freelist for
 * kind k objects of size i points to a non-empty
 * free list.  It returns a pointer to the first entry on the free list.
 * In a single-threaded world, GC_allocobj may be called to allocate
 * an object of (small) size i as follows:
 *
 *   opp = &(GC_objfreelist[i]);
 *   if (*opp == 0) GC_allocobj(i, NORMAL);
 *   ptr = *opp;
 *   *opp = obj_link(ptr);
 *
 * Note that this is very fast if the free list is non-empty; it should
 * only involve the execution of 4 or 5 simple instructions.
 * All composite objects on freelists are cleared, except for
 * their first word.
 *
 * The allocator uses GC_allochblk to allocate large chunks of objects.
 * These chunks all start on addresses which are multiples of
 * HBLKSZ.  Each allocated chunk has an associated header,
 * which can be located quickly based on the address of the chunk.
 * (See headers.c for details.)
 * This makes it possible to check quickly whether an
 * arbitrary address corresponds to an object administered by the
 * allocator.
 */
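
/*
 * A minimal usage sketch (client-side, not part of the collector),
 * assuming the single-threaded NORMAL-kind setup described above; the
 * helper name alloc_small is hypothetical.  GC_allocobj returns 0 on
 * failure, so a careful caller re-checks before unlinking:
 *
 *   static void *alloc_small(size_t i)
 *   {
 *       void **opp = &(GC_objfreelist[i]);
 *       void *ptr;
 *
 *       if (*opp == 0 && GC_allocobj(i, NORMAL) == 0)
 *           return 0;                        (heap exhausted)
 *       ptr = *opp;
 *       *opp = obj_link(ptr);                (unlink the first entry)
 *       return ptr;
 *   }
 */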
word GC_non_gc_bytes = 0;  /* Number of bytes not intended to be collected */

#ifndef GC_DISABLE_INCREMENTAL
  GC_INNER int GC_incremental = 0;      /* By default, stop the world. */
#endif

int GC_parallel = FALSE;        /* By default, parallel GC is off.      */

#ifndef GC_FULL_FREQ
# define GC_FULL_FREQ 19   /* Every 20th collection is a full      */
                           /* collection, whether we need it       */
                           /* or not.                              */
#endif

int GC_full_freq = GC_FULL_FREQ;

STATIC GC_bool GC_need_full_gc = FALSE;
                           /* Need full GC due to heap growth.     */

#ifdef THREAD_LOCAL_ALLOC
  GC_INNER GC_bool GC_world_stopped = FALSE;
#endif

STATIC word GC_used_heap_size_after_full = 0;

/* GC_copyright symbol is externally visible. */
char * const GC_copyright[] =
{"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers ",
"Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved. ",
"Copyright (c) 1996-1998 by Silicon Graphics.  All rights reserved. ",
"Copyright (c) 1999-2009 by Hewlett-Packard Company.  All rights reserved. ",
"THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY",
" EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.",
"See source code for details." };

/* Version macros are now defined in gc_version.h, which is included by */
/* gc.h, which is included by gc_priv.h.                                */
#ifndef GC_NO_VERSION_VAR
  const unsigned GC_version = ((GC_VERSION_MAJOR << 16) |
                  (GC_VERSION_MINOR << 8) | GC_TMP_ALPHA_VERSION);
#endif

GC_API unsigned GC_CALL GC_get_version(void)
{
  return (GC_VERSION_MAJOR << 16) | (GC_VERSION_MINOR << 8) |
          GC_TMP_ALPHA_VERSION;
}
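
/*
 * Example (client code): decoding the packed value returned by
 * GC_get_version().  The field layout follows directly from the
 * expression above; see gc_version.h for the alpha-byte encoding.
 *
 *   unsigned v = GC_get_version();
 *   unsigned major = v >> 16;
 *   unsigned minor = (v >> 8) & 0xff;
 *   unsigned alpha = v & 0xff;
 */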
/* some more variables */

#ifdef GC_DONT_EXPAND
  GC_bool GC_dont_expand = TRUE;
#else
  GC_bool GC_dont_expand = FALSE;
#endif

#ifndef GC_FREE_SPACE_DIVISOR
# define GC_FREE_SPACE_DIVISOR 3 /* must be > 0 */
#endif

word GC_free_space_divisor = GC_FREE_SPACE_DIVISOR;

GC_INNER int GC_CALLBACK GC_never_stop_func(void)
{
  return(0);
}

#ifndef GC_TIME_LIMIT
# define GC_TIME_LIMIT 50  /* We try to keep pause times from exceeding */
                           /* this by much.  In milliseconds.           */
#endif

unsigned long GC_time_limit = GC_TIME_LIMIT;

STATIC CLOCK_TYPE GC_start_time = 0;
                           /* Time at which we stopped world.           */
                           /* Used only in GC_timeout_stop_func.        */

STATIC int GC_n_attempts = 0;   /* Number of attempts at finishing      */
                                /* collection within GC_time_limit.     */

STATIC GC_stop_func GC_default_stop_func = GC_never_stop_func;
                                /* Accessed holding the lock.           */

GC_API void GC_CALL GC_set_stop_func(GC_stop_func stop_func)
{
  DCL_LOCK_STATE;
  GC_ASSERT(stop_func != 0);
  LOCK();
  GC_default_stop_func = stop_func;
  UNLOCK();
}

GC_API GC_stop_func GC_CALL GC_get_stop_func(void)
{
  GC_stop_func stop_func;
  DCL_LOCK_STATE;
  LOCK();
  stop_func = GC_default_stop_func;
  UNLOCK();
  return stop_func;
}
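
/*
 * Example (client code): a stop function that abandons collection work
 * once a caller-maintained deadline flag is set.  Only GC_CALLBACK and
 * GC_set_stop_func come from the public API; the rest is hypothetical.
 *
 *   static volatile int deadline_passed = 0;
 *
 *   static int GC_CALLBACK my_stop_func(void)
 *   {
 *       return deadline_passed;      (nonzero abandons stopped marking)
 *   }
 *
 *   ...
 *   GC_set_stop_func(my_stop_func);
 */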
#if defined(GC_DISABLE_INCREMENTAL) || defined(NO_CLOCK)
# define GC_timeout_stop_func GC_default_stop_func
#else
  STATIC int GC_CALLBACK GC_timeout_stop_func (void)
  {
    CLOCK_TYPE current_time;
    static unsigned count = 0;
    unsigned long time_diff;

    if ((*GC_default_stop_func)())
      return(1);

    if ((count++ & 3) != 0) return(0);
    GET_TIME(current_time);
    time_diff = MS_TIME_DIFF(current_time,GC_start_time);
    if (time_diff >= GC_time_limit) {
        if (GC_print_stats) {
          GC_log_printf(
                "Abandoning stopped marking after %lu msecs (attempt %d)\n",
                time_diff, GC_n_attempts);
        }
        return(1);
    }
    return(0);
  }
#endif /* !GC_DISABLE_INCREMENTAL */
GC_INNER word GC_total_stacksize = 0; /* updated on every push_all_stacks */

/* Return the minimum number of bytes that must be allocated between    */
/* collections to amortize the collection cost.                         */
static word min_bytes_allocd(void)
{
    int dummy; /* GC_stackbottom is used only for a single-threaded case. */
#   ifdef STACK_GROWS_UP
      word stack_size = (ptr_t)(&dummy) - GC_stackbottom;
#   else
      word stack_size = GC_stackbottom - (ptr_t)(&dummy);
#   endif

    word total_root_size;       /* includes double stack size,           */
                                /* since the stack is expensive to scan. */
    word scan_size;             /* Estimate of memory to be scanned      */
                                /* during normal GC.                     */

    if (GC_need_to_lock) {
      /* We are multi-threaded... */
      stack_size = GC_total_stacksize;
      /* For now, we just use the value computed during the latest GC. */
#     ifdef DEBUG_THREADS
        GC_printf("Total stacks size: %lu\n", (unsigned long)stack_size);
#     endif
    }

    total_root_size = 2 * stack_size + GC_root_size;
    scan_size = 2 * GC_composite_in_use + GC_atomic_in_use / 4
                + total_root_size;
    if (GC_incremental) {
        return scan_size / (2 * GC_free_space_divisor);
    } else {
        return scan_size / GC_free_space_divisor;
    }
}
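
/*
 * Worked example (figures are illustrative only): with 8 MB of composite
 * data, 4 MB of atomic data, 1 MB of roots and a 64 KB stack,
 *   scan_size = 2*8MB + 4MB/4 + (2*64KB + 1MB) ~= 18.1 MB.
 * With the default GC_free_space_divisor of 3, a non-incremental
 * configuration would therefore allocate roughly 6 MB between
 * collections; an incremental one roughly half that.
 */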
/* Return the number of bytes allocated, adjusted for explicit storage  */
/* management, etc.  This number is used in deciding when to trigger    */
/* collections.                                                         */
STATIC word GC_adj_bytes_allocd(void)
{
    signed_word result;
    signed_word expl_managed =
                (signed_word)GC_non_gc_bytes
                - (signed_word)GC_non_gc_bytes_at_gc;

    /* Don't count what was explicitly freed, or newly allocated for    */
    /* explicit management.  Note that deallocating an explicitly       */
    /* managed object should not alter result, assuming the client      */
    /* is playing by the rules.                                         */
    result = (signed_word)GC_bytes_allocd
             + (signed_word)GC_bytes_dropped
             - (signed_word)GC_bytes_freed
             + (signed_word)GC_finalizer_bytes_freed
             - expl_managed;
    if (result > (signed_word)GC_bytes_allocd) {
        result = GC_bytes_allocd;
        /* probably client bug or unfortunate scheduling */
    }
    result += GC_bytes_finalized;
        /* We count objects enqueued for finalization as though they    */
        /* had been reallocated this round.  Finalization is user       */
        /* visible progress.  And if we don't count this, we have       */
        /* stability problems for programs that finalize all objects.   */
    if (result < (signed_word)(GC_bytes_allocd >> 3)) {
        /* Always count at least 1/8 of the allocations.  We don't want */
        /* to collect too infrequently, since that would inhibit        */
        /* coalescing of free storage blocks.                           */
        /* This also makes us partially robust against client bugs.     */
        return(GC_bytes_allocd >> 3);
    } else {
        return(result);
    }
}
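
/*
 * Worked example (illustrative figures): if 10 MB were allocated, 1 MB
 * dropped, 9 MB explicitly freed and nothing else changed, the adjusted
 * total would be 10 + 1 - 9 = 2 MB, which is above the 10/8 = 1.25 MB
 * floor and is therefore returned as is; had it fallen below 1.25 MB,
 * the floor would be returned instead.
 */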
/* Clear up a few frames worth of garbage left at the top of the stack. */
/* This is used to prevent us from accidentally treating garbage left   */
/* on the stack by other parts of the collector as roots.  This         */
/* differs from the code in misc.c, which actually tries to keep the    */
/* stack clear of long-lived, client-generated garbage.                 */
STATIC void GC_clear_a_few_frames(void)
{
#   ifndef CLEAR_NWORDS
#     define CLEAR_NWORDS 64
#   endif
    volatile word frames[CLEAR_NWORDS];
    int i;

    for (i = 0; i < CLEAR_NWORDS; i++) frames[i] = 0;
}

/* Heap size at which we need a collection to avoid expanding past      */
/* limits used by blacklisting.                                         */
STATIC word GC_collect_at_heapsize = (word)(-1);

/* Have we allocated enough to amortize a collection? */
GC_INNER GC_bool GC_should_collect(void)
{
    static word last_min_bytes_allocd;
    static word last_gc_no;
    if (last_gc_no != GC_gc_no) {
      last_gc_no = GC_gc_no;
      last_min_bytes_allocd = min_bytes_allocd();
    }
    return(GC_adj_bytes_allocd() >= last_min_bytes_allocd
           || GC_heapsize >= GC_collect_at_heapsize);
}

/* STATIC */ void (*GC_start_call_back) (void) = 0;
                        /* Called at start of full collections.         */
                        /* Not called if 0.  Called with the allocation */
                        /* lock held.  Not used by GC itself.           */

GC_INLINE void GC_notify_full_gc(void)
{
    if (GC_start_call_back != 0) {
        (*GC_start_call_back)();
    }
}
STATIC GC_bool GC_is_full_gc = FALSE;

STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func);
STATIC void GC_finish_collection(void);

/*
 * Initiate a garbage collection if appropriate.  Choose judiciously
 * between partial, full, and stop-world collections.
 */
STATIC void GC_maybe_gc(void)
{
    static int n_partial_gcs = 0;

    GC_ASSERT(I_HOLD_LOCK());
    ASSERT_CANCEL_DISABLED();
    if (GC_should_collect()) {
        if (!GC_incremental) {
            /* FIXME: If possible, GC_default_stop_func should be used here */
            GC_try_to_collect_inner(GC_never_stop_func);
            n_partial_gcs = 0;
            return;
        } else {
#         ifdef PARALLEL_MARK
            if (GC_parallel)
              GC_wait_for_reclaim();
#         endif
          if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
            if (GC_print_stats) {
              GC_log_printf(
                  "***>Full mark for collection %lu after %ld allocd bytes\n",
                  (unsigned long)GC_gc_no + 1,
                  (long)GC_bytes_allocd);
            }
            GC_promote_black_lists();
            (void)GC_reclaim_all((GC_stop_func)0, TRUE);
            GC_notify_full_gc();
            GC_clear_marks();
            n_partial_gcs = 0;
            GC_is_full_gc = TRUE;
          } else {
            n_partial_gcs++;
          }
        }
        /* We try to mark with the world stopped.       */
        /* If we run out of time, this turns into       */
        /* incremental marking.                         */
#       ifndef NO_CLOCK
          if (GC_time_limit != GC_TIME_UNLIMITED) { GET_TIME(GC_start_time); }
#       endif
        /* FIXME: If possible, GC_default_stop_func should be           */
        /* used instead of GC_never_stop_func here.                     */
        if (GC_stopped_mark(GC_time_limit == GC_TIME_UNLIMITED?
                            GC_never_stop_func : GC_timeout_stop_func)) {
#           ifdef SAVE_CALL_CHAIN
              GC_save_callers(GC_last_stack);
#           endif
            GC_finish_collection();
        } else {
            if (!GC_is_full_gc) {
                /* Count this as the first attempt */
                GC_n_attempts++;
            }
        }
    }
}
/*
 * Stop the world garbage collection.  Assumes lock held.  If stop_func is
 * not GC_never_stop_func then abort if stop_func returns TRUE.
 * Return TRUE if we successfully completed the collection.
 */
GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
{
#   ifndef SMALL_CONFIG
      CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
      CLOCK_TYPE current_time;
#   endif
    ASSERT_CANCEL_DISABLED();
    if (GC_dont_gc || (*stop_func)()) return FALSE;
    if (GC_incremental && GC_collection_in_progress()) {
      if (GC_print_stats) {
        GC_log_printf(
            "GC_try_to_collect_inner: finishing collection in progress\n");
      }
      /* Just finish collection already in progress. */
      while(GC_collection_in_progress()) {
          if ((*stop_func)()) return(FALSE);
          GC_collect_a_little_inner(1);
      }
    }
    if (stop_func == GC_never_stop_func) GC_notify_full_gc();
#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        GET_TIME(start_time);
        GC_log_printf("Initiating full world-stop collection!\n");
      }
#   endif
    GC_promote_black_lists();
    /* Make sure all blocks have been reclaimed, so sweep routines      */
    /* don't see cleared mark bits.                                     */
    /* If we're guaranteed to finish, then this is unnecessary.         */
    /* In the find_leak case, we have to finish to guarantee that       */
    /* previously unmarked objects are not reported as leaks.           */
#   ifdef PARALLEL_MARK
      if (GC_parallel)
        GC_wait_for_reclaim();
#   endif
    if ((GC_find_leak || stop_func != GC_never_stop_func)
        && !GC_reclaim_all(stop_func, FALSE)) {
        /* Aborted.  So far everything is still consistent. */
        return(FALSE);
    }
    GC_invalidate_mark_state();  /* Flush mark stack. */
    GC_clear_marks();
#   ifdef SAVE_CALL_CHAIN
      GC_save_callers(GC_last_stack);
#   endif
    GC_is_full_gc = TRUE;
    if (!GC_stopped_mark(stop_func)) {
      if (!GC_incremental) {
        /* We're partially done and have no way to complete or use      */
        /* current work.  Reestablish invariants as cheaply as          */
        /* possible.                                                    */
        GC_invalidate_mark_state();
        GC_unpromote_black_lists();
      } /* else we claim the world is already still consistent.  We'll  */
        /* finish incrementally.                                        */
      return(FALSE);
    }
    GC_finish_collection();
#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        GET_TIME(current_time);
        GC_log_printf("Complete collection took %lu msecs\n",
                      MS_TIME_DIFF(current_time,start_time));
      }
#   endif
    return(TRUE);
}
/*
 * Perform n units of garbage collection work.  A unit is intended to touch
 * roughly GC_RATE pages.  Every once in a while, we do more than that.
 * This needs to be a fairly large number with our current incremental
 * GC strategy, since otherwise we allocate too much during GC, and the
 * cleanup gets expensive.
 */
#ifndef GC_RATE
# define GC_RATE 10
#endif
#ifndef MAX_PRIOR_ATTEMPTS
# define MAX_PRIOR_ATTEMPTS 1
#endif
        /* Maximum number of prior attempts at world stop marking.      */
        /* A value of 1 means that we finish the second time, no matter */
        /* how long it takes.  Doesn't count the initial root scan      */
        /* for a full GC.                                               */

STATIC int GC_deficit = 0; /* The number of extra calls to GC_mark_some */
                           /* that we have made.                        */

GC_INNER void GC_collect_a_little_inner(int n)
{
    int i;
    IF_CANCEL(int cancel_state;)

    if (GC_dont_gc) return;
    DISABLE_CANCEL(cancel_state);
    if (GC_incremental && GC_collection_in_progress()) {
        for (i = GC_deficit; i < GC_RATE*n; i++) {
            if (GC_mark_some((ptr_t)0)) {
                /* Need to finish a collection */
#               ifdef SAVE_CALL_CHAIN
                  GC_save_callers(GC_last_stack);
#               endif
#               ifdef PARALLEL_MARK
                  if (GC_parallel)
                    GC_wait_for_reclaim();
#               endif
                if (GC_n_attempts < MAX_PRIOR_ATTEMPTS
                    && GC_time_limit != GC_TIME_UNLIMITED) {
#                 ifndef NO_CLOCK
                    GET_TIME(GC_start_time);
#                 endif
                  if (!GC_stopped_mark(GC_timeout_stop_func)) {
                    GC_n_attempts++;
                    break;
                  }
                } else {
                  /* FIXME: If possible, GC_default_stop_func should be */
                  /* used here.                                         */
                  (void)GC_stopped_mark(GC_never_stop_func);
                }
                GC_finish_collection();
                break;
            }
        }
        if (GC_deficit > 0) GC_deficit -= GC_RATE*n;
        if (GC_deficit < 0) GC_deficit = 0;
    } else {
        GC_maybe_gc();
    }
    RESTORE_CANCEL(cancel_state);
}

GC_API int GC_CALL GC_collect_a_little(void)
{
    int result;
    DCL_LOCK_STATE;

    LOCK();
    GC_collect_a_little_inner(1);
    result = (int)GC_collection_in_progress();
    UNLOCK();
    if (!result && GC_debugging_started) GC_print_all_smashed();
    return(result);
}
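
/*
 * Typical client usage (sketch): drive incremental collection from an
 * application idle loop, one unit at a time.  app_is_idle is
 * hypothetical; GC_collect_a_little returns 0 once no collection is in
 * progress.
 *
 *   while (app_is_idle()) {
 *       if (!GC_collect_a_little()) break;
 *   }
 */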
#if !defined(REDIRECT_MALLOC) && (defined(MSWIN32) || defined(MSWINCE))
  GC_INNER void GC_add_current_malloc_heap(void);
#endif

#ifdef MAKE_BACK_GRAPH
  GC_INNER void GC_build_back_graph(void);
#endif

/* Variables for world-stop average delay time statistic computation.  */
/* "divisor" is incremented on every world stop and halved when it     */
/* reaches its maximum (or upon "total_time" overflow).                */
static unsigned world_stopped_total_time = 0;
static unsigned world_stopped_total_divisor = 0;
#ifndef MAX_TOTAL_TIME_DIVISOR
  /* We shall not use big values here (so "outdated" delay time values */
  /* would have less impact on the "average" delay time value than     */
  /* newer ones).                                                      */
# define MAX_TOTAL_TIME_DIVISOR 1000
#endif
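
/*
 * Worked example of the running average (illustrative): after stops of
 * 12, 8 and 10 msecs, total_time = 30 and divisor = 3, so the reported
 * average is 10 msecs.  Once divisor reaches MAX_TOTAL_TIME_DIVISOR (or
 * total_time would overflow), both are halved, so old stops gradually
 * lose weight relative to recent ones.
 */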
/*
 * Assumes lock is held.  We stop the world and mark from all roots.
 * If stop_func() ever returns TRUE, we may fail and return FALSE.
 * Increment GC_gc_no if we succeed.
 */
STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func)
{
    unsigned i;
    int dummy;
#   ifndef SMALL_CONFIG
      CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
      CLOCK_TYPE current_time;
#   endif

#   if !defined(REDIRECT_MALLOC) && (defined(MSWIN32) || defined(MSWINCE))
      GC_add_current_malloc_heap();
#   endif
#   if defined(REGISTER_LIBRARIES_EARLY)
      GC_cond_register_dynamic_libraries();
#   endif

#   ifndef SMALL_CONFIG
      if (GC_print_stats)
        GET_TIME(start_time);
#   endif

    STOP_WORLD();
#   ifdef THREAD_LOCAL_ALLOC
      GC_world_stopped = TRUE;
#   endif
    if (GC_print_stats) {
        /* Output blank line for convenience here */
        GC_log_printf(
              "\n--> Marking for collection %lu after %lu allocated bytes\n",
              (unsigned long)GC_gc_no + 1, (unsigned long) GC_bytes_allocd);
    }
#   ifdef MAKE_BACK_GRAPH
      if (GC_print_back_height) {
        GC_build_back_graph();
      }
#   endif

    /* Mark from all roots. */
        /* Minimize junk left in my registers and on the stack */
        GC_clear_a_few_frames();
        GC_noop(0,0,0,0,0,0);
        GC_initiate_gc();
        for (i = 0;;i++) {
          if ((*stop_func)()) {
            if (GC_print_stats) {
              GC_log_printf("Abandoned stopped marking after "
                            "%u iterations\n", i);
            }
            GC_deficit = i;     /* Give the mutator a chance. */
#           ifdef THREAD_LOCAL_ALLOC
              GC_world_stopped = FALSE;
#           endif
            START_WORLD();
            return(FALSE);
          }
          if (GC_mark_some((ptr_t)(&dummy))) break;
        }

    GC_gc_no++;
    if (GC_print_stats) {
      GC_log_printf(
             "Collection %lu reclaimed %ld bytes ---> heapsize = %lu bytes\n",
             (unsigned long)(GC_gc_no - 1), (long)GC_bytes_found,
             (unsigned long)GC_heapsize);
    }

    /* Check all debugged objects for consistency */
    if (GC_debugging_started) {
        (*GC_check_heap)();
    }

#   ifdef THREAD_LOCAL_ALLOC
      GC_world_stopped = FALSE;
#   endif
    START_WORLD();
#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        unsigned long time_diff;
        unsigned total_time, divisor;
        GET_TIME(current_time);
        time_diff = MS_TIME_DIFF(current_time,start_time);

        /* Compute new world-stop delay total time */
        total_time = world_stopped_total_time;
        divisor = world_stopped_total_divisor;
        if ((int)total_time < 0 || divisor >= MAX_TOTAL_TIME_DIVISOR) {
          /* Halve values if overflow occurs */
          total_time >>= 1;
          divisor >>= 1;
        }
        total_time += time_diff < (((unsigned)-1) >> 1) ?
                        (unsigned)time_diff : ((unsigned)-1) >> 1;
        /* Update old world_stopped_total_time and its divisor */
        world_stopped_total_time = total_time;
        world_stopped_total_divisor = ++divisor;

        GC_ASSERT(divisor != 0);
        GC_log_printf(
                "World-stopped marking took %lu msecs (%u on average)\n",
                time_diff, total_time / divisor);
      }
#   endif
    return(TRUE);
}
/* Set all mark bits for the free list whose first entry is q. */
GC_INNER void GC_set_fl_marks(ptr_t q)
{
   struct hblk *h, *last_h;
   hdr *hhdr;
   IF_PER_OBJ(size_t sz;)
   unsigned bit_no;

   if (q != NULL) {
     h = HBLKPTR(q);
     last_h = h;
     hhdr = HDR(h);
     IF_PER_OBJ(sz = hhdr->hb_sz;)

     for (;;) {
        bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
        if (!mark_bit_from_hdr(hhdr, bit_no)) {
          set_mark_bit_from_hdr(hhdr, bit_no);
          ++hhdr -> hb_n_marks;
        }

        q = obj_link(q);
        if (q == NULL)
          break;

        h = HBLKPTR(q);
        if (h != last_h) {
          last_h = h;
          hhdr = HDR(h);
          IF_PER_OBJ(sz = hhdr->hb_sz;)
        }
     }
   }
}

/* Check that all mark bits for the free list whose first entry is q    */
/* are set.                                                             */
void GC_check_fl_marks(ptr_t q)
{
   ptr_t p;
   for (p = q; p != 0; p = obj_link(p)) {
       if (!GC_is_marked(p)) {
           GC_err_printf("Unmarked object %p on list %p\n", p, q);
           ABORT("Unmarked local free list entry.");
       }
   }
}

/* Clear all mark bits for the free list whose first entry is q.        */
/* Decrement GC_bytes_found by number of bytes on free list.            */
STATIC void GC_clear_fl_marks(ptr_t q)
{
   struct hblk *h, *last_h;
   hdr *hhdr;
   size_t sz;
   unsigned bit_no;

   if (q != NULL) {
     h = HBLKPTR(q);
     last_h = h;
     hhdr = HDR(h);
     sz = hhdr->hb_sz;  /* Normally set only once. */

     for (;;) {
        bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
        if (mark_bit_from_hdr(hhdr, bit_no)) {
          size_t n_marks = hhdr -> hb_n_marks - 1;
          clear_mark_bit_from_hdr(hhdr, bit_no);
#         ifdef PARALLEL_MARK
            /* Appr. count, don't decrement to zero! */
            if (0 != n_marks || !GC_parallel) {
              hhdr -> hb_n_marks = n_marks;
            }
#         else
            hhdr -> hb_n_marks = n_marks;
#         endif
        }
        GC_bytes_found -= sz;

        q = obj_link(q);
        if (q == NULL)
          break;

        h = HBLKPTR(q);
        if (h != last_h) {
          last_h = h;
          hhdr = HDR(h);
          sz = hhdr->hb_sz;
        }
     }
   }
}
#if defined(GC_ASSERTIONS) && defined(THREADS) && defined(THREAD_LOCAL_ALLOC)
  void GC_check_tls(void);
#endif

#ifdef MAKE_BACK_GRAPH
  GC_INNER void GC_traverse_back_graph(void);
#endif

/* Finish up a collection.  Assumes mark bits are consistent, lock is   */
/* held, but the world is otherwise running.                            */
STATIC void GC_finish_collection(void)
{
#   ifndef SMALL_CONFIG
      CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
      CLOCK_TYPE finalize_time = 0;
      CLOCK_TYPE done_time;
#   endif

#   if defined(GC_ASSERTIONS) && defined(THREADS) \
       && defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
        /* Check that we marked some of our own data. */
        /* FIXME: Add more checks.                    */
        GC_check_tls();
#   endif

#   ifndef SMALL_CONFIG
      if (GC_print_stats)
        GET_TIME(start_time);
#   endif

    GC_bytes_found = 0;
#   if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
      if (GETENV("GC_PRINT_ADDRESS_MAP") != 0) {
        GC_print_address_map();
      }
#   endif
    COND_DUMP;
    if (GC_find_leak) {
      /* Mark all objects on the free list.  All objects should be      */
      /* marked when we're done.                                        */
      word size;        /* current object size */
      unsigned kind;
      ptr_t q;

      for (kind = 0; kind < GC_n_kinds; kind++) {
        for (size = 1; size <= MAXOBJGRANULES; size++) {
          q = GC_obj_kinds[kind].ok_freelist[size];
          if (q != 0) GC_set_fl_marks(q);
        }
      }
      GC_start_reclaim(TRUE);
        /* The above just checks; it doesn't really reclaim anything. */
    }

    GC_finalize();
#   ifdef STUBBORN_ALLOC
      GC_clean_changing_list();
#   endif

#   ifndef SMALL_CONFIG
      if (GC_print_stats)
        GET_TIME(finalize_time);
#   endif

    if (GC_print_back_height) {
#     ifdef MAKE_BACK_GRAPH
        GC_traverse_back_graph();
#     else
#       ifndef SMALL_CONFIG
          GC_err_printf("Back height not available: "
                        "Rebuild collector with -DMAKE_BACK_GRAPH\n");
#       endif
#     endif
    }

    /* Clear free list mark bits, in case they got accidentally marked   */
    /* (or GC_find_leak is set and they were intentionally marked).      */
    /* Also subtract memory remaining from GC_bytes_found count.         */
    /* Note that composite objects on free list are cleared.             */
    /* Thus accidentally marking a free list is not a problem; only      */
    /* objects on the list itself will be marked, and that's fixed here. */
    {
      word size;        /* current object size          */
      ptr_t q;          /* pointer to current object    */
      unsigned kind;

      for (kind = 0; kind < GC_n_kinds; kind++) {
        for (size = 1; size <= MAXOBJGRANULES; size++) {
          q = GC_obj_kinds[kind].ok_freelist[size];
          if (q != 0) GC_clear_fl_marks(q);
        }
      }
    }

    if (GC_print_stats == VERBOSE)
        GC_log_printf("Bytes recovered before sweep - f.l. count = %ld\n",
                      (long)GC_bytes_found);

    /* Reconstruct free lists to contain everything not marked */
    GC_start_reclaim(FALSE);
    if (GC_print_stats) {
      GC_log_printf("Heap contains %lu pointer-containing "
                    "+ %lu pointer-free reachable bytes\n",
                    (unsigned long)GC_composite_in_use,
                    (unsigned long)GC_atomic_in_use);
    }
    if (GC_is_full_gc) {
        GC_used_heap_size_after_full = USED_HEAP_SIZE;
        GC_need_full_gc = FALSE;
    } else {
        GC_need_full_gc =
                USED_HEAP_SIZE - GC_used_heap_size_after_full
                > min_bytes_allocd();
    }

    if (GC_print_stats == VERBOSE) {
#     ifdef USE_MUNMAP
        GC_log_printf("Immediately reclaimed %ld bytes in heap"
                      " of size %lu bytes (%lu unmapped)\n",
                      (long)GC_bytes_found, (unsigned long)GC_heapsize,
                      (unsigned long)GC_unmapped_bytes);
#     else
        GC_log_printf("Immediately reclaimed %ld bytes in heap"
                      " of size %lu bytes\n",
                      (long)GC_bytes_found, (unsigned long)GC_heapsize);
#     endif
    }

    /* Reset or increment counters for next cycle */
    GC_n_attempts = 0;
    GC_is_full_gc = FALSE;
    GC_bytes_allocd_before_gc += GC_bytes_allocd;
    GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
    GC_bytes_allocd = 0;
    GC_bytes_dropped = 0;
    GC_bytes_freed = 0;
    GC_finalizer_bytes_freed = 0;

#   ifdef USE_MUNMAP
      GC_unmap_old();
#   endif

#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        GET_TIME(done_time);
#       ifndef GC_NO_FINALIZATION
          /* A convenient place to output finalization statistics. */
          GC_print_finalization_stats();
#       endif
        GC_log_printf("Finalize + initiate sweep took %lu + %lu msecs\n",
                      MS_TIME_DIFF(finalize_time,start_time),
                      MS_TIME_DIFF(done_time,finalize_time));
      }
#   endif
}
/* If stop_func == 0 then GC_default_stop_func is used instead.         */
STATIC GC_bool GC_try_to_collect_general(GC_stop_func stop_func,
                                         GC_bool force_unmap)
{
    GC_bool result;
#   ifdef USE_MUNMAP
      int old_unmap_threshold;
#   endif
    IF_CANCEL(int cancel_state;)
    DCL_LOCK_STATE;

    if (!GC_is_initialized) GC_init();
    if (GC_debugging_started) GC_print_all_smashed();
    GC_INVOKE_FINALIZERS();
    LOCK();
    DISABLE_CANCEL(cancel_state);
#   ifdef USE_MUNMAP
      old_unmap_threshold = GC_unmap_threshold;
      if (force_unmap ||
          (GC_force_unmap_on_gcollect && old_unmap_threshold > 0))
        GC_unmap_threshold = 1; /* unmap as much as possible */
#   endif
    ENTER_GC();
    /* Minimize junk left in my registers */
      GC_noop(0,0,0,0,0,0);
    result = GC_try_to_collect_inner(stop_func != 0 ? stop_func :
                                     GC_default_stop_func);
    EXIT_GC();
#   ifdef USE_MUNMAP
      GC_unmap_threshold = old_unmap_threshold; /* restore */
#   endif
    RESTORE_CANCEL(cancel_state);
    UNLOCK();
    if (result) {
      if (GC_debugging_started) GC_print_all_smashed();
      GC_INVOKE_FINALIZERS();
    }
    return(result);
}

/* Externally callable routines to invoke full, stop-the-world collection. */
GC_API int GC_CALL GC_try_to_collect(GC_stop_func stop_func)
{
    GC_ASSERT(stop_func != 0);
    return (int)GC_try_to_collect_general(stop_func, FALSE);
}

GC_API void GC_CALL GC_gcollect(void)
{
    /* 0 is passed as stop_func to get GC_default_stop_func value       */
    /* while holding the allocation lock (to prevent data races).       */
    (void)GC_try_to_collect_general(0, FALSE);
    if (GC_have_errors) GC_print_all_errors();
}

GC_API void GC_CALL GC_gcollect_and_unmap(void)
{
    (void)GC_try_to_collect_general(GC_never_stop_func, TRUE);
}
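
/*
 * Typical client usage (sketch): force a full, stop-the-world
 * collection; the second form additionally returns as much unused
 * memory as possible to the OS (where unmapping is supported).
 *
 *   GC_gcollect();
 *   GC_gcollect_and_unmap();
 */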
GC_INNER word GC_n_heap_sects = 0;
                        /* Number of sections currently in heap. */

#ifdef USE_PROC_FOR_LIBRARIES
  GC_INNER word GC_n_memory = 0;
                        /* Number of GET_MEM allocated memory sections. */
#endif

#ifdef USE_PROC_FOR_LIBRARIES
  /* Add HBLKSIZE aligned, GET_MEM-generated block to GC_our_memory. */
  /* Defined to do nothing if USE_PROC_FOR_LIBRARIES not set.        */
  GC_INNER void GC_add_to_our_memory(ptr_t p, size_t bytes)
  {
    if (0 == p) return;
    if (GC_n_memory >= MAX_HEAP_SECTS)
      ABORT("Too many GC-allocated memory sections: Increase MAX_HEAP_SECTS");
    GC_our_memory[GC_n_memory].hs_start = p;
    GC_our_memory[GC_n_memory].hs_bytes = bytes;
    GC_n_memory++;
  }
#endif

/*
 * Use the chunk of memory starting at p of size bytes as part of the heap.
 * Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
 */
GC_INNER void GC_add_to_heap(struct hblk *p, size_t bytes)
{
    hdr * phdr;
    word endp;

    if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
        ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
    }
    while ((word)p <= HBLKSIZE) {
        /* Can't handle memory near address zero. */
        ++p;
        bytes -= HBLKSIZE;
        if (0 == bytes) return;
    }
    endp = (word)p + bytes;
    if (endp <= (word)p) {
        /* Address wrapped. */
        bytes -= HBLKSIZE;
        if (0 == bytes) return;
        endp -= HBLKSIZE;
    }
    phdr = GC_install_header(p);
    if (0 == phdr) {
        /* This is extremely unlikely.  Can't add it.  This will        */
        /* almost certainly result in a 0 return from the allocator,    */
        /* which is entirely appropriate.                               */
        return;
    }
    GC_ASSERT(endp > (word)p && endp == (word)p + bytes);
    GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
    GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
    GC_n_heap_sects++;
    phdr -> hb_sz = bytes;
    phdr -> hb_flags = 0;
    GC_freehblk(p);
    GC_heapsize += bytes;
    if ((ptr_t)p <= (ptr_t)GC_least_plausible_heap_addr
        || GC_least_plausible_heap_addr == 0) {
        GC_least_plausible_heap_addr = (void *)((ptr_t)p - sizeof(word));
                /* Making it a little smaller than necessary prevents  */
                /* us from getting a false hit from the variable       */
                /* itself.  There's some unintentional reflection      */
                /* here.                                               */
    }
    if ((ptr_t)p + bytes >= (ptr_t)GC_greatest_plausible_heap_addr) {
        GC_greatest_plausible_heap_addr = (void *)endp;
    }
}
#if !defined(NO_DEBUGGING)
  void GC_print_heap_sects(void)
  {
    unsigned i;

    GC_printf("Total heap size: %lu\n", (unsigned long) GC_heapsize);
    for (i = 0; i < GC_n_heap_sects; i++) {
      ptr_t start = GC_heap_sects[i].hs_start;
      size_t len = GC_heap_sects[i].hs_bytes;
      struct hblk *h;
      unsigned nbl = 0;

      for (h = (struct hblk *)start; h < (struct hblk *)(start + len); h++) {
        if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
      }
      GC_printf("Section %u from %p to %p %lu/%lu blacklisted\n",
                i, start, start + len,
                (unsigned long)nbl, (unsigned long)(len/HBLKSIZE));
    }
  }
#endif

void * GC_least_plausible_heap_addr = (void *)ONES;
void * GC_greatest_plausible_heap_addr = 0;

GC_INLINE word GC_max(word x, word y)
{
    return(x > y? x : y);
}

GC_INLINE word GC_min(word x, word y)
{
    return(x < y? x : y);
}

GC_API void GC_CALL GC_set_max_heap_size(GC_word n)
{
    GC_max_heapsize = n;
}
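
/*
 * Example (client code): cap the heap at 256 MiB before any allocation.
 * Once the cap is reached the collector prefers collecting to expanding,
 * and allocations that still cannot be satisfied return 0.
 *
 *   GC_set_max_heap_size((GC_word)256 << 20);
 */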
GC_word GC_max_retries = 0;

/*
 * This explicitly increases the size of the heap.  It is used
 * internally, but may also be invoked from GC_expand_hp by the user.
 * The argument is in units of HBLKSIZE.
 * Tiny values of n are rounded up.
 * Returns FALSE on failure.
 */
GC_INNER GC_bool GC_expand_hp_inner(word n)
{
    word bytes;
    struct hblk * space;
    word expansion_slop;        /* Number of bytes by which we expect the */
                                /* heap to expand soon.                   */

    if (n < MINHINCR) n = MINHINCR;
    bytes = n * HBLKSIZE;
    /* Make sure bytes is a multiple of GC_page_size */
    {
        word mask = GC_page_size - 1;
        bytes += mask;
        bytes &= ~mask;
    }

    if (GC_max_heapsize != 0 && GC_heapsize + bytes > GC_max_heapsize) {
        /* Exceeded self-imposed limit */
        return(FALSE);
    }
    space = GET_MEM(bytes);
    GC_add_to_our_memory((ptr_t)space, bytes);
    if (space == 0) {
        if (GC_print_stats) {
            GC_log_printf("Failed to expand heap by %lu bytes\n",
                          (unsigned long)bytes);
        }
        return(FALSE);
    }
    if (GC_print_stats) {
        GC_log_printf("Increasing heap size by %lu after %lu allocated bytes\n",
                      (unsigned long)bytes,
                      (unsigned long)GC_bytes_allocd);
    }
    /* Adjust heap limits generously for blacklisting to work better.   */
    /* GC_add_to_heap performs minimal adjustment needed for            */
    /* correctness.                                                     */
    expansion_slop = min_bytes_allocd() + 4*MAXHINCR*HBLKSIZE;
    if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))
        || (GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space)) {
        /* Assume the heap is growing up */
        word new_limit = (word)space + bytes + expansion_slop;
        if (new_limit > (word)space) {
          GC_greatest_plausible_heap_addr =
            (void *)GC_max((word)GC_greatest_plausible_heap_addr,
                           (word)new_limit);
        }
    } else {
        /* Heap is growing down */
        word new_limit = (word)space - expansion_slop;
        if (new_limit < (word)space) {
          GC_least_plausible_heap_addr =
            (void *)GC_min((word)GC_least_plausible_heap_addr,
                           (word)space - expansion_slop);
        }
    }
    GC_prev_heap_addr = GC_last_heap_addr;
    GC_last_heap_addr = (ptr_t)space;
    GC_add_to_heap(space, bytes);
    /* Force GC before we are likely to allocate past expansion_slop */
    GC_collect_at_heapsize =
        GC_heapsize + expansion_slop - 2*MAXHINCR*HBLKSIZE;
#   if defined(LARGE_CONFIG)
      if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)
        GC_collect_at_heapsize = (word)(-1);
#   endif
    return(TRUE);
}

/* Really returns a bool, but it's externally visible, so that's clumsy. */
/* Argument is in bytes.  Includes GC_init() call.                       */
GC_API int GC_CALL GC_expand_hp(size_t bytes)
{
    int result;
    DCL_LOCK_STATE;

    LOCK();
    if (!GC_is_initialized) GC_init();
    result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));
    if (result) GC_requested_heapsize += bytes;
    UNLOCK();
    return(result);
}
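
/*
 * Example (client code): pre-size the heap at startup to reduce early
 * collections; 32 MiB here is purely illustrative.
 *
 *   if (!GC_expand_hp(32 << 20))
 *       fprintf(stderr, "initial heap expansion failed\n");
 */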
GC_INNER unsigned GC_fail_count = 0;
                        /* How many consecutive GC/expansion failures?  */
                        /* Reset by GC_allochblk.                       */

/* Collect or expand heap in an attempt to make the indicated number of */
/* free blocks available.  Should be called until the blocks are        */
/* available (setting retry value to TRUE unless this is the first call */
/* in a loop) or until it fails by returning FALSE.                     */
GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
                                      GC_bool ignore_off_page,
                                      GC_bool retry)
{
    GC_bool gc_not_stopped = TRUE;
    word blocks_to_get;
    IF_CANCEL(int cancel_state;)

    DISABLE_CANCEL(cancel_state);
    if (!GC_incremental && !GC_dont_gc &&
        ((GC_dont_expand && GC_bytes_allocd > 0) || GC_should_collect())) {
      /* Try to do a full collection using 'default' stop_func (unless  */
      /* nothing has been allocated since the latest collection or heap */
      /* expansion is disabled).                                        */
      gc_not_stopped = GC_try_to_collect_inner(
                        GC_bytes_allocd > 0 && (!GC_dont_expand || !retry) ?
                        GC_default_stop_func : GC_never_stop_func);
      if (gc_not_stopped == TRUE || !retry) {
        /* Either the collection hasn't been aborted or this is the     */
        /* first attempt (in a loop).                                   */
        RESTORE_CANCEL(cancel_state);
        return(TRUE);
      }
    }

    blocks_to_get = GC_heapsize/(HBLKSIZE*GC_free_space_divisor)
                        + needed_blocks;
    if (blocks_to_get > MAXHINCR) {
      word slop;

      /* Get the minimum required to make it likely that we can satisfy */
      /* the current request in the presence of black-listing.          */
      /* This will probably be more than MAXHINCR.                      */
      if (ignore_off_page) {
        slop = 4;
      } else {
        slop = 2 * divHBLKSZ(BL_LIMIT);
        if (slop > needed_blocks) slop = needed_blocks;
      }
      if (needed_blocks + slop > MAXHINCR) {
        blocks_to_get = needed_blocks + slop;
      } else {
        blocks_to_get = MAXHINCR;
      }
    }

    if (!GC_expand_hp_inner(blocks_to_get)
        && !GC_expand_hp_inner(needed_blocks)) {
      if (gc_not_stopped == FALSE) {
        /* Don't increment GC_fail_count here (and no warning). */
        GC_gcollect_inner();
        GC_ASSERT(GC_bytes_allocd == 0);
      } else if (GC_fail_count++ < GC_max_retries) {
        WARN("Out of Memory!  Trying to continue ...\n", 0);
        GC_gcollect_inner();
      } else {
#       if !defined(AMIGA) || !defined(GC_AMIGA_FASTALLOC)
          WARN("Out of Memory! Heap size: %" GC_PRIdPTR " MiB."
               " Returning NIL!\n", (GC_heapsize - GC_unmapped_bytes) >> 20);
#       endif
        RESTORE_CANCEL(cancel_state);
        return(FALSE);
      }
    } else if (GC_fail_count && GC_print_stats) {
      GC_printf("Memory available again ...\n");
    }
    RESTORE_CANCEL(cancel_state);
    return(TRUE);
}

/*
 * Make sure the object free list for size gran (in granules) is not empty.
 * Return a pointer to the first object on the free list.
 * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
 * Assumes we hold the allocation lock.
 */
GC_INNER ptr_t GC_allocobj(size_t gran, int kind)
{
    void ** flh = &(GC_obj_kinds[kind].ok_freelist[gran]);
    GC_bool tried_minor = FALSE;
    GC_bool retry = FALSE;

    if (gran == 0) return(0);

    while (*flh == 0) {
      ENTER_GC();
      /* Do our share of marking work */
        if (TRUE_INCREMENTAL) GC_collect_a_little_inner(1);
      /* Sweep blocks for objects of this size */
        GC_continue_reclaim(gran, kind);
      EXIT_GC();
      if (*flh == 0) {
        GC_new_hblk(gran, kind);
      }
      if (*flh == 0) {
        ENTER_GC();
        if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED
            && !tried_minor) {
          GC_collect_a_little_inner(1);
          tried_minor = TRUE;
        } else {
          if (!GC_collect_or_expand(1, FALSE, retry)) {
            EXIT_GC();
            return(0);
          }
          retry = TRUE;
        }
        EXIT_GC();
      }
    }
    /* Successful allocation; reset failure count. */
    GC_fail_count = 0;

    return (ptr_t)(*flh);
}