/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
# include "private/gc_priv.h"

# include <stdio.h>
# if !defined(MACOS) && !defined(MSWINCE)
#   include <signal.h>
#   include <sys/types.h>
# endif
/*
 * Separate free lists are maintained for different sized objects
 * and kinds of objects.
 * The call GC_allocobj(i,k) ensures that the freelist for
 * kind k objects of size i points to a non-empty
 * free list.  It returns a pointer to the first entry on the free list.
 * In a single-threaded world, GC_allocobj may be called to allocate
 * an object of (small) size i as follows:
 *
 *            opp = &(GC_objfreelist[i]);
 *            if (*opp == 0) GC_allocobj(i, NORMAL);
 *            ptr = *opp;
 *            *opp = obj_link(ptr);
 *
 * Note that this is very fast if the free list is non-empty; it should
 * only involve the execution of 4 or 5 simple instructions.
 * All composite objects on freelists are cleared, except for
 * their first word.
 */
/*
 * The allocator uses GC_allochblk to allocate large chunks of objects.
 * These chunks all start on addresses which are multiples of
 * HBLKSZ.  Each allocated chunk has an associated header,
 * which can be located quickly based on the address of the chunk.
 * (See headers.c for details.)
 * This makes it possible to check quickly whether an
 * arbitrary address corresponds to an object administered by the
 * allocator.
 */
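/* Illustrative sketch (not part of the collector): the inline          */
/* allocation fast path described in the comment above, spelled out as  */
/* a single-threaded client of the internal interfaces might write it.  */
/* example_alloc_fast is a hypothetical name.                           */
#if 0
  static void * example_alloc_fast(size_t i)
  {
    void **opp = &(GC_objfreelist[i]); /* free list for size i, NORMAL kind */
    void *ptr = *opp;

    if (0 == ptr) {
      /* Free list empty: refill it (may trigger collection/expansion). */
      ptr = GC_allocobj(i, NORMAL);
      if (0 == ptr) return 0;           /* Out of memory.               */
    }
    *opp = obj_link(ptr);               /* Unlink the first entry.      */
    obj_link(ptr) = 0;                  /* Clear its link field.        */
    return ptr;
  }
#endif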
word GC_non_gc_bytes = 0;  /* Number of bytes not intended to be collected */

word GC_gc_no = 0;

int GC_incremental = 0;    /* By default, stop the world.       */

int GC_parallel = FALSE;   /* By default, parallel GC is off.   */
#ifndef GC_FULL_FREQ
# define GC_FULL_FREQ 19   /* Every 20th collection is a full   */
                           /* collection, whether we need it    */
                           /* or not.                           */
#endif

int GC_full_freq = GC_FULL_FREQ;

STATIC GC_bool GC_need_full_gc = FALSE;
                           /* Need full GC due to heap growth.  */
#ifdef THREADS
  GC_bool GC_world_stopped = FALSE;
# define IF_THREADS(x) x
#else
# define IF_THREADS(x)
#endif
STATIC word GC_used_heap_size_after_full = 0;

char * GC_copyright[] =
{"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers ",
"Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved. ",
"Copyright (c) 1996-1998 by Silicon Graphics.  All rights reserved. ",
"Copyright (c) 1999-2009 by Hewlett-Packard Company.  All rights reserved. ",
"THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY",
" EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.",
"See source code for details." };
/* Version macros are now defined in gc_version.h, which is included by */
/* gc.h, which is included by gc_priv.h.                                */

#ifndef GC_NO_VERSION_VAR

  unsigned GC_version = ((GC_VERSION_MAJOR << 16) |
                         (GC_VERSION_MINOR << 8) | GC_TMP_ALPHA_VERSION);

#endif /* GC_NO_VERSION_VAR */
/* some more variables */

extern signed_word GC_bytes_found; /* Number of reclaimed bytes      */
                                   /* after garbage collection       */
#ifdef GC_DONT_EXPAND
  GC_bool GC_dont_expand = TRUE;
#else
  GC_bool GC_dont_expand = FALSE;
#endif
#ifndef GC_FREE_SPACE_DIVISOR
# define GC_FREE_SPACE_DIVISOR 3 /* must be > 0 */
#endif

word GC_free_space_divisor = GC_FREE_SPACE_DIVISOR;
extern GC_bool GC_collection_in_progress(void);
                /* Collection is in progress, or was abandoned. */

int GC_CALLBACK GC_never_stop_func (void) { return(0); }
#ifndef GC_TIME_LIMIT
# define GC_TIME_LIMIT 50  /* We try to keep pause times from exceeding  */
                           /* this by much.  In milliseconds.            */
#endif

unsigned long GC_time_limit = GC_TIME_LIMIT;
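/* Illustrative sketch (not part of the collector): how a client        */
/* typically opts into incremental collection and bounds pause times    */
/* via the public gc.h interface.  GC_time_limit is the variable        */
/* defined above; the 10 msec budget is an arbitrary example value.     */
#if 0
# include "gc.h"

  int main(void)
  {
    GC_INIT();
    GC_enable_incremental();  /* Request incremental/generational mode. */
    GC_time_limit = 10;       /* Aim for pauses of at most ~10 msecs.   */
    /* ... allocate with GC_MALLOC() as usual ... */
    return 0;
  }
#endif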
STATIC CLOCK_TYPE GC_start_time;/* Time at which we stopped world.      */
                                /* Used only in GC_timeout_stop_func.   */

STATIC int GC_n_attempts = 0;   /* Number of attempts at finishing      */
                                /* collection within GC_time_limit.     */
#if defined(SMALL_CONFIG) || defined(NO_CLOCK)
# define GC_timeout_stop_func GC_never_stop_func
#else
  STATIC int GC_CALLBACK GC_timeout_stop_func (void)
  {
    CLOCK_TYPE current_time;
    static unsigned count = 0;
    unsigned long time_diff;

    if ((count++ & 3) != 0) return(0);

    GET_TIME(current_time);
    time_diff = MS_TIME_DIFF(current_time,GC_start_time);
    if (time_diff >= GC_time_limit) {
        if (GC_print_stats) {
          GC_log_printf(
                "Abandoning stopped marking after %lu msecs (attempt %d)\n",
                time_diff, GC_n_attempts);
        }
        return(1);
    }
    return(0);
  }
#endif /* !SMALL_CONFIG */
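/* Illustrative sketch (not part of the collector): a client-supplied   */
/* GC_stop_func in the same spirit as GC_timeout_stop_func above,       */
/* suitable for passing to the public GC_try_to_collect().  The         */
/* deadline flag and how it gets set are hypothetical.                  */
#if 0
# include "gc.h"

  static volatile int example_deadline_reached = 0; /* set by a timer */

  static int GC_CALLBACK example_stop_func(void)
  {
    return example_deadline_reached;  /* nonzero => abandon collection */
  }

  /* Usage: (void)GC_try_to_collect(example_stop_func); */
#endif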
/* Return the minimum number of bytes that must be allocated between   */
/* collections to amortize the collection cost.                        */
static word min_bytes_allocd(void)
{
#   ifdef THREADS
      /* We punt, for now. */
      signed_word stack_size = 10000;
#   else
      int dummy;
      signed_word stack_size = (ptr_t)(&dummy) - GC_stackbottom;
#   endif
    word total_root_size;       /* includes double stack size,  */
                                /* since the stack is expensive */
                                /* to scan.                     */
    word scan_size;             /* Estimate of memory to be scanned     */
                                /* during normal GC.                    */

    if (stack_size < 0) stack_size = -stack_size;
    total_root_size = 2 * stack_size + GC_root_size;
    scan_size = 2 * GC_composite_in_use + GC_atomic_in_use/4
                + total_root_size;
    if (TRUE_INCREMENTAL) {
        return scan_size / (2 * GC_free_space_divisor);
    } else {
        return scan_size / GC_free_space_divisor;
    }
}
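/* Worked example of the heuristic above (illustrative numbers only):   */
/* with a 100 KB stack, GC_root_size = 800 KB, 10 MB of composite and   */
/* 4 MB of atomic data in use, total_root_size = 2*100 + 800 = 1000 KB, */
/* and scan_size = 20 MB + 1 MB + ~1 MB = ~22 MB.  With the default     */
/* GC_free_space_divisor of 3, a collection is amortized over ~7.3 MB   */
/* of allocation; true incremental mode halves that to ~3.7 MB.         */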
/* Return the number of bytes allocated, adjusted for explicit storage  */
/* management, etc.  This number is used in deciding when to trigger    */
/* collections.                                                         */
STATIC word GC_adj_bytes_allocd(void)
{
    signed_word result;
    signed_word expl_managed =
                (signed_word)GC_non_gc_bytes
                - (signed_word)GC_non_gc_bytes_at_gc;

    /* Don't count what was explicitly freed, or newly allocated for    */
    /* explicit management.  Note that deallocating an explicitly       */
    /* managed object should not alter result, assuming the client      */
    /* is playing by the rules.                                         */
    result = (signed_word)GC_bytes_allocd
             + (signed_word)GC_bytes_dropped
             - (signed_word)GC_bytes_freed
             + (signed_word)GC_finalizer_bytes_freed
             - expl_managed;
    if (result > (signed_word)GC_bytes_allocd) {
        result = GC_bytes_allocd;
        /* probably client bug or unfortunate scheduling */
    }
    result += GC_bytes_finalized;
        /* We count objects enqueued for finalization as though they    */
        /* had been reallocated this round.  Finalization is user       */
        /* visible progress.  And if we don't count this, we have       */
        /* stability problems for programs that finalize all objects.   */
    if (result < (signed_word)(GC_bytes_allocd >> 3)) {
        /* Always count at least 1/8 of the allocations.  We don't want */
        /* to collect too infrequently, since that would inhibit        */
        /* coalescing of free storage blocks.                           */
        /* This also makes us partially robust against client bugs.     */
        return(GC_bytes_allocd >> 3);
    } else {
        return(result);
    }
}
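/* Worked example of the adjustment above (illustrative numbers only):  */
/* with GC_bytes_allocd = 8 MB, nothing dropped or finalized, 2 MB      */
/* explicitly freed, and 1 MB of new explicitly managed (GC_non_gc)     */
/* data since the last collection, result = 8 - 2 - 1 = 5 MB.  That is  */
/* below the 8 MB cap and above the 8/8 = 1 MB floor, so 5 MB is what   */
/* the collection trigger sees.                                         */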
/* Clear up a few frames worth of garbage left at the top of the stack. */
/* This is used to prevent us from accidentally treating garbage left   */
/* on the stack by other parts of the collector as roots.  This         */
/* differs from the code in misc.c, which actually tries to keep the    */
/* stack clear of long-lived, client-generated garbage.                 */
STATIC void GC_clear_a_few_frames(void)
{
#   ifndef CLEAR_NWORDS
#     define CLEAR_NWORDS 64
#   endif
    volatile word frames[CLEAR_NWORDS];
    int i;

    for (i = 0; i < CLEAR_NWORDS; i++) frames[i] = 0;
}
/* Heap size at which we need a collection to avoid expanding past      */
/* limits used by blacklisting.                                         */
static word GC_collect_at_heapsize = (word)(-1);

/* Have we allocated enough to amortize a collection? */
GC_bool GC_should_collect(void)
{
    static word last_min_bytes_allocd;
    static word last_gc_no;
    if (last_gc_no != GC_gc_no) {
      last_gc_no = GC_gc_no;
      last_min_bytes_allocd = min_bytes_allocd();
    }
    return(GC_adj_bytes_allocd() >= last_min_bytes_allocd
           || GC_heapsize >= GC_collect_at_heapsize);
}
STATIC void GC_notify_full_gc(void)
{
    if (GC_start_call_back != (void (*) (void))0) {
        (*GC_start_call_back)();
    }
}

STATIC GC_bool GC_is_full_gc = FALSE;

STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func);
STATIC void GC_finish_collection(void);
/*
 * Initiate a garbage collection if appropriate.
 * Choose judiciously
 * between partial, full, and stop-world collections.
 */
STATIC void GC_maybe_gc(void)
{
    static int n_partial_gcs = 0;

    GC_ASSERT(I_HOLD_LOCK());
    if (GC_should_collect()) {
        if (!GC_incremental) {
            GC_gcollect_inner();
            n_partial_gcs = 0;
            return;
        } else {
#         ifdef PARALLEL_MARK
            if (GC_parallel)
              GC_wait_for_reclaim();
#         endif
          if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
            if (GC_print_stats) {
              GC_log_printf(
                "***>Full mark for collection %lu after %ld allocd bytes\n",
                (unsigned long)GC_gc_no + 1,
                (long)GC_bytes_allocd);
            }
            GC_promote_black_lists();
            (void)GC_reclaim_all((GC_stop_func)0, TRUE);
            GC_notify_full_gc();
            GC_clear_marks();
            n_partial_gcs = 0;
            GC_is_full_gc = TRUE;
          } else {
            n_partial_gcs++;
          }
        }
        /* We try to mark with the world stopped.       */
        /* If we run out of time, this turns into       */
        /* incremental marking.                         */
        if (GC_time_limit != GC_TIME_UNLIMITED) { GET_TIME(GC_start_time); }
        if (GC_stopped_mark(GC_time_limit == GC_TIME_UNLIMITED?
                            GC_never_stop_func : GC_timeout_stop_func)) {
#           ifdef SAVE_CALL_CHAIN
              GC_save_callers(GC_last_stack);
#           endif
            GC_finish_collection();
        } else {
            if (!GC_is_full_gc) {
                /* Count this as the first attempt */
                GC_n_attempts++;
            }
        }
    }
}
/*
 * Stop the world garbage collection.  Assumes lock held.  If stop_func is
 * not GC_never_stop_func then abort if stop_func returns TRUE.
 * Return TRUE if we successfully completed the collection.
 */
GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
{
#   ifndef SMALL_CONFIG
      CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
      CLOCK_TYPE current_time;
#   endif
    if (GC_dont_gc) return FALSE;
    if (GC_incremental && GC_collection_in_progress()) {
      if (GC_print_stats) {
        GC_log_printf(
            "GC_try_to_collect_inner: finishing collection in progress\n");
      }
      /* Just finish collection already in progress. */
      while(GC_collection_in_progress()) {
          if (stop_func()) return(FALSE);
          GC_collect_a_little_inner(1);
      }
    }
    if (stop_func == GC_never_stop_func) GC_notify_full_gc();
#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        GET_TIME(start_time);
        GC_log_printf("Initiating full world-stop collection!\n");
      }
#   endif
    GC_promote_black_lists();
    /* Make sure all blocks have been reclaimed, so sweep routines      */
    /* don't see cleared mark bits.                                     */
    /* If we're guaranteed to finish, then this is unnecessary.         */
    /* In the find_leak case, we have to finish to guarantee that       */
    /* previously unmarked objects are not reported as leaks.           */
#   ifdef PARALLEL_MARK
      if (GC_parallel)
        GC_wait_for_reclaim();
#   endif
    if ((GC_find_leak || stop_func != GC_never_stop_func)
        && !GC_reclaim_all(stop_func, FALSE)) {
        /* Aborted.  So far everything is still consistent. */
        return(FALSE);
    }
    GC_invalidate_mark_state();  /* Flush mark stack.   */
    GC_clear_marks();
#   ifdef SAVE_CALL_CHAIN
      GC_save_callers(GC_last_stack);
#   endif
    GC_is_full_gc = TRUE;
    if (!GC_stopped_mark(stop_func)) {
      if (!GC_incremental) {
        /* We're partially done and have no way to complete or use      */
        /* current work.  Reestablish invariants as cheaply as          */
        /* possible.                                                    */
        GC_invalidate_mark_state();
        GC_unpromote_black_lists();
      } /* else we claim the world is already still consistent.  We'll  */
        /* finish incrementally.                                        */
      return(FALSE);
    }
    GC_finish_collection();
#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        GET_TIME(current_time);
        GC_log_printf("Complete collection took %lu msecs\n",
                      MS_TIME_DIFF(current_time,start_time));
      }
#   endif
    return(TRUE);
}
/*
 * Perform n units of garbage collection work.  A unit is intended to touch
 * roughly GC_RATE pages.  Every once in a while, we do more than that.
 * This needs to be a fairly large number with our current incremental
 * GC strategy, since otherwise we allocate too much during GC, and the
 * cleanup gets expensive.
 */
#ifndef GC_RATE
# define GC_RATE 10
#endif
#ifndef MAX_PRIOR_ATTEMPTS
# define MAX_PRIOR_ATTEMPTS 1
#endif
        /* Maximum number of prior attempts at world stop marking.      */
        /* A value of 1 means that we finish the second time, no matter */
        /* how long it takes.  Doesn't count the initial root scan      */
        /* for a full GC.                                               */

STATIC int GC_deficit = 0;/* The number of extra calls to GC_mark_some  */
                          /* that we have made.                         */
void GC_collect_a_little_inner(int n)
{
    int i;

    if (GC_dont_gc) return;
    if (GC_incremental && GC_collection_in_progress()) {
        for (i = GC_deficit; i < GC_RATE*n; i++) {
            if (GC_mark_some((ptr_t)0)) {
                /* Need to finish a collection */
#               ifdef SAVE_CALL_CHAIN
                  GC_save_callers(GC_last_stack);
#               endif
#               ifdef PARALLEL_MARK
                  if (GC_parallel)
                    GC_wait_for_reclaim();
#               endif
                if (GC_n_attempts < MAX_PRIOR_ATTEMPTS
                    && GC_time_limit != GC_TIME_UNLIMITED) {
                  GET_TIME(GC_start_time);
                  if (!GC_stopped_mark(GC_timeout_stop_func)) {
                    GC_n_attempts++;
                    break;
                  }
                } else {
                  (void)GC_stopped_mark(GC_never_stop_func);
                }
                GC_finish_collection();
                break;
            }
        }
        if (GC_deficit > 0) GC_deficit -= GC_RATE*n;
        if (GC_deficit < 0) GC_deficit = 0;
    } else {
        GC_maybe_gc();
    }
}
GC_API int GC_CALL GC_collect_a_little(void)
{
    int result;
    DCL_LOCK_STATE;

    LOCK();
    GC_collect_a_little_inner(1);
    result = (int)GC_collection_in_progress();
    UNLOCK();
    if (!result && GC_debugging_started) GC_print_all_smashed();
    return(result);
}
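/* Illustrative sketch (not part of the collector): driving incremental */
/* collection from an application idle loop via the public API above.   */
/* app_is_idle() is a hypothetical application predicate.               */
#if 0
# include "gc.h"

  static void do_gc_while_idle(void)
  {
    /* Each call does a small unit of GC work; a zero result means the  */
    /* current collection cycle has finished.                           */
    while (app_is_idle() && GC_collect_a_little()) {
      /* Keep going until out of idle time or out of GC work. */
    }
  }
#endif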
# if !defined(REDIRECT_MALLOC) && (defined(MSWIN32) || defined(MSWINCE))
    void GC_add_current_malloc_heap(void);
# endif

#ifdef MAKE_BACK_GRAPH
  void GC_build_back_graph(void);
#endif
#ifndef SMALL_CONFIG
  /* Variables for world-stop average delay time statistic computation. */
  /* "divisor" is incremented every world-stop and halved when it       */
  /* reaches its maximum (or upon "total_time" overflow).               */
  STATIC unsigned world_stopped_total_time = 0;
  STATIC unsigned world_stopped_total_divisor = 0;
# ifndef MAX_TOTAL_TIME_DIVISOR
    /* We shall not use big values here (so "outdated" delay time       */
    /* values would have less impact on "average" delay time value than */
    /* newer ones).                                                     */
#   define MAX_TOTAL_TIME_DIVISOR 1000
# endif
#endif
/*
 * Assumes lock is held.  We stop the world and mark from all roots.
 * If stop_func() ever returns TRUE, we may fail and return FALSE.
 * Increment GC_gc_no if we succeed.
 */
STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func)
{
    unsigned i;
    int dummy;
#   ifndef SMALL_CONFIG
      CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
      CLOCK_TYPE current_time;
#   endif

#   if !defined(REDIRECT_MALLOC) && (defined(MSWIN32) || defined(MSWINCE))
      GC_add_current_malloc_heap();
#   endif
#   if defined(REGISTER_LIBRARIES_EARLY)
      GC_cond_register_dynamic_libraries();
#   endif

#   ifndef SMALL_CONFIG
      if (GC_print_stats)
        GET_TIME(start_time);
#   endif

    STOP_WORLD();
    IF_THREADS(GC_world_stopped = TRUE);
    if (GC_print_stats) {
        /* Output blank line for convenience here */
        GC_log_printf(
              "\n--> Marking for collection %lu after %lu allocated bytes\n",
              (unsigned long)GC_gc_no + 1, (unsigned long) GC_bytes_allocd);
    }
#   ifdef MAKE_BACK_GRAPH
      if (GC_print_back_height) {
        GC_build_back_graph();
      }
#   endif

    /* Mark from all roots.  */
      /* Minimize junk left in my registers and on the stack */
        GC_clear_a_few_frames();
        GC_noop(0,0,0,0,0,0);
      GC_initiate_gc();
      for (i = 0;;i++) {
        if ((*stop_func)()) {
            if (GC_print_stats) {
              GC_log_printf("Abandoned stopped marking after "
                            "%u iterations\n", i);
            }
            GC_deficit = i;     /* Give the mutator a chance.   */
            IF_THREADS(GC_world_stopped = FALSE);
            START_WORLD();
            return(FALSE);
        }
        if (GC_mark_some((ptr_t)(&dummy))) break;
      }

    GC_gc_no++;
    if (GC_print_stats) {
      GC_log_printf(
             "Collection %lu reclaimed %ld bytes ---> heapsize = %lu bytes\n",
             (unsigned long)(GC_gc_no - 1), (long)GC_bytes_found,
             (unsigned long)GC_heapsize);
      /* Printf arguments may be pushed in funny places.  Clear the     */
      /* space.                                                         */
      GC_log_printf("");
    }

    /* Check all debugged objects for consistency */
    if (GC_debugging_started) {
        (*GC_check_heap)();
    }

    IF_THREADS(GC_world_stopped = FALSE);
    START_WORLD();
#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        unsigned long time_diff;
        unsigned total_time, divisor;

        GET_TIME(current_time);
        time_diff = MS_TIME_DIFF(current_time,start_time);

        /* Compute new world-stop delay total time */
        total_time = world_stopped_total_time;
        divisor = world_stopped_total_divisor;
        if ((int)total_time < 0 || divisor >= MAX_TOTAL_TIME_DIVISOR) {
          /* Halve values if overflow occurs */
          total_time >>= 1;
          divisor >>= 1;
        }
        total_time += time_diff < (((unsigned)-1) >> 1) ?
                        (unsigned)time_diff : ((unsigned)-1) >> 1;
        /* Update old world_stopped_total_time and its divisor */
        world_stopped_total_time = total_time;
        world_stopped_total_divisor = ++divisor;

        GC_ASSERT(divisor != 0);
        GC_log_printf(
                "World-stopped marking took %lu msecs (%lu in average)\n",
                time_diff, total_time / divisor);
      }
#   endif
    return(TRUE);
}
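/* Worked example of the averaging scheme above (illustrative numbers,  */
/* not from any particular run): with world_stopped_total_time = 900,   */
/* world_stopped_total_divisor = 99, and a new time_diff of 30 msecs,   */
/* no halving is needed (900 is nonnegative and 99 < 1000), so the      */
/* total becomes 930 and the divisor 100, for a reported average of     */
/* 930 / 100 = 9 msecs.                                                 */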
/* Set all mark bits for the free list whose first entry is q. */
void GC_set_fl_marks(ptr_t q)
{
   ptr_t p;
   struct hblk * h, * last_h = 0;
   hdr *hhdr;  /* gcc "might be uninitialized" warning is bogus. */
   IF_PER_OBJ(size_t sz;)
   unsigned bit_no;

   for (p = q; p != 0; p = obj_link(p)){
        h = HBLKPTR(p);
        if (h != last_h) {
          last_h = h;
          hhdr = HDR(h);
          IF_PER_OBJ(sz = hhdr->hb_sz;)
        }
        bit_no = MARK_BIT_NO((ptr_t)p - (ptr_t)h, sz);
        if (!mark_bit_from_hdr(hhdr, bit_no)) {
          set_mark_bit_from_hdr(hhdr, bit_no);
          ++hhdr -> hb_n_marks;
        }
   }
}
/* Check that all mark bits for the free list whose first entry is q    */
/* are set.                                                             */
void GC_check_fl_marks(ptr_t q)
{
   ptr_t p;

   for (p = q; p != 0; p = obj_link(p)){
        if (!GC_is_marked(p)) {
            GC_err_printf("Unmarked object %p on list %p\n", p, q);
            ABORT("Unmarked local free list entry.");
        }
   }
}
/* Clear all mark bits for the free list whose first entry is q.        */
/* Decrement GC_bytes_found by number of bytes on free list.            */
STATIC void GC_clear_fl_marks(ptr_t q)
{
   ptr_t p;
   struct hblk * h, * last_h = 0;
   hdr *hhdr;
   size_t sz;
   unsigned bit_no;

   for (p = q; p != 0; p = obj_link(p)){
        h = HBLKPTR(p);
        if (h != last_h) {
          last_h = h;
          hhdr = HDR(h);
          sz = hhdr->hb_sz;   /* Normally set only once. */
        }
        bit_no = MARK_BIT_NO((ptr_t)p - (ptr_t)h, sz);
        if (mark_bit_from_hdr(hhdr, bit_no)) {
          size_t n_marks = hhdr -> hb_n_marks - 1;
          clear_mark_bit_from_hdr(hhdr, bit_no);
#         ifdef PARALLEL_MARK
            /* Appr. count, don't decrement to zero! */
            if (0 != n_marks || !GC_parallel) {
              hhdr -> hb_n_marks = n_marks;
            }
#         else
            hhdr -> hb_n_marks = n_marks;
#         endif
        }
        GC_bytes_found -= sz;
   }
}
#if defined(GC_ASSERTIONS) && defined(THREADS) && defined(THREAD_LOCAL_ALLOC)
  extern void GC_check_tls(void);
#endif

#ifdef MAKE_BACK_GRAPH
  void GC_traverse_back_graph(void);
#endif
/* Finish up a collection.  Assumes mark bits are consistent, lock is   */
/* held, but the world is otherwise running.                            */
STATIC void GC_finish_collection(void)
{
#   ifndef SMALL_CONFIG
      CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
      CLOCK_TYPE finalize_time = 0;
      CLOCK_TYPE done_time;
#   endif

#   if defined(GC_ASSERTIONS) && defined(THREADS) \
       && defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      /* Check that we marked some of our own data.     */
      /* FIXME: Add more checks.                        */
      GC_check_tls();
#   endif

#   ifndef SMALL_CONFIG
      if (GC_print_stats)
        GET_TIME(start_time);
#   endif

    GC_bytes_found = 0;
#   if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
      if (getenv("GC_PRINT_ADDRESS_MAP") != 0) {
        GC_print_address_map();
      }
#   endif
    COND_DUMP;
    if (GC_find_leak) {
      /* Mark all objects on the free list.  All objects should be      */
      /* marked when we're done.                                        */
      {
        word size;              /* current object size  */
        unsigned kind;
        ptr_t q;

        for (kind = 0; kind < GC_n_kinds; kind++) {
          for (size = 1; size <= MAXOBJGRANULES; size++) {
            q = GC_obj_kinds[kind].ok_freelist[size];
            if (q != 0) GC_set_fl_marks(q);
          }
        }
      }
      GC_start_reclaim(TRUE);
        /* The above just checks; it doesn't really reclaim anything.   */
    }

    GC_finalize();
#   ifdef STUBBORN_ALLOC
      GC_clean_changing_list();
#   endif

#   ifndef SMALL_CONFIG
      if (GC_print_stats)
        GET_TIME(finalize_time);
#   endif

    if (GC_print_back_height) {
#     ifdef MAKE_BACK_GRAPH
        GC_traverse_back_graph();
#     else
#       ifndef SMALL_CONFIG
          GC_err_printf("Back height not available: "
                        "Rebuild collector with -DMAKE_BACK_GRAPH\n");
#       endif
#     endif
    }

    /* Clear free list mark bits, in case they got accidentally marked   */
    /* (or GC_find_leak is set and they were intentionally marked).      */
    /* Also subtract memory remaining from GC_bytes_found count.         */
    /* Note that composite objects on free list are cleared.             */
    /* Thus accidentally marking a free list is not a problem; only      */
    /* objects on the list itself will be marked, and that's fixed here. */
    {
      word size;        /* current object size          */
      ptr_t q;          /* pointer to current object    */
      unsigned kind;

      for (kind = 0; kind < GC_n_kinds; kind++) {
        for (size = 1; size <= MAXOBJGRANULES; size++) {
          q = GC_obj_kinds[kind].ok_freelist[size];
          if (q != 0) GC_clear_fl_marks(q);
        }
      }
    }

    if (GC_print_stats == VERBOSE)
        GC_log_printf("Bytes recovered before sweep - f.l. count = %ld\n",
                      (long)GC_bytes_found);

    /* Reconstruct free lists to contain everything not marked */
    GC_start_reclaim(FALSE);
    if (GC_print_stats) {
      GC_log_printf("Heap contains %lu pointer-containing "
                    "+ %lu pointer-free reachable bytes\n",
                    (unsigned long)GC_composite_in_use,
                    (unsigned long)GC_atomic_in_use);
    }
    if (GC_is_full_gc) {
        GC_used_heap_size_after_full = USED_HEAP_SIZE;
        GC_need_full_gc = FALSE;
    } else {
        GC_need_full_gc =
                USED_HEAP_SIZE - GC_used_heap_size_after_full
                > min_bytes_allocd();
    }

    if (GC_print_stats == VERBOSE) {
#     ifdef USE_MUNMAP
        GC_log_printf("Immediately reclaimed %ld bytes in heap"
                      " of size %lu bytes (%lu unmapped)\n",
                      (long)GC_bytes_found, (unsigned long)GC_heapsize,
                      (unsigned long)GC_unmapped_bytes);
#     else
        GC_log_printf("Immediately reclaimed %ld bytes in heap"
                      " of size %lu bytes\n",
                      (long)GC_bytes_found, (unsigned long)GC_heapsize);
#     endif
    }

    /* Reset or increment counters for next cycle */
    GC_n_attempts = 0;
    GC_is_full_gc = FALSE;
    GC_bytes_allocd_before_gc += GC_bytes_allocd;
    GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
    GC_bytes_allocd = 0;
    GC_bytes_dropped = 0;
    GC_bytes_freed = 0;
    GC_finalizer_bytes_freed = 0;

#   ifdef USE_MUNMAP
      GC_unmap_old();
#   endif

#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        GET_TIME(done_time);
        GC_log_printf("Finalize + initiate sweep took %lu + %lu msecs\n",
                      MS_TIME_DIFF(finalize_time,start_time),
                      MS_TIME_DIFF(done_time,finalize_time));
      }
#   endif
}
/* Externally callable routine to invoke full, stop-world collection. */
GC_API int GC_CALL GC_try_to_collect(GC_stop_func stop_func)
{
    int result;
    DCL_LOCK_STATE;

    if (!GC_is_initialized) GC_init();
    GC_ASSERT(stop_func != 0);
    if (GC_debugging_started) GC_print_all_smashed();
    GC_INVOKE_FINALIZERS();
    LOCK();
    /* Minimize junk left in my registers */
      GC_noop(0,0,0,0,0,0);
    result = (int)GC_try_to_collect_inner(stop_func);
    UNLOCK();
    if (result) {
        if (GC_debugging_started) GC_print_all_smashed();
        GC_INVOKE_FINALIZERS();
    }
    return(result);
}
GC_API void GC_CALL GC_gcollect(void)
{
    (void)GC_try_to_collect(GC_never_stop_func);
    if (GC_have_errors) GC_print_all_errors();
}
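/* Illustrative sketch (not part of the collector): minimal client use  */
/* of the public collection entry point defined above.                  */
#if 0
# include <assert.h>
# include "gc.h"

  int main(void)
  {
    void *p;

    GC_INIT();
    p = GC_MALLOC(64);      /* collectable memory           */
    assert(p != 0);
    p = 0;                  /* drop the only reference      */
    GC_gcollect();          /* full, stop-world collection  */
    return 0;
  }
#endif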
word GC_n_heap_sects = 0;       /* Number of sections currently in heap. */

#ifdef USE_PROC_FOR_LIBRARIES
  word GC_n_memory = 0;         /* Number of GET_MEM allocated memory   */
                                /* sections.                            */
#endif

#ifdef USE_PROC_FOR_LIBRARIES
  /* Add HBLKSIZE aligned, GET_MEM-generated block to GC_our_memory.    */
  /* Defined to do nothing if USE_PROC_FOR_LIBRARIES not set.           */
  void GC_add_to_our_memory(ptr_t p, size_t bytes)
  {
    if (0 == p) return;
    if (GC_n_memory >= MAX_HEAP_SECTS)
      ABORT("Too many GC-allocated memory sections: Increase MAX_HEAP_SECTS");
    GC_our_memory[GC_n_memory].hs_start = p;
    GC_our_memory[GC_n_memory].hs_bytes = bytes;
    GC_n_memory++;
  }
#endif
/*
 * Use the chunk of memory starting at p of size bytes as part of the heap.
 * Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
 */
void GC_add_to_heap(struct hblk *p, size_t bytes)
{
    hdr * phdr;
    word endp;

    if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
        ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
    }
    while ((word)p <= HBLKSIZE) {
        /* Can't handle memory near address zero. */
        ++p;
        bytes -= HBLKSIZE;
        if (0 == bytes) return;
    }
    endp = (word)p + bytes;
    if (endp <= (word)p) {
        /* Address wrapped. */
        bytes -= HBLKSIZE;
        if (0 == bytes) return;
        endp -= HBLKSIZE;
    }
    phdr = GC_install_header(p);
    if (0 == phdr) {
        /* This is extremely unlikely.  Can't add it.  This will        */
        /* almost certainly result in a 0 return from the allocator,    */
        /* which is entirely appropriate.                               */
        return;
    }
    GC_ASSERT(endp > (word)p && endp == (word)p + bytes);
    GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
    GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
    GC_n_heap_sects++;
    phdr -> hb_sz = bytes;
    phdr -> hb_flags = 0;
    GC_freehblk(p);
    GC_heapsize += bytes;
    if ((ptr_t)p <= (ptr_t)GC_least_plausible_heap_addr
        || GC_least_plausible_heap_addr == 0) {
        GC_least_plausible_heap_addr = (void *)((ptr_t)p - sizeof(word));
                /* Making it a little smaller than necessary prevents   */
                /* us from getting a false hit from the variable        */
                /* itself.  There's some unintentional reflection       */
                /* here.                                                */
    }
    if ((ptr_t)p + bytes >= (ptr_t)GC_greatest_plausible_heap_addr) {
        GC_greatest_plausible_heap_addr = (void *)endp;
    }
}
# if !defined(NO_DEBUGGING)
  void GC_print_heap_sects(void)
  {
    unsigned i;

    GC_printf("Total heap size: %lu\n", (unsigned long) GC_heapsize);
    for (i = 0; i < GC_n_heap_sects; i++) {
        ptr_t start = GC_heap_sects[i].hs_start;
        size_t len = GC_heap_sects[i].hs_bytes;
        struct hblk *h;
        unsigned nbl = 0;

        for (h = (struct hblk *)start; h < (struct hblk *)(start + len); h++) {
            if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
        }
        GC_printf("Section %d from %p to %p %lu/%lu blacklisted\n",
                  i, start, start + len,
                  (unsigned long)nbl, (unsigned long)(len/HBLKSIZE));
    }
  }
# endif
void * GC_least_plausible_heap_addr = (void *)ONES;
void * GC_greatest_plausible_heap_addr = 0;

static INLINE word GC_max(word x, word y)
{
    return(x > y? x : y);
}

static INLINE word GC_min(word x, word y)
{
    return(x < y? x : y);
}

GC_API void GC_CALL GC_set_max_heap_size(GC_word n)
{
    GC_max_heapsize = n;
}

GC_word GC_max_retries = 0;
/*
 * This explicitly increases the size of the heap.  It is used
 * internally, but may also be invoked from GC_expand_hp by the user.
 * The argument is in units of HBLKSIZE.
 * Tiny values of n are rounded up.
 * Returns FALSE on failure.
 */
GC_bool GC_expand_hp_inner(word n)
{
    word bytes;
    struct hblk * space;
    word expansion_slop;        /* Number of bytes by which we expect the */
                                /* heap to expand soon.                   */

    if (n < MINHINCR) n = MINHINCR;
    bytes = n * HBLKSIZE;
    /* Make sure bytes is a multiple of GC_page_size */
    {
        word mask = GC_page_size - 1;
        bytes += mask;
        bytes &= ~mask;
    }

    if (GC_max_heapsize != 0 && GC_heapsize + bytes > GC_max_heapsize) {
        /* Exceeded self-imposed limit */
        return(FALSE);
    }
    space = GET_MEM(bytes);
    GC_add_to_our_memory((ptr_t)space, bytes);
    if (space == 0) {
        if (GC_print_stats) {
            GC_log_printf("Failed to expand heap by %lu bytes\n",
                          (unsigned long)bytes);
        }
        return(FALSE);
    }
    if (GC_print_stats) {
        GC_log_printf("Increasing heap size by %lu after %lu allocated bytes\n",
                      (unsigned long)bytes,
                      (unsigned long)GC_bytes_allocd);
    }
    /* Adjust heap limits generously for blacklisting to work better.  */
    /* GC_add_to_heap performs minimal adjustment needed for           */
    /* correctness.                                                    */
    expansion_slop = min_bytes_allocd() + 4*MAXHINCR*HBLKSIZE;
    if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))
        || (GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space)) {
        /* Assume the heap is growing up */
        word new_limit = (word)space + bytes + expansion_slop;
        if (new_limit > (word)space) {
          GC_greatest_plausible_heap_addr =
            (void *)GC_max((word)GC_greatest_plausible_heap_addr,
                           (word)new_limit);
        }
    } else {
        /* Heap is growing down */
        word new_limit = (word)space - expansion_slop;
        if (new_limit < (word)space) {
          GC_least_plausible_heap_addr =
            (void *)GC_min((word)GC_least_plausible_heap_addr,
                           (word)space - expansion_slop);
        }
    }
    GC_prev_heap_addr = GC_last_heap_addr;
    GC_last_heap_addr = (ptr_t)space;
    GC_add_to_heap(space, bytes);
    /* Force GC before we are likely to allocate past expansion_slop */
    GC_collect_at_heapsize =
        GC_heapsize + expansion_slop - 2*MAXHINCR*HBLKSIZE;
#   if defined(LARGE_CONFIG)
      if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)
        GC_collect_at_heapsize = (word)(-1);
#   endif
    return(TRUE);
}
/* Really returns a bool, but it's externally visible, so that's clumsy. */
/* Argument is in bytes.                                                  */
GC_API int GC_CALL GC_expand_hp(size_t bytes)
{
    int result;
    DCL_LOCK_STATE;

    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));
    if (result) GC_requested_heapsize += bytes;
    UNLOCK();
    return(result);
}
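/* Illustrative sketch (not part of the collector): pre-sizing the heap */
/* with the public entry points defined above.  The 64 MB cap and 16 MB */
/* pre-expansion are arbitrary example values.                          */
#if 0
# include "gc.h"

  int main(void)
  {
    GC_INIT();
    GC_set_max_heap_size(64 * 1024 * 1024);  /* self-imposed 64 MB cap  */
    if (!GC_expand_hp(16 * 1024 * 1024)) {   /* pre-grow heap by 16 MB  */
      /* Expansion failed; the collector will still grow on demand. */
    }
    /* ... */
    return 0;
  }
#endif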
unsigned GC_fail_count = 0;
                        /* How many consecutive GC/expansion failures?  */
                        /* Reset by GC_allochblk.                       */

GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page)
{
    if (!GC_incremental && !GC_dont_gc &&
        ((GC_dont_expand && GC_bytes_allocd > 0) || GC_should_collect())) {
      GC_gcollect_inner();
    } else {
      word blocks_to_get = GC_heapsize/(HBLKSIZE*GC_free_space_divisor)
                           + needed_blocks;

      if (blocks_to_get > MAXHINCR) {
        word slop;

        /* Get the minimum required to make it likely that we          */
        /* can satisfy the current request in the presence of black-   */
        /* listing.  This will probably be more than MAXHINCR.         */
        if (ignore_off_page) {
          slop = 4;
        } else {
          slop = 2*divHBLKSZ(BL_LIMIT);
          if (slop > needed_blocks) slop = needed_blocks;
        }
        if (needed_blocks + slop > MAXHINCR) {
          blocks_to_get = needed_blocks + slop;
        } else {
          blocks_to_get = MAXHINCR;
        }
      }
      if (!GC_expand_hp_inner(blocks_to_get)
          && !GC_expand_hp_inner(needed_blocks)) {
        if (GC_fail_count++ < GC_max_retries) {
            WARN("Out of Memory! Trying to continue ...\n", 0);
            GC_gcollect_inner();
        } else {
#           if !defined(AMIGA) || !defined(GC_AMIGA_FASTALLOC)
              WARN("Out of Memory! Returning NIL!\n", 0);
#           endif
            return(FALSE);
        }
      } else {
        if (GC_fail_count && GC_print_stats) {
          GC_printf("Memory available again ...\n");
        }
      }
    }
    return(TRUE);
}
/*
 * Make sure the object free list for size gran (in granules) is not empty.
 * Return a pointer to the first object on the free list.
 * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
 * Assumes we hold the allocator lock.
 */
ptr_t GC_allocobj(size_t gran, int kind)
{
    void ** flh = &(GC_obj_kinds[kind].ok_freelist[gran]);
    GC_bool tried_minor = FALSE;

    if (gran == 0) return(0);

    while (*flh == 0) {
      ENTER_GC();
      /* Do our share of marking work */
        if (TRUE_INCREMENTAL) GC_collect_a_little_inner(1);
      /* Sweep blocks for objects of this size */
        GC_continue_reclaim(gran, kind);
      EXIT_GC();
      if (*flh == 0) {
        GC_new_hblk(gran, kind);
      }
      if (*flh == 0) {
        ENTER_GC();
        if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED
            && !tried_minor) {
          GC_collect_a_little_inner(1);
          tried_minor = TRUE;
        } else {
          EXIT_GC();
          if (!GC_collect_or_expand((word)1, FALSE)) {
            return(0);
          }
          continue;
        }
        EXIT_GC();
      }
    }
    /* Successful allocation; reset failure count. */
    GC_fail_count = 0;

    return(*flh);
}