/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

# include "private/gc_priv.h"

/* Data structure for list of root sets.                                */
/* We keep a hash table, so that we can filter out duplicate additions. */
/* Under Win32, we need to do a better job of filtering overlaps, so    */
/* we resort to sequential search, and pay the price.                   */
/* This is really declared in gc_priv.h:

struct roots {
        ptr_t r_start;
        ptr_t r_end;
#       if !defined(MSWIN32) && !defined(MSWINCE)
          struct roots * r_next;
#       endif
        GC_bool r_tmp;
                -- Delete before registering new dynamic libraries
};

struct roots GC_static_roots[MAX_ROOT_SETS];
*/

int GC_no_dls = 0;      /* Register dynamic library data segments.      */

static int n_root_sets = 0;

        /* GC_static_roots[0..n_root_sets) contains the valid root sets. */

# if !defined(NO_DEBUGGING)
/* For debugging:       */
void GC_print_static_roots(void)
{
    int i;
    size_t total = 0;

    for (i = 0; i < n_root_sets; i++) {
        GC_printf("From %p to %p ",
                  GC_static_roots[i].r_start,
                  GC_static_roots[i].r_end);
        if (GC_static_roots[i].r_tmp) {
            GC_printf(" (temporary)\n");
        } else {
            GC_printf("\n");
        }
        total += GC_static_roots[i].r_end - GC_static_roots[i].r_start;
    }
    GC_printf("Total size: %ld\n", (unsigned long) total);
    if (GC_root_size != total) {
        GC_printf("GC_root_size incorrect: %ld!!\n",
                  (unsigned long) GC_root_size);
    }
}
# endif /* NO_DEBUGGING */

/* Primarily for debugging support:                                     */
/* Is the address p in one of the registered static root sections?      */
GC_bool GC_is_static_root(ptr_t p)
{
    static int last_root_set = MAX_ROOT_SETS;
    int i;

    if (last_root_set < n_root_sets
        && p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end) return(TRUE);
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return(TRUE);
        }
    }
    return(FALSE);
}
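
/* A hedged usage sketch (not part of the original source): assuming the  */
/* collector registered the main data segment at startup, a debugging     */
/* hook might check a global as follows (expected to yield TRUE):         */
/*                                                                        */
/*      static int some_global;            -- hypothetical global         */
/*      ... GC_is_static_root((ptr_t)&some_global) ...                    */
/*                                                                        */
/* The one-entry last_root_set cache makes repeated queries against the   */
/* same root set O(1).                                                    */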

#if !defined(MSWIN32) && !defined(MSWINCE)
/*
#   define LOG_RT_SIZE 6
#   define RT_SIZE (1 << LOG_RT_SIZE)  -- Power of 2, may be != MAX_ROOT_SETS

    struct roots * GC_root_index[RT_SIZE];
        -- Hash table header.  Used only to check whether a range is
        -- already present.
        -- really defined in gc_priv.h
*/

static INLINE int rt_hash(ptr_t addr)
{
    word result = (word) addr;

#   if CPP_WORDSZ > 8*LOG_RT_SIZE
        result ^= result >> 8*LOG_RT_SIZE;
#   endif
#   if CPP_WORDSZ > 4*LOG_RT_SIZE
        result ^= result >> 4*LOG_RT_SIZE;
#   endif
    result ^= result >> 2*LOG_RT_SIZE;
    result ^= result >> LOG_RT_SIZE;
    result &= (RT_SIZE-1);
    return(result);
}
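
/* A minimal sketch (not part of the original source) of the folding      */
/* rt_hash performs when CPP_WORDSZ == 64 and LOG_RT_SIZE == 6:           */
/*                                                                        */
/*      word h = (word) addr;                                             */
/*      h ^= h >> 48;      -- taken, since 64 > 8*LOG_RT_SIZE             */
/*      h ^= h >> 24;      -- taken, since 64 > 4*LOG_RT_SIZE             */
/*      h ^= h >> 12;                                                     */
/*      h ^= h >> 6;                                                      */
/*      h &= RT_SIZE - 1;  -- bucket index in [0, 63]                     */
/*                                                                        */
/* Every bit of the address influences the resulting bucket index.        */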

/* Is a range starting at b already in the table? If so return a       */
/* pointer to it, else NIL.                                             */
struct roots * GC_roots_present(ptr_t b)
{
    int h = rt_hash(b);
    struct roots *p = GC_root_index[h];

    while (p != 0) {
        if (p -> r_start == (ptr_t)b) return(p);
        p = p -> r_next;
    }
    return(0);
}

/* Add the given root structure to the index. */
static void add_roots_to_index(struct roots *p)
{
    int h = rt_hash(p -> r_start);

    p -> r_next = GC_root_index[h];
    GC_root_index[h] = p;
}

# else /* MSWIN32 || MSWINCE */

#   define add_roots_to_index(p)

# endif

word GC_root_size = 0;

void GC_add_roots(void *b, void *e)
{
    DCL_LOCK_STATE;

    if (!GC_is_initialized) GC_init();
    LOCK();
    GC_add_roots_inner((ptr_t)b, (ptr_t)e, FALSE);
    UNLOCK();
}
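
/* A hedged usage sketch (not part of the original source): a client      */
/* with a statically allocated table of pointers to collectable objects   */
/* might register it as a root:                                           */
/*                                                                        */
/*      static void *my_roots[1024];        -- hypothetical client data   */
/*      GC_add_roots(my_roots, (char *)my_roots + sizeof(my_roots));      */
/*                                                                        */
/* The interval is half-open; the second argument is one past the end.    */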

/* Add [b,e) to the root set.  Adding the same interval a second time  */
/* is a moderately fast noop, and hence benign.  We do not handle      */
/* different but overlapping intervals efficiently.  (We do handle     */
/* them correctly.)                                                     */
/* Tmp specifies that the interval may be deleted before               */
/* reregistering dynamic libraries.                                     */
void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
{
    struct roots * old;

#   if defined(MSWIN32) || defined(MSWINCE)
      /* Spend the time to ensure that there are no overlapping */
      /* or adjacent intervals.                                  */
      /* This could be done faster with e.g. a                   */
      /* balanced tree.  But the execution time here is          */
      /* virtually guaranteed to be dominated by the time it     */
      /* takes to scan the roots.                                 */
      {
        int i;

        for (i = 0; i < n_root_sets; i++) {
            old = GC_static_roots + i;
            if (b <= old -> r_end && e >= old -> r_start) {
                if (b < old -> r_start) {
                    GC_root_size += old -> r_start - b;
                    old -> r_start = b;
                }
                if (e > old -> r_end) {
                    GC_root_size += e - old -> r_end;
                    old -> r_end = e;
                }
                old -> r_tmp &= tmp;
                break;
            }
        }
        if (i < n_root_sets) {
          /* merge other overlapping intervals */
          struct roots *other;

          for (i++; i < n_root_sets; i++) {
            other = GC_static_roots + i;
            b = other -> r_start;
            e = other -> r_end;
            if (b <= old -> r_end && e >= old -> r_start) {
                if (b < old -> r_start) {
                    GC_root_size += old -> r_start - b;
                    old -> r_start = b;
                }
                if (e > old -> r_end) {
                    GC_root_size += e - old -> r_end;
                    old -> r_end = e;
                }
                old -> r_tmp &= other -> r_tmp;
                /* Delete this entry. */
                GC_root_size -= (other -> r_end - other -> r_start);
                other -> r_start = GC_static_roots[n_root_sets-1].r_start;
                other -> r_end = GC_static_roots[n_root_sets-1].r_end;
                other -> r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
                n_root_sets--;
            }
          }
          return;
        }
      }
#   else
      old = GC_roots_present(b);
      if (old != 0) {
        if (e <= old -> r_end) /* already there */ return;
        /* else extend */
        GC_root_size += e - old -> r_end;
        old -> r_end = e;
        return;
      }
#   endif
    if (n_root_sets == MAX_ROOT_SETS) {
        ABORT("Too many root sets\n");
    }
    GC_static_roots[n_root_sets].r_start = (ptr_t)b;
    GC_static_roots[n_root_sets].r_end = (ptr_t)e;
    GC_static_roots[n_root_sets].r_tmp = tmp;
#   if !defined(MSWIN32) && !defined(MSWINCE)
      GC_static_roots[n_root_sets].r_next = 0;
#   endif
    add_roots_to_index(GC_static_roots + n_root_sets);
    GC_root_size += e - b;
    n_root_sets++;
}
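
/* A worked example with hypothetical addresses: after                     */
/* GC_add_roots_inner((ptr_t)0x1000, (ptr_t)0x2000, FALSE), re-adding the  */
/* same interval hits GC_roots_present and returns at the "already there"  */
/* check.  Adding (0x1000, 0x3000) instead finds the same entry, extends   */
/* r_end to 0x3000, and grows GC_root_size by 0x1000 bytes; no new root    */
/* set is consumed.  (On non-Win32 targets, ranges with a different start  */
/* address become separate, possibly overlapping, entries.)                */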

static GC_bool roots_were_cleared = FALSE;

void GC_clear_roots (void)
{
    DCL_LOCK_STATE;

    if (!GC_is_initialized) GC_init();
    LOCK();
    roots_were_cleared = TRUE;
    n_root_sets = 0;
    GC_root_size = 0;
#   if !defined(MSWIN32) && !defined(MSWINCE)
      {
        int i;
        for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
      }
#   endif
    UNLOCK();
}

/* Internal use only; lock held.        */
static void GC_remove_root_at_pos(int i)
{
    GC_root_size -= (GC_static_roots[i].r_end - GC_static_roots[i].r_start);
    GC_static_roots[i].r_start = GC_static_roots[n_root_sets-1].r_start;
    GC_static_roots[i].r_end = GC_static_roots[n_root_sets-1].r_end;
    GC_static_roots[i].r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
    n_root_sets--;
}

#if !defined(MSWIN32) && !defined(MSWINCE)
static void GC_rebuild_root_index(void)
{
    int i;

    for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
    for (i = 0; i < n_root_sets; i++)
        add_roots_to_index(GC_static_roots + i);
}
#endif

/* Internal use only; lock held.        */
void GC_remove_tmp_roots(void)
{
    int i;

    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_tmp) {
            GC_remove_root_at_pos(i);
        } else {
            i++;
        }
    }
#   if !defined(MSWIN32) && !defined(MSWINCE)
      GC_rebuild_root_index();
#   endif
}

#if !defined(MSWIN32) && !defined(MSWINCE)
void GC_remove_roots(void *b, void *e)
{
    DCL_LOCK_STATE;

    LOCK();
    GC_remove_roots_inner((ptr_t)b, (ptr_t)e);
    UNLOCK();
}

/* Should only be called when the lock is held */
void GC_remove_roots_inner(ptr_t b, ptr_t e)
{
    int i;

    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_start >= b
            && GC_static_roots[i].r_end <= e) {
            GC_remove_root_at_pos(i);
        } else {
            i++;
        }
    }
    GC_rebuild_root_index();
}
#endif /* !defined(MSWIN32) && !defined(MSWINCE) */

#if defined(MSWIN32) || defined(_WIN32_WCE_EMULATION)
/* Workaround for the OS mapping and unmapping behind our back:         */
/* Is the address p in one of the temporary static root sections?       */
GC_bool GC_is_tmp_root(ptr_t p)
{
    static int last_root_set = MAX_ROOT_SETS;
    int i;

    if (last_root_set < n_root_sets
        && p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end)
        return GC_static_roots[last_root_set].r_tmp;
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return GC_static_roots[i].r_tmp;
        }
    }
    return(FALSE);
}
#endif /* MSWIN32 || _WIN32_WCE_EMULATION */

ptr_t GC_approx_sp(void)
{
    volatile word dummy;

    dummy = 42; /* Force stack to grow if necessary.  Otherwise the     */
                /* later accesses might cause the kernel to think we're */
                /* doing something wrong.                                */
#   ifdef _MSC_VER
#     pragma warning(disable:4172)
        /* Returning the address of a local is intentional here.        */
#   endif
    return((ptr_t)(&dummy));
#   ifdef _MSC_VER
#     pragma warning(default:4172)
#   endif
}

/*
 * Data structure for excluded static roots.
 * Real declaration is in gc_priv.h.

struct exclusion {
    ptr_t e_start;
    ptr_t e_end;
};

struct exclusion GC_excl_table[MAX_EXCLUSIONS];
                                        -- Array of exclusions, ascending
                                        -- address order.
*/

size_t GC_excl_table_entries = 0;       /* Number of entries in use.    */

/* Return the first exclusion range that includes an address >= start_addr */
/* Assumes the exclusion table contains at least one entry (namely the     */
/* GC data structures).                                                     */
struct exclusion * GC_next_exclusion(ptr_t start_addr)
{
    size_t low = 0;
    size_t high = GC_excl_table_entries - 1;
    size_t mid;

    while (high > low) {
        mid = (low + high) >> 1;
        /* low <= mid < high    */
        if ((word) GC_excl_table[mid].e_end <= (word) start_addr) {
            low = mid + 1;
        } else {
            high = mid;
        }
    }
    if ((word) GC_excl_table[low].e_end <= (word) start_addr) return 0;
    return GC_excl_table + low;
}
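
/* A worked example with a hypothetical table holding [0x1000,0x2000) and  */
/* [0x5000,0x6000): GC_next_exclusion((ptr_t)0x3000) rules out entry 0     */
/* since its e_end (0x2000) is <= 0x3000, and returns &GC_excl_table[1].   */
/* GC_next_exclusion((ptr_t)0x6000) falls off the end and returns 0.       */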

void GC_exclude_static_roots(void *start, void *finish)
{
    struct exclusion * next;
    size_t next_index, i;

    if (0 == GC_excl_table_entries) {
        next = 0;
    } else {
        next = GC_next_exclusion((ptr_t)start);
    }
    if (0 != next) {
      if ((word)(next -> e_start) < (word) finish) {
        /* incomplete error check. */
        ABORT("exclusion ranges overlap");
      }
      if ((word)(next -> e_start) == (word) finish) {
        /* extend old range backwards   */
        next -> e_start = (ptr_t)start;
        return;
      }
      next_index = next - GC_excl_table;
      for (i = GC_excl_table_entries; i > next_index; --i) {
        GC_excl_table[i] = GC_excl_table[i-1];
      }
    } else {
      next_index = GC_excl_table_entries;
    }
    if (GC_excl_table_entries == MAX_EXCLUSIONS) ABORT("Too many exclusions");
    GC_excl_table[next_index].e_start = (ptr_t)start;
    GC_excl_table[next_index].e_end = (ptr_t)finish;
    ++GC_excl_table_entries;
}
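
/* A hedged usage sketch (not part of the original source): a client with    */
/* a large static buffer known to contain no pointers can keep the           */
/* collector from scanning it:                                                */
/*                                                                            */
/*      static char big_buffer[65536];    -- hypothetical pointer-free data   */
/*      GC_exclude_static_roots(big_buffer, big_buffer + sizeof(big_buffer)); */
/*                                                                            */
/* Overlapping exclusion ranges are rejected with an ABORT.                   */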

/* Invoke push_conditional on ranges that are not excluded. */
void GC_push_conditional_with_exclusions(ptr_t bottom, ptr_t top, GC_bool all)
{
    struct exclusion * next;
    ptr_t excl_start;

    while (bottom < top) {
        next = GC_next_exclusion(bottom);
        if (0 == next || (excl_start = next -> e_start) >= top) {
            GC_push_conditional(bottom, top, all);
            return;
        }
        if (excl_start > bottom) GC_push_conditional(bottom, excl_start, all);
        bottom = next -> e_end;
    }
}
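
/* Continuing the hypothetical table above: pushing [0x0000,0x7000) with   */
/* exclusions [0x1000,0x2000) and [0x5000,0x6000) yields three calls:      */
/* GC_push_conditional(0x0000, 0x1000, all), then (0x2000, 0x5000, all),   */
/* then (0x6000, 0x7000, all).                                             */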

/*
 * In the absence of threads, push the stack contents.
 * In the presence of threads, push enough of the current stack
 * to ensure that callee-save registers saved in collector frames have been
 * seen.
 * FIXME: Merge with per-thread stuff.
 */
void GC_push_current_stack(ptr_t cold_gc_frame, void * context)
{
#   if defined(THREADS)
      if (0 == cold_gc_frame) return;
#     ifdef STACK_GROWS_DOWN
        GC_push_all_eager(GC_approx_sp(), cold_gc_frame);
        /* For IA64, the register stack backing store is handled       */
        /* in the thread-specific code.                                 */
#     else
        GC_push_all_eager( cold_gc_frame, GC_approx_sp() );
#     endif
#   else
#     ifdef STACK_GROWS_DOWN
        GC_push_all_stack_partially_eager( GC_approx_sp(), GC_stackbottom,
                                           cold_gc_frame );
#       ifdef IA64
          /* We also need to push the register stack backing store. */
          /* This should really be done in the same way as the      */
          /* regular stack.  For now we fudge it a bit.             */
          /* Note that the backing store grows up, so we can't use  */
          /* GC_push_all_stack_partially_eager.                      */
          {
            extern word GC_save_regs_ret_val;
                        /* Previously set to backing store pointer.  */
            ptr_t bsp = (ptr_t) GC_save_regs_ret_val;
            ptr_t cold_gc_bs_pointer;
            if (GC_all_interior_pointers) {
              cold_gc_bs_pointer = bsp - 2048;
              if (cold_gc_bs_pointer < BACKING_STORE_BASE) {
                cold_gc_bs_pointer = BACKING_STORE_BASE;
              } else {
                GC_push_all_stack(BACKING_STORE_BASE, cold_gc_bs_pointer);
              }
            } else {
              cold_gc_bs_pointer = BACKING_STORE_BASE;
            }
            GC_push_all_eager(cold_gc_bs_pointer, bsp);
            /* All values should be sufficiently aligned that we    */
            /* don't have to worry about the boundary.               */
          }
#       endif
#     else
        GC_push_all_stack_partially_eager( GC_stackbottom, GC_approx_sp(),
                                           cold_gc_frame );
#     endif
#   endif /* !THREADS */
}

void (*GC_push_typed_structures) (void) = NULL;

/*
 * Push GC internal roots.  Only called if there is some reason to believe
 * these would not otherwise get registered.
 */
void GC_push_gc_structures(void)
{
    GC_push_finalizer_structures();
#   if defined(THREADS)
      GC_push_thread_structures();
#   endif
    if (GC_push_typed_structures)
      GC_push_typed_structures();
}

#ifdef THREAD_LOCAL_ALLOC
  void GC_mark_thread_local_free_lists(void);
#endif

void GC_cond_register_dynamic_libraries(void)
{
# if defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
     || defined(PCR)
    GC_remove_tmp_roots();
    if (!GC_no_dls) GC_register_dynamic_libraries();
# else
    GC_no_dls = TRUE;
# endif
}

/*
 * Call the mark routines (GC_tl_push for a single pointer, GC_push_conditional
 * on groups of pointers) on every top level accessible pointer.
 * If all is FALSE, arrange to push only possibly altered values.
 * Cold_gc_frame is an address inside a GC frame that
 * remains valid until all marking is complete.
 * A zero value indicates that it's OK to miss some
 * register values.
 */
void GC_push_roots(GC_bool all, ptr_t cold_gc_frame)
{
    int i;
    unsigned kind;

    /*
     * Next push static data.  This must happen early on, since it's
     * not robust against mark stack overflow.
     */
    /* Reregister dynamic libraries, in case one got added.            */
    /* There is some argument for doing this as late as possible,      */
    /* especially on win32, where it can change asynchronously.        */
    /* In those cases, we do it here.  But on other platforms, it's    */
    /* not safe with the world stopped, so we do it earlier.           */
#   if !defined(REGISTER_LIBRARIES_EARLY)
      GC_cond_register_dynamic_libraries();
#   endif

    /* Mark everything in static data areas.                           */
      for (i = 0; i < n_root_sets; i++) {
        GC_push_conditional_with_exclusions(
            GC_static_roots[i].r_start,
            GC_static_roots[i].r_end, all);
      }

    /* Mark all free list header blocks, if those were allocated from  */
    /* the garbage collected heap.  This makes sure they don't          */
    /* disappear if we are not marking from static data.  It also      */
    /* saves us the trouble of scanning them, and possibly that of     */
    /* marking the freelists.                                           */
      for (kind = 0; kind < GC_n_kinds; kind++) {
        void *base = GC_base(GC_obj_kinds[kind].ok_freelist);
        if (0 != base) {
          GC_set_mark_bit(base);
        }
      }

    /* Mark from GC internal roots if those might otherwise have       */
    /* been excluded.                                                   */
      if (GC_no_dls || roots_were_cleared) {
          GC_push_gc_structures();
      }

    /* Mark thread local free lists, even if their mark descriptor     */
    /* excludes the link field.                                         */
    /* If the world is not stopped, this is unsafe.  It is also        */
    /* unnecessary, since we will do this again with the world         */
    /* stopped.                                                         */
#     if defined(THREAD_LOCAL_ALLOC)
        if (GC_world_stopped) GC_mark_thread_local_free_lists();
#     endif

    /*
     * Now traverse stacks, and mark from register contents.
     * These must be done last, since they can legitimately overflow
     * the mark stack.
     * This is usually done by saving the current context on the
     * stack, and then just tracing from the stack.
     */
      GC_push_regs_and_stack(cold_gc_frame);

    if (GC_push_other_roots != 0) (*GC_push_other_roots)();
        /* In the threads case, this also pushes thread stacks. */
        /* Note that without interior pointer recognition lots  */
        /* of stuff may have been pushed already, and this      */
        /* should be careful about mark stack overflows.        */
}