/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
# include "private/gc_priv.h"
/* Data structure for list of root sets.                                */
/* We keep a hash table, so that we can filter out duplicate additions. */
/* Under Win32, we need to do a better job of filtering overlaps, so    */
/* we resort to sequential search, and pay the price.                   */
/* This is really declared in gc_priv.h:
struct roots {
        ptr_t r_start;
        ptr_t r_end;
#       if !defined(MSWIN32) && !defined(MSWINCE)
          struct roots * r_next;
#       endif
        GC_bool r_tmp;
                -- Delete before registering new dynamic libraries
};

struct roots GC_static_roots[MAX_ROOT_SETS];
*/
int GC_no_dls = 0;      /* Register dynamic library data segments.      */

static int n_root_sets = 0;
        /* GC_static_roots[0..n_root_sets) contains the valid root sets. */

# if !defined(NO_DEBUGGING)
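/* A sample of the output produced below, with hypothetical addresses   */
/* and sizes (one line per registered root set, followed by a total):   */
/*                                                                      */
/*   From 0x601040 to 0x604e88 (temporary)                              */
/*   From 0x7f3a2000 to 0x7f3a6000                                      */
/*   Total size: 32328                                                  */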
void GC_print_static_roots(void)
{
    int i;
    size_t total = 0;

    for (i = 0; i < n_root_sets; i++) {
        GC_printf("From %p to %p%s\n",
                  GC_static_roots[i].r_start,
                  GC_static_roots[i].r_end,
                  GC_static_roots[i].r_tmp ? " (temporary)" : "");
        total += GC_static_roots[i].r_end - GC_static_roots[i].r_start;
    }
    GC_printf("Total size: %ld\n", (unsigned long) total);
    if (GC_root_size != total) {
        GC_printf("GC_root_size incorrect: %ld!!\n",
                  (unsigned long) GC_root_size);
    }
}
# endif /* NO_DEBUGGING */
/* Primarily for debugging support:                                     */
/* Is the address p in one of the registered static root sections?      */
GC_bool GC_is_static_root(ptr_t p)
{
    static int last_root_set = MAX_ROOT_SETS;
    int i;

    if (last_root_set < n_root_sets
        && p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end) return(TRUE);
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return(TRUE);
        }
    }
    return(FALSE);
}
#if !defined(MSWIN32) && !defined(MSWINCE)
/*
#   define LOG_RT_SIZE 6
#   define RT_SIZE (1 << LOG_RT_SIZE)  -- Power of 2, may be != MAX_ROOT_SETS

    struct roots * GC_root_index[RT_SIZE];
        -- Hash table header.  Used only to check whether a range is
        -- already present.
        -- really defined in gc_priv.h
*/
static INLINE int rt_hash(ptr_t addr)
{
    word result = (word) addr;
#   if CPP_WORDSZ > 8*LOG_RT_SIZE
        result ^= result >> 8*LOG_RT_SIZE;
#   endif
#   if CPP_WORDSZ > 4*LOG_RT_SIZE
        result ^= result >> 4*LOG_RT_SIZE;
#   endif
    result ^= result >> 2*LOG_RT_SIZE;
    result ^= result >> LOG_RT_SIZE;
    result &= (RT_SIZE-1);
    return(result);
}
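/* Illustration (assuming LOG_RT_SIZE == 6 on a 64-bit target): the     */
/* folds above XOR the address shifted right by 48, 24, 12, and 6 bits  */
/* into its low-order bits, so every bit of the address can influence   */
/* the bucket index that survives the final RT_SIZE-1 mask.             */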
/* Is a range starting at b already in the table? If so return a       */
/* pointer to it, else NIL.                                             */
struct roots * GC_roots_present(ptr_t b)
{
    int h = rt_hash(b);
    struct roots *p = GC_root_index[h];

    while (p != 0) {
        if (p -> r_start == (ptr_t)b) return(p);
        p = p -> r_next;
    }
    return NULL;
}
/* Add the given root structure to the index. */
static void add_roots_to_index(struct roots *p)
{
    int h = rt_hash(p -> r_start);

    p -> r_next = GC_root_index[h];
    GC_root_index[h] = p;
}
#endif /* !MSWIN32 && !MSWINCE */

word GC_root_size = 0;
GC_API void GC_CALL GC_add_roots(void *b, void *e)
{
    DCL_LOCK_STATE;

    if (!GC_is_initialized) GC_init();
    LOCK();
    GC_add_roots_inner((ptr_t)b, (ptr_t)e, FALSE);
    UNLOCK();
}
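/* Usage sketch (hypothetical client code, not part of this file):     */
/* register a statically allocated table whose slots may point into    */
/* the collected heap, so the collector scans it on every mark phase:  */
/*                                                                     */
/*   static void *my_handles[256];                                     */
/*   GC_add_roots(my_handles, my_handles + 256);                       */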
/* Add [b,e) to the root set.  Adding the same interval a second time  */
/* is a moderately fast no-op, and hence benign.  We do not handle     */
/* different but overlapping intervals efficiently.  (We do handle     */
/* them correctly.)                                                    */
/* Tmp specifies that the interval may be deleted before               */
/* re-registering dynamic libraries.                                   */
void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
{
    struct roots * old;
#   if defined(MSWIN32) || defined(MSWINCE)
      /* Spend the time to ensure that there are no overlapping */
      /* or adjacent intervals.                                 */
      /* This could be done faster with e.g. a                  */
      /* balanced tree.  But the execution time here is         */
      /* virtually guaranteed to be dominated by the time it    */
      /* takes to scan the roots.                               */
      {
        register int i;

        old = 0; /* initialized to prevent warning. */
        for (i = 0; i < n_root_sets; i++) {
            old = GC_static_roots + i;
            if (b <= old -> r_end && e >= old -> r_start) {
                if (b < old -> r_start) {
                    /* Update size before overwriting r_start. */
                    GC_root_size += (old -> r_start - b);
                    old -> r_start = b;
                }
                if (e > old -> r_end) {
                    GC_root_size += (e - old -> r_end);
                    old -> r_end = e;
                }
                old -> r_tmp &= tmp;
                break;
            }
        }
        if (i < n_root_sets) {
            /* merge other overlapping intervals */
            struct roots *other;

            for (i++; i < n_root_sets; i++) {
                other = GC_static_roots + i;
                b = other -> r_start;
                e = other -> r_end;
                if (b <= old -> r_end && e >= old -> r_start) {
                    if (b < old -> r_start) {
                        GC_root_size += (old -> r_start - b);
                        old -> r_start = b;
                    }
                    if (e > old -> r_end) {
                        GC_root_size += (e - old -> r_end);
                        old -> r_end = e;
                    }
                    old -> r_tmp &= other -> r_tmp;
                    /* Delete this entry. */
                    GC_root_size -= (other -> r_end - other -> r_start);
                    other -> r_start = GC_static_roots[n_root_sets-1].r_start;
                    other -> r_end = GC_static_roots[n_root_sets-1].r_end;
                    other -> r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
                    n_root_sets--;
                    i--;        /* Re-examine the entry just moved here. */
                }
            }
            return;
        }
      }
#   else
      old = GC_roots_present(b);
      if (old != 0) {
          if (e <= old -> r_end) /* already there */ return;
          /* else extend */
          GC_root_size += e - old -> r_end;
          old -> r_end = e;
          return;
      }
#   endif
    if (n_root_sets == MAX_ROOT_SETS) {
        ABORT("Too many root sets\n");
    }
    GC_static_roots[n_root_sets].r_start = (ptr_t)b;
    GC_static_roots[n_root_sets].r_end = (ptr_t)e;
    GC_static_roots[n_root_sets].r_tmp = tmp;
#   if !defined(MSWIN32) && !defined(MSWINCE)
      GC_static_roots[n_root_sets].r_next = 0;
      add_roots_to_index(GC_static_roots + n_root_sets);
#   endif
    GC_root_size += e - b;
    n_root_sets++;
}
static GC_bool roots_were_cleared = FALSE;
GC_API void GC_CALL GC_clear_roots(void)
{
    DCL_LOCK_STATE;

    if (!GC_is_initialized) GC_init();
    LOCK();
    roots_were_cleared = TRUE;
    n_root_sets = 0;
    GC_root_size = 0;
#   if !defined(MSWIN32) && !defined(MSWINCE)
      {
        int i;

        for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
      }
#   endif
    UNLOCK();
}
/* Internal use only; lock held. */
static void GC_remove_root_at_pos(int i)
{
    GC_root_size -= (GC_static_roots[i].r_end - GC_static_roots[i].r_start);
    GC_static_roots[i].r_start = GC_static_roots[n_root_sets-1].r_start;
    GC_static_roots[i].r_end = GC_static_roots[n_root_sets-1].r_end;
    GC_static_roots[i].r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
    n_root_sets--;
}
#if !defined(MSWIN32) && !defined(MSWINCE)
static void GC_rebuild_root_index(void)
{
    int i;

    for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
    for (i = 0; i < n_root_sets; i++)
        add_roots_to_index(GC_static_roots + i);
}
#endif
#if defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
    || defined(PCR)
/* Internal use only; lock held. */
STATIC void GC_remove_tmp_roots(void)
{
    int i;

    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_tmp) {
            GC_remove_root_at_pos(i);
        } else {
            i++;
        }
    }
#   if !defined(MSWIN32) && !defined(MSWINCE)
      GC_rebuild_root_index();
#   endif
}
#endif
#if !defined(MSWIN32) && !defined(MSWINCE)
GC_API void GC_CALL GC_remove_roots(void *b, void *e)
{
    DCL_LOCK_STATE;

    LOCK();
    GC_remove_roots_inner((ptr_t)b, (ptr_t)e);
    UNLOCK();
}

/* Should only be called when the lock is held. */
void GC_remove_roots_inner(ptr_t b, ptr_t e)
{
    int i;

    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_start >= b
            && GC_static_roots[i].r_end <= e) {
            GC_remove_root_at_pos(i);
        } else {
            i++;
        }
    }
    GC_rebuild_root_index();
}
#endif /* !defined(MSWIN32) && !defined(MSWINCE) */
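/* Usage sketch (hypothetical, pairing with the GC_add_roots example    */
/* above): once my_handles can no longer refer to collectable objects,  */
/* unregister it so it is no longer scanned:                            */
/*                                                                      */
/*   GC_remove_roots(my_handles, my_handles + 256);                     */
/*                                                                      */
/* Note that only root sets entirely contained in [b,e) are removed.    */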
#if defined(MSWIN32) || defined(_WIN32_WCE_EMULATION)
/* Workaround for the OS mapping and unmapping behind our back:         */
/* Is the address p in one of the temporary static root sections?       */
GC_bool GC_is_tmp_root(ptr_t p)
{
    static int last_root_set = MAX_ROOT_SETS;
    int i;

    if (last_root_set < n_root_sets
        && p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end)
        return GC_static_roots[last_root_set].r_tmp;
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return GC_static_roots[i].r_tmp;
        }
    }
    return(FALSE);
}
#endif /* MSWIN32 || _WIN32_WCE_EMULATION */
ptr_t GC_approx_sp(void)
{
    volatile word sp;

    sp = (word)&sp;
                /* Also force stack to grow if necessary. Otherwise the */
                /* later accesses might cause the kernel to think we're */
                /* doing something wrong.                               */
    return((ptr_t)sp);
}
/*
 * Data structure for excluded static roots.
 * Real declaration is in gc_priv.h.

struct exclusion {
    ptr_t e_start;
    ptr_t e_end;
};

struct exclusion GC_excl_table[MAX_EXCLUSIONS];
                                        -- Array of exclusions, ascending
                                        -- address order.
*/

STATIC size_t GC_excl_table_entries = 0;        /* Number of entries in use. */
/* Return the first exclusion range that includes an address >= start_addr. */
/* Assumes the exclusion table contains at least one entry (namely the      */
/* GC data structures).                                                      */
STATIC struct exclusion * GC_next_exclusion(ptr_t start_addr)
{
    size_t low = 0;
    size_t high = GC_excl_table_entries - 1;
    size_t mid;

    while (low < high) {
        mid = (low + high) >> 1;
        /* low <= mid < high */
        if ((word) GC_excl_table[mid].e_end <= (word) start_addr) {
            low = mid + 1;
        } else {
            high = mid;
        }
    }
    if ((word) GC_excl_table[low].e_end <= (word) start_addr) return 0;
    return GC_excl_table + low;
}
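/* Worked example (hypothetical table): with exclusions [0x1000,        */
/* 0x2000) and [0x3000, 0x4000), GC_next_exclusion(0x2500) probes       */
/* mid = 0, sees e_end 0x2000 <= 0x2500, and converges on the second    */
/* entry; GC_next_exclusion(0x4500) finds no e_end above the argument   */
/* and returns 0.                                                       */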
GC_API void GC_CALL GC_exclude_static_roots(void *start, void *finish)
{
    struct exclusion * next;
    size_t next_index, i;

    /* Check for overflow before the table is shifted below. */
    if (GC_excl_table_entries == MAX_EXCLUSIONS) ABORT("Too many exclusions");
    if (0 == GC_excl_table_entries) {
        next = 0;
    } else {
        next = GC_next_exclusion(start);
    }
    if (0 != next) {
        if ((word)(next -> e_start) < (word) finish) {
            /* incomplete error check. */
            ABORT("exclusion ranges overlap");
        }
        if ((word)(next -> e_start) == (word) finish) {
            /* extend old range backwards */
            next -> e_start = (ptr_t)start;
            return;
        }
        next_index = next - GC_excl_table;
        for (i = GC_excl_table_entries; i > next_index; --i) {
            GC_excl_table[i] = GC_excl_table[i-1];
        }
    } else {
        next_index = GC_excl_table_entries;
    }
    GC_excl_table[next_index].e_start = (ptr_t)start;
    GC_excl_table[next_index].e_end = (ptr_t)finish;
    ++GC_excl_table_entries;
}
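/* Usage sketch (hypothetical client code, not part of this file):      */
/* exclude a statically allocated arena that is known to contain no     */
/* pointers to collectable objects, so root scans skip over it:         */
/*                                                                      */
/*   static char arena[1 << 20];                                        */
/*   GC_exclude_static_roots(arena, arena + sizeof(arena));             */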
/* Invoke push_conditional on ranges that are not excluded. */
STATIC void GC_push_conditional_with_exclusions(ptr_t bottom, ptr_t top,
                                                GC_bool all)
{
    struct exclusion * next;
    ptr_t excl_start;

    while (bottom < top) {
        next = GC_next_exclusion(bottom);
        if (0 == next || (excl_start = next -> e_start) >= top) {
            GC_push_conditional(bottom, top, all);
            return;
        }
        if (excl_start > bottom) GC_push_conditional(bottom, excl_start, all);
        bottom = next -> e_end;
    }
}
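/* Walkthrough (hypothetical ranges): pushing [0x1000, 0x4000) with a   */
/* single exclusion [0x2000, 0x3000) takes two iterations: the first    */
/* pushes [0x1000, 0x2000) and advances bottom to 0x3000; the second    */
/* finds no further exclusion below top and pushes [0x3000, 0x4000).    */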
/* Push enough of the current stack eagerly to          */
/* ensure that callee-save registers saved in           */
/* GC frames are scanned.                               */
/* In the non-threads case, schedule entire             */
/* stack for scanning.                                  */
/* The second argument is a pointer to the              */
/* (possibly null) thread context, for                  */
/* (currently hypothetical) more precise                */
/* stack scanning.                                      */
/*
 * In the absence of threads, push the stack contents.
 * In the presence of threads, push enough of the current stack
 * to ensure that callee-save registers saved in collector frames have been
 * seen.
 * FIXME: Merge with per-thread stuff.
 */
STATIC void GC_push_current_stack(ptr_t cold_gc_frame, void * context)
{
#   if defined(THREADS)
      if (0 == cold_gc_frame) return;
#     ifdef STACK_GROWS_DOWN
        GC_push_all_eager(GC_approx_sp(), cold_gc_frame);
        /* For IA64, the register stack backing store is handled        */
        /* in the thread-specific code.                                 */
#     else
        GC_push_all_eager(cold_gc_frame, GC_approx_sp());
#     endif
#   else
#     ifdef STACK_GROWS_DOWN
        GC_push_all_stack_partially_eager(GC_approx_sp(), GC_stackbottom,
                                          cold_gc_frame);
#       ifdef IA64
          /* We also need to push the register stack backing store.     */
          /* This should really be done in the same way as the          */
          /* regular stack.  For now we fudge it a bit.                 */
          /* Note that the backing store grows up, so we can't use      */
          /* GC_push_all_stack_partially_eager.                         */
          {
            extern word GC_save_regs_ret_val;
                        /* Previously set to backing store pointer.     */
            ptr_t bsp = (ptr_t) GC_save_regs_ret_val;
            ptr_t cold_gc_bs_pointer;

            if (GC_all_interior_pointers) {
              cold_gc_bs_pointer = bsp - 2048;
              if (cold_gc_bs_pointer < BACKING_STORE_BASE) {
                cold_gc_bs_pointer = BACKING_STORE_BASE;
              } else {
                GC_push_all_stack(BACKING_STORE_BASE, cold_gc_bs_pointer);
              }
            } else {
              cold_gc_bs_pointer = BACKING_STORE_BASE;
            }
            GC_push_all_eager(cold_gc_bs_pointer, bsp);
            /* All values should be sufficiently aligned that we        */
            /* don't have to worry about the boundary.                  */
          }
#       endif
#     else
        GC_push_all_stack_partially_eager(GC_stackbottom, GC_approx_sp(),
                                          cold_gc_frame);
#     endif
#   endif /* !THREADS */
}
void (*GC_push_typed_structures) (void) = NULL;
/* Push GC internal roots.  These are normally          */
/* included in the static data segment, and             */
/* thus implicitly pushed.  But we must do this         */
/* explicitly if normal root processing is              */
/* disabled.                                            */
/*
 * Push GC internal roots.  Only called if there is some reason to believe
 * these would not otherwise get registered.
 */
STATIC void GC_push_gc_structures(void)
{
    GC_push_finalizer_structures();
#   if defined(THREADS)
      GC_push_thread_structures();
#   endif
    if (GC_push_typed_structures)
      GC_push_typed_structures();
}
#ifdef THREAD_LOCAL_ALLOC
  void GC_mark_thread_local_free_lists(void);
#endif
void GC_cond_register_dynamic_libraries(void)
{
#   if defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
       || defined(PCR)
      GC_remove_tmp_roots();
      if (!GC_no_dls) GC_register_dynamic_libraries();
#   else
      GC_no_dls = TRUE;
#   endif
}
STATIC void GC_push_regs_and_stack(ptr_t cold_gc_frame)
{
    GC_with_callee_saves_pushed(GC_push_current_stack, cold_gc_frame);
}
/*
 * Call the mark routines (GC_tl_push for a single pointer, GC_push_conditional
 * on groups of pointers) on every top level accessible pointer.
 * If all is FALSE, arrange to push only possibly altered values.
 * Cold_gc_frame is an address inside a GC frame that
 * remains valid until all marking is complete.
 * A zero value indicates that it's OK to miss some
 * register values.
 */
void GC_push_roots(GC_bool all, ptr_t cold_gc_frame)
{
    int i;
    unsigned kind;

    /*
     * Next push static data.  This must happen early on, since it's
     * not robust against mark stack overflow.
     */
    /* Re-register dynamic libraries, in case one got added.           */
    /* There is some argument for doing this as late as possible,      */
    /* especially on win32, where it can change asynchronously.        */
    /* In those cases, we do it here.  But on other platforms, it's    */
    /* not safe with the world stopped, so we do it earlier.           */
#   if !defined(REGISTER_LIBRARIES_EARLY)
      GC_cond_register_dynamic_libraries();
#   endif
    /* Mark everything in static data areas. */
    for (i = 0; i < n_root_sets; i++) {
        GC_push_conditional_with_exclusions(
            GC_static_roots[i].r_start,
            GC_static_roots[i].r_end, all);
    }
    /* Mark all free list header blocks, if those were allocated from  */
    /* the garbage collected heap.  This makes sure they don't         */
    /* disappear if we are not marking from static data.  It also      */
    /* saves us the trouble of scanning them, and possibly that of     */
    /* marking the freelists.                                          */
    for (kind = 0; kind < GC_n_kinds; kind++) {
        void *base = GC_base(GC_obj_kinds[kind].ok_freelist);
        if (0 != base) {
            GC_set_mark_bit(base);
        }
    }
    /* Mark from GC internal roots if those might otherwise have       */
    /* been excluded.                                                  */
    if (GC_no_dls || roots_were_cleared) {
        GC_push_gc_structures();
    }
    /* Mark thread local free lists, even if their mark        */
    /* descriptor excludes the link field.                     */
    /* If the world is not stopped, this is unsafe.  It is     */
    /* also unnecessary, since we will do this again with the  */
    /* world stopped.                                          */
#   if defined(THREAD_LOCAL_ALLOC)
      if (GC_world_stopped) GC_mark_thread_local_free_lists();
#   endif
    /*
     * Now traverse stacks, and mark from register contents.
     * These must be done last, since they can legitimately overflow
     * the mark stack.
     * This is usually done by saving the current context on the
     * stack, and then just tracing from the stack.
     */
    GC_push_regs_and_stack(cold_gc_frame);
    if (GC_push_other_roots != 0) (*GC_push_other_roots)();
        /* In the threads case, this also pushes thread stacks. */
        /* Note that without interior pointer recognition lots  */
        /* of stuff may have been pushed already, and this      */
        /* should be careful about mark stack overflows.        */
}