/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
# include "private/gc_priv.h"

/* Data structure for list of root sets.                                 */
/* We keep a hash table, so that we can filter out duplicate additions.  */
/* Under Win32, we need to do a better job of filtering overlaps, so     */
/* we resort to sequential search, and pay the price.                    */
/* This is really declared in gc_priv.h:
struct roots {
    ptr_t r_start;
    ptr_t r_end;
#   if !defined(MSWIN32) && !defined(MSWINCE)
      struct roots * r_next;
#   endif
    GC_bool r_tmp;
        -- Delete before registering new dynamic libraries
};

struct roots GC_static_roots[MAX_ROOT_SETS];
*/
int GC_no_dls = 0;      /* If 0 (the default), register dynamic library data segments. */

static int n_root_sets = 0;

        /* GC_static_roots[0..n_root_sets) contains the valid root sets. */
# if !defined(NO_DEBUGGING)
void GC_print_static_roots()
{
    register int i;
    size_t total = 0;

    for (i = 0; i < n_root_sets; i++) {
        GC_printf2("From 0x%lx to 0x%lx ",
                   (unsigned long) GC_static_roots[i].r_start,
                   (unsigned long) GC_static_roots[i].r_end);
        if (GC_static_roots[i].r_tmp) {
            GC_printf0(" (temporary)\n");
        } else {
            GC_printf0("\n");
        }
        total += GC_static_roots[i].r_end - GC_static_roots[i].r_start;
    }
    GC_printf1("Total size: %ld\n", (unsigned long) total);
    if (GC_root_size != total) {
        GC_printf1("GC_root_size incorrect: %ld!!\n",
                   (unsigned long) GC_root_size);
    }
}
# endif /* NO_DEBUGGING */
/* Primarily for debugging support: */
/* Is the address p in one of the registered static root sections? */
GC_bool GC_is_static_root(p)
ptr_t p;
{
    static int last_root_set = MAX_ROOT_SETS;
    register int i;

    if (last_root_set < n_root_sets
        && p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end) return(TRUE);
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return(TRUE);
        }
    }
    return(FALSE);
}
#if !defined(MSWIN32) && !defined(MSWINCE)
/*
#   define LOG_RT_SIZE 6
#   define RT_SIZE (1 << LOG_RT_SIZE)  -- Power of 2, may be != MAX_ROOT_SETS

    struct roots * GC_root_index[RT_SIZE];
        -- Hash table header.  Used only to check whether a range is
        -- already present.
        -- really defined in gc_priv.h
*/
static int rt_hash(addr)
char * addr;
{
    word result = (word) addr;
#   if CPP_WORDSZ > 8*LOG_RT_SIZE
        result ^= result >> 8*LOG_RT_SIZE;
#   endif
#   if CPP_WORDSZ > 4*LOG_RT_SIZE
        result ^= result >> 4*LOG_RT_SIZE;
#   endif
    result ^= result >> 2*LOG_RT_SIZE;
    result ^= result >> LOG_RT_SIZE;
    result &= (RT_SIZE-1);
    return(result);
}
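/*
 * Illustrative expansion (not part of the collector): with LOG_RT_SIZE == 6
 * and RT_SIZE == 64 as assumed above, a 64-bit build (CPP_WORDSZ == 64)
 * folds the address as
 *
 *     h ^= h >> 48;  h ^= h >> 24;  h ^= h >> 12;  h ^= h >> 6;
 *     h &= 63;
 *
 * so every bit of the address can influence the 6-bit bucket index,
 * which keeps nearby root starts from piling up in a single chain.
 */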
/* Is a range starting at b already in the table?  If so return a pointer to it, else NIL. */
struct roots * GC_roots_present(b)
char *b;
{
    register int h = rt_hash(b);
    register struct roots *p = GC_root_index[h];
    while (p != 0) {
        if (p -> r_start == (ptr_t)b) return(p);
        p = p -> r_next;
    }
    return(0);
}
/* Add the given root structure to the index. */
static void add_roots_to_index(p)
struct roots *p;
{
    register int h = rt_hash(p -> r_start);

    p -> r_next = GC_root_index[h];
    GC_root_index[h] = p;
}
# else /* MSWIN32 || MSWINCE */
#   define add_roots_to_index(p)
# endif

word GC_root_size = 0;
void GC_add_roots(b, e)
char * b; char * e;
{
    GC_add_roots_inner(b, e, FALSE);
}
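/*
 * Typical client usage (sketch; my_data_start and my_data_end are
 * hypothetical symbols delimiting a data segment that the collector
 * should scan for pointers):
 *
 *     extern char my_data_start[], my_data_end[];
 *     GC_add_roots(my_data_start, my_data_end);
 */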
/* Add [b,e) to the root set.  Adding the same interval a second time   */
/* is a moderately fast noop, and hence benign.  We do not handle       */
/* different but overlapping intervals efficiently.  (We do handle      */
/* them correctly.)                                                     */
/* Tmp specifies that the interval may be deleted before                */
/* re-registering dynamic libraries.                                    */
void GC_add_roots_inner(b, e, tmp)
char * b; char * e;
GC_bool tmp;
{
    struct roots * old;
#   if defined(MSWIN32) || defined(MSWINCE)
      /* Spend the time to ensure that there are no overlapping        */
      /* or adjacent intervals.  This could be done faster with e.g. a */
      /* balanced tree.  But the execution time here is virtually      */
      /* guaranteed to be dominated by the time it takes to scan the   */
      /* roots.                                                        */
      {
        register int i;
        for (i = 0; i < n_root_sets; i++) {
            old = GC_static_roots + i;
            if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
                if ((ptr_t)b < old -> r_start) {
                    GC_root_size += (old -> r_start - (ptr_t)b);
                    old -> r_start = (ptr_t)b;
                }
                if ((ptr_t)e > old -> r_end) {
                    GC_root_size += ((ptr_t)e - old -> r_end);
                    old -> r_end = (ptr_t)e;
                }
                old -> r_tmp &= tmp;
                break;
            }
        }
        if (i < n_root_sets) {
            /* merge other overlapping intervals */
            struct roots *other;

            for (i++; i < n_root_sets; i++) {
                other = GC_static_roots + i;
                b = (char *)(other -> r_start);
                e = (char *)(other -> r_end);
                if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
                    if ((ptr_t)b < old -> r_start) {
                        GC_root_size += (old -> r_start - (ptr_t)b);
                        old -> r_start = (ptr_t)b;
                    }
                    if ((ptr_t)e > old -> r_end) {
                        GC_root_size += ((ptr_t)e - old -> r_end);
                        old -> r_end = (ptr_t)e;
                    }
                    old -> r_tmp &= other -> r_tmp;
                    /* Delete this entry. */
                    GC_root_size -= (other -> r_end - other -> r_start);
                    other -> r_start = GC_static_roots[n_root_sets-1].r_start;
                    other -> r_end = GC_static_roots[n_root_sets-1].r_end;
                    n_root_sets--;
                }
            }
            return;
        }
      }
#   else
    old = GC_roots_present(b);
    if (old != 0) {
        if ((ptr_t)e <= old -> r_end) /* already there */ return;
        /* else extend */
        GC_root_size += (ptr_t)e - old -> r_end;
        old -> r_end = (ptr_t)e;
        return;
    }
#   endif
    if (n_root_sets == MAX_ROOT_SETS) {
        ABORT("Too many root sets\n");
    }
    GC_static_roots[n_root_sets].r_start = (ptr_t)b;
    GC_static_roots[n_root_sets].r_end = (ptr_t)e;
    GC_static_roots[n_root_sets].r_tmp = tmp;
#   if !defined(MSWIN32) && !defined(MSWINCE)
      GC_static_roots[n_root_sets].r_next = 0;
#   endif
    add_roots_to_index(GC_static_roots + n_root_sets);
    GC_root_size += (ptr_t)e - (ptr_t)b;
    n_root_sets++;
}
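/*
 * Illustration of the semantics described above (sketch; p is a
 * hypothetical segment base):
 *
 *     GC_add_roots_inner(p, p + 4096, FALSE);  -- adds 4096 to GC_root_size
 *     GC_add_roots_inner(p, p + 4096, FALSE);  -- same start and end: noop
 *     GC_add_roots_inner(p, p + 8192, FALSE);  -- same start: entry extended
 *
 * A range with a different, overlapping start is still handled correctly,
 * but outside Win32 it simply becomes a second entry that is scanned again.
 */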
static GC_bool roots_were_cleared = FALSE;
void GC_clear_roots GC_PROTO((void))
{
    roots_were_cleared = TRUE;
    n_root_sets = 0;
    GC_root_size = 0;
#   if !defined(MSWIN32) && !defined(MSWINCE)
      { register int i; for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0; }
#   endif
}
/* Internal use only; lock held. */
static void GC_remove_root_at_pos(i)
int i;
{
    GC_root_size -= (GC_static_roots[i].r_end - GC_static_roots[i].r_start);
    GC_static_roots[i].r_start = GC_static_roots[n_root_sets-1].r_start;
    GC_static_roots[i].r_end = GC_static_roots[n_root_sets-1].r_end;
    GC_static_roots[i].r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
    n_root_sets--;
}
#if !defined(MSWIN32) && !defined(MSWINCE)
static void GC_rebuild_root_index()
{
    register int i;
    for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
    for (i = 0; i < n_root_sets; i++)
        add_roots_to_index(GC_static_roots + i);
}
#endif
/* Internal use only; lock held. */
void GC_remove_tmp_roots()
{
    register int i;
    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_tmp) {
            GC_remove_root_at_pos(i);
        } else i++;
    }
#   if !defined(MSWIN32) && !defined(MSWINCE)
      GC_rebuild_root_index();
#   endif
}
#if !defined(MSWIN32) && !defined(MSWINCE)
void GC_remove_roots(b, e)
char * b; char * e;
{
    GC_remove_roots_inner(b, e);
}
/* Should only be called when the lock is held. */
void GC_remove_roots_inner(b,e)
char * b; char * e;
{
    int i;
    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_start >= (ptr_t)b && GC_static_roots[i].r_end <= (ptr_t)e) {
            GC_remove_root_at_pos(i);
        } else i++;
    }
    GC_rebuild_root_index();
}
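/*
 * Illustration (sketch): only root sets lying entirely inside [b,e) are
 * dropped, so after GC_add_roots(seg, seg_end) a matching
 * GC_remove_roots(seg, seg_end) removes that entry, while a call covering
 * only part of it leaves the entry untouched.
 */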
#endif /* !defined(MSWIN32) && !defined(MSWINCE) */

#if defined(MSWIN32) || defined(_WIN32_WCE_EMULATION)
/* Workaround for the OS mapping and unmapping behind our back:    */
/* Is the address p in one of the temporary static root sections?  */
GC_bool GC_is_tmp_root(p)
ptr_t p;
{
    static int last_root_set = MAX_ROOT_SETS;
    register int i;

    if (last_root_set < n_root_sets
        && p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end)
        return GC_static_roots[last_root_set].r_tmp;
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return GC_static_roots[i].r_tmp;
        }
    }
    return(FALSE);
}
#endif /* MSWIN32 || _WIN32_WCE_EMULATION */
    dummy = 42; /* Force stack to grow if necessary.  Otherwise the    */
                /* later accesses might cause the kernel to think we're */
                /* doing something wrong.                               */
#   pragma warning(disable:4172)
    return((ptr_t)(&dummy));
#   pragma warning(default:4172)

/*
 * Data structure for excluded static roots.
 * Real declaration is in gc_priv.h:

struct exclusion { ptr_t e_start; ptr_t e_end; };

struct exclusion GC_excl_table[MAX_EXCLUSIONS];
                -- Array of exclusions, in ascending address order.
*/

size_t GC_excl_table_entries = 0;       /* Number of entries in use. */
/* Return the first exclusion range that includes an address >= start_addr. */
/* Assumes the exclusion table contains at least one entry (namely the      */
/* GC data structures).                                                      */
struct exclusion * GC_next_exclusion(start_addr)
ptr_t start_addr;
{
    size_t low = 0;
    size_t high = GC_excl_table_entries - 1;
    size_t mid;

    while (high > low) {
        mid = (low + high) >> 1;
        /* low <= mid < high */
        if ((word) GC_excl_table[mid].e_end <= (word) start_addr) {
            low = mid + 1;
        } else high = mid;
    }
    if ((word) GC_excl_table[low].e_end <= (word) start_addr) return 0;
    return GC_excl_table + low;
}
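/*
 * Worked example (illustration only, not real data): with entries
 * [0x1000,0x2000), [0x5000,0x6000) and [0x9000,0xa000) in the table,
 * GC_next_exclusion((ptr_t)0x3000) returns the [0x5000,0x6000) entry,
 * since that is the first range whose end lies above 0x3000, while
 * GC_next_exclusion((ptr_t)0xb000) returns 0.
 */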
void GC_exclude_static_roots(start, finish)
GC_PTR start;
GC_PTR finish;
{
    struct exclusion * next;
    size_t next_index, i;

    if (0 == GC_excl_table_entries) {
        next = 0;
    } else {
        next = GC_next_exclusion(start);
    }
    if (0 != next) {
        if ((word)(next -> e_start) < (word) finish) {
            /* incomplete error check. */
            ABORT("exclusion ranges overlap");
        }
        if ((word)(next -> e_start) == (word) finish) {
            /* extend old range backwards */
            next -> e_start = (ptr_t)start;
            return;
        }
        next_index = next - GC_excl_table;
        for (i = GC_excl_table_entries; i > next_index; --i) {
            GC_excl_table[i] = GC_excl_table[i-1];
        }
    } else {
        next_index = GC_excl_table_entries;
    }
    if (GC_excl_table_entries == MAX_EXCLUSIONS) ABORT("Too many exclusions");
    GC_excl_table[next_index].e_start = (ptr_t)start;
    GC_excl_table[next_index].e_end = (ptr_t)finish;
    ++GC_excl_table_entries;
}
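/*
 * Typical usage (sketch; big_buffer is a hypothetical pointer-free
 * object living in an otherwise registered data segment):
 *
 *     static char big_buffer[1 << 20];
 *     GC_exclude_static_roots(big_buffer, big_buffer + sizeof(big_buffer));
 *
 * The excluded range is then skipped by the root-scanning loop below.
 */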
/* Invoke push_conditional on ranges that are not excluded. */
void GC_push_conditional_with_exclusions(bottom, top, all)
ptr_t bottom; ptr_t top; int all;
{
    struct exclusion * next;
    ptr_t excl_start;

    while (bottom < top) {
        next = GC_next_exclusion(bottom);
        if (0 == next || (excl_start = next -> e_start) >= top) {
            GC_push_conditional(bottom, top, all);
            return;
        }
        if (excl_start > bottom) GC_push_conditional(bottom, excl_start, all);
        bottom = next -> e_end;
    }
}
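/*
 * E.g. (illustration): pushing [0x1000,0x9000) with a single exclusion
 * [0x3000,0x4000) results in the two calls
 * GC_push_conditional(0x1000, 0x3000, all) and
 * GC_push_conditional(0x4000, 0x9000, all).
 */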
/*
 * In the absence of threads, push the stack contents.
 * In the presence of threads, push enough of the current stack
 * to ensure that callee-save registers saved in collector frames have been seen.
 */
void GC_push_current_stack(cold_gc_frame)
ptr_t cold_gc_frame;
{
#   if defined(THREADS)
      if (0 == cold_gc_frame) return;
#     ifdef STACK_GROWS_DOWN
        GC_push_all_eager(GC_approx_sp(), cold_gc_frame);
        /* For IA64, the register stack backing store is handled */
        /* in the thread-specific code.                           */
#     else
        GC_push_all_eager( cold_gc_frame, GC_approx_sp() );
#     endif
#   else
#     ifdef STACK_GROWS_DOWN
        GC_push_all_stack_partially_eager( GC_approx_sp(), GC_stackbottom,
                                           cold_gc_frame );
#       ifdef IA64
          /* We also need to push the register stack backing store.  */
          /* This should really be done in the same way as the       */
          /* regular stack.  For now we fudge it a bit.              */
          /* Note that the backing store grows up, so we can't use   */
          /* GC_push_all_stack_partially_eager.                      */
          {
            extern word GC_save_regs_ret_val;
                /* Previously set to backing store pointer. */
            ptr_t bsp = (ptr_t) GC_save_regs_ret_val;
            ptr_t cold_gc_bs_pointer;
            if (GC_all_interior_pointers) {
              cold_gc_bs_pointer = bsp - 2048;
              if (cold_gc_bs_pointer < BACKING_STORE_BASE) {
                cold_gc_bs_pointer = BACKING_STORE_BASE;
              } else {
                GC_push_all_stack(BACKING_STORE_BASE, cold_gc_bs_pointer);
              }
            } else {
              cold_gc_bs_pointer = BACKING_STORE_BASE;
            }
            GC_push_all_eager(cold_gc_bs_pointer, bsp);
            /* All values should be sufficiently aligned that we */
            /* don't have to worry about the boundary.           */
          }
#       endif
#     else
        GC_push_all_stack_partially_eager( GC_stackbottom, GC_approx_sp(),
                                           cold_gc_frame );
#     endif
#   endif /* !THREADS */
}

/*
 * Push GC internal roots.  Only called if there is some reason to believe
 * these would not otherwise get registered.
 */
void GC_push_gc_structures GC_PROTO((void))
{
    GC_push_finalizer_structures();
    GC_push_stubborn_structures();
#   if defined(THREADS)
      GC_push_thread_structures();
#   endif
}
#ifdef THREAD_LOCAL_ALLOC
  void GC_mark_thread_local_free_lists();
#endif

void GC_cond_register_dynamic_libraries()
{
# if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
     || defined(PCR)) && !defined(SRC_M3)
    GC_remove_tmp_roots();
    if (!GC_no_dls) GC_register_dynamic_libraries();
# endif
}
/*
 * Call the mark routines (GC_tl_push for a single pointer, GC_push_conditional
 * on groups of pointers) on every top level accessible pointer.
 * If all is FALSE, arrange to push only possibly altered values.
 * Cold_gc_frame is an address inside a GC frame that
 * remains valid until all marking is complete.
 * A zero value indicates that it's OK to miss some register values.
 */
void GC_push_roots(all, cold_gc_frame)
GC_bool all;
ptr_t cold_gc_frame;
{
    register int i;
    int kind;
    /*
     * Next push static data.  This must happen early on, since it's
     * not robust against mark stack overflow.
     */
    /* Re-register dynamic libraries, in case one got added.          */
    /* There is some argument for doing this as late as possible,     */
    /* especially on win32, where it can change asynchronously.       */
    /* In those cases, we do it here.  But on other platforms, it's   */
    /* not safe with the world stopped, so we do it earlier.          */
#   if !defined(REGISTER_LIBRARIES_EARLY)
      GC_cond_register_dynamic_libraries();
#   endif

    /* Mark everything in static data areas */
    for (i = 0; i < n_root_sets; i++) {
        GC_push_conditional_with_exclusions(
            GC_static_roots[i].r_start,
            GC_static_roots[i].r_end, all);
    }
    /* Mark all free list header blocks, if those were allocated from */
    /* the garbage collected heap.  This makes sure they don't        */
    /* disappear if we are not marking from static data.  It also     */
    /* saves us the trouble of scanning them, and possibly that of    */
    /* marking the freelists.                                         */
    for (kind = 0; kind < GC_n_kinds; kind++) {
        GC_PTR base = GC_base(GC_obj_kinds[kind].ok_freelist);
        if (0 != base) {
            GC_set_mark_bit(base);
        }
    }

    /* Mark from GC internal roots if those might otherwise have */
    /* been excluded.                                             */
    if (GC_no_dls || roots_were_cleared) {
        GC_push_gc_structures();
    }
    /* Mark thread local free lists, even if their mark        */
    /* descriptor excludes the link field.                     */
    /* If the world is not stopped, this is unsafe.  It is     */
    /* also unnecessary, since we will do this again with the  */
    /* world stopped.                                          */
#   ifdef THREAD_LOCAL_ALLOC
      if (GC_world_stopped) GC_mark_thread_local_free_lists();
#   endif

    /*
     * Now traverse stacks, and mark from register contents.
     * These must be done last, since they can legitimately overflow
     * the mark stack.
     */
#   ifdef USE_GENERIC_PUSH_REGS
      GC_generic_push_regs(cold_gc_frame);
      /* Also pushes stack, so that we catch callee-save registers */
      /* saved inside the GC_push_regs frame.                      */
#   else
      /*
       * push registers - i.e., call GC_push_one(r) for each
       * register contents r.
       */
      GC_push_regs(); /* usually defined in machine_dep.c */
#   endif
    GC_push_current_stack(cold_gc_frame);
    /* In the threads case, this only pushes collector frames.   */
    /* In the case of linux threads on IA64, the hot section of  */
    /* the main stack is marked here, but the register stack     */
    /* backing store is handled in the thread-specific code.     */
    if (GC_push_other_roots != 0) (*GC_push_other_roots)();
    /* In the threads case, this also pushes thread stacks.      */
    /* Note that without interior pointer recognition lots       */
    /* of stuff may have been pushed already, and this           */
    /* should be careful about mark stack overflows.             */
}