2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
5 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
6 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
8 * Permission is hereby granted to use or copy this program
9 * for any purpose, provided the above notices are retained on all copies.
10 * Permission to modify the code and to distribute modified code is granted,
11 * provided the above notices are retained, and a notice that the code was
12 * modified is included with the above copyright notice.
15 #include "private/gc_priv.h"
19 /* Data structure for list of root sets. */
20 /* We keep a hash table, so that we can filter out duplicate additions. */
21 /* Under Win32, we need to do a better job of filtering overlaps, so */
22 /* we resort to sequential search, and pay the price. */
23 /* This is really declared in gc_priv.h:
27 # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
28 struct roots * r_next;
31 -- Delete before registering new dynamic libraries
/* Flat array of registered root intervals; only entries [0, n_root_sets) */
/* are live. Entries are appended by GC_add_roots_inner and compacted by */
/* GC_remove_root_at_pos (swap-with-last). */
34 struct roots GC_static_roots[MAX_ROOT_SETS];
/* Nonzero suppresses automatic registration of dynamic library data */
/* segments (see GC_cond_register_dynamic_libraries below). */
37 int GC_no_dls = 0; /* Register dynamic library data segments. */
39 static int n_root_sets = 0;
40 /* GC_static_roots[0..n_root_sets) contains the valid root sets. */
42 #if !defined(NO_DEBUGGING)
/* Debug-only: print every registered root interval (flagging temporary */
/* ones) and cross-check the running GC_root_size total against a sum */
/* recomputed from the table. A mismatch indicates bookkeeping drift in */
/* the add/remove paths. */
44 void GC_print_static_roots(void)
49 for (i = 0; i < n_root_sets; i++) {
50 GC_printf("From %p to %p%s\n",
51 GC_static_roots[i].r_start,
52 GC_static_roots[i].r_end,
53 GC_static_roots[i].r_tmp ? " (temporary)" : "");
54 total += GC_static_roots[i].r_end - GC_static_roots[i].r_start;
56 GC_printf("Total size: %ld\n", (unsigned long) total);
/* Consistency check: the incrementally maintained size must match. */
57 if (GC_root_size != total) {
58 GC_err_printf("GC_root_size incorrect: %ld!!\n",
62 #endif /* !NO_DEBUGGING */
65 /* Primarily for debugging support: */
66 /* Is the address p in one of the registered static root sections? */
GC_INNER GC_bool GC_is_static_root(ptr_t p)
69 static int last_root_set = MAX_ROOT_SETS;
/* Fast path: last_root_set caches the index of the previous hit, so */
/* repeated queries for nearby addresses avoid the linear scan. It is */
/* initialized to MAX_ROOT_SETS (out of range) so the first call falls */
/* through to the loop. */
72 if (last_root_set < n_root_sets
73 && p >= GC_static_roots[last_root_set].r_start
74 && p < GC_static_roots[last_root_set].r_end) return(TRUE);
/* Slow path: linear search over all live root intervals. */
75 for (i = 0; i < n_root_sets; i++) {
76 if (p >= GC_static_roots[i].r_start
77 && p < GC_static_roots[i].r_end) {
86 #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
88 # define LOG_RT_SIZE 6
89 # define RT_SIZE (1 << LOG_RT_SIZE) -- Power of 2, may be != MAX_ROOT_SETS
91 struct roots * GC_root_index[RT_SIZE];
92 -- Hash table header. Used only to check whether a range is
94 -- really defined in gc_priv.h
/* Hash a root start address into [0, RT_SIZE) by repeatedly folding the */
/* high bits down with xor-shifts, halving the shift each step until the */
/* whole word has been mixed into the low LOG_RT_SIZE bits. */
97 GC_INLINE int rt_hash(ptr_t addr)
99 word result = (word) addr;
100 # if CPP_WORDSZ > 8*LOG_RT_SIZE
101 result ^= result >> 8*LOG_RT_SIZE;
103 # if CPP_WORDSZ > 4*LOG_RT_SIZE
104 result ^= result >> 4*LOG_RT_SIZE;
106 result ^= result >> 2*LOG_RT_SIZE;
107 result ^= result >> LOG_RT_SIZE;
108 result &= (RT_SIZE-1);
112 /* Is a range starting at b already in the table? If so return a */
113 /* pointer to it, else NULL. */
/* Note: matches on the exact start address only; chains through r_next */
/* within the rt_hash bucket. */
114 GC_INNER void * GC_roots_present(ptr_t b)
117 struct roots *p = GC_root_index[h];
120 if (p -> r_start == (ptr_t)b) return(p);
126 /* Add the given root structure to the index. */
/* Prepends p to the singly-linked chain of its rt_hash bucket; O(1). */
127 GC_INLINE void add_roots_to_index(struct roots *p)
129 int h = rt_hash(p -> r_start);
131 p -> r_next = GC_root_index[h];
132 GC_root_index[h] = p;
134 #endif /* !MSWIN32 && !MSWINCE && !CYGWIN32 */
/* Running total of bytes covered by all registered root intervals; */
/* kept in sync by the add/remove routines below. */
136 GC_INNER word GC_root_size = 0;
/* Public API: register [b,e) as a permanent (non-temporary) root range. */
/* Ensures the collector is initialized first; the real work is done */
/* under the allocation lock by GC_add_roots_inner. */
138 GC_API void GC_CALL GC_add_roots(void *b, void *e)
142 if (!GC_is_initialized) GC_init();
144 GC_add_roots_inner((ptr_t)b, (ptr_t)e, FALSE);
149 /* Add [b,e) to the root set. Adding the same interval a second time */
150 /* is a moderately fast no-op, and hence benign. We do not handle */
151 /* different but overlapping intervals efficiently. (We do handle */
152 /* them correctly.) */
153 /* Tmp specifies that the interval may be deleted before */
154 /* re-registering dynamic libraries. */
155 void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
160 b = (ptr_t)(((word)b + (sizeof(word) - 1)) & ~(sizeof(word) - 1));
161 /* round b up to word boundary */
162 e = (ptr_t)((word)e & ~(sizeof(word) - 1));
163 /* round e down to word boundary */
164 if (b >= e) return; /* nothing to do */
166 # if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
167 /* Spend the time to ensure that there are no overlapping */
168 /* or adjacent intervals. */
169 /* This could be done faster with e.g. a */
170 /* balanced tree. But the execution time here is */
171 /* virtually guaranteed to be dominated by the time it */
172 /* takes to scan the roots. */
175 old = 0; /* initialized to prevent warning. */
/* Pass 1: find the first existing interval that overlaps or abuts */
/* [b,e) and grow it in place, updating GC_root_size as we extend. */
176 for (i = 0; i < n_root_sets; i++) {
177 old = GC_static_roots + i;
178 if (b <= old -> r_end && e >= old -> r_start) {
179 if (b < old -> r_start) {
180 GC_root_size += old->r_start - b;
183 if (e > old -> r_end) {
184 GC_root_size += e - old->r_end;
191 if (i < n_root_sets) {
192 /* merge other overlapping intervals */
/* Pass 2: the grown interval may now overlap later entries; fold */
/* each such entry into "old" and delete it by swapping in the last */
/* table entry. */
195 for (i++; i < n_root_sets; i++) {
196 other = GC_static_roots + i;
197 b = other -> r_start;
199 if (b <= old -> r_end && e >= old -> r_start) {
200 if (b < old -> r_start) {
201 GC_root_size += old->r_start - b;
204 if (e > old -> r_end) {
205 GC_root_size += e - old->r_end;
/* Merged range is temporary only if BOTH inputs were temporary. */
208 old -> r_tmp &= other -> r_tmp;
209 /* Delete this entry. */
210 GC_root_size -= (other -> r_end - other -> r_start);
211 other -> r_start = GC_static_roots[n_root_sets-1].r_start;
212 other -> r_end = GC_static_roots[n_root_sets-1].r_end;
/* Non-Windows path: duplicates are detected via the hash table; an */
/* exact-start match is either a no-op or extends the existing entry. */
220 old = (struct roots *)GC_roots_present(b);
222 if (e <= old -> r_end) /* already there */ return;
224 GC_root_size += e - old -> r_end;
/* No existing entry absorbed the range: append a fresh one. */
229 if (n_root_sets == MAX_ROOT_SETS) {
230 ABORT("Too many root sets");
232 GC_static_roots[n_root_sets].r_start = (ptr_t)b;
233 GC_static_roots[n_root_sets].r_end = (ptr_t)e;
234 GC_static_roots[n_root_sets].r_tmp = tmp;
235 # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
236 GC_static_roots[n_root_sets].r_next = 0;
237 add_roots_to_index(GC_static_roots + n_root_sets);
239 GC_root_size += e - b;
/* Set once GC_clear_roots has run; consulted by GC_push_roots to decide */
/* whether GC-internal structures must be pushed explicitly. */
243 static GC_bool roots_were_cleared = FALSE;
/* Public API: discard every registered root interval (and, on non- */
/* Windows targets, the hash index over them). */
245 GC_API void GC_CALL GC_clear_roots(void)
249 if (!GC_is_initialized) GC_init();
251 roots_were_cleared = TRUE;
254 # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
255 BZERO(GC_root_index, RT_SIZE * sizeof(void *));
260 /* Internal use only; lock held. */
/* Delete entry i in O(1) by overwriting it with the last live entry */
/* (order is not preserved), adjusting GC_root_size first. */
/* NOTE(review): the decrement of n_root_sets is not visible in this */
/* excerpt -- presumably it follows the copies; confirm in full source. */
261 STATIC void GC_remove_root_at_pos(int i)
263 GC_root_size -= (GC_static_roots[i].r_end - GC_static_roots[i].r_start);
264 GC_static_roots[i].r_start = GC_static_roots[n_root_sets-1].r_start;
265 GC_static_roots[i].r_end = GC_static_roots[n_root_sets-1].r_end;
266 GC_static_roots[i].r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
270 #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
/* Rebuild the hash index from scratch after removals invalidated the */
/* r_next chains (entries may have moved in GC_static_roots). */
271 STATIC void GC_rebuild_root_index(void)
274 BZERO(GC_root_index, RT_SIZE * sizeof(void *));
275 for (i = 0; i < n_root_sets; i++)
276 add_roots_to_index(GC_static_roots + i);
280 #if defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
281 || defined(PCR) || defined(CYGWIN32)
282 /* Internal use only; lock held. */
/* Drop every root interval registered as temporary (r_tmp), then */
/* rebuild the hash index. i is only advanced when no removal occurs, */
/* because removal swaps a new (unexamined) entry into slot i. */
283 STATIC void GC_remove_tmp_roots(void)
287 for (i = 0; i < n_root_sets; ) {
288 if (GC_static_roots[i].r_tmp) {
289 GC_remove_root_at_pos(i);
294 # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
295 GC_rebuild_root_index();
300 #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
301 STATIC void GC_remove_roots_inner(ptr_t b, ptr_t e);
/* Public API: unregister root intervals fully contained in [b,e). */
303 GC_API void GC_CALL GC_remove_roots(void *b, void *e)
307 /* Quick check whether has nothing to do */
/* Apply the same word-alignment as GC_add_roots_inner (b rounded up, */
/* e rounded down); an empty rounded range is a no-op. */
308 if ((((word)b + (sizeof(word) - 1)) & ~(sizeof(word) - 1)) >=
309 ((word)e & ~(sizeof(word) - 1)))
313 GC_remove_roots_inner((ptr_t)b, (ptr_t)e);
317 /* Should only be called when the lock is held */
/* Removes only intervals wholly inside [b,e); partial overlaps are */
/* left untouched. i advances only when nothing was removed (removal */
/* swaps the last entry into slot i). Index is rebuilt afterwards. */
318 STATIC void GC_remove_roots_inner(ptr_t b, ptr_t e)
321 for (i = 0; i < n_root_sets; ) {
322 if (GC_static_roots[i].r_start >= b
323 && GC_static_roots[i].r_end <= e) {
324 GC_remove_root_at_pos(i);
329 GC_rebuild_root_index();
331 #endif /* !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) */
333 #if (defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)) \
334 && !defined(NO_DEBUGGING)
335 /* Not used at present (except for, may be, debugging purpose). */
336 /* Workaround for the OS mapping and unmapping behind our back: */
337 /* Is the address p in one of the temporary static root sections? */
338 GC_bool GC_is_tmp_root(ptr_t p)
/* Same last-hit cache scheme as GC_is_static_root: remember the index */
/* of the previous match to short-circuit the linear scan. */
340 static int last_root_set = MAX_ROOT_SETS;
343 if (last_root_set < n_root_sets
344 && p >= GC_static_roots[last_root_set].r_start
345 && p < GC_static_roots[last_root_set].r_end)
346 return GC_static_roots[last_root_set].r_tmp;
347 for (i = 0; i < n_root_sets; i++) {
348 if (p >= GC_static_roots[i].r_start
349 && p < GC_static_roots[i].r_end) {
351 return GC_static_roots[i].r_tmp;
356 #endif /* MSWIN32 || MSWINCE || CYGWIN32 */
/* Return an approximation of the current stack pointer (body not */
/* visible in this excerpt -- typically the address of a local). */
358 GC_INNER ptr_t GC_approx_sp(void)
362 /* Also force stack to grow if necessary. Otherwise the */
363 /* later accesses might cause the kernel to think we're */
364 /* doing something wrong. */
366 /* GNU C: alternatively, we may return the value of */
367 /*__builtin_frame_address(0). */
371 * Data structure for excluded static roots.
372 * Real declaration is in gc_priv.h.
379 struct exclusion GC_excl_table[MAX_EXCLUSIONS];
380 -- Array of exclusions, ascending
/* Invariant: entries are kept sorted by address and non-overlapping; */
/* GC_next_exclusion relies on this for its binary search. */
384 STATIC size_t GC_excl_table_entries = 0;/* Number of entries in use. */
386 /* Return the first exclusion range that includes an address >= start_addr */
387 /* Assumes the exclusion table contains at least one entry (namely the */
388 /* GC data structures). */
/* Binary search over the sorted exclusion table; returns 0 (NULL) if */
/* every exclusion ends at or before start_addr. */
389 STATIC struct exclusion * GC_next_exclusion(ptr_t start_addr)
392 size_t high = GC_excl_table_entries - 1;
396 mid = (low + high) >> 1;
397 /* low <= mid < high */
398 if ((word) GC_excl_table[mid].e_end <= (word) start_addr) {
404 if ((word) GC_excl_table[low].e_end <= (word) start_addr) return 0;
405 return GC_excl_table + low;
408 /* Should only be called when the lock is held. The range boundaries */
409 /* should be properly aligned and valid. */
/* Insert [start,finish) into the sorted exclusion table, either by */
/* extending an adjacent existing range backwards or by shifting later */
/* entries up to make room. Aborts on overlap or table overflow. */
410 GC_INNER void GC_exclude_static_roots_inner(void *start, void *finish)
412 struct exclusion * next;
413 size_t next_index, i;
415 GC_ASSERT((word)start % sizeof(word) == 0);
416 GC_ASSERT(start < finish);
418 if (0 == GC_excl_table_entries) {
421 next = GC_next_exclusion(start);
424 if ((word)(next -> e_start) < (word) finish) {
425 /* incomplete error check. */
426 ABORT("Exclusion ranges overlap");
428 if ((word)(next -> e_start) == (word) finish) {
429 /* extend old range backwards */
430 next -> e_start = (ptr_t)start;
/* Shift entries at and after the insertion point one slot up to keep */
/* the table sorted. */
433 next_index = next - GC_excl_table;
434 for (i = GC_excl_table_entries; i > next_index; --i) {
435 GC_excl_table[i] = GC_excl_table[i-1];
438 next_index = GC_excl_table_entries;
440 if (GC_excl_table_entries == MAX_EXCLUSIONS) ABORT("Too many exclusions");
441 GC_excl_table[next_index].e_start = (ptr_t)start;
442 GC_excl_table[next_index].e_end = (ptr_t)finish;
443 ++GC_excl_table_entries;
/* Public API: exclude [b,e) from root scanning. The upper bound is */
/* rounded down to a word boundary before delegating to the inner */
/* routine (which expects aligned, valid boundaries). */
446 GC_API void GC_CALL GC_exclude_static_roots(void *b, void *e)
450 /* Adjust the upper boundary for safety (round down) */
451 e = (void *)((word)e & ~(sizeof(word) - 1));
453 if (b == e) return; /* nothing to exclude? */
456 GC_exclude_static_roots_inner(b, e);
460 /* Invoke push_conditional on ranges that are not excluded. */
/* Walks [bottom,top), skipping over each exclusion range found by */
/* GC_next_exclusion, and pushes only the uncovered gaps. */
462 STATIC void GC_push_conditional_with_exclusions(ptr_t bottom, ptr_t top,
465 struct exclusion * next;
468 while (bottom < top) {
469 next = GC_next_exclusion(bottom);
/* No exclusion intersects the remainder: push it all and stop. */
470 if (0 == next || (excl_start = next -> e_start) >= top) {
471 GC_push_conditional(bottom, top, all);
/* Push the gap before the exclusion, then resume after its end. */
474 if (excl_start > bottom) GC_push_conditional(bottom, excl_start, all);
475 bottom = next -> e_end;
480 /* Similar to GC_push_all_stack_sections() but for IA-64 registers store. */
/* Walks the chain of traced stack sections from innermost outward, */
/* pushing each backing-store slice either eagerly or conditionally */
/* according to the "eager" flag, then pushes the outermost remainder */
/* [bs_lo, bs_hi). The backing store grows upward. */
481 GC_INNER void GC_push_all_register_sections(ptr_t bs_lo, ptr_t bs_hi,
482 int eager, struct GC_traced_stack_sect_s *traced_stack_sect)
484 while (traced_stack_sect != NULL) {
485 ptr_t frame_bs_lo = traced_stack_sect -> backing_store_end;
486 GC_ASSERT(frame_bs_lo <= bs_hi);
488 GC_push_all_eager(frame_bs_lo, bs_hi);
490 GC_push_all_stack(frame_bs_lo, bs_hi);
/* Continue with the portion saved by the next outer section. */
492 bs_hi = traced_stack_sect -> saved_backing_store_ptr;
493 traced_stack_sect = traced_stack_sect -> prev;
495 GC_ASSERT(bs_lo <= bs_hi);
497 GC_push_all_eager(bs_lo, bs_hi);
499 GC_push_all_stack(bs_lo, bs_hi);
/* Push the stack from lo (hotter end) to hi, honoring the chain of */
/* traced stack sections: each section splits the stack into a slice */
/* that is pushed now and a saved pointer where the next outer slice */
/* resumes. Handles both stack growth directions. */
506 GC_INNER void GC_push_all_stack_sections(ptr_t lo, ptr_t hi,
507 struct GC_traced_stack_sect_s *traced_stack_sect)
509 while (traced_stack_sect != NULL) {
510 GC_ASSERT(lo HOTTER_THAN (ptr_t)traced_stack_sect);
511 # ifdef STACK_GROWS_UP
512 GC_push_all_stack((ptr_t)traced_stack_sect, lo);
513 # else /* STACK_GROWS_DOWN */
514 GC_push_all_stack(lo, (ptr_t)traced_stack_sect);
/* Resume from the stack pointer recorded when this section was */
/* entered; walk outward to the previous section. */
516 lo = traced_stack_sect -> saved_stack_ptr;
517 GC_ASSERT(lo != NULL);
518 traced_stack_sect = traced_stack_sect -> prev;
520 GC_ASSERT(!(hi HOTTER_THAN lo));
521 # ifdef STACK_GROWS_UP
522 /* We got them backwards! */
523 GC_push_all_stack(hi, lo);
524 # else /* STACK_GROWS_DOWN */
525 GC_push_all_stack(lo, hi);
532 /* Defined in mark.c. */
533 void GC_add_trace_entry(char *kind, word arg1, word arg2);
536 /* Similar to GC_push_all_eager, but only the */
537 /* part hotter than cold_gc_frame is scanned */
538 /* immediately. Needed to ensure that callee- */
539 /* save registers are not missed. */
541 * A version of GC_push_all that treats all interior pointers as valid
542 * and scans part of the area immediately, to make sure that saved
543 * register values are not lost.
544 * Cold_gc_frame delimits the stack section that must be scanned
545 * eagerly. A zero value indicates that no eager scanning is needed.
546 * We don't need to worry about the MANUAL_VDB case here, since this
547 * is only called in the single-threaded case. We assume that we
548 * cannot collect between an assignment and the corresponding
551 STATIC void GC_push_all_stack_partially_eager(ptr_t bottom, ptr_t top,
554 if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
555 /* Push the hot end of the stack eagerly, so that register values */
556 /* saved inside GC frames are marked before they disappear. */
557 /* The rest of the marking can be deferred until later. */
558 if (0 == cold_gc_frame) {
559 GC_push_all_stack(bottom, top);
562 GC_ASSERT(bottom <= cold_gc_frame && cold_gc_frame <= top);
/* The sizeof(ptr_t) overlap at cold_gc_frame ensures the word at */
/* the boundary is covered by one of the two pushes. */
563 # ifdef STACK_GROWS_DOWN
564 GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
565 GC_push_all_eager(bottom, cold_gc_frame);
566 # else /* STACK_GROWS_UP */
567 GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
568 GC_push_all_eager(cold_gc_frame, top);
569 # endif /* STACK_GROWS_UP */
/* Without interior-pointer recognition the whole range must be */
/* scanned eagerly. */
571 GC_push_all_eager(bottom, top);
574 GC_add_trace_entry("GC_push_all_stack", bottom, top);
578 /* Similar to GC_push_all_stack_sections() but also uses cold_gc_frame. */
/* Only the innermost slice can contain the cold frame; cold_gc_frame */
/* is cleared after its first use so outer slices are pushed without it. */
579 STATIC void GC_push_all_stack_part_eager_sections(ptr_t lo, ptr_t hi,
580 ptr_t cold_gc_frame, struct GC_traced_stack_sect_s *traced_stack_sect)
582 GC_ASSERT(traced_stack_sect == NULL || cold_gc_frame == NULL ||
583 cold_gc_frame HOTTER_THAN (ptr_t)traced_stack_sect);
585 while (traced_stack_sect != NULL) {
586 GC_ASSERT(lo HOTTER_THAN (ptr_t)traced_stack_sect);
587 # ifdef STACK_GROWS_UP
588 GC_push_all_stack_partially_eager((ptr_t)traced_stack_sect, lo,
590 # else /* STACK_GROWS_DOWN */
591 GC_push_all_stack_partially_eager(lo, (ptr_t)traced_stack_sect,
594 lo = traced_stack_sect -> saved_stack_ptr;
595 GC_ASSERT(lo != NULL);
596 traced_stack_sect = traced_stack_sect -> prev;
597 cold_gc_frame = NULL; /* Use at most once. */
600 GC_ASSERT(!(hi HOTTER_THAN lo));
601 # ifdef STACK_GROWS_UP
602 /* We got them backwards! */
603 GC_push_all_stack_partially_eager(hi, lo, cold_gc_frame);
604 # else /* STACK_GROWS_DOWN */
605 GC_push_all_stack_partially_eager(lo, hi, cold_gc_frame);
609 #endif /* !THREADS */
611 /* Push enough of the current stack eagerly to */
612 /* ensure that callee-save registers saved in */
613 /* GC frames are scanned. */
614 /* In the non-threads case, schedule entire */
615 /* stack for scanning. */
616 /* The second argument is a pointer to the */
617 /* (possibly null) thread context, for */
618 /* (currently hypothetical) more precise */
619 /* stack scanning. */
621 * In the absence of threads, push the stack contents.
622 * In the presence of threads, push enough of the current stack
623 * to ensure that callee-save registers saved in collector frames have been
625 * FIXME: Merge with per-thread stuff.
/* Push the current thread's stack (and, on IA-64, its register stack */
/* backing store). With THREADS, only the eager portion between the */
/* approximate SP and cold_gc_frame is pushed here; per-thread code */
/* handles the rest. Without THREADS, the whole stack is scheduled. */
628 STATIC void GC_push_current_stack(ptr_t cold_gc_frame, void * context)
630 # if defined(THREADS)
631 if (0 == cold_gc_frame) return;
632 # ifdef STACK_GROWS_DOWN
633 GC_push_all_eager(GC_approx_sp(), cold_gc_frame);
634 /* For IA64, the register stack backing store is handled */
635 /* in the thread-specific code. */
637 GC_push_all_eager(cold_gc_frame, GC_approx_sp())
640 GC_push_all_stack_part_eager_sections(GC_approx_sp(), GC_stackbottom,
641 cold_gc_frame, GC_traced_stack_sect);
643 /* We also need to push the register stack backing store. */
644 /* This should really be done in the same way as the */
645 /* regular stack. For now we fudge it a bit. */
646 /* Note that the backing store grows up, so we can't use */
647 /* GC_push_all_stack_partially_eager. */
649 ptr_t bsp = GC_save_regs_ret_val;
/* 2048 bytes below the current backing-store pointer are treated */
/* as the "hot" region and pushed eagerly. */
650 ptr_t cold_gc_bs_pointer = bsp - 2048;
651 if (GC_all_interior_pointers &&
652 cold_gc_bs_pointer > BACKING_STORE_BASE) {
653 /* Adjust cold_gc_bs_pointer if below our innermost */
654 /* "traced stack section" in backing store. */
655 if (GC_traced_stack_sect != NULL && cold_gc_bs_pointer <
656 GC_traced_stack_sect->backing_store_end)
658 GC_traced_stack_sect->backing_store_end;
659 GC_push_all_register_sections(BACKING_STORE_BASE,
660 cold_gc_bs_pointer, FALSE, GC_traced_stack_sect);
661 GC_push_all_eager(cold_gc_bs_pointer, bsp);
/* Interior pointers off: everything must be scanned eagerly. */
663 GC_push_all_register_sections(BACKING_STORE_BASE, bsp,
664 TRUE /* eager */, GC_traced_stack_sect);
666 /* All values should be sufficiently aligned that we */
667 /* don't have to worry about the boundary. */
670 # endif /* !THREADS */
/* Optional hook set by the typed-allocation module; null when unused. */
673 GC_INNER void (*GC_push_typed_structures)(void) = 0;
675 /* Push GC internal roots. These are normally */
676 /* included in the static data segment, and */
677 /* Thus implicitly pushed. But we must do this */
678 /* explicitly if normal root processing is */
681 * Push GC internal roots. Only called if there is some reason to believe
682 * these would not otherwise get registered.
684 STATIC void GC_push_gc_structures(void)
686 GC_push_finalizer_structures();
687 # if defined(THREADS)
688 GC_push_thread_structures();
690 if( GC_push_typed_structures )
691 GC_push_typed_structures();
/* Re-scan dynamic library data segments on platforms that support it: */
/* first drop stale temporary roots, then re-register unless the user */
/* disabled dynamic-library scanning via GC_no_dls. */
694 GC_INNER void GC_cond_register_dynamic_libraries(void)
696 # if defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
697 || defined(CYGWIN32) || defined(PCR)
698 GC_remove_tmp_roots();
699 if (!GC_no_dls) GC_register_dynamic_libraries();
/* Push register contents and the stack: spill callee-save registers */
/* onto the stack via GC_with_callee_saves_pushed, then scan with */
/* GC_push_current_stack. */
705 STATIC void GC_push_regs_and_stack(ptr_t cold_gc_frame)
707 GC_with_callee_saves_pushed(GC_push_current_stack, cold_gc_frame);
711 * Call the mark routines (GC_tl_push for a single pointer, GC_push_conditional
712 * on groups of pointers) on every top level accessible pointer.
713 * If all is FALSE, arrange to push only possibly altered values.
714 * Cold_gc_frame is an address inside a GC frame that
715 * remains valid until all marking is complete.
716 * A zero value indicates that it's OK to miss some
719 GC_INNER void GC_push_roots(GC_bool all, ptr_t cold_gc_frame)
725 * Next push static data. This must happen early on, since it's
726 * not robust against mark stack overflow.
728 /* Re-register dynamic libraries, in case one got added. */
729 /* There is some argument for doing this as late as possible, */
730 /* especially on win32, where it can change asynchronously. */
731 /* In those cases, we do it here. But on other platforms, it's */
732 /* not safe with the world stopped, so we do it earlier. */
733 # if !defined(REGISTER_LIBRARIES_EARLY)
734 GC_cond_register_dynamic_libraries();
737 /* Mark everything in static data areas */
738 for (i = 0; i < n_root_sets; i++) {
739 GC_push_conditional_with_exclusions(
740 GC_static_roots[i].r_start,
741 GC_static_roots[i].r_end, all);
744 /* Mark all free list header blocks, if those were allocated from */
745 /* the garbage collected heap. This makes sure they don't */
746 /* disappear if we are not marking from static data. It also */
747 /* saves us the trouble of scanning them, and possibly that of */
748 /* marking the freelists. */
749 for (kind = 0; kind < GC_n_kinds; kind++) {
750 void *base = GC_base(GC_obj_kinds[kind].ok_freelist);
752 GC_set_mark_bit(base);
756 /* Mark from GC internal roots if those might otherwise have */
/* GC structures are pushed explicitly only when normal static-root */
/* processing would miss them (no DLS scanning, or roots cleared). */
758 if (GC_no_dls || roots_were_cleared) {
759 GC_push_gc_structures();
762 /* Mark thread local free lists, even if their mark */
763 /* descriptor excludes the link field. */
764 /* If the world is not stopped, this is unsafe. It is */
765 /* also unnecessary, since we will do this again with the */
767 # if defined(THREAD_LOCAL_ALLOC)
768 if (GC_world_stopped) GC_mark_thread_local_free_lists();
772 * Now traverse stacks, and mark from register contents.
773 * These must be done last, since they can legitimately overflow
775 * This is usually done by saving the current context on the
776 * stack, and then just tracing from the stack.
778 GC_push_regs_and_stack(cold_gc_frame);
780 if (GC_push_other_roots != 0) (*GC_push_other_roots)();
781 /* In the threads case, this also pushes thread stacks. */
782 /* Note that without interior pointer recognition lots */
783 /* of stuff may have been pushed already, and this */
784 /* should be careful about mark stack overflows. */