3 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
4 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
5 * Copyright (c) 2000 by Hewlett-Packard Company. All rights reserved.
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
21 # include "private/gc_pmark.h"
23 #if defined(MSWIN32) && defined(__GNUC__)
27 /* We put this here to minimize the risk of inlining. */
30 void GC_noop(void *p, ...) {}
35 /* Single argument version, robust against whole program analysis. */
39 static VOLATILE word sink;
44 /* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */
46 word GC_n_mark_procs = GC_RESERVED_MARK_PROCS;
48 /* Initialize GC_obj_kinds and the standard free lists properly. */
49 /* This must be done statically since they may be accessed before */
50 /* GC_init is called. */
51 /* It's done here, since we need to deal with mark descriptors. */
52 struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
53 /* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
54 0 | GC_DS_LENGTH, FALSE, FALSE },
55 /* NORMAL */ { &GC_objfreelist[0], 0,
56 0 | GC_DS_LENGTH, /* Adjusted in GC_init_inner for EXTRA_BYTES */
57 TRUE /* add length to descr */, TRUE },
59 { &GC_uobjfreelist[0], 0,
60 0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
61 # ifdef ATOMIC_UNCOLLECTABLE
63 { &GC_auobjfreelist[0], 0,
64 0 | GC_DS_LENGTH, FALSE /* add length to descr */, FALSE },
66 # ifdef STUBBORN_ALLOC
67 /*STUBBORN*/ { &GC_sobjfreelist[0], 0,
68 0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
72 # ifdef ATOMIC_UNCOLLECTABLE
73 # ifdef STUBBORN_ALLOC
79 # ifdef STUBBORN_ALLOC
87 # ifndef INITIAL_MARK_STACK_SIZE
88 # define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
89 /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a */
90 /* multiple of HBLKSIZE. */
91 /* The incremental collector actually likes a larger */
92 /* size, since it wants to push all marked dirty objs */
93 /* before marking anything new. Currently we let it */
94 /* grow dynamically. */
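/* Illustrative sketch (not part of the collector): one way to check the  */
/* sizing constraint stated above at compile time, using the classic      */
/* negative-array-size trick.  The typedef name is hypothetical.          */
#if 0
  typedef char GC_mark_stack_size_is_block_multiple
      [(INITIAL_MARK_STACK_SIZE * sizeof(mse)) % HBLKSIZE == 0 ? 1 : -1];
#endif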
98 * Limits of stack for GC_mark routine.
99 * All ranges between GC_mark_stack(incl.) and GC_mark_stack_top(incl.) still
100 * need to be marked from.
103 word GC_n_rescuing_pages; /* Number of dirty pages we marked from */
104 /* excludes ptrfree pages, etc. */
108 mse * GC_mark_stack_limit;
110 word GC_mark_stack_size = 0;
113 mse * VOLATILE GC_mark_stack_top;
115 mse * GC_mark_stack_top;
118 static struct hblk * scan_ptr;
120 mark_state_t GC_mark_state = MS_NONE;
122 GC_bool GC_mark_stack_too_small = FALSE;
124 GC_bool GC_objects_are_marked = FALSE; /* Are there collectable marked */
125 /* objects in the heap? */
127 /* Is a collection in progress? Note that this can return true in the */
128 /* nonincremental case, if a collection has been abandoned and the */
129 /* mark state is now MS_INVALID. */
130 GC_bool GC_collection_in_progress()
132 return(GC_mark_state != MS_NONE);
135 /* clear all mark bits in the header */
136 void GC_clear_hdr_marks(hhdr)
139 # ifdef USE_MARK_BYTES
140 BZERO(hhdr -> hb_marks, MARK_BITS_SZ);
142 BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
146 /* Set all mark bits in the header. Used for uncollectable blocks. */
147 void GC_set_hdr_marks(hhdr)
152 for (i = 0; i < MARK_BITS_SZ; ++i) {
153 # ifdef USE_MARK_BYTES
154 hhdr -> hb_marks[i] = 1;
156 hhdr -> hb_marks[i] = ONES;
162 * Clear all mark bits associated with block h.
165 # if defined(__STDC__) || defined(__cplusplus)
166 static void clear_marks_for_block(struct hblk *h, word dummy)
168 static void clear_marks_for_block(h, dummy)
173 register hdr * hhdr = HDR(h);
175 if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
176 /* Mark bit for these is cleared only once the object is */
177 /* explicitly deallocated. This either frees the block, or */
178 /* the bit is cleared once the object is on the free list. */
179 GC_clear_hdr_marks(hhdr);
182 /* Slow but general routines for setting/clearing/asking about mark bits */
183 void GC_set_mark_bit(p)
186 register struct hblk *h = HBLKPTR(p);
187 register hdr * hhdr = HDR(h);
188 register int word_no = (word *)p - (word *)h;
190 set_mark_bit_from_hdr(hhdr, word_no);
193 void GC_clear_mark_bit(p)
196 register struct hblk *h = HBLKPTR(p);
197 register hdr * hhdr = HDR(h);
198 register int word_no = (word *)p - (word *)h;
200 clear_mark_bit_from_hdr(hhdr, word_no);
203 GC_bool GC_is_marked(p)
206 register struct hblk *h = HBLKPTR(p);
207 register hdr * hhdr = HDR(h);
208 register int word_no = (word *)p - (word *)h;
210 return(mark_bit_from_hdr(hhdr, word_no));
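/* Hedged usage sketch for the routines above (not part of the collector): */
/* given some candidate pointer, find the object base with GC_base() and   */
/* mark it if it is not already marked.  The variable names are made up.   */
#if 0
  {
    GC_PTR candidate = 0;                /* hypothetical pointer to test */
    ptr_t base = (ptr_t)GC_base(candidate);

    if (base != 0 && !GC_is_marked(base)) {
        GC_set_mark_bit(base);
    }
  }
#endif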
215 * Clear mark bits in all allocated heap blocks. This invalidates
216 * the marker invariant, and sets GC_mark_state to reflect this.
217 * (This implicitly starts marking to reestablish the invariant.)
219 void GC_clear_marks()
221 GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
222 GC_objects_are_marked = FALSE;
223 GC_mark_state = MS_INVALID;
226 /* Counters reflect currently marked objects: reset here */
227 GC_composite_in_use = 0;
228 GC_atomic_in_use = 0;
233 /* Initiate a garbage collection. Initiates a full collection if the */
234 /* mark state is invalid. */
236 void GC_initiate_gc()
238 if (GC_dirty_maintained) GC_read_dirty();
239 # ifdef STUBBORN_ALLOC
244 extern void GC_check_dirty();
246 if (GC_dirty_maintained) GC_check_dirty();
249 GC_n_rescuing_pages = 0;
250 if (GC_mark_state == MS_NONE) {
251 GC_mark_state = MS_PUSH_RESCUERS;
252 } else if (GC_mark_state != MS_INVALID) {
253 ABORT("unexpected state");
254 } /* else this is really a full collection, and mark */
255 /* bits are invalid. */
260 static void alloc_mark_stack();
262 /* Perform a small amount of marking. */
263 /* We try to touch roughly a page of memory. */
264 /* Return TRUE if we just finished a mark phase. */
265 /* Cold_gc_frame is an address inside a GC frame that */
266 /* remains valid until all marking is complete. */
267 /* A zero value indicates that it's OK to miss some */
268 /* register values. */
269 /* We hold the allocation lock. In the case of */
270 /* incremental collection, the world may not be stopped.*/
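/* A hedged sketch (not part of this file) of how a caller might drive    */
/* the incremental marker: invoke GC_mark_some() repeatedly, with the     */
/* allocation lock held, until it reports that a mark phase finished.     */
/* Passing 0 for cold_gc_frame accepts possibly missing register values.  */
#if 0
  {
    while (!GC_mark_some((ptr_t)0)) {
        /* A real caller could interleave a bounded amount of other work */
        /* here between marking steps.                                    */
    }
  }
#endif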
272 /* For win32, this is called after we establish a structured */
273 /* exception handler, in case Windows unmaps one of our root */
274 /* segments. See below. In either case, we acquire the */
275 /* allocator lock long before we get here. */
276 GC_bool GC_mark_some_inner(cold_gc_frame)
279 GC_bool GC_mark_some(cold_gc_frame)
283 switch(GC_mark_state) {
287 case MS_PUSH_RESCUERS:
288 if (GC_mark_stack_top
289 >= GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/2) {
290 /* Go ahead and mark, even though that might cause us to */
291 /* see more marked dirty objects later on. Avoid this in the future. */
293 GC_mark_stack_too_small = TRUE;
294 MARK_FROM_MARK_STACK();
297 scan_ptr = GC_push_next_marked_dirty(scan_ptr);
300 if (GC_print_stats) {
301 GC_printf1("Marked from %lu dirty pages\n",
302 (unsigned long)GC_n_rescuing_pages);
305 GC_push_roots(FALSE, cold_gc_frame);
306 GC_objects_are_marked = TRUE;
307 if (GC_mark_state != MS_INVALID) {
308 GC_mark_state = MS_ROOTS_PUSHED;
314 case MS_PUSH_UNCOLLECTABLE:
315 if (GC_mark_stack_top
316 >= GC_mark_stack + GC_mark_stack_size/4) {
317 # ifdef PARALLEL_MARK
318 /* Avoid this, since we don't parallelize the marker */
320 if (GC_parallel) GC_mark_stack_too_small = TRUE;
322 MARK_FROM_MARK_STACK();
325 scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
327 GC_push_roots(TRUE, cold_gc_frame);
328 GC_objects_are_marked = TRUE;
329 if (GC_mark_state != MS_INVALID) {
330 GC_mark_state = MS_ROOTS_PUSHED;
336 case MS_ROOTS_PUSHED:
337 # ifdef PARALLEL_MARK
338 /* In the incremental GC case, this currently doesn't */
339 /* quite do the right thing, since it runs to */
340 /* completion. On the other hand, starting a */
341 /* parallel marker is expensive, so perhaps it is */
342 /* the right thing? */
343 /* Eventually, incremental marking should run */
344 /* asynchronously in multiple threads, without grabbing */
345 /* the allocation lock. */
347 GC_do_parallel_mark();
348 GC_ASSERT(GC_mark_stack_top < GC_first_nonempty);
349 GC_mark_stack_top = GC_mark_stack - 1;
350 if (GC_mark_stack_too_small) {
351 alloc_mark_stack(2*GC_mark_stack_size);
353 if (GC_mark_state == MS_ROOTS_PUSHED) {
354 GC_mark_state = MS_NONE;
361 if (GC_mark_stack_top >= GC_mark_stack) {
362 MARK_FROM_MARK_STACK();
365 GC_mark_state = MS_NONE;
366 if (GC_mark_stack_too_small) {
367 alloc_mark_stack(2*GC_mark_stack_size);
373 case MS_PARTIALLY_INVALID:
374 if (!GC_objects_are_marked) {
375 GC_mark_state = MS_PUSH_UNCOLLECTABLE;
378 if (GC_mark_stack_top >= GC_mark_stack) {
379 MARK_FROM_MARK_STACK();
382 if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
383 /* About to start a heap scan for marked objects. */
384 /* Mark stack is empty. OK to reallocate. */
385 if (GC_mark_stack_too_small) {
386 alloc_mark_stack(2*GC_mark_stack_size);
388 GC_mark_state = MS_PARTIALLY_INVALID;
390 scan_ptr = GC_push_next_marked(scan_ptr);
391 if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
392 GC_push_roots(TRUE, cold_gc_frame);
393 GC_objects_are_marked = TRUE;
394 if (GC_mark_state != MS_INVALID) {
395 GC_mark_state = MS_ROOTS_PUSHED;
400 ABORT("GC_mark_some: bad state");
411 EXCEPTION_REGISTRATION ex_reg;
416 static EXCEPTION_DISPOSITION mark_ex_handler(
417 struct _EXCEPTION_RECORD *ex_rec,
419 struct _CONTEXT *context,
422 if (ex_rec->ExceptionCode == STATUS_ACCESS_VIOLATION) {
423 ext_ex_regn *xer = (ext_ex_regn *)est_frame;
425 /* Unwind from the inner function assuming the standard */
426 /* function prologue. */
427 /* Assumes code has not been compiled with */
428 /* -fomit-frame-pointer. */
429 context->Esp = context->Ebp;
430 context->Ebp = *((DWORD *)context->Esp);
431 context->Esp = context->Esp - 8;
433 /* Resume execution at the "real" handler within the */
434 /* wrapper function. */
435 context->Eip = (DWORD )(xer->alt_path);
437 return ExceptionContinueExecution;
440 return ExceptionContinueSearch;
443 # endif /* __GNUC__ */
446 GC_bool GC_mark_some(cold_gc_frame)
452 /* Windows 98 appears to asynchronously create and remove */
453 /* writable memory mappings, for reasons we haven't yet */
454 /* understood. Since we look for writable regions to */
455 /* determine the root set, we may try to mark from an */
456 /* address range that disappeared since we started the */
457 /* collection. Thus we have to recover from faults here. */
458 /* This code does not appear to be necessary for Windows */
459 /* 95/NT/2000. Note that this code should never generate */
460 /* an incremental GC write fault. */
464 # else /* __GNUC__ */
466 /* Manually install an exception handler since GCC does */
467 /* not yet support Structured Exception Handling (SEH) on Win32. */
472 er.alt_path = &&handle_ex;
473 er.ex_reg.handler = mark_ex_handler;
474 asm volatile ("movl %%fs:0, %0" : "=r" (er.ex_reg.prev));
475 asm volatile ("movl %0, %%fs:0" : : "r" (&er));
477 # endif /* __GNUC__ */
479 ret_val = GC_mark_some_inner(cold_gc_frame);
483 } __except (GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ?
484 EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
486 # else /* __GNUC__ */
488 /* Prevent GCC from considering the following code unreachable */
489 /* and thus eliminating it. */
490 if (er.alt_path != 0)
494 /* Execution resumes from here on an access violation. */
496 # endif /* __GNUC__ */
499 if (GC_print_stats) {
500 GC_printf0("Caught ACCESS_VIOLATION in marker. "
501 "Memory mapping disappeared.\n");
503 # endif /* CONDPRINT */
505 /* We have bad roots on the stack. Discard mark stack. */
506 /* Rescan from marked objects. Redetermine roots. */
507 GC_invalidate_mark_state();
516 # else /* __GNUC__ */
519 /* Uninstall the exception handler */
520 asm volatile ("mov %0, %%fs:0" : : "r" (er.ex_reg.prev));
522 # endif /* __GNUC__ */
529 GC_bool GC_mark_stack_empty()
531 return(GC_mark_stack_top < GC_mark_stack);
535 word GC_prof_array[10];
536 # define PROF(n) GC_prof_array[n]++
541 /* Given a pointer to someplace other than a small object page or the */
542 /* first page of a large object, either: */
543 /* - return a pointer to somewhere in the first page of the large */
544 /* object, if current points to a large object. */
545 /* In this case *hhdr is replaced with a pointer to the header */
546 /* for the large object. */
547 /* - just return current if it does not point to a large object. */
549 ptr_t GC_find_start(current, hhdr, new_hdr_p)
550 register ptr_t current;
551 register hdr *hhdr, **new_hdr_p;
553 if (GC_all_interior_pointers) {
555 register ptr_t orig = current;
557 current = (ptr_t)HBLKPTR(current);
559 current = current - HBLKSIZE*(word)hhdr;
561 } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
562 /* current points to near the start of the large object */
563 if (hhdr -> hb_flags & IGNORE_OFF_PAGE) return(orig);
564 if ((word *)orig - (word *)current
565 >= (ptrdiff_t)(hhdr->hb_sz)) {
566 /* Pointer past the end of the block */
579 void GC_invalidate_mark_state()
581 GC_mark_state = MS_INVALID;
582 GC_mark_stack_top = GC_mark_stack-1;
585 mse * GC_signal_mark_stack_overflow(msp)
588 GC_mark_state = MS_INVALID;
589 GC_mark_stack_too_small = TRUE;
591 if (GC_print_stats) {
592 GC_printf1("Mark stack overflow; current size = %lu entries\n",
596 return(msp - GC_MARK_STACK_DISCARDS);
600 * Mark objects pointed to by the regions described by
601 * mark stack entries between GC_mark_stack and GC_mark_stack_top,
602 * inclusive. Assumes the upper limit of a mark stack entry
603 * is never 0. A mark stack entry never has size 0.
604 * We try to traverse on the order of a hblk of memory before we return.
605 * Caller is responsible for calling this until the mark stack is empty.
606 * Note that this is the most performance critical routine in the
607 * collector. Hence it contains all sorts of ugly hacks to speed
608 * things up. In particular, we avoid procedure calls on the common
609 * path, we take advantage of peculiarities of the mark descriptor
610 * encoding, we optionally maintain a cache for the block address to
611 * header mapping, we prefetch when an object is "grayed", etc.
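/* Illustrative sketch of the simplest descriptor encoding handled below  */
/* (an assumption-laden example, not used by the collector): a small      */
/* object with a length descriptor is just the object size in bytes       */
/* tagged with GC_DS_LENGTH, as in the GC_obj_kinds initializers above.   */
#if 0
  {
    word n_words = 4;                              /* hypothetical size */
    word simple_descr = WORDS_TO_BYTES(n_words) | GC_DS_LENGTH;

    GC_ASSERT((simple_descr & GC_DS_TAGS) == GC_DS_LENGTH);
  }
#endif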
613 mse * GC_mark_from(mark_stack_top, mark_stack, mark_stack_limit)
614 mse * mark_stack_top;
616 mse * mark_stack_limit;
618 int credit = HBLKSIZE; /* Remaining credit for marking work */
619 register word * current_p; /* Pointer to current candidate ptr. */
620 register word current; /* Candidate pointer. */
621 register word * limit; /* (Incl) limit of current candidate */
624 register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
625 register ptr_t least_ha = GC_least_plausible_heap_addr;
628 # define SPLIT_RANGE_WORDS 128 /* Must be power of 2. */
630 GC_objects_are_marked = TRUE;
632 # ifdef OS2 /* Use untweaked version to circumvent compiler problem */
633 while (mark_stack_top >= mark_stack && credit >= 0) {
635 while ((((ptr_t)mark_stack_top - (ptr_t)mark_stack) | credit)
638 current_p = mark_stack_top -> mse_start;
639 descr = mark_stack_top -> mse_descr;
641 /* current_p and descr describe the current object. */
642 /* *mark_stack_top is vacant. */
643 /* The following is 0 only for small objects described by a simple */
644 /* length descriptor. For many applications this is the common */
645 /* case, so we try to detect it quickly. */
646 if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | GC_DS_TAGS)) {
647 word tag = descr & GC_DS_TAGS;
652 /* Process part of the range to avoid pushing too much on the stack. */
654 GC_ASSERT(descr < (word)GC_greatest_plausible_heap_addr
655 - (word)GC_least_plausible_heap_addr);
656 # ifdef PARALLEL_MARK
657 # define SHARE_BYTES 2048
658 if (descr > SHARE_BYTES && GC_parallel
659 && mark_stack_top < mark_stack_limit - 1) {
660 int new_size = (descr/2) & ~(sizeof(word)-1);
661 mark_stack_top -> mse_start = current_p;
662 mark_stack_top -> mse_descr = new_size + sizeof(word);
663 /* makes sure we handle */
664 /* misaligned pointers. */
666 current_p = (word *) ((char *)current_p + new_size);
670 # endif /* PARALLEL_MARK */
671 mark_stack_top -> mse_start =
672 limit = current_p + SPLIT_RANGE_WORDS-1;
673 mark_stack_top -> mse_descr =
674 descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
675 /* Make sure that pointers overlapping the two ranges are considered. */
677 limit = (word *)((char *)limit + sizeof(word) - ALIGNMENT);
681 descr &= ~GC_DS_TAGS;
682 credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
684 if ((signed_word)descr < 0) {
685 current = *current_p;
686 FIXUP_POINTER(current);
687 if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
688 PREFETCH((ptr_t)current);
689 HC_PUSH_CONTENTS((ptr_t)current, mark_stack_top,
690 mark_stack_limit, current_p, exit1);
699 credit -= GC_PROC_BYTES;
702 (current_p, mark_stack_top,
703 mark_stack_limit, ENV(descr));
705 case GC_DS_PER_OBJECT:
706 if ((signed_word)descr >= 0) {
707 /* Descriptor is in the object. */
708 descr = *(word *)((ptr_t)current_p + descr - GC_DS_PER_OBJECT);
710 /* Descriptor is in type descriptor pointed to by first */
711 /* word in object. */
712 ptr_t type_descr = *(ptr_t *)current_p;
713 /* type_descr is either a valid pointer to the descriptor */
714 /* structure, or this object was on a free list. If */
715 /* it was anything but the last object on the free list, */
716 /* we will misinterpret the next object on the free list as */
717 /* the type descriptor, and get a 0 GC descriptor, which */
718 /* is ideal. Unfortunately, we need to check for the last */
719 /* object case explicitly. */
720 if (0 == type_descr) {
721 /* Rarely executed. */
725 descr = *(word *)(type_descr
726 - (descr - (GC_DS_PER_OBJECT
727 - GC_INDIR_PER_OBJ_BIAS)));
730 /* Can happen either because we generated a 0 descriptor */
731 /* or we saw a pointer to a free object. */
737 } else /* Small object with length descriptor */ {
739 limit = (word *)(((ptr_t)current_p) + (word)descr);
741 /* The simple case in which we're scanning a range. */
742 GC_ASSERT(!((word)current_p & (ALIGNMENT-1)));
743 credit -= (ptr_t)limit - (ptr_t)current_p;
748 # ifndef SMALL_CONFIG
751 /* Try to prefetch the next pointer to be examined asap. */
752 /* Empirically, this also seems to help slightly without */
753 /* prefetches, at least on linux/X86. Presumably this loop */
754 /* ends up with less register pressure, and gcc thus ends up */
755 /* generating slightly better code. Overall gcc code quality */
756 /* for this loop is still not great. */
758 PREFETCH((ptr_t)limit - PREF_DIST*CACHE_LINE_SIZE);
759 GC_ASSERT(limit >= current_p);
761 FIXUP_POINTER(deferred);
762 limit = (word *)((char *)limit - ALIGNMENT);
763 if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
764 PREFETCH((ptr_t)deferred);
767 if (current_p > limit) goto next_object;
768 /* Unroll once, so we don't do too many of the prefetches */
769 /* based on limit. */
771 FIXUP_POINTER(deferred);
772 limit = (word *)((char *)limit - ALIGNMENT);
773 if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
774 PREFETCH((ptr_t)deferred);
777 if (current_p > limit) goto next_object;
781 while (current_p <= limit) {
782 /* Empirically, unrolling this loop doesn't help a lot. */
783 /* Since HC_PUSH_CONTENTS expands to a lot of code, we don't. */
785 current = *current_p;
786 FIXUP_POINTER(current);
787 PREFETCH((ptr_t)current_p + PREF_DIST*CACHE_LINE_SIZE);
788 if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
789 /* Prefetch the contents of the object we just pushed. It's */
790 /* likely we will need them soon. */
791 PREFETCH((ptr_t)current);
792 HC_PUSH_CONTENTS((ptr_t)current, mark_stack_top,
793 mark_stack_limit, current_p, exit2);
795 current_p = (word *)((char *)current_p + ALIGNMENT);
798 # ifndef SMALL_CONFIG
799 /* We still need to mark the entry we previously prefetched. */
800 /* We already know that it passes the preliminary pointer validity test. */
802 HC_PUSH_CONTENTS((ptr_t)deferred, mark_stack_top,
803 mark_stack_limit, current_p, exit4);
808 return mark_stack_top;
813 /* We assume we have an ANSI C Compiler. */
814 GC_bool GC_help_wanted = FALSE;
815 unsigned GC_helper_count = 0;
816 unsigned GC_active_count = 0;
817 mse * VOLATILE GC_first_nonempty;
820 #define LOCAL_MARK_STACK_SIZE HBLKSIZE
821 /* Under normal circumstances, this is big enough to guarantee */
822 /* we don't overflow half of it in a single call to GC_mark_from. */
826 /* Steal mark stack entries starting at mse low into mark stack local */
827 /* until we either steal mse high, or we have max entries. */
828 /* Return a pointer to the top of the local mark stack. */
829 /* *next is replaced by a pointer to the next unscanned mark stack entry. */
831 mse * GC_steal_mark_stack(mse * low, mse * high, mse * local,
832 unsigned max, mse **next)
835 mse *top = local - 1;
838 /* Make sure that prior writes to the mark stack are visible. */
839 /* On some architectures, the fact that the reads are */
840 /* volatile should suffice. */
841 # if !defined(IA64) && !defined(HP_PA) && !defined(I386)
844 GC_ASSERT(high >= low-1 && high - low + 1 <= GC_mark_stack_size);
845 for (p = low; p <= high && i <= max; ++p) {
846 word descr = *(volatile word *) &(p -> mse_descr);
847 /* In the IA64 memory model, the following volatile store is */
848 /* ordered after this read of descr. Thus a thread must read */
849 /* the original nonzero value. HP_PA appears to be similar, */
850 /* and if I'm reading the P4 spec correctly, X86 is probably */
851 /* also OK. In some other cases we need a barrier. */
852 # if !defined(IA64) && !defined(HP_PA) && !defined(I386)
856 *(volatile word *) &(p -> mse_descr) = 0;
857 /* More than one thread may get this entry, but that's only */
858 /* a minor performance problem. */
860 top -> mse_descr = descr;
861 top -> mse_start = p -> mse_start;
862 GC_ASSERT( (top -> mse_descr & GC_DS_TAGS) != GC_DS_LENGTH ||
863 top -> mse_descr < (ptr_t)GC_greatest_plausible_heap_addr
864 - (ptr_t)GC_least_plausible_heap_addr);
865 /* If this is a big object, count it as */
866 /* size/256 + 1 objects. */
868 if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) i += (descr >> 8);
875 /* Copy back a local mark stack. */
876 /* low and high are inclusive bounds. */
877 void GC_return_mark_stack(mse * low, mse * high)
883 if (high < low) return;
884 stack_size = high - low + 1;
885 GC_acquire_mark_lock();
886 my_top = GC_mark_stack_top;
887 my_start = my_top + 1;
888 if (my_start - GC_mark_stack + stack_size > GC_mark_stack_size) {
890 if (GC_print_stats) {
891 GC_printf0("No room to copy back mark stack.");
894 GC_mark_state = MS_INVALID;
895 GC_mark_stack_too_small = TRUE;
896 /* We drop the local mark stack. We'll fix things later. */
898 BCOPY(low, my_start, stack_size * sizeof(mse));
899 GC_ASSERT(GC_mark_stack_top == my_top);
900 # if !defined(IA64) && !defined(HP_PA)
903 /* On IA64, the volatile write acts as a release barrier. */
904 GC_mark_stack_top = my_top + stack_size;
906 GC_release_mark_lock();
907 GC_notify_all_marker();
910 /* Mark from the local mark stack. */
911 /* On return, the local mark stack is empty. */
912 /* But this may be achieved by copying the */
913 /* local mark stack back into the global one. */
914 void GC_do_local_mark(mse *local_mark_stack, mse *local_top)
917 # define N_LOCAL_ITERS 1
919 # ifdef GC_ASSERTIONS
920 /* Make sure we don't hold mark lock. */
921 GC_acquire_mark_lock();
922 GC_release_mark_lock();
925 for (n = 0; n < N_LOCAL_ITERS; ++n) {
926 local_top = GC_mark_from(local_top, local_mark_stack,
927 local_mark_stack + LOCAL_MARK_STACK_SIZE);
928 if (local_top < local_mark_stack) return;
929 if (local_top - local_mark_stack >= LOCAL_MARK_STACK_SIZE/2) {
930 GC_return_mark_stack(local_mark_stack, local_top);
934 if (GC_mark_stack_top < GC_first_nonempty &&
935 GC_active_count < GC_helper_count
936 && local_top > local_mark_stack + 1) {
937 /* Try to share the load, since the main stack is empty, */
938 /* and helper threads are waiting for a refill. */
939 /* The entries near the bottom of the stack are likely */
940 /* to require more work. Thus we return those, even though we know nothing about them. */
943 mse * new_bottom = local_mark_stack
944 + (local_top - local_mark_stack)/2;
945 GC_ASSERT(new_bottom > local_mark_stack
946 && new_bottom < local_top);
947 GC_return_mark_stack(local_mark_stack, new_bottom - 1);
948 memmove(local_mark_stack, new_bottom,
949 (local_top - new_bottom + 1) * sizeof(mse));
950 local_top -= (new_bottom - local_mark_stack);
955 #define ENTRIES_TO_GET 5
957 long GC_markers = 2; /* Normally changed by thread-library- */
958 /* -specific code. */
960 /* Mark using the local mark stack until the global mark stack is empty */
961 /* and there are no active workers. Update GC_first_nonempty to reflect progress. */
963 /* Caller does not hold mark lock. */
964 /* Caller has already incremented GC_helper_count. We decrement it, */
965 /* and maintain GC_active_count. */
966 void GC_mark_local(mse *local_mark_stack, int id)
968 mse * my_first_nonempty;
970 GC_acquire_mark_lock();
972 my_first_nonempty = GC_first_nonempty;
973 GC_ASSERT(GC_first_nonempty >= GC_mark_stack &&
974 GC_first_nonempty <= GC_mark_stack_top + 1);
976 GC_printf1("Starting mark helper %lu\n", (unsigned long)id);
978 GC_release_mark_lock();
985 mse * global_first_nonempty = GC_first_nonempty;
987 GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
988 my_first_nonempty <= GC_mark_stack_top + 1);
989 GC_ASSERT(global_first_nonempty >= GC_mark_stack &&
990 global_first_nonempty <= GC_mark_stack_top + 1);
991 if (my_first_nonempty < global_first_nonempty) {
992 my_first_nonempty = global_first_nonempty;
993 } else if (global_first_nonempty < my_first_nonempty) {
994 GC_compare_and_exchange((word *)(&GC_first_nonempty),
995 (word) global_first_nonempty,
996 (word) my_first_nonempty);
997 /* If this fails, we just go ahead, without updating */
998 /* GC_first_nonempty. */
1000 /* Perhaps we should also update GC_first_nonempty, if it */
1001 /* is less. But that would require using atomic updates. */
1002 my_top = GC_mark_stack_top;
1003 n_on_stack = my_top - my_first_nonempty + 1;
1004 if (0 == n_on_stack) {
1005 GC_acquire_mark_lock();
1006 my_top = GC_mark_stack_top;
1007 n_on_stack = my_top - my_first_nonempty + 1;
1008 if (0 == n_on_stack) {
1010 GC_ASSERT(GC_active_count <= GC_helper_count);
1011 /* Other markers may redeposit objects on the stack. */
1013 if (0 == GC_active_count) GC_notify_all_marker();
1014 while (GC_active_count > 0
1015 && GC_first_nonempty > GC_mark_stack_top) {
1016 /* We will be notified if either GC_active_count */
1017 /* reaches zero, or if more objects are pushed on */
1018 /* the global mark stack. */
1021 if (GC_active_count == 0 &&
1022 GC_first_nonempty > GC_mark_stack_top) {
1023 GC_bool need_to_notify = FALSE;
1024 /* The above conditions can't be falsified while we */
1025 /* hold the mark lock, since neither */
1026 /* GC_active_count nor GC_mark_stack_top can */
1027 /* change. GC_first_nonempty can only be */
1028 /* incremented asynchronously. Thus we know that */
1029 /* both conditions actually held simultaneously. */
1031 if (0 == GC_helper_count) need_to_notify = TRUE;
1034 "Finished mark helper %lu\n", (unsigned long)id);
1036 GC_release_mark_lock();
1037 if (need_to_notify) GC_notify_all_marker();
1040 /* else there's something on the stack again, or */
1041 /* another helper may push something. */
1043 GC_ASSERT(GC_active_count > 0);
1044 GC_release_mark_lock();
1047 GC_release_mark_lock();
1050 n_to_get = ENTRIES_TO_GET;
1051 if (n_on_stack < 2 * ENTRIES_TO_GET) n_to_get = 1;
1052 local_top = GC_steal_mark_stack(my_first_nonempty, my_top,
1053 local_mark_stack, n_to_get,
1054 &my_first_nonempty);
1055 GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
1056 my_first_nonempty <= GC_mark_stack_top + 1);
1057 GC_do_local_mark(local_mark_stack, local_top);
1061 /* Perform Parallel mark. */
1062 /* We hold the GC lock, not the mark lock. */
1063 /* Currently runs until the mark stack is empty. */
1065 void GC_do_parallel_mark()
1067 mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
1071 GC_acquire_mark_lock();
1072 GC_ASSERT(I_HOLD_LOCK());
1073 /* This could be a GC_ASSERT, but it seems safer to keep it on */
1074 /* all the time, especially since it's cheap. */
1075 if (GC_help_wanted || GC_active_count != 0 || GC_helper_count != 0)
1076 ABORT("Tried to start parallel mark in bad state");
1078 GC_printf1("Starting marking for mark phase number %lu\n",
1079 (unsigned long)GC_mark_no);
1081 GC_first_nonempty = GC_mark_stack;
1082 GC_active_count = 0;
1083 GC_helper_count = 1;
1084 GC_help_wanted = TRUE;
1085 GC_release_mark_lock();
1086 GC_notify_all_marker();
1087 /* Wake up potential helpers. */
1088 GC_mark_local(local_mark_stack, 0);
1089 GC_acquire_mark_lock();
1090 GC_help_wanted = FALSE;
1091 /* Done; clean up. */
1092 while (GC_helper_count > 0) GC_wait_marker();
1093 /* GC_helper_count cannot be incremented while GC_help_wanted == FALSE */
1096 "Finished marking for mark phase number %lu\n",
1097 (unsigned long)GC_mark_no);
1100 GC_release_mark_lock();
1101 GC_notify_all_marker();
1105 /* Try to help out the marker, if it's running. */
1106 /* We do not hold the GC lock, but the requestor does. */
1107 void GC_help_marker(word my_mark_no)
1109 mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
1111 mse * my_first_nonempty;
1113 if (!GC_parallel) return;
1114 GC_acquire_mark_lock();
1115 while (GC_mark_no < my_mark_no
1116 || !GC_help_wanted && GC_mark_no == my_mark_no) {
1119 my_id = GC_helper_count;
1120 if (GC_mark_no != my_mark_no || my_id >= GC_markers) {
1121 /* Second test is useful only if original threads can also */
1122 /* act as helpers. Under Linux they can't. */
1123 GC_release_mark_lock();
1126 GC_helper_count = my_id + 1;
1127 GC_release_mark_lock();
1128 GC_mark_local(local_mark_stack, my_id);
1129 /* GC_mark_local decrements GC_helper_count. */
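/* Hedged sketch (not part of this file): a dedicated marker thread,      */
/* created by the thread-support code, could participate simply by        */
/* calling GC_help_marker with the mark phase number it wants to help.    */
/* The surrounding thread creation and locking are assumptions here.      */
#if 0
  {
    word phase_to_help = GC_mark_no;   /* phase this helper targets        */

    GC_help_marker(phase_to_help);     /* returns when that phase is done  */
  }
#endif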
1132 #endif /* PARALLEL_MARK */
1134 /* Allocate or reallocate space for a mark stack of n entries. */
1135 /* May silently fail. */
1136 static void alloc_mark_stack(n)
1139 mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct GC_ms_entry));
1141 GC_mark_stack_too_small = FALSE;
1142 if (GC_mark_stack_size != 0) {
1143 if (new_stack != 0) {
1144 word displ = (word)GC_mark_stack & (GC_page_size - 1);
1145 signed_word size = GC_mark_stack_size * sizeof(struct GC_ms_entry);
1147 /* Recycle old space */
1148 if (0 != displ) displ = GC_page_size - displ;
1149 size = (size - displ) & ~(GC_page_size - 1);
1151 GC_add_to_heap((struct hblk *)
1152 ((word)GC_mark_stack + displ), (word)size);
1154 GC_mark_stack = new_stack;
1155 GC_mark_stack_size = n;
1156 GC_mark_stack_limit = new_stack + n;
1158 if (GC_print_stats) {
1159 GC_printf1("Grew mark stack to %lu frames\n",
1160 (unsigned long) GC_mark_stack_size);
1165 if (GC_print_stats) {
1166 GC_printf1("Failed to grow mark stack to %lu frames\n",
1172 if (new_stack == 0) {
1173 GC_err_printf0("No space for mark stack\n");
1176 GC_mark_stack = new_stack;
1177 GC_mark_stack_size = n;
1178 GC_mark_stack_limit = new_stack + n;
1180 GC_mark_stack_top = GC_mark_stack-1;
1185 alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
1189 * Push all locations between b and t onto the mark stack.
1190 * b is the first location to be checked. t is one past the last
1191 * location to be checked.
1192 * Should only be used if there is no possibility of mark stack overflow.
1195 void GC_push_all(bottom, top)
1199 register word length;
1201 bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
1202 top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
1203 if (top == 0 || bottom == top) return;
1204 GC_mark_stack_top++;
1205 if (GC_mark_stack_top >= GC_mark_stack_limit) {
1206 ABORT("unexpected mark stack overflow");
1208 length = top - bottom;
1209 # if GC_DS_TAGS > ALIGNMENT - 1
1210 length += GC_DS_TAGS;
1211 length &= ~GC_DS_TAGS;
1213 GC_mark_stack_top -> mse_start = (word *)bottom;
1214 GC_mark_stack_top -> mse_descr = length;
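/* Hypothetical usage sketch (not part of the collector): push a static   */
/* root range so the marker scans it.  Per the comment above GC_push_all, */
/* this is only safe when mark stack overflow is impossible.              */
#if 0
  {
    static word my_roots[256];           /* hypothetical root area */

    GC_push_all((ptr_t)my_roots, (ptr_t)(my_roots + 256));
  }
#endif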
1218 * Analogous to the above, but push only those pages h with dirty_fn(h) != 0.
1219 * We use push_fn to actually push the block.
1220 * Used both to selectively push dirty pages, and to push a block
1221 * in piecemeal fashion, to allow for more marking concurrency.
1222 * Will not overflow mark stack if push_fn pushes a small fixed number
1223 * of entries. (This is invoked only if push_fn pushes a single entry,
1224 * or if it marks each object before pushing it, thus ensuring progress
1225 * in the event of a stack overflow.)
1227 void GC_push_selected(bottom, top, dirty_fn, push_fn)
1230 int (*dirty_fn) GC_PROTO((struct hblk * h));
1231 void (*push_fn) GC_PROTO((ptr_t bottom, ptr_t top));
1233 register struct hblk * h;
1235 bottom = (ptr_t)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
1236 top = (ptr_t)(((long) top) & ~(ALIGNMENT-1));
1238 if (top == 0 || bottom == top) return;
1239 h = HBLKPTR(bottom + HBLKSIZE);
1240 if (top <= (ptr_t) h) {
1241 if ((*dirty_fn)(h-1)) {
1242 (*push_fn)(bottom, top);
1246 if ((*dirty_fn)(h-1)) {
1247 (*push_fn)(bottom, (ptr_t)h);
1249 while ((ptr_t)(h+1) <= top) {
1250 if ((*dirty_fn)(h)) {
1251 if ((word)(GC_mark_stack_top - GC_mark_stack)
1252 > 3 * GC_mark_stack_size / 4) {
1253 /* Danger of mark stack overflow */
1254 (*push_fn)((ptr_t)h, top);
1257 (*push_fn)((ptr_t)h, (ptr_t)(h+1));
1262 if ((ptr_t)h != top) {
1263 if ((*dirty_fn)(h)) {
1264 (*push_fn)((ptr_t)h, top);
1267 if (GC_mark_stack_top >= GC_mark_stack_limit) {
1268 ABORT("unexpected mark stack overflow");
1272 # ifndef SMALL_CONFIG
1274 #ifdef PARALLEL_MARK
1275 /* Break up root sections into page size chunks to better spread out the work. */
1277 GC_bool GC_true_func(struct hblk *h) { return TRUE; }
1278 # define GC_PUSH_ALL(b,t) GC_push_selected(b,t,GC_true_func,GC_push_all);
1280 # define GC_PUSH_ALL(b,t) GC_push_all(b,t);
1284 void GC_push_conditional(bottom, top, all)
1290 if (GC_dirty_maintained) {
1292 /* Pages that were never dirtied cannot contain pointers */
1293 GC_push_selected(bottom, top, GC_page_was_ever_dirty, GC_push_all);
1295 GC_push_all(bottom, top);
1298 GC_push_all(bottom, top);
1301 GC_push_selected(bottom, top, GC_page_was_dirty, GC_push_all);
1306 # if defined(MSWIN32) || defined(MSWINCE)
1307 void __cdecl GC_push_one(p)
1313 GC_PUSH_ONE_STACK(p, MARKED_FROM_REGISTER);
1316 struct GC_ms_entry *GC_mark_and_push(obj, mark_stack_ptr, mark_stack_limit, src)
1318 struct GC_ms_entry * mark_stack_ptr;
1319 struct GC_ms_entry * mark_stack_limit;
1323 PUSH_CONTENTS(obj, mark_stack_ptr /* modified */, mark_stack_limit, src,
1324 was_marked /* internally generated exit label */);
1325 return mark_stack_ptr;
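/* Hedged sketch of a client mark procedure of the kind installed in      */
/* GC_mark_procs and invoked through a GC_DS_PROC descriptor (see the     */
/* GC_mark_from case above).  The struct layout and procedure name are    */
/* hypothetical; the argument convention follows the call site above.     */
#if 0
  struct my_node { void * left; word datum; void * right; };

  struct GC_ms_entry * my_mark_proc(addr, mark_stack_ptr, mark_stack_limit, env)
  word * addr;
  struct GC_ms_entry * mark_stack_ptr;
  struct GC_ms_entry * mark_stack_limit;
  word env;
  {
    struct my_node * p = (struct my_node *)addr;

    mark_stack_ptr = GC_mark_and_push((GC_PTR)(p -> left), mark_stack_ptr,
                                      mark_stack_limit, (GC_PTR *)&(p -> left));
    mark_stack_ptr = GC_mark_and_push((GC_PTR)(p -> right), mark_stack_ptr,
                                      mark_stack_limit, (GC_PTR *)&(p -> right));
    return(mark_stack_ptr);
  }
#endif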
1329 # define BASE(p) (word)GC_base((void *)(p))
1331 # define BASE(p) (word)GC_base((char *)(p))
1334 /* Mark and push (i.e. gray) a single object p onto the main */
1335 /* mark stack. Consider p to be valid if it is an interior pointer. */
1337 /* The object p has passed a preliminary pointer validity */
1338 /* test, but we do not definitely know whether it is valid. */
1339 /* Mark bits are NOT atomically updated. Thus this must be the */
1340 /* only thread setting them. */
1341 # if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
1342 void GC_mark_and_push_stack(p, source)
1345 void GC_mark_and_push_stack(p)
1351 register hdr * hhdr;
1355 if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
1359 displ = BYTES_TO_WORDS(HBLKDISPL(r));
1362 register map_entry_type map_entry;
1364 displ = HBLKDISPL(p);
1365 map_entry = MAP_ENTRY((hhdr -> hb_map), displ);
1366 if (map_entry >= MAX_OFFSET) {
1367 if (map_entry == OFFSET_TOO_BIG || !GC_all_interior_pointers) {
1369 displ = BYTES_TO_WORDS(HBLKDISPL(r));
1370 if (r == 0) hhdr = 0;
1372 /* Offset invalid, but map reflects interior pointers */
1376 displ = BYTES_TO_WORDS(displ);
1378 r = (word)((word *)(HBLKPTR(p)) + displ);
1381 /* If hhdr != 0 then r == GC_base(p), only we did it faster. */
1382 /* displ is the word index within the block. */
1384 # ifdef PRINT_BLACK_LIST
1385 GC_add_to_black_list_stack(p, source);
1387 GC_add_to_black_list_stack(p);
1389 # undef source /* In case we had to define it. */
1391 if (!mark_bit_from_hdr(hhdr, displ)) {
1392 set_mark_bit_from_hdr(hhdr, displ);
1393 GC_STORE_BACK_PTR(source, (ptr_t)r);
1394 PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top,
1395 GC_mark_stack_limit);
1402 # define TRACE_ENTRIES 1000
1404 struct trace_entry {
1410 } GC_trace_buf[TRACE_ENTRIES];
1412 int GC_trace_buf_ptr = 0;
1414 void GC_add_trace_entry(char *kind, word arg1, word arg2)
1416 GC_trace_buf[GC_trace_buf_ptr].kind = kind;
1417 GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
1418 GC_trace_buf[GC_trace_buf_ptr].words_allocd = GC_words_allocd;
1419 GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
1420 GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
1422 if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
1425 void GC_print_trace(word gc_no, GC_bool lock)
1428 struct trace_entry *p;
1431 for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
1432 if (i < 0) i = TRACE_ENTRIES-1;
1433 p = GC_trace_buf + i;
1434 if (p -> gc_no < gc_no || p -> kind == 0) return;
1435 printf("Trace:%s (gc:%d,words:%d) 0x%X, 0x%X\n",
1436 p -> kind, p -> gc_no, p -> words_allocd,
1437 (p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
1439 printf("Trace incomplete\n");
1443 # endif /* TRACE_BUF */
1446 * A version of GC_push_all that treats all interior pointers as valid
1447 * and scans the entire region immediately, in case the contents change.
1450 void GC_push_all_eager(bottom, top)
1454 word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
1455 word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
1459 register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
1460 register ptr_t least_ha = GC_least_plausible_heap_addr;
1461 # define GC_greatest_plausible_heap_addr greatest_ha
1462 # define GC_least_plausible_heap_addr least_ha
1464 if (top == 0) return;
1465 /* check all pointers in range and push if they appear to be valid */
1467 lim = t - 1 /* longword */;
1468 for (p = b; p <= lim; p = (word *)(((char *)p) + ALIGNMENT)) {
1470 GC_PUSH_ONE_STACK(q, p);
1472 # undef GC_greatest_plausible_heap_addr
1473 # undef GC_least_plausible_heap_addr
1478 * A version of GC_push_all that treats all interior pointers as valid
1479 * and scans part of the area immediately, to make sure that saved
1480 * register values are not lost.
1481 * Cold_gc_frame delimits the stack section that must be scanned
1482 * eagerly. A zero value indicates that no eager scanning is needed.
1484 void GC_push_all_stack_partially_eager(bottom, top, cold_gc_frame)
1487 ptr_t cold_gc_frame;
1489 if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
1490 # define EAGER_BYTES 1024
1491 /* Push the hot end of the stack eagerly, so that register values */
1492 /* saved inside GC frames are marked before they disappear. */
1493 /* The rest of the marking can be deferred until later. */
1494 if (0 == cold_gc_frame) {
1495 GC_push_all_stack(bottom, top);
1498 GC_ASSERT(bottom <= cold_gc_frame && cold_gc_frame <= top);
1499 # ifdef STACK_GROWS_DOWN
1500 GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
1501 GC_push_all_eager(bottom, cold_gc_frame);
1502 # else /* STACK_GROWS_UP */
1503 GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
1504 GC_push_all_eager(cold_gc_frame, top);
1505 # endif /* STACK_GROWS_UP */
1507 GC_push_all_eager(bottom, top);
1510 GC_add_trace_entry("GC_push_all_stack", bottom, top);
1513 #endif /* !THREADS */
1515 void GC_push_all_stack(bottom, top)
1519 if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
1520 GC_push_all(bottom, top);
1522 GC_push_all_eager(bottom, top);
1526 #if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
1527 /* Push all objects reachable from marked objects in the given block */
1528 /* of size 1 objects. */
1529 void GC_push_marked1(h, hhdr)
1531 register hdr * hhdr;
1533 word * mark_word_addr = &(hhdr->hb_marks[0]);
1538 register word mark_word;
1539 register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
1540 register ptr_t least_ha = GC_least_plausible_heap_addr;
1541 register mse * mark_stack_top = GC_mark_stack_top;
1542 register mse * mark_stack_limit = GC_mark_stack_limit;
1543 # define GC_mark_stack_top mark_stack_top
1544 # define GC_mark_stack_limit mark_stack_limit
1545 # define GC_greatest_plausible_heap_addr greatest_ha
1546 # define GC_least_plausible_heap_addr least_ha
1548 p = (word *)(h->hb_body);
1549 plim = (word *)(((word)h) + HBLKSIZE);
1551 /* go through all words in block */
1553 mark_word = *mark_word_addr++;
1555 while(mark_word != 0) {
1556 if (mark_word & 1) {
1558 GC_PUSH_ONE_HEAP(q, p + i);
1565 # undef GC_greatest_plausible_heap_addr
1566 # undef GC_least_plausible_heap_addr
1567 # undef GC_mark_stack_top
1568 # undef GC_mark_stack_limit
1569 GC_mark_stack_top = mark_stack_top;
1575 /* Push all objects reachable from marked objects in the given block */
1576 /* of size 2 objects. */
1577 void GC_push_marked2(h, hhdr)
1579 register hdr * hhdr;
1581 word * mark_word_addr = &(hhdr->hb_marks[0]);
1586 register word mark_word;
1587 register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
1588 register ptr_t least_ha = GC_least_plausible_heap_addr;
1589 register mse * mark_stack_top = GC_mark_stack_top;
1590 register mse * mark_stack_limit = GC_mark_stack_limit;
1591 # define GC_mark_stack_top mark_stack_top
1592 # define GC_mark_stack_limit mark_stack_limit
1593 # define GC_greatest_plausible_heap_addr greatest_ha
1594 # define GC_least_plausible_heap_addr least_ha
1596 p = (word *)(h->hb_body);
1597 plim = (word *)(((word)h) + HBLKSIZE);
1599 /* go through all words in block */
1601 mark_word = *mark_word_addr++;
1603 while(mark_word != 0) {
1604 if (mark_word & 1) {
1606 GC_PUSH_ONE_HEAP(q, p + i);
1608 GC_PUSH_ONE_HEAP(q, p + i);
1615 # undef GC_greatest_plausible_heap_addr
1616 # undef GC_least_plausible_heap_addr
1617 # undef GC_mark_stack_top
1618 # undef GC_mark_stack_limit
1619 GC_mark_stack_top = mark_stack_top;
1622 /* Push all objects reachable from marked objects in the given block */
1623 /* of size 4 objects. */
1624 /* There is a risk of mark stack overflow here. But we handle that. */
1625 /* And only unmarked objects get pushed, so it's not very likely. */
1626 void GC_push_marked4(h, hhdr)
1628 register hdr * hhdr;
1630 word * mark_word_addr = &(hhdr->hb_marks[0]);
1635 register word mark_word;
1636 register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
1637 register ptr_t least_ha = GC_least_plausible_heap_addr;
1638 register mse * mark_stack_top = GC_mark_stack_top;
1639 register mse * mark_stack_limit = GC_mark_stack_limit;
1640 # define GC_mark_stack_top mark_stack_top
1641 # define GC_mark_stack_limit mark_stack_limit
1642 # define GC_greatest_plausible_heap_addr greatest_ha
1643 # define GC_least_plausible_heap_addr least_ha
1645 p = (word *)(h->hb_body);
1646 plim = (word *)(((word)h) + HBLKSIZE);
1648 /* go through all words in block */
1650 mark_word = *mark_word_addr++;
1652 while(mark_word != 0) {
1653 if (mark_word & 1) {
1655 GC_PUSH_ONE_HEAP(q, p + i);
1657 GC_PUSH_ONE_HEAP(q, p + i + 1);
1659 GC_PUSH_ONE_HEAP(q, p + i + 2);
1661 GC_PUSH_ONE_HEAP(q, p + i + 3);
1668 # undef GC_greatest_plausible_heap_addr
1669 # undef GC_least_plausible_heap_addr
1670 # undef GC_mark_stack_top
1671 # undef GC_mark_stack_limit
1672 GC_mark_stack_top = mark_stack_top;
1675 #endif /* UNALIGNED */
1677 #endif /* SMALL_CONFIG */
1679 /* Push all objects reachable from marked objects in the given block */
1680 void GC_push_marked(h, hhdr)
1682 register hdr * hhdr;
1684 register int sz = hhdr -> hb_sz;
1685 register int descr = hhdr -> hb_descr;
1687 register int word_no;
1688 register word * lim;
1689 register mse * GC_mark_stack_top_reg;
1690 register mse * mark_stack_limit = GC_mark_stack_limit;
1692 /* Some quick shortcuts: */
1693 if ((0 | GC_DS_LENGTH) == descr) return;
1694 if (GC_block_empty(hhdr)/* nothing marked */) return;
1695 GC_n_rescuing_pages++;
1696 GC_objects_are_marked = TRUE;
1697 if (sz > MAXOBJSZ) {
1700 lim = (word *)(h + 1) - sz;
1704 # if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
1706 GC_push_marked1(h, hhdr);
1709 # if !defined(SMALL_CONFIG) && !defined(UNALIGNED) && \
1710 !defined(USE_MARK_BYTES)
1712 GC_push_marked2(h, hhdr);
1715 GC_push_marked4(h, hhdr);
1719 GC_mark_stack_top_reg = GC_mark_stack_top;
1720 for (p = (word *)h, word_no = 0; p <= lim; p += sz, word_no += sz) {
1721 if (mark_bit_from_hdr(hhdr, word_no)) {
1722 /* Mark from fields inside the object */
1723 PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
1725 /* Subtract this object from total, since it was */
1726 /* added in twice. */
1727 GC_composite_in_use -= sz;
1731 GC_mark_stack_top = GC_mark_stack_top_reg;
1735 #ifndef SMALL_CONFIG
1736 /* Test whether any page in the given block is dirty */
1737 GC_bool GC_block_was_dirty(h, hhdr)
1739 register hdr * hhdr;
1741 register int sz = hhdr -> hb_sz;
1743 if (sz <= MAXOBJSZ) {
1744 return(GC_page_was_dirty(h));
1746 register ptr_t p = (ptr_t)h;
1747 sz = WORDS_TO_BYTES(sz);
1748 while (p < (ptr_t)h + sz) {
1749 if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
1755 #endif /* SMALL_CONFIG */
1757 /* Find the next used block at or after h, push its marked objects, and return the address of the block following it. */
1758 struct hblk * GC_push_next_marked(h)
1761 register hdr * hhdr;
1763 h = GC_next_used_block(h);
1764 if (h == 0) return(0);
1766 GC_push_marked(h, hhdr);
1767 return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
1770 #ifndef SMALL_CONFIG
1771 /* Identical to above, but mark only from dirty pages */
1772 struct hblk * GC_push_next_marked_dirty(h)
1775 register hdr * hhdr;
1777 if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
1779 h = GC_next_used_block(h);
1780 if (h == 0) return(0);
1782 # ifdef STUBBORN_ALLOC
1783 if (hhdr -> hb_obj_kind == STUBBORN) {
1784 if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
1788 if (GC_block_was_dirty(h, hhdr)) break;
1791 if (GC_block_was_dirty(h, hhdr)) break;
1793 h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
1795 GC_push_marked(h, hhdr);
1796 return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
1800 /* Similar to above, but for uncollectable pages. Needed since we */
1801 /* do not clear marks for such pages, even for full collections. */
1802 struct hblk * GC_push_next_marked_uncollectable(h)
1805 register hdr * hhdr = HDR(h);
1808 h = GC_next_used_block(h);
1809 if (h == 0) return(0);
1811 if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
1812 h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
1814 GC_push_marked(h, hhdr);
1815 return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));