/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
# include "private/gc_pmark.h"

#if defined(MSWIN32) && defined(__GNUC__)
# include <excpt.h>
#endif

/* We put this here to minimize the risk of inlining. */
#if defined(__BORLANDC__) || defined(__WATCOMC__)
  void GC_noop(void *p, ...) {}
/* Single argument version, robust against whole program analysis. */
GC_API void GC_CALL GC_noop1(word x)
{
    static volatile word sink;

    sink = x;
}
/* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */

unsigned GC_n_mark_procs = GC_RESERVED_MARK_PROCS;

/* Initialize GC_obj_kinds properly and standard free lists properly.  */
/* This must be done statically since they may be accessed before      */
/* GC_init is called.                                                  */
/* It's done here, since we need to deal with mark descriptors.        */
struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
/* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
                0 | GC_DS_LENGTH, FALSE, FALSE },
/* NORMAL  */ { &GC_objfreelist[0], 0,
                0 | GC_DS_LENGTH,  /* Adjusted in GC_init_inner for EXTRA_BYTES */
                TRUE /* add length to descr */, TRUE },
/* UNCOLLECTABLE */
              { &GC_uobjfreelist[0], 0,
                0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
# ifdef ATOMIC_UNCOLLECTABLE
   /* AUNCOLLECTABLE */
              { &GC_auobjfreelist[0], 0,
                0 | GC_DS_LENGTH, FALSE /* add length to descr */, FALSE },
# endif
# ifdef STUBBORN_ALLOC
/*STUBBORN*/ { (void **)&GC_sobjfreelist[0], 0,
                0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
# endif
};

# ifdef ATOMIC_UNCOLLECTABLE
#   ifdef STUBBORN_ALLOC
      unsigned GC_n_kinds = 5;
#   else
      unsigned GC_n_kinds = 4;
#   endif
# else
#   ifdef STUBBORN_ALLOC
      unsigned GC_n_kinds = 4;
#   else
      unsigned GC_n_kinds = 3;
#   endif
# endif
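/* An illustrative sketch (not compiled in): clients can create        */
/* additional kinds at run time through the gc_mark.h interface, which */
/* fills in a GC_obj_kinds slot much like the static initializers      */
/* above.  The 0 | GC_DS_LENGTH descriptor template means "scan the    */
/* whole object as a possible pointer array"; see GC_mark_from for the */
/* other descriptor tags.  The API identifiers are public; the size    */
/* and names below are made up for the example.                        */
#if 0
#   include "gc_mark.h"
    static void example_new_kind(void)
    {
        void **fl = GC_new_free_list();
        unsigned k = GC_new_kind(fl, 0 | GC_DS_LENGTH,
                                 TRUE /* add length to descr */,
                                 TRUE /* clear new objects */);
        /* obj is collectable and is scanned like a NORMAL object. */
        void *obj = GC_generic_malloc(64, k);
        (void)obj;
    }
#endif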
# ifndef INITIAL_MARK_STACK_SIZE
#   define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
                /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a    */
                /* multiple of HBLKSIZE.                                */
                /* The incremental collector actually likes a larger    */
                /* size, since it wants to push all marked dirty        */
                /* objects before marking anything new.  Currently we   */
                /* let it grow dynamically.                             */
# endif
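                /* For instance, assuming HBLKSIZE is 4096 and an mse   */
                /* is two words (16 bytes on a 64-bit target), the      */
                /* initial stack has 4096 entries occupying 64 KB,      */
                /* which is indeed a multiple of HBLKSIZE.              */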
/*
 * Limits of stack for GC_mark routine.
 * All ranges between GC_mark_stack (incl.) and GC_mark_stack_top (incl.)
 * still need to be marked from.
 */

STATIC word GC_n_rescuing_pages;/* Number of dirty pages we marked from */
                                /* excludes ptrfree pages, etc.         */

mse * GC_mark_stack;

mse * GC_mark_stack_limit;

size_t GC_mark_stack_size = 0;
#ifdef PARALLEL_MARK
# include "atomic_ops.h"

  mse * volatile GC_mark_stack_top;
        /* Updated only with mark lock held, but read asynchronously.   */
  volatile AO_t GC_first_nonempty;
        /* Lowest entry on mark stack   */
        /* that may be nonempty.        */
        /* Updated only by initiating   */
        /* thread.                      */
#else
  mse * GC_mark_stack_top;
#endif
static struct hblk * scan_ptr;

mark_state_t GC_mark_state = MS_NONE;

GC_bool GC_mark_stack_too_small = FALSE;

GC_bool GC_objects_are_marked = FALSE;  /* Are there collectable marked */
                                        /* objects in the heap?         */

/* Is a collection in progress?  Note that this can return true in the */
/* nonincremental case, if a collection has been abandoned and the     */
/* mark state is now MS_INVALID.                                       */
GC_bool GC_collection_in_progress(void)
{
    return(GC_mark_state != MS_NONE);
}
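/* An illustrative sketch (not compiled in) of how a client drives the */
/* state machine above through the public API: GC_collect_a_little()   */
/* performs a small, bounded amount of work (ultimately by calling     */
/* GC_mark_some below) and returns nonzero while a collection is still */
/* in progress.                                                        */
#if 0
#   include "gc.h"
    GC_enable_incremental();
    for (;;) {
        /* ... application work and allocation ... */
        if (GC_collect_a_little()) {
            /* A collection is in progress; more marking remains. */
        }
    }
#endif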
/* Clear all mark bits in the header. */
void GC_clear_hdr_marks(hdr *hhdr)
{
    size_t last_bit = FINAL_MARK_BIT(hhdr -> hb_sz);

#   ifdef USE_MARK_BYTES
      BZERO(hhdr -> hb_marks, MARK_BITS_SZ);
      hhdr -> hb_marks[last_bit] = 1;
#   else
      BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
      set_mark_bit_from_hdr(hhdr, last_bit);
#   endif
    hhdr -> hb_n_marks = 0;
}
/* Set all mark bits in the header.  Used for uncollectable blocks. */
void GC_set_hdr_marks(hdr *hhdr)
{
    unsigned i;
    size_t sz = hhdr -> hb_sz;
    unsigned n_marks = (unsigned)FINAL_MARK_BIT(sz);

#   ifdef USE_MARK_BYTES
      for (i = 0; i <= n_marks; i += (unsigned)MARK_BIT_OFFSET(sz)) {
        hhdr -> hb_marks[i] = 1;
      }
#   else
      for (i = 0; i < divWORDSZ(n_marks + WORDSZ); ++i) {
        hhdr -> hb_marks[i] = ONES;
      }
#   endif
#   ifdef MARK_BIT_PER_OBJ
      hhdr -> hb_n_marks = n_marks - 1;
#   else
      hhdr -> hb_n_marks = HBLK_OBJS(sz);
#   endif
}
/*
 * Clear all mark bits associated with block h.
 */
static void clear_marks_for_block(struct hblk *h, word dummy)
{
    register hdr * hhdr = HDR(h);

    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
        /* Mark bit for these is cleared only once the object is        */
        /* explicitly deallocated.  This either frees the block, or     */
        /* the bit is cleared once the object is on the free list.      */
    GC_clear_hdr_marks(hhdr);
}
/* Slow but general routines for setting/clearing/asking about mark bits. */
void GC_set_mark_bit(ptr_t p)
{
    struct hblk *h = HBLKPTR(p);
    hdr * hhdr = HDR(h);
    word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);

    if (!mark_bit_from_hdr(hhdr, bit_no)) {
      set_mark_bit_from_hdr(hhdr, bit_no);
      ++hhdr -> hb_n_marks;
    }
}
void GC_clear_mark_bit(ptr_t p)
{
    struct hblk *h = HBLKPTR(p);
    hdr * hhdr = HDR(h);
    word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);

    if (mark_bit_from_hdr(hhdr, bit_no)) {
      size_t n_marks;
      clear_mark_bit_from_hdr(hhdr, bit_no);
      n_marks = hhdr -> hb_n_marks - 1;
#     ifdef PARALLEL_MARK
        if (n_marks != 0 || !GC_parallel)
          hhdr -> hb_n_marks = n_marks;
        /* Don't decrement to zero.  The counts are approximate due to */
        /* concurrency issues, but we need to ensure that a count of   */
        /* zero implies an empty block.                                */
#     else
        hhdr -> hb_n_marks = n_marks;
#     endif
    }
}
GC_bool GC_is_marked(ptr_t p)
{
    struct hblk *h = HBLKPTR(p);
    hdr * hhdr = HDR(h);
    word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);

    return((GC_bool)mark_bit_from_hdr(hhdr, bit_no));
}
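/* A minimal standalone sketch of the addressing math used above,      */
/* assuming MARK_BIT_PER_GRANULE (the authoritative MARK_BIT_NO and    */
/* HBLKPTR definitions live in the private headers): the mark bit      */
/* index of an object is just its granule offset within its block.     */
#if 0
    static unsigned example_mark_bit_no(char *p, char *block_base,
                                        size_t granule_bytes)
    {
        /* E.g. with 16-byte granules, the object at block offset 0x40 */
        /* owns mark bit number 4.                                     */
        return (unsigned)((size_t)(p - block_base) / granule_bytes);
    }
#endif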
/*
 * Clear mark bits in all allocated heap blocks.  This invalidates
 * the marker invariant, and sets GC_mark_state to reflect this.
 * (This implicitly starts marking to reestablish the invariant.)
 */
void GC_clear_marks(void)
{
    GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
    GC_objects_are_marked = FALSE;
    GC_mark_state = MS_INVALID;
}

#ifdef CHECKSUMS
  extern void GC_check_dirty(void);
#endif
/* Initiate a garbage collection.  Initiates a full collection if the */
/* mark state is invalid.                                             */
void GC_initiate_gc(void)
{
    if (GC_dirty_maintained) GC_read_dirty();
#   ifdef STUBBORN_ALLOC
      GC_read_changed();
#   endif
#   ifdef CHECKSUMS
      if (GC_dirty_maintained) GC_check_dirty();
#   endif
    GC_n_rescuing_pages = 0;
    if (GC_mark_state == MS_NONE) {
        GC_mark_state = MS_PUSH_RESCUERS;
    } else if (GC_mark_state != MS_INVALID) {
        ABORT("unexpected state");
    } /* else this is really a full collection, and mark */
      /* bits are invalid.                               */
    scan_ptr = 0;
}
#ifdef PARALLEL_MARK
  STATIC void GC_do_parallel_mark(void); /* initiate parallel marking. */
#endif /* PARALLEL_MARK */

static void alloc_mark_stack(size_t);

#if defined(MSWIN32) && (!defined(__GNUC__) || !defined(_WIN64)) \
    || defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS)
  /* Under rare conditions, we may end up marking from nonexistent     */
  /* memory.  Hence we need to be prepared to recover by running       */
  /* GC_mark_some with a suitable handler in place.                    */
# define WRAP_MARK_SOME
#endif
/* Perform a small amount of marking.                   */
/* We try to touch roughly a page of memory.            */
/* Return TRUE if we just finished a mark phase.        */
/* Cold_gc_frame is an address inside a GC frame that   */
/* remains valid until all marking is complete.         */
/* A zero value indicates that it's OK to miss some     */
/* register values.                                     */
/* We hold the allocation lock.  In the case of         */
/* incremental collection, the world may not be stopped.*/
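/* An illustrative sketch (not compiled in): the stop-the-world mark   */
/* phase simply invokes this repeatedly until it reports completion    */
/* (cf. GC_stopped_mark in alloc.c):                                   */
#if 0
    while (!GC_mark_some(cold_gc_frame)) {
        /* Each call does roughly a page's worth of marking work. */
    }
#endif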
#ifdef WRAP_MARK_SOME
  /* For win32, this is called after we establish a structured  */
  /* exception handler, in case Windows unmaps one of our root  */
  /* segments.  See below.  In either case, we acquire the      */
  /* allocator lock long before we get here.                    */
  STATIC GC_bool GC_mark_some_inner(ptr_t cold_gc_frame)
#else
  GC_bool GC_mark_some(ptr_t cold_gc_frame)
#endif
{
    switch(GC_mark_state) {
        case MS_NONE:
            break;

        case MS_PUSH_RESCUERS:
            if (GC_mark_stack_top
                >= GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/2) {
                /* Go ahead and mark, even though that might cause us to */
                /* see more marked dirty objects later on.  Avoid this   */
                /* in the future.                                        */
                GC_mark_stack_too_small = TRUE;
                MARK_FROM_MARK_STACK();
                return(FALSE);
            } else {
                scan_ptr = GC_push_next_marked_dirty(scan_ptr);
                if (scan_ptr == 0) {
                    if (GC_print_stats) {
                        GC_log_printf("Marked from %lu dirty pages\n",
                                      (unsigned long)GC_n_rescuing_pages);
                    }
                    GC_push_roots(FALSE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;
                    }
                }
            }
            return(FALSE);
        case MS_PUSH_UNCOLLECTABLE:
            if (GC_mark_stack_top
                >= GC_mark_stack + GC_mark_stack_size/4) {
#               ifdef PARALLEL_MARK
                  /* Avoid this, since we don't parallelize the marker. */
                  if (GC_parallel) GC_mark_stack_too_small = TRUE;
#               endif
                MARK_FROM_MARK_STACK();
                return(FALSE);
            } else {
                scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
                if (scan_ptr == 0) {
                    GC_push_roots(TRUE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;
                    }
                }
            }
            return(FALSE);
        case MS_ROOTS_PUSHED:
#           ifdef PARALLEL_MARK
              /* In the incremental GC case, this currently doesn't     */
              /* quite do the right thing, since it runs to             */
              /* completion.  On the other hand, starting a             */
              /* parallel marker is expensive, so perhaps it is         */
              /* the right thing?                                       */
              /* Eventually, incremental marking should run             */
              /* asynchronously in multiple threads, without grabbing   */
              /* the allocation lock.                                   */
              if (GC_parallel) {
                  GC_do_parallel_mark();
                  GC_ASSERT(GC_mark_stack_top < (mse *)GC_first_nonempty);
                  GC_mark_stack_top = GC_mark_stack - 1;
                  if (GC_mark_stack_too_small) {
                      alloc_mark_stack(2*GC_mark_stack_size);
                  }
                  if (GC_mark_state == MS_ROOTS_PUSHED) {
                      GC_mark_state = MS_NONE;
                      return(TRUE);
                  }
                  return(FALSE);
              }
#           endif
            if (GC_mark_stack_top >= GC_mark_stack) {
                MARK_FROM_MARK_STACK();
                return(FALSE);
            } else {
                GC_mark_state = MS_NONE;
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                return(TRUE);
            }
        case MS_INVALID:
        case MS_PARTIALLY_INVALID:
            if (!GC_objects_are_marked) {
                GC_mark_state = MS_PUSH_UNCOLLECTABLE;
                return(FALSE);
            }
            if (GC_mark_stack_top >= GC_mark_stack) {
                MARK_FROM_MARK_STACK();
                return(FALSE);
            }
            if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
                /* About to start a heap scan for marked objects. */
                /* Mark stack is empty.  OK to reallocate.        */
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                GC_mark_state = MS_PARTIALLY_INVALID;
            }
            scan_ptr = GC_push_next_marked(scan_ptr);
            if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
                GC_push_roots(TRUE, cold_gc_frame);
                GC_objects_are_marked = TRUE;
                if (GC_mark_state != MS_INVALID) {
                    GC_mark_state = MS_ROOTS_PUSHED;
                }
            }
            return(FALSE);

        default:
            ABORT("GC_mark_some: bad state");
    }
    return(FALSE);
}
#if defined(MSWIN32) && defined(__GNUC__) && !defined(_WIN64)

    typedef struct {
      EXCEPTION_REGISTRATION ex_reg;
      void *alt_path;
    } ext_ex_regn;

    static EXCEPTION_DISPOSITION mark_ex_handler(
        struct _EXCEPTION_RECORD *ex_rec,
        void *est_frame,
        struct _CONTEXT *context,
        void *disp_ctxt)
    {
        if (ex_rec->ExceptionCode == STATUS_ACCESS_VIOLATION) {
          ext_ex_regn *xer = (ext_ex_regn *)est_frame;

          /* Unwind from the inner function assuming the standard */
          /* function prologue.                                   */
          /* Assumes code has not been compiled with              */
          /* -fomit-frame-pointer.                                */
          context->Esp = context->Ebp;
          context->Ebp = *((DWORD *)context->Esp);
          context->Esp = context->Esp - 8;

          /* Resume execution at the "real" handler within the */
          /* wrapper function.                                  */
          context->Eip = (DWORD)(xer->alt_path);

          return ExceptionContinueExecution;
        } else {
          return ExceptionContinueSearch;
        }
    }
# endif /* __GNUC__ && MSWIN32 */
#ifdef GC_WIN32_THREADS
  extern GC_bool GC_started_thread_while_stopped(void);
  /* In win32_threads.c.  Did we invalidate mark phase with an  */
  /* unexpected thread start?                                   */
#endif
# ifdef WRAP_MARK_SOME
  GC_bool GC_mark_some(ptr_t cold_gc_frame)
  {
      GC_bool ret_val;

#   ifdef MSWIN32
#    ifndef __GNUC__
      /* Windows 98 appears to asynchronously create and remove  */
      /* writable memory mappings, for reasons we haven't yet    */
      /* understood.  Since we look for writable regions to      */
      /* determine the root set, we may try to mark from an      */
      /* address range that disappeared since we started the     */
      /* collection.  Thus we have to recover from faults here.  */
      /* This code does not appear to be necessary for Windows   */
      /* 95/NT/2000.  Note that this code should never generate  */
      /* an incremental GC write fault.                          */
      /* It's conceivable that this is the same issue with       */
      /* terminating threads that we see with Linux and          */
      /* USE_PROC_FOR_LIBRARIES.                                 */
      __try {
          ret_val = GC_mark_some_inner(cold_gc_frame);
      } __except (GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ?
                  EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
          goto handle_ex;
      }
#     ifdef GC_WIN32_THREADS
        /* With DllMain-based thread tracking, a thread may have        */
        /* started while we were marking.  This is logically equivalent */
        /* to the exception case; our results are invalid and we have   */
        /* to start over.  This cannot be prevented since we can't      */
        /* block in DllMain.                                            */
        if (GC_started_thread_while_stopped()) goto handle_ex;
#     endif
    rm_handler:
      return ret_val;
#    else /* __GNUC__ */

      /* Manually install an exception handler since GCC does    */
      /* not yet support Structured Exception Handling (SEH) on  */
      /* Win32.                                                  */

      ext_ex_regn er;

      er.alt_path = &&handle_ex;
      er.ex_reg.handler = mark_ex_handler;
      asm volatile ("movl %%fs:0, %0" : "=r" (er.ex_reg.prev));
      asm volatile ("movl %0, %%fs:0" : : "r" (&er));
      ret_val = GC_mark_some_inner(cold_gc_frame);
      /* Prevent GCC from considering the following code unreachable */
      /* and thus eliminating it.                                    */
      if (er.alt_path == 0)
          goto handle_ex;
    rm_handler:
      /* Uninstall the exception handler. */
      asm volatile ("mov %0, %%fs:0" : : "r" (er.ex_reg.prev));
      return ret_val;

#    endif /* __GNUC__ */
#   else /* !MSWIN32 */
      /* Here we are handling the case in which /proc is used for root */
      /* finding, and we have threads.  We may find a stack for a      */
      /* thread that is in the process of exiting, and disappears      */
      /* while we are marking it.  This seems extremely difficult to   */
      /* avoid otherwise.                                              */
      if (GC_incremental) {
        WARN("Incremental GC incompatible with /proc roots\n", 0);
        /* I'm not sure if this could still work ... */
      }
      GC_setup_temporary_fault_handler();
      if (SETJMP(GC_jmp_buf) != 0) goto handle_ex;
      ret_val = GC_mark_some_inner(cold_gc_frame);
    rm_handler:
      GC_reset_fault_handler();
      return ret_val;

#   endif /* !MSWIN32 */
handle_ex:
    /* Exception handler starts here for all cases. */
      if (GC_print_stats) {
        GC_log_printf("Caught ACCESS_VIOLATION in marker. "
                      "Memory mapping disappeared.\n");
      }

      /* We have bad roots on the stack.  Discard mark stack. */
      /* Rescan from marked objects.  Redetermine roots.      */
      GC_invalidate_mark_state();
      scan_ptr = 0;

      ret_val = FALSE;
      goto rm_handler;  /* Back to platform-specific code. */
  }
#endif /* WRAP_MARK_SOME */
GC_bool GC_mark_stack_empty(void)
{
    return(GC_mark_stack_top < GC_mark_stack);
}

void GC_invalidate_mark_state(void)
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_top = GC_mark_stack - 1;
}

mse * GC_signal_mark_stack_overflow(mse *msp)
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_too_small = TRUE;
    if (GC_print_stats) {
        GC_log_printf("Mark stack overflow; current size = %lu entries\n",
                      (unsigned long)GC_mark_stack_size);
    }
    return(msp - GC_MARK_STACK_DISCARDS);
}
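/* An illustrative sketch (not compiled in) of the overflow protocol:  */
/* push routines check against the stack limit before storing a new    */
/* entry, and on overflow drop the top GC_MARK_STACK_DISCARDS entries  */
/* via the routine above, leaving the mark state MS_INVALID so that    */
/* marking restarts from the marked objects with a bigger stack.       */
#if 0
    if (mark_stack_top >= mark_stack_limit) {
        mark_stack_top = GC_signal_mark_stack_overflow(mark_stack_top);
    }
#endif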
/*
 * Mark objects pointed to by the regions described by
 * mark stack entries between mark_stack and mark_stack_top,
 * inclusive.  Assumes the upper limit of a mark stack entry
 * is never 0.  A mark stack entry never has size 0.
 * We try to traverse on the order of a hblk of memory before we return.
 * Caller is responsible for calling this until the mark stack is empty.
 * Note that this is the most performance critical routine in the
 * collector.  Hence it contains all sorts of ugly hacks to speed
 * things up.  In particular, we avoid procedure calls on the common
 * path, we take advantage of peculiarities of the mark descriptor
 * encoding, we optionally maintain a cache for the block address to
 * header mapping, we prefetch when an object is "grayed", etc.
 */
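/* For reference, a sketch of the descriptor encoding dispatched on    */
/* below (the authoritative definitions are in gc_mark.h): the low     */
/* two bits of a descriptor are a tag, and the remaining bits are      */
/* interpreted per tag.                                                */
#if 0
    switch (descr & GC_DS_TAGS) {
      case GC_DS_LENGTH:     /* Remaining bits are the object length   */
                             /* in bytes; scan the whole range.        */
      case GC_DS_BITMAP:     /* High bits map onto the initial words   */
                             /* of the object; a set bit means the     */
                             /* corresponding word may be a pointer.   */
      case GC_DS_PROC:       /* Call the registered mark procedure     */
                             /* GC_mark_procs[PROC(descr)] instead.    */
      case GC_DS_PER_OBJECT: /* The real descriptor is stored in the   */
                             /* object itself (or its type descriptor) */
                             /* at the encoded offset.                 */
        break;
    }
#endif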
mse * GC_mark_from(mse *mark_stack_top, mse *mark_stack, mse *mark_stack_limit)
{
  signed_word credit = HBLKSIZE;  /* Remaining credit for marking work. */
  ptr_t current_p;      /* Pointer to current candidate ptr.            */
  word current;         /* Candidate pointer.                           */
  ptr_t limit;          /* (Incl) limit of current candidate range.     */
  word descr;
  ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
  ptr_t least_ha = GC_least_plausible_heap_addr;
  DECLARE_HDR_CACHE;

# define SPLIT_RANGE_WORDS 128  /* Must be power of 2. */

  GC_objects_are_marked = TRUE;
  INIT_HDR_CACHE;
# ifdef OS2 /* Use untweaked version to circumvent compiler problem. */
    while (mark_stack_top >= mark_stack && credit >= 0) {
# else
    while ((((ptr_t)mark_stack_top - (ptr_t)mark_stack) | credit) >= 0) {
# endif
    current_p = mark_stack_top -> mse_start;
    descr = mark_stack_top -> mse_descr;
  retry:
    /* current_p and descr describe the current object.                 */
    /* *mark_stack_top is vacant.                                       */
    /* The following is 0 only for small objects described by a simple  */
    /* length descriptor.  For many applications this is the common     */
    /* case, so we try to detect it quickly.                            */
    if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | GC_DS_TAGS)) {
      word tag = descr & GC_DS_TAGS;

      switch(tag) {
        case GC_DS_LENGTH:
          /* Large length.                                              */
          /* Process part of the range to avoid pushing too much on the */
          /* stack.                                                     */
          GC_ASSERT(descr < (word)GC_greatest_plausible_heap_addr
                            - (word)GC_least_plausible_heap_addr);
#         ifdef ENABLE_TRACE
            if (GC_trace_addr >= current_p
                && GC_trace_addr < current_p + descr) {
              GC_log_printf("GC:%u Large section; start %p len %lu\n",
                            (unsigned)GC_gc_no, current_p,
                            (unsigned long)descr);
            }
#         endif /* ENABLE_TRACE */
#         ifdef PARALLEL_MARK
#           define SHARE_BYTES 2048
            if (descr > SHARE_BYTES && GC_parallel
                && mark_stack_top < mark_stack_limit - 1) {
              int new_size = (descr/2) & ~(sizeof(word)-1);
              mark_stack_top -> mse_start = current_p;
              mark_stack_top -> mse_descr = new_size + sizeof(word);
                                        /* Makes sure we handle  */
                                        /* misaligned pointers.  */
              mark_stack_top++;
#             ifdef ENABLE_TRACE
                if (GC_trace_addr >= current_p
                    && GC_trace_addr < current_p + descr) {
                  GC_log_printf("GC:%u splitting (parallel) %p at %p\n",
                                (unsigned)GC_gc_no, current_p,
                                current_p + new_size);
                }
#             endif /* ENABLE_TRACE */
              current_p += new_size;
              descr -= new_size;
              goto retry;
            }
#         endif /* PARALLEL_MARK */
          mark_stack_top -> mse_start =
                limit = current_p + WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
          mark_stack_top -> mse_descr =
                descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
#         ifdef ENABLE_TRACE
            if (GC_trace_addr >= current_p
                && GC_trace_addr < current_p + descr) {
              GC_log_printf("GC:%u splitting %p at %p\n",
                            (unsigned)GC_gc_no, current_p, limit);
            }
#         endif /* ENABLE_TRACE */
          /* Make sure that pointers overlapping the two ranges are */
          /* considered.                                            */
          limit += sizeof(word) - ALIGNMENT;
          break;
        case GC_DS_BITMAP:
          mark_stack_top--;
#         ifdef ENABLE_TRACE
            if (GC_trace_addr >= current_p
                && GC_trace_addr < current_p + WORDS_TO_BYTES(WORDSZ-2)) {
              GC_log_printf("GC:%u Tracing from %p bitmap descr %lu\n",
                            (unsigned)GC_gc_no, current_p,
                            (unsigned long)descr);
            }
#         endif /* ENABLE_TRACE */
          descr &= ~GC_DS_TAGS;
          credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
          while (descr != 0) {
            if ((signed_word)descr < 0) {
              current = *(word *)current_p;
              FIXUP_POINTER(current);
              if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
                PREFETCH((ptr_t)current);
#               ifdef ENABLE_TRACE
                  if (GC_trace_addr == current_p) {
                    GC_log_printf("GC:%u Considering(3) %p -> %p\n",
                                  (unsigned)GC_gc_no, current_p,
                                  (ptr_t)current);
                  }
#               endif /* ENABLE_TRACE */
                PUSH_CONTENTS((ptr_t)current, mark_stack_top,
                              mark_stack_limit, current_p, exit1);
              }
            }
            descr <<= 1;
            current_p += sizeof(word);
          }
          continue;
        case GC_DS_PROC:
          mark_stack_top--;
#         ifdef ENABLE_TRACE
            if (GC_trace_addr >= current_p
                && GC_base(current_p) != 0
                && GC_base(current_p) == GC_base(GC_trace_addr)) {
              GC_log_printf("GC:%u Tracing from %p proc descr %lu\n",
                            (unsigned)GC_gc_no, current_p,
                            (unsigned long)descr);
            }
#         endif /* ENABLE_TRACE */
          credit -= GC_PROC_BYTES;
          mark_stack_top = (*PROC(descr))
                              ((word *)current_p, mark_stack_top,
                               mark_stack_limit, ENV(descr));
          continue;
        case GC_DS_PER_OBJECT:
          if ((signed_word)descr >= 0) {
            /* Descriptor is in the object. */
            descr = *(word *)(current_p + descr - GC_DS_PER_OBJECT);
          } else {
            /* Descriptor is in type descriptor pointed to by first */
            /* word in object.                                      */
            ptr_t type_descr = *(ptr_t *)current_p;
            /* type_descr is either a valid pointer to the descriptor   */
            /* structure, or this object was on a free list.  If it     */
            /* was anything but the last object on the free list, we    */
            /* will misinterpret the next object on the free list as    */
            /* the type descriptor, and get a 0 GC descriptor, which    */
            /* is ideal.  Unfortunately, we need to check for the last  */
            /* object case explicitly.                                  */
            if (0 == type_descr) {
                /* Rarely executed. */
                mark_stack_top--;
                continue;
            }
            descr = *(word *)(type_descr
                              - (descr + (GC_INDIR_PER_OBJ_BIAS
                                          - GC_DS_PER_OBJECT)));
          }
          if (0 == descr) {
              /* Can happen either because we generated a 0 descriptor */
              /* or we saw a pointer to a free object.                 */
              mark_stack_top--;
              continue;
          }
          goto retry;
        default:
          limit = 0; /* initialized to prevent warning. */
      }
    } else /* Small object with length descriptor */ {
      mark_stack_top--;
      limit = current_p + (word)descr;
    }
#   ifdef ENABLE_TRACE
      if (GC_trace_addr >= current_p
          && GC_trace_addr < limit) {
        GC_log_printf("GC:%u Tracing from %p len %lu\n",
                      (unsigned)GC_gc_no, current_p, (unsigned long)descr);
      }
#   endif /* ENABLE_TRACE */
    /* The simple case in which we're scanning a range. */
    GC_ASSERT(!((word)current_p & (ALIGNMENT-1)));
    credit -= limit - current_p;
    limit -= sizeof(word);
    {
#     define PREF_DIST 4

#     ifndef SMALL_CONFIG
        word deferred;

        /* Try to prefetch the next pointer to be examined asap.      */
        /* Empirically, this also seems to help slightly without      */
        /* prefetches, at least on linux/X86.  Presumably this loop   */
        /* ends up with less register pressure, and gcc thus ends up  */
        /* generating slightly better code.  Overall gcc code quality */
        /* for this loop is still not great.                          */
        for(;;) {
          PREFETCH(limit - PREF_DIST*CACHE_LINE_SIZE);
          GC_ASSERT(limit >= current_p);
          deferred = *(word *)limit;
          FIXUP_POINTER(deferred);
          limit -= ALIGNMENT;
          if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
            PREFETCH((ptr_t)deferred);
            break;
          }
          if (current_p > limit) goto next_object;
          /* Unroll once, so we don't do too many of the prefetches */
          /* based on limit.                                        */
          deferred = *(word *)limit;
          FIXUP_POINTER(deferred);
          limit -= ALIGNMENT;
          if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
            PREFETCH((ptr_t)deferred);
            break;
          }
          if (current_p > limit) goto next_object;
        }
#     endif
      while (current_p <= limit) {
        /* Empirically, unrolling this loop doesn't help a lot. */
        /* Since PUSH_CONTENTS expands to a lot of code,        */
        /* we don't.                                            */
        current = *(word *)current_p;
        FIXUP_POINTER(current);
        PREFETCH(current_p + PREF_DIST*CACHE_LINE_SIZE);
        if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
          /* Prefetch the contents of the object we just pushed.  It's */
          /* likely we will need them soon.                            */
          PREFETCH((ptr_t)current);
#         ifdef ENABLE_TRACE
            if (GC_trace_addr == current_p) {
              GC_log_printf("GC:%u Considering(1) %p -> %p\n",
                            (unsigned)GC_gc_no, current_p, (ptr_t)current);
            }
#         endif /* ENABLE_TRACE */
          PUSH_CONTENTS((ptr_t)current, mark_stack_top,
                        mark_stack_limit, current_p, exit2);
        }
        current_p += ALIGNMENT;
      }
#     ifndef SMALL_CONFIG
        /* We still need to mark the entry we previously prefetched. */
        /* We already know that it passes the preliminary pointer    */
        /* validity test.                                            */
#       ifdef ENABLE_TRACE
          if (GC_trace_addr == current_p) {
            GC_log_printf("GC:%u Considering(2) %p -> %p\n",
                          (unsigned)GC_gc_no, current_p, (ptr_t)deferred);
          }
#       endif /* ENABLE_TRACE */
        PUSH_CONTENTS((ptr_t)deferred, mark_stack_top,
                      mark_stack_limit, current_p, exit4);
        next_object:;
#     endif
    }
  }
  return mark_stack_top;
}
#ifdef PARALLEL_MARK

STATIC GC_bool GC_help_wanted = FALSE;  /* Protected by mark lock.      */
STATIC unsigned GC_helper_count = 0;    /* Number of running helpers.   */
                                        /* Protected by mark lock.      */
STATIC unsigned GC_active_count = 0;    /* Number of active helpers.    */
                                        /* Protected by mark lock.      */
                                        /* May increase and decrease    */
                                        /* within each mark cycle.  But */
                                        /* once it returns to 0, it     */
                                        /* stays zero for the cycle.    */
#define LOCAL_MARK_STACK_SIZE HBLKSIZE
        /* Under normal circumstances, this is big enough to guarantee */
        /* we don't overflow half of it in a single call to            */
        /* GC_mark_from.                                               */

/* Steal mark stack entries starting at mse low into mark stack local */
/* until we either steal mse high, or we have max entries.            */
/* Return a pointer to the top of the local mark stack.               */
/* *next is replaced by a pointer to the next unscanned mark stack    */
/* entry.                                                             */
STATIC mse * GC_steal_mark_stack(mse * low, mse * high, mse * local,
                                 unsigned max, mse **next)
{
    mse *p;
    mse *top = local - 1;
    unsigned i = 0;

    GC_ASSERT(high >= low-1 && (word)(high - low + 1) <= GC_mark_stack_size);
    for (p = low; p <= high && i <= max; ++p) {
        word descr = AO_load((volatile AO_t *) &(p -> mse_descr));
        if (descr != 0) {
            /* Must be ordered after read of descr: */
            AO_store_release_write((volatile AO_t *) &(p -> mse_descr), 0);
            /* More than one thread may get this entry, but that's only */
            /* a minor performance problem.                             */
            ++top;
            top -> mse_descr = descr;
            top -> mse_start = p -> mse_start;
            GC_ASSERT((top -> mse_descr & GC_DS_TAGS) != GC_DS_LENGTH ||
                      top -> mse_descr < (word)GC_greatest_plausible_heap_addr
                                         - (word)GC_least_plausible_heap_addr);
            /* If this is a big object, count it as */
            /* size/256 + 1 objects.                */
            ++i;
            if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) i += (int)(descr >> 8);
        }
    }
    *next = p;
    return top;
}
/* Copy back a local mark stack.      */
/* low and high are inclusive bounds. */
STATIC void GC_return_mark_stack(mse * low, mse * high)
{
    mse * my_top;
    mse * my_start;
    size_t stack_size;

    if (high < low) return;
    stack_size = high - low + 1;
    GC_acquire_mark_lock();
    my_top = GC_mark_stack_top; /* Concurrent modification impossible. */
    my_start = my_top + 1;
    if (my_start - GC_mark_stack + stack_size > GC_mark_stack_size) {
      if (GC_print_stats) {
          GC_log_printf("No room to copy back mark stack\n");
      }
      GC_mark_state = MS_INVALID;
      GC_mark_stack_too_small = TRUE;
      /* We drop the local mark stack.  We'll fix things later. */
    } else {
      BCOPY(low, my_start, stack_size * sizeof(mse));
      GC_ASSERT((mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top))
                == my_top);
      AO_store_release_write((volatile AO_t *)(&GC_mark_stack_top),
                             (AO_t)(my_top + stack_size));
      /* Ensures visibility of previously written stack contents. */
    }
    GC_release_mark_lock();
    GC_notify_all_marker();
}
/* Mark from the local mark stack.            */
/* On return, the local mark stack is empty.  */
/* But this may be achieved by copying the    */
/* local mark stack back into the global one. */
STATIC void GC_do_local_mark(mse *local_mark_stack, mse *local_top)
{
    unsigned n;
#   define N_LOCAL_ITERS 1

#   ifdef GC_ASSERTIONS
      /* Make sure we don't hold mark lock. */
      GC_acquire_mark_lock();
      GC_release_mark_lock();
#   endif
    for (;;) {
        for (n = 0; n < N_LOCAL_ITERS; ++n) {
            local_top = GC_mark_from(local_top, local_mark_stack,
                                     local_mark_stack + LOCAL_MARK_STACK_SIZE);
            if (local_top < local_mark_stack) return;
            if (local_top - local_mark_stack >= LOCAL_MARK_STACK_SIZE/2) {
                GC_return_mark_stack(local_mark_stack, local_top);
                return;
            }
        }
        if ((mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top))
            < (mse *)AO_load(&GC_first_nonempty)
            && GC_active_count < GC_helper_count
            && local_top > local_mark_stack + 1) {
            /* Try to share the load, since the main stack is empty, */
            /* and helper threads are waiting for a refill.          */
            /* The entries near the bottom of the stack are likely   */
            /* to require more work.  Thus we return those, even     */
            /* though it's harder.                                   */
            mse * new_bottom = local_mark_stack
                               + (local_top - local_mark_stack)/2;
            GC_ASSERT(new_bottom > local_mark_stack
                      && new_bottom < local_top);
            GC_return_mark_stack(local_mark_stack, new_bottom - 1);
            memmove(local_mark_stack, new_bottom,
                    (local_top - new_bottom + 1) * sizeof(mse));
            local_top -= (new_bottom - local_mark_stack);
        }
    }
}
#define ENTRIES_TO_GET 5

long GC_markers = 2;    /* Normally changed by thread-library-specific code. */

/* Mark using the local mark stack until the global mark stack is empty  */
/* and there are no active workers.  Update GC_first_nonempty to reflect */
/* progress.                                                             */
/* Caller does not hold mark lock.                                       */
/* Caller has already incremented GC_helper_count.  We decrement it,     */
/* and maintain GC_active_count.                                         */
STATIC void GC_mark_local(mse *local_mark_stack, int id)
{
    mse * my_first_nonempty;

    GC_acquire_mark_lock();
    my_first_nonempty = (mse *)AO_load(&GC_first_nonempty);
    GC_ASSERT((mse *)AO_load(&GC_first_nonempty) >= GC_mark_stack &&
              (mse *)AO_load(&GC_first_nonempty) <=
              (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
    if (GC_print_stats == VERBOSE)
        GC_log_printf("Starting mark helper %lu\n", (unsigned long)id);
    GC_release_mark_lock();
    for (;;) {
        size_t n_on_stack;
        size_t n_to_get;
        mse * my_top;
        mse * local_top;
        mse * global_first_nonempty = (mse *)AO_load(&GC_first_nonempty);

        GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
                  my_first_nonempty <=
                  (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
        GC_ASSERT(global_first_nonempty >= GC_mark_stack &&
                  global_first_nonempty <=
                  (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
        if (my_first_nonempty < global_first_nonempty) {
            my_first_nonempty = global_first_nonempty;
        } else if (global_first_nonempty < my_first_nonempty) {
            AO_compare_and_swap(&GC_first_nonempty,
                                (AO_t)global_first_nonempty,
                                (AO_t)my_first_nonempty);
            /* If this fails, we just go ahead, without updating */
            /* GC_first_nonempty.                                */
        }
        /* Perhaps we should also update GC_first_nonempty, if it */
        /* is less.  But that would require using atomic updates. */
        my_top = (mse *)AO_load_acquire((volatile AO_t *)(&GC_mark_stack_top));
        n_on_stack = my_top - my_first_nonempty + 1;
        if (0 == n_on_stack) {
            GC_acquire_mark_lock();
            my_top = GC_mark_stack_top;
                /* Asynchronous modification impossible here, */
                /* since we hold mark lock.                   */
            n_on_stack = my_top - my_first_nonempty + 1;
            if (0 == n_on_stack) {
                GC_active_count--;
                GC_ASSERT(GC_active_count <= GC_helper_count);
                /* Other markers may redeposit objects */
                /* on the stack.                       */
                if (0 == GC_active_count) GC_notify_all_marker();
                while (GC_active_count > 0
                       && (mse *)AO_load(&GC_first_nonempty)
                          > GC_mark_stack_top) {
                    /* We will be notified if either GC_active_count  */
                    /* reaches zero, or if more objects are pushed on */
                    /* the global mark stack.                         */
                    GC_wait_marker();
                }
                if (GC_active_count == 0 &&
                    (mse *)AO_load(&GC_first_nonempty) > GC_mark_stack_top) {
                    GC_bool need_to_notify = FALSE;
                    /* The above conditions can't be falsified while we */
                    /* hold the mark lock, since neither                */
                    /* GC_active_count nor GC_mark_stack_top can        */
                    /* change.  GC_first_nonempty can only be           */
                    /* incremented asynchronously.  Thus we know that   */
                    /* both conditions actually held simultaneously.    */
                    GC_helper_count--;
                    if (0 == GC_helper_count) need_to_notify = TRUE;
                    if (GC_print_stats == VERBOSE)
                        GC_log_printf("Finished mark helper %lu\n",
                                      (unsigned long)id);
                    GC_release_mark_lock();
                    if (need_to_notify) GC_notify_all_marker();
                    return;
                }
                /* else there's something on the stack again, or */
                /* another helper may push something.            */
                GC_active_count++;
                GC_ASSERT(GC_active_count > 0);
                GC_release_mark_lock();
                continue;
            }
            GC_release_mark_lock();
        }
        n_to_get = ENTRIES_TO_GET;
        if (n_on_stack < 2 * ENTRIES_TO_GET) n_to_get = 1;
        local_top = GC_steal_mark_stack(my_first_nonempty, my_top,
                                        local_mark_stack, n_to_get,
                                        &my_first_nonempty);
        GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
                  my_first_nonempty <=
                  (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
        GC_do_local_mark(local_mark_stack, local_top);
    }
}
/* Perform Parallel mark.                  */
/* We hold the GC lock, not the mark lock. */
/* Currently runs until the mark stack is  */
/* empty.                                  */
STATIC void GC_do_parallel_mark(void)
{
    mse local_mark_stack[LOCAL_MARK_STACK_SIZE];

    GC_acquire_mark_lock();
    GC_ASSERT(I_HOLD_LOCK());
    /* This could be a GC_ASSERT, but it seems safer to keep it on */
    /* all the time, especially since it's cheap.                  */
    if (GC_help_wanted || GC_active_count != 0 || GC_helper_count != 0)
        ABORT("Tried to start parallel mark in bad state");
    if (GC_print_stats == VERBOSE)
        GC_log_printf("Starting marking for mark phase number %lu\n",
                      (unsigned long)GC_mark_no);
    GC_first_nonempty = (AO_t)GC_mark_stack;
    GC_active_count = 0;
    GC_helper_count = 1;
    GC_help_wanted = TRUE;
    GC_release_mark_lock();
    GC_notify_all_marker();
        /* Wake up potential helpers. */
    GC_mark_local(local_mark_stack, 0);
    GC_acquire_mark_lock();
    GC_help_wanted = FALSE;
    /* Done; clean up. */
    while (GC_helper_count > 0) GC_wait_marker();
    /* GC_helper_count cannot be incremented while GC_help_wanted == FALSE */
    if (GC_print_stats == VERBOSE)
        GC_log_printf("Finished marking for mark phase number %lu\n",
                      (unsigned long)GC_mark_no);
    GC_mark_no++;
    GC_release_mark_lock();
    GC_notify_all_marker();
}
/* Try to help out the marker, if it's running.        */
/* We do not hold the GC lock, but the requestor does. */
void GC_help_marker(word my_mark_no)
{
    mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
    unsigned my_id;

    if (!GC_parallel) return;
    GC_acquire_mark_lock();
    while (GC_mark_no < my_mark_no
           || (!GC_help_wanted && GC_mark_no == my_mark_no)) {
      GC_wait_marker();
    }
    my_id = GC_helper_count;
    if (GC_mark_no != my_mark_no || my_id >= (unsigned)GC_markers) {
      /* Second test is useful only if original threads can also */
      /* act as helpers.  Under Linux they can't.                */
      GC_release_mark_lock();
      return;
    }
    GC_helper_count = my_id + 1;
    GC_release_mark_lock();
    GC_mark_local(local_mark_stack, my_id);
    /* GC_mark_local decrements GC_helper_count. */
}
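/* To summarize the handshake implemented by the two routines above:   */
/* the initiating thread (GC_do_parallel_mark) sets GC_help_wanted and */
/* counts itself as helper 0; each helper entering GC_help_marker      */
/* claims the next id, marks via GC_mark_local, and drops              */
/* GC_helper_count on the way out; the initiator then waits for that   */
/* count to return to zero before declaring the mark phase finished.   */
/* All of this is coordinated by the mark lock and                     */
/* GC_notify_all_marker().                                             */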
#endif /* PARALLEL_MARK */

/* Allocate or reallocate space for mark stack of size n entries. */
/* May silently fail.                                             */
static void alloc_mark_stack(size_t n)
{
    mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct GC_ms_entry));
#   ifdef GWW_VDB
      /* Don't recycle a stack segment obtained with the wrong flags. */
      /* Win32 GetWriteWatch requires the right kind of memory.       */
      static GC_bool GC_incremental_at_stack_alloc = FALSE;
      GC_bool recycle_old = (!GC_incremental || GC_incremental_at_stack_alloc);

      GC_incremental_at_stack_alloc = GC_incremental;
#   else
#     define recycle_old 1
#   endif
1203 if (GC_mark_stack_size != 0) {
1204 if (new_stack != 0) {
1206 /* Recycle old space */
1207 size_t page_offset = (word)GC_mark_stack & (GC_page_size - 1);
1208 size_t size = GC_mark_stack_size * sizeof(struct GC_ms_entry);
1211 if (0 != page_offset) displ = GC_page_size - page_offset;
1212 size = (size - displ) & ~(GC_page_size - 1);
1214 GC_add_to_heap((struct hblk *)
1215 ((word)GC_mark_stack + displ), (word)size);
1218 GC_mark_stack = new_stack;
1219 GC_mark_stack_size = n;
1220 GC_mark_stack_limit = new_stack + n;
1221 if (GC_print_stats) {
1222 GC_log_printf("Grew mark stack to %lu frames\n",
1223 (unsigned long) GC_mark_stack_size);
1226 if (GC_print_stats) {
1227 GC_log_printf("Failed to grow mark stack to %lu frames\n",
1232 if (new_stack == 0) {
1233 GC_err_printf("No space for mark stack\n");
1236 GC_mark_stack = new_stack;
1237 GC_mark_stack_size = n;
1238 GC_mark_stack_limit = new_stack + n;
1240 GC_mark_stack_top = GC_mark_stack-1;
1243 void GC_mark_init(void)
1245 alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
/*
 * Push all locations between b and t onto the mark stack.
 * b is the first location to be checked. t is one past the last
 * location to be checked.
 * Should only be used if there is no possibility of mark stack
 * overflow.
 */
void GC_push_all(ptr_t bottom, ptr_t top)
{
    register word length;

    bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
    if (top == 0 || bottom == top) return;
    GC_mark_stack_top++;
    if (GC_mark_stack_top >= GC_mark_stack_limit) {
        ABORT("unexpected mark stack overflow");
    }
    length = top - bottom;
#   if GC_DS_TAGS > ALIGNMENT - 1
        length += GC_DS_TAGS;
        length &= ~GC_DS_TAGS;
#   endif
    GC_mark_stack_top -> mse_start = bottom;
    GC_mark_stack_top -> mse_descr = length;
}
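/* A note on the length rounding above: length is already a multiple   */
/* of ALIGNMENT, so whenever ALIGNMENT - 1 covers the GC_DS_TAGS bits  */
/* (i.e. ALIGNMENT >= 4 with the usual two tag bits) the low bits are  */
/* already zero, which is exactly GC_DS_LENGTH, and the adjustment is  */
/* compiled out.  Only for smaller alignment must the length be        */
/* rounded up (e.g. 14 -> 16 with GC_DS_TAGS == 3) to keep the tag     */
/* bits clear.                                                         */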
/*
 * Analogous to the above, but push only those pages h with dirty_fn(h) != 0.
 * We use push_fn to actually push the block.
 * Used both to selectively push dirty pages, or to push a block
 * in piecemeal fashion, to allow for more marking concurrency.
 * Will not overflow mark stack if push_fn pushes a small fixed number
 * of entries.  (This is invoked only if push_fn pushes a single entry,
 * or if it marks each object before pushing it, thus ensuring progress
 * in the event of a stack overflow.)
 */
void GC_push_selected(ptr_t bottom, ptr_t top,
                      int (*dirty_fn) (struct hblk *),
                      void (*push_fn) (ptr_t, ptr_t))
{
    struct hblk * h;

    bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
    if (top == 0 || bottom == top) return;

    h = HBLKPTR(bottom + HBLKSIZE);
    if (top <= (ptr_t) h) {
        if ((*dirty_fn)(h-1)) {
            (*push_fn)(bottom, top);
        }
        return;
    }
    if ((*dirty_fn)(h-1)) {
        (*push_fn)(bottom, (ptr_t)h);
    }
    while ((ptr_t)(h+1) <= top) {
        if ((*dirty_fn)(h)) {
            if ((word)(GC_mark_stack_top - GC_mark_stack)
                > 3 * GC_mark_stack_size / 4) {
                /* Danger of mark stack overflow. */
                (*push_fn)((ptr_t)h, top);
                return;
            } else {
                (*push_fn)((ptr_t)h, (ptr_t)(h+1));
            }
        }
        h++;
    }
    if ((ptr_t)h != top) {
        if ((*dirty_fn)(h)) {
            (*push_fn)((ptr_t)h, top);
        }
    }
    if (GC_mark_stack_top >= GC_mark_stack_limit) {
        ABORT("unexpected mark stack overflow");
    }
}
# ifndef SMALL_CONFIG

void GC_push_conditional(ptr_t bottom, ptr_t top, GC_bool all)
{
    if (all) {
      if (GC_dirty_maintained) {
#       ifdef PROC_VDB
          /* Pages that were never dirtied cannot contain pointers. */
          GC_push_selected(bottom, top, GC_page_was_ever_dirty, GC_push_all);
#       else
          GC_push_all(bottom, top);
#       endif
      } else {
        GC_push_all(bottom, top);
      }
    } else {
        GC_push_selected(bottom, top, GC_page_was_dirty, GC_push_all);
    }
}
# endif /* !SMALL_CONFIG */

# if defined(MSWIN32) || defined(MSWINCE)
  void __cdecl GC_push_one(word p)
# else
  void GC_push_one(word p)
# endif
{
    GC_PUSH_ONE_STACK(p, MARKED_FROM_REGISTER);
}
struct GC_ms_entry *GC_mark_and_push(void *obj,
                                     mse *mark_stack_ptr,
                                     mse *mark_stack_limit,
                                     void **src)
{
    hdr * hhdr;

    PREFETCH(obj);
    GET_HDR(obj, hhdr);
    if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr),FALSE)) {
      if (GC_all_interior_pointers) {
        hhdr = GC_find_header(GC_base(obj));
        if (hhdr == 0) {
          GC_ADD_TO_BLACK_LIST_NORMAL(obj, (ptr_t)src);
          return mark_stack_ptr;
        }
      } else {
        GC_ADD_TO_BLACK_LIST_NORMAL(obj, (ptr_t)src);
        return mark_stack_ptr;
      }
    }
    if (EXPECT(HBLK_IS_FREE(hhdr),0)) {
        GC_ADD_TO_BLACK_LIST_NORMAL(obj, (ptr_t)src);
        return mark_stack_ptr;
    }

    PUSH_CONTENTS_HDR(obj, mark_stack_ptr /* modified */, mark_stack_limit,
                      (ptr_t)src, was_marked, hhdr, TRUE);
 was_marked:
    return mark_stack_ptr;
}
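/* An illustrative sketch (not compiled in) of the intended client use */
/* of GC_mark_and_push, via the GC_MARK_AND_PUSH wrapper in gc_mark.h: */
/* a custom mark procedure (registered with GC_new_proc and referenced */
/* by a GC_MAKE_PROC descriptor) pushes the fields it knows about and  */
/* returns the updated mark stack pointer.  The struct and its fields  */
/* are hypothetical.                                                   */
#if 0
#   include "gc_mark.h"
    struct my_node {
        struct my_node *left;
        struct my_node *right;
        char data[48];          /* never scanned for pointers */
    };

    static struct GC_ms_entry *my_mark_proc(GC_word *addr,
                                            struct GC_ms_entry *msp,
                                            struct GC_ms_entry *msl,
                                            GC_word env)
    {
        struct my_node *n = (struct my_node *)addr;

        /* Push only the two pointer fields. */
        msp = GC_MARK_AND_PUSH(n -> left, msp, msl, (void **)&n -> left);
        msp = GC_MARK_AND_PUSH(n -> right, msp, msl, (void **)&n -> right);
        return msp;
    }
#endif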
/* Mark and push (i.e. gray) a single object p onto the main    */
/* mark stack.  Consider p to be valid if it is an interior     */
/* pointer.                                                     */
/* The object p has passed a preliminary pointer validity       */
/* test, but we do not definitely know whether it is valid.     */
/* Mark bits are NOT atomically updated.  Thus this must be the */
/* only thread setting them.                                    */
# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
  void GC_mark_and_push_stack(ptr_t p, ptr_t source)
# else
  void GC_mark_and_push_stack(ptr_t p)
# define source ((ptr_t)0)
# endif
{
    hdr * hhdr;
    ptr_t r = p;

    PREFETCH(p);
    GET_HDR(p, hhdr);
    if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr),FALSE)) {
        if (hhdr != 0) {
            r = GC_base(p);
            hhdr = HDR(r);
        }
        if (hhdr == 0) {
            GC_ADD_TO_BLACK_LIST_STACK(p, source);
            return;
        }
    }
    if (EXPECT(HBLK_IS_FREE(hhdr),0)) {
        GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
        return;
    }
#   if defined(MANUAL_VDB) && defined(THREADS)
      /* Pointer is on the stack.  We may have dirtied the object */
      /* it points to, but not yet have called GC_dirty();        */
      GC_dirty(p);      /* Implicitly affects entire object. */
#   endif
    PUSH_CONTENTS_HDR(r, GC_mark_stack_top, GC_mark_stack_limit,
                      source, mark_and_push_exit, hhdr, FALSE);
  mark_and_push_exit: ;
    /* We silently ignore pointers to near the end of a block, */
    /* which is very mildly suboptimal.                        */
    /* FIXME: We should probably add a header word to address  */
    /* this.                                                   */
}
# undef source
# ifdef TRACE_BUF

# define TRACE_ENTRIES 1000

struct trace_entry {
    char * kind;
    word gc_no;
    word bytes_allocd;
    word arg1;
    word arg2;
} GC_trace_buf[TRACE_ENTRIES];

int GC_trace_buf_ptr = 0;

void GC_add_trace_entry(char *kind, word arg1, word arg2)
{
    GC_trace_buf[GC_trace_buf_ptr].kind = kind;
    GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
    GC_trace_buf[GC_trace_buf_ptr].bytes_allocd = GC_bytes_allocd;
    GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
    GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
    GC_trace_buf_ptr++;
    if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
}

void GC_print_trace(word gc_no, GC_bool lock)
{
    int i;
    struct trace_entry *p;

    if (lock) LOCK();
    for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
        if (i < 0) i = TRACE_ENTRIES-1;
        p = GC_trace_buf + i;
        if (p -> gc_no < gc_no || p -> kind == 0) return;
        printf("Trace:%s (gc:%u,bytes:%lu) 0x%X, 0x%X\n",
               p -> kind, (unsigned)p -> gc_no,
               (unsigned long)p -> bytes_allocd,
               (p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
    }
    printf("Trace incomplete\n");
    if (lock) UNLOCK();
}

# endif /* TRACE_BUF */
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans the entire region immediately, in case the contents
 * change.
 */
void GC_push_all_eager(ptr_t bottom, ptr_t top)
{
    word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
    register word *p;
    register word q;
    register word *lim;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    if (top == 0) return;
    /* Check all pointers in range and push if they appear */
    /* to be valid.                                        */
    lim = t - 1 /* longword */;
    for (p = b; p <= lim; p = (word *)(((ptr_t)p) + ALIGNMENT)) {
        q = *p;
        GC_PUSH_ONE_STACK(q, p);
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans part of the area immediately, to make sure that saved
 * register values are not lost.
 * Cold_gc_frame delimits the stack section that must be scanned
 * eagerly.  A zero value indicates that no eager scanning is needed.
 * We don't need to worry about the MANUAL_VDB case here, since this
 * is only called in the single-threaded case.  We assume that we
 * cannot collect between an assignment and the corresponding
 * GC_dirty() call.
 */
#ifndef THREADS
void GC_push_all_stack_partially_eager(ptr_t bottom, ptr_t top,
                                       ptr_t cold_gc_frame)
{
  if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
    /* Push the hot end of the stack eagerly, so that register values */
    /* saved inside GC frames are marked before they disappear.       */
    /* The rest of the marking can be deferred until later.           */
    if (0 == cold_gc_frame) {
        GC_push_all_stack(bottom, top);
        return;
    }
    GC_ASSERT(bottom <= cold_gc_frame && cold_gc_frame <= top);
#   ifdef STACK_GROWS_DOWN
      GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
      GC_push_all_eager(bottom, cold_gc_frame);
#   else /* STACK_GROWS_UP */
      GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
      GC_push_all_eager(cold_gc_frame, top);
#   endif /* STACK_GROWS_UP */
  } else {
    GC_push_all_eager(bottom, top);
  }
# ifdef TRACE_BUF
    GC_add_trace_entry("GC_push_all_stack", bottom, top);
# endif
}
#endif /* !THREADS */
void GC_push_all_stack(ptr_t bottom, ptr_t top)
{
# if defined(THREADS) && defined(MPROTECT_VDB)
    GC_push_all_eager(bottom, top);
# else
    if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
        GC_push_all(bottom, top);
    } else {
        GC_push_all_eager(bottom, top);
    }
# endif
}
#if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES) && \
    defined(MARK_BIT_PER_GRANULE)
# if GC_GRANULE_WORDS == 1
#   define USE_PUSH_MARKED_ACCELERATORS
#   define PUSH_GRANULE(q) \
      { word qcontents = (q)[0]; \
        GC_PUSH_ONE_HEAP(qcontents, (q)); }
# elif GC_GRANULE_WORDS == 2
#   define USE_PUSH_MARKED_ACCELERATORS
#   define PUSH_GRANULE(q) \
      { word qcontents = (q)[0]; \
        GC_PUSH_ONE_HEAP(qcontents, (q)); \
        qcontents = (q)[1]; \
        GC_PUSH_ONE_HEAP(qcontents, (q)+1); }
# elif GC_GRANULE_WORDS == 4
#   define USE_PUSH_MARKED_ACCELERATORS
#   define PUSH_GRANULE(q) \
      { word qcontents = (q)[0]; \
        GC_PUSH_ONE_HEAP(qcontents, (q)); \
        qcontents = (q)[1]; \
        GC_PUSH_ONE_HEAP(qcontents, (q)+1); \
        qcontents = (q)[2]; \
        GC_PUSH_ONE_HEAP(qcontents, (q)+2); \
        qcontents = (q)[3]; \
        GC_PUSH_ONE_HEAP(qcontents, (q)+3); }
# endif
#endif /* !USE_MARK_BYTES && MARK_BIT_PER_GRANULE */
#ifdef USE_PUSH_MARKED_ACCELERATORS
/* Push all objects reachable from marked objects in the given block */
/* containing objects of size 1 granule.                             */
void GC_push_marked1(struct hblk *h, hdr *hhdr)
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    word *p;
    word *plim;
    word *q;
    word mark_word;

    /* Allow registers to be used for some frequently accessed */
    /* global variables.  Otherwise aliasing issues are likely */
    /* to prevent that.                                        */
    ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    ptr_t least_ha = GC_least_plausible_heap_addr;
    mse * mark_stack_top = GC_mark_stack_top;
    mse * mark_stack_limit = GC_mark_stack_limit;
#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* Go through all words in block. */
    while (p < plim) {
        mark_word = *mark_word_addr++;
        q = p;
        while(mark_word != 0) {
          if (mark_word & 1) {
              PUSH_GRANULE(q);
          }
          q += GC_GRANULE_WORDS;
          mark_word >>= 1;
        }
        p += WORDSZ*GC_GRANULE_WORDS;
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit

    GC_mark_stack_top = mark_stack_top;
}
#ifndef UNALIGNED

/* Push all objects reachable from marked objects in the given block */
/* of size 2 (granules) objects.                                     */
void GC_push_marked2(struct hblk *h, hdr *hhdr)
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    word *p;
    word *plim;
    word *q;
    word mark_word;

    ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    ptr_t least_ha = GC_least_plausible_heap_addr;
    mse * mark_stack_top = GC_mark_stack_top;
    mse * mark_stack_limit = GC_mark_stack_limit;

#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* Go through all words in block. */
    while (p < plim) {
        mark_word = *mark_word_addr++;
        q = p;
        while(mark_word != 0) {
          if (mark_word & 1) {
              PUSH_GRANULE(q);
              PUSH_GRANULE(q + GC_GRANULE_WORDS);
          }
          q += 2 * GC_GRANULE_WORDS;
          mark_word >>= 2;
        }
        p += WORDSZ*GC_GRANULE_WORDS;
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit

    GC_mark_stack_top = mark_stack_top;
}
# if GC_GRANULE_WORDS < 4
/* Push all objects reachable from marked objects in the given block */
/* of size 4 (granules) objects.                                     */
/* There is a risk of mark stack overflow here.  But we handle that. */
/* And only unmarked objects get pushed, so it's not very likely.    */
void GC_push_marked4(struct hblk *h, hdr *hhdr)
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    word *p;
    word *plim;
    word *q;
    word mark_word;

    ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    ptr_t least_ha = GC_least_plausible_heap_addr;
    mse * mark_stack_top = GC_mark_stack_top;
    mse * mark_stack_limit = GC_mark_stack_limit;

#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* Go through all words in block. */
    while (p < plim) {
        mark_word = *mark_word_addr++;
        q = p;
        while(mark_word != 0) {
          if (mark_word & 1) {
              PUSH_GRANULE(q);
              PUSH_GRANULE(q + GC_GRANULE_WORDS);
              PUSH_GRANULE(q + 2*GC_GRANULE_WORDS);
              PUSH_GRANULE(q + 3*GC_GRANULE_WORDS);
          }
          q += 4 * GC_GRANULE_WORDS;
          mark_word >>= 4;
        }
        p += WORDSZ*GC_GRANULE_WORDS;
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit

    GC_mark_stack_top = mark_stack_top;
}
# endif /* GC_GRANULE_WORDS < 4 */
#endif /* UNALIGNED */

#endif /* USE_PUSH_MARKED_ACCELERATORS */
/* Push all objects reachable from marked objects in the given block. */
void GC_push_marked(struct hblk *h, hdr *hhdr)
{
    size_t sz = hhdr -> hb_sz;
    word descr = hhdr -> hb_descr;
    ptr_t p;
    word bit_no;
    ptr_t lim;
    mse * GC_mark_stack_top_reg;
    mse * mark_stack_limit = GC_mark_stack_limit;

    /* Some quick shortcuts: */
    if ((0 | GC_DS_LENGTH) == descr) return;
    if (GC_block_empty(hhdr)/* nothing marked */) return;
    GC_n_rescuing_pages++;
    GC_objects_are_marked = TRUE;
    if (sz > MAXOBJBYTES) {
        lim = h -> hb_body;
    } else {
        lim = (h + 1)->hb_body - sz;
    }

    switch(BYTES_TO_GRANULES(sz)) {
#   if defined(USE_PUSH_MARKED_ACCELERATORS)
      case 1:
        GC_push_marked1(h, hhdr);
        break;
#     if !defined(UNALIGNED)
        case 2:
          GC_push_marked2(h, hhdr);
          break;
#       if GC_GRANULE_WORDS < 4
          case 4:
            GC_push_marked4(h, hhdr);
            break;
#       endif
#     endif
#   endif
      default:
        GC_mark_stack_top_reg = GC_mark_stack_top;
        for (p = h -> hb_body, bit_no = 0; p <= lim;
             p += sz, bit_no += MARK_BIT_OFFSET(sz)) {
          if (mark_bit_from_hdr(hhdr, bit_no)) {
            /* Mark from fields inside the object. */
            PUSH_OBJ(p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
          }
        }
        GC_mark_stack_top = GC_mark_stack_top_reg;
    }
}
#ifndef SMALL_CONFIG
/* Test whether any page in the given block is dirty. */
STATIC GC_bool GC_block_was_dirty(struct hblk *h, hdr *hhdr)
{
    size_t sz = hhdr -> hb_sz;

    if (sz <= MAXOBJBYTES) {
         return(GC_page_was_dirty(h));
    } else {
         ptr_t p = (ptr_t)h;

         while (p < (ptr_t)h + sz) {
             if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
             p += HBLKSIZE;
         }
         return(FALSE);
    }
}
#endif /* SMALL_CONFIG */
/* Similar to GC_push_marked, but skip over unallocated blocks */
/* and return address of next plausible block.                 */
struct hblk * GC_push_next_marked(struct hblk *h)
{
    hdr * hhdr = HDR(h);

    if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr) || HBLK_IS_FREE(hhdr), FALSE)) {
      h = GC_next_used_block(h);
      if (h == 0) return(0);
      hhdr = GC_find_header((ptr_t)h);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#ifndef SMALL_CONFIG
/* Identical to above, but mark only from dirty pages. */
struct hblk * GC_push_next_marked_dirty(struct hblk *h)
{
    hdr * hhdr = HDR(h);

    if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
    for (;;) {
        if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr)
                   || HBLK_IS_FREE(hhdr), FALSE)) {
          h = GC_next_used_block(h);
          if (h == 0) return(0);
          hhdr = GC_find_header((ptr_t)h);
        }
#       ifdef STUBBORN_ALLOC
          if (hhdr -> hb_obj_kind == STUBBORN) {
            if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
                break;
            }
          } else {
            if (GC_block_was_dirty(h, hhdr)) break;
          }
#       else
          if (GC_block_was_dirty(h, hhdr)) break;
#       endif
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
        hhdr = HDR(h);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#endif /* !SMALL_CONFIG */
/* Similar to above, but for uncollectable pages.  Needed since we */
/* do not clear marks for such pages, even for full collections.   */
struct hblk * GC_push_next_marked_uncollectable(struct hblk *h)
{
    hdr * hhdr = HDR(h);

    for (;;) {
        if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr)
                   || HBLK_IS_FREE(hhdr), FALSE)) {
          h = GC_next_used_block(h);
          if (h == 0) return(0);
          hhdr = GC_find_header((ptr_t)h);
        }
        if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
        hhdr = HDR(h);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}