2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 2000 by Hewlett-Packard Company. All rights reserved.
6 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 * Permission is hereby granted to use or copy this program
10 * for any purpose, provided the above notices are retained on all copies.
11 * Permission to modify the code and to distribute modified code is granted,
12 * provided the above notices are retained, and a notice that the code was
13 * modified is included with the above copyright notice.
17 #include "private/gc_pmark.h"
21 #if defined(MSWIN32) && defined(__GNUC__)
25 /* We put this here to minimize the risk of inlining. */
27 #if defined(__BORLANDC__) || defined(__WATCOMC__) || defined(__CC_ARM)
29 void GC_noop(void *p, ...) {}
38 /* Single argument version, robust against whole program analysis. */
39 volatile word GC_noop_sink;
40 GC_API void GC_CALL GC_noop1(word x)
45 /* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */
47 GC_INNER unsigned GC_n_mark_procs = GC_RESERVED_MARK_PROCS;
49 /* Initialize GC_obj_kinds and the standard free lists properly. */
50 /* This must be done statically since they may be accessed before */
51 /* GC_init is called. */
52 /* It's done here, since we need to deal with mark descriptors. */
53 GC_INNER struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
54 /* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
55 0 | GC_DS_LENGTH, FALSE, FALSE },
56 /* NORMAL */ { &GC_objfreelist[0], 0,
57 0 | GC_DS_LENGTH, /* Adjusted in GC_init for EXTRA_BYTES */
58 TRUE /* add length to descr */, TRUE },
60 { &GC_uobjfreelist[0], 0,
61 0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
62 # ifdef ATOMIC_UNCOLLECTABLE
64 { &GC_auobjfreelist[0], 0,
65 0 | GC_DS_LENGTH, FALSE /* add length to descr */, FALSE },
67 # ifdef STUBBORN_ALLOC
68 /*STUBBORN*/ { (void **)&GC_sobjfreelist[0], 0,
69 0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
73 # ifdef ATOMIC_UNCOLLECTABLE
74 # ifdef STUBBORN_ALLOC
75 # define GC_N_KINDS_INITIAL_VALUE 5
77 # define GC_N_KINDS_INITIAL_VALUE 4
80 # ifdef STUBBORN_ALLOC
81 # define GC_N_KINDS_INITIAL_VALUE 4
83 # define GC_N_KINDS_INITIAL_VALUE 3
87 GC_INNER unsigned GC_n_kinds = GC_N_KINDS_INITIAL_VALUE;
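/*
 * Illustrative sketch (not compiled): how the simple length-style mark
 * descriptors used in the GC_obj_kinds table above are formed.  The tag
 * values below are assumptions based on gc_mark.h; all example_* names
 * are hypothetical and not part of the collector.
 */
#if 0
#include <stddef.h>

#define EXAMPLE_DS_TAGS    3UL   /* assumption: mask of the descriptor tag bits */
#define EXAMPLE_DS_LENGTH  0UL   /* assumption: tag for a simple length descriptor */

/* For a NORMAL kind, ok_descriptor starts as (0 | GC_DS_LENGTH) and the     */
/* "add length to descr" flag is TRUE, so the per-size descriptor ends up    */
/* being the object size in bytes (granule-aligned, so the tag bits stay 0). */
static unsigned long example_length_descr(size_t byte_sz)
{
    return (unsigned long)byte_sz | EXAMPLE_DS_LENGTH;
}

/* A PTRFREE kind keeps its descriptor at length 0: nothing inside the       */
/* object is ever scanned for pointers.                                      */
static unsigned long example_ptrfree_descr(void)
{
    return 0UL | EXAMPLE_DS_LENGTH;
}
#endif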
89 # ifndef INITIAL_MARK_STACK_SIZE
90 # define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
91 /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a */
92 /* multiple of HBLKSIZE. */
93 /* The incremental collector actually likes a larger */
94 /* size, since it wants to push all marked dirty objs */
95 /* before marking anything new. Currently we let it */
96 /* grow dynamically. */
100 * Limits of stack for GC_mark routine.
101 * All ranges between GC_mark_stack(incl.) and GC_mark_stack_top(incl.) still
102 * need to be marked from.
105 STATIC word GC_n_rescuing_pages = 0;
106 /* Number of dirty pages we marked from */
107 /* excludes ptrfree pages, etc. */
109 GC_INNER mse * GC_mark_stack = NULL;
110 GC_INNER mse * GC_mark_stack_limit = NULL;
111 GC_INNER size_t GC_mark_stack_size = 0;
114 GC_INNER mse * volatile GC_mark_stack_top = NULL;
115 /* Updated only with mark lock held, but read asynchronously. */
116 STATIC volatile AO_t GC_first_nonempty = 0;
117 /* Lowest entry on mark stack */
118 /* that may be nonempty. */
119 /* Updated only by initiating */
122 GC_INNER mse * GC_mark_stack_top = NULL;
125 GC_INNER mark_state_t GC_mark_state = MS_NONE;
127 GC_INNER GC_bool GC_mark_stack_too_small = FALSE;
129 static struct hblk * scan_ptr;
131 STATIC GC_bool GC_objects_are_marked = FALSE;
132 /* Are there collectable marked objects in the heap? */
134 /* Is a collection in progress? Note that this can return true in the */
135 /* nonincremental case, if a collection has been abandoned and the */
136 /* mark state is now MS_INVALID. */
137 GC_INNER GC_bool GC_collection_in_progress(void)
139 return(GC_mark_state != MS_NONE);
142 /* clear all mark bits in the header */
143 GC_INNER void GC_clear_hdr_marks(hdr *hhdr)
145 size_t last_bit = FINAL_MARK_BIT(hhdr -> hb_sz);
146 BZERO(hhdr -> hb_marks, sizeof(hhdr->hb_marks));
147 set_mark_bit_from_hdr(hhdr, last_bit);
148 hhdr -> hb_n_marks = 0;
151 /* Set all mark bits in the header. Used for uncollectable blocks. */
152 GC_INNER void GC_set_hdr_marks(hdr *hhdr)
155 size_t sz = hhdr -> hb_sz;
156 unsigned n_marks = (unsigned)FINAL_MARK_BIT(sz);
158 # ifdef USE_MARK_BYTES
159 for (i = 0; i <= n_marks; i += (unsigned)MARK_BIT_OFFSET(sz)) {
160 hhdr -> hb_marks[i] = 1;
163 for (i = 0; i < divWORDSZ(n_marks + WORDSZ); ++i) {
164 hhdr -> hb_marks[i] = ONES;
167 # ifdef MARK_BIT_PER_OBJ
168 hhdr -> hb_n_marks = n_marks - 1;
170 hhdr -> hb_n_marks = HBLK_OBJS(sz);
175 * Clear all mark bits associated with block h.
178 static void clear_marks_for_block(struct hblk *h, word dummy)
180 register hdr * hhdr = HDR(h);
182 if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
183 /* Mark bit for these is cleared only once the object is */
184 /* explicitly deallocated. This either frees the block, or */
185 /* the bit is cleared once the object is on the free list. */
186 GC_clear_hdr_marks(hhdr);
189 /* Slow but general routines for setting/clearing/asking about mark bits */
190 void GC_set_mark_bit(ptr_t p)
192 struct hblk *h = HBLKPTR(p);
194 word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
196 if (!mark_bit_from_hdr(hhdr, bit_no)) {
197 set_mark_bit_from_hdr(hhdr, bit_no);
198 ++hhdr -> hb_n_marks;
202 void GC_clear_mark_bit(ptr_t p)
204 struct hblk *h = HBLKPTR(p);
206 word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
208 if (mark_bit_from_hdr(hhdr, bit_no)) {
210 clear_mark_bit_from_hdr(hhdr, bit_no);
211 n_marks = hhdr -> hb_n_marks - 1;
212 # ifdef PARALLEL_MARK
213 if (n_marks != 0 || !GC_parallel)
214 hhdr -> hb_n_marks = n_marks;
215 /* Don't decrement to zero. The counts are approximate due to */
216 /* concurrency issues, but we need to ensure that a count of */
217 /* zero implies an empty block. */
219 hhdr -> hb_n_marks = n_marks;
224 GC_bool GC_is_marked(ptr_t p)
226 struct hblk *h = HBLKPTR(p);
228 word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
230 return((GC_bool)mark_bit_from_hdr(hhdr, bit_no));
235 * Clear mark bits in all allocated heap blocks. This invalidates
236 * the marker invariant, and sets GC_mark_state to reflect this.
237 * (This implicitly starts marking to reestablish the invariant.)
239 GC_INNER void GC_clear_marks(void)
241 GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
242 GC_objects_are_marked = FALSE;
243 GC_mark_state = MS_INVALID;
248 void GC_check_dirty(void);
251 /* Initiate a garbage collection. Initiates a full collection if the */
252 /* mark state is invalid. */
253 GC_INNER void GC_initiate_gc(void)
255 # ifndef GC_DISABLE_INCREMENTAL
256 if (GC_dirty_maintained) GC_read_dirty();
258 # ifdef STUBBORN_ALLOC
262 if (GC_dirty_maintained) GC_check_dirty();
264 GC_n_rescuing_pages = 0;
265 if (GC_mark_state == MS_NONE) {
266 GC_mark_state = MS_PUSH_RESCUERS;
267 } else if (GC_mark_state != MS_INVALID) {
268 ABORT("Unexpected state");
269 } /* else this is really a full collection, and mark */
270 /* bits are invalid. */
275 STATIC void GC_do_parallel_mark(void); /* initiate parallel marking. */
276 #endif /* PARALLEL_MARK */
278 #ifdef GC_DISABLE_INCREMENTAL
279 # define GC_push_next_marked_dirty(h) GC_push_next_marked(h)
281 STATIC struct hblk * GC_push_next_marked_dirty(struct hblk *h);
282 /* Invoke GC_push_marked on next dirty block above h. */
283 /* Return a pointer just past the end of this block. */
284 #endif /* !GC_DISABLE_INCREMENTAL */
285 STATIC struct hblk * GC_push_next_marked(struct hblk *h);
286 /* Ditto, but also mark from clean pages. */
287 STATIC struct hblk * GC_push_next_marked_uncollectable(struct hblk *h);
288 /* Ditto, but mark only from uncollectable pages. */
290 static void alloc_mark_stack(size_t);
292 # if (defined(MSWIN32) || defined(MSWINCE)) && !defined(__GNUC__) \
293 || defined(MSWIN32) && defined(I386) /* for Win98 */ \
294 || defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS)
295 /* Under rare conditions, we may end up marking from nonexistent memory. */
296 /* Hence we need to be prepared to recover by running GC_mark_some */
297 /* with a suitable handler in place. */
298 /* FIXME: Do we really need this for WinCE? If yes, then */
299 /* WRAP_MARK_SOME should also be defined for CeGCC, which requires */
300 /* CPU/OS-specific code in mark_ex_handler() and GC_mark_some() */
301 /* (for manual stack unwinding and exception handler installation). */
302 # define WRAP_MARK_SOME
305 /* Perform a small amount of marking. */
306 /* We try to touch roughly a page of memory. */
307 /* Return TRUE if we just finished a mark phase. */
308 /* Cold_gc_frame is an address inside a GC frame that */
309 /* remains valid until all marking is complete. */
310 /* A zero value indicates that it's OK to miss some */
311 /* register values. */
312 /* We hold the allocation lock. In the case of */
313 /* incremental collection, the world may not be stopped.*/
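/*
 * Minimal sketch (not compiled) of how the entry point described above is
 * typically driven: call it repeatedly, with the allocation lock held,
 * until it reports that a mark phase just finished.  The loop body and the
 * example_* name are schematic assumptions, not the collector's actual
 * driver (the real one lives in alloc.c).
 */
#if 0
static void example_drive_marker(ptr_t cold_gc_frame)
{
    /* Caller already holds the allocation lock, per the comment above.   */
    for (;;) {
        /* Each call does roughly a page worth of marking work and        */
        /* returns TRUE once the mark phase is complete.                  */
        if (GC_mark_some(cold_gc_frame)) break;
        /* An incremental collector could release the lock here and let   */
        /* mutators run before performing the next increment.             */
    }
}
#endif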
314 #ifdef WRAP_MARK_SOME
315 /* For win32, this is called after we establish a structured */
316 /* exception handler, in case Windows unmaps one of our root */
317 /* segments. See below. In either case, we acquire the */
318 /* allocator lock long before we get here. */
319 STATIC GC_bool GC_mark_some_inner(ptr_t cold_gc_frame)
321 GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame)
324 switch(GC_mark_state) {
328 case MS_PUSH_RESCUERS:
329 if (GC_mark_stack_top
330 >= GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/2) {
331 /* Go ahead and mark, even though that might cause us to */
332 /* see more marked dirty objects later on. Avoid this */
334 GC_mark_stack_too_small = TRUE;
335 MARK_FROM_MARK_STACK();
338 scan_ptr = GC_push_next_marked_dirty(scan_ptr);
340 if (GC_print_stats) {
341 GC_log_printf("Marked from %lu dirty pages\n",
342 (unsigned long)GC_n_rescuing_pages);
344 GC_push_roots(FALSE, cold_gc_frame);
345 GC_objects_are_marked = TRUE;
346 if (GC_mark_state != MS_INVALID) {
347 GC_mark_state = MS_ROOTS_PUSHED;
353 case MS_PUSH_UNCOLLECTABLE:
354 if (GC_mark_stack_top
355 >= GC_mark_stack + GC_mark_stack_size/4) {
356 # ifdef PARALLEL_MARK
357 /* Avoid this, since we don't parallelize the marker */
359 if (GC_parallel) GC_mark_stack_too_small = TRUE;
361 MARK_FROM_MARK_STACK();
364 scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
366 GC_push_roots(TRUE, cold_gc_frame);
367 GC_objects_are_marked = TRUE;
368 if (GC_mark_state != MS_INVALID) {
369 GC_mark_state = MS_ROOTS_PUSHED;
375 case MS_ROOTS_PUSHED:
376 # ifdef PARALLEL_MARK
377 /* In the incremental GC case, this currently doesn't */
378 /* quite do the right thing, since it runs to */
379 /* completion. On the other hand, starting a */
380 /* parallel marker is expensive, so perhaps it is */
381 /* the right thing? */
382 /* Eventually, incremental marking should run */
383 /* asynchronously in multiple threads, without grabbing */
384 /* the allocation lock. */
386 GC_do_parallel_mark();
387 GC_ASSERT(GC_mark_stack_top < (mse *)GC_first_nonempty);
388 GC_mark_stack_top = GC_mark_stack - 1;
389 if (GC_mark_stack_too_small) {
390 alloc_mark_stack(2*GC_mark_stack_size);
392 if (GC_mark_state == MS_ROOTS_PUSHED) {
393 GC_mark_state = MS_NONE;
400 if (GC_mark_stack_top >= GC_mark_stack) {
401 MARK_FROM_MARK_STACK();
404 GC_mark_state = MS_NONE;
405 if (GC_mark_stack_too_small) {
406 alloc_mark_stack(2*GC_mark_stack_size);
412 case MS_PARTIALLY_INVALID:
413 if (!GC_objects_are_marked) {
414 GC_mark_state = MS_PUSH_UNCOLLECTABLE;
417 if (GC_mark_stack_top >= GC_mark_stack) {
418 MARK_FROM_MARK_STACK();
421 if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
422 /* About to start a heap scan for marked objects. */
423 /* Mark stack is empty. OK to reallocate. */
424 if (GC_mark_stack_too_small) {
425 alloc_mark_stack(2*GC_mark_stack_size);
427 GC_mark_state = MS_PARTIALLY_INVALID;
429 scan_ptr = GC_push_next_marked(scan_ptr);
430 if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
431 GC_push_roots(TRUE, cold_gc_frame);
432 GC_objects_are_marked = TRUE;
433 if (GC_mark_state != MS_INVALID) {
434 GC_mark_state = MS_ROOTS_PUSHED;
439 ABORT("GC_mark_some: bad state");
444 #ifdef WRAP_MARK_SOME
446 # if (defined(MSWIN32) || defined(MSWINCE)) && defined(__GNUC__)
449 EXCEPTION_REGISTRATION ex_reg;
453 static EXCEPTION_DISPOSITION mark_ex_handler(
454 struct _EXCEPTION_RECORD *ex_rec,
456 struct _CONTEXT *context,
459 if (ex_rec->ExceptionCode == STATUS_ACCESS_VIOLATION) {
460 ext_ex_regn *xer = (ext_ex_regn *)est_frame;
462 /* Unwind from the inner function assuming the standard */
463 /* function prologue. */
464 /* Assumes code has not been compiled with */
465 /* -fomit-frame-pointer. */
466 context->Esp = context->Ebp;
467 context->Ebp = *((DWORD *)context->Esp);
468 context->Esp = context->Esp - 8;
470 /* Resume execution at the "real" handler within the */
471 /* wrapper function. */
472 context->Eip = (DWORD )(xer->alt_path);
474 return ExceptionContinueExecution;
477 return ExceptionContinueSearch;
480 # endif /* __GNUC__ && MSWIN32 */
482 #if defined(GC_WIN32_THREADS) && !defined(__GNUC__)
483 GC_bool GC_started_thread_while_stopped(void);
484 /* In win32_threads.c. Did we invalidate mark phase with an */
485 /* unexpected thread start? */
488 GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame)
492 # if defined(MSWIN32) || defined(MSWINCE)
494 /* Windows 98 appears to asynchronously create and remove */
495 /* writable memory mappings, for reasons we haven't yet */
496 /* understood. Since we look for writable regions to */
497 /* determine the root set, we may try to mark from an */
498 /* address range that disappeared since we started the */
499 /* collection. Thus we have to recover from faults here. */
500 /* This code does not appear to be necessary for Windows */
501 /* 95/NT/2000+. Note that this code should never generate */
502 /* an incremental GC write fault. */
503 /* This code seems to be necessary for WinCE (at least in */
504 /* the case we'd decide to add MEM_PRIVATE sections to */
505 /* data roots in GC_register_dynamic_libraries()). */
506 /* It's conceivable that this is the same issue with */
507 /* terminating threads that we see with Linux and */
508 /* USE_PROC_FOR_LIBRARIES. */
511 ret_val = GC_mark_some_inner(cold_gc_frame);
512 } __except (GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ?
513 EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
516 # ifdef GC_WIN32_THREADS
517 /* With DllMain-based thread tracking, a thread may have */
518 /* started while we were marking. This is logically equivalent */
519 /* to the exception case; our results are invalid and we have */
520 /* to start over. This cannot be prevented since we can't */
521 /* block in DllMain. */
522 if (GC_started_thread_while_stopped()) goto handle_ex;
527 # else /* __GNUC__ */
529 /* Manually install an exception handler since GCC does */
530 /* not yet support Structured Exception Handling (SEH) on */
535 er.alt_path = &&handle_ex;
536 er.ex_reg.handler = mark_ex_handler;
537 __asm__ __volatile__ ("movl %%fs:0, %0" : "=r" (er.ex_reg.prev));
538 __asm__ __volatile__ ("movl %0, %%fs:0" : : "r" (&er));
539 ret_val = GC_mark_some_inner(cold_gc_frame);
540 /* Prevent GCC from considering the following code unreachable */
541 /* and thus eliminating it. */
542 if (er.alt_path == 0)
545 /* Uninstall the exception handler */
546 __asm__ __volatile__ ("mov %0, %%fs:0" : : "r" (er.ex_reg.prev));
549 # endif /* __GNUC__ */
550 # else /* !MSWIN32 */
551 /* Here we are handling the case in which /proc is used for root */
552 /* finding, and we have threads. We may find a stack for a */
553 /* thread that is in the process of exiting, and disappears */
554 /* while we are marking it. This seems extremely difficult to */
555 /* avoid otherwise. */
556 if (GC_incremental) {
557 WARN("Incremental GC incompatible with /proc roots\n", 0);
558 /* I'm not sure if this could still work ... */
560 GC_setup_temporary_fault_handler();
561 if(SETJMP(GC_jmp_buf) != 0) goto handle_ex;
562 ret_val = GC_mark_some_inner(cold_gc_frame);
564 GC_reset_fault_handler();
567 # endif /* !MSWIN32 */
570 /* Exception handler starts here for all cases. */
571 if (GC_print_stats) {
573 "Caught ACCESS_VIOLATION in marker; memory mapping disappeared\n");
576 /* We have bad roots on the stack. Discard mark stack. */
577 /* Rescan from marked objects. Redetermine roots. */
578 GC_invalidate_mark_state();
582 goto rm_handler; /* Back to platform-specific code. */
584 #endif /* WRAP_MARK_SOME */
586 GC_INNER GC_bool GC_mark_stack_empty(void)
588 return(GC_mark_stack_top < GC_mark_stack);
591 GC_INNER void GC_invalidate_mark_state(void)
593 GC_mark_state = MS_INVALID;
594 GC_mark_stack_top = GC_mark_stack-1;
597 GC_INNER mse * GC_signal_mark_stack_overflow(mse *msp)
599 GC_mark_state = MS_INVALID;
600 GC_mark_stack_too_small = TRUE;
601 if (GC_print_stats) {
602 GC_log_printf("Mark stack overflow; current size = %lu entries\n",
603 (unsigned long)GC_mark_stack_size);
605 return(msp - GC_MARK_STACK_DISCARDS);
609 * Mark objects pointed to by the regions described by
610 * mark stack entries between mark_stack and mark_stack_top,
611 * inclusive. Assumes the upper limit of a mark stack entry
612 * is never 0. A mark stack entry never has size 0.
613 * We try to traverse on the order of a hblk of memory before we return.
614 * Caller is responsible for calling this until the mark stack is empty.
615 * Note that this is the most performance critical routine in the
616 * collector. Hence it contains all sorts of ugly hacks to speed
617 * things up. In particular, we avoid procedure calls on the common
618 * path, we take advantage of peculiarities of the mark descriptor
619 * encoding, we optionally maintain a cache for the block address to
620 * header mapping, we prefetch when an object is "grayed", etc.
622 GC_INNER mse * GC_mark_from(mse *mark_stack_top, mse *mark_stack,
623 mse *mark_stack_limit)
625 signed_word credit = HBLKSIZE; /* Remaining credit for marking work */
626 ptr_t current_p; /* Pointer to current candidate ptr. */
627 word current; /* Candidate pointer. */
628 ptr_t limit; /* (Incl) limit of current candidate */
631 ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
632 ptr_t least_ha = GC_least_plausible_heap_addr;
635 # define SPLIT_RANGE_WORDS 128 /* Must be power of 2. */
637 GC_objects_are_marked = TRUE;
639 # ifdef OS2 /* Use untweaked version to circumvent compiler problem */
640 while (mark_stack_top >= mark_stack && credit >= 0)
642 while ((((ptr_t)mark_stack_top - (ptr_t)mark_stack) | credit) >= 0)
645 current_p = mark_stack_top -> mse_start;
646 descr = mark_stack_top -> mse_descr;
648 /* current_p and descr describe the current object. */
649 /* *mark_stack_top is vacant. */
650 /* The following is 0 only for small objects described by a simple */
651 /* length descriptor. For many applications this is the common */
652 /* case, so we try to detect it quickly. */
653 if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | GC_DS_TAGS)) {
654 word tag = descr & GC_DS_TAGS;
659 /* Process part of the range to avoid pushing too much on the */
661 GC_ASSERT(descr < (word)GC_greatest_plausible_heap_addr
662 - (word)GC_least_plausible_heap_addr);
664 if (GC_trace_addr >= current_p
665 && GC_trace_addr < current_p + descr) {
666 GC_log_printf("GC:%u Large section; start %p len %lu\n",
667 (unsigned)GC_gc_no, current_p, (unsigned long)descr);
669 # endif /* ENABLE_TRACE */
670 # ifdef PARALLEL_MARK
671 # define SHARE_BYTES 2048
672 if (descr > SHARE_BYTES && GC_parallel
673 && mark_stack_top < mark_stack_limit - 1) {
674 int new_size = (descr/2) & ~(sizeof(word)-1);
675 mark_stack_top -> mse_start = current_p;
676 mark_stack_top -> mse_descr = new_size + sizeof(word);
677 /* makes sure we handle */
678 /* misaligned pointers. */
681 if (GC_trace_addr >= current_p
682 && GC_trace_addr < current_p + descr) {
683 GC_log_printf("GC:%u Splitting (parallel) %p at %p\n",
684 (unsigned)GC_gc_no, current_p, current_p + new_size);
686 # endif /* ENABLE_TRACE */
687 current_p += new_size;
691 # endif /* PARALLEL_MARK */
692 mark_stack_top -> mse_start =
693 limit = current_p + WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
694 mark_stack_top -> mse_descr =
695 descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
697 if (GC_trace_addr >= current_p
698 && GC_trace_addr < current_p + descr) {
699 GC_log_printf("GC:%u Splitting %p at %p\n",
700 (unsigned)GC_gc_no, current_p, limit);
702 # endif /* ENABLE_TRACE */
703 /* Make sure that pointers overlapping the two ranges are */
705 limit += sizeof(word) - ALIGNMENT;
710 if (GC_trace_addr >= current_p
711 && GC_trace_addr < current_p + WORDS_TO_BYTES(WORDSZ-2)) {
712 GC_log_printf("GC:%u Tracing from %p bitmap descr %lu\n",
713 (unsigned)GC_gc_no, current_p, (unsigned long)descr);
715 # endif /* ENABLE_TRACE */
716 descr &= ~GC_DS_TAGS;
717 credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
719 if ((signed_word)descr < 0) {
720 current = *(word *)current_p;
721 FIXUP_POINTER(current);
722 if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
723 PREFETCH((ptr_t)current);
725 if (GC_trace_addr == current_p) {
726 GC_log_printf("GC:%u Considering(3) %p -> %p\n",
727 (unsigned)GC_gc_no, current_p, (ptr_t)current);
729 # endif /* ENABLE_TRACE */
730 PUSH_CONTENTS((ptr_t)current, mark_stack_top,
731 mark_stack_limit, current_p, exit1);
735 current_p += sizeof(word);
741 if (GC_trace_addr >= current_p
742 && GC_base(current_p) != 0
743 && GC_base(current_p) == GC_base(GC_trace_addr)) {
744 GC_log_printf("GC:%u Tracing from %p proc descr %lu\n",
745 (unsigned)GC_gc_no, current_p, (unsigned long)descr);
747 # endif /* ENABLE_TRACE */
748 credit -= GC_PROC_BYTES;
751 ((word *)current_p, mark_stack_top,
752 mark_stack_limit, ENV(descr));
754 case GC_DS_PER_OBJECT:
755 if ((signed_word)descr >= 0) {
756 /* Descriptor is in the object. */
757 descr = *(word *)(current_p + descr - GC_DS_PER_OBJECT);
759 /* Descriptor is in type descriptor pointed to by first */
760 /* word in object. */
761 ptr_t type_descr = *(ptr_t *)current_p;
762 /* type_descr is either a valid pointer to the descriptor */
763 /* structure, or this object was on a free list. If it */
764 /* was anything but the last object on the free list, */
765 /* we will misinterpret the next object on the free list as */
766 /* the type descriptor, and get a 0 GC descriptor, which */
767 /* is ideal. Unfortunately, we need to check for the last */
768 /* object case explicitly. */
769 if (0 == type_descr) {
770 /* Rarely executed. */
774 descr = *(word *)(type_descr
775 - (descr + (GC_INDIR_PER_OBJ_BIAS
776 - GC_DS_PER_OBJECT)));
779 /* Can happen either because we generated a 0 descriptor */
780 /* or we saw a pointer to a free object. */
787 limit = 0; /* initialized to prevent warning. */
789 } else /* Small object with length descriptor */ {
791 limit = current_p + (word)descr;
794 if (GC_trace_addr >= current_p
795 && GC_trace_addr < limit) {
796 GC_log_printf("GC:%u Tracing from %p len %lu\n",
797 (unsigned)GC_gc_no, current_p, (unsigned long)descr);
799 # endif /* ENABLE_TRACE */
800 /* The simple case in which we're scanning a range. */
801 GC_ASSERT(!((word)current_p & (ALIGNMENT-1)));
802 credit -= limit - current_p;
803 limit -= sizeof(word);
807 # ifndef SMALL_CONFIG
810 /* Try to prefetch the next pointer to be examined asap. */
811 /* Empirically, this also seems to help slightly without */
812 /* prefetches, at least on linux/X86. Presumably this loop */
813 /* ends up with less register pressure, and gcc thus ends up */
814 /* generating slightly better code. Overall gcc code quality */
815 /* for this loop is still not great. */
817 PREFETCH(limit - PREF_DIST*CACHE_LINE_SIZE);
818 GC_ASSERT(limit >= current_p);
819 deferred = *(word *)limit;
820 FIXUP_POINTER(deferred);
822 if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
823 PREFETCH((ptr_t)deferred);
826 if (current_p > limit) goto next_object;
827 /* Unroll once, so we don't do too many of the prefetches */
828 /* based on limit. */
829 deferred = *(word *)limit;
830 FIXUP_POINTER(deferred);
832 if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
833 PREFETCH((ptr_t)deferred);
836 if (current_p > limit) goto next_object;
840 while (current_p <= limit) {
841 /* Empirically, unrolling this loop doesn't help a lot. */
842 /* Since PUSH_CONTENTS expands to a lot of code, */
844 current = *(word *)current_p;
845 FIXUP_POINTER(current);
846 PREFETCH(current_p + PREF_DIST*CACHE_LINE_SIZE);
847 if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
848 /* Prefetch the contents of the object we just pushed. It's */
849 /* likely we will need them soon. */
850 PREFETCH((ptr_t)current);
852 if (GC_trace_addr == current_p) {
853 GC_log_printf("GC:%u Considering(1) %p -> %p\n",
854 (unsigned)GC_gc_no, current_p, (ptr_t)current);
856 # endif /* ENABLE_TRACE */
857 PUSH_CONTENTS((ptr_t)current, mark_stack_top,
858 mark_stack_limit, current_p, exit2);
860 current_p += ALIGNMENT;
863 # ifndef SMALL_CONFIG
864 /* We still need to mark the entry we previously prefetched. */
865 /* We already know that it passes the preliminary pointer */
868 if (GC_trace_addr == current_p) {
869 GC_log_printf("GC:%u Considering(2) %p -> %p\n",
870 (unsigned)GC_gc_no, current_p, (ptr_t)deferred);
872 # endif /* ENABLE_TRACE */
873 PUSH_CONTENTS((ptr_t)deferred, mark_stack_top,
874 mark_stack_limit, current_p, exit4);
879 return mark_stack_top;
884 STATIC GC_bool GC_help_wanted = FALSE; /* Protected by mark lock */
885 STATIC unsigned GC_helper_count = 0; /* Number of running helpers. */
886 /* Protected by mark lock */
887 STATIC unsigned GC_active_count = 0; /* Number of active helpers. */
888 /* Protected by mark lock */
889 /* May increase and decrease */
890 /* within each mark cycle. But */
891 /* once it returns to 0, it */
892 /* stays zero for the cycle. */
894 GC_INNER word GC_mark_no = 0;
896 #define LOCAL_MARK_STACK_SIZE HBLKSIZE
897 /* Under normal circumstances, this is big enough to guarantee */
898 /* we don't overflow half of it in a single call to */
902 /* Steal mark stack entries starting at mse low into mark stack local */
903 /* until we either steal mse high, or we have max entries. */
904 /* Return a pointer to the top of the local mark stack. */
905 /* *next is replaced by a pointer to the next unscanned mark stack */
907 STATIC mse * GC_steal_mark_stack(mse * low, mse * high, mse * local,
908 unsigned max, mse **next)
911 mse *top = local - 1;
914 GC_ASSERT(high >= low-1 && (word)(high - low + 1) <= GC_mark_stack_size);
915 for (p = low; p <= high && i <= max; ++p) {
916 word descr = AO_load((volatile AO_t *) &(p -> mse_descr));
918 /* Must be ordered after read of descr: */
919 AO_store_release_write((volatile AO_t *) &(p -> mse_descr), 0);
920 /* More than one thread may get this entry, but that's only */
921 /* a minor performance problem. */
923 top -> mse_descr = descr;
924 top -> mse_start = p -> mse_start;
925 GC_ASSERT((top -> mse_descr & GC_DS_TAGS) != GC_DS_LENGTH ||
926 top -> mse_descr < (word)GC_greatest_plausible_heap_addr
927 - (word)GC_least_plausible_heap_addr);
928 /* If this is a big object, count it as */
929 /* size/256 + 1 objects. */
931 if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) i += (int)(descr >> 8);
938 /* Copy back a local mark stack. */
939 /* low and high are inclusive bounds. */
940 STATIC void GC_return_mark_stack(mse * low, mse * high)
946 if (high < low) return;
947 stack_size = high - low + 1;
948 GC_acquire_mark_lock();
949 my_top = GC_mark_stack_top; /* Concurrent modification impossible. */
950 my_start = my_top + 1;
951 if (my_start - GC_mark_stack + stack_size > GC_mark_stack_size) {
952 if (GC_print_stats) {
953 GC_log_printf("No room to copy back mark stack\n");
955 GC_mark_state = MS_INVALID;
956 GC_mark_stack_too_small = TRUE;
957 /* We drop the local mark stack. We'll fix things later. */
959 BCOPY(low, my_start, stack_size * sizeof(mse));
960 GC_ASSERT((mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top))
962 AO_store_release_write((volatile AO_t *)(&GC_mark_stack_top),
963 (AO_t)(my_top + stack_size));
964 /* Ensures visibility of previously written stack contents. */
966 GC_release_mark_lock();
967 GC_notify_all_marker();
970 /* Mark from the local mark stack. */
971 /* On return, the local mark stack is empty. */
972 /* But this may be achieved by copying the */
973 /* local mark stack back into the global one. */
974 STATIC void GC_do_local_mark(mse *local_mark_stack, mse *local_top)
977 # define N_LOCAL_ITERS 1
979 # ifdef GC_ASSERTIONS
980 /* Make sure we don't hold mark lock. */
981 GC_acquire_mark_lock();
982 GC_release_mark_lock();
985 for (n = 0; n < N_LOCAL_ITERS; ++n) {
986 local_top = GC_mark_from(local_top, local_mark_stack,
987 local_mark_stack + LOCAL_MARK_STACK_SIZE);
988 if (local_top < local_mark_stack) return;
989 if ((word)(local_top - local_mark_stack)
990 >= LOCAL_MARK_STACK_SIZE / 2) {
991 GC_return_mark_stack(local_mark_stack, local_top);
995 if ((mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top))
996 < (mse *)AO_load(&GC_first_nonempty)
997 && GC_active_count < GC_helper_count
998 && local_top > local_mark_stack + 1) {
999 /* Try to share the load, since the main stack is empty, */
1000 /* and helper threads are waiting for a refill. */
1001 /* The entries near the bottom of the stack are likely */
1002 /* to require more work. Thus we return those, even though */
1004 mse * new_bottom = local_mark_stack
1005 + (local_top - local_mark_stack)/2;
1006 GC_ASSERT(new_bottom > local_mark_stack
1007 && new_bottom < local_top);
1008 GC_return_mark_stack(local_mark_stack, new_bottom - 1);
1009 memmove(local_mark_stack, new_bottom,
1010 (local_top - new_bottom + 1) * sizeof(mse));
1011 local_top -= (new_bottom - local_mark_stack);
1016 #define ENTRIES_TO_GET 5
1018 GC_INNER long GC_markers = 2; /* Normally changed by thread-library- */
1019 /* -specific code. */
1021 /* Mark using the local mark stack until the global mark stack is empty */
1022 /* and there are no active workers. Update GC_first_nonempty to reflect */
1024 /* Caller does not hold mark lock. */
1025 /* Caller has already incremented GC_helper_count. We decrement it, */
1026 /* and maintain GC_active_count. */
1027 STATIC void GC_mark_local(mse *local_mark_stack, int id)
1029 mse * my_first_nonempty;
1031 GC_acquire_mark_lock();
1033 my_first_nonempty = (mse *)AO_load(&GC_first_nonempty);
1034 GC_ASSERT((mse *)AO_load(&GC_first_nonempty) >= GC_mark_stack &&
1035 (mse *)AO_load(&GC_first_nonempty) <=
1036 (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
1037 if (GC_print_stats == VERBOSE)
1038 GC_log_printf("Starting mark helper %lu\n", (unsigned long)id);
1039 GC_release_mark_lock();
1045 mse * global_first_nonempty = (mse *)AO_load(&GC_first_nonempty);
1047 GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
1048 my_first_nonempty <=
1049 (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
1050 GC_ASSERT(global_first_nonempty >= GC_mark_stack &&
1051 global_first_nonempty <=
1052 (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
1053 if (my_first_nonempty < global_first_nonempty) {
1054 my_first_nonempty = global_first_nonempty;
1055 } else if (global_first_nonempty < my_first_nonempty) {
1056 AO_compare_and_swap(&GC_first_nonempty,
1057 (AO_t) global_first_nonempty,
1058 (AO_t) my_first_nonempty);
1059 /* If this fails, we just go ahead, without updating */
1060 /* GC_first_nonempty. */
1062 /* Perhaps we should also update GC_first_nonempty, if it */
1063 /* is less. But that would require using atomic updates. */
1064 my_top = (mse *)AO_load_acquire((volatile AO_t *)(&GC_mark_stack_top));
1065 n_on_stack = my_top - my_first_nonempty + 1;
1066 if (0 == n_on_stack) {
1067 GC_acquire_mark_lock();
1068 my_top = GC_mark_stack_top;
1069 /* Asynchronous modification impossible here, */
1070 /* since we hold mark lock. */
1071 n_on_stack = my_top - my_first_nonempty + 1;
1072 if (0 == n_on_stack) {
1074 GC_ASSERT(GC_active_count <= GC_helper_count);
1075 /* Other markers may redeposit objects */
1077 if (0 == GC_active_count) GC_notify_all_marker();
1078 while (GC_active_count > 0
1079 && (mse *)AO_load(&GC_first_nonempty)
1080 > GC_mark_stack_top) {
1081 /* We will be notified if either GC_active_count */
1082 /* reaches zero, or if more objects are pushed on */
1083 /* the global mark stack. */
1086 if (GC_active_count == 0 &&
1087 (mse *)AO_load(&GC_first_nonempty) > GC_mark_stack_top) {
1088 GC_bool need_to_notify = FALSE;
1089 /* The above conditions can't be falsified while we */
1090 /* hold the mark lock, since neither */
1091 /* GC_active_count nor GC_mark_stack_top can */
1092 /* change. GC_first_nonempty can only be */
1093 /* incremented asynchronously. Thus we know that */
1094 /* both conditions actually held simultaneously. */
1096 if (0 == GC_helper_count) need_to_notify = TRUE;
1097 if (GC_print_stats == VERBOSE)
1098 GC_log_printf("Finished mark helper %lu\n",
1100 GC_release_mark_lock();
1101 if (need_to_notify) GC_notify_all_marker();
1104 /* else there's something on the stack again, or */
1105 /* another helper may push something. */
1107 GC_ASSERT(GC_active_count > 0);
1108 GC_release_mark_lock();
1111 GC_release_mark_lock();
1114 n_to_get = ENTRIES_TO_GET;
1115 if (n_on_stack < 2 * ENTRIES_TO_GET) n_to_get = 1;
1116 local_top = GC_steal_mark_stack(my_first_nonempty, my_top,
1117 local_mark_stack, n_to_get,
1118 &my_first_nonempty);
1119 GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
1120 my_first_nonempty <=
1121 (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
1122 GC_do_local_mark(local_mark_stack, local_top);
1126 /* Perform parallel mark. */
1127 /* We hold the GC lock, not the mark lock. */
1128 /* Currently runs until the mark stack is */
1130 STATIC void GC_do_parallel_mark(void)
1132 mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
1134 GC_acquire_mark_lock();
1135 GC_ASSERT(I_HOLD_LOCK());
1136 /* This could be a GC_ASSERT, but it seems safer to keep it on */
1137 /* all the time, especially since it's cheap. */
1138 if (GC_help_wanted || GC_active_count != 0 || GC_helper_count != 0)
1139 ABORT("Tried to start parallel mark in bad state");
1140 if (GC_print_stats == VERBOSE)
1141 GC_log_printf("Starting marking for mark phase number %lu\n",
1142 (unsigned long)GC_mark_no);
1143 GC_first_nonempty = (AO_t)GC_mark_stack;
1144 GC_active_count = 0;
1145 GC_helper_count = 1;
1146 GC_help_wanted = TRUE;
1147 GC_release_mark_lock();
1148 GC_notify_all_marker();
1149 /* Wake up potential helpers. */
1150 GC_mark_local(local_mark_stack, 0);
1151 GC_acquire_mark_lock();
1152 GC_help_wanted = FALSE;
1153 /* Done; clean up. */
1154 while (GC_helper_count > 0) GC_wait_marker();
1155 /* GC_helper_count cannot be incremented while GC_help_wanted == FALSE */
1156 if (GC_print_stats == VERBOSE)
1157 GC_log_printf("Finished marking for mark phase number %lu\n",
1158 (unsigned long)GC_mark_no);
1160 GC_release_mark_lock();
1161 GC_notify_all_marker();
1165 /* Try to help out the marker, if it's running. */
1166 /* We do not hold the GC lock, but the requestor does. */
1167 GC_INNER void GC_help_marker(word my_mark_no)
1169 mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
1172 if (!GC_parallel) return;
1173 GC_acquire_mark_lock();
1174 while (GC_mark_no < my_mark_no
1175 || (!GC_help_wanted && GC_mark_no == my_mark_no)) {
1178 my_id = GC_helper_count;
1179 if (GC_mark_no != my_mark_no || my_id >= (unsigned)GC_markers) {
1180 /* Second test is useful only if original threads can also */
1181 /* act as helpers. Under Linux they can't. */
1182 GC_release_mark_lock();
1185 GC_helper_count = my_id + 1;
1186 GC_release_mark_lock();
1187 GC_mark_local(local_mark_stack, my_id);
1188 /* GC_mark_local decrements GC_helper_count. */
1191 #endif /* PARALLEL_MARK */
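/*
 * Conceptual sketch (not compiled) of the parallel marking protocol above:
 * each helper steals a few entries from the global mark stack into a local
 * one, marks from the local stack, and returns part of its local stack to
 * the global one when other helpers are starving.  The example_* names and
 * the omitted locking/atomics are simplifying assumptions for illustration
 * only.
 */
#if 0
typedef struct { void *start; unsigned long descr; } example_entry;

/* Steal up to max entries from the shared stack region [low, high] into   */
/* the local stack and return the new local top.                           */
static example_entry *example_steal(example_entry *low, example_entry *high,
                                    example_entry *local, unsigned max)
{
    example_entry *top = local - 1;
    unsigned i = 0;

    for (example_entry *p = low; p <= high && i < max; ++p) {
        unsigned long d = p->descr;
        if (d != 0) {
            p->descr = 0;      /* claim the entry (done atomically in the GC) */
            ++top;
            top->start = p->start;
            top->descr = d;    /* use the value read before claiming          */
            ++i;
        }
    }
    return top;
}
#endif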
1193 /* Allocate or reallocate space for mark stack of size n entries. */
1194 /* May silently fail. */
1195 static void alloc_mark_stack(size_t n)
1197 mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct GC_ms_entry));
1199 /* Don't recycle a stack segment obtained with the wrong flags. */
1200 /* Win32 GetWriteWatch requires the right kind of memory. */
1201 static GC_bool GC_incremental_at_stack_alloc = FALSE;
1202 GC_bool recycle_old = (!GC_incremental || GC_incremental_at_stack_alloc);
1204 GC_incremental_at_stack_alloc = GC_incremental;
1206 # define recycle_old TRUE
1209 GC_mark_stack_too_small = FALSE;
1210 if (GC_mark_stack_size != 0) {
1211 if (new_stack != 0) {
1213 /* Recycle old space */
1214 size_t page_offset = (word)GC_mark_stack & (GC_page_size - 1);
1215 size_t size = GC_mark_stack_size * sizeof(struct GC_ms_entry);
1218 if (0 != page_offset) displ = GC_page_size - page_offset;
1219 size = (size - displ) & ~(GC_page_size - 1);
1221 GC_add_to_heap((struct hblk *)
1222 ((word)GC_mark_stack + displ), (word)size);
1225 GC_mark_stack = new_stack;
1226 GC_mark_stack_size = n;
1227 GC_mark_stack_limit = new_stack + n;
1228 if (GC_print_stats) {
1229 GC_log_printf("Grew mark stack to %lu frames\n",
1230 (unsigned long) GC_mark_stack_size);
1233 if (GC_print_stats) {
1234 GC_log_printf("Failed to grow mark stack to %lu frames\n",
1239 if (new_stack == 0) {
1240 GC_err_printf("No space for mark stack\n");
1243 GC_mark_stack = new_stack;
1244 GC_mark_stack_size = n;
1245 GC_mark_stack_limit = new_stack + n;
1247 GC_mark_stack_top = GC_mark_stack-1;
1250 GC_INNER void GC_mark_init(void)
1252 alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
1256 * Push all locations between b and t onto the mark stack.
1257 * b is the first location to be checked. t is one past the last
1258 * location to be checked.
1259 * Should only be used if there is no possibility of mark stack
1262 void GC_push_all(ptr_t bottom, ptr_t top)
1264 register word length;
1266 bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
1267 top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
1268 if (bottom >= top) return;
1270 GC_mark_stack_top++;
1271 if (GC_mark_stack_top >= GC_mark_stack_limit) {
1272 ABORT("Unexpected mark stack overflow");
1274 length = top - bottom;
1275 # if GC_DS_TAGS > ALIGNMENT - 1
1276 length += GC_DS_TAGS;
1277 length &= ~GC_DS_TAGS;
1279 GC_mark_stack_top -> mse_start = bottom;
1280 GC_mark_stack_top -> mse_descr = length;
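/*
 * Usage sketch (not compiled): GC_push_all is the building block for
 * pushing a root range, e.g. from a custom root-pushing hook.  The
 * existence of such a hook in a given configuration and the example_*
 * names are assumptions for illustration only.
 */
#if 0
static word example_my_roots[256];   /* hypothetical statically allocated roots */

static void example_push_my_roots(void)
{
    /* Push every word in the array as a candidate pointer.  GC_push_all    */
    /* rounds bottom up and top down to ALIGNMENT, so partially covered     */
    /* words at either end are not scanned.                                 */
    GC_push_all((ptr_t)example_my_roots,
                (ptr_t)(example_my_roots + 256));
}
#endif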
1283 #ifndef GC_DISABLE_INCREMENTAL
1285 /* Analogous to the above, but push only those pages h with */
1286 /* dirty_fn(h) != 0. We use GC_push_all to actually push the block. */
1287 /* Used both to selectively push dirty pages, or to push a block in */
1288 /* piecemeal fashion, to allow for more marking concurrency. */
1289 /* Will not overflow mark stack if GC_push_all pushes a small fixed */
1290 /* number of entries. (This is invoked only if GC_push_all pushes */
1291 /* a single entry, or if it marks each object before pushing it, thus */
1292 /* ensuring progress in the event of a stack overflow.) */
1293 STATIC void GC_push_selected(ptr_t bottom, ptr_t top,
1294 GC_bool (*dirty_fn)(struct hblk *))
1298 bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
1299 top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
1300 if (bottom >= top) return;
1302 h = HBLKPTR(bottom + HBLKSIZE);
1303 if (top <= (ptr_t) h) {
1304 if ((*dirty_fn)(h-1)) {
1305 GC_push_all(bottom, top);
1309 if ((*dirty_fn)(h-1)) {
1310 GC_push_all(bottom, (ptr_t)h);
1313 while ((ptr_t)(h+1) <= top) {
1314 if ((*dirty_fn)(h)) {
1315 if ((word)(GC_mark_stack_top - GC_mark_stack)
1316 > 3 * GC_mark_stack_size / 4) {
1317 /* Danger of mark stack overflow */
1318 GC_push_all((ptr_t)h, top);
1321 GC_push_all((ptr_t)h, (ptr_t)(h+1));
1327 if ((ptr_t)h != top && (*dirty_fn)(h)) {
1328 GC_push_all((ptr_t)h, top);
1330 if (GC_mark_stack_top >= GC_mark_stack_limit) {
1331 ABORT("Unexpected mark stack overflow");
1335 void GC_push_conditional(ptr_t bottom, ptr_t top, GC_bool all)
1338 GC_push_selected(bottom, top, GC_page_was_dirty);
1341 if (GC_dirty_maintained) {
1342 /* Pages that were never dirtied cannot contain pointers. */
1343 GC_push_selected(bottom, top, GC_page_was_ever_dirty);
1347 GC_push_all(bottom, top);
1351 #endif /* !GC_DISABLE_INCREMENTAL */
1353 #if defined(MSWIN32) || defined(MSWINCE)
1354 void __cdecl GC_push_one(word p)
1356 void GC_push_one(word p)
1359 GC_PUSH_ONE_STACK(p, MARKED_FROM_REGISTER);
1363 GC_API struct GC_ms_entry * GC_CALL GC_mark_and_push(void *obj,
1364 mse *mark_stack_ptr,
1365 mse *mark_stack_limit,
1372 if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)) {
1373 if (GC_all_interior_pointers) {
1374 hhdr = GC_find_header(GC_base(obj));
1376 GC_ADD_TO_BLACK_LIST_NORMAL(obj, (ptr_t)src);
1377 return mark_stack_ptr;
1380 GC_ADD_TO_BLACK_LIST_NORMAL(obj, (ptr_t)src);
1381 return mark_stack_ptr;
1384 if (EXPECT(HBLK_IS_FREE(hhdr), FALSE)) {
1385 GC_ADD_TO_BLACK_LIST_NORMAL(obj, (ptr_t)src);
1386 return mark_stack_ptr;
1389 PUSH_CONTENTS_HDR(obj, mark_stack_ptr /* modified */, mark_stack_limit,
1390 (ptr_t)src, was_marked, hhdr, TRUE);
1392 return mark_stack_ptr;
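/*
 * Usage sketch (not compiled): a client-supplied mark procedure, as
 * registered through the gc_mark.h API, grays the pointers it knows about
 * by calling GC_mark_and_push and returns the updated mark stack pointer.
 * The example_node layout, the mark-procedure signature, and the fourth
 * argument of GC_mark_and_push (the source address, used for black
 * listing) are assumptions to be checked against gc_mark.h.
 */
#if 0
struct example_node {
    struct example_node *left, *right;   /* the only pointers in the object */
    double payload[4];                    /* pointer-free data               */
};

static struct GC_ms_entry *example_mark_proc(GC_word *addr,
                                        struct GC_ms_entry *mark_stack_ptr,
                                        struct GC_ms_entry *mark_stack_limit,
                                        GC_word env)
{
    struct example_node *n = (struct example_node *)addr;

    (void)env;
    if (n->left != NULL)
        mark_stack_ptr = GC_mark_and_push(n->left, mark_stack_ptr,
                                          mark_stack_limit,
                                          (void **)&n->left);
    if (n->right != NULL)
        mark_stack_ptr = GC_mark_and_push(n->right, mark_stack_ptr,
                                          mark_stack_limit,
                                          (void **)&n->right);
    return mark_stack_ptr;
}
#endif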
1395 #if defined(MANUAL_VDB) && defined(THREADS)
1396 void GC_dirty(ptr_t p);
1399 /* Mark and push (i.e. gray) a single object p onto the main */
1400 /* mark stack. Consider p to be valid if it is an interior */
1402 /* The object p has passed a preliminary pointer validity */
1403 /* test, but we do not definitely know whether it is valid. */
1404 /* Mark bits are NOT atomically updated. Thus this must be the */
1405 /* only thread setting them. */
1406 # if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
1407 GC_INNER void GC_mark_and_push_stack(ptr_t p, ptr_t source)
1409 GC_INNER void GC_mark_and_push_stack(ptr_t p)
1410 # define source ((ptr_t)0)
1418 if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)) {
1424 GC_ADD_TO_BLACK_LIST_STACK(p, source);
1428 if (EXPECT(HBLK_IS_FREE(hhdr), FALSE)) {
1429 GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
1432 # if defined(MANUAL_VDB) && defined(THREADS)
1433 /* Pointer is on the stack. We may have dirtied the object */
1434 /* it points to, but have not yet called GC_dirty(). */
1435 GC_dirty(p); /* Implicitly affects entire object. */
1437 PUSH_CONTENTS_HDR(r, GC_mark_stack_top, GC_mark_stack_limit,
1438 source, mark_and_push_exit, hhdr, FALSE);
1439 mark_and_push_exit: ;
1440 /* We silently ignore pointers to near the end of a block, */
1441 /* which is very mildly suboptimal. */
1442 /* FIXME: We should probably add a header word to address */
1449 # define TRACE_ENTRIES 1000
1451 struct trace_entry {
1457 } GC_trace_buf[TRACE_ENTRIES];
1459 int GC_trace_buf_ptr = 0;
1461 void GC_add_trace_entry(char *kind, word arg1, word arg2)
1463 GC_trace_buf[GC_trace_buf_ptr].kind = kind;
1464 GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
1465 GC_trace_buf[GC_trace_buf_ptr].bytes_allocd = GC_bytes_allocd;
1466 GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
1467 GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
1469 if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
1472 void GC_print_trace(word gc_no, GC_bool lock)
1475 struct trace_entry *p;
1479 for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
1480 if (i < 0) i = TRACE_ENTRIES-1;
1481 p = GC_trace_buf + i;
1482 if (p -> gc_no < gc_no || p -> kind == 0) return;
1483 printf("Trace:%s (gc:%u,bytes:%lu) 0x%X, 0x%X\n",
1484 p -> kind, (unsigned)p -> gc_no,
1485 (unsigned long)p -> bytes_allocd,
1486 (p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
1488 printf("Trace incomplete\n");
1492 # endif /* TRACE_BUF */
1495 * A version of GC_push_all that treats all interior pointers as valid
1496 * and scans the entire region immediately, in case the contents
1499 GC_INNER void GC_push_all_eager(ptr_t bottom, ptr_t top)
1501 word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
1502 word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
1506 register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
1507 register ptr_t least_ha = GC_least_plausible_heap_addr;
1508 # define GC_greatest_plausible_heap_addr greatest_ha
1509 # define GC_least_plausible_heap_addr least_ha
1511 if (top == 0) return;
1512 /* check all pointers in range and push if they appear */
1514 lim = t - 1 /* longword */;
1515 for (p = b; p <= lim; p = (word *)(((ptr_t)p) + ALIGNMENT)) {
1517 GC_PUSH_ONE_STACK(q, p);
1519 # undef GC_greatest_plausible_heap_addr
1520 # undef GC_least_plausible_heap_addr
1523 GC_INNER void GC_push_all_stack(ptr_t bottom, ptr_t top)
1525 # if defined(THREADS) && defined(MPROTECT_VDB)
1526 GC_push_all_eager(bottom, top);
1528 if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
1529 GC_push_all(bottom, top);
1531 GC_push_all_eager(bottom, top);
1536 #if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES) && \
1537 defined(MARK_BIT_PER_GRANULE)
1538 # if GC_GRANULE_WORDS == 1
1539 # define USE_PUSH_MARKED_ACCELERATORS
1540 # define PUSH_GRANULE(q) \
1541 { word qcontents = (q)[0]; \
1542 GC_PUSH_ONE_HEAP(qcontents, (q)); }
1543 # elif GC_GRANULE_WORDS == 2
1544 # define USE_PUSH_MARKED_ACCELERATORS
1545 # define PUSH_GRANULE(q) \
1546 { word qcontents = (q)[0]; \
1547 GC_PUSH_ONE_HEAP(qcontents, (q)); \
1548 qcontents = (q)[1]; \
1549 GC_PUSH_ONE_HEAP(qcontents, (q)+1); }
1550 # elif GC_GRANULE_WORDS == 4
1551 # define USE_PUSH_MARKED_ACCELERATORS
1552 # define PUSH_GRANULE(q) \
1553 { word qcontents = (q)[0]; \
1554 GC_PUSH_ONE_HEAP(qcontents, (q)); \
1555 qcontents = (q)[1]; \
1556 GC_PUSH_ONE_HEAP(qcontents, (q)+1); \
1557 qcontents = (q)[2]; \
1558 GC_PUSH_ONE_HEAP(qcontents, (q)+2); \
1559 qcontents = (q)[3]; \
1560 GC_PUSH_ONE_HEAP(qcontents, (q)+3); }
1562 #endif /* !USE_MARK_BYTES && MARK_BIT_PER_GRANULE */
1564 #ifdef USE_PUSH_MARKED_ACCELERATORS
1565 /* Push all objects reachable from marked objects in the given block */
1566 /* containing objects of size 1 granule. */
1567 STATIC void GC_push_marked1(struct hblk *h, hdr *hhdr)
1569 word * mark_word_addr = &(hhdr->hb_marks[0]);
1575 /* Allow registers to be used for some frequently accessed */
1576 /* global variables. Otherwise aliasing issues are likely */
1577 /* to prevent that. */
1578 ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
1579 ptr_t least_ha = GC_least_plausible_heap_addr;
1580 mse * mark_stack_top = GC_mark_stack_top;
1581 mse * mark_stack_limit = GC_mark_stack_limit;
1582 # define GC_mark_stack_top mark_stack_top
1583 # define GC_mark_stack_limit mark_stack_limit
1584 # define GC_greatest_plausible_heap_addr greatest_ha
1585 # define GC_least_plausible_heap_addr least_ha
1587 p = (word *)(h->hb_body);
1588 plim = (word *)(((word)h) + HBLKSIZE);
1590 /* go through all words in block */
1592 mark_word = *mark_word_addr++;
1594 while(mark_word != 0) {
1595 if (mark_word & 1) {
1598 q += GC_GRANULE_WORDS;
1601 p += WORDSZ*GC_GRANULE_WORDS;
1604 # undef GC_greatest_plausible_heap_addr
1605 # undef GC_least_plausible_heap_addr
1606 # undef GC_mark_stack_top
1607 # undef GC_mark_stack_limit
1609 GC_mark_stack_top = mark_stack_top;
1613 #ifndef UNALIGNED_PTRS
1615 /* Push all objects reachable from marked objects in the given block */
1616 /* containing objects of size 2 granules. */
1617 STATIC void GC_push_marked2(struct hblk *h, hdr *hhdr)
1619 word * mark_word_addr = &(hhdr->hb_marks[0]);
1625 ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
1626 ptr_t least_ha = GC_least_plausible_heap_addr;
1627 mse * mark_stack_top = GC_mark_stack_top;
1628 mse * mark_stack_limit = GC_mark_stack_limit;
1630 # define GC_mark_stack_top mark_stack_top
1631 # define GC_mark_stack_limit mark_stack_limit
1632 # define GC_greatest_plausible_heap_addr greatest_ha
1633 # define GC_least_plausible_heap_addr least_ha
1635 p = (word *)(h->hb_body);
1636 plim = (word *)(((word)h) + HBLKSIZE);
1638 /* go through all words in block */
1640 mark_word = *mark_word_addr++;
1642 while(mark_word != 0) {
1643 if (mark_word & 1) {
1645 PUSH_GRANULE(q + GC_GRANULE_WORDS);
1647 q += 2 * GC_GRANULE_WORDS;
1650 p += WORDSZ*GC_GRANULE_WORDS;
1653 # undef GC_greatest_plausible_heap_addr
1654 # undef GC_least_plausible_heap_addr
1655 # undef GC_mark_stack_top
1656 # undef GC_mark_stack_limit
1658 GC_mark_stack_top = mark_stack_top;
1661 # if GC_GRANULE_WORDS < 4
1662 /* Push all objects reachable from marked objects in the given block */
1663 /* containing objects of size 4 granules. */
1664 /* There is a risk of mark stack overflow here. But we handle that. */
1665 /* And only unmarked objects get pushed, so it's not very likely. */
1666 STATIC void GC_push_marked4(struct hblk *h, hdr *hhdr)
1668 word * mark_word_addr = &(hhdr->hb_marks[0]);
1674 ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
1675 ptr_t least_ha = GC_least_plausible_heap_addr;
1676 mse * mark_stack_top = GC_mark_stack_top;
1677 mse * mark_stack_limit = GC_mark_stack_limit;
1678 # define GC_mark_stack_top mark_stack_top
1679 # define GC_mark_stack_limit mark_stack_limit
1680 # define GC_greatest_plausible_heap_addr greatest_ha
1681 # define GC_least_plausible_heap_addr least_ha
1683 p = (word *)(h->hb_body);
1684 plim = (word *)(((word)h) + HBLKSIZE);
1686 /* go through all words in block */
1688 mark_word = *mark_word_addr++;
1690 while(mark_word != 0) {
1691 if (mark_word & 1) {
1693 PUSH_GRANULE(q + GC_GRANULE_WORDS);
1694 PUSH_GRANULE(q + 2*GC_GRANULE_WORDS);
1695 PUSH_GRANULE(q + 3*GC_GRANULE_WORDS);
1697 q += 4 * GC_GRANULE_WORDS;
1700 p += WORDSZ*GC_GRANULE_WORDS;
1702 # undef GC_greatest_plausible_heap_addr
1703 # undef GC_least_plausible_heap_addr
1704 # undef GC_mark_stack_top
1705 # undef GC_mark_stack_limit
1706 GC_mark_stack_top = mark_stack_top;
1709 #endif /* GC_GRANULE_WORDS < 4 */
1711 #endif /* UNALIGNED_PTRS */
1713 #endif /* USE_PUSH_MARKED_ACCELERATORS */
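/*
 * Standalone sketch (not compiled) of the bit-walking pattern used by the
 * accelerators above: examine one word of the mark bitmap at a time, and
 * for each set bit process the corresponding granule (the real loops
 * advance the object pointer q in step with the bit index).  The example_*
 * names are hypothetical.
 */
#if 0
#include <stddef.h>

static void example_walk_mark_bits(const unsigned long *mark_words,
                                   size_t n_mark_words,
                                   void (*process_granule)(size_t granule_no))
{
    size_t base = 0;  /* granule number corresponding to bit 0 of mark_words[0] */

    for (size_t i = 0; i < n_mark_words; ++i) {
        unsigned long mark_word = mark_words[i];
        size_t granule = base;

        while (mark_word != 0) {
            if (mark_word & 1) {
                process_granule(granule);  /* push the contents of this granule */
            }
            mark_word >>= 1;
            ++granule;
        }
        base += 8 * sizeof(unsigned long); /* one granule per mark bit */
    }
}
#endif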
1715 /* Push all objects reachable from marked objects in the given block */
1716 STATIC void GC_push_marked(struct hblk *h, hdr *hhdr)
1718 size_t sz = hhdr -> hb_sz;
1719 word descr = hhdr -> hb_descr;
1723 mse * GC_mark_stack_top_reg;
1724 mse * mark_stack_limit = GC_mark_stack_limit;
1726 /* Some quick shortcuts: */
1727 if ((0 | GC_DS_LENGTH) == descr) return;
1728 if (GC_block_empty(hhdr)/* nothing marked */) return;
1729 GC_n_rescuing_pages++;
1730 GC_objects_are_marked = TRUE;
1731 if (sz > MAXOBJBYTES) {
1734 lim = (h + 1)->hb_body - sz;
1737 switch(BYTES_TO_GRANULES(sz)) {
1738 # if defined(USE_PUSH_MARKED_ACCELERATORS)
1740 GC_push_marked1(h, hhdr);
1742 # if !defined(UNALIGNED_PTRS)
1744 GC_push_marked2(h, hhdr);
1746 # if GC_GRANULE_WORDS < 4
1748 GC_push_marked4(h, hhdr);
1754 GC_mark_stack_top_reg = GC_mark_stack_top;
1755 for (p = h -> hb_body, bit_no = 0; p <= lim;
1756 p += sz, bit_no += MARK_BIT_OFFSET(sz)) {
1757 if (mark_bit_from_hdr(hhdr, bit_no)) {
1758 /* Mark from fields inside the object */
1759 PUSH_OBJ(p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
1762 GC_mark_stack_top = GC_mark_stack_top_reg;
1766 #ifndef GC_DISABLE_INCREMENTAL
1767 /* Test whether any page in the given block is dirty. */
1768 STATIC GC_bool GC_block_was_dirty(struct hblk *h, hdr *hhdr)
1770 size_t sz = hhdr -> hb_sz;
1772 if (sz <= MAXOBJBYTES) {
1773 return(GC_page_was_dirty(h));
1776 while (p < (ptr_t)h + sz) {
1777 if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
1783 #endif /* GC_DISABLE_INCREMENTAL */
1785 /* Similar to GC_push_marked, but skip over unallocated blocks */
1786 /* and return address of next plausible block. */
1787 STATIC struct hblk * GC_push_next_marked(struct hblk *h)
1789 hdr * hhdr = HDR(h);
1791 if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr) || HBLK_IS_FREE(hhdr), FALSE)) {
1792 h = GC_next_used_block(h);
1793 if (h == 0) return(0);
1794 hhdr = GC_find_header((ptr_t)h);
1796 GC_push_marked(h, hhdr);
1797 return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
1800 #ifndef GC_DISABLE_INCREMENTAL
1801 /* Identical to above, but mark only from dirty pages */
1802 STATIC struct hblk * GC_push_next_marked_dirty(struct hblk *h)
1804 hdr * hhdr = HDR(h);
1806 if (!GC_dirty_maintained) ABORT("Dirty bits not set up");
1808 if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr)
1809 || HBLK_IS_FREE(hhdr), FALSE)) {
1810 h = GC_next_used_block(h);
1811 if (h == 0) return(0);
1812 hhdr = GC_find_header((ptr_t)h);
1814 # ifdef STUBBORN_ALLOC
1815 if (hhdr -> hb_obj_kind == STUBBORN) {
1816 if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
1820 if (GC_block_was_dirty(h, hhdr)) break;
1823 if (GC_block_was_dirty(h, hhdr)) break;
1825 h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
1828 GC_push_marked(h, hhdr);
1829 return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
1831 #endif /* !GC_DISABLE_INCREMENTAL */
1833 /* Similar to above, but for uncollectable pages. Needed since we */
1834 /* do not clear marks for such pages, even for full collections. */
1835 STATIC struct hblk * GC_push_next_marked_uncollectable(struct hblk *h)
1837 hdr * hhdr = HDR(h);
1840 if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr)
1841 || HBLK_IS_FREE(hhdr), FALSE)) {
1842 h = GC_next_used_block(h);
1843 if (h == 0) return(0);
1844 hhdr = GC_find_header((ptr_t)h);
1846 if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
1847 h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
1850 GC_push_marked(h, hhdr);
1851 return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));