/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include "private/gc_pmark.h"

#if defined(MSWIN32) && defined(__GNUC__)
# include <excpt.h>
#endif

/* We put this here to minimize the risk of inlining. */
#if defined(__BORLANDC__) || defined(__WATCOMC__)
  void GC_noop(void *p, ...) {}
#else
  void GC_noop() {}
#endif
/* Single argument version, robust against whole program analysis. */
GC_API void GC_CALL GC_noop1(word x)
{
    static volatile word sink;

    sink = x;
}

/* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */

GC_INNER unsigned GC_n_mark_procs = GC_RESERVED_MARK_PROCS;
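/* Illustrative sketch (not part of the original file): a client can    */
/* claim one of the mark-procedure slots counted above with the public  */
/* GC_new_proc() from gc_mark.h; the marker then calls back into it for */
/* objects allocated with a matching GC_DS_PROC descriptor.  The object */
/* layout below is hypothetical.                                        */
#if 0
# include "gc_mark.h"
  struct my_obj { void *link; GC_word tag; };   /* hypothetical layout */

  static struct GC_ms_entry *my_mark_proc(GC_word *addr,
                                          struct GC_ms_entry *mark_stack_ptr,
                                          struct GC_ms_entry *mark_stack_limit,
                                          GC_word env)
  {
    struct my_obj *obj = (struct my_obj *)addr;

    /* Gray the single pointer field; GC_MARK_AND_PUSH performs the    */
    /* plausibility check and pushes the target object if appropriate. */
    return GC_MARK_AND_PUSH(obj -> link, mark_stack_ptr, mark_stack_limit,
                            (void **)&obj -> link);
  }

  /* Registered once at startup: my_index = GC_new_proc(my_mark_proc); */
#endif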
/* Initialize GC_obj_kinds properly and standard free lists properly.  */
/* This must be done statically since they may be accessed before      */
/* GC_init is called.                                                  */
/* It's done here, since we need to deal with mark descriptors.        */
GC_INNER struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
/* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
                0 | GC_DS_LENGTH, FALSE, FALSE },
/* NORMAL  */ { &GC_objfreelist[0], 0,
                0 | GC_DS_LENGTH,  /* Adjusted in GC_init for EXTRA_BYTES */
                TRUE /* add length to descr */, TRUE },
/* UNCOLLECTABLE */
              { &GC_uobjfreelist[0], 0,
                0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
# ifdef ATOMIC_UNCOLLECTABLE
   /* AUNCOLLECTABLE */
              { &GC_auobjfreelist[0], 0,
                0 | GC_DS_LENGTH, FALSE /* add length to descr */, FALSE },
# endif
# ifdef STUBBORN_ALLOC
/*STUBBORN*/ { (void **)&GC_sobjfreelist[0], 0,
                0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
# endif
};
# ifdef ATOMIC_UNCOLLECTABLE
#   ifdef STUBBORN_ALLOC
#     define GC_N_KINDS_INITIAL_VALUE 5
#   else
#     define GC_N_KINDS_INITIAL_VALUE 4
#   endif
# else
#   ifdef STUBBORN_ALLOC
#     define GC_N_KINDS_INITIAL_VALUE 4
#   else
#     define GC_N_KINDS_INITIAL_VALUE 3
#   endif
# endif

GC_INNER unsigned GC_n_kinds = GC_N_KINDS_INITIAL_VALUE;
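/* Illustrative sketch (not part of the original file): kinds beyond    */
/* the static table above can be registered at run time through the    */
/* public gc_mark.h interface.  "register_my_kind" is hypothetical.     */
#if 0
# include "gc_mark.h"
  static unsigned register_my_kind(void)
  {
    void **my_fl = GC_new_free_list();

    /* Simple length descriptor, object size added per object, and     */
    /* new objects cleared, mirroring the NORMAL entry above.          */
    return GC_new_kind(my_fl, 0 | GC_DS_LENGTH,
                       TRUE /* add length to descr */,
                       TRUE /* clear new objects */);
  }
#endif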
# ifndef INITIAL_MARK_STACK_SIZE
#   define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
                /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a    */
                /* multiple of HBLKSIZE.                                */
                /* The incremental collector actually likes a larger    */
                /* size, since it wants to push all marked dirty        */
                /* objects before marking anything new.  Currently we   */
                /* let it grow dynamically.                             */
# endif
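/* A minimal sketch (not in the original): the sizing rule stated above */
/* could be checked with the collector's own GC_STATIC_ASSERT from      */
/* gc_priv.h, e.g. placed inside GC_mark_init():                        */
#if 0
  GC_STATIC_ASSERT((INITIAL_MARK_STACK_SIZE * sizeof(mse)) % HBLKSIZE == 0);
#endif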
/*
 * Limits of stack for GC_mark routine.
 * All ranges between GC_mark_stack (incl.) and GC_mark_stack_top (incl.)
 * still need to be marked from.
 */

STATIC word GC_n_rescuing_pages = 0;
                                /* Number of dirty pages we marked from */
                                /* excludes ptrfree pages, etc.         */
GC_INNER mse * GC_mark_stack = NULL;
GC_INNER mse * GC_mark_stack_limit = NULL;
GC_INNER size_t GC_mark_stack_size = 0;

#ifdef PARALLEL_MARK
  GC_INNER mse * volatile GC_mark_stack_top = NULL;
        /* Updated only with mark lock held, but read asynchronously.  */
  STATIC volatile AO_t GC_first_nonempty = 0;
        /* Lowest entry on mark stack   */
        /* that may be nonempty.        */
        /* Updated only by initiating   */
        /* thread.                      */
#else
  GC_INNER mse * GC_mark_stack_top = NULL;
#endif
GC_INNER mark_state_t GC_mark_state = MS_NONE;

GC_INNER GC_bool GC_mark_stack_too_small = FALSE;

static struct hblk * scan_ptr;

STATIC GC_bool GC_objects_are_marked = FALSE;
                /* Are there collectable marked objects in the heap?   */

/* Is a collection in progress?  Note that this can return true in the */
/* nonincremental case, if a collection has been abandoned and the     */
/* mark state is now MS_INVALID.                                       */
GC_INNER GC_bool GC_collection_in_progress(void)
{
    return(GC_mark_state != MS_NONE);
}
/* Clear all mark bits in the header.   */
GC_INNER void GC_clear_hdr_marks(hdr *hhdr)
{
    size_t last_bit = FINAL_MARK_BIT(hhdr -> hb_sz);

#   ifdef USE_MARK_BYTES
      BZERO(hhdr -> hb_marks, MARK_BITS_SZ);
      hhdr -> hb_marks[last_bit] = 1;
#   else
      BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
      set_mark_bit_from_hdr(hhdr, last_bit);
#   endif
    hhdr -> hb_n_marks = 0;
}
/* Set all mark bits in the header.  Used for uncollectable blocks. */
GC_INNER void GC_set_hdr_marks(hdr *hhdr)
{
    unsigned i;
    size_t sz = hhdr -> hb_sz;
    unsigned n_marks = (unsigned)FINAL_MARK_BIT(sz);

#   ifdef USE_MARK_BYTES
      for (i = 0; i <= n_marks; i += (unsigned)MARK_BIT_OFFSET(sz)) {
        hhdr -> hb_marks[i] = 1;
      }
#   else
      for (i = 0; i < divWORDSZ(n_marks + WORDSZ); ++i) {
        hhdr -> hb_marks[i] = ONES;
      }
#   endif
#   ifdef MARK_BIT_PER_OBJ
      hhdr -> hb_n_marks = n_marks - 1;
#   else
      hhdr -> hb_n_marks = HBLK_OBJS(sz);
#   endif
}
/*
 * Clear all mark bits associated with block h.
 */
static void clear_marks_for_block(struct hblk *h, word dummy)
{
    register hdr * hhdr = HDR(h);

    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
        /* Mark bit for these is cleared only once the object is       */
        /* explicitly deallocated.  This either frees the block, or    */
        /* the bit is cleared once the object is on the free list.     */
    GC_clear_hdr_marks(hhdr);
}
/* Slow but general routines for setting/clearing/asking about mark bits */
GC_INNER void GC_set_mark_bit(ptr_t p)
{
    struct hblk *h = HBLKPTR(p);
    hdr * hhdr = HDR(h);
    word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);

    if (!mark_bit_from_hdr(hhdr, bit_no)) {
      set_mark_bit_from_hdr(hhdr, bit_no);
      ++hhdr -> hb_n_marks;
    }
}

GC_INNER void GC_clear_mark_bit(ptr_t p)
{
    struct hblk *h = HBLKPTR(p);
    hdr * hhdr = HDR(h);
    word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);

    if (mark_bit_from_hdr(hhdr, bit_no)) {
      size_t n_marks;

      clear_mark_bit_from_hdr(hhdr, bit_no);
      n_marks = hhdr -> hb_n_marks - 1;
#     ifdef PARALLEL_MARK
        if (n_marks != 0 || !GC_parallel)
          hhdr -> hb_n_marks = n_marks;
        /* Don't decrement to zero.  The counts are approximate due to */
        /* concurrency issues, but we need to ensure that a count of   */
        /* zero implies an empty block.                                */
#     else
        hhdr -> hb_n_marks = n_marks;
#     endif
    }
}

GC_bool GC_is_marked(ptr_t p)
{
    struct hblk *h = HBLKPTR(p);
    hdr * hhdr = HDR(h);
    word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);

    return((GC_bool)mark_bit_from_hdr(hhdr, bit_no));
}
/*
 * Clear mark bits in all allocated heap blocks.  This invalidates
 * the marker invariant, and sets GC_mark_state to reflect this.
 * (This implicitly starts marking to reestablish the invariant.)
 */
GC_INNER void GC_clear_marks(void)
{
    GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
    GC_objects_are_marked = FALSE;
    GC_mark_state = MS_INVALID;
    scan_ptr = 0;
}
#ifdef CHECKSUMS
  void GC_check_dirty(void);
#endif

/* Initiate a garbage collection.  Initiates a full collection if the  */
/* mark state is invalid.                                              */
GC_INNER void GC_initiate_gc(void)
{
    if (GC_dirty_maintained) GC_read_dirty();
#   ifdef STUBBORN_ALLOC
      GC_read_changed();
#   endif
#   ifdef CHECKSUMS
      if (GC_dirty_maintained) GC_check_dirty();
#   endif
    GC_n_rescuing_pages = 0;
    if (GC_mark_state == MS_NONE) {
        GC_mark_state = MS_PUSH_RESCUERS;
    } else if (GC_mark_state != MS_INVALID) {
        ABORT("unexpected state");
    } /* else this is really a full collection, and mark        */
      /* bits are invalid.                                      */
    scan_ptr = 0;
}
#ifdef PARALLEL_MARK
    STATIC void GC_do_parallel_mark(void); /* initiate parallel marking. */
#endif /* PARALLEL_MARK */

#ifdef GC_DISABLE_INCREMENTAL
# define GC_push_next_marked_dirty(h) GC_push_next_marked(h)
#else
  STATIC struct hblk * GC_push_next_marked_dirty(struct hblk *h);
                /* Invoke GC_push_marked on next dirty block above h.   */
                /* Return a pointer just past the end of this block.    */
#endif /* !GC_DISABLE_INCREMENTAL */
STATIC struct hblk * GC_push_next_marked(struct hblk *h);
                /* Ditto, but also mark from clean pages.       */
STATIC struct hblk * GC_push_next_marked_uncollectable(struct hblk *h);
                /* Ditto, but mark only from uncollectable pages.       */

static void alloc_mark_stack(size_t);
# if (defined(MSWIN32) || defined(MSWINCE)) && !defined(__GNUC__) \
     || defined(MSWIN32) && defined(I386) /* for Win98 */ \
     || defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS)
    /* Under rare conditions, we may end up marking from nonexistent memory. */
    /* Hence we need to be prepared to recover by running GC_mark_some       */
    /* with a suitable handler in place.                                     */
    /* FIXME: Should we really need it for WinCE?  If yes then               */
    /* WRAP_MARK_SOME should be also defined for CeGCC which requires        */
    /* CPU/OS-specific code in mark_ex_handler() and GC_mark_some()          */
    /* (for manual stack unwinding and exception handler installation).      */
#   define WRAP_MARK_SOME
# endif
/* Perform a small amount of marking.                   */
/* We try to touch roughly a page of memory.            */
/* Return TRUE if we just finished a mark phase.        */
/* Cold_gc_frame is an address inside a GC frame that   */
/* remains valid until all marking is complete.         */
/* A zero value indicates that it's OK to miss some     */
/* register values.                                     */
/* We hold the allocation lock.  In the case of         */
/* incremental collection, the world may not be stopped.*/
#ifdef WRAP_MARK_SOME
  /* For win32, this is called after we establish a structured  */
  /* exception handler, in case Windows unmaps one of our root  */
  /* segments.  See below.  In either case, we acquire the      */
  /* allocator lock long before we get here.                    */
  STATIC GC_bool GC_mark_some_inner(ptr_t cold_gc_frame)
#else
  GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame)
#endif
{
    switch(GC_mark_state) {
        case MS_NONE:
            break;

        case MS_PUSH_RESCUERS:
            if (GC_mark_stack_top
                >= GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/2) {
                /* Go ahead and mark, even though that might cause us to */
                /* see more marked dirty objects later on.  Avoid this   */
                /* in the future.                                        */
                GC_mark_stack_too_small = TRUE;
                MARK_FROM_MARK_STACK();
                break;
            } else {
                scan_ptr = GC_push_next_marked_dirty(scan_ptr);
                if (scan_ptr == 0) {
                    if (GC_print_stats) {
                        GC_log_printf("Marked from %lu dirty pages\n",
                                      (unsigned long)GC_n_rescuing_pages);
                    }
                    GC_push_roots(FALSE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;
                    }
                }
            }
            break;
        case MS_PUSH_UNCOLLECTABLE:
            if (GC_mark_stack_top
                >= GC_mark_stack + GC_mark_stack_size/4) {
#               ifdef PARALLEL_MARK
                  /* Avoid this, since we don't parallelize the marker  */
                  /* from uncollectable blocks.                         */
                  if (GC_parallel) GC_mark_stack_too_small = TRUE;
#               endif
                MARK_FROM_MARK_STACK();
                break;
            } else {
                scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
                if (scan_ptr == 0) {
                    GC_push_roots(TRUE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;
                    }
                }
            }
            break;
        case MS_ROOTS_PUSHED:
#           ifdef PARALLEL_MARK
              /* In the incremental GC case, this currently doesn't     */
              /* quite do the right thing, since it runs to             */
              /* completion.  On the other hand, starting a             */
              /* parallel marker is expensive, so perhaps it is         */
              /* the right thing?                                       */
              /* Eventually, incremental marking should run             */
              /* asynchronously in multiple threads, without grabbing   */
              /* the allocation lock.                                   */
              if (GC_parallel) {
                GC_do_parallel_mark();
                GC_ASSERT(GC_mark_stack_top < (mse *)GC_first_nonempty);
                GC_mark_stack_top = GC_mark_stack - 1;
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                if (GC_mark_state == MS_ROOTS_PUSHED) {
                    GC_mark_state = MS_NONE;
                    return(TRUE);
                }
                break;
              }
#           endif
            if (GC_mark_stack_top >= GC_mark_stack) {
                MARK_FROM_MARK_STACK();
                break;
            } else {
                GC_mark_state = MS_NONE;
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                return(TRUE);
            }
        case MS_INVALID:
        case MS_PARTIALLY_INVALID:
            if (!GC_objects_are_marked) {
                GC_mark_state = MS_PUSH_UNCOLLECTABLE;
                break;
            }
            if (GC_mark_stack_top >= GC_mark_stack) {
                MARK_FROM_MARK_STACK();
                break;
            }
            if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
                /* About to start a heap scan for marked objects. */
                /* Mark stack is empty.  OK to reallocate.        */
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                GC_mark_state = MS_PARTIALLY_INVALID;
            }
            scan_ptr = GC_push_next_marked(scan_ptr);
            if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
                GC_push_roots(TRUE, cold_gc_frame);
                GC_objects_are_marked = TRUE;
                if (GC_mark_state != MS_INVALID) {
                    GC_mark_state = MS_ROOTS_PUSHED;
                }
            }
            break;

        default:
            ABORT("GC_mark_some: bad state");
    }
    return(FALSE);
}
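/* Illustrative sketch (not part of the original file): the collector's */
/* own driver (cf. the stop-the-world marking loop in alloc.c) calls    */
/* GC_mark_some repeatedly under the allocation lock until it reports   */
/* that a mark phase has completed:                                     */
#if 0
  while (!GC_mark_some((ptr_t)0)) {
      /* Marking is incomplete; an incremental collector could yield   */
      /* here and resume marking later.  A nonzero cold_gc_frame would */
      /* let the marker skip register/stack contents below that frame. */
  }
#endif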
#ifdef WRAP_MARK_SOME
# if (defined(MSWIN32) || defined(MSWINCE)) && defined(__GNUC__)

    typedef struct ext_ex_regn {
      EXCEPTION_REGISTRATION ex_reg;
      void *alt_path;
    } ext_ex_regn;

    static EXCEPTION_DISPOSITION mark_ex_handler(
        struct _EXCEPTION_RECORD *ex_rec,
        void *est_frame,
        struct _CONTEXT *context,
        void *disp_ctxt)
    {
        if (ex_rec->ExceptionCode == STATUS_ACCESS_VIOLATION) {
          ext_ex_regn *xer = (ext_ex_regn *)est_frame;

          /* Unwind from the inner function assuming the standard */
          /* function prologue.                                   */
          /* Assumes code has not been compiled with              */
          /* -fomit-frame-pointer.                                */
          context->Esp = context->Ebp;
          context->Ebp = *((DWORD *)context->Esp);
          context->Esp = context->Esp - 8;

          /* Resume execution at the "real" handler within the   */
          /* wrapper function.                                    */
          context->Eip = (DWORD)(xer->alt_path);
          return ExceptionContinueExecution;

        } else {
          return ExceptionContinueSearch;
        }
    }
# endif /* __GNUC__ && MSWIN32 */
#if defined(GC_WIN32_THREADS) && !defined(__GNUC__)
  GC_bool GC_started_thread_while_stopped(void);
        /* In win32_threads.c.  Did we invalidate mark phase with an   */
        /* unexpected thread start?                                    */
#endif
  GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame)
  {
      GC_bool ret_val;

#   if defined(MSWIN32) || defined(MSWINCE)
#     ifndef __GNUC__
        /* Windows 98 appears to asynchronously create and remove      */
        /* writable memory mappings, for reasons we haven't yet        */
        /* understood.  Since we look for writable regions to          */
        /* determine the root set, we may try to mark from an          */
        /* address range that disappeared since we started the         */
        /* collection.  Thus we have to recover from faults here.      */
        /* This code does not appear to be necessary for Windows       */
        /* 95/NT/2000+.  Note that this code should never generate     */
        /* an incremental GC write fault.                              */
        /* This code seems to be necessary for WinCE (at least in      */
        /* the case we'd decide to add MEM_PRIVATE sections to         */
        /* data roots in GC_register_dynamic_libraries()).             */
        /* It's conceivable that this is the same issue with           */
        /* terminating threads that we see with Linux and              */
        /* USE_PROC_FOR_LIBRARIES.                                     */
        __try {
          ret_val = GC_mark_some_inner(cold_gc_frame);
        } __except (GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ?
                    EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
          goto handle_ex;
        }
#       ifdef GC_WIN32_THREADS
          /* With DllMain-based thread tracking, a thread may have        */
          /* started while we were marking.  This is logically equivalent */
          /* to the exception case; our results are invalid and we have   */
          /* to start over.  This cannot be prevented since we can't      */
          /* block in DllMain.                                            */
          if (GC_started_thread_while_stopped()) goto handle_ex;
#       endif
      rm_handler:
        return ret_val;
#     else /* __GNUC__ */

        /* Manually install an exception handler since GCC does        */
        /* not yet support Structured Exception Handling (SEH) on      */
        /* Win32.                                                      */

        ext_ex_regn er;

        er.alt_path = &&handle_ex;
        er.ex_reg.handler = mark_ex_handler;
        asm volatile ("movl %%fs:0, %0" : "=r" (er.ex_reg.prev));
        asm volatile ("movl %0, %%fs:0" : : "r" (&er));
        ret_val = GC_mark_some_inner(cold_gc_frame);
        /* Prevent GCC from considering the following code unreachable */
        /* and thus eliminating it.                                    */
        if (er.alt_path == 0)
          goto handle_ex;

      rm_handler:
        /* Uninstall the exception handler. */
        asm volatile ("mov %0, %%fs:0" : : "r" (er.ex_reg.prev));
        return ret_val;

#     endif /* __GNUC__ */
#   else /* !MSWIN32 */
      /* Here we are handling the case in which /proc is used for root */
      /* finding, and we have threads.  We may find a stack for a      */
      /* thread that is in the process of exiting, and disappears      */
      /* while we are marking it.  This seems extremely difficult to   */
      /* avoid otherwise.                                              */
      if (GC_incremental) {
        WARN("Incremental GC incompatible with /proc roots\n", 0);
        /* I'm not sure if this could still work ...    */
      }
      GC_setup_temporary_fault_handler();
      if(SETJMP(GC_jmp_buf) != 0) goto handle_ex;
      ret_val = GC_mark_some_inner(cold_gc_frame);
    rm_handler:
      GC_reset_fault_handler();
      return ret_val;

#   endif /* !MSWIN32 */
handle_ex:
    /* Exception handler starts here for all cases. */
      if (GC_print_stats) {
        GC_log_printf("Caught ACCESS_VIOLATION in marker. "
                      "Memory mapping disappeared.\n");
      }

      /* We have bad roots on the stack.  Discard mark stack.   */
      /* Rescan from marked objects.  Redetermine roots.        */
      GC_invalidate_mark_state();
      scan_ptr = 0;

      ret_val = FALSE;
      goto rm_handler;  /* Back to platform-specific code. */
  }
#endif /* WRAP_MARK_SOME */
GC_INNER GC_bool GC_mark_stack_empty(void)
{
    return(GC_mark_stack_top < GC_mark_stack);
}

GC_INNER void GC_invalidate_mark_state(void)
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_top = GC_mark_stack-1;
}

GC_INNER mse * GC_signal_mark_stack_overflow(mse *msp)
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_too_small = TRUE;
    if (GC_print_stats) {
        GC_log_printf("Mark stack overflow; current size = %lu entries\n",
                      (unsigned long)GC_mark_stack_size);
    }
    return(msp - GC_MARK_STACK_DISCARDS);
}
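/* Illustrative sketch (not part of the original file): where the mark  */
/* descriptors interpreted by GC_mark_from below come from.  A plain    */
/* GC_DS_LENGTH descriptor is just the object size in bytes; a client   */
/* can build a GC_DS_BITMAP descriptor with the public gc_typed.h       */
/* helpers.  "struct two_ptrs" is a hypothetical layout.                */
#if 0
# include "gc_typed.h"
  struct two_ptrs { void *p; void *q; GC_word count; };

  static GC_descr make_two_ptrs_descr(void)
  {
    GC_word bm[GC_BITMAP_SIZE(struct two_ptrs)] = {0};

    GC_set_bit(bm, GC_WORD_OFFSET(struct two_ptrs, p));
    GC_set_bit(bm, GC_WORD_OFFSET(struct two_ptrs, q));
    return GC_make_descriptor(bm, GC_WORD_LEN(struct two_ptrs));
  }
#endif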
/*
 * Mark objects pointed to by the regions described by
 * mark stack entries between mark_stack and mark_stack_top,
 * inclusive.  Assumes the upper limit of a mark stack entry
 * is never 0.  A mark stack entry never has size 0.
 * We try to traverse on the order of a hblk of memory before we return.
 * Caller is responsible for calling this until the mark stack is empty.
 * Note that this is the most performance critical routine in the
 * collector.  Hence it contains all sorts of ugly hacks to speed
 * things up.  In particular, we avoid procedure calls on the common
 * path, we take advantage of peculiarities of the mark descriptor
 * encoding, we optionally maintain a cache for the block address to
 * header mapping, we prefetch when an object is "grayed", etc.
 */
GC_INNER mse * GC_mark_from(mse *mark_stack_top, mse *mark_stack,
                            mse *mark_stack_limit)
{
  signed_word credit = HBLKSIZE;  /* Remaining credit for marking work  */
  ptr_t current_p;      /* Pointer to current candidate ptr.            */
  word current;         /* Candidate pointer.                           */
  ptr_t limit;          /* (Incl) limit of current candidate range.     */
  word descr;
  ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
  ptr_t least_ha = GC_least_plausible_heap_addr;
  DECLARE_HDR_CACHE;

# define SPLIT_RANGE_WORDS 128  /* Must be power of 2.          */

  GC_objects_are_marked = TRUE;
  INIT_HDR_CACHE;
# ifdef OS2 /* Use untweaked version to circumvent compiler problem */
  while (mark_stack_top >= mark_stack && credit >= 0)
# else
  while ((((ptr_t)mark_stack_top - (ptr_t)mark_stack) | credit) >= 0)
# endif
  {
    current_p = mark_stack_top -> mse_start;
    descr = mark_stack_top -> mse_descr;
  retry:
    /* current_p and descr describe the current object.                 */
    /* *mark_stack_top is vacant.                                       */
    /* The following is 0 only for small objects described by a simple  */
    /* length descriptor.  For many applications this is the common     */
    /* case, so we try to detect it quickly.                            */
    if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | GC_DS_TAGS)) {
      word tag = descr & GC_DS_TAGS;

      switch(tag) {
        case GC_DS_LENGTH:
          /* Large length.                                              */
          /* Process part of the range to avoid pushing too much on the */
          /* stack.                                                     */
          GC_ASSERT(descr < (word)GC_greatest_plausible_heap_addr
                            - (word)GC_least_plausible_heap_addr);
#         ifdef ENABLE_TRACE
            if (GC_trace_addr >= current_p
                && GC_trace_addr < current_p + descr) {
              GC_log_printf("GC:%u Large section; start %p len %lu\n",
                            (unsigned)GC_gc_no, current_p,
                            (unsigned long)descr);
            }
#         endif /* ENABLE_TRACE */
#         ifdef PARALLEL_MARK
#           define SHARE_BYTES 2048
            if (descr > SHARE_BYTES && GC_parallel
                && mark_stack_top < mark_stack_limit - 1) {
              int new_size = (descr/2) & ~(sizeof(word)-1);

              mark_stack_top -> mse_start = current_p;
              mark_stack_top -> mse_descr = new_size + sizeof(word);
                                        /* makes sure we handle         */
                                        /* misaligned pointers.         */
              mark_stack_top++;
#             ifdef ENABLE_TRACE
                if (GC_trace_addr >= current_p
                    && GC_trace_addr < current_p + descr) {
                  GC_log_printf("GC:%u splitting (parallel) %p at %p\n",
                                (unsigned)GC_gc_no, current_p,
                                current_p + new_size);
                }
#             endif /* ENABLE_TRACE */
              current_p += new_size;
              descr -= new_size;
              goto retry;
            }
#         endif /* PARALLEL_MARK */
          mark_stack_top -> mse_start =
                limit = current_p + WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
          mark_stack_top -> mse_descr =
                descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
#         ifdef ENABLE_TRACE
            if (GC_trace_addr >= current_p
                && GC_trace_addr < current_p + descr) {
              GC_log_printf("GC:%u splitting %p at %p\n",
                            (unsigned)GC_gc_no, current_p, limit);
            }
#         endif /* ENABLE_TRACE */
          /* Make sure that pointers overlapping the two ranges are     */
          /* considered.                                                */
          limit += sizeof(word) - ALIGNMENT;
          break;

        case GC_DS_BITMAP:
#         ifdef ENABLE_TRACE
            if (GC_trace_addr >= current_p
                && GC_trace_addr < current_p + WORDS_TO_BYTES(WORDSZ-2)) {
              GC_log_printf("GC:%u Tracing from %p bitmap descr %lu\n",
                            (unsigned)GC_gc_no, current_p,
                            (unsigned long)descr);
            }
#         endif /* ENABLE_TRACE */
          descr &= ~GC_DS_TAGS;
          credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
          while (descr != 0) {
            if ((signed_word)descr < 0) {
              current = *(word *)current_p;
              FIXUP_POINTER(current);
              if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
                PREFETCH((ptr_t)current);
#               ifdef ENABLE_TRACE
                  if (GC_trace_addr == current_p) {
                    GC_log_printf("GC:%u Considering(3) %p -> %p\n",
                                  (unsigned)GC_gc_no, current_p,
                                  (ptr_t)current);
                  }
#               endif /* ENABLE_TRACE */
                PUSH_CONTENTS((ptr_t)current, mark_stack_top,
                              mark_stack_limit, current_p, exit1);
              }
            }
            descr <<= 1;
            current_p += sizeof(word);
          }
          continue;
        case GC_DS_PROC:
#         ifdef ENABLE_TRACE
            if (GC_trace_addr >= current_p
                && GC_base(current_p) != 0
                && GC_base(current_p) == GC_base(GC_trace_addr)) {
              GC_log_printf("GC:%u Tracing from %p proc descr %lu\n",
                            (unsigned)GC_gc_no, current_p,
                            (unsigned long)descr);
            }
#         endif /* ENABLE_TRACE */
          credit -= GC_PROC_BYTES;
          mark_stack_top = (*PROC(descr))
                              ((word *)current_p, mark_stack_top,
                               mark_stack_limit, ENV(descr));
          continue;
        case GC_DS_PER_OBJECT:
          if ((signed_word)descr >= 0) {
            /* Descriptor is in the object.     */
            descr = *(word *)(current_p + descr - GC_DS_PER_OBJECT);
          } else {
            /* Descriptor is in type descriptor pointed to by first     */
            /* word in object.                                          */
            ptr_t type_descr = *(ptr_t *)current_p;
            /* type_descr is either a valid pointer to the descriptor   */
            /* structure, or this object was on a free list.  If it     */
            /* was anything but the last object on the free list, we    */
            /* will misinterpret the next object on the free list as    */
            /* the type descriptor, and get a 0 GC descriptor, which    */
            /* is ideal.  Unfortunately, we need to check for the last  */
            /* object case explicitly.                                  */
            if (0 == type_descr) {
                /* Rarely executed.     */
                mark_stack_top--;
                continue;
            }
            descr = *(word *)(type_descr
                              - (descr + (GC_INDIR_PER_OBJ_BIAS
                                          - GC_DS_PER_OBJECT)));
          }
          if (0 == descr) {
              /* Can happen either because we generated a 0 descriptor  */
              /* or we saw a pointer to a free object.                  */
              mark_stack_top--;
              continue;
          }
          goto retry;

        default:
          /* Can't happen; all tag values are handled above. */
          limit = 0; /* initialized to prevent warning. */
      }
    } else /* Small object with length descriptor */ {
      mark_stack_top--;
      limit = current_p + (word)descr;
    }
#   ifdef ENABLE_TRACE
      if (GC_trace_addr >= current_p
          && GC_trace_addr < limit) {
        GC_log_printf("GC:%u Tracing from %p len %lu\n",
                      (unsigned)GC_gc_no, current_p, (unsigned long)descr);
      }
#   endif /* ENABLE_TRACE */
    /* The simple case in which we're scanning a range. */
    GC_ASSERT(!((word)current_p & (ALIGNMENT-1)));
    credit -= limit - current_p;
    limit -= sizeof(word);
    {
#     define PREF_DIST 4

#     ifndef SMALL_CONFIG
        word deferred;

        /* Try to prefetch the next pointer to be examined asap.        */
        /* Empirically, this also seems to help slightly without        */
        /* prefetches, at least on linux/X86.  Presumably this loop     */
        /* ends up with less register pressure, and gcc thus ends up    */
        /* generating slightly better code.  Overall gcc code quality   */
        /* for this loop is still not great.                            */
        for(;;) {
          PREFETCH(limit - PREF_DIST*CACHE_LINE_SIZE);
          GC_ASSERT(limit >= current_p);
          deferred = *(word *)limit;
          FIXUP_POINTER(deferred);
          limit -= ALIGNMENT;
          if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
            PREFETCH((ptr_t)deferred);
            break;
          }
          if (current_p > limit) goto next_object;
          /* Unroll once, so we don't do too many of the prefetches     */
          /* based on limit.                                            */
          deferred = *(word *)limit;
          FIXUP_POINTER(deferred);
          limit -= ALIGNMENT;
          if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
            PREFETCH((ptr_t)deferred);
            break;
          }
          if (current_p > limit) goto next_object;
        }
#     endif
      while (current_p <= limit) {
        /* Empirically, unrolling this loop doesn't help a lot. */
        /* Since PUSH_CONTENTS expands to a lot of code,        */
        /* we don't.                                            */
        current = *(word *)current_p;
        FIXUP_POINTER(current);
        PREFETCH(current_p + PREF_DIST*CACHE_LINE_SIZE);
        if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
          /* Prefetch the contents of the object we just pushed.  It's  */
          /* likely we will need them soon.                             */
          PREFETCH((ptr_t)current);
#         ifdef ENABLE_TRACE
            if (GC_trace_addr == current_p) {
              GC_log_printf("GC:%u Considering(1) %p -> %p\n",
                            (unsigned)GC_gc_no, current_p, (ptr_t)current);
            }
#         endif /* ENABLE_TRACE */
          PUSH_CONTENTS((ptr_t)current, mark_stack_top,
                        mark_stack_limit, current_p, exit2);
        }
        current_p += ALIGNMENT;
      }
#     ifndef SMALL_CONFIG
        /* We still need to mark the entry we previously prefetched.    */
        /* We already know that it passes the preliminary pointer       */
        /* validity test.                                               */
#       ifdef ENABLE_TRACE
          if (GC_trace_addr == current_p) {
            GC_log_printf("GC:%u Considering(2) %p -> %p\n",
                          (unsigned)GC_gc_no, current_p, (ptr_t)deferred);
          }
#       endif /* ENABLE_TRACE */
        PUSH_CONTENTS((ptr_t)deferred, mark_stack_top,
                      mark_stack_limit, current_p, exit4);
        next_object:;
#     endif
    }
  }
  return mark_stack_top;
}
#ifdef PARALLEL_MARK

STATIC GC_bool GC_help_wanted = FALSE;  /* Protected by mark lock       */
STATIC unsigned GC_helper_count = 0;    /* Number of running helpers.   */
                                        /* Protected by mark lock       */
STATIC unsigned GC_active_count = 0;    /* Number of active helpers.    */
                                        /* Protected by mark lock       */
                                        /* May increase and decrease    */
                                        /* within each mark cycle.  But */
                                        /* once it returns to 0, it     */
                                        /* stays zero for the cycle.    */

GC_INNER word GC_mark_no = 0;

#define LOCAL_MARK_STACK_SIZE HBLKSIZE
        /* Under normal circumstances, this is big enough to guarantee  */
        /* we don't overflow half of it in a single call to             */
        /* GC_mark_from.                                                */
/* Steal mark stack entries starting at mse low into mark stack local   */
/* until we either steal mse high, or we have max entries.              */
/* Return a pointer to the top of the local mark stack.                 */
/* *next is replaced by a pointer to the next unscanned mark stack      */
/* entry.                                                               */
STATIC mse * GC_steal_mark_stack(mse * low, mse * high, mse * local,
                                 unsigned max, mse **next)
{
    mse *p;
    mse *top = local - 1;
    unsigned i = 0;

    GC_ASSERT(high >= low-1 && (word)(high - low + 1) <= GC_mark_stack_size);
    for (p = low; p <= high && i <= max; ++p) {
        word descr = AO_load((volatile AO_t *) &(p -> mse_descr));
        if (descr != 0) {
            /* Must be ordered after read of descr: */
            AO_store_release_write((volatile AO_t *) &(p -> mse_descr), 0);
            /* More than one thread may get this entry, but that's only */
            /* a minor performance problem.                             */
            ++top;
            top -> mse_descr = descr;
            top -> mse_start = p -> mse_start;
            GC_ASSERT((top -> mse_descr & GC_DS_TAGS) != GC_DS_LENGTH ||
                      top -> mse_descr < (word)GC_greatest_plausible_heap_addr
                                         - (word)GC_least_plausible_heap_addr);
            /* If this is a big object, count it as                     */
            /* size/256 + 1 objects.                                    */
            ++i;
            if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) i += (int)(descr >> 8);
        }
    }
    *next = p;
    return top;
}
/* Copy back a local mark stack.        */
/* low and high are inclusive bounds.   */
STATIC void GC_return_mark_stack(mse * low, mse * high)
{
    mse * my_top;
    mse * my_start;
    size_t stack_size;

    if (high < low) return;
    stack_size = high - low + 1;
    GC_acquire_mark_lock();
    my_top = GC_mark_stack_top; /* Concurrent modification impossible. */
    my_start = my_top + 1;
    if (my_start - GC_mark_stack + stack_size > GC_mark_stack_size) {
      if (GC_print_stats) {
          GC_log_printf("No room to copy back mark stack\n");
      }
      GC_mark_state = MS_INVALID;
      GC_mark_stack_too_small = TRUE;
      /* We drop the local mark stack.  We'll fix things later. */
    } else {
      BCOPY(low, my_start, stack_size * sizeof(mse));
      GC_ASSERT((mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top))
                == my_top);
      AO_store_release_write((volatile AO_t *)(&GC_mark_stack_top),
                             (AO_t)(my_top + stack_size));
            /* Ensures visibility of previously written stack contents. */
    }
    GC_release_mark_lock();
    GC_notify_all_marker();
}
/* Mark from the local mark stack.              */
/* On return, the local mark stack is empty.    */
/* But this may be achieved by copying the      */
/* local mark stack back into the global one.   */
STATIC void GC_do_local_mark(mse *local_mark_stack, mse *local_top)
{
    unsigned n;
#   define N_LOCAL_ITERS 1

#   ifdef GC_ASSERTIONS
      /* Make sure we don't hold mark lock. */
      GC_acquire_mark_lock();
      GC_release_mark_lock();
#   endif
    for (;;) {
        for (n = 0; n < N_LOCAL_ITERS; ++n) {
            local_top = GC_mark_from(local_top, local_mark_stack,
                                     local_mark_stack + LOCAL_MARK_STACK_SIZE);
            if (local_top < local_mark_stack) return;
            if (local_top - local_mark_stack >= LOCAL_MARK_STACK_SIZE/2) {
                GC_return_mark_stack(local_mark_stack, local_top);
                return;
            }
        }
        if ((mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top))
            < (mse *)AO_load(&GC_first_nonempty)
            && GC_active_count < GC_helper_count
            && local_top > local_mark_stack + 1) {
            /* Try to share the load, since the main stack is empty,    */
            /* and helper threads are waiting for a refill.             */
            /* The entries near the bottom of the stack are likely      */
            /* to require more work.  Thus we return those, even though */
            /* it's harder.                                             */
            mse * new_bottom = local_mark_stack
                               + (local_top - local_mark_stack)/2;
            GC_ASSERT(new_bottom > local_mark_stack
                      && new_bottom < local_top);
            GC_return_mark_stack(local_mark_stack, new_bottom - 1);
            memmove(local_mark_stack, new_bottom,
                    (local_top - new_bottom + 1) * sizeof(mse));
            local_top -= (new_bottom - local_mark_stack);
        }
    }
}
#define ENTRIES_TO_GET 5

GC_INNER long GC_markers = 2;   /* Normally changed by thread-library-  */
                                /* -specific code.                      */
/* Mark using the local mark stack until the global mark stack is empty */
/* and there are no active workers.  Update GC_first_nonempty to reflect */
/* progress.                                                             */
/* Caller does not hold mark lock.                                       */
/* Caller has already incremented GC_helper_count.  We decrement it,     */
/* and maintain GC_active_count.                                         */
STATIC void GC_mark_local(mse *local_mark_stack, int id)
{
    mse * my_first_nonempty;

    GC_acquire_mark_lock();
    GC_active_count++;
    my_first_nonempty = (mse *)AO_load(&GC_first_nonempty);
    GC_ASSERT((mse *)AO_load(&GC_first_nonempty) >= GC_mark_stack &&
              (mse *)AO_load(&GC_first_nonempty) <=
              (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
    if (GC_print_stats == VERBOSE)
        GC_log_printf("Starting mark helper %lu\n", (unsigned long)id);
    GC_release_mark_lock();
    for (;;) {
        size_t n_on_stack;
        unsigned n_to_get;
        mse * my_top;
        mse * local_top;
        mse * global_first_nonempty = (mse *)AO_load(&GC_first_nonempty);

        GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
                  my_first_nonempty <=
                  (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
        GC_ASSERT(global_first_nonempty >= GC_mark_stack &&
                  global_first_nonempty <=
                  (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
        if (my_first_nonempty < global_first_nonempty) {
            my_first_nonempty = global_first_nonempty;
        } else if (global_first_nonempty < my_first_nonempty) {
            AO_compare_and_swap(&GC_first_nonempty,
                                (AO_t)global_first_nonempty,
                                (AO_t)my_first_nonempty);
            /* If this fails, we just go ahead, without updating        */
            /* GC_first_nonempty.                                       */
        }
        /* Perhaps we should also update GC_first_nonempty, if it       */
        /* is less.  But that would require using atomic updates.       */
        my_top = (mse *)AO_load_acquire((volatile AO_t *)(&GC_mark_stack_top));
        n_on_stack = my_top - my_first_nonempty + 1;
        if (0 == n_on_stack) {
            GC_acquire_mark_lock();
            my_top = GC_mark_stack_top;
                /* Asynchronous modification impossible here,   */
                /* since we hold mark lock.                     */
            n_on_stack = my_top - my_first_nonempty + 1;
            if (0 == n_on_stack) {
                GC_active_count--;
                GC_ASSERT(GC_active_count <= GC_helper_count);
                /* Other markers may redeposit objects  */
                /* on the stack.                        */
                if (0 == GC_active_count) GC_notify_all_marker();
                while (GC_active_count > 0
                       && (mse *)AO_load(&GC_first_nonempty)
                          > GC_mark_stack_top) {
                    /* We will be notified if either GC_active_count    */
                    /* reaches zero, or if more objects are pushed on   */
                    /* the global mark stack.                           */
                    GC_wait_marker();
                }
                if (GC_active_count == 0 &&
                    (mse *)AO_load(&GC_first_nonempty) > GC_mark_stack_top) {
                    GC_bool need_to_notify = FALSE;
                    /* The above conditions can't be falsified while we */
                    /* hold the mark lock, since neither                */
                    /* GC_active_count nor GC_mark_stack_top can        */
                    /* change.  GC_first_nonempty can only be           */
                    /* incremented asynchronously.  Thus we know that   */
                    /* both conditions actually held simultaneously.    */
                    GC_helper_count--;
                    if (0 == GC_helper_count) need_to_notify = TRUE;
                    if (GC_print_stats == VERBOSE)
                        GC_log_printf(
                            "Finished mark helper %lu\n", (unsigned long)id);
                    GC_release_mark_lock();
                    if (need_to_notify) GC_notify_all_marker();
                    return;
                }
                /* else there's something on the stack again, or        */
                /* another helper may push something.                   */
                GC_active_count++;
                GC_ASSERT(GC_active_count > 0);
                GC_release_mark_lock();
                continue;
            } else {
                GC_release_mark_lock();
            }
        }
        n_to_get = ENTRIES_TO_GET;
        if (n_on_stack < 2 * ENTRIES_TO_GET) n_to_get = 1;
        local_top = GC_steal_mark_stack(my_first_nonempty, my_top,
                                        local_mark_stack, n_to_get,
                                        &my_first_nonempty);
        GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
                  my_first_nonempty <=
                  (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
        GC_do_local_mark(local_mark_stack, local_top);
    }
}
/* Perform Parallel mark.                       */
/* We hold the GC lock, not the mark lock.      */
/* Currently runs until the mark stack is       */
/* empty.                                       */
STATIC void GC_do_parallel_mark(void)
{
    mse local_mark_stack[LOCAL_MARK_STACK_SIZE];

    GC_acquire_mark_lock();
    GC_ASSERT(I_HOLD_LOCK());
    /* This could be a GC_ASSERT, but it seems safer to keep it on      */
    /* all the time, especially since it's cheap.                       */
    if (GC_help_wanted || GC_active_count != 0 || GC_helper_count != 0)
        ABORT("Tried to start parallel mark in bad state");
    if (GC_print_stats == VERBOSE)
        GC_log_printf("Starting marking for mark phase number %lu\n",
                      (unsigned long)GC_mark_no);
    GC_first_nonempty = (AO_t)GC_mark_stack;
    GC_active_count = 0;
    GC_helper_count = 1;
    GC_help_wanted = TRUE;
    GC_release_mark_lock();
    GC_notify_all_marker();
        /* Wake up potential helpers.   */
    GC_mark_local(local_mark_stack, 0);
    GC_acquire_mark_lock();
    GC_help_wanted = FALSE;
    /* Done; clean up.  */
    while (GC_helper_count > 0) GC_wait_marker();
    /* GC_helper_count cannot be incremented while GC_help_wanted == FALSE */
    if (GC_print_stats == VERBOSE)
        GC_log_printf(
            "Finished marking for mark phase number %lu\n",
            (unsigned long)GC_mark_no);
    GC_mark_no++;
    GC_release_mark_lock();
    GC_notify_all_marker();
}
/* Try to help out the marker, if it's running.         */
/* We do not hold the GC lock, but the requestor does.  */
GC_INNER void GC_help_marker(word my_mark_no)
{
    mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
    unsigned my_id;

    if (!GC_parallel) return;
    GC_acquire_mark_lock();
    while (GC_mark_no < my_mark_no
           || (!GC_help_wanted && GC_mark_no == my_mark_no)) {
      GC_wait_marker();
    }
    my_id = GC_helper_count;
    if (GC_mark_no != my_mark_no || my_id >= (unsigned)GC_markers) {
      /* Second test is useful only if original threads can also        */
      /* act as helpers.  Under Linux they can't.                       */
      GC_release_mark_lock();
      return;
    }
    GC_helper_count = my_id + 1;
    GC_release_mark_lock();
    GC_mark_local(local_mark_stack, my_id);
    /* GC_mark_local decrements GC_helper_count. */
}

#endif /* PARALLEL_MARK */
/* Allocate or reallocate space for mark stack of size n entries.  */
/* May silently fail.                                              */
static void alloc_mark_stack(size_t n)
{
    mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct GC_ms_entry));
#   ifdef GWW_VDB
      /* Don't recycle a stack segment obtained with the wrong flags.   */
      /* Win32 GetWriteWatch requires the right kind of memory.         */
      static GC_bool GC_incremental_at_stack_alloc = 0;
      GC_bool recycle_old = (!GC_incremental || GC_incremental_at_stack_alloc);

      GC_incremental_at_stack_alloc = GC_incremental;
#   else
#     define recycle_old 1
#   endif

    GC_mark_stack_too_small = FALSE;
    if (GC_mark_stack_size != 0) {
        if (new_stack != 0) {
          if (recycle_old) {
            /* Recycle old space */
            size_t page_offset = (word)GC_mark_stack & (GC_page_size - 1);
            size_t size = GC_mark_stack_size * sizeof(struct GC_ms_entry);
            size_t displ = 0;

            if (0 != page_offset) displ = GC_page_size - page_offset;
            size = (size - displ) & ~(GC_page_size - 1);
            if (size > 0) {
              GC_add_to_heap((struct hblk *)
                                ((word)GC_mark_stack + displ), (word)size);
            }
          }
          GC_mark_stack = new_stack;
          GC_mark_stack_size = n;
          GC_mark_stack_limit = new_stack + n;
          if (GC_print_stats) {
              GC_log_printf("Grew mark stack to %lu frames\n",
                            (unsigned long)GC_mark_stack_size);
          }
        } else {
          if (GC_print_stats) {
              GC_log_printf("Failed to grow mark stack to %lu frames\n",
                            (unsigned long)n);
          }
        }
    } else {
        if (new_stack == 0) {
            GC_err_printf("No space for mark stack\n");
            EXIT();
        }
        GC_mark_stack = new_stack;
        GC_mark_stack_size = n;
        GC_mark_stack_limit = new_stack + n;
    }
    GC_mark_stack_top = GC_mark_stack-1;
}
GC_INNER void GC_mark_init(void)
{
    alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
}

/*
 * Push all locations between b and t onto the mark stack.
 * b is the first location to be checked. t is one past the last
 * location to be checked.
 * Should only be used if there is no possibility of mark stack
 * overflow.
 */
GC_INNER void GC_push_all(ptr_t bottom, ptr_t top)
{
    register word length;

    bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
    if (top == 0 || bottom == top) return;

    GC_mark_stack_top++;
    if (GC_mark_stack_top >= GC_mark_stack_limit) {
        ABORT("unexpected mark stack overflow");
    }
    length = top - bottom;
#   if GC_DS_TAGS > ALIGNMENT - 1
      length += GC_DS_TAGS;
      length &= ~GC_DS_TAGS;
#   endif
    GC_mark_stack_top -> mse_start = bottom;
    GC_mark_stack_top -> mse_descr = length;
}
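/* Illustrative sketch (not part of the original file): a caller inside */
/* the collector pushes an entire root range as one mark stack entry,   */
/* e.g. when pushing a data segment.  The segment bounds below are      */
/* hypothetical.                                                        */
#if 0
  extern char my_seg_start[], my_seg_end[];     /* hypothetical bounds */

  static void push_my_segment(void)
  {
    GC_push_all((ptr_t)my_seg_start, (ptr_t)my_seg_end);
  }
#endif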
#ifndef GC_DISABLE_INCREMENTAL

/*
 * Analogous to the above, but push only those pages h with
 * dirty_fn(h) != 0.  We use push_fn to actually push the block.
 * Used both to selectively push dirty pages, or to push a block
 * in piecemeal fashion, to allow for more marking concurrency.
 * Will not overflow mark stack if push_fn pushes a small fixed number
 * of entries.  (This is invoked only if push_fn pushes a single entry,
 * or if it marks each object before pushing it, thus ensuring progress
 * in the event of a stack overflow.)
 */
STATIC void GC_push_selected(ptr_t bottom, ptr_t top,
                             int (*dirty_fn)(struct hblk *),
                             void (*push_fn)(ptr_t, ptr_t))
{
    struct hblk * h;

    bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
    if (top == 0 || bottom == top) return;

    h = HBLKPTR(bottom + HBLKSIZE);
    if (top <= (ptr_t) h) {
        if ((*dirty_fn)(h-1)) {
            (*push_fn)(bottom, top);
        }
        return;
    }
    if ((*dirty_fn)(h-1)) {
        (*push_fn)(bottom, (ptr_t)h);
    }

    while ((ptr_t)(h+1) <= top) {
        if ((*dirty_fn)(h)) {
            if ((word)(GC_mark_stack_top - GC_mark_stack)
                > 3 * GC_mark_stack_size / 4) {
                /* Danger of mark stack overflow */
                (*push_fn)((ptr_t)h, top);
                return;
            } else {
                (*push_fn)((ptr_t)h, (ptr_t)(h+1));
            }
        }
        h++;
    }

    if ((ptr_t)h != top) {
        if ((*dirty_fn)(h)) {
            (*push_fn)((ptr_t)h, top);
        }
    }
    if (GC_mark_stack_top >= GC_mark_stack_limit) {
        ABORT("unexpected mark stack overflow");
    }
}
#ifdef PROC_VDB
  GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk *h);
                        /* Could the page contain valid heap pointers?  */
#endif

GC_INNER void GC_push_conditional(ptr_t bottom, ptr_t top, GC_bool all)
{
    if (all) {
      if (GC_dirty_maintained) {
#       ifdef PROC_VDB
          /* Pages that were never dirtied cannot contain pointers.     */
          GC_push_selected(bottom, top, GC_page_was_ever_dirty,
                           GC_push_all);
#       else
          GC_push_all(bottom, top);
#       endif
      } else {
        GC_push_all(bottom, top);
      }
    } else {
      GC_push_selected(bottom, top, GC_page_was_dirty, GC_push_all);
    }
}
#endif /* !GC_DISABLE_INCREMENTAL */
#if defined(MSWIN32) || defined(MSWINCE)
  void __cdecl GC_push_one(word p)
#else
  void GC_push_one(word p)
#endif
{
    GC_PUSH_ONE_STACK(p, MARKED_FROM_REGISTER);
}
GC_API struct GC_ms_entry * GC_CALL GC_mark_and_push(void *obj,
                                                mse *mark_stack_ptr,
                                                mse *mark_stack_limit,
                                                void **src)
{
    hdr * hhdr;

    PREFETCH(obj);
    GET_HDR(obj, hhdr);
    if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr),FALSE)) {
      if (GC_all_interior_pointers) {
        hhdr = GC_find_header(GC_base(obj));
        if (hhdr == 0) {
          GC_ADD_TO_BLACK_LIST_NORMAL(obj, (ptr_t)src);
          return mark_stack_ptr;
        }
      } else {
        GC_ADD_TO_BLACK_LIST_NORMAL(obj, (ptr_t)src);
        return mark_stack_ptr;
      }
    }
    if (EXPECT(HBLK_IS_FREE(hhdr),0)) {
        GC_ADD_TO_BLACK_LIST_NORMAL(obj, (ptr_t)src);
        return mark_stack_ptr;
    }

    PUSH_CONTENTS_HDR(obj, mark_stack_ptr /* modified */, mark_stack_limit,
                      (ptr_t)src, was_marked, hhdr, TRUE);
 was_marked:
    return mark_stack_ptr;
}
/* Mark and push (i.e. gray) a single object p onto the main    */
/* mark stack.  Consider p to be valid if it is an interior     */
/* pointer.                                                     */
/* The object p has passed a preliminary pointer validity       */
/* test, but we do not definitely know whether it is valid.     */
/* Mark bits are NOT atomically updated.  Thus this must be the */
/* only thread setting them.                                    */
# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
    GC_INNER void GC_mark_and_push_stack(ptr_t p, ptr_t source)
# else
    GC_INNER void GC_mark_and_push_stack(ptr_t p)
#   define source ((ptr_t)0)
# endif
{
    hdr * hhdr;
    ptr_t r = p;

    PREFETCH(p);
    GET_HDR(p, hhdr);
    if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr),FALSE)) {
        if (hhdr != 0) {
          r = GC_base(p);
          hhdr = HDR(r);
        }
        if (hhdr == 0) {
            GC_ADD_TO_BLACK_LIST_STACK(p, source);
            return;
        }
    }
    if (EXPECT(HBLK_IS_FREE(hhdr),0)) {
        GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
        return;
    }
#   if defined(MANUAL_VDB) && defined(THREADS)
      /* Pointer is on the stack.  We may have dirtied the object       */
      /* it points to, but not yet have called GC_dirty();              */
      GC_dirty(p);      /* Implicitly affects entire object.            */
#   endif
    PUSH_CONTENTS_HDR(r, GC_mark_stack_top, GC_mark_stack_limit,
                      source, mark_and_push_exit, hhdr, FALSE);
  mark_and_push_exit: ;
    /* We silently ignore pointers to near the end of a block,  */
    /* which is very mildly suboptimal.                         */
    /* FIXME: We should probably add a header word to address   */
    /* this.                                                    */
}
# undef source
#ifdef TRACE_BUF

# define TRACE_ENTRIES 1000

struct trace_entry {
    char * kind;
    word gc_no;
    word bytes_allocd;
    word arg1;
    word arg2;
} GC_trace_buf[TRACE_ENTRIES];

int GC_trace_buf_ptr = 0;

void GC_add_trace_entry(char *kind, word arg1, word arg2)
{
    GC_trace_buf[GC_trace_buf_ptr].kind = kind;
    GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
    GC_trace_buf[GC_trace_buf_ptr].bytes_allocd = GC_bytes_allocd;
    GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
    GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
    GC_trace_buf_ptr++;
    if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
}

void GC_print_trace(word gc_no, GC_bool lock)
{
    int i;
    struct trace_entry *p;

    if (lock) LOCK();
    for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
        if (i < 0) i = TRACE_ENTRIES-1;
        p = GC_trace_buf + i;
        if (p -> gc_no < gc_no || p -> kind == 0) {
            if (lock) UNLOCK();
            return;
        }
        printf("Trace:%s (gc:%u,bytes:%lu) 0x%X, 0x%X\n",
               p -> kind, (unsigned)p -> gc_no,
               (unsigned long)p -> bytes_allocd,
               (p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
    }
    printf("Trace incomplete\n");
    if (lock) UNLOCK();
}

#endif /* TRACE_BUF */
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans the entire region immediately, in case the contents
 * change.
 */
GC_INNER void GC_push_all_eager(ptr_t bottom, ptr_t top)
{
    word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
    register word *p;
    register word q;
    register word *lim;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    if (top == 0) return;
    /* check all pointers in range and push if they appear      */
    /* to be valid.                                             */
      lim = t - 1 /* longword */;
      for (p = b; p <= lim; p = (word *)(((ptr_t)p) + ALIGNMENT)) {
        q = *p;
        GC_PUSH_ONE_STACK(q, p);
      }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}
GC_INNER void GC_push_all_stack(ptr_t bottom, ptr_t top)
{
# if defined(THREADS) && defined(MPROTECT_VDB)
    GC_push_all_eager(bottom, top);
# else
    if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
      GC_push_all(bottom, top);
    } else {
      GC_push_all_eager(bottom, top);
    }
# endif
}
#if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES) && \
    defined(MARK_BIT_PER_GRANULE)
# if GC_GRANULE_WORDS == 1
#   define USE_PUSH_MARKED_ACCELERATORS
#   define PUSH_GRANULE(q) \
                { word qcontents = (q)[0]; \
                  GC_PUSH_ONE_HEAP(qcontents, (q)); }
# elif GC_GRANULE_WORDS == 2
#   define USE_PUSH_MARKED_ACCELERATORS
#   define PUSH_GRANULE(q) \
                { word qcontents = (q)[0]; \
                  GC_PUSH_ONE_HEAP(qcontents, (q)); \
                  qcontents = (q)[1]; \
                  GC_PUSH_ONE_HEAP(qcontents, (q)+1); }
# elif GC_GRANULE_WORDS == 4
#   define USE_PUSH_MARKED_ACCELERATORS
#   define PUSH_GRANULE(q) \
                { word qcontents = (q)[0]; \
                  GC_PUSH_ONE_HEAP(qcontents, (q)); \
                  qcontents = (q)[1]; \
                  GC_PUSH_ONE_HEAP(qcontents, (q)+1); \
                  qcontents = (q)[2]; \
                  GC_PUSH_ONE_HEAP(qcontents, (q)+2); \
                  qcontents = (q)[3]; \
                  GC_PUSH_ONE_HEAP(qcontents, (q)+3); }
# endif
#endif
#ifdef USE_PUSH_MARKED_ACCELERATORS

/* Push all objects reachable from marked objects in the given block */
/* containing objects of size 1 granule.                             */
STATIC void GC_push_marked1(struct hblk *h, hdr *hhdr)
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    word *p;
    word *plim;
    word *q;
    word mark_word;

    /* Allow registers to be used for some frequently accessed  */
    /* global variables.  Otherwise aliasing issues are likely  */
    /* to prevent that.                                         */
    ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    ptr_t least_ha = GC_least_plausible_heap_addr;
    mse * mark_stack_top = GC_mark_stack_top;
    mse * mark_stack_limit = GC_mark_stack_limit;

#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
        while (p < plim) {
            mark_word = *mark_word_addr++;
            q = p;
            while(mark_word != 0) {
              if (mark_word & 1) {
                  PUSH_GRANULE(q);
              }
              q += GC_GRANULE_WORDS;
              mark_word >>= 1;
            }
            p += WORDSZ*GC_GRANULE_WORDS;
        }

#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit

    GC_mark_stack_top = mark_stack_top;
}
#ifndef UNALIGNED_PTRS

/* Push all objects reachable from marked objects in the given block */
/* of size 2 (granules) objects.                                     */
STATIC void GC_push_marked2(struct hblk *h, hdr *hhdr)
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    word *p;
    word *plim;
    word *q;
    word mark_word;

    ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    ptr_t least_ha = GC_least_plausible_heap_addr;
    mse * mark_stack_top = GC_mark_stack_top;
    mse * mark_stack_limit = GC_mark_stack_limit;

#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
        while (p < plim) {
            mark_word = *mark_word_addr++;
            q = p;
            while(mark_word != 0) {
              if (mark_word & 1) {
                  PUSH_GRANULE(q);
                  PUSH_GRANULE(q + GC_GRANULE_WORDS);
              }
              q += 2 * GC_GRANULE_WORDS;
              mark_word >>= 2;
            }
            p += WORDSZ*GC_GRANULE_WORDS;
        }

#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit

    GC_mark_stack_top = mark_stack_top;
}
# if GC_GRANULE_WORDS < 4

/* Push all objects reachable from marked objects in the given block */
/* of size 4 (granules) objects.                                     */
/* There is a risk of mark stack overflow here.  But we handle that. */
/* And only unmarked objects get pushed, so it's not very likely.    */
STATIC void GC_push_marked4(struct hblk *h, hdr *hhdr)
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    word *p;
    word *plim;
    word *q;
    word mark_word;

    ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    ptr_t least_ha = GC_least_plausible_heap_addr;
    mse * mark_stack_top = GC_mark_stack_top;
    mse * mark_stack_limit = GC_mark_stack_limit;

#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
        while (p < plim) {
            mark_word = *mark_word_addr++;
            q = p;
            while(mark_word != 0) {
              if (mark_word & 1) {
                  PUSH_GRANULE(q);
                  PUSH_GRANULE(q + GC_GRANULE_WORDS);
                  PUSH_GRANULE(q + 2*GC_GRANULE_WORDS);
                  PUSH_GRANULE(q + 3*GC_GRANULE_WORDS);
              }
              q += 4 * GC_GRANULE_WORDS;
              mark_word >>= 4;
            }
            p += WORDSZ*GC_GRANULE_WORDS;
        }

#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit

    GC_mark_stack_top = mark_stack_top;
}

# endif /* GC_GRANULE_WORDS < 4 */

#endif /* UNALIGNED_PTRS */

#endif /* USE_PUSH_MARKED_ACCELERATORS */
/* Push all objects reachable from marked objects in the given block */
STATIC void GC_push_marked(struct hblk *h, hdr *hhdr)
{
    size_t sz = hhdr -> hb_sz;
    word descr = hhdr -> hb_descr;
    ptr_t p;
    word bit_no;
    ptr_t lim;
    mse * GC_mark_stack_top_reg;
    mse * mark_stack_limit = GC_mark_stack_limit;

    /* Some quick shortcuts: */
        if ((0 | GC_DS_LENGTH) == descr) return;
        if (GC_block_empty(hhdr)/* nothing marked */) return;
    GC_n_rescuing_pages++;
    GC_objects_are_marked = TRUE;
    if (sz > MAXOBJBYTES) {
        lim = h -> hb_body;
    } else {
        lim = (h + 1)->hb_body - sz;
    }

    switch(BYTES_TO_GRANULES(sz)) {
#   if defined(USE_PUSH_MARKED_ACCELERATORS)
     case 1:
       GC_push_marked1(h, hhdr);
       break;
#    if !defined(UNALIGNED_PTRS)
       case 2:
         GC_push_marked2(h, hhdr);
         break;
#     if GC_GRANULE_WORDS < 4
       case 4:
         GC_push_marked4(h, hhdr);
         break;
#     endif
#    endif
#   endif
     default:
      GC_mark_stack_top_reg = GC_mark_stack_top;
      for (p = h -> hb_body, bit_no = 0; p <= lim;
           p += sz, bit_no += MARK_BIT_OFFSET(sz)) {
         if (mark_bit_from_hdr(hhdr, bit_no)) {
           /* Mark from fields inside the object. */
           PUSH_OBJ(p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
         }
      }
      GC_mark_stack_top = GC_mark_stack_top_reg;
    }
}
#ifndef GC_DISABLE_INCREMENTAL
  /* Test whether any page in the given block is dirty. */
  STATIC GC_bool GC_block_was_dirty(struct hblk *h, hdr *hhdr)
  {
    size_t sz = hhdr -> hb_sz;

    if (sz <= MAXOBJBYTES) {
         return(GC_page_was_dirty(h));
    } else {
         ptr_t p = (ptr_t)h;
         while (p < (ptr_t)h + sz) {
           if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
           p += HBLKSIZE;
         }
         return(FALSE);
    }
  }
#endif /* !GC_DISABLE_INCREMENTAL */
/* Similar to GC_push_marked, but skip over unallocated blocks  */
/* and return address of next plausible block.                  */
STATIC struct hblk * GC_push_next_marked(struct hblk *h)
{
    hdr * hhdr = HDR(h);

    if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr) || HBLK_IS_FREE(hhdr), FALSE)) {
      h = GC_next_used_block(h);
      if (h == 0) return(0);
      hhdr = GC_find_header((ptr_t)h);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#ifndef GC_DISABLE_INCREMENTAL
  /* Identical to above, but mark only from dirty pages. */
  STATIC struct hblk * GC_push_next_marked_dirty(struct hblk *h)
  {
    hdr * hhdr = HDR(h);

    if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
    for (;;) {
        if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr)
                   || HBLK_IS_FREE(hhdr), FALSE)) {
          h = GC_next_used_block(h);
          if (h == 0) return(0);
          hhdr = GC_find_header((ptr_t)h);
        }
#       ifdef STUBBORN_ALLOC
          if (hhdr -> hb_obj_kind == STUBBORN) {
            if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
              break;
            }
          } else {
            if (GC_block_was_dirty(h, hhdr)) break;
          }
#       else
          if (GC_block_was_dirty(h, hhdr)) break;
#       endif
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
        hhdr = HDR(h);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
  }
#endif /* !GC_DISABLE_INCREMENTAL */
/* Similar to above, but for uncollectable pages.  Needed since we      */
/* do not clear marks for such pages, even for full collections.        */
STATIC struct hblk * GC_push_next_marked_uncollectable(struct hblk *h)
{
    hdr * hhdr = HDR(h);

    for (;;) {
        if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr)
                   || HBLK_IS_FREE(hhdr), FALSE)) {
          h = GC_next_used_block(h);
          if (h == 0) return(0);
          hhdr = GC_find_header((ptr_t)h);
        }
        if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
        hhdr = HDR(h);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}