3 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
4 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
5 * Copyright (c) 2000 by Hewlett-Packard Company. All rights reserved.
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
21 # include "private/gc_pmark.h"
23 #if defined(MSWIN32) && defined(__GNUC__)
27 /* We put this here to minimize the risk of inlining. */
30 void GC_noop(void *p, ...) {}
35 /* Single argument version, robust against whole program analysis. */
38 static volatile word sink;
43 /* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */
45 unsigned GC_n_mark_procs = GC_RESERVED_MARK_PROCS;
47 /* Initialize GC_obj_kinds and the standard free lists properly. */
48 /* This must be done statically since they may be accessed before */
49 /* GC_init is called. */
50 /* It's done here, since we need to deal with mark descriptors. */
51 struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
52 /* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
53 0 | GC_DS_LENGTH, FALSE, FALSE },
54 /* NORMAL */ { &GC_objfreelist[0], 0,
55 0 | GC_DS_LENGTH, /* Adjusted in GC_init_inner for EXTRA_BYTES */
56 TRUE /* add length to descr */, TRUE },
58 /* UNCOLLECTABLE */ { &GC_uobjfreelist[0], 0,
59 0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
60 # ifdef ATOMIC_UNCOLLECTABLE
62 /* AUNCOLLECTABLE */ { &GC_auobjfreelist[0], 0,
63 0 | GC_DS_LENGTH, FALSE /* add length to descr */, FALSE },
65 # ifdef STUBBORN_ALLOC
66 /*STUBBORN*/ { &GC_sobjfreelist[0], 0,
67 0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
71 # ifdef ATOMIC_UNCOLLECTABLE
72 # ifdef STUBBORN_ALLOC
73 unsigned GC_n_kinds = 5;
75 unsigned GC_n_kinds = 4;
78 # ifdef STUBBORN_ALLOC
79 unsigned GC_n_kinds = 4;
81 unsigned GC_n_kinds = 3;
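/* Illustrative sketch (not from the original source): a GC_DS_LENGTH   */
/* descriptor, as used in the kind table above, simply records how many */
/* bytes of the object the marker should scan for pointers.  The two    */
/* helpers below are hypothetical and kept under #if 0 so they do not   */
/* affect the build.                                                     */
#if 0
static word example_length_descr(size_t lb)
{
    return (word)lb | GC_DS_LENGTH;      /* low tag bits select the kind */
}

static size_t example_scan_bytes(word descr)
{
    GC_ASSERT((descr & GC_DS_TAGS) == GC_DS_LENGTH);
    return (size_t)(descr & ~(word)GC_DS_TAGS);
}
#endif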
86 # ifndef INITIAL_MARK_STACK_SIZE
87 # define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
88 /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a */
89 /* multiple of HBLKSIZE. */
90 /* The incremental collector actually likes a larger */
91 /* size, since it wants to push all marked dirty objects */
92 /* before marking anything new. Currently we let it */
93 /* grow dynamically. */
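/* Illustrative sketch (not from the original source): the sizing       */
/* constraint stated above could be expressed as a compile-time check.  */
/* The typedef name is hypothetical and the check is not compiled.      */
#if 0
typedef char example_mark_stack_size_ok
    [(INITIAL_MARK_STACK_SIZE * sizeof(mse)) % HBLKSIZE == 0 ? 1 : -1];
#endif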
97 * Limits of stack for GC_mark routine.
98 * All ranges between GC_mark_stack(incl.) and GC_mark_stack_top(incl.) still
99 * need to be marked from.
102 word GC_n_rescuing_pages; /* Number of dirty pages we marked from */
103 /* excludes ptrfree pages, etc. */
107 mse * GC_mark_stack_limit;
109 size_t GC_mark_stack_size = 0;
112 # include "atomic_ops.h"
114 mse * volatile GC_mark_stack_top;
115 /* Updated only with mark lock held, but read asynchronously. */
116 volatile AO_t GC_first_nonempty;
117 /* Lowest entry on mark stack */
118 /* that may be nonempty. */
119 /* Updated only by the initiating thread. */
122 mse * GC_mark_stack_top;
125 static struct hblk * scan_ptr;
127 mark_state_t GC_mark_state = MS_NONE;
129 GC_bool GC_mark_stack_too_small = FALSE;
131 GC_bool GC_objects_are_marked = FALSE; /* Are there collectable marked */
132 /* objects in the heap? */
134 /* Is a collection in progress? Note that this can return true in the */
135 /* nonincremental case, if a collection has been abandoned and the */
136 /* mark state is now MS_INVALID. */
137 GC_bool GC_collection_in_progress(void)
139 return(GC_mark_state != MS_NONE);
142 /* Clear all mark bits in the header. */
143 void GC_clear_hdr_marks(hdr *hhdr)
145 size_t last_bit = FINAL_MARK_BIT(hhdr -> hb_sz);
147 # ifdef USE_MARK_BYTES
148 BZERO(hhdr -> hb_marks, MARK_BITS_SZ);
149 hhdr -> hb_marks[last_bit] = 1;
151 BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
152 set_mark_bit_from_hdr(hhdr, last_bit);
154 hhdr -> hb_n_marks = 0;
157 /* Set all mark bits in the header. Used for uncollectable blocks. */
158 void GC_set_hdr_marks(hdr *hhdr)
161 size_t sz = hhdr -> hb_sz;
162 size_t n_marks = FINAL_MARK_BIT(sz);
164 # ifdef USE_MARK_BYTES
165 for (i = 0; i <= n_marks; i += MARK_BIT_OFFSET(sz)) {
166 hhdr -> hb_marks[i] = 1;
169 for (i = 0; i < divWORDSZ(n_marks + WORDSZ); ++i) {
170 hhdr -> hb_marks[i] = ONES;
173 # ifdef MARK_BIT_PER_OBJ
174 hhdr -> hb_n_marks = n_marks - 1;
176 hhdr -> hb_n_marks = HBLK_OBJS(sz);
181 * Clear all mark bits associated with block h.
184 static void clear_marks_for_block(struct hblk *h, word dummy)
186 register hdr * hhdr = HDR(h);
188 if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
189 /* Mark bit for these is cleared only once the object is */
190 /* explicitly deallocated. This either frees the block, or */
191 /* the bit is cleared once the object is on the free list. */
192 GC_clear_hdr_marks(hhdr);
195 /* Slow but general routines for setting/clearing/asking about mark bits */
196 void GC_set_mark_bit(ptr_t p)
198 struct hblk *h = HBLKPTR(p);
200 word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
202 if (!mark_bit_from_hdr(hhdr, bit_no)) {
203 set_mark_bit_from_hdr(hhdr, bit_no);
204 ++hhdr -> hb_n_marks;
208 void GC_clear_mark_bit(ptr_t p)
210 struct hblk *h = HBLKPTR(p);
212 word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
214 if (mark_bit_from_hdr(hhdr, bit_no)) {
216 clear_mark_bit_from_hdr(hhdr, bit_no);
217 n_marks = hhdr -> hb_n_marks - 1;
218 # ifdef PARALLEL_MARK
220 hhdr -> hb_n_marks = n_marks;
221 /* Don't decrement to zero. The counts are approximate due to */
222 /* concurrency issues, but we need to ensure that a count of */
223 /* zero implies an empty block. */
225 hhdr -> hb_n_marks = n_marks;
230 GC_bool GC_is_marked(ptr_t p)
232 struct hblk *h = HBLKPTR(p);
234 word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
236 return((GC_bool)mark_bit_from_hdr(hhdr, bit_no));
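/* Illustrative sketch (not from the original source): typical use of   */
/* the slow mark-bit interface above.  The pointer must address a valid */
/* object in the GC heap; the helper name is hypothetical and the code  */
/* is kept under #if 0.                                                  */
#if 0
static void example_toggle_mark(ptr_t p)
{
    if (GC_is_marked(p)) {
        GC_clear_mark_bit(p);   /* also decrements hhdr -> hb_n_marks */
    } else {
        GC_set_mark_bit(p);     /* and increments it                  */
    }
}
#endif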
241 * Clear mark bits in all allocated heap blocks. This invalidates
242 * the marker invariant, and sets GC_mark_state to reflect this.
243 * (This implicitly starts marking to reestablish the invariant.)
245 void GC_clear_marks(void)
247 GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
248 GC_objects_are_marked = FALSE;
249 GC_mark_state = MS_INVALID;
253 /* Initiate a garbage collection. Initiates a full collection if the */
254 /* mark state is invalid. */
256 void GC_initiate_gc(void)
258 if (GC_dirty_maintained) GC_read_dirty();
259 # ifdef STUBBORN_ALLOC
264 extern void GC_check_dirty();
266 if (GC_dirty_maintained) GC_check_dirty();
269 GC_n_rescuing_pages = 0;
270 if (GC_mark_state == MS_NONE) {
271 GC_mark_state = MS_PUSH_RESCUERS;
272 } else if (GC_mark_state != MS_INVALID) {
273 ABORT("unexpected state");
274 } /* else this is really a full collection, and mark */
275 /* bits are invalid. */
280 static void alloc_mark_stack(size_t);
282 # if defined(MSWIN32) || defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS)
283 /* Under rare conditions, we may end up marking from nonexistent memory. */
284 /* Hence we need to be prepared to recover by running GC_mark_some */
285 /* with a suitable handler in place. */
286 # define WRAP_MARK_SOME
289 /* Perform a small amount of marking. */
290 /* We try to touch roughly a page of memory. */
291 /* Return TRUE if we just finished a mark phase. */
292 /* Cold_gc_frame is an address inside a GC frame that */
293 /* remains valid until all marking is complete. */
294 /* A zero value indicates that it's OK to miss some */
295 /* register values. */
296 /* We hold the allocation lock. In the case of */
297 /* incremental collection, the world may not be stopped.*/
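/* Illustrative sketch (not from the original source): a caller holding */
/* the allocation lock can complete a mark phase by invoking            */
/* GC_mark_some repeatedly until it reports completion.  This mirrors   */
/* how the collector proper drives marking; the helper is hypothetical  */
/* and kept under #if 0.                                                 */
#if 0
static void example_finish_mark_phase(void)
{
    word dummy;     /* its address serves as the cold GC frame          */

    while (!GC_mark_some((ptr_t)(&dummy))) {
        /* Each call performs roughly a page worth of marking work.     */
    }
}
#endif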
298 #ifdef WRAP_MARK_SOME
299 /* For win32, this is called after we establish a structured */
300 /* exception handler, in case Windows unmaps one of our root */
301 /* segments. See below. In either case, we acquire the */
302 /* allocator lock long before we get here. */
303 GC_bool GC_mark_some_inner(ptr_t cold_gc_frame)
305 GC_bool GC_mark_some(ptr_t cold_gc_frame)
308 switch(GC_mark_state) {
312 case MS_PUSH_RESCUERS:
313 if (GC_mark_stack_top
314 >= GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/2) {
315 /* Go ahead and mark, even though that might cause us to */
316 /* see more marked dirty objects later on. Avoid this */
318 GC_mark_stack_too_small = TRUE;
319 MARK_FROM_MARK_STACK();
322 scan_ptr = GC_push_next_marked_dirty(scan_ptr);
324 if (GC_print_stats) {
325 GC_log_printf("Marked from %u dirty pages\n",
326 GC_n_rescuing_pages);
328 GC_push_roots(FALSE, cold_gc_frame);
329 GC_objects_are_marked = TRUE;
330 if (GC_mark_state != MS_INVALID) {
331 GC_mark_state = MS_ROOTS_PUSHED;
337 case MS_PUSH_UNCOLLECTABLE:
338 if (GC_mark_stack_top
339 >= GC_mark_stack + GC_mark_stack_size/4) {
340 # ifdef PARALLEL_MARK
341 /* Avoid this, since we don't parallelize the marker */
343 if (GC_parallel) GC_mark_stack_too_small = TRUE;
345 MARK_FROM_MARK_STACK();
348 scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
350 GC_push_roots(TRUE, cold_gc_frame);
351 GC_objects_are_marked = TRUE;
352 if (GC_mark_state != MS_INVALID) {
353 GC_mark_state = MS_ROOTS_PUSHED;
359 case MS_ROOTS_PUSHED:
360 # ifdef PARALLEL_MARK
361 /* In the incremental GC case, this currently doesn't */
362 /* quite do the right thing, since it runs to */
363 /* completion. On the other hand, starting a */
364 /* parallel marker is expensive, so perhaps it is */
365 /* the right thing? */
366 /* Eventually, incremental marking should run */
367 /* asynchronously in multiple threads, without grabbing */
368 /* the allocation lock. */
370 GC_do_parallel_mark();
371 GC_ASSERT(GC_mark_stack_top < (mse *)GC_first_nonempty);
372 GC_mark_stack_top = GC_mark_stack - 1;
373 if (GC_mark_stack_too_small) {
374 alloc_mark_stack(2*GC_mark_stack_size);
376 if (GC_mark_state == MS_ROOTS_PUSHED) {
377 GC_mark_state = MS_NONE;
384 if (GC_mark_stack_top >= GC_mark_stack) {
385 MARK_FROM_MARK_STACK();
388 GC_mark_state = MS_NONE;
389 if (GC_mark_stack_too_small) {
390 alloc_mark_stack(2*GC_mark_stack_size);
396 case MS_PARTIALLY_INVALID:
397 if (!GC_objects_are_marked) {
398 GC_mark_state = MS_PUSH_UNCOLLECTABLE;
401 if (GC_mark_stack_top >= GC_mark_stack) {
402 MARK_FROM_MARK_STACK();
405 if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
406 /* About to start a heap scan for marked objects. */
407 /* Mark stack is empty. OK to reallocate. */
408 if (GC_mark_stack_too_small) {
409 alloc_mark_stack(2*GC_mark_stack_size);
411 GC_mark_state = MS_PARTIALLY_INVALID;
413 scan_ptr = GC_push_next_marked(scan_ptr);
414 if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
415 GC_push_roots(TRUE, cold_gc_frame);
416 GC_objects_are_marked = TRUE;
417 if (GC_mark_state != MS_INVALID) {
418 GC_mark_state = MS_ROOTS_PUSHED;
423 ABORT("GC_mark_some: bad state");
429 #if defined(MSWIN32) && defined(__GNUC__)
432 EXCEPTION_REGISTRATION ex_reg;
437 static EXCEPTION_DISPOSITION mark_ex_handler(
438 struct _EXCEPTION_RECORD *ex_rec,
440 struct _CONTEXT *context,
443 if (ex_rec->ExceptionCode == STATUS_ACCESS_VIOLATION) {
444 ext_ex_regn *xer = (ext_ex_regn *)est_frame;
446 /* Unwind from the inner function assuming the standard */
447 /* function prologue. */
448 /* Assumes code has not been compiled with */
449 /* -fomit-frame-pointer. */
450 context->Esp = context->Ebp;
451 context->Ebp = *((DWORD *)context->Esp);
452 context->Esp = context->Esp - 8;
454 /* Resume execution at the "real" handler within the */
455 /* wrapper function. */
456 context->Eip = (DWORD )(xer->alt_path);
458 return ExceptionContinueExecution;
461 return ExceptionContinueSearch;
464 # endif /* __GNUC__ && MSWIN32 */
466 #ifdef GC_WIN32_THREADS
467 extern GC_bool GC_started_thread_while_stopped(void);
468 /* In win32_threads.c. Did we invalidate mark phase with an */
469 /* unexpected thread start? */
472 # ifdef WRAP_MARK_SOME
473 GC_bool GC_mark_some(ptr_t cold_gc_frame)
479 /* Windows 98 appears to asynchronously create and remove */
480 /* writable memory mappings, for reasons we haven't yet */
481 /* understood. Since we look for writable regions to */
482 /* determine the root set, we may try to mark from an */
483 /* address range that disappeared since we started the */
484 /* collection. Thus we have to recover from faults here. */
485 /* This code does not appear to be necessary for Windows */
486 /* 95/NT/2000. Note that this code should never generate */
487 /* an incremental GC write fault. */
488 /* It's conceivable that this is the same issue with */
489 /* terminating threads that we see with Linux and */
490 /* USE_PROC_FOR_LIBRARIES. */
493 ret_val = GC_mark_some_inner(cold_gc_frame);
494 } __except (GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ?
495 EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
498 # ifdef GC_WIN32_THREADS
499 /* With DllMain-based thread tracking, a thread may have */
500 /* started while we were marking. This is logically equivalent */
501 /* to the exception case; our results are invalid and we have */
502 /* to start over. This cannot be prevented since we can't */
503 /* block in DllMain. */
504 if (GC_started_thread_while_stopped()) goto handle_ex;
509 # else /* __GNUC__ */
511 /* Manually install an exception handler since GCC does */
512 /* not yet support Structured Exception Handling (SEH) on */
517 er.alt_path = &&handle_ex;
518 er.ex_reg.handler = mark_ex_handler;
519 asm volatile ("movl %%fs:0, %0" : "=r" (er.ex_reg.prev));
520 asm volatile ("movl %0, %%fs:0" : : "r" (&er));
521 ret_val = GC_mark_some_inner(cold_gc_frame);
522 /* Prevent GCC from considering the following code unreachable */
523 /* and thus eliminating it. */
524 if (er.alt_path == 0)
527 /* Uninstall the exception handler */
528 asm volatile ("mov %0, %%fs:0" : : "r" (er.ex_reg.prev));
531 # endif /* __GNUC__ */
532 # else /* !MSWIN32 */
533 /* Here we are handling the case in which /proc is used for root */
534 /* finding, and we have threads. We may find a stack for a */
535 /* thread that is in the process of exiting, and disappears */
536 /* while we are marking it. This seems extremely difficult to */
537 /* avoid otherwise. */
539 WARN("Incremental GC incompatible with /proc roots\n", 0);
540 /* I'm not sure if this could still work ... */
541 GC_setup_temporary_fault_handler();
542 if(SETJMP(GC_jmp_buf) != 0) goto handle_ex;
543 ret_val = GC_mark_some_inner(cold_gc_frame);
545 GC_reset_fault_handler();
548 # endif /* !MSWIN32 */
551 /* Exception handler starts here for all cases. */
552 if (GC_print_stats) {
553 GC_log_printf("Caught ACCESS_VIOLATION in marker. "
554 "Memory mapping disappeared.\n");
557 /* We have bad roots on the stack. Discard mark stack. */
558 /* Rescan from marked objects. Redetermine roots. */
559 GC_invalidate_mark_state();
563 goto rm_handler; /* Back to platform-specific code. */
565 #endif /* WRAP_MARK_SOME */
568 GC_bool GC_mark_stack_empty(void)
570 return(GC_mark_stack_top < GC_mark_stack);
573 void GC_invalidate_mark_state(void)
575 GC_mark_state = MS_INVALID;
576 GC_mark_stack_top = GC_mark_stack-1;
579 mse * GC_signal_mark_stack_overflow(mse *msp)
581 GC_mark_state = MS_INVALID;
582 GC_mark_stack_too_small = TRUE;
583 if (GC_print_stats) {
584 GC_log_printf("Mark stack overflow; current size = %lu entries\n",
587 return(msp - GC_MARK_STACK_DISCARDS);
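/* Illustrative sketch (an assumption about the push macros in          */
/* gc_pmark.h, not copied from them): a push operation that finds the   */
/* mark stack full reports the overflow and continues with the          */
/* truncated stack returned above.  Hypothetical helper, not compiled.  */
#if 0
static mse *example_push_entry(mse *mark_stack_top, mse *mark_stack_limit,
                               ptr_t obj_start, word obj_descr)
{
    mark_stack_top++;
    if (mark_stack_top >= mark_stack_limit) {
        mark_stack_top = GC_signal_mark_stack_overflow(mark_stack_top);
    }
    mark_stack_top -> mse_start = obj_start;
    mark_stack_top -> mse_descr = obj_descr;
    return mark_stack_top;
}
#endif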
591 * Mark objects pointed to by the regions described by
592 * mark stack entries between mark_stack and mark_stack_top,
593 * inclusive. Assumes the upper limit of a mark stack entry
594 * is never 0. A mark stack entry never has size 0.
595 * We try to traverse on the order of a hblk of memory before we return.
596 * Caller is responsible for calling this until the mark stack is empty.
597 * Note that this is the most performance critical routine in the
598 * collector. Hence it contains all sorts of ugly hacks to speed
599 * things up. In particular, we avoid procedure calls on the common
600 * path, we take advantage of peculiarities of the mark descriptor
601 * encoding, we optionally maintain a cache for the block address to
602 * header mapping, we prefetch when an object is "grayed", etc.
604 mse * GC_mark_from(mse *mark_stack_top, mse *mark_stack, mse *mark_stack_limit)
606 signed_word credit = HBLKSIZE; /* Remaining credit for marking work */
607 ptr_t current_p; /* Pointer to current candidate ptr. */
608 word current; /* Candidate pointer. */
609 ptr_t limit; /* (Incl) limit of current candidate */
612 ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
613 ptr_t least_ha = GC_least_plausible_heap_addr;
616 # define SPLIT_RANGE_WORDS 128 /* Must be power of 2. */
618 GC_objects_are_marked = TRUE;
620 # ifdef OS2 /* Use untweaked version to circumvent compiler problem */
621 while (mark_stack_top >= mark_stack && credit >= 0) {
623 while ((((ptr_t)mark_stack_top - (ptr_t)mark_stack) | credit)
626 current_p = mark_stack_top -> mse_start;
627 descr = mark_stack_top -> mse_descr;
629 /* current_p and descr describe the current object. */
630 /* *mark_stack_top is vacant. */
631 /* The following is 0 only for small objects described by a simple */
632 /* length descriptor. For many applications this is the common */
633 /* case, so we try to detect it quickly. */
634 if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | GC_DS_TAGS)) {
635 word tag = descr & GC_DS_TAGS;
640 /* Process part of the range to avoid pushing too much on the */
642 GC_ASSERT(descr < (word)GC_greatest_plausible_heap_addr
643 - (word)GC_least_plausible_heap_addr);
645 if (GC_trace_addr >= current_p
646 && GC_trace_addr < current_p + descr) {
647 GC_log_printf("GC:%d Large section; start %p len %lu\n",
648 GC_gc_no, current_p, (unsigned long) descr);
650 # endif /* ENABLE_TRACE */
651 # ifdef PARALLEL_MARK
652 # define SHARE_BYTES 2048
653 if (descr > SHARE_BYTES && GC_parallel
654 && mark_stack_top < mark_stack_limit - 1) {
655 int new_size = (descr/2) & ~(sizeof(word)-1);
656 mark_stack_top -> mse_start = current_p;
657 mark_stack_top -> mse_descr = new_size + sizeof(word);
658 /* makes sure we handle */
659 /* misaligned pointers. */
662 if (GC_trace_addr >= current_p
663 && GC_trace_addr < current_p + descr) {
664 GC_log_printf("GC:%d splitting (parallel) %p at %p\n",
665 GC_gc_no, current_p, current_p + new_size);
667 # endif /* ENABLE_TRACE */
668 current_p += new_size;
672 # endif /* PARALLEL_MARK */
673 mark_stack_top -> mse_start =
674 limit = current_p + WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
675 mark_stack_top -> mse_descr =
676 descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
678 if (GC_trace_addr >= current_p
679 && GC_trace_addr < current_p + descr) {
680 GC_log_printf("GC:%d splitting %p at %p\n",
681 GC_gc_no, current_p, limit);
683 # endif /* ENABLE_TRACE */
684 /* Make sure that pointers overlapping the two ranges are */
686 limit += sizeof(word) - ALIGNMENT;
691 if (GC_trace_addr >= current_p
692 && GC_trace_addr < current_p + WORDS_TO_BYTES(WORDSZ-2)) {
693 GC_log_printf("GC:%d Tracing from %p bitmap descr %lu\n",
694 GC_gc_no, current_p, (unsigned long) descr);
696 # endif /* ENABLE_TRACE */
697 descr &= ~GC_DS_TAGS;
698 credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
700 if ((signed_word)descr < 0) {
701 current = *(word *)current_p;
702 FIXUP_POINTER(current);
703 if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
704 PREFETCH((ptr_t)current);
706 if (GC_trace_addr == current_p) {
707 GC_log_printf("GC:%d Considering(3) %p -> %p\n",
708 GC_gc_no, current_p, (ptr_t) current);
710 # endif /* ENABLE_TRACE */
711 PUSH_CONTENTS((ptr_t)current, mark_stack_top,
712 mark_stack_limit, current_p, exit1);
716 current_p += sizeof(word);
722 if (GC_trace_addr >= current_p
723 && GC_base(current_p) != 0
724 && GC_base(current_p) == GC_base(GC_trace_addr)) {
725 GC_log_printf("GC:%d Tracing from %p proc descr %lu\n",
726 GC_gc_no, current_p, (unsigned long) descr);
728 # endif /* ENABLE_TRACE */
729 credit -= GC_PROC_BYTES;
732 ((word *)current_p, mark_stack_top,
733 mark_stack_limit, ENV(descr));
735 case GC_DS_PER_OBJECT:
736 if ((signed_word)descr >= 0) {
737 /* Descriptor is in the object. */
738 descr = *(word *)(current_p + descr - GC_DS_PER_OBJECT);
740 /* Descriptor is in type descriptor pointed to by first */
741 /* word in object. */
742 ptr_t type_descr = *(ptr_t *)current_p;
743 /* type_descr is either a valid pointer to the descriptor */
744 /* structure, or this object was on a free list. If it */
745 /* was anything but the last object on the free list, */
746 /* we will misinterpret the next object on the free list as */
747 /* the type descriptor, and get a 0 GC descriptor, which */
748 /* is ideal. Unfortunately, we need to check for the last */
749 /* object case explicitly. */
750 if (0 == type_descr) {
751 /* Rarely executed. */
755 descr = *(word *)(type_descr
756 - (descr - (GC_DS_PER_OBJECT
757 - GC_INDIR_PER_OBJ_BIAS)));
760 /* Can happen either because we generated a 0 descriptor */
761 /* or we saw a pointer to a free object. */
767 } else /* Small object with length descriptor */ {
769 limit = current_p + (word)descr;
772 if (GC_trace_addr >= current_p
773 && GC_trace_addr < limit) {
774 GC_log_printf("GC:%d Tracing from %p len %lu\n",
775 GC_gc_no, current_p, (unsigned long) descr);
777 # endif /* ENABLE_TRACE */
778 /* The simple case in which we're scanning a range. */
779 GC_ASSERT(!((word)current_p & (ALIGNMENT-1)));
780 credit -= limit - current_p;
781 limit -= sizeof(word);
785 # ifndef SMALL_CONFIG
788 /* Try to prefetch the next pointer to be examined asap. */
789 /* Empirically, this also seems to help slightly without */
790 /* prefetches, at least on linux/X86. Presumably this loop */
791 /* ends up with less register pressure, and gcc thus ends up */
792 /* generating slightly better code. Overall gcc code quality */
793 /* for this loop is still not great. */
795 PREFETCH(limit - PREF_DIST*CACHE_LINE_SIZE);
796 GC_ASSERT(limit >= current_p);
797 deferred = *(word *)limit;
798 FIXUP_POINTER(deferred);
800 if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
801 PREFETCH((ptr_t)deferred);
804 if (current_p > limit) goto next_object;
805 /* Unroll once, so we don't do too many of the prefetches */
806 /* based on limit. */
807 deferred = *(word *)limit;
808 FIXUP_POINTER(deferred);
810 if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
811 PREFETCH((ptr_t)deferred);
814 if (current_p > limit) goto next_object;
818 while (current_p <= limit) {
819 /* Empirically, unrolling this loop doesn't help a lot. */
820 /* Since PUSH_CONTENTS expands to a lot of code, */
822 current = *(word *)current_p;
823 FIXUP_POINTER(current);
824 PREFETCH(current_p + PREF_DIST*CACHE_LINE_SIZE);
825 if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
826 /* Prefetch the contents of the object we just pushed. It's */
827 /* likely we will need them soon. */
828 PREFETCH((ptr_t)current);
830 if (GC_trace_addr == current_p) {
831 GC_log_printf("GC:%d Considering(1) %p -> %p\n",
832 GC_gc_no, current_p, (ptr_t) current);
834 # endif /* ENABLE_TRACE */
835 PUSH_CONTENTS((ptr_t)current, mark_stack_top,
836 mark_stack_limit, current_p, exit2);
838 current_p += ALIGNMENT;
841 # ifndef SMALL_CONFIG
842 /* We still need to mark the entry we previously prefetched. */
843 /* We already know that it passes the preliminary pointer */
846 if (GC_trace_addr == current_p) {
847 GC_log_printf("GC:%d Considering(2) %p -> %p\n",
848 GC_gc_no, current_p, (ptr_t) deferred);
850 # endif /* ENABLE_TRACE */
851 PUSH_CONTENTS((ptr_t)deferred, mark_stack_top,
852 mark_stack_limit, current_p, exit4);
857 return mark_stack_top;
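/* Illustrative sketch (not from the original source): the caller's     */
/* contract stated in the header comment above -- keep calling          */
/* GC_mark_from until the global mark stack drains.  This is roughly    */
/* what the MARK_FROM_MARK_STACK() macro used earlier in this file is   */
/* assumed to expand to, wrapped in a loop.  Not compiled.               */
#if 0
static void example_drain_mark_stack(void)
{
    while (GC_mark_stack_top >= GC_mark_stack) {
        GC_mark_stack_top = GC_mark_from(GC_mark_stack_top, GC_mark_stack,
                                         GC_mark_stack_limit);
    }
}
#endif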
862 /* We assume we have an ANSI C Compiler. */
863 GC_bool GC_help_wanted = FALSE;
864 unsigned GC_helper_count = 0;
865 unsigned GC_active_count = 0;
868 #define LOCAL_MARK_STACK_SIZE HBLKSIZE
869 /* Under normal circumstances, this is big enough to guarantee */
870 /* we don't overflow half of it in a single call to */
874 /* Steal mark stack entries starting at mse low into mark stack local */
875 /* until we either steal mse high, or we have max entries. */
876 /* Return a pointer to the top of the local mark stack. */
877 /* *next is replaced by a pointer to the next unscanned mark stack */
879 mse * GC_steal_mark_stack(mse * low, mse * high, mse * local,
880 unsigned max, mse **next)
883 mse *top = local - 1;
886 GC_ASSERT(high >= low-1 && high - low + 1 <= GC_mark_stack_size);
887 for (p = low; p <= high && i <= max; ++p) {
888 word descr = AO_load((volatile AO_t *) &(p -> mse_descr));
890 /* Must be ordered after read of descr: */
891 AO_store_release_write((volatile AO_t *) &(p -> mse_descr), 0);
892 /* More than one thread may get this entry, but that's only */
893 /* a minor performance problem. */
895 top -> mse_descr = descr;
896 top -> mse_start = p -> mse_start;
897 GC_ASSERT((top -> mse_descr & GC_DS_TAGS) != GC_DS_LENGTH ||
898 top -> mse_descr < (ptr_t)GC_greatest_plausible_heap_addr
899 - (ptr_t)GC_least_plausible_heap_addr);
900 /* If this is a big object, count it as */
901 /* size/256 + 1 objects. */
903 if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) i += (descr >> 8);
910 /* Copy back a local mark stack. */
911 /* low and high are inclusive bounds. */
912 void GC_return_mark_stack(mse * low, mse * high)
918 if (high < low) return;
919 stack_size = high - low + 1;
920 GC_acquire_mark_lock();
921 my_top = GC_mark_stack_top; /* Concurrent modification impossible. */
922 my_start = my_top + 1;
923 if (my_start - GC_mark_stack + stack_size > GC_mark_stack_size) {
924 if (GC_print_stats) {
925 GC_log_printf("No room to copy back mark stack\n");
927 GC_mark_state = MS_INVALID;
928 GC_mark_stack_too_small = TRUE;
929 /* We drop the local mark stack. We'll fix things later. */
931 BCOPY(low, my_start, stack_size * sizeof(mse));
932 GC_ASSERT((mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top))
934 AO_store_release_write((volatile AO_t *)(&GC_mark_stack_top),
935 (AO_t)(my_top + stack_size));
936 /* Ensures visibility of previously written stack contents. */
938 GC_release_mark_lock();
939 GC_notify_all_marker();
942 /* Mark from the local mark stack. */
943 /* On return, the local mark stack is empty. */
944 /* But this may be achieved by copying the */
945 /* local mark stack back into the global one. */
946 void GC_do_local_mark(mse *local_mark_stack, mse *local_top)
949 # define N_LOCAL_ITERS 1
951 # ifdef GC_ASSERTIONS
952 /* Make sure we don't hold mark lock. */
953 GC_acquire_mark_lock();
954 GC_release_mark_lock();
957 for (n = 0; n < N_LOCAL_ITERS; ++n) {
958 local_top = GC_mark_from(local_top, local_mark_stack,
959 local_mark_stack + LOCAL_MARK_STACK_SIZE);
960 if (local_top < local_mark_stack) return;
961 if (local_top - local_mark_stack >= LOCAL_MARK_STACK_SIZE/2) {
962 GC_return_mark_stack(local_mark_stack, local_top);
966 if ((mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top))
967 < (mse *)AO_load(&GC_first_nonempty)
968 && GC_active_count < GC_helper_count
969 && local_top > local_mark_stack + 1) {
970 /* Try to share the load, since the main stack is empty, */
971 /* and helper threads are waiting for a refill. */
972 /* The entries near the bottom of the stack are likely */
973 /* to require more work. Thus we return those, even though */
975 mse * new_bottom = local_mark_stack
976 + (local_top - local_mark_stack)/2;
977 GC_ASSERT(new_bottom > local_mark_stack
978 && new_bottom < local_top);
979 GC_return_mark_stack(local_mark_stack, new_bottom - 1);
980 memmove(local_mark_stack, new_bottom,
981 (local_top - new_bottom + 1) * sizeof(mse));
982 local_top -= (new_bottom - local_mark_stack);
987 #define ENTRIES_TO_GET 5
989 long GC_markers = 2; /* Normally changed by thread-library- */
990 /* -specific code. */
992 /* Mark using the local mark stack until the global mark stack is empty */
993 /* and there are no active workers. Update GC_first_nonempty to reflect */
995 /* Caller does not hold mark lock. */
996 /* Caller has already incremented GC_helper_count. We decrement it, */
997 /* and maintain GC_active_count. */
998 void GC_mark_local(mse *local_mark_stack, int id)
1000 mse * my_first_nonempty;
1002 GC_acquire_mark_lock();
1004 my_first_nonempty = (mse *)AO_load(&GC_first_nonempty);
1005 GC_ASSERT((mse *)AO_load(&GC_first_nonempty) >= GC_mark_stack &&
1006 (mse *)AO_load(&GC_first_nonempty) <=
1007 (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
1008 if (GC_print_stats == VERBOSE)
1009 GC_log_printf("Starting mark helper %lu\n", (unsigned long)id);
1010 GC_release_mark_lock();
1016 mse * global_first_nonempty = (mse *)AO_load(&GC_first_nonempty);
1018 GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
1019 my_first_nonempty <=
1020 (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
1021 GC_ASSERT(global_first_nonempty >= GC_mark_stack &&
1022 global_first_nonempty <=
1023 (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
1024 if (my_first_nonempty < global_first_nonempty) {
1025 my_first_nonempty = global_first_nonempty;
1026 } else if (global_first_nonempty < my_first_nonempty) {
1027 AO_compare_and_swap(&GC_first_nonempty,
1028 (AO_t) global_first_nonempty,
1029 (AO_t) my_first_nonempty);
1030 /* If this fails, we just go ahead, without updating */
1031 /* GC_first_nonempty. */
1033 /* Perhaps we should also update GC_first_nonempty, if it */
1034 /* is less. But that would require using atomic updates. */
1035 my_top = (mse *)AO_load_acquire((volatile AO_t *)(&GC_mark_stack_top));
1036 n_on_stack = my_top - my_first_nonempty + 1;
1037 if (0 == n_on_stack) {
1038 GC_acquire_mark_lock();
1039 my_top = GC_mark_stack_top;
1040 /* Asynchronous modification impossible here, */
1041 /* since we hold mark lock. */
1042 n_on_stack = my_top - my_first_nonempty + 1;
1043 if (0 == n_on_stack) {
1045 GC_ASSERT(GC_active_count <= GC_helper_count);
1046 /* Other markers may redeposit objects */
1048 if (0 == GC_active_count) GC_notify_all_marker();
1049 while (GC_active_count > 0
1050 && (mse *)AO_load(&GC_first_nonempty)
1051 > GC_mark_stack_top) {
1052 /* We will be notified if either GC_active_count */
1053 /* reaches zero, or if more objects are pushed on */
1054 /* the global mark stack. */
1057 if (GC_active_count == 0 &&
1058 (mse *)AO_load(&GC_first_nonempty) > GC_mark_stack_top) {
1059 GC_bool need_to_notify = FALSE;
1060 /* The above conditions can't be falsified while we */
1061 /* hold the mark lock, since neither */
1062 /* GC_active_count nor GC_mark_stack_top can */
1063 /* change. GC_first_nonempty can only be */
1064 /* incremented asynchronously. Thus we know that */
1065 /* both conditions actually held simultaneously. */
1067 if (0 == GC_helper_count) need_to_notify = TRUE;
1068 if (GC_print_stats == VERBOSE)
1070 "Finished mark helper %lu\n", (unsigned long)id);
1071 GC_release_mark_lock();
1072 if (need_to_notify) GC_notify_all_marker();
1075 /* else there's something on the stack again, or */
1076 /* another helper may push something. */
1078 GC_ASSERT(GC_active_count > 0);
1079 GC_release_mark_lock();
1082 GC_release_mark_lock();
1085 n_to_get = ENTRIES_TO_GET;
1086 if (n_on_stack < 2 * ENTRIES_TO_GET) n_to_get = 1;
1087 local_top = GC_steal_mark_stack(my_first_nonempty, my_top,
1088 local_mark_stack, n_to_get,
1089 &my_first_nonempty);
1090 GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
1091 my_first_nonempty <=
1092 (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
1093 GC_do_local_mark(local_mark_stack, local_top);
1097 /* Perform Parallel mark. */
1098 /* We hold the GC lock, not the mark lock. */
1099 /* Currently runs until the mark stack is */
1101 void GC_do_parallel_mark()
1103 mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
1105 GC_acquire_mark_lock();
1106 GC_ASSERT(I_HOLD_LOCK());
1107 /* This could be a GC_ASSERT, but it seems safer to keep it on */
1108 /* all the time, especially since it's cheap. */
1109 if (GC_help_wanted || GC_active_count != 0 || GC_helper_count != 0)
1110 ABORT("Tried to start parallel mark in bad state");
1111 if (GC_print_stats == VERBOSE)
1112 GC_log_printf("Starting marking for mark phase number %lu\n",
1113 (unsigned long)GC_mark_no);
1114 GC_first_nonempty = (AO_t)GC_mark_stack;
1115 GC_active_count = 0;
1116 GC_helper_count = 1;
1117 GC_help_wanted = TRUE;
1118 GC_release_mark_lock();
1119 GC_notify_all_marker();
1120 /* Wake up potential helpers. */
1121 GC_mark_local(local_mark_stack, 0);
1122 GC_acquire_mark_lock();
1123 GC_help_wanted = FALSE;
1124 /* Done; clean up. */
1125 while (GC_helper_count > 0) GC_wait_marker();
1126 /* GC_helper_count cannot be incremented while GC_help_wanted == FALSE */
1127 if (GC_print_stats == VERBOSE)
1129 "Finished marking for mark phase number %lu\n",
1130 (unsigned long)GC_mark_no);
1132 GC_release_mark_lock();
1133 GC_notify_all_marker();
1137 /* Try to help out the marker, if it's running. */
1138 /* We do not hold the GC lock, but the requestor does. */
1139 void GC_help_marker(word my_mark_no)
1141 mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
1144 if (!GC_parallel) return;
1145 GC_acquire_mark_lock();
1146 while (GC_mark_no < my_mark_no
1147 || (!GC_help_wanted && GC_mark_no == my_mark_no)) {
1150 my_id = GC_helper_count;
1151 if (GC_mark_no != my_mark_no || my_id >= GC_markers) {
1152 /* Second test is useful only if original threads can also */
1153 /* act as helpers. Under Linux they can't. */
1154 GC_release_mark_lock();
1157 GC_helper_count = my_id + 1;
1158 GC_release_mark_lock();
1159 GC_mark_local(local_mark_stack, my_id);
1160 /* GC_mark_local decrements GC_helper_count. */
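/* Illustrative sketch (not from the original source; the real code     */
/* lives in the thread-support files): a dedicated marker thread        */
/* repeatedly offers help for successive mark phases.  The loop below   */
/* is a simplification and an assumption about that code; not compiled. */
#if 0
static void example_marker_thread_body(void)
{
    word my_mark_no = 0;

    for (;;) {
        if (my_mark_no < GC_mark_no) my_mark_no = GC_mark_no;
        /* Blocks until help is wanted for phase my_mark_no, then marks. */
        GC_help_marker(my_mark_no);
        ++my_mark_no;
    }
}
#endif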
1163 #endif /* PARALLEL_MARK */
1165 /* Allocate or reallocate space for mark stack of size n entries. */
1166 /* May silently fail. */
1167 static void alloc_mark_stack(size_t n)
1169 mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct GC_ms_entry));
1171 /* Don't recycle a stack segment obtained with the wrong flags. */
1172 /* Win32 GetWriteWatch requires the right kind of memory. */
1173 static GC_bool GC_incremental_at_stack_alloc = 0;
1174 GC_bool recycle_old = (!GC_incremental || GC_incremental_at_stack_alloc);
1176 GC_incremental_at_stack_alloc = GC_incremental;
1178 # define recycle_old 1
1181 GC_mark_stack_too_small = FALSE;
1182 if (GC_mark_stack_size != 0) {
1183 if (new_stack != 0) {
1185 /* Recycle old space */
1186 size_t page_offset = (word)GC_mark_stack & (GC_page_size - 1);
1187 size_t size = GC_mark_stack_size * sizeof(struct GC_ms_entry);
1190 if (0 != page_offset) displ = GC_page_size - page_offset;
1191 size = (size - displ) & ~(GC_page_size - 1);
1193 GC_add_to_heap((struct hblk *)
1194 ((word)GC_mark_stack + displ), (word)size);
1197 GC_mark_stack = new_stack;
1198 GC_mark_stack_size = n;
1199 GC_mark_stack_limit = new_stack + n;
1200 if (GC_print_stats) {
1201 GC_log_printf("Grew mark stack to %lu frames\n",
1202 (unsigned long) GC_mark_stack_size);
1205 if (GC_print_stats) {
1206 GC_log_printf("Failed to grow mark stack to %lu frames\n",
1211 if (new_stack == 0) {
1212 GC_err_printf("No space for mark stack\n");
1215 GC_mark_stack = new_stack;
1216 GC_mark_stack_size = n;
1217 GC_mark_stack_limit = new_stack + n;
1219 GC_mark_stack_top = GC_mark_stack-1;
1224 alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
1228 * Push all locations between b and t onto the mark stack.
1229 * b is the first location to be checked. t is one past the last
1230 * location to be checked.
1231 * Should only be used if there is no possibility of mark stack
1234 void GC_push_all(ptr_t bottom, ptr_t top)
1236 register word length;
1238 bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
1239 top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
1240 if (top == 0 || bottom == top) return;
1241 GC_mark_stack_top++;
1242 if (GC_mark_stack_top >= GC_mark_stack_limit) {
1243 ABORT("unexpected mark stack overflow");
1245 length = top - bottom;
1246 # if GC_DS_TAGS > ALIGNMENT - 1
1247 length += GC_DS_TAGS;
1248 length &= ~GC_DS_TAGS;
1250 GC_mark_stack_top -> mse_start = bottom;
1251 GC_mark_stack_top -> mse_descr = length;
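/* Illustrative sketch (not from the original source): pushing a        */
/* statically allocated root region with GC_push_all so that everything */
/* it references is retained.  The array and helper names are           */
/* hypothetical; the code is kept under #if 0.                           */
#if 0
static void *example_static_roots[16];

static void example_push_static_roots(void)
{
    GC_push_all((ptr_t)example_static_roots,
                (ptr_t)(example_static_roots + 16));
}
#endif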
1255 * Analogous to the above, but push only those pages h with dirty_fn(h) != 0.
1256 * We use push_fn to actually push the block.
1257 * Used both to selectively push dirty pages, or to push a block
1258 * in piecemeal fashion, to allow for more marking concurrency.
1259 * Will not overflow mark stack if push_fn pushes a small fixed number
1260 * of entries. (This is invoked only if push_fn pushes a single entry,
1261 * or if it marks each object before pushing it, thus ensuring progress
1262 * in the event of a stack overflow.)
1264 void GC_push_selected(ptr_t bottom, ptr_t top,
1265 int (*dirty_fn) (struct hblk *),
1266 void (*push_fn) (ptr_t, ptr_t))
1270 bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
1271 top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
1273 if (top == 0 || bottom == top) return;
1274 h = HBLKPTR(bottom + HBLKSIZE);
1275 if (top <= (ptr_t) h) {
1276 if ((*dirty_fn)(h-1)) {
1277 (*push_fn)(bottom, top);
1281 if ((*dirty_fn)(h-1)) {
1282 (*push_fn)(bottom, (ptr_t)h);
1284 while ((ptr_t)(h+1) <= top) {
1285 if ((*dirty_fn)(h)) {
1286 if ((word)(GC_mark_stack_top - GC_mark_stack)
1287 > 3 * GC_mark_stack_size / 4) {
1288 /* Danger of mark stack overflow */
1289 (*push_fn)((ptr_t)h, top);
1292 (*push_fn)((ptr_t)h, (ptr_t)(h+1));
1297 if ((ptr_t)h != top) {
1298 if ((*dirty_fn)(h)) {
1299 (*push_fn)((ptr_t)h, top);
1302 if (GC_mark_stack_top >= GC_mark_stack_limit) {
1303 ABORT("unexpected mark stack overflow");
1307 # ifndef SMALL_CONFIG
1309 #ifdef PARALLEL_MARK
1310 /* Break up root sections into page size chunks to better spread */
1312 GC_bool GC_true_func(struct hblk *h) { return TRUE; }
1313 # define GC_PUSH_ALL(b,t) GC_push_selected(b,t,GC_true_func,GC_push_all);
1315 # define GC_PUSH_ALL(b,t) GC_push_all(b,t);
1319 void GC_push_conditional(ptr_t bottom, ptr_t top, GC_bool all)
1322 if (GC_dirty_maintained) {
1324 /* Pages that were never dirtied cannot contain pointers */
1325 GC_push_selected(bottom, top, GC_page_was_ever_dirty, GC_push_all);
1327 GC_push_all(bottom, top);
1330 GC_push_all(bottom, top);
1333 GC_push_selected(bottom, top, GC_page_was_dirty, GC_push_all);
1338 # if defined(MSWIN32) || defined(MSWINCE)
1339 void __cdecl GC_push_one(word p)
1341 void GC_push_one(word p)
1344 GC_PUSH_ONE_STACK((ptr_t)p, MARKED_FROM_REGISTER);
1347 struct GC_ms_entry *GC_mark_and_push(void *obj,
1348 mse *mark_stack_ptr,
1349 mse *mark_stack_limit,
1356 if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr),FALSE)) {
1357 if (GC_all_interior_pointers) {
1358 hhdr = GC_find_header(GC_base(obj));
1360 GC_ADD_TO_BLACK_LIST_NORMAL(obj, src);
1361 return mark_stack_ptr;
1364 GC_ADD_TO_BLACK_LIST_NORMAL(obj, src);
1365 return mark_stack_ptr;
1368 if (EXPECT(HBLK_IS_FREE(hhdr),0)) {
1369 GC_ADD_TO_BLACK_LIST_NORMAL(obj, src);
1370 return mark_stack_ptr;
1373 PUSH_CONTENTS_HDR(obj, mark_stack_ptr /* modified */, mark_stack_limit,
1374 src, was_marked, hhdr, TRUE);
1376 return mark_stack_ptr;
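/* Illustrative sketch (not from the original source): a user mark      */
/* procedure, as invoked through a GC_DS_PROC descriptor, that forwards */
/* the single pointer field of a hypothetical object via                */
/* GC_mark_and_push.  The struct and function names are invented, and   */
/* the final argument passes the address of the referring field, per    */
/* the gc_mark.h prototype.  Not compiled.                               */
#if 0
struct example_node { void *data; word tag; };

static mse *example_mark_proc(word *addr, mse *mark_stack_ptr,
                              mse *mark_stack_limit, word env)
{
    struct example_node *n = (struct example_node *)addr;

    (void)env;   /* extra bits encoded in the descriptor; unused here */
    return GC_mark_and_push(n -> data, mark_stack_ptr, mark_stack_limit,
                            (void **)&(n -> data));
}
#endif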
1379 /* Mark and push (i.e. gray) a single object p onto the main */
1380 /* mark stack. Consider p to be valid if it is an interior */
1382 /* The object p has passed a preliminary pointer validity */
1383 /* test, but we do not definitely know whether it is valid. */
1384 /* Mark bits are NOT atomically updated. Thus this must be the */
1385 /* only thread setting them. */
1386 # if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
1387 void GC_mark_and_push_stack(ptr_t p, ptr_t source)
1389 void GC_mark_and_push_stack(ptr_t p)
1398 if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr),FALSE)) {
1404 GC_ADD_TO_BLACK_LIST_STACK(p, source);
1408 if (EXPECT(HBLK_IS_FREE(hhdr),0)) {
1409 GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
1412 # if defined(MANUAL_VDB) && defined(THREADS)
1413 /* Pointer is on the stack. We may have dirtied the object */
1414 /* it points to, but not yet have called GC_dirty(); */
1415 GC_dirty(p); /* Implicitly affects entire object. */
1417 PUSH_CONTENTS_HDR(r, GC_mark_stack_top, GC_mark_stack_limit,
1418 source, mark_and_push_exit, hhdr, FALSE);
1419 mark_and_push_exit: ;
1420 /* We silently ignore pointers to near the end of a block, */
1421 /* which is very mildly suboptimal. */
1422 /* FIXME: We should probably add a header word to address */
1428 # define TRACE_ENTRIES 1000
1430 struct trace_entry {
1436 } GC_trace_buf[TRACE_ENTRIES];
1438 int GC_trace_buf_ptr = 0;
1440 void GC_add_trace_entry(char *kind, word arg1, word arg2)
1442 GC_trace_buf[GC_trace_buf_ptr].kind = kind;
1443 GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
1444 GC_trace_buf[GC_trace_buf_ptr].bytes_allocd = GC_bytes_allocd;
1445 GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
1446 GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
1448 if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
1451 void GC_print_trace(word gc_no, GC_bool lock)
1454 struct trace_entry *p;
1457 for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
1458 if (i < 0) i = TRACE_ENTRIES-1;
1459 p = GC_trace_buf + i;
1460 if (p -> gc_no < gc_no || p -> kind == 0) return;
1461 printf("Trace:%s (gc:%d,bytes:%d) 0x%X, 0x%X\n",
1462 p -> kind, p -> gc_no, p -> bytes_allocd,
1463 (p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
1465 printf("Trace incomplete\n");
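/* Illustrative sketch (not from the original source): with TRACE_BUF   */
/* enabled, the trace buffer can be dumped, e.g. from a debugger        */
/* breakpoint, for the current collection.  The wrapper name is         */
/* hypothetical; not compiled.                                           */
#if 0
static void example_dump_trace(void)
{
    GC_print_trace(GC_gc_no, TRUE /* lock */);
}
#endif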
1469 # endif /* TRACE_BUF */
1472 * A version of GC_push_all that treats all interior pointers as valid
1473 * and scans the entire region immediately, in case the contents
1476 void GC_push_all_eager(ptr_t bottom, ptr_t top)
1478 word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
1479 word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
1483 register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
1484 register ptr_t least_ha = GC_least_plausible_heap_addr;
1485 # define GC_greatest_plausible_heap_addr greatest_ha
1486 # define GC_least_plausible_heap_addr least_ha
1488 if (top == 0) return;
1489 /* check all pointers in range and push if they appear */
1491 lim = t - 1 /* longword */;
1492 for (p = b; p <= lim; p = (word *)(((ptr_t)p) + ALIGNMENT)) {
1494 GC_PUSH_ONE_STACK((ptr_t)q, p);
1496 # undef GC_greatest_plausible_heap_addr
1497 # undef GC_least_plausible_heap_addr
1502 * A version of GC_push_all that treats all interior pointers as valid
1503 * and scans part of the area immediately, to make sure that saved
1504 * register values are not lost.
1505 * Cold_gc_frame delimits the stack section that must be scanned
1506 * eagerly. A zero value indicates that no eager scanning is needed.
1507 * We don't need to worry about the MANUAL_VDB case here, since this
1508 * is only called in the single-threaded case. We assume that we
1509 * cannot collect between an assignment and the corresponding
1512 void GC_push_all_stack_partially_eager(ptr_t bottom, ptr_t top,
1513 ptr_t cold_gc_frame)
1515 if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
1516 /* Push the hot end of the stack eagerly, so that register values */
1517 /* saved inside GC frames are marked before they disappear. */
1518 /* The rest of the marking can be deferred until later. */
1519 if (0 == cold_gc_frame) {
1520 GC_push_all_stack(bottom, top);
1523 GC_ASSERT(bottom <= cold_gc_frame && cold_gc_frame <= top);
1524 # ifdef STACK_GROWS_DOWN
1525 GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
1526 GC_push_all_eager(bottom, cold_gc_frame);
1527 # else /* STACK_GROWS_UP */
1528 GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
1529 GC_push_all_eager(cold_gc_frame, top);
1530 # endif /* STACK_GROWS_UP */
1532 GC_push_all_eager(bottom, top);
1535 GC_add_trace_entry("GC_push_all_stack", bottom, top);
1538 #endif /* !THREADS */
1540 void GC_push_all_stack(ptr_t bottom, ptr_t top)
1542 # if defined(THREADS) && defined(MPROTECT_VDB)
1543 GC_push_all_eager(bottom, top);
1545 if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
1546 GC_push_all(bottom, top);
1548 GC_push_all_eager(bottom, top);
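/* Illustrative sketch (not from the original source): thread-support   */
/* code would push a stopped thread's stack with GC_push_all_stack,     */
/* passing the lower and higher bounding addresses.  The parameter      */
/* names are hypothetical; not compiled.                                 */
#if 0
static void example_push_thread_stack(ptr_t stack_ptr, ptr_t stack_base)
{
    /* On a downward-growing stack the current stack pointer is the     */
    /* low address and the stack base is the high address.              */
    GC_push_all_stack(stack_ptr, stack_base);
}
#endif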
1553 #if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES) && \
1554 defined(MARK_BIT_PER_GRANULE)
1555 # if GC_GRANULE_WORDS == 1
1556 # define USE_PUSH_MARKED_ACCELERATORS
1557 # define PUSH_GRANULE(q) \
1558 { ptr_t qcontents = (ptr_t)((q)[0]); \
1559 GC_PUSH_ONE_HEAP(qcontents, (q)); }
1560 # elif GC_GRANULE_WORDS == 2
1561 # define USE_PUSH_MARKED_ACCELERATORS
1562 # define PUSH_GRANULE(q) \
1563 { ptr_t qcontents = (ptr_t)((q)[0]); \
1564 GC_PUSH_ONE_HEAP(qcontents, (q)); \
1565 qcontents = (ptr_t)((q)[1]); \
1566 GC_PUSH_ONE_HEAP(qcontents, (q)+1); }
1567 # elif GC_GRANULE_WORDS == 4
1568 # define USE_PUSH_MARKED_ACCELERATORS
1569 # define PUSH_GRANULE(q) \
1570 { ptr_t qcontents = (ptr_t)((q)[0]); \
1571 GC_PUSH_ONE_HEAP(qcontents, (q)); \
1572 qcontents = (ptr_t)((q)[1]); \
1573 GC_PUSH_ONE_HEAP(qcontents, (q)+1); \
1574 qcontents = (ptr_t)((q)[2]); \
1575 GC_PUSH_ONE_HEAP(qcontents, (q)+2); \
1576 qcontents = (ptr_t)((q)[3]); \
1577 GC_PUSH_ONE_HEAP(qcontents, (q)+3); }
1581 #ifdef USE_PUSH_MARKED_ACCELERATORS
1582 /* Push all objects reachable from marked objects in the given block */
1583 /* containing objects of size 1 granule. */
1584 void GC_push_marked1(struct hblk *h, hdr *hhdr)
1586 word * mark_word_addr = &(hhdr->hb_marks[0]);
1592 /* Allow registers to be used for some frequently accessed */
1593 /* global variables. Otherwise aliasing issues are likely */
1594 /* to prevent that. */
1595 ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
1596 ptr_t least_ha = GC_least_plausible_heap_addr;
1597 mse * mark_stack_top = GC_mark_stack_top;
1598 mse * mark_stack_limit = GC_mark_stack_limit;
1599 # define GC_mark_stack_top mark_stack_top
1600 # define GC_mark_stack_limit mark_stack_limit
1601 # define GC_greatest_plausible_heap_addr greatest_ha
1602 # define GC_least_plausible_heap_addr least_ha
1604 p = (word *)(h->hb_body);
1605 plim = (word *)(((word)h) + HBLKSIZE);
1607 /* go through all words in block */
1609 mark_word = *mark_word_addr++;
1611 while(mark_word != 0) {
1612 if (mark_word & 1) {
1615 q += GC_GRANULE_WORDS;
1618 p += WORDSZ*GC_GRANULE_WORDS;
1621 # undef GC_greatest_plausible_heap_addr
1622 # undef GC_least_plausible_heap_addr
1623 # undef GC_mark_stack_top
1624 # undef GC_mark_stack_limit
1626 GC_mark_stack_top = mark_stack_top;
1632 /* Push all objects reachable from marked objects in the given block */
1633 /* containing objects of size 2 granules. */
1634 void GC_push_marked2(struct hblk *h, hdr *hhdr)
1636 word * mark_word_addr = &(hhdr->hb_marks[0]);
1642 ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
1643 ptr_t least_ha = GC_least_plausible_heap_addr;
1644 mse * mark_stack_top = GC_mark_stack_top;
1645 mse * mark_stack_limit = GC_mark_stack_limit;
1647 # define GC_mark_stack_top mark_stack_top
1648 # define GC_mark_stack_limit mark_stack_limit
1649 # define GC_greatest_plausible_heap_addr greatest_ha
1650 # define GC_least_plausible_heap_addr least_ha
1652 p = (word *)(h->hb_body);
1653 plim = (word *)(((word)h) + HBLKSIZE);
1655 /* go through all words in block */
1657 mark_word = *mark_word_addr++;
1659 while(mark_word != 0) {
1660 if (mark_word & 1) {
1662 PUSH_GRANULE(q + GC_GRANULE_WORDS);
1664 q += 2 * GC_GRANULE_WORDS;
1667 p += WORDSZ*GC_GRANULE_WORDS;
1670 # undef GC_greatest_plausible_heap_addr
1671 # undef GC_least_plausible_heap_addr
1672 # undef GC_mark_stack_top
1673 # undef GC_mark_stack_limit
1675 GC_mark_stack_top = mark_stack_top;
1678 # if GC_GRANULE_WORDS < 4
1679 /* Push all objects reachable from marked objects in the given block */
1680 /* containing objects of size 4 granules. */
1681 /* There is a risk of mark stack overflow here. But we handle that. */
1682 /* And only unmarked objects get pushed, so it's not very likely. */
1683 void GC_push_marked4(struct hblk *h, hdr *hhdr)
1685 word * mark_word_addr = &(hhdr->hb_marks[0]);
1691 ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
1692 ptr_t least_ha = GC_least_plausible_heap_addr;
1693 mse * mark_stack_top = GC_mark_stack_top;
1694 mse * mark_stack_limit = GC_mark_stack_limit;
1695 # define GC_mark_stack_top mark_stack_top
1696 # define GC_mark_stack_limit mark_stack_limit
1697 # define GC_greatest_plausible_heap_addr greatest_ha
1698 # define GC_least_plausible_heap_addr least_ha
1700 p = (word *)(h->hb_body);
1701 plim = (word *)(((word)h) + HBLKSIZE);
1703 /* go through all words in block */
1705 mark_word = *mark_word_addr++;
1707 while(mark_word != 0) {
1708 if (mark_word & 1) {
1710 PUSH_GRANULE(q + GC_GRANULE_WORDS);
1711 PUSH_GRANULE(q + 2*GC_GRANULE_WORDS);
1712 PUSH_GRANULE(q + 3*GC_GRANULE_WORDS);
1714 q += 4 * GC_GRANULE_WORDS;
1717 p += WORDSZ*GC_GRANULE_WORDS;
1719 # undef GC_greatest_plausible_heap_addr
1720 # undef GC_least_plausible_heap_addr
1721 # undef GC_mark_stack_top
1722 # undef GC_mark_stack_limit
1723 GC_mark_stack_top = mark_stack_top;
1726 #endif /* GC_GRANULE_WORDS < 4 */
1728 #endif /* UNALIGNED */
1730 #endif /* USE_PUSH_MARKED_ACCELERATORS */
1732 /* Push all objects reachable from marked objects in the given block */
1733 void GC_push_marked(struct hblk *h, hdr *hhdr)
1735 size_t sz = hhdr -> hb_sz;
1736 word descr = hhdr -> hb_descr;
1740 mse * GC_mark_stack_top_reg;
1741 mse * mark_stack_limit = GC_mark_stack_limit;
1743 /* Some quick shortcuts: */
1744 if ((0 | GC_DS_LENGTH) == descr) return;
1745 if (GC_block_empty(hhdr)/* nothing marked */) return;
1746 GC_n_rescuing_pages++;
1747 GC_objects_are_marked = TRUE;
1748 if (sz > MAXOBJBYTES) {
1751 lim = (h + 1)->hb_body - sz;
1754 switch(BYTES_TO_GRANULES(sz)) {
1755 # if defined(USE_PUSH_MARKED_ACCELERATORS)
1757 GC_push_marked1(h, hhdr);
1759 # if !defined(UNALIGNED)
1761 GC_push_marked2(h, hhdr);
1763 # if GC_GRANULE_WORDS < 4
1765 GC_push_marked4(h, hhdr);
1771 GC_mark_stack_top_reg = GC_mark_stack_top;
1772 for (p = h -> hb_body, bit_no = 0; p <= lim;
1773 p += sz, bit_no += MARK_BIT_OFFSET(sz)) {
1774 if (mark_bit_from_hdr(hhdr, bit_no)) {
1775 /* Mark from fields inside the object */
1776 PUSH_OBJ(p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
1779 GC_mark_stack_top = GC_mark_stack_top_reg;
1783 #ifndef SMALL_CONFIG
1784 /* Test whether any page in the given block is dirty */
1785 GC_bool GC_block_was_dirty(struct hblk *h, hdr *hhdr)
1787 size_t sz = hhdr -> hb_sz;
1789 if (sz <= MAXOBJBYTES) {
1790 return(GC_page_was_dirty(h));
1793 while (p < (ptr_t)h + sz) {
1794 if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
1800 #endif /* SMALL_CONFIG */
1802 /* Push from marked objects in the next used block at or after h, and return the address of the following block. */
1803 struct hblk * GC_push_next_marked(struct hblk *h)
1805 hdr * hhdr = HDR(h);
1807 if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)) {
1808 h = GC_next_used_block(h);
1809 if (h == 0) return(0);
1810 hhdr = GC_find_header((ptr_t)h);
1812 GC_push_marked(h, hhdr);
1813 return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
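/* Illustrative sketch (not from the original source): how the marker   */
/* state machine uses this routine -- keep advancing a scan pointer     */
/* through the heap until the end is reached (a null return).  The      */
/* helper and its starting argument are hypothetical; not compiled.      */
#if 0
static void example_push_from_all_marked_blocks(struct hblk *start)
{
    struct hblk *h = start;

    while (h != 0) {
        h = GC_push_next_marked(h);  /* pushes from one block, returns next */
    }
}
#endif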
1816 #ifndef SMALL_CONFIG
1817 /* Identical to above, but mark only from dirty pages */
1818 struct hblk * GC_push_next_marked_dirty(struct hblk *h)
1820 hdr * hhdr = HDR(h);
1822 if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
1824 if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)) {
1825 h = GC_next_used_block(h);
1826 if (h == 0) return(0);
1827 hhdr = GC_find_header((ptr_t)h);
1829 # ifdef STUBBORN_ALLOC
1830 if (hhdr -> hb_obj_kind == STUBBORN) {
1831 if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
1835 if (GC_block_was_dirty(h, hhdr)) break;
1838 if (GC_block_was_dirty(h, hhdr)) break;
1840 h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
1843 GC_push_marked(h, hhdr);
1844 return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
1848 /* Similar to above, but for uncollectable pages. Needed since we */
1849 /* do not clear marks for such pages, even for full collections. */
1850 struct hblk * GC_push_next_marked_uncollectable(struct hblk *h)
1852 hdr * hhdr = HDR(h);
1855 if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)) {
1856 h = GC_next_used_block(h);
1857 if (h == 0) return(0);
1858 hhdr = GC_find_header((ptr_t)h);
1860 if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
1861 h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
1864 GC_push_marked(h, hhdr);
1865 return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));