 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 2001 by Hewlett-Packard Company. All rights reserved.
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
/* Private declarations of GC marker data structures and macros. */
 * Declarations of the mark stack.  Needed by the marker and by client
 * supplied mark routines.  Transitively includes gc_priv.h.
 * (Note that gc_priv.h should not be included before this, since this
 * includes dbg_mlc.h, which wants to include gc_priv.h AFTER defining
# if defined(KEEP_BACK_PTRS) || defined(PRINT_BLACK_LIST)
# include "../gc_mark.h"
/* The real declarations of the following are in gc_priv.h, so that    */
/* we can avoid scanning the following table.                          */

extern mark_proc GC_mark_procs[MAX_MARK_PROCS];
#ifndef MARK_DESCR_OFFSET
# define MARK_DESCR_OFFSET sizeof(word)
#endif

 * Mark descriptor stuff that should remain private for now, mostly
 * because it's hard to export WORDSZ without including gcconfig.h.
# define BITMAP_BITS (WORDSZ - GC_DS_TAG_BITS)
# define PROC(descr) \
    (GC_mark_procs[((descr) >> GC_DS_TAG_BITS) & (GC_MAX_MARK_PROCS-1)])
    ((descr) >> (GC_DS_TAG_BITS + GC_LOG_MAX_MARK_PROCS))
    (((word)1 << (WORDSZ - GC_DS_TAG_BITS - GC_LOG_MAX_MARK_PROCS)) - 1)
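/*
 * Illustrative sketch (an assumption for exposition, not code from this
 * header): a GC_DS_PROC descriptor built with GC_MAKE_PROC() from gc_mark.h
 * packs a mark procedure index just above the tag bits and an environment
 * value above that, so PROC() above recovers the registered procedure and
 * the shift expression above recovers the environment.  With hypothetical
 * client values my_index and my_env:
 *
 *   word d = GC_MAKE_PROC(my_index, my_env);
 *   GC_ASSERT(PROC(d) == GC_mark_procs[my_index]);
 *   GC_ASSERT((d >> (GC_DS_TAG_BITS + GC_LOG_MAX_MARK_PROCS)) == my_env);
 */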
extern unsigned GC_n_mark_procs;

/* Number of mark stack entries to discard on overflow. */
#define GC_MARK_STACK_DISCARDS (INITIAL_MARK_STACK_SIZE/8)
typedef struct GC_ms_entry {
    ptr_t mse_start;    /* First word of object, word aligned.      */
    GC_word mse_descr;  /* Descriptor; low-order two bits are tags, */
                        /* as described in gc_mark.h.               */
} mse;

extern size_t GC_mark_stack_size;

extern mse * GC_mark_stack_limit;

#ifdef PARALLEL_MARK
  extern mse * volatile GC_mark_stack_top;
#else
  extern mse * GC_mark_stack_top;
#endif

extern mse * GC_mark_stack;
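/*
 * How these fit together (a minimal sketch, assuming the stack grows upward
 * and GC_mark_stack_top points at the topmost valid entry; the real logic
 * lives in mark.c): a sequential drain loop is roughly
 *
 *   mse *top = GC_mark_stack_top;
 *   while (top >= GC_mark_stack) {
 *     ptr_t start = top -> mse_start;
 *     word  descr = top -> mse_descr;
 *     --top;
 *     ... scan the object described by (start, descr), pushing any
 *     ... candidate pointers found, which may advance top again or
 *     ... trigger overflow handling near GC_mark_stack_limit
 *   }
 *   GC_mark_stack_top = top;
 *
 * GC_mark_from() below implements this, bounding the work done per call.
 */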
 * Allow multiple threads to participate in the marking process.
 * This works roughly as follows:
 *  The main mark stack never shrinks, but it can grow.
 *
 *  The initiating thread holds the GC lock, and sets GC_help_wanted.
 *
 *  Other threads:
 *  1) Update helper_count (while holding mark_lock).
 *  2) Allocate a local mark stack.
 *  3) Steal a global mark stack entry by atomically replacing
 *     its descriptor with 0.
 *  4) Copy it to the local stack.
 *  5) Mark on the local stack until it is empty, or until it
 *     appears profitable to copy entries back.
 *  6) If necessary, copy the local stack to the global one,
 *     holding the mark lock.
 *  7) Stop when the global mark stack is empty.
 *  8) Decrement helper_count (while holding mark_lock).
 *
 * This is an experiment to see if we can do something along the lines
 * of the University of Tokyo SGC in a less intrusive, though probably
 * also less performant, way.
/* GC_mark_stack_top is protected by the mark lock. */
 * GC_notify_all_marker() is used when GC_help_wanted is first set,
 * when the last helper becomes inactive,
 * when something is added to the global mark stack, and just after
 * GC_mark_no is incremented.
 * This could be split into multiple condition variables (and probably
 * should be, to scale to very large numbers of processors).
#endif /* PARALLEL_MARK */
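/*
 * A rough sketch of the helper loop described above (illustrative only;
 * entry, local_stack and helper_count handling are written informally here,
 * and the real code in mark.c also handles termination detection and local
 * stack overflow):
 *
 *   acquire mark lock; ++helper_count; release mark lock;
 *   while (the global mark stack is nonempty) {
 *     word d = AO_load(&entry -> mse_descr);
 *     if (d != 0
 *         && AO_compare_and_swap((volatile AO_t *)&entry -> mse_descr,
 *                                d, 0)) {
 *       copy (entry -> mse_start, d) onto the local stack;
 *       mark from the local stack until it is empty or grows large;
 *       if it grew large, copy part of it back to the global stack
 *         while holding the mark lock, then GC_notify_all_marker();
 *     }
 *   }
 *   acquire mark lock; --helper_count; release mark lock;
 */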
/* Return a pointer within the first page of the object.       */
/* Set *new_hdr_p to the corresponding header.                  */
ptr_t GC_find_start(ptr_t current, hdr *hhdr, hdr **new_hdr_p);

mse * GC_signal_mark_stack_overflow(mse *msp);
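/*
 * Sketch of the overflow protocol (informal; the details live in mark.c):
 * when a push runs out of room, GC_signal_mark_stack_overflow() discards
 * some entries (see GC_MARK_STACK_DISCARDS above), records that a larger
 * stack is wanted (GC_mark_stack_too_small), and leaves the mark state
 * invalid so the lost work is redone later.  Callers use it roughly as:
 *
 *   if (mark_stack_top >= mark_stack_limit)
 *     mark_stack_top = GC_signal_mark_stack_overflow(mark_stack_top);
 *
 * Correctness is preserved because previously marked objects are never
 * pushed again, so overflow costs time, not soundness.
 */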
/* Push the object obj with corresponding heap block header hhdr onto  */
/* the mark stack.                                                      */
# define PUSH_OBJ(obj, hhdr, mark_stack_top, mark_stack_limit) \
    register word _descr = (hhdr) -> hb_descr; \
    GC_ASSERT(!HBLK_IS_FREE(hhdr)); \
    if (mark_stack_top >= mark_stack_limit) { \
      mark_stack_top = GC_signal_mark_stack_overflow(mark_stack_top); \
    mark_stack_top -> mse_start = (obj); \
    mark_stack_top -> mse_descr = _descr; \
/* Push the contents of current onto the mark stack if it is a valid   */
/* pointer to a currently unmarked object.  Mark it.                   */
/* If we assumed a standard-conforming compiler, we could probably     */
/* generate the exit_label transparently.                              */
# define PUSH_CONTENTS(current, mark_stack_top, mark_stack_limit, \
                       source, exit_label) \
    HC_GET_HDR(current, my_hhdr, source, exit_label); \
    PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
                      source, exit_label, my_hhdr, TRUE); \
/* Set the mark bit; exit if it was already set. */

# ifdef USE_MARK_BITS
#   ifdef PARALLEL_MARK
      /* The following may fail to exit even if the bit was already set. */
      /* For our uses, that's benign:                                    */
#     define OR_WORD_EXIT_IF_SET(addr, bits, exit_label) \
        if (!(*(addr) & (bits))) { \
          AO_or((AO_t *)(addr), (bits)); \
#   else
#     define OR_WORD_EXIT_IF_SET(addr, bits, exit_label) \
        word old = *(addr); \
        word my_bits = (bits); \
        if (old & my_bits) goto exit_label; \
        *(addr) = (old | my_bits); \
#   endif /* !PARALLEL_MARK */
#   define SET_MARK_BIT_EXIT_IF_SET(hhdr,bit_no,exit_label) \
      word * mark_word_addr = hhdr -> hb_marks + divWORDSZ(bit_no); \
      OR_WORD_EXIT_IF_SET(mark_word_addr, (word)1 << modWORDSZ(bit_no), \
# if defined(I386) && defined(__GNUC__)
#   define LONG_MULT(hprod, lprod, x, y) { \
        asm("mull %2" : "=a"(lprod), "=d"(hprod) : "g"(y), "0"(x)); \
# else /* No in-line X86 assembly code */
#   define LONG_MULT(hprod, lprod, x, y) { \
        unsigned long long prod = (unsigned long long)x \
                                  * (unsigned long long)y; \
        hprod = prod >> 32; \
        lprod = (unsigned32)prod; \
#ifdef USE_MARK_BYTES
  /* There is a race here, and we may set                             */
  /* the bit twice in the concurrent case.  This can result in the    */
  /* object being pushed twice.  But that's only a performance issue. */
# define SET_MARK_BIT_EXIT_IF_SET(hhdr,bit_no,exit_label) \
    char * mark_byte_addr = (char *)hhdr -> hb_marks + (bit_no); \
    char mark_byte = *mark_byte_addr; \
    if (mark_byte) goto exit_label; \
    *mark_byte_addr = 1; \
#endif /* USE_MARK_BYTES */
#ifdef PARALLEL_MARK
# define INCR_MARKS(hhdr) \
    AO_store(&(hhdr -> hb_n_marks), AO_load(&(hhdr -> hb_n_marks))+1);
#else
# define INCR_MARKS(hhdr) ++(hhdr -> hb_n_marks)
#endif
#ifdef ENABLE_TRACE
# define TRACE(source, cmd) \
    if (GC_trace_addr != 0 && (ptr_t)(source) == GC_trace_addr) cmd
# define TRACE_TARGET(target, cmd) \
    if (GC_trace_addr != 0 && (target) == *(ptr_t *)GC_trace_addr) cmd
#else
# define TRACE(source, cmd)
# define TRACE_TARGET(source, cmd)
#endif
/* If the mark bit corresponding to current is not set, set it, and     */
/* push the contents of the object on the mark stack.  Current points   */
/* to the beginning of the object.  We rely on the fact that the        */
/* preceding header calculation will succeed for a pointer past the     */
/* first page of an object only if it is in fact a valid pointer        */
/* to the object.  Thus we can omit the otherwise necessary tests       */
/* here.  Note in particular that the "displ" value is the displacement */
/* from the beginning of the heap block, which may itself be in the     */
/* interior of a large object.                                          */
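/*
 * Worked example of the displacement arithmetic used below (assuming, for
 * illustration only, HBLKSIZE = 4096, GRANULE_BYTES = 16, and that hb_map
 * records, per granule, its offset in granules from the start of its
 * object): for a 48-byte object size, a pointer 112 bytes into the heap
 * block gives
 *
 *   displ       = HBLKDISPL(current)        = 112
 *   gran_displ  = BYTES_TO_GRANULES(112)    = 7
 *   byte_offset = 112 & (GRANULE_BYTES - 1) = 0
 *   hb_map[7]   = 1   (one granule past the object starting at offset 96)
 *
 * so gran_offset | byte_offset is nonzero, the interior-pointer path runs,
 * and after subtracting gran_offset the mark bit for granule 6 (the object
 * at byte offset 96) is the one set.
 */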
#ifdef MARK_BIT_PER_GRANULE
# define PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
                           source, exit_label, hhdr, do_offset_check) \
    size_t displ = HBLKDISPL(current); /* Displacement in block; in bytes. */\
    /* displ is always within range.  If current doesn't point to the  */ \
    /* first block, then we are in the all_interior_pointers case, and */ \
    /* it is safe to use any displacement value.                       */ \
    size_t gran_displ = BYTES_TO_GRANULES(displ); \
    size_t gran_offset = hhdr -> hb_map[gran_displ]; \
    size_t byte_offset = displ & (GRANULE_BYTES - 1); \
    ptr_t base = current; \
    /* The following always fails for large block references. */ \
    if (EXPECT((gran_offset | byte_offset) != 0, FALSE)) { \
        if (hhdr -> hb_large_block) { \
            /* gran_offset is bogus. */ \
            size_t obj_displ; \
            base = (ptr_t)(hhdr -> hb_block); \
            obj_displ = (ptr_t)(current) - base; \
            if (obj_displ != displ) { \
                GC_ASSERT(obj_displ < hhdr -> hb_sz); \
                /* Must be in the all_interior_pointers case, not the */ \
                /* first block; we already did the validity check on  */ \
                /* the cache miss.                                     */ \
                if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
                    GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
            GC_ASSERT(hhdr -> hb_sz > HBLKSIZE || \
                      hhdr -> hb_block == HBLKPTR(current)); \
            GC_ASSERT((ptr_t)(hhdr -> hb_block) <= (ptr_t) current); \
            size_t obj_displ = GRANULES_TO_BYTES(gran_offset) \
                               + byte_offset; \
            if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
                GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
            gran_displ -= gran_offset; \
    GC_ASSERT(hhdr == GC_find_header(base)); \
    GC_ASSERT(gran_displ % BYTES_TO_GRANULES(hhdr -> hb_sz) == 0); \
    TRACE(source, GC_log_printf("GC:%u: passed validity tests\n", \
                                (unsigned)GC_gc_no)); \
    SET_MARK_BIT_EXIT_IF_SET(hhdr, gran_displ, exit_label); \
    TRACE(source, GC_log_printf("GC:%u: previously unmarked\n", \
                                (unsigned)GC_gc_no)); \
    GC_log_printf("GC:%u: marking %p from %p instead\n", (unsigned)GC_gc_no, \
    GC_STORE_BACK_PTR((ptr_t)source, base); \
    PUSH_OBJ(base, hhdr, mark_stack_top, mark_stack_limit); \
#endif /* MARK_BIT_PER_GRANULE */
#ifdef MARK_BIT_PER_OBJ
# define PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
                           source, exit_label, hhdr, do_offset_check) \
    size_t displ = HBLKDISPL(current); /* Displacement in block; in bytes. */\
    unsigned32 low_prod, high_prod; \
    unsigned32 inv_sz = hhdr -> hb_inv_sz; \
    ptr_t base = current; \
    LONG_MULT(high_prod, low_prod, displ, inv_sz); \
    /* product is > and within sz_in_bytes of displ * sz_in_bytes * 2**32 */ \
    if (EXPECT(low_prod >> 16 != 0, FALSE)) { \
        /* FIXME: fails if offset is a multiple of HBLKSIZE which becomes 0 */ \
        if (inv_sz == LARGE_INV_SZ) { \
            size_t obj_displ; \
            base = (ptr_t)(hhdr -> hb_block); \
            obj_displ = (ptr_t)(current) - base; \
            if (obj_displ != displ) { \
                GC_ASSERT(obj_displ < hhdr -> hb_sz); \
                /* Must be in the all_interior_pointers case, not the */ \
                /* first block; we already did the validity check on  */ \
                /* the cache miss.                                     */ \
                if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
                    GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
            GC_ASSERT(hhdr -> hb_sz > HBLKSIZE || \
                      hhdr -> hb_block == HBLKPTR(current)); \
            GC_ASSERT((ptr_t)(hhdr -> hb_block) < (ptr_t) current); \
            /* Accurate enough if HBLKSIZE <= 2**15. */ \
            GC_STATIC_ASSERT(HBLKSIZE <= (1 << 15)); \
            size_t obj_displ = (((low_prod >> 16) + 1) * (hhdr -> hb_sz)) >> 16; \
            if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
                GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
    /* We may get here for a pointer to the start of a block that is not  */ \
    /* at the beginning of an object.  If so, it is valid, and we are fine. */ \
    GC_ASSERT(high_prod >= 0 && high_prod <= HBLK_OBJS(hhdr -> hb_sz)); \
    TRACE(source, GC_log_printf("GC:%u: passed validity tests\n", \
                                (unsigned)GC_gc_no)); \
    SET_MARK_BIT_EXIT_IF_SET(hhdr, high_prod, exit_label); \
    TRACE(source, GC_log_printf("GC:%u: previously unmarked\n", \
                                (unsigned)GC_gc_no)); \
    GC_log_printf("GC:%u: marking %p from %p instead\n", \
                  (unsigned)GC_gc_no, \
    GC_STORE_BACK_PTR((ptr_t)source, base); \
    PUSH_OBJ(base, hhdr, mark_stack_top, mark_stack_limit); \
#endif /* MARK_BIT_PER_OBJ */
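/*
 * Illustrative example of the inverse-size trick above (assuming, for this
 * sketch only, that hb_inv_sz is approximately ceil(2**32 / hb_sz)): with
 * hb_sz = 48 and a pointer 96 bytes into the block,
 *
 *   displ = 96,  inv_sz ~= 0x05555556
 *   displ * inv_sz ~= 0x200000040, so high_prod = 2 and low_prod = 0x40
 *
 * low_prod >> 16 is 0, so the pointer is treated as the start of object
 * number 2 (byte offset 96), and mark bit 2 is set.  An interior pointer
 * such as displ = 112 instead yields a large low_prod (the scaled
 * remainder), triggering the offset-check path above.
 */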
#if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
# define PUSH_ONE_CHECKED_STACK(p, source) \
    GC_mark_and_push_stack((ptr_t)(p), (ptr_t)(source))
#else
# define PUSH_ONE_CHECKED_STACK(p, source) \
    GC_mark_and_push_stack((ptr_t)(p))
#endif
 * Push a single value onto the mark stack.  Mark from the object pointed
 * to by p.  Invoke FIXUP_POINTER(p) before any further processing.
 * p is considered valid even if it is an interior pointer.
 * Previously marked objects are not pushed.  Hence we make progress even
 * if the mark stack overflows.
# if NEED_FIXUP_POINTER
    /* Try both the raw version and the fixed up one. */
#   define GC_PUSH_ONE_STACK(p, source) \
      if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
          && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
          PUSH_ONE_CHECKED_STACK(p, source); \
      } \
      FIXUP_POINTER(p); \
      if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
          && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
          PUSH_ONE_CHECKED_STACK(p, source); \
# else /* !NEED_FIXUP_POINTER */
#   define GC_PUSH_ONE_STACK(p, source) \
      if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
          && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
          PUSH_ONE_CHECKED_STACK(p, source); \
 * As above, but with interior pointer recognition as for
 * normal heap pointers.
# define GC_PUSH_ONE_HEAP(p,source) \
    if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
        && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
        GC_mark_stack_top = GC_mark_and_push( \
                                (void *)(p), GC_mark_stack_top, \
                                GC_mark_stack_limit, (void * *)(source)); \
/* Mark starting at mark stack entry top (incl.) down to       */
/* mark stack entry bottom (incl.).  Stop after performing     */
/* about one page's worth of work.  Return the new mark stack  */
/* top.                                                        */
mse * GC_mark_from(mse * top, mse * bottom, mse *limit);

#define MARK_FROM_MARK_STACK() \
        GC_mark_stack_top = GC_mark_from(GC_mark_stack_top, \
                                         GC_mark_stack, \
                                         GC_mark_stack + GC_mark_stack_size);
 * Mark from one finalizable object using the specified
 * mark proc.  May not mark the object pointed to by
 * real_ptr.  That is the job of the caller, if appropriate.
 * Note that this is called with the mutator running, but
 * with us holding the allocation lock.  This is safe only if the
 * mutator needs the allocation lock to reveal hidden pointers.
 * FIXME: Why do we need the GC_mark_state test below?
# define GC_MARK_FO(real_ptr, mark_proc) \
    (*(mark_proc))(real_ptr); \
    while (!GC_mark_stack_empty()) MARK_FROM_MARK_STACK(); \
    if (GC_mark_state != MS_NONE) { \
        GC_set_mark_bit(real_ptr); \
        while (!GC_mark_some((ptr_t)0)) {} \
extern GC_bool GC_mark_stack_too_small;
                        /* We need a larger mark stack.  May be        */
                        /* set by client supplied mark routines.       */

typedef int mark_state_t;      /* Current state of marking, as follows: */
                               /* Used to remember where we are during  */
                               /* concurrent marking.                   */
                               /* We say something is dirty if it was   */
                               /* written since the last time we        */
                               /* retrieved dirty bits.  We say it's    */
                               /* grungy if it was marked dirty in the  */
                               /* last set of bits we retrieved.        */

                               /* Invariant I: all roots and marked     */
                               /* objects p are either dirty, or point  */
                               /* to objects q that are either marked   */
                               /* or a pointer to q appears in a range  */
                               /* on the mark stack.                    */
# define MS_NONE 0             /* No marking in progress.  I holds.     */
                               /* Mark stack is empty.                  */

# define MS_PUSH_RESCUERS 1    /* Rescuing objects are currently        */
                               /* being pushed.  I holds, except        */
                               /* that grungy roots may point to        */
                               /* unmarked objects, as may marked       */
                               /* grungy objects above scan_ptr.        */

# define MS_PUSH_UNCOLLECTABLE 2
                               /* I holds, except that marked           */
                               /* uncollectable objects above scan_ptr  */
                               /* may point to unmarked objects.        */
                               /* Roots may point to unmarked objects.  */

# define MS_ROOTS_PUSHED 3     /* I holds, mark stack may be nonempty.  */
# define MS_PARTIALLY_INVALID 4 /* I may not hold, e.g. because of      */
                               /* mark stack overflow.  However, marked */
                               /* heap objects below scan_ptr point to  */
                               /* marked or stacked objects.            */

# define MS_INVALID 5          /* I may not hold.                       */
extern mark_state_t GC_mark_state;
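/*
 * For orientation only (a sketch of the usual progression, not a
 * specification; the actual transitions are driven by GC_mark_some() in
 * mark.c): a collection typically moves through
 *
 *   MS_NONE -> MS_PUSH_RESCUERS or MS_PUSH_UNCOLLECTABLE
 *           -> MS_ROOTS_PUSHED -> (mark stack drained) -> MS_NONE
 *
 * with MS_PARTIALLY_INVALID or MS_INVALID entered when invariant I is
 * broken, e.g. by a mark stack overflow; recovery rescans as needed before
 * marking can complete.
 */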
#endif /* GC_PMARK_H */