2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
18 #include "private/gc_priv.h"
/* NOTE(review): this excerpt is a numbered listing with gaps -- the  */
/* leading integer on each line is the original file's line number,   */
/* and several original lines (braces, declarations) are absent.      */
/* Running total maintained by the reclaim phase; see comment below.  */
20 signed_word GC_bytes_found = 0;
21 /* Number of bytes of memory reclaimed */
22 /* minus the number of bytes originally */
23 /* on free lists which we had to drop. */
25 #if defined(PARALLEL_MARK)
26 word GC_fl_builder_count = 0;
27 /* Number of threads currently building free lists without */
28 /* holding GC lock. It is not safe to collect if this is */
/* NOTE(review): the continuation of this comment (embedded line 29)  */
/* is missing -- presumably "nonzero"; confirm against upstream.      */
30 #endif /* PARALLEL_MARK */
32 /* We defer printing of leaked objects until we're done with the GC */
33 /* cycle, since the routine for printing objects needs to run outside */
34 /* the collector, e.g. without the allocation lock. */
/* NOTE(review): the #define of MAX_LEAKED (embedded line 35) is      */
/* missing from this excerpt.                                         */
36 ptr_t GC_leaked[MAX_LEAKED];
37 STATIC unsigned GC_n_leaked = 0;
/* Set whenever a leak or smashed object is recorded; tells callers   */
/* that GC_print_all_errors has something to report.                  */
39 GC_bool GC_have_errors = FALSE;
41 STATIC void GC_add_leaked(ptr_t leaked)
43 if (GC_n_leaked < MAX_LEAKED) {
44 GC_have_errors = TRUE;
45 GC_leaked[GC_n_leaked++] = leaked;
46 /* Make sure it's not reclaimed this cycle */
47 GC_set_mark_bit(leaked);
/* Re-entrancy guard for GC_print_all_errors.                         */
51 static GC_bool printing_errors = FALSE;
52 /* Print all objects on the list after printing any smashed objs. */
53 /* Clear both lists. */
54 void GC_print_all_errors (void)
/* NOTE(review): several original lines are missing from this listing */
/* (local declarations, the body of the re-entrancy check, printing   */
/* of the object addresses, and the reset of GC_n_leaked).            */
59 if (printing_errors) {
63 printing_errors = TRUE;
/* Report smashed (overwritten) objects first when debugging support  */
/* has been started.                                                  */
65 if (GC_debugging_started) GC_print_all_smashed();
/* Report every recorded leak, classified as atomic (pointer-free) or */
/* composite based on the kind stored in its block header.            */
66 for (i = 0; i < GC_n_leaked; ++i) {
67 ptr_t p = GC_leaked[i];
68 if (HDR(p) -> hb_obj_kind == PTRFREE) {
69 GC_err_printf("Leaked atomic object at ");
71 GC_err_printf("Leaked composite object at ");
/* Allow future calls to print again.                                 */
79 printing_errors = FALSE;
90 * Test whether a block is completely empty, i.e. contains no marked
91 * objects. This does not require the block to be in physical
95 GC_bool GC_block_empty(hdr *hhdr)
97 return (hhdr -> hb_n_marks == 0);
100 STATIC GC_bool GC_block_nearly_full(hdr *hhdr)
102 return (hhdr -> hb_n_marks > 7 * HBLK_OBJS(hhdr -> hb_sz)/8);
105 /* FIXME: This should perhaps again be specialized for USE_MARK_BYTES */
106 /* and USE_MARK_BITS cases. */
109 * Restore unmarked small objects in h of size sz to the object
110 * free list. Returns the new list.
111 * Clears unmarked objects. Sz is in bytes.
113 STATIC ptr_t GC_reclaim_clear(struct hblk *hbp, hdr *hhdr, size_t sz,
114 ptr_t list, signed_word *count)
/* NOTE(review): local declarations (p, q, plim, bit_no), the scan    */
/* loop header, the list-linking statements, and the clearing loops   */
/* are missing from this listing; only a skeleton remains.            */
118 signed_word n_bytes_found = 0;
/* Sanity checks: header matches the block; sz is word-aligned.       */
120 GC_ASSERT(hhdr == GC_find_header((ptr_t)hbp));
121 GC_ASSERT(sz == hhdr -> hb_sz);
122 GC_ASSERT((sz & (BYTES_PER_WORD-1)) == 0);
/* Scan from the first object to the last possible object start.      */
123 p = (word *)(hbp->hb_body);
124 plim = (word *)(hbp->hb_body + HBLKSIZE - sz);
126 /* go through all words in block */
128 if( mark_bit_from_hdr(hhdr, bit_no) ) {
/* Marked object is live: just advance to the next object.            */
129 p = (word *)((ptr_t)p + sz);
132 /* object is available - put on list */
135 /* Clear object, advance p to next object in the process */
136 q = (word *)((ptr_t)p + sz);
137 # ifdef USE_MARK_BYTES
/* The assertion below requires p to be aligned on a 2-word boundary, */
/* presumably so the object can be cleared two words at a time --     */
/* confirm against the missing clearing loop.                         */
139 && !((word)p & (2 * sizeof(word) - 1)));
147 p++; /* Skip link field */
/* Advance the mark cursor by one object's worth of mark bits.        */
153 bit_no += MARK_BIT_OFFSET(sz);
/* Report bytes transferred onto the free list back to the caller.    */
155 *count += n_bytes_found;
159 /* The same thing, but don't clear objects: */
160 STATIC ptr_t GC_reclaim_uninit(struct hblk *hbp, hdr *hhdr, size_t sz,
161 ptr_t list, signed_word *count)
/* NOTE(review): local declarations and the list-linking statements   */
/* inside the scan loop are missing from this listing.                */
165 signed_word n_bytes_found = 0;
167 GC_ASSERT(sz == hhdr -> hb_sz);
168 p = (word *)(hbp->hb_body);
169 plim = (word *)((ptr_t)hbp + HBLKSIZE - sz);
171 /* go through all words in block */
/* Unmarked objects are dead: thread them onto the free list without  */
/* clearing their contents (GC_reclaim_generic asserts the block is   */
/* pointer-free before taking this path).                             */
173 if( !mark_bit_from_hdr(hhdr, bit_no) ) {
175 /* object is available - put on list */
179 p = (word *)((ptr_t)p + sz);
180 bit_no += MARK_BIT_OFFSET(sz);
182 *count += n_bytes_found;
186 /* Don't really reclaim objects, just check for unmarked ones: */
187 STATIC void GC_reclaim_check(struct hblk *hbp, hdr *hhdr, word sz)
/* Used in leak-detection mode: unmarked objects are unreachable but  */
/* were never freed.  NOTE(review): local declarations, the loop      */
/* header, and the statement that records the leak (presumably a      */
/* GC_add_leaked call -- confirm) are missing from this listing.      */
192 GC_ASSERT(sz == hhdr -> hb_sz);
194 plim = p + HBLKSIZE - sz;
196 /* go through all words in block */
198 if( !mark_bit_from_hdr(hhdr, bit_no) ) {
202 bit_no += MARK_BIT_OFFSET(sz);
208 * Generic procedure to rebuild a free list in hbp.
209 * Also called directly from GC_malloc_many.
210 * Sz is now in bytes.
212 ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
213 GC_bool init, ptr_t list, signed_word *count)
/* NOTE(review): the declaration of result, the else keyword, and the */
/* return statement are missing from this listing.                    */
217 GC_ASSERT(GC_find_header((ptr_t)hbp) == hhdr);
/* Unprotect the block before writing to it (incremental GC support). */
218 GC_remove_protection(hbp, 1, (hhdr)->hb_descr == 0 /* Pointer-free? */);
/* Objects must be cleared when the client requested initialized      */
/* memory or when debugging is on.                                    */
219 if (init || GC_debugging_started) {
220 result = GC_reclaim_clear(hbp, hhdr, sz, list, count);
/* Skipping the clear is only safe for pointer-free blocks.           */
222 GC_ASSERT((hhdr)->hb_descr == 0 /* Pointer-free block */);
223 result = GC_reclaim_uninit(hbp, hhdr, sz, list, count);
/* Uncollectable kinds must always appear marked; restore their mark  */
/* bits after the sweep.                                              */
225 if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) GC_set_hdr_marks(hhdr);
230 * Restore unmarked small objects in the block pointed to by hbp
231 * to the appropriate object free list.
232 * If entirely empty blocks are to be completely deallocated, then
233 * caller should perform that check.
235 STATIC void GC_reclaim_small_nonempty_block(struct hblk *hbp,
/* NOTE(review): the rest of the parameter list (report_if_found),    */
/* braces, and part of the GC_reclaim_generic argument list are       */
/* missing from this listing.                                         */
238 hdr *hhdr = HDR(hbp);
239 size_t sz = hhdr -> hb_sz;
240 int kind = hhdr -> hb_obj_kind;
241 struct obj_kind * ok = &GC_obj_kinds[kind];
/* Head of the free list for this kind and granule count.             */
242 void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);
/* Record when this block was last swept (truncated GC number).       */
244 hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
246 if (report_if_found) {
/* Leak-detection mode: only report unmarked objects.                 */
247 GC_reclaim_check(hbp, hhdr, sz);
/* Normal mode: rebuild this size's free list from the block.         */
249 *flh = GC_reclaim_generic(hbp, hhdr, sz,
251 *flh, &GC_bytes_found);
256 * Restore an unmarked large object or an entirely empty blocks of small objects
257 * to the heap block free list.
258 * Otherwise enqueue the block for later processing
259 * by GC_reclaim_small_nonempty_block.
260 * If report_if_found is TRUE, then process any block immediately, and
261 * simply report free objects; do not actually reclaim them.
263 STATIC void GC_reclaim_block(struct hblk *hbp, word report_if_found)
/* NOTE(review): several original lines are missing from this listing */
/* (the rlh declaration, else keywords, the calls that actually free  */
/* the heap block, and closing braces).                               */
265 hdr * hhdr = HDR(hbp);
266 size_t sz = hhdr -> hb_sz; /* size of objects in current block */
267 struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
270 if( sz > MAXOBJBYTES ) { /* 1 big object */
271 if( !mark_bit_from_hdr(hhdr, 0) ) {
272 if (report_if_found) {
/* Leak-detection mode: report the dead large object.                 */
273 GC_add_leaked((ptr_t)hbp);
/* Otherwise the whole multi-block object is returned to the heap     */
/* free list (the freeing call itself is absent from this listing).   */
275 size_t blocks = OBJ_SZ_TO_BLOCKS(sz);
277 GC_large_allocd_bytes -= blocks * HBLKSIZE;
279 GC_bytes_found += sz;
/* Live large object: account for it in the in-use totals; a nonzero  */
/* descriptor means it may contain pointers.                          */
283 if (hhdr -> hb_descr != 0) {
284 GC_composite_in_use += sz;
286 GC_atomic_in_use += sz;
/* Small-object block.                                                */
290 GC_bool empty = GC_block_empty(hhdr);
291 # ifdef PARALLEL_MARK
292 /* Count can be low or one too high because we sometimes */
293 /* have to ignore decrements. Objects can also potentially */
294 /* be repeatedly marked by each marker. */
295 /* Here we assume two markers, but this is extremely */
296 /* unlikely to fail spuriously with more. And if it does, it */
297 /* should be looked at. */
298 GC_ASSERT(hhdr -> hb_n_marks <= 2 * (HBLKSIZE/sz + 1) + 16);
300 GC_ASSERT(sz * hhdr -> hb_n_marks <= HBLKSIZE);
/* Account live small objects in the in-use totals.                   */
302 if (hhdr -> hb_descr != 0) {
303 GC_composite_in_use += sz * hhdr -> hb_n_marks;
305 GC_atomic_in_use += sz * hhdr -> hb_n_marks;
307 if (report_if_found) {
308 GC_reclaim_small_nonempty_block(hbp, (int)report_if_found);
/* Entirely empty block: credit the whole block as found space.       */
310 GC_bytes_found += HBLKSIZE;
/* Not nearly full (or leak-finding): queue the block on the kind's   */
/* per-size reclaim list for lazy sweeping.                           */
312 } else if (GC_find_leak || !GC_block_nearly_full(hhdr)){
313 /* group of smaller objects, enqueue the real work */
314 rlh = &(ok -> ok_reclaim_list[BYTES_TO_GRANULES(sz)]);
315 hhdr -> hb_next = *rlh;
317 } /* else not worth salvaging. */
318 /* We used to do the nearly_full check later, but we */
319 /* already have the right cache context here. Also */
320 /* doing it here avoids some silly lock contention in */
321 /* GC_malloc_many. */
325 #if !defined(NO_DEBUGGING)
326 /* Routines to gather and print heap block info */
327 /* intended for debugging. Otherwise should be called */
/* NOTE(review): the remainder of this comment and most of the        */
/* Print_stats struct declaration (embedded lines 328-335) are        */
/* missing; only one struct field survives below.                     */
332 size_t number_of_blocks;
336 #ifdef USE_MARK_BYTES
338 /* Return the number of set mark bits in the given header */
339 STATIC int GC_n_set_marks(hdr *hhdr)
/* With USE_MARK_BYTES each mark occupies a whole byte, so the count  */
/* is a direct sum of the bytes at each object's mark offset.         */
/* NOTE(review): declarations of result/i and the return statement    */
/* are missing from this listing.                                     */
343 size_t sz = hhdr -> hb_sz;
344 int offset = (int)MARK_BIT_OFFSET(sz);
345 int limit = (int)FINAL_MARK_BIT(sz);
347 for (i = 0; i < limit; i += offset) {
348 result += hhdr -> hb_marks[i];
/* The final mark entry (one past the last object) must be set.       */
350 GC_ASSERT(hhdr -> hb_marks[limit]);
356 /* Number of set bits in a word. Not performance critical. */
357 static int set_bits(word n)
/* NOTE(review): the body of set_bits (embedded lines 358-367) is     */
/* missing from this listing.                                         */
369 /* Return the number of set mark bits in the given header */
370 STATIC int GC_n_set_marks(hdr *hhdr)
/* Bit-per-object / bit-per-granule variant: popcount the mark words, */
/* avoiding counting the unused padding bits of the last word.        */
/* NOTE(review): declarations of result/i/n_mark_words, #endif lines, */
/* and the return statement are missing from this listing.            */
375 # ifdef MARK_BIT_PER_OBJ
376 int n_objs = (int)HBLK_OBJS(hhdr -> hb_sz);
/* A block holding one large object still uses one mark bit.          */
378 if (0 == n_objs) n_objs = 1;
379 n_mark_words = divWORDSZ(n_objs + WORDSZ - 1);
380 # else /* MARK_BIT_PER_GRANULE */
381 n_mark_words = MARK_BITS_SZ;
/* Sum all full mark words first ...                                  */
383 for (i = 0; i < n_mark_words - 1; i++) {
384 result += set_bits(hhdr -> hb_marks[i]);
386 # ifdef MARK_BIT_PER_OBJ
/* ... then shift the unused high bits out of the last word before    */
/* counting it.                                                       */
387 result += set_bits((hhdr -> hb_marks[n_mark_words - 1])
388 << (n_mark_words * WORDSZ - n_objs));
390 result += set_bits(hhdr -> hb_marks[n_mark_words - 1]);
395 #endif /* !USE_MARK_BYTES */
397 STATIC void GC_print_block_descr(struct hblk *h,
398 word /* struct PrintStats */ raw_ps)
/* Callback for GC_apply_to_all_blocks: prints a one-line summary of  */
/* the block (kind : object size , mark count) and accumulates totals */
/* into the Print_stats passed type-punned through a word.            */
/* NOTE(review): the initialization of hhdr from h and closing braces */
/* are missing from this listing.                                     */
401 size_t bytes = hhdr -> hb_sz;
402 struct Print_stats *ps;
403 unsigned n_marks = GC_n_set_marks(hhdr);
/* Flag blocks whose cached mark count disagrees with a fresh count.  */
405 if (hhdr -> hb_n_marks != n_marks) {
406 GC_printf("(%u:%u,%u!=%u)", hhdr -> hb_obj_kind, (unsigned)bytes,
407 (unsigned)hhdr -> hb_n_marks, n_marks);
409 GC_printf("(%u:%u,%u)", hhdr -> hb_obj_kind,
410 (unsigned)bytes, n_marks);
/* Round bytes to a block multiple.  NOTE(review): the preceding      */
/* "bytes += HBLKSIZE-1" (embedded line 412) appears to be missing,   */
/* without which this rounds down instead of up -- confirm upstream.  */
413 bytes &= ~(HBLKSIZE-1);
415 ps = (struct Print_stats *)raw_ps;
416 ps->total_bytes += bytes;
417 ps->number_of_blocks++;
420 void GC_print_block_list(void)
422 struct Print_stats pstats;
424 GC_printf("(kind(0=ptrfree,1=normal,2=unc.):size_in_bytes, #_marks_set)\n");
425 pstats.number_of_blocks = 0;
426 pstats.total_bytes = 0;
427 GC_apply_to_all_blocks(GC_print_block_descr, (word)&pstats);
428 GC_printf("\nblocks = %lu, bytes = %lu\n",
429 (unsigned long)pstats.number_of_blocks,
430 (unsigned long)pstats.total_bytes);
433 /* Currently for debugger use only: */
434 void GC_print_free_list(int kind, size_t sz_in_granules)
/* Walk the free list for the given kind and granule size, printing   */
/* each entry and announcing the containing heap block whenever it    */
/* changes.  NOTE(review): the counter declaration, the loop header   */
/* that advances flh, the update of lastBlock, and closing braces are */
/* missing from this listing.                                         */
436 struct obj_kind * ok = &GC_obj_kinds[kind];
437 ptr_t flh = ok -> ok_freelist[sz_in_granules];
438 struct hblk *lastBlock = 0;
442 struct hblk *block = HBLKPTR(flh);
443 if (block != lastBlock){
444 GC_printf("\nIn heap block at %p:\n\t", block);
447 GC_printf("%d: %p;", ++n, flh);
455 * Clear all obj_link pointers in the list of free objects *flp.
457 * This must be done before dropping a list of free gcj-style objects,
458 * since may otherwise end up with dangling "descriptor" pointers.
459 * It may help for other pointer-containing objects.
/* NOTE(review): the loop that walks the list and clears each link,   */
/* plus the declaration of next, is missing from this listing; only   */
/* the advance of flp to the next link position survives.             */
461 STATIC void GC_clear_fl_links(void **flp)
467 flp = &(obj_link(next));
473 * Perform GC_reclaim_block on the entire heap, after first clearing
474 * small object free lists (if we are not just looking for leaks).
476 void GC_start_reclaim(GC_bool report_if_found)
/* NOTE(review): the declaration of kind and the loop locals, the     */
/* statements that actually null out each list entry, and several     */
/* braces/#endif lines are missing from this listing.                 */
480 # if defined(PARALLEL_MARK)
/* No thread may still be building free lists while we reset them.    */
481 GC_ASSERT(0 == GC_fl_builder_count);
483 /* Reset in use counters. GC_reclaim_block recomputes them. */
484 GC_composite_in_use = 0;
485 GC_atomic_in_use = 0;
486 /* Clear reclaim- and free-lists */
487 for (kind = 0; kind < GC_n_kinds; kind++) {
492 struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list;
/* Pointer-containing kinds need their free-list links cleared before */
/* the lists are dropped (see GC_clear_fl_links above).               */
493 GC_bool should_clobber = (GC_obj_kinds[kind].ok_descriptor != 0);
495 if (rlist == 0) continue; /* This kind not used. */
496 if (!report_if_found) {
/* Drop all small-object free lists for this kind.                    */
497 lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES+1]);
498 for( fop = GC_obj_kinds[kind].ok_freelist; fop < lim; fop++ ) {
500 if (should_clobber) {
501 GC_clear_fl_links(fop);
507 } /* otherwise free list objects are marked, */
508 /* and its safe to leave them */
/* Empty every per-size reclaim list for this kind.                   */
509 rlim = rlist + MAXOBJGRANULES+1;
510 for( rlp = rlist; rlp < rlim; rlp++ ) {
516 /* Go through all heap blocks (in hblklist) and reclaim unmarked objects */
517 /* or enqueue the block for later processing. */
518 GC_apply_to_all_blocks(GC_reclaim_block, (word)report_if_found);
/* NOTE(review): embedded lines 519-520 are missing; upstream guards  */
/* the call below with #ifdef EAGER_SWEEP -- confirm before use.      */
521 /* This is a very stupid thing to do. We make it possible anyway, */
522 /* so that you can convince yourself that it really is very stupid. */
523 GC_reclaim_all((GC_stop_func)0, FALSE);
525 # if defined(PARALLEL_MARK)
526 GC_ASSERT(0 == GC_fl_builder_count);
532 * Sweep blocks of the indicated object size and kind until either the
533 * appropriate free list is nonempty, or there are no more blocks to
536 void GC_continue_reclaim(size_t sz /* granules */, int kind)
/* NOTE(review): declarations of hbp/hhdr, the indexing of rlh by sz  */
/* (upstream has "rlh += sz;" at the missing embedded line 545), the  */
/* hhdr initialization inside the loop, and closing braces are        */
/* missing from this listing.                                         */
540 struct obj_kind * ok = &(GC_obj_kinds[kind]);
541 struct hblk ** rlh = ok -> ok_reclaim_list;
542 void **flh = &(ok -> ok_freelist[sz]);
544 if (rlh == 0) return; /* No blocks of this kind. */
/* Sweep queued blocks until one yields a free object of this size.   */
546 while ((hbp = *rlh) != 0) {
548 *rlh = hhdr -> hb_next;
549 GC_reclaim_small_nonempty_block(hbp, FALSE);
550 if (*flh != 0) break;
555 * Reclaim all small blocks waiting to be reclaimed.
556 * Abort and return FALSE when/if (*stop_func)() returns TRUE.
557 * If this returns TRUE, then it's safe to restart the world
558 * with incorrectly cleared mark bits.
559 * If ignore_old is TRUE, then reclaim only blocks that have been
560 * recently reclaimed, and discard the rest.
561 * Stop_func may be 0.
563 GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
569 struct obj_kind * ok;
572 # ifndef SMALL_CONFIG
573 CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
574 CLOCK_TYPE done_time;
576 if (GC_print_stats == VERBOSE)
577 GET_TIME(start_time);
580 for (kind = 0; kind < GC_n_kinds; kind++) {
581 ok = &(GC_obj_kinds[kind]);
582 rlp = ok -> ok_reclaim_list;
583 if (rlp == 0) continue;
584 for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
586 while ((hbp = *rlh) != 0) {
587 if (stop_func != (GC_stop_func)0 && (*stop_func)()) {
591 *rlh = hhdr -> hb_next;
592 if (!ignore_old || hhdr -> hb_last_reclaimed == GC_gc_no - 1) {
593 /* It's likely we'll need it this time, too */
594 /* It's been touched recently, so this */
595 /* shouldn't trigger paging. */
596 GC_reclaim_small_nonempty_block(hbp, FALSE);
601 # ifndef SMALL_CONFIG
602 if (GC_print_stats == VERBOSE) {
604 GC_log_printf("Disposing of reclaim lists took %lu msecs\n",
605 MS_TIME_DIFF(done_time,start_time));