2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (C) 2007 Free Software Foundation, Inc
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
17 #include "private/gc_pmark.h"
19 #ifdef FINALIZE_ON_DEMAND
20 int GC_finalize_on_demand = 1;
21 #else
22 int GC_finalize_on_demand = 0;
23 #endif
25 #ifdef JAVA_FINALIZATION
26 int GC_java_finalization = 1;
27 #else
28 int GC_java_finalization = 0;
29 #endif
31 /* Type of mark procedure used for marking from finalizable object. */
32 /* This procedure normally does not mark the object, only its */
33 /* descendants. */
34 typedef void (* finalization_mark_proc)(ptr_t /* finalizable_obj_ptr */);
36 #define HASH3(addr,size,log_size) \
37 ((((word)(addr) >> 3) ^ ((word)(addr) >> (3 + (log_size)))) \
38 & ((size) - 1))
39 #define HASH2(addr,log_size) HASH3(addr, 1 << log_size, log_size)
41 struct hash_chain_entry {
42 word hidden_key;
43 struct hash_chain_entry * next;
44 };
46 static struct disappearing_link {
47 struct hash_chain_entry prolog;
48 # define dl_hidden_link prolog.hidden_key
49 /* Field to be cleared. */
50 # define dl_next(x) (struct disappearing_link *)((x) -> prolog.next)
51 # define dl_set_next(x,y) (x)->prolog.next = (struct hash_chain_entry *)(y)
53 word dl_hidden_obj; /* Pointer to object base */
54 } **dl_head = 0;
56 static signed_word log_dl_table_size = -1;
57 /* Binary log of */
58 /* current size of array pointed to by dl_head. */
59 /* -1 ==> size is 0. */
61 STATIC word GC_dl_entries = 0;
62 /* Number of entries currently in disappearing */
63 /* link table. */
65 static struct finalizable_object {
66 struct hash_chain_entry prolog;
67 # define fo_hidden_base prolog.hidden_key
68 /* Pointer to object base. */
69 /* No longer hidden once object */
70 /* is on finalize_now queue. */
71 # define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
72 # define fo_set_next(x,y) (x)->prolog.next = (struct hash_chain_entry *)(y)
73 GC_finalization_proc fo_fn; /* Finalizer. */
74 ptr_t fo_client_data;
75 word fo_object_size; /* In bytes. */
76 finalization_mark_proc fo_mark_proc; /* Mark-through procedure */
77 } **fo_head = 0;
79 STATIC struct finalizable_object * GC_finalize_now = 0;
80 /* List of objects that should be finalized now. */
82 static signed_word log_fo_table_size = -1;
84 word GC_fo_entries = 0; /* used also in extra/MacOS.c */
86 GC_INNER void GC_push_finalizer_structures(void)
88 GC_push_all((ptr_t)(&dl_head), (ptr_t)(&dl_head) + sizeof(word));
89 GC_push_all((ptr_t)(&fo_head), (ptr_t)(&fo_head) + sizeof(word));
90 GC_push_all((ptr_t)(&GC_finalize_now),
91 (ptr_t)(&GC_finalize_now) + sizeof(word));
94 /* Double the size of a hash table. *log_size_ptr is the log of its current */
95 /* size. May be a no-op. */
96 /* *table is a pointer to an array of hash headers. If we succeed, we */
97 /* update both *table and *log_size_ptr. */
99 STATIC void GC_grow_table(struct hash_chain_entry ***table,
100 signed_word *log_size_ptr)
103 register struct hash_chain_entry *p;
104 signed_word log_old_size = *log_size_ptr;
105 signed_word log_new_size = log_old_size + 1;
106 word old_size = ((log_old_size == -1)? 0: (1 << log_old_size));
107 word new_size = (word)1 << log_new_size;
108 /* FIXME: Power of 2 size often gets rounded up to one more page. */
109 struct hash_chain_entry **new_table = (struct hash_chain_entry **)
110 GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
111 (size_t)new_size * sizeof(struct hash_chain_entry *), NORMAL);
113 if (new_table == 0) {
115 ABORT("Insufficient space for initial table allocation");
120 for (i = 0; i < old_size; i++) {
123 ptr_t real_key = GC_REVEAL_POINTER(p -> hidden_key);
124 struct hash_chain_entry *next = p -> next;
125 size_t new_hash = HASH3(real_key, new_size, log_new_size);
127 p -> next = new_table[new_hash];
128 new_table[new_hash] = p;
132 *log_size_ptr = log_new_size;
136 GC_API int GC_CALL GC_register_disappearing_link(void * * link)
140 base = (ptr_t)GC_base((void *)link);
141 if (base == 0)
142 ABORT("Bad arg to GC_register_disappearing_link");
143 return(GC_general_register_disappearing_link(link, base));
146 GC_API int GC_CALL GC_general_register_disappearing_link(void * * link,
149 struct disappearing_link *curr_dl;
151 struct disappearing_link * new_dl;
154 if (((word)link & (ALIGNMENT-1)) || link == NULL)
155 ABORT("Bad arg to GC_general_register_disappearing_link");
157 GC_ASSERT(obj != NULL && GC_base(obj) == obj);
158 if (log_dl_table_size == -1
159 || GC_dl_entries > ((word)1 << log_dl_table_size)) {
160 GC_grow_table((struct hash_chain_entry ***)(&dl_head),
161 &log_dl_table_size);
162 if (GC_print_stats) {
163 GC_log_printf("Grew dl table to %u entries\n",
164 (1 << (unsigned)log_dl_table_size));
167 index = HASH2(link, log_dl_table_size);
168 for (curr_dl = dl_head[index]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
169 if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
170 curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
175 new_dl = (struct disappearing_link *)
176 GC_INTERNAL_MALLOC(sizeof(struct disappearing_link),NORMAL);
178 GC_oom_func oom_fn = GC_oom_fn;
180 new_dl = (struct disappearing_link *)
181 (*oom_fn)(sizeof(struct disappearing_link));
185 /* It's not likely we'll make it here, but ... */
187 /* Recalculate index since the table may grow. */
188 index = HASH2(link, log_dl_table_size);
189 /* Check again that our disappearing link is not in the table. */
190 for (curr_dl = dl_head[index]; curr_dl != 0;
191 curr_dl = dl_next(curr_dl)) {
192 if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
193 curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
195 # ifndef DBG_HDRS_ALL
196 /* Free unused new_dl returned by GC_oom_fn() */
197 GC_free((void *)new_dl);
203 new_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
204 new_dl -> dl_hidden_link = GC_HIDE_POINTER(link);
205 dl_set_next(new_dl, dl_head[index]);
206 dl_head[index] = new_dl;
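/* Illustrative sketch (not part of the collector): one common client-side */
/* use of the API above is a weak pointer. The cell holding the pointer is */
/* allocated as pointer-free storage so that the stored pointer does not */
/* itself keep the target alive; the collector clears it when the target */
/* becomes unreachable. The helper name make_weak_ptr is hypothetical. */
#if 0
  void **make_weak_ptr(void *target)
  {
    /* Pointer-free (atomic) cell: not scanned, so it does not retain target. */
    void **cell = (void **)GC_MALLOC_ATOMIC(sizeof(void *));

    *cell = target;
    /* Ask the collector to clear *cell once target becomes unreachable. */
    GC_general_register_disappearing_link(cell, target);
    return cell;
  }
#endif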
212 GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
214 struct disappearing_link *curr_dl, *prev_dl;
218 if (((word)link & (ALIGNMENT-1)) != 0) return(0); /* Nothing to do. */
221 index = HASH2(link, log_dl_table_size);
222 prev_dl = 0; curr_dl = dl_head[index];
223 while (curr_dl != 0) {
224 if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
226 dl_head[index] = dl_next(curr_dl);
228 dl_set_next(prev_dl, dl_next(curr_dl));
233 dl_set_next(curr_dl, 0);
235 GC_free((void *)curr_dl);
240 curr_dl = dl_next(curr_dl);
246 /* Possible finalization_marker procedures. Note that mark stack */
247 /* overflow is handled by the caller, and is not a disaster. */
248 STATIC void GC_normal_finalize_mark_proc(ptr_t p)
252 PUSH_OBJ(p, hhdr, GC_mark_stack_top,
253 &(GC_mark_stack[GC_mark_stack_size]));
256 /* This only pays very partial attention to the mark descriptor. */
257 /* It does the right thing for normal and atomic objects, and treats */
258 /* most others as normal. */
259 STATIC void GC_ignore_self_finalize_mark_proc(ptr_t p)
262 word descr = hhdr -> hb_descr;
266 ptr_t target_limit = p + hhdr -> hb_sz - 1;
268 if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
269 scan_limit = p + descr - sizeof(word);
271 scan_limit = target_limit + 1 - sizeof(word);
273 for (q = p; q <= scan_limit; q += ALIGNMENT) {
274 r = *(word *)q;
275 if ((ptr_t)r < p || (ptr_t)r > target_limit) {
276 GC_PUSH_ONE_HEAP(r, q);
282 STATIC void GC_null_finalize_mark_proc(ptr_t p) {}
287 /* GC_unreachable_finalize_mark_proc is an alias for normal marking, */
288 /* but it is explicitly tested for, and triggers different */
289 /* behavior. Objects registered in this way are not finalized */
290 /* if they are reachable by other finalizable objects, even if those */
291 /* other objects specify no ordering. */
292 STATIC void GC_unreachable_finalize_mark_proc(ptr_t p)
294 GC_normal_finalize_mark_proc(p);
297 /* Register a finalization function. See gc.h for details. */
298 /* The last parameter is a procedure that determines */
299 /* marking for finalization ordering. Any objects marked */
300 /* by that procedure will be guaranteed to not have been */
301 /* finalized when this finalizer is invoked. */
302 STATIC void GC_register_finalizer_inner(void * obj,
303 GC_finalization_proc fn, void *cd,
304 GC_finalization_proc *ofn, void **ocd,
305 finalization_mark_proc mp)
308 struct finalizable_object * curr_fo, * prev_fo;
310 struct finalizable_object *new_fo = 0;
311 hdr *hhdr = NULL; /* initialized to prevent warning. */
316 if (log_fo_table_size == -1
317 || GC_fo_entries > ((word)1 << log_fo_table_size)) {
318 GC_grow_table((struct hash_chain_entry ***)(&fo_head),
319 &log_fo_table_size);
320 if (GC_print_stats) {
321 GC_log_printf("Grew fo table to %u entries\n",
322 (1 << (unsigned)log_fo_table_size));
325 /* In the THREADS case we hold the allocation lock. */
328 index = HASH2(base, log_fo_table_size);
329 prev_fo = 0; curr_fo = fo_head[index];
330 while (curr_fo != 0) {
331 GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
332 if (curr_fo -> fo_hidden_base == GC_HIDE_POINTER(base)) {
333 /* Interruption by a signal in the middle of this */
334 /* should be safe. The client may see only *ocd */
335 /* updated, but we'll declare that to be his problem. */
336 if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
337 if (ofn) *ofn = curr_fo -> fo_fn;
338 /* Delete the structure for base. */
340 fo_head[index] = fo_next(curr_fo);
342 fo_set_next(prev_fo, fo_next(curr_fo));
346 /* May not happen if we get a signal. But a high */
347 /* estimate will only make the table larger than */
348 /* necessary. */
349 # if !defined(THREADS) && !defined(DBG_HDRS_ALL)
350 GC_free((void *)curr_fo);
353 curr_fo -> fo_fn = fn;
354 curr_fo -> fo_client_data = (ptr_t)cd;
355 curr_fo -> fo_mark_proc = mp;
356 /* Reinsert it. We deleted it first to maintain */
357 /* consistency in the event of a signal. */
359 fo_head[index] = curr_fo;
361 fo_set_next(prev_fo, curr_fo);
365 # ifndef DBG_HDRS_ALL
366 if (EXPECT(new_fo != 0, FALSE)) {
367 /* Free unused new_fo returned by GC_oom_fn() */
368 GC_free((void *)new_fo);
374 curr_fo = fo_next(curr_fo);
376 if (EXPECT(new_fo != 0, FALSE)) {
377 /* new_fo is returned by GC_oom_fn(), so fn != 0 and hhdr != 0. */
387 if (EXPECT(0 == hhdr, FALSE)) {
388 /* We won't collect it, hence finalizer wouldn't be run. */
394 new_fo = (struct finalizable_object *)
395 GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
396 if (EXPECT(new_fo != 0, TRUE))
400 new_fo = (struct finalizable_object *)
401 (*oom_fn)(sizeof(struct finalizable_object));
403 /* Not enough memory; *ocd and *ofn remain unchanged. */
406 /* It's not likely we'll make it here, but ... */
408 /* Recalculate index since the table may grow and */
409 /* check again that our finalizer is not in the table. */
411 GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
414 new_fo -> fo_hidden_base = GC_HIDE_POINTER(base);
415 new_fo -> fo_fn = fn;
416 new_fo -> fo_client_data = (ptr_t)cd;
417 new_fo -> fo_object_size = hhdr -> hb_sz;
418 new_fo -> fo_mark_proc = mp;
419 fo_set_next(new_fo, fo_head[index]);
421 fo_head[index] = new_fo;
425 GC_API void GC_CALL GC_register_finalizer(void * obj,
426 GC_finalization_proc fn, void * cd,
427 GC_finalization_proc *ofn, void ** ocd)
429 GC_register_finalizer_inner(obj, fn, cd, ofn,
430 ocd, GC_normal_finalize_mark_proc);
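/* Illustrative sketch (not part of the collector): registering a finalizer */
/* on a client object with the default, ordered semantics described above. */
/* Objects reachable from the node are guaranteed not to have been */
/* finalized yet when node_finalizer runs. Identifiers below are hypothetical. */
#if 0
  struct node { struct node *next; void *payload; };

  static void GC_CALLBACK node_finalizer(void *obj, void *client_data)
  {
    struct node *n = (struct node *)obj;
    /* With the default ordering, n -> next and n -> payload have not */
    /* been finalized yet at this point. */
    (void)n; (void)client_data;
  }

  static struct node *make_node(void)
  {
    struct node *n = (struct node *)GC_MALLOC(sizeof(struct node));
    GC_finalization_proc old_fn = 0;
    void *old_cd = 0;

    GC_register_finalizer(n, node_finalizer, /* client_data= */ 0,
                          &old_fn, &old_cd);
    return n;
  }
#endif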
433 GC_API void GC_CALL GC_register_finalizer_ignore_self(void * obj,
434 GC_finalization_proc fn, void * cd,
435 GC_finalization_proc *ofn, void ** ocd)
437 GC_register_finalizer_inner(obj, fn, cd, ofn,
438 ocd, GC_ignore_self_finalize_mark_proc);
441 GC_API void GC_CALL GC_register_finalizer_no_order(void * obj,
442 GC_finalization_proc fn, void * cd,
443 GC_finalization_proc *ofn, void ** ocd)
445 GC_register_finalizer_inner(obj, fn, cd, ofn,
446 ocd, GC_null_finalize_mark_proc);
449 static GC_bool need_unreachable_finalization = FALSE;
450 /* Avoid the work if this isn't used. */
452 GC_API void GC_CALL GC_register_finalizer_unreachable(void * obj,
453 GC_finalization_proc fn, void * cd,
454 GC_finalization_proc *ofn, void ** ocd)
456 need_unreachable_finalization = TRUE;
457 GC_ASSERT(GC_java_finalization);
458 GC_register_finalizer_inner(obj, fn, cd, ofn,
459 ocd, GC_unreachable_finalize_mark_proc);
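/* Illustrative sketch: the "unreachable" variant above is intended for */
/* Java-style weak processing; cleanup runs only once the object is not */
/* reachable even from other finalizable objects (and, per the assertion, */
/* it presumes GC_java_finalization). Identifiers below are hypothetical. */
#if 0
  static void GC_CALLBACK unreachable_cleanup(void *obj, void *client_data)
  {
    /* Unlike GC_register_finalizer_no_order(), this is never invoked while */
    /* obj is still reachable from another object awaiting finalization. */
    (void)obj; (void)client_data;
  }

  static void watch_object(void *obj)
  {
    GC_register_finalizer_unreachable(obj, unreachable_cleanup, 0, 0, 0);
  }
#endif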
463 void GC_dump_finalization(void)
465 struct disappearing_link * curr_dl;
466 struct finalizable_object * curr_fo;
467 ptr_t real_ptr, real_link;
468 int dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
469 int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
472 GC_printf("Disappearing links:\n");
473 for (i = 0; i < dl_size; i++) {
474 for (curr_dl = dl_head[i]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
475 real_ptr = GC_REVEAL_POINTER(curr_dl -> dl_hidden_obj);
476 real_link = GC_REVEAL_POINTER(curr_dl -> dl_hidden_link);
477 GC_printf("Object: %p, Link:%p\n", real_ptr, real_link);
480 GC_printf("Finalizers:\n");
481 for (i = 0; i < fo_size; i++) {
482 for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
483 real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
484 GC_printf("Finalizable object: %p\n", real_ptr);
491 STATIC word GC_old_dl_entries = 0; /* for stats printing */
495 /* Defined in pthread_support.c or win32_threads.c. Called with the */
496 /* allocation lock held. */
497 GC_INNER void GC_reset_finalizer_nested(void);
498 GC_INNER unsigned *GC_check_finalizer_nested(void);
500 /* Global variables to minimize the level of recursion when a client */
501 /* finalizer allocates memory. */
502 STATIC unsigned GC_finalizer_nested = 0;
503 STATIC unsigned GC_finalizer_skipped = 0;
505 /* Checks and updates the level of finalizer recursion. */
506 /* Returns NULL if GC_invoke_finalizers() should not be called by the */
507 /* collector (to minimize the risk of a deep finalizer recursion), */
508 /* otherwise returns a pointer to GC_finalizer_nested. */
509 STATIC unsigned *GC_check_finalizer_nested(void)
511 unsigned nesting_level = GC_finalizer_nested;
513 /* We are inside another GC_invoke_finalizers(). */
514 /* Skip some implicitly-called GC_invoke_finalizers() */
515 /* depending on the nesting (recursion) level. */
516 if (++GC_finalizer_skipped < (1U << nesting_level)) return NULL;
517 GC_finalizer_skipped = 0;
519 GC_finalizer_nested = nesting_level + 1;
520 return &GC_finalizer_nested;
524 /* Called with the allocation lock held (but the world is running). */
525 /* Cause disappearing links to disappear and unreachable objects to be */
526 /* enqueued for finalization. */
527 GC_INNER void GC_finalize(void)
529 struct disappearing_link * curr_dl, * prev_dl, * next_dl;
530 struct finalizable_object * curr_fo, * prev_fo, * next_fo;
531 ptr_t real_ptr, real_link;
533 size_t dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
534 size_t fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
536 # ifndef SMALL_CONFIG
537 /* Save current GC_dl_entries value for stats printing */
538 GC_old_dl_entries = GC_dl_entries;
541 /* Make disappearing links disappear */
542 for (i = 0; i < dl_size; i++) {
543 curr_dl = dl_head[i];
545 while (curr_dl != 0) {
546 real_ptr = GC_REVEAL_POINTER(curr_dl -> dl_hidden_obj);
547 real_link = GC_REVEAL_POINTER(curr_dl -> dl_hidden_link);
548 if (!GC_is_marked(real_ptr)) {
549 *(word *)real_link = 0;
550 next_dl = dl_next(curr_dl);
552 dl_head[i] = next_dl;
554 dl_set_next(prev_dl, next_dl);
556 GC_clear_mark_bit((ptr_t)curr_dl);
561 curr_dl = dl_next(curr_dl);
565 /* Mark all objects reachable via chains of 1 or more pointers */
566 /* from finalizable objects. */
567 GC_ASSERT(GC_mark_state == MS_NONE);
568 for (i = 0; i < fo_size; i++) {
569 for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
570 GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
571 real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
572 if (!GC_is_marked(real_ptr)) {
573 GC_MARKED_FOR_FINALIZATION(real_ptr);
574 GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
575 if (GC_is_marked(real_ptr)) {
576 WARN("Finalization cycle involving %p\n", real_ptr);
581 /* Enqueue for finalization all objects that are still */
582 /* unreachable. */
583 GC_bytes_finalized = 0;
584 for (i = 0; i < fo_size; i++) {
585 curr_fo = fo_head[i];
587 while (curr_fo != 0) {
588 real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
589 if (!GC_is_marked(real_ptr)) {
590 if (!GC_java_finalization) {
591 GC_set_mark_bit(real_ptr);
593 /* Delete from hash table */
594 next_fo = fo_next(curr_fo);
596 fo_head[i] = next_fo;
598 fo_set_next(prev_fo, next_fo);
601 /* Add to list of objects awaiting finalization. */
602 fo_set_next(curr_fo, GC_finalize_now);
603 GC_finalize_now = curr_fo;
604 /* unhide object pointer so any future collections will */
605 /* see it. */
606 curr_fo -> fo_hidden_base =
607 (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
608 GC_bytes_finalized +=
609 curr_fo -> fo_object_size
610 + sizeof(struct finalizable_object);
611 GC_ASSERT(GC_is_marked(GC_base((ptr_t)curr_fo)));
615 curr_fo = fo_next(curr_fo);
620 if (GC_java_finalization) {
621 /* make sure we mark everything reachable from objects finalized
622 using the no_order mark_proc */
623 for (curr_fo = GC_finalize_now;
624 curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
625 real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
626 if (!GC_is_marked(real_ptr)) {
627 if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
628 GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
630 if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
631 GC_set_mark_bit(real_ptr);
636 /* now revive finalize-when-unreachable objects reachable from
637 other finalizable objects */
638 if (need_unreachable_finalization) {
639 curr_fo = GC_finalize_now;
641 while (curr_fo != 0) {
642 next_fo = fo_next(curr_fo);
643 if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
644 real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
645 if (!GC_is_marked(real_ptr)) {
646 GC_set_mark_bit(real_ptr);
649 GC_finalize_now = next_fo;
651 fo_set_next(prev_fo, next_fo);
653 curr_fo -> fo_hidden_base =
654 GC_HIDE_POINTER(curr_fo -> fo_hidden_base);
655 GC_bytes_finalized -=
656 curr_fo->fo_object_size + sizeof(struct finalizable_object);
658 i = HASH2(real_ptr, log_fo_table_size);
659 fo_set_next (curr_fo, fo_head[i]);
661 fo_head[i] = curr_fo;
671 /* Remove dangling disappearing links. */
672 for (i = 0; i < dl_size; i++) {
673 curr_dl = dl_head[i];
675 while (curr_dl != 0) {
676 real_link = GC_base(GC_REVEAL_POINTER(curr_dl -> dl_hidden_link));
677 if (real_link != 0 && !GC_is_marked(real_link)) {
678 next_dl = dl_next(curr_dl);
680 dl_head[i] = next_dl;
682 dl_set_next(prev_dl, next_dl);
684 GC_clear_mark_bit((ptr_t)curr_dl);
689 curr_dl = dl_next(curr_dl);
694 /* Don't prevent running finalizers if there has been an allocation */
695 /* failure recently. */
696 # ifdef THREADS
697 GC_reset_finalizer_nested();
698 # else
699 GC_finalizer_nested = 0;
700 # endif
704 #ifndef JAVA_FINALIZATION_NOT_NEEDED
706 /* Enqueue all remaining finalizers to be run - Assumes lock is held. */
707 STATIC void GC_enqueue_all_finalizers(void)
709 struct finalizable_object * curr_fo, * prev_fo, * next_fo;
714 fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
715 GC_bytes_finalized = 0;
716 for (i = 0; i < fo_size; i++) {
717 curr_fo = fo_head[i];
719 while (curr_fo != 0) {
720 real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
721 GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
722 GC_set_mark_bit(real_ptr);
724 /* Delete from hash table */
725 next_fo = fo_next(curr_fo);
727 fo_head[i] = next_fo;
729 fo_set_next(prev_fo, next_fo);
733 /* Add to list of objects awaiting finalization. */
734 fo_set_next(curr_fo, GC_finalize_now);
735 GC_finalize_now = curr_fo;
737 /* unhide object pointer so any future collections will */
738 /* see it. */
739 curr_fo -> fo_hidden_base =
740 (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
741 GC_bytes_finalized +=
742 curr_fo -> fo_object_size + sizeof(struct finalizable_object);
748 /* Invoke all remaining finalizers that haven't yet been run.
749 * This is needed for strict compliance with the Java standard,
750 * which can make the runtime guarantee that all finalizers are run.
751 * Unfortunately, the Java standard implies we have to keep running
752 * finalizers until there are no more left, a potential infinite loop.
754 * Note that this is even more dangerous than the usual Java
755 * finalizers, in that objects reachable from static variables
756 * may have been finalized when these finalizers are run.
757 * Finalizers run at this point must be prepared to deal with a
758 * mostly broken world.
759 * This routine is externally callable, so is called without
760 * the allocation lock.
762 GC_API void GC_CALL GC_finalize_all(void)
767 while (GC_fo_entries > 0) {
768 GC_enqueue_all_finalizers();
770 GC_invoke_finalizers();
771 /* Running the finalizers in this thread is arguably not a good */
772 /* idea when we should be notifying another thread to run them. */
773 /* But otherwise we don't have a great way to wait for them to */
774 /* have been run. */
780 #endif /* !JAVA_FINALIZATION_NOT_NEEDED */
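/* Illustrative sketch: a Java-style runtime might call GC_finalize_all() */
/* from its shutdown path to honor "run finalizers on exit" semantics. As */
/* noted above, finalizers invoked this way must tolerate a mostly broken */
/* world. The function name runtime_shutdown is hypothetical. */
#if 0
  static void runtime_shutdown(void)
  {
    GC_gcollect();      /* queue finalizers for already-unreachable objects */
    GC_finalize_all();  /* then force the remaining ones, reachable or not */
  }
#endif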
782 /* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
783 /* finalizers can only be called from some kind of `safe state' and */
784 /* getting into that safe state is expensive.) */
785 GC_API int GC_CALL GC_should_invoke_finalizers(void)
787 return GC_finalize_now != 0;
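/* Illustrative sketch: an embedder that can only run finalizers at */
/* well-defined "safe points" can poll cheaply and invoke them explicitly. */
/* The function name at_safe_point is hypothetical. */
#if 0
  static void at_safe_point(void)
  {
    if (GC_should_invoke_finalizers()) {
      int n = GC_invoke_finalizers();  /* returns the number of finalizers run */
      (void)n;
    }
  }
#endif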
790 /* Invoke finalizers for all objects that are ready to be finalized. */
791 /* Should be called without allocation lock. */
792 GC_API int GC_CALL GC_invoke_finalizers(void)
794 struct finalizable_object * curr_fo;
796 word bytes_freed_before = 0; /* initialized to prevent warning. */
799 while (GC_finalize_now != 0) {
804 bytes_freed_before = GC_bytes_freed;
805 /* Don't do this outside, since we need the lock. */
807 curr_fo = GC_finalize_now;
809 if (curr_fo != 0) GC_finalize_now = fo_next(curr_fo);
811 if (curr_fo == 0) break;
813 GC_finalize_now = fo_next(curr_fo);
815 fo_set_next(curr_fo, 0);
816 (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
817 curr_fo -> fo_client_data);
818 curr_fo -> fo_client_data = 0;
821 /* This is probably a bad idea. It throws off accounting if */
822 /* nearly all objects are finalizable. Otherwise it shouldn't */
823 /* matter. */
824 GC_free((void *)curr_fo);
827 /* bytes_freed_before is initialized whenever count != 0 */
828 if (count != 0 && bytes_freed_before != GC_bytes_freed) {
830 GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
836 /* All accesses to GC_finalizer_notifier should be synchronized to avoid data races. */
837 GC_finalizer_notifier_proc GC_finalizer_notifier =
838 (GC_finalizer_notifier_proc)0;
840 static GC_word last_finalizer_notification = 0;
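/* Illustrative sketch: with finalization on demand the collector never */
/* runs finalizers itself; it only calls the notifier (at most once per */
/* collection while objects are queued), so the client can schedule */
/* GC_invoke_finalizers() in a context of its choosing. The identifiers */
/* below are hypothetical, and the direct assignments assume this older */
/* variable-based API rather than any setter functions. */
#if 0
  static void GC_CALLBACK ready_notifier(void)
  {
    /* E.g., wake a dedicated finalizer thread here. */
  }

  static void setup_finalization(void)
  {
    GC_finalize_on_demand = 1;              /* do not run finalizers implicitly */
    GC_finalizer_notifier = ready_notifier; /* called when work is queued */
  }
#endif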
842 GC_INNER void GC_notify_or_invoke_finalizers(void)
844 GC_finalizer_notifier_proc notifier_fn = 0;
845 # if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
846 static word last_back_trace_gc_no = 1; /* Skip first one. */
847 # elif defined(THREADS)
848 /* Quick check (while unlocked) for an empty finalization queue. */
849 if (GC_finalize_now == 0) return;
853 /* This is a convenient place to generate backtraces if appropriate, */
854 /* since that code is not callable with the allocation lock. */
855 # if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
856 if (GC_gc_no > last_back_trace_gc_no) {
857 # ifdef KEEP_BACK_PTRS
859 /* Stops when GC_gc_no wraps; that's OK. */
860 last_back_trace_gc_no = (word)(-1); /* disable others. */
861 for (i = 0; i < GC_backtraces; ++i) {
862 /* FIXME: This tolerates concurrent heap mutation, */
863 /* which may cause occasional mysterious results. */
864 /* We need to release the GC lock, since GC_print_callers */
865 /* acquires it. It probably shouldn't. */
867 GC_generate_random_backtrace_no_gc();
870 last_back_trace_gc_no = GC_gc_no;
872 # ifdef MAKE_BACK_GRAPH
873 if (GC_print_back_height) {
875 GC_print_back_graph_stats();
881 if (GC_finalize_now == 0) {
886 if (!GC_finalize_on_demand) {
887 unsigned *pnested = GC_check_finalizer_nested();
889 /* Skip GC_invoke_finalizers() if nested */
890 if (pnested != NULL) {
891 (void) GC_invoke_finalizers();
892 *pnested = 0; /* Reset since no more finalizers. */
894 GC_ASSERT(GC_finalize_now == 0);
895 # endif /* Otherwise GC can run concurrently and add more */
900 /* These variables require synchronization to avoid data races. */
901 if (last_finalizer_notification != GC_gc_no) {
902 last_finalizer_notification = GC_gc_no;
903 notifier_fn = GC_finalizer_notifier;
906 if (notifier_fn != 0)
907 (*notifier_fn)(); /* Invoke the notifier */
910 GC_API void * GC_CALL GC_call_with_alloc_lock(GC_fn_type fn,
918 /* FIXME - This looks wrong!! */
921 result = (*fn)(client_data);
923 # ifndef GC_ASSERTIONS
925 # endif /* o.w. UNLOCK() does it implicitly */
932 GC_INNER void GC_print_finalization_stats(void)
934 struct finalizable_object *fo = GC_finalize_now;
935 unsigned long ready = 0;
937 GC_log_printf(
938 "%lu finalization table entries; %lu disappearing links alive\n",
939 (unsigned long)GC_fo_entries, (unsigned long)GC_dl_entries);
940 for (; 0 != fo; fo = fo_next(fo)) ++ready;
941 GC_log_printf("%lu objects are eligible for immediate finalization; "
942 "%ld links cleared\n",
943 ready, (long)GC_old_dl_entries - (long)GC_dl_entries);
945 #endif /* SMALL_CONFIG */