/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, July 31, 1995 5:02 pm PDT */
18 #include "private/gc_priv.h"
20 # ifdef STUBBORN_ALLOC
21 /* Stubborn object (hard to change, nearly immutable) allocation. */
23 extern ptr_t GC_clear_stack(); /* in misc.c, behaves like identity */
/* Allocate lb bytes of kind k, laundering the result through		*/
/* GC_clear_stack (identity, per the comment above) so stale copies	*/
/* of the pointer left on the stack are cleared.			*/
25 #define GENERAL_MALLOC(lb,k) \
26 (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
28 /* Data structure representing immutable objects that */
29 /* are still being initialized. */
30 /* This is a bit baroque in order to avoid acquiring */
31 /* the lock twice for a typical allocation. */
/* First slot of the changing list: a heap-allocated array of GC_PTR	*/
/* entries.  0 until GC_stubborn_init has run (see the "initializing"	*/
/* checks in GC_read_changed / GC_clean_changing_list below).		*/
33 GC_PTR * GC_changing_list_start;
35 void GC_push_stubborn_structures GC_PROTO((void))
37 GC_push_all((ptr_t)(&GC_changing_list_start),
38 (ptr_t)(&GC_changing_list_start) + sizeof(GC_PTR *));
/* NOTE(review): the two declarations below are presumably the THREADS	*/
/* and non-THREADS variants of the same variable, with the surrounding	*/
/* #ifdef THREADS / #else / #endif lines lost in extraction -- as	*/
/* shown this would be a duplicate definition; confirm upstream.	*/
42 VOLATILE GC_PTR * VOLATILE GC_changing_list_current;
44 GC_PTR * GC_changing_list_current;
46 /* Points at last added element. Also (ab)used for */
47 /* synchronization. Updates and reads are assumed atomic. */
49 GC_PTR * GC_changing_list_limit;
50 /* Points at the last word of the buffer, which is always 0 */
51 /* All entries in (GC_changing_list_current, */
52 /* GC_changing_list_limit] are 0 */
55 void GC_stubborn_init()
59 GC_changing_list_start = (GC_PTR *)
61 (word)(INIT_SIZE * sizeof(GC_PTR)),
63 BZERO(GC_changing_list_start,
64 INIT_SIZE * sizeof(GC_PTR));
65 if (GC_changing_list_start == 0) {
66 GC_err_printf0("Insufficient space to start up\n");
67 ABORT("GC_stubborn_init: put of space");
69 GC_changing_list_current = GC_changing_list_start;
70 GC_changing_list_limit = GC_changing_list_start + INIT_SIZE - 1;
71 * GC_changing_list_limit = 0;
74 /* Compact and possibly grow GC_uninit_list. The old copy is */
75 /* left alone. Lock must be held. */
76 /* When called GC_changing_list_current == GC_changing_list_limit */
77 /* which is one past the current element. */
78 /* When we finish GC_changing_list_current again points one past last */
80 /* Invariant while this is running: GC_changing_list_current */
81 /* points at a word containing 0. */
82 /* Returns FALSE on failure. */
83 GC_bool GC_compact_changing_list()
85 register GC_PTR *p, *q;
86 register word count = 0;
87 word old_size = (char **)GC_changing_list_limit
88 - (char **)GC_changing_list_start+1;
89 /* The casts are needed as a workaround for an Amiga bug */
90 register word new_size = old_size;
93 for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
96 if (2 * count > old_size) new_size = 2 * count;
99 new_size * sizeof(GC_PTR), PTRFREE);
100 /* PTRFREE is a lie. But we don't want the collector to */
101 /* consider these. We do want the list itself to be */
103 if (new_list == 0) return(FALSE);
104 BZERO(new_list, new_size * sizeof(GC_PTR));
106 for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
107 if (*p != 0) *q++ = *p;
109 GC_changing_list_start = new_list;
110 GC_changing_list_limit = new_list + new_size - 1;
111 GC_changing_list_current = q;
/* Add p to changing list.  Clear p on failure.				*/
/* Also marks the page containing p as changed so the marker will	*/
/* rescan it.  If the slot after the current one is occupied we		*/
/* advance; hitting the limit triggers compaction/growth.		*/
/* NOTE(review): the braces and blank continuation lines of this macro	*/
/* were dropped from this chunk and were restored from upstream		*/
/* stubborn.c -- confirm.						*/
# define ADD_CHANGING(p) \
	{	\
	    register struct hblk * h = HBLKPTR(p);	\
	    register word index = PHT_HASH(h);	\
	    \
	    set_pht_entry_from_index(GC_changed_pages, index);	\
	}	\
	if (*GC_changing_list_current != 0 \
	    && ++GC_changing_list_current == GC_changing_list_limit) { \
	    if (!GC_compact_changing_list()) (p) = 0; \
	} \
	*GC_changing_list_current = p;
/* Announce that the stubborn object p is about to be changed, so its	*/
/* page gets rescanned.  NOTE(review): the body (original lines		*/
/* 130-140) is not visible in this chunk; presumably it takes the	*/
/* allocation lock and invokes ADD_CHANGING(p) -- confirm upstream.	*/
129 void GC_change_stubborn(p)
/* Declare the stubborn object p fully initialized/immutable: remove	*/
/* it from the changing list.  Aborts if p is not on the list.		*/
/* NOTE(review): many interior lines of this function are missing from	*/
/* this chunk; comments below describe only the visible fragments.	*/
141 void GC_end_stubborn_change(p)
/* The two my_current declarations are presumably THREADS vs.		*/
/* non-THREADS variants with the #ifdef lines lost in extraction.	*/
145 register VOLATILE GC_PTR * my_current = GC_changing_list_current;
147 register GC_PTR * my_current = GC_changing_list_current;
149 register GC_bool tried_quick;
/* Fast path: if the most recently added entry is p, try to clear it	*/
/* without taking the lock first.					*/
152 if (*my_current == p) {
153 /* Hopefully the normal case. */
154 /* Compaction could not have been running when we started. */
157 if (my_current == GC_changing_list_current) {
158 /* Compaction can't have run in the interim. */
159 /* We got away with the quick and dirty approach. */
/* Slow path: rescan the whole list backwards for p.	*/
171 my_current = GC_changing_list_current;
172 for (; my_current >= GC_changing_list_start; my_current--) {
173 if (*my_current == p) {
/* p was never on the changing list: caller error.	*/
181 GC_err_printf1("Bad arg to GC_end_stubborn_change: 0x%lx\n",
183 ABORT("Bad arg to GC_end_stubborn_change");
189 /* Allocate lb bytes of composite (pointerful) data */
190 /* No pointer fields may be changed after a call to */
191 /* GC_end_stubborn_change(p) where p is the value */
192 /* returned by GC_malloc_stubborn. */
/* NOTE(review): the ANSI and K&R signatures below are presumably	*/
/* alternatives under a lost #ifdef __STDC__; the locking and		*/
/* free-list manipulation lines between the visible fragments are	*/
/* missing from this chunk.						*/
194 GC_PTR GC_malloc_stubborn(size_t lb)
196 GC_PTR GC_malloc_stubborn(lb)
/* Small objects: serve from the stubborn free list.	*/
206 if( SMALL_OBJ(lb) ) {
208 lw = GC_size_map[lb];
210 lw = ALIGNED_WORDS(lb);
212 opp = &(GC_sobjfreelist[lw]);
/* Fast lock failed or free list empty: fall back to the	*/
/* general allocator.						*/
214 if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
216 result = GC_generic_malloc((word)lb, STUBBORN);
221 GC_words_allocd += lw;
222 result = (GC_PTR) op;
/* Every stubborn object starts out on the changing list.	*/
223 ADD_CHANGING(result);
225 return((GC_PTR)result);
/* Large objects go straight to the general allocator.	*/
228 GC_generic_malloc((word)lb, STUBBORN);
233 ADD_CHANGING(result);
236 return((GC_PTR)GC_clear_stack(result));
240 /* Functions analogous to GC_read_dirty and GC_page_was_dirty. */
241 /* Report pages on which stubborn objects were changed. */
242 void GC_read_changed()
244 register GC_PTR * p = GC_changing_list_start;
246 register struct hblk * h;
249 if (p == 0) /* initializing */ return;
250 BCOPY(GC_changed_pages, GC_prev_changed_pages,
251 (sizeof GC_changed_pages));
252 BZERO(GC_changed_pages, (sizeof GC_changed_pages));
253 for (; p <= GC_changing_list_current; p++) {
257 set_pht_entry_from_index(GC_changed_pages, index);
262 GC_bool GC_page_was_changed(h)
265 register word index = PHT_HASH(h);
267 return(get_pht_entry_from_index(GC_prev_changed_pages, index));
270 /* Remove unreachable entries from changed list. Should only be */
271 /* called with mark bits consistent and lock held. */
272 void GC_clean_changing_list()
274 register GC_PTR * p = GC_changing_list_start;
277 register unsigned long count = 0;
278 register unsigned long dropped_count = 0;
280 if (p == 0) /* initializing */ return;
281 for (; p <= GC_changing_list_current; p++) {
284 r = (ptr_t)GC_base(q);
285 if (r == 0 || !GC_is_marked(r)) {
293 GC_printf2("%lu entries in changing list: reclaimed %lu\n",
294 (unsigned long)count, (unsigned long)dropped_count);
/* !STUBBORN_ALLOC: stub versions of the public entry points.  With	*/
/* stubborn allocation disabled, GC_malloc_stubborn is plain GC_malloc	*/
/* and the change-tracking calls do nothing.				*/
/* NOTE(review): the #ifdef __STDC__ alternatives and the (empty)	*/
/* function bodies fall on lines missing from this chunk; the last	*/
/* definition continues past the end of the visible region.		*/
299 #else /* !STUBBORN_ALLOC */
302 GC_PTR GC_malloc_stubborn(size_t lb)
304 GC_PTR GC_malloc_stubborn(lb)
/* No tracking: ordinary allocation.	*/
308 return(GC_malloc(lb));
312 void GC_end_stubborn_change(p)
318 void GC_change_stubborn(p)
323 void GC_push_stubborn_structures GC_PROTO((void))