2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 * Copyright 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright 1999 by Hewlett-Packard Company. All rights reserved.
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
18 * Note that this defines a large number of tuning hooks, which can
19 * safely be ignored in nearly all cases. For normal use it suffices
20 * to call only GC_MALLOC and perhaps GC_REALLOC.
21 * For better performance, also look at GC_MALLOC_ATOMIC, and
22 * GC_enable_incremental. If you need an action to be performed
23 * immediately before an object is collected, look at GC_register_finalizer.
24 * If you are using Solaris threads, look at the end of this file.
25 * Everything else is best ignored unless you encounter performance problems.
33 # include "gc_config_macros.h"
35 # if defined(__STDC__) || defined(__cplusplus) || defined(_AIX)
36 # define GC_PROTO(args) args
37 typedef void * GC_PTR;
38 # define GC_CONST const
40 # define GC_PROTO(args) ()
41 typedef char * GC_PTR;
50 /* Define word and signed_word to be unsigned and signed types of the */
51 /* same size as char * or void *. There seems to be no way to do this */
52 /* even semi-portably. The following is probably no better/worse */
53 /* than almost anything else. */
54 /* The ANSI standard suggests that size_t and ptrdiff_t might be */
55 /* better choices. But those had incorrect definitions on some older */
56 /* systems. Notably "typedef int size_t" is WRONG. */
58 typedef unsigned long GC_word;
59 typedef long GC_signed_word;
61 /* Win64 isn't really supported yet, but this is the first step. And */
62 /* it might cause error messages to show up in more plausible places. */
63 /* This needs basetsd.h, which is included by windows.h. */
64 typedef ULONG_PTR GC_word;
65 typedef LONG_PTR GC_signed_word;
68 /* Public read-only variables */
70 GC_API GC_word GC_gc_no;/* Counter incremented per collection. */
71 /* Includes empty GCs at startup. */
73 GC_API int GC_parallel; /* GC is parallelized for performance on */
74 /* multiprocessors. Currently set only */
75 /* implicitly if collector is built with */
76 /* -DPARALLEL_MARK and if either: */
77 /* Env variable GC_NPROC is set to > 1, or */
78 /* GC_NPROC is not set and this is an MP. */
79 /* If GC_parallel is set, incremental */
80 /* collection is only partially functional, */
81 /* and may not be desirable. */
84 /* Public R/W variables */
86 GC_API GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested));
87 /* When there is insufficient memory to satisfy */
88 /* an allocation request, we return */
89 /* (*GC_oom_fn)(). By default this just */
91 /* If it returns, it must return 0 or a valid */
92 /* pointer to a previously allocated heap object. */
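/*
 * An illustrative sketch only (not part of the interface): installing a
 * client out-of-memory handler.  "my_oom_handler" is a hypothetical
 * client function, and <stdio.h> is assumed to have been included; per
 * the contract above it must return 0 or a valid pointer to a
 * previously allocated heap object.
 *
 *      static GC_PTR my_oom_handler(size_t bytes_requested)
 *      {
 *          fprintf(stderr, "GC: could not satisfy a %lu byte request\n",
 *                  (unsigned long)bytes_requested);
 *          return 0;
 *      }
 *
 *      GC_oom_fn = my_oom_handler;
 */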
99 GC_EVENT_RECLAIM_START,
100 GC_EVENT_RECLAIM_END,
104 GC_API void (*GC_notify_event) GC_PROTO((GCEventType event_type));
105 /* Invoked at specific points during every collection.
108 GC_API void (*GC_on_heap_resize) GC_PROTO((size_t new_size));
109 /* Invoked when the heap grows or shrinks */
111 GC_API int GC_find_leak;
112 /* Do not actually garbage collect, but simply */
113 /* report inaccessible memory that was not */
114 /* deallocated with GC_free. Initial value */
115 /* is determined by FIND_LEAK macro. */
117 GC_API int GC_all_interior_pointers;
118 /* Arrange for pointers to object interiors to */
119 /* be recognized as valid. May not be changed */
120 /* after GC initialization. */
121 /* Initial value is determined by */
122 /* -DALL_INTERIOR_POINTERS. */
123 /* Unless DONT_ADD_BYTE_AT_END is defined, this */
124 /* also affects whether sizes are increased by */
125 /* at least a byte to allow "off the end" */
126 /* pointer recognition. */
127 /* MUST BE 0 or 1. */
129 GC_API int GC_quiet; /* Disable statistics output. Only matters if */
130 /* collector has been compiled with statistics */
131 /* enabled. This involves a performance cost, */
132 /* and is thus not the default. */
134 GC_API int GC_finalize_on_demand;
135 /* If nonzero, finalizers will only be run in */
136 /* response to an explicit GC_invoke_finalizers */
137 /* call. The default is determined by whether */
138 /* the FINALIZE_ON_DEMAND macro is defined */
139 /* when the collector is built. */
141 GC_API int GC_java_finalization;
142 /* Mark objects reachable from finalizable */
143 /* objects in a separate postpass. This makes */
144 /* it a bit safer to use non-topologically- */
145 /* ordered finalization. Default value is */
146 /* determined by JAVA_FINALIZATION macro. */
148 GC_API void (* GC_finalizer_notifier)(void);
149 /* Invoked by the collector when there are */
150 /* objects to be finalized. Invoked at most */
151 /* once per GC cycle. Never invoked unless */
152 /* GC_finalize_on_demand is set. */
153 /* Typically this will notify a finalization */
154 /* thread, which will call GC_invoke_finalizers */
157 GC_API int GC_dont_gc; /* != 0 ==> Don't collect. In versions 6.2a1+, */
158 /* this overrides explicit GC_gcollect() calls. */
159 /* Used as a counter, so that nested enabling */
160 /* and disabling work correctly. Should */
161 /* normally be updated with GC_enable() and */
162 /* GC_disable() calls. */
163 /* Direct assignment to GC_dont_gc is */
166 GC_API int GC_dont_expand;
167 /* Don't expand heap unless explicitly requested */
170 GC_API int GC_use_entire_heap;
171 /* Causes the nonincremental collector to use the */
172 /* entire heap before collecting. This was the only */
173 /* option for GC versions < 5.0. This sometimes */
174 /* results in more large block fragmentation, since */
175 /* very large blocks will tend to get broken up */
176 /* during each GC cycle. It is likely to result in a */
177 /* larger working set, but lower collection */
178 /* frequencies, and hence fewer instructions executed */
179 /* in the collector. */
181 GC_API int GC_full_freq; /* Number of partial collections between */
182 /* full collections. Matters only if */
183 /* GC_incremental is set. */
184 /* Full collections are also triggered if */
185 /* the collector detects a substantial */
186 /* increase in the number of in-use heap */
187 /* blocks. Values in the tens are now */
188 /* perfectly reasonable, unlike for */
189 /* earlier GC versions. */
191 GC_API GC_word GC_non_gc_bytes;
192 /* Bytes not considered candidates for collection. */
193 /* Used only to control scheduling of collections. */
194 /* Updated by GC_malloc_uncollectable and GC_free. */
197 GC_API int GC_no_dls;
198 /* Don't register dynamic library data segments. */
199 /* Wizards only. Should be used only if the */
200 /* application explicitly registers all roots. */
201 /* In Microsoft Windows environments, this will */
202 /* usually also prevent registration of the */
203 /* main data segment as part of the root set. */
205 GC_API GC_word GC_free_space_divisor;
206 /* We try to make sure that we allocate at */
207 /* least N/GC_free_space_divisor bytes between */
208 /* collections, where N is the heap size plus */
209 /* a rough estimate of the root set size. */
210 /* Initially, GC_free_space_divisor = 3. */
211 /* Increasing its value will use less space */
212 /* but more collection time. Decreasing it */
213 /* will appreciably decrease collection time */
214 /* at the expense of space. */
215 /* GC_free_space_divisor = 1 will effectively */
216 /* disable collections. */
218 GC_API GC_word GC_max_retries;
219 /* The maximum number of GCs attempted before */
220 /* reporting out of memory after heap */
221 /* expansion fails. Initially 0. */
224 GC_API char *GC_stackbottom; /* Cool end of user stack. */
225 /* May be set in the client prior to */
226 /* calling any GC_ routines. This */
227 /* avoids some overhead, and */
228 /* potentially some signals that can */
229 /* confuse debuggers. Otherwise the */
230 /* collector attempts to set it */
232 /* For multithreaded code, this is the */
233 /* cold end of the stack for the */
234 /* primordial thread. */
236 GC_API int GC_dont_precollect; /* Don't collect as part of */
237 /* initialization. Should be set only */
238 /* if the client wants a chance to */
239 /* manually initialize the root set */
240 /* before the first collection. */
241 /* Interferes with blacklisting. */
244 GC_API unsigned long GC_time_limit;
245 /* If incremental collection is enabled, */
246 /* we try to terminate collections */
247 /* after this many milliseconds. Not a */
248 /* hard time bound. Setting this to */
249 /* GC_TIME_UNLIMITED will essentially */
250 /* disable incremental collection while */
251 /* leaving generational collection enabled. */
253 # define GC_TIME_UNLIMITED 999999
254 /* Setting GC_time_limit to this value */
255 /* will disable the "pause time exceeded"*/
258 /* Public procedures */
260 /* Initialize the collector. This is only required when using thread-local
261 * allocation, since unlike the regular allocation routines, GC_local_malloc
262 * is not self-initializing. If you use GC_local_malloc you should arrange
263 * to call this somehow (e.g. from a constructor) before doing any allocation.
264 * For win32 threads, it needs to be called explicitly.
266 GC_API void GC_init GC_PROTO((void));
269 * general purpose allocation routines, with roughly malloc calling conv.
270 * The atomic versions promise that no relevant pointers are contained
271 * in the object. The nonatomic versions guarantee that the new object
272 * is cleared. GC_malloc_stubborn promises that no changes to the object
273 * will occur after GC_end_stubborn_change has been called on the
274 * result of GC_malloc_stubborn. GC_malloc_uncollectable allocates an object
275 * that is scanned for pointers to collectable objects, but is not itself
276 * collectable. The object is scanned even if it does not appear to
277 * be reachable. GC_malloc_uncollectable and GC_free called on the resulting
278 * object implicitly update GC_non_gc_bytes appropriately.
280 * Note that the GC_malloc_stubborn support is stubbed out by default
281 * starting in 6.0. GC_malloc_stubborn is an alias for GC_malloc unless
282 * the collector is built with STUBBORN_ALLOC defined.
284 GC_API GC_PTR GC_malloc GC_PROTO((size_t size_in_bytes));
285 GC_API GC_PTR GC_malloc_atomic GC_PROTO((size_t size_in_bytes));
286 GC_API GC_PTR GC_malloc_uncollectable GC_PROTO((size_t size_in_bytes));
287 GC_API GC_PTR GC_malloc_stubborn GC_PROTO((size_t size_in_bytes));
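/*
 * An illustrative sketch of the basic allocators described above; the
 * "node" type is hypothetical.  GC_malloc returns cleared memory that
 * is scanned for pointers, while GC_malloc_atomic returns uncleared
 * memory that the collector never scans, e.g. for character data.
 * Neither object ever requires an explicit free.
 *
 *      struct node { struct node *next; int value; };
 *
 *      struct node *n = (struct node *)GC_malloc(sizeof(struct node));
 *      char *buf = (char *)GC_malloc_atomic(1024);
 *      n->value = 42;
 *
 * Here n->next is already 0, since GC_malloc clears the object; buf is
 * neither cleared nor scanned.
 */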
289 /* The following is only defined if the library has been suitably compiled. */
291 GC_API GC_PTR GC_malloc_atomic_uncollectable GC_PROTO((size_t size_in_bytes));
293 /* Explicitly deallocate an object. Dangerous if used incorrectly. */
294 /* Requires a pointer to the base of an object. */
295 /* If the argument is stubborn, it should not be changeable when freed. */
296 /* An object should not be enabled for finalization when it is */
297 /* explicitly deallocated. */
298 /* GC_free(0) is a no-op, as required by ANSI C for free. */
299 GC_API void GC_free GC_PROTO((GC_PTR object_addr));
302 * Stubborn objects may be changed only if the collector is explicitly informed.
303 * The collector is implicitly informed of coming change when such
304 * an object is first allocated. The following routines inform the
305 * collector that an object will no longer be changed, or that it will
306 * once again be changed. Only nonNIL pointer stores into the object
307 * are considered to be changes. The argument to GC_end_stubborn_change
308 * must be exactly the value returned by GC_malloc_stubborn or passed to
309 * GC_change_stubborn. (In the second case it may be an interior pointer
310 * within 512 bytes of the beginning of the object.)
311 * There is a performance penalty for allowing more than
312 * one stubborn object to be changed at once, but it is acceptable to
313 * do so. The same applies to dropping stubborn objects that are still
316 GC_API void GC_change_stubborn GC_PROTO((GC_PTR));
317 GC_API void GC_end_stubborn_change GC_PROTO((GC_PTR));
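/*
 * An illustrative sketch of the stubborn protocol described above
 * (remember that GC_malloc_stubborn is simply GC_malloc unless the
 * collector was built with STUBBORN_ALLOC):
 *
 *      GC_PTR *v = (GC_PTR *)GC_malloc_stubborn(10 * sizeof(GC_PTR));
 *      ... fill in v[0..9] ...
 *      GC_end_stubborn_change((GC_PTR)v);
 *
 * A later modification reopens and then closes the object again:
 *
 *      GC_change_stubborn((GC_PTR)v);
 *      v[3] = GC_malloc(100);
 *      GC_end_stubborn_change((GC_PTR)v);
 */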
319 /* Return a pointer to the base (lowest address) of an object given */
320 /* a pointer to a location within the object. */
321 /* I.e. map an interior pointer to the corresponding base pointer. */
322 /* Note that with debugging allocation, this returns a pointer to the */
323 /* actual base of the object, i.e. the debug information, not to */
324 /* the base of the user object. */
325 /* Return 0 if displaced_pointer doesn't point to within a valid object. */
327 /* Note that a deallocated object in the garbage collected heap */
328 /* may be considered valid, even if it has been deallocated with GC_free. */
330 GC_API GC_PTR GC_base GC_PROTO((GC_PTR displaced_pointer));
332 /* Given a pointer to the base of an object, return its size in bytes. */
333 /* The returned size may be slightly larger than what was originally requested. */
335 GC_API size_t GC_size GC_PROTO((GC_PTR object_addr));
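/*
 * An illustrative sketch only: mapping an interior pointer back to the
 * start of its object and querying the object's size.
 *
 *      char *obj = (char *)GC_malloc(100);
 *      char *interior = obj + 17;
 *      GC_PTR base = GC_base((GC_PTR)interior);   yields obj here
 *      size_t len = GC_size(base);                at least 100
 */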
337 /* For compatibility with C library. This is occasionally faster than */
338 /* a malloc followed by a bcopy. But if you rely on that, either here */
339 /* or with the standard C library, your code is broken. In my */
340 /* opinion, it shouldn't have been invented, but now we're stuck. -HB */
341 /* The resulting object has the same kind as the original. */
342 /* If the argument is stubborn, the result will have changes enabled. */
343 /* It is an error to have changes enabled for the original object. */
344 /* Follows ANSI conventions for NULL old_object. */
345 GC_API GC_PTR GC_realloc
346 GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes));
348 /* Explicitly increase the heap size. */
349 /* Returns 0 on failure, 1 on success. */
350 GC_API int GC_expand_hp GC_PROTO((size_t number_of_bytes));
352 /* Limit the heap size to n bytes. Useful when you're debugging, */
353 /* especially on systems that don't handle running out of memory well. */
354 /* n == 0 ==> unbounded. This is the default. */
355 GC_API void GC_set_max_heap_size GC_PROTO((GC_word n));
357 /* Inform the collector that a certain section of statically allocated */
358 /* memory contains no pointers to garbage collected memory. Thus it */
359 /* need not be scanned. This is sometimes important if the application */
360 /* maps large read/write files into the address space, which could be */
361 /* mistaken for dynamic library data segments on some systems. */
362 GC_API void GC_exclude_static_roots GC_PROTO((GC_PTR start, GC_PTR finish));
364 /* Clear the set of root segments. Wizards only. */
365 GC_API void GC_clear_roots GC_PROTO((void));
367 /* Add a root segment. Wizards only. */
368 GC_API void GC_add_roots GC_PROTO((char * low_address,
369 char * high_address_plus_1));
371 /* Remove a root segment. Wizards only. */
372 GC_API void GC_remove_roots GC_PROTO((char * low_address,
373 char * high_address_plus_1));
375 /* Add a displacement to the set of those considered valid by the */
376 /* collector. GC_register_displacement(n) means that if p was returned */
377 /* by GC_malloc, then (char *)p + n will be considered to be a valid */
378 /* pointer to p. N must be small and less than the size of p. */
379 /* (All pointers to the interior of objects from the stack are */
380 /* considered valid in any case. This applies to heap objects and */
382 /* Preferably, this should be called before any other GC procedures. */
383 /* Calling it later adds to the probability of excess memory retention. */
385 /* This is a no-op if the collector has recognition of */
386 /* arbitrary interior pointers enabled, which is now the default. */
387 GC_API void GC_register_displacement GC_PROTO((GC_word n));
389 /* The following version should be used if any debugging allocation is */
391 GC_API void GC_debug_register_displacement GC_PROTO((GC_word n));
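/*
 * An illustrative sketch only: if client objects are referenced (from
 * the heap or static data) through pointers that skip a hypothetical
 * header, register that offset early so the displaced pointers keep
 * the objects live.
 *
 *      struct hdr { size_t len; };
 *
 *      GC_register_displacement(sizeof(struct hdr));
 *      ...
 *      char *payload = (char *)GC_malloc(sizeof(struct hdr) + 100)
 *                          + sizeof(struct hdr);
 */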
393 /* Explicitly trigger a full, world-stop collection. */
394 GC_API void GC_gcollect GC_PROTO((void));
396 /* Trigger a full world-stopped collection. Abort the collection if */
397 /* and when stop_func returns a nonzero value. Stop_func will be */
398 /* called frequently, and should be reasonably fast. This works even */
399 /* if virtual dirty bits, and hence incremental collection, are not */
400 /* available for this architecture. A collection can be aborted in */
401 /* less time than a normal incremental collection pause. However, */
402 /* aborted collections do no useful work; the next collection needs */
403 /* to start from the beginning. */
404 /* Return 0 if the collection was aborted, 1 if it succeeded. */
405 typedef int (* GC_stop_func) GC_PROTO((void));
406 GC_API int GC_try_to_collect GC_PROTO((GC_stop_func stop_func));
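/*
 * An illustrative sketch only: bound the time spent in an explicit
 * collection with a deadline-based stop function.  Assumes <time.h>;
 * the names are hypothetical.
 *
 *      static clock_t gc_deadline;
 *
 *      static int deadline_reached(void)
 *      {
 *          return clock() > gc_deadline;
 *      }
 *
 *      gc_deadline = clock() + CLOCKS_PER_SEC / 20;
 *      if (!GC_try_to_collect(deadline_reached)) {
 *          ... the collection was aborted and did no useful work ...
 *      }
 */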
408 /* Return the number of bytes in the heap. Excludes collector private */
409 /* data structures. Includes empty blocks and fragmentation loss. */
410 /* Includes some pages that were allocated but never written. */
411 GC_API size_t GC_get_heap_size GC_PROTO((void));
413 /* Return a lower bound on the number of free bytes in the heap. */
414 GC_API size_t GC_get_free_bytes GC_PROTO((void));
416 /* Return the number of bytes allocated since the last collection. */
417 GC_API size_t GC_get_bytes_since_gc GC_PROTO((void));
419 /* Return the total number of bytes allocated in this process. */
420 /* Never decreases, except due to wrapping. */
421 GC_API size_t GC_get_total_bytes GC_PROTO((void));
423 /* Disable garbage collection. Even GC_gcollect calls will be */
425 GC_API void GC_disable GC_PROTO((void));
427 /* Reenable garbage collection. GC_disable() and GC_enable() calls */
428 /* nest. Garbage collection is enabled if the number of calls to */
429 /* both functions is equal. */
430 GC_API void GC_enable GC_PROTO((void));
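/*
 * An illustrative sketch only: because GC_disable/GC_enable nest, a
 * region in which a collection pause would be unacceptable can be
 * bracketed safely even if called code also disables and reenables
 * the collector.
 *
 *      GC_disable();
 *      ... time-critical or reentrancy-sensitive work ...
 *      GC_enable();
 */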
432 /* Enable incremental/generational collection. */
433 /* Not advisable unless dirty bits are */
434 /* available or most heap objects are */
435 /* pointer-free (atomic) or immutable. */
436 /* Don't use in leak finding mode. */
437 /* Ignored if GC_dont_gc is true. */
438 /* Only the generational piece of this is */
439 /* functional if GC_parallel is TRUE */
440 /* or if GC_time_limit is GC_TIME_UNLIMITED. */
441 /* Causes GC_local_gcj_malloc() to revert to */
442 /* locked allocation. Must be called */
443 /* before any GC_local_gcj_malloc() calls. */
444 GC_API void GC_enable_incremental GC_PROTO((void));
446 /* Does incremental mode write-protect pages? Returns zero or */
447 /* more of the following, or'ed together: */
448 #define GC_PROTECTS_POINTER_HEAP 1 /* May protect non-atomic objs. */
449 #define GC_PROTECTS_PTRFREE_HEAP 2
450 #define GC_PROTECTS_STATIC_DATA 4 /* Currently never. */
451 #define GC_PROTECTS_STACK 8 /* Probably impractical. */
453 #define GC_PROTECTS_NONE 0
454 GC_API int GC_incremental_protection_needs GC_PROTO((void));
456 /* Perform some garbage collection work, if appropriate. */
457 /* Return 0 if there is no more work to be done. */
458 /* Typically performs an amount of work corresponding roughly */
459 /* to marking from one page. May do more work if further */
460 /* progress requires it, e.g. if incremental collection is */
461 /* disabled. It is reasonable to call this in a wait loop */
462 /* until it returns 0. */
463 GC_API int GC_collect_a_little GC_PROTO((void));
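/*
 * An illustrative sketch only: enable incremental collection once near
 * startup, then perform small increments of GC work from an idle loop,
 * as suggested above.  "application_is_idle" is a hypothetical client
 * predicate.
 *
 *      GC_enable_incremental();
 *      ...
 *      while (application_is_idle() && GC_collect_a_little())
 *          ;    each call does roughly a page's worth of marking
 */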
465 /* Allocate an object of size lb bytes. The client guarantees that */
466 /* as long as the object is live, it will be referenced by a pointer */
467 /* that points to somewhere within the first 256 bytes of the object. */
468 /* (This should normally be declared volatile to prevent the compiler */
469 /* from invalidating this assertion.) This routine is only useful */
470 /* if a large array is being allocated. It reduces the chance of */
471 /* accidentally retaining such an array as a result of scanning an */
472 /* integer that happens to be an address inside the array. (Actually, */
473 /* it reduces the chance of the allocator not finding space for such */
474 /* an array, since it will try hard to avoid introducing such a false */
475 /* reference.) On a SunOS 4.X or MS Windows system this is recommended */
476 /* for arrays likely to be larger than 100K or so. For other systems, */
477 /* or if the collector is not configured to recognize all interior */
478 /* pointers, the threshold is normally much higher. */
479 GC_API GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb));
480 GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
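/*
 * An illustrative sketch only: for a very large array, keep a volatile
 * pointer to the start of the object, as required above, and allocate
 * it so that stray interior references into its middle are ignored.
 *
 *      static char * volatile big_buffer;
 *
 *      big_buffer = (char *)GC_malloc_ignore_off_page(10*1024*1024);
 *      ... big_buffer must continue to point at the start of the
 *          object for as long as the array is in use ...
 */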
482 #if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720
483 # define GC_ADD_CALLER
484 # define GC_RETURN_ADDR (GC_word)__return_address
488 # include <features.h>
489 # if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 1 || __GLIBC__ > 2) \
490 && !defined(__ia64__)
491 # ifndef GC_HAVE_BUILTIN_BACKTRACE
492 # define GC_HAVE_BUILTIN_BACKTRACE
495 # if defined(__i386__) || defined(__x86_64__)
496 # define GC_CAN_SAVE_CALL_STACKS
500 #if defined(GC_HAVE_BUILTIN_BACKTRACE) && !defined(GC_CAN_SAVE_CALL_STACKS)
501 # define GC_CAN_SAVE_CALL_STACKS
504 #if defined(__sparc__)
505 # define GC_CAN_SAVE_CALL_STACKS
508 /* If we're on a platform on which we can't save call stacks, but */
509 /* gcc is normally used, we go ahead and define GC_ADD_CALLER. */
510 /* We make this decision independent of whether gcc is actually being */
511 /* used, in order to keep the interface consistent, and allow mixing */
513 /* This may also be desirable if it is possible but expensive to */
514 /* retrieve the call chain. */
515 #if (defined(__linux__) || defined(__NetBSD__) || defined(__OpenBSD__) \
516 || defined(__FreeBSD__)) && !defined(GC_CAN_SAVE_CALL_STACKS)
517 # define GC_ADD_CALLER
518 # if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 95)
519 /* gcc knows how to retrieve return address, but we don't know */
520 /* how to generate call stacks. */
521 # define GC_RETURN_ADDR (GC_word)__builtin_return_address(0)
523 /* Just pass 0 for gcc compatibility. */
524 # define GC_RETURN_ADDR 0
529 # define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
530 # define GC_EXTRA_PARAMS GC_word ra, GC_CONST char * s, int i
532 # define GC_EXTRAS __FILE__, __LINE__
533 # define GC_EXTRA_PARAMS GC_CONST char * s, int i
536 /* Debugging (annotated) allocation. GC_gcollect will check */
537 /* objects allocated in this way for overwrites, etc. */
538 GC_API GC_PTR GC_debug_malloc
539 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
540 GC_API GC_PTR GC_debug_malloc_atomic
541 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
542 GC_API GC_PTR GC_debug_malloc_uncollectable
543 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
544 GC_API GC_PTR GC_debug_malloc_stubborn
545 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
546 GC_API GC_PTR GC_debug_malloc_ignore_off_page
547 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
548 GC_API GC_PTR GC_debug_malloc_atomic_ignore_off_page
549 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
550 GC_API void GC_debug_free GC_PROTO((GC_PTR object_addr));
551 GC_API GC_PTR GC_debug_realloc
552 GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes,
554 GC_API void GC_debug_change_stubborn GC_PROTO((GC_PTR));
555 GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
557 /* Routines that allocate objects with debug information (like the */
558 /* above), but just fill in dummy file and line number information. */
559 /* Thus they can serve as drop-in malloc/realloc replacements. This */
560 /* can be useful for two reasons: */
561 /* 1) It allows the collector to be built with DBG_HDRS_ALL defined */
562 /* even if some allocation calls come from 3rd party libraries */
563 /* that can't be recompiled. */
564 /* 2) On some platforms, the file and line information is redundant, */
565 /* since it can be reconstructed from a stack trace. On such */
566 /* platforms it may be more convenient not to recompile, e.g. for */
567 /* leak detection. This can be accomplished by instructing the */
568 /* linker to replace malloc/realloc with these. */
569 GC_API GC_PTR GC_debug_malloc_replacement GC_PROTO((size_t size_in_bytes));
570 GC_API GC_PTR GC_debug_realloc_replacement
571 GC_PROTO((GC_PTR object_addr, size_t size_in_bytes));
574 # define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
575 # define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS)
576 # define GC_MALLOC_UNCOLLECTABLE(sz) \
577 GC_debug_malloc_uncollectable(sz, GC_EXTRAS)
578 # define GC_MALLOC_IGNORE_OFF_PAGE(sz) \
579 GC_debug_malloc_ignore_off_page(sz, GC_EXTRAS)
580 # define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz) \
581 GC_debug_malloc_atomic_ignore_off_page(sz, GC_EXTRAS)
582 # define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS)
583 # define GC_FREE(p) GC_debug_free(p)
584 # define GC_REGISTER_FINALIZER(p, f, d, of, od) \
585 GC_debug_register_finalizer(p, f, d, of, od)
586 # define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
587 GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
588 # define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
589 GC_debug_register_finalizer_no_order(p, f, d, of, od)
590 # define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS)
591 # define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
592 # define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
593 # define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
594 GC_general_register_disappearing_link(link, GC_base(obj))
595 # define GC_REGISTER_DISPLACEMENT(n) GC_debug_register_displacement(n)
597 # define GC_MALLOC(sz) GC_malloc(sz)
598 # define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz)
599 # define GC_MALLOC_UNCOLLECTABLE(sz) GC_malloc_uncollectable(sz)
600 # define GC_MALLOC_IGNORE_OFF_PAGE(sz) \
601 GC_malloc_ignore_off_page(sz)
602 # define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz) \
603 GC_malloc_atomic_ignore_off_page(sz)
604 # define GC_REALLOC(old, sz) GC_realloc(old, sz)
605 # define GC_FREE(p) GC_free(p)
606 # define GC_REGISTER_FINALIZER(p, f, d, of, od) \
607 GC_register_finalizer(p, f, d, of, od)
608 # define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
609 GC_register_finalizer_ignore_self(p, f, d, of, od)
610 # define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
611 GC_register_finalizer_no_order(p, f, d, of, od)
612 # define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
613 # define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
614 # define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
615 # define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
616 GC_general_register_disappearing_link(link, obj)
617 # define GC_REGISTER_DISPLACEMENT(n) GC_register_displacement(n)
619 /* The following are included because they are often convenient, and */
620 /* reduce the chance for a misspecified size argument. But calls may */
621 /* expand to something syntactically incorrect if t is a complicated */
622 /* type expression. */
623 # define GC_NEW(t) (t *)GC_MALLOC(sizeof (t))
624 # define GC_NEW_ATOMIC(t) (t *)GC_MALLOC_ATOMIC(sizeof (t))
625 # define GC_NEW_STUBBORN(t) (t *)GC_MALLOC_STUBBORN(sizeof (t))
626 # define GC_NEW_UNCOLLECTABLE(t) (t *)GC_MALLOC_UNCOLLECTABLE(sizeof (t))
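/*
 * An illustrative sketch only: typed allocation through the
 * convenience macros.  If GC_DEBUG is defined before this header is
 * included, the same code transparently uses the checked debug
 * allocators.
 *
 *      typedef struct pair { GC_PTR car; GC_PTR cdr; } pair;
 *
 *      pair *p = GC_NEW(pair);               cleared, scanned for pointers
 *      double *d = GC_NEW_ATOMIC(double);    uncleared, never scanned
 *      *d = 3.14;
 *      p->car = (GC_PTR)d;
 */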
628 /* Finalization. Some of these primitives are grossly unsafe. */
629 /* The idea is to make them both cheap, and sufficient to build */
630 /* a safer layer, closer to Modula-3, Java, or PCedar finalization. */
631 /* The interface represents my conclusions from a long discussion */
632 /* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes, */
633 /* Christian Jacobi, and Russ Atkinson. It's not perfect, and */
634 /* probably nobody else agrees with it. Hans-J. Boehm 3/13/92 */
635 typedef void (*GC_finalization_proc)
636 GC_PROTO((GC_PTR obj, GC_PTR client_data));
638 GC_API void GC_register_finalizer
639 GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
640 GC_finalization_proc *ofn, GC_PTR *ocd));
641 GC_API void GC_debug_register_finalizer
642 GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
643 GC_finalization_proc *ofn, GC_PTR *ocd));
644 /* When obj is no longer accessible, invoke */
645 /* (*fn)(obj, cd). If a and b are inaccessible, and */
646 /* a points to b (after disappearing links have been */
647 /* made to disappear), then only a will be */
648 /* finalized. (If this does not create any new */
649 /* pointers to b, then b will be finalized after the */
650 /* next collection.) Any finalizable object that */
651 /* is reachable from itself by following one or more */
652 /* pointers will not be finalized (or collected). */
653 /* Thus cycles involving finalizable objects should */
654 /* be avoided, or broken by disappearing links. */
655 /* All but the last finalizer registered for an object is ignored. */
657 /* Finalization may be removed by passing 0 as fn. */
658 /* Finalizers are implicitly unregistered just before */
659 /* they are invoked. */
660 /* The old finalizer and client data are stored in *ofn and *ocd. */
662 /* Fn is never invoked on an accessible object, */
663 /* provided hidden pointers are converted to real */
664 /* pointers only if the allocation lock is held, and */
665 /* such conversions are not performed by finalization */
667 /* If GC_register_finalizer is aborted as a result of */
668 /* a signal, the object may be left with no */
669 /* finalization, even if neither the old nor new */
670 /* finalizer were NULL. */
671 /* Obj should be the nonNULL starting address of an */
672 /* object allocated by GC_malloc or friends. */
673 /* Note that any garbage collectable object referenced */
674 /* by cd will be considered accessible until the */
675 /* finalizer is invoked. */
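/*
 * An illustrative sketch only: releasing a non-memory resource from a
 * finalizer.  The wrapper type and file name are hypothetical, and
 * <stdio.h> is assumed.  Finalizers run some time after the object
 * becomes unreachable, so this is not a substitute for prompt cleanup.
 *
 *      struct wrapped_file { FILE *fp; };
 *
 *      static void close_file(GC_PTR obj, GC_PTR client_data)
 *      {
 *          struct wrapped_file *w = (struct wrapped_file *)obj;
 *          if (w->fp != 0) fclose(w->fp);
 *      }
 *
 *      struct wrapped_file *w =
 *          (struct wrapped_file *)GC_MALLOC(sizeof(struct wrapped_file));
 *      w->fp = fopen("example.log", "w");
 *      GC_REGISTER_FINALIZER((GC_PTR)w, close_file, 0, 0, 0);
 */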
677 /* Another version of the above follows. It ignores */
678 /* self-cycles, i.e. pointers from a finalizable object to */
679 /* itself. There is a stylistic argument that this is wrong, */
680 /* but it's unavoidable for C++, since the compiler may */
681 /* silently introduce these. It's also benign in that specific */
682 /* case. And it helps if finalizable objects are split to */
684 /* Note that cd will still be viewed as accessible, even if it */
685 /* refers to the object itself. */
686 GC_API void GC_register_finalizer_ignore_self
687 GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
688 GC_finalization_proc *ofn, GC_PTR *ocd));
689 GC_API void GC_debug_register_finalizer_ignore_self
690 GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
691 GC_finalization_proc *ofn, GC_PTR *ocd));
693 /* Another version of the above. It ignores all cycles. */
694 /* It should probably only be used by Java implementations. */
695 /* Note that cd will still be viewed as accessible, even if it */
696 /* refers to the object itself. */
697 GC_API void GC_register_finalizer_no_order
698 GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
699 GC_finalization_proc *ofn, GC_PTR *ocd));
700 GC_API void GC_debug_register_finalizer_no_order
701 GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
702 GC_finalization_proc *ofn, GC_PTR *ocd));
705 /* The following routine may be used to break cycles between */
706 /* finalizable objects, thus causing cyclic finalizable */
707 /* objects to be finalized in the correct order. Standard */
708 /* use involves calling GC_register_disappearing_link(&p), */
709 /* where p is a pointer that is not followed by finalization */
710 /* code, and should not be considered in determining */
711 /* finalization order. */
712 GC_API int GC_register_disappearing_link GC_PROTO((GC_PTR * /* link */));
713 /* Link should point to a field of a heap allocated */
714 /* object obj. *link will be cleared when obj is */
715 /* found to be inaccessible. This happens BEFORE any */
716 /* finalization code is invoked, and BEFORE any */
717 /* decisions about finalization order are made. */
718 /* This is useful in telling the finalizer that */
719 /* some pointers are not essential for proper */
720 /* finalization. This may avoid finalization cycles. */
721 /* Note that obj may be resurrected by another */
722 /* finalizer, and thus the clearing of *link may */
723 /* be visible to non-finalization code. */
724 /* There's an argument that an arbitrary action should */
725 /* be allowed here, instead of just clearing a pointer. */
726 /* But this causes problems if that action alters, or */
727 /* examines connectivity. */
728 /* Returns 1 if link was already registered, 0 otherwise. */
730 /* Only exists for backward compatibility. See below: */
732 GC_API int GC_general_register_disappearing_link
733 GC_PROTO((GC_PTR * /* link */, GC_PTR obj));
734 /* A slight generalization of the above. *link is */
735 /* cleared when obj first becomes inaccessible. This */
736 /* can be used to implement weak pointers easily and */
737 /* safely. Typically link will point to a location */
738 /* holding a disguised pointer to obj. (A pointer */
739 /* inside an "atomic" object is effectively */
740 /* disguised.) In this way soft */
741 /* pointers are broken before any object */
742 /* reachable from them is finalized. Each link */
743 /* may be registered only once, i.e. with one obj */
744 /* value. This was added after a long email discussion */
745 /* with John Ellis. */
746 /* Obj must be a pointer to the first word of an object */
747 /* we allocated. It is unsafe to explicitly deallocate */
748 /* the object containing link. Explicitly deallocating */
749 /* obj may or may not cause link to eventually be */
751 GC_API int GC_unregister_disappearing_link GC_PROTO((GC_PTR * /* link */));
752 /* Returns 0 if link was not actually registered. */
753 /* Undoes a registration by either of the above two routines. */
756 /* Returns !=0 if GC_invoke_finalizers has something to do. */
757 GC_API int GC_should_invoke_finalizers GC_PROTO((void));
759 GC_API int GC_invoke_finalizers GC_PROTO((void));
760 /* Run finalizers for all objects that are ready to */
761 /* be finalized. Return the number of finalizers */
762 /* that were run. Normally this is also called */
763 /* implicitly during some allocations. If */
764 /* GC_finalize_on_demand is nonzero, it must be called explicitly. */
767 /* GC_set_warn_proc can be used to redirect or filter warning messages. */
768 /* p may not be a NULL pointer. */
769 typedef void (*GC_warn_proc) GC_PROTO((char *msg, GC_word arg));
770 GC_API GC_warn_proc GC_set_warn_proc GC_PROTO((GC_warn_proc p));
771 /* Returns old warning procedure. */
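/*
 * An illustrative sketch only: filtering collector warnings.  As with
 * the default handler, msg is assumed to be a printf-style format that
 * consumes the single word argument; "MYAPP_GC_VERBOSE" is a
 * hypothetical environment variable, and <stdio.h>/<stdlib.h> are
 * assumed.
 *
 *      static void quiet_warn_proc(char *msg, GC_word arg)
 *      {
 *          if (getenv("MYAPP_GC_VERBOSE") != 0)
 *              fprintf(stderr, msg, (unsigned long)arg);
 *      }
 *
 *      GC_warn_proc old_proc = GC_set_warn_proc(quiet_warn_proc);
 */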
773 GC_API GC_word GC_set_free_space_divisor GC_PROTO((GC_word value));
774 /* Set free_space_divisor. See above for definition. */
775 /* Returns old value. */
777 /* The following is intended to be used by a higher level */
778 /* (e.g. Java-like) finalization facility. It is expected */
779 /* that finalization code will arrange for hidden pointers to */
780 /* disappear. Otherwise objects can be accessed after they */
781 /* have been collected. */
782 /* Note that putting pointers in atomic objects or in */
783 /* nonpointer slots of "typed" objects is equivalent to */
784 /* disguising them in this way, and may have other advantages. */
785 # if defined(I_HIDE_POINTERS) || defined(GC_I_HIDE_POINTERS)
786 typedef GC_word GC_hidden_pointer;
787 # define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
788 # define REVEAL_POINTER(p) ((GC_PTR)(HIDE_POINTER(p)))
789 /* Converting a hidden pointer to a real pointer requires verifying */
790 /* that the object still exists. This involves acquiring the */
791 /* allocator lock to avoid a race with the collector. */
792 # endif /* I_HIDE_POINTERS */
794 typedef GC_PTR (*GC_fn_type) GC_PROTO((GC_PTR client_data));
795 GC_API GC_PTR GC_call_with_alloc_lock
796 GC_PROTO((GC_fn_type fn, GC_PTR client_data));
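/*
 * An illustrative sketch only: a weak reference built from a hidden
 * pointer, a disappearing link, and GC_call_with_alloc_lock, along the
 * lines suggested above.  Assumes GC_I_HIDE_POINTERS was defined before
 * this header was included, so HIDE_POINTER/REVEAL_POINTER exist; all
 * other names are hypothetical.
 *
 *      typedef struct weak_ref { GC_hidden_pointer hidden; } weak_ref;
 *
 *      static GC_PTR reveal_locked(GC_PTR client_data)
 *      {
 *          weak_ref *r = (weak_ref *)client_data;
 *          return r->hidden != 0 ? REVEAL_POINTER(r->hidden) : 0;
 *      }
 *
 *      weak_ref *make_weak_ref(GC_PTR obj)
 *      {
 *          weak_ref *r = (weak_ref *)GC_malloc_atomic(sizeof(weak_ref));
 *          r->hidden = HIDE_POINTER(obj);
 *          GC_general_register_disappearing_link((GC_PTR *)&r->hidden, obj);
 *          return r;
 *      }
 *
 *      GC_PTR weak_ref_get(weak_ref *r)
 *      {
 *          return GC_call_with_alloc_lock(reveal_locked, (GC_PTR)r);
 *      }
 */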
798 /* The following routines are primarily intended for use with a */
799 /* preprocessor which inserts calls to check C pointer arithmetic. */
800 /* They indicate failure by invoking the corresponding _print_proc. */
802 /* Check that p and q point to the same object. */
803 /* Fail conspicuously if they don't. */
804 /* Returns the first argument. */
805 /* Succeeds if neither p nor q points to the heap. */
806 /* May succeed if both p and q point to addresses between heap objects. */
807 GC_API GC_PTR GC_same_obj GC_PROTO((GC_PTR p, GC_PTR q));
809 /* Checked pointer pre- and post- increment operations. Note that */
810 /* the second argument is in units of bytes, not multiples of the */
811 /* object size. This should either be invoked from a macro, or the */
812 /* call should be automatically generated. */
813 GC_API GC_PTR GC_pre_incr GC_PROTO((GC_PTR *p, size_t how_much));
814 GC_API GC_PTR GC_post_incr GC_PROTO((GC_PTR *p, size_t how_much));
816 /* Check that p is visible */
817 /* to the collector as a possibly-pointer-containing location. */
818 /* If it isn't, fail conspicuously. */
819 /* Returns the argument in all cases. May erroneously succeed */
820 /* in hard cases. (This is intended for debugging use with */
821 /* untyped allocations. The idea is that it should be possible, though */
822 /* slow, to add such a call to all indirect pointer stores.) */
823 /* Currently useless for multithreaded worlds. */
824 GC_API GC_PTR GC_is_visible GC_PROTO((GC_PTR p));
826 /* Check that if p is a pointer to a heap page, then it points to */
827 /* a valid displacement within a heap object. */
828 /* Fail conspicuously if this property does not hold. */
829 /* Uninteresting with GC_all_interior_pointers. */
830 /* Always returns its argument. */
831 GC_API GC_PTR GC_is_valid_displacement GC_PROTO((GC_PTR p));
833 /* Returns 1 if the calling thread is registered with the GC, 0 otherwise */
834 GC_API int GC_thread_is_registered GC_PROTO((void));
836 /* Safer, but slow, pointer addition. Probably useful mainly with */
837 /* a preprocessor. Useful only for heap pointers. */
839 # define GC_PTR_ADD3(x, n, type_of_result) \
840 ((type_of_result)GC_same_obj((x)+(n), (x)))
841 # define GC_PRE_INCR3(x, n, type_of_result) \
842 ((type_of_result)GC_pre_incr(&(x), (n)*sizeof(*x)))
843 # define GC_POST_INCR2(x, type_of_result) \
844 ((type_of_result)GC_post_incr(&(x), sizeof(*x)))
846 # define GC_PTR_ADD(x, n) \
847 GC_PTR_ADD3(x, n, typeof(x))
848 # define GC_PRE_INCR(x, n) \
849 GC_PRE_INCR3(x, n, typeof(x))
850 # define GC_POST_INCR(x, n) \
851 GC_POST_INCR2(x, typeof(x))
853 /* We can't do this right without typeof, which ANSI */
854 /* decided was not sufficiently useful. Repeatedly */
855 /* mentioning the arguments seems too dangerous to be */
856 /* useful, and so does leaving the result uncast. */
857 # define GC_PTR_ADD(x, n) ((x)+(n))
859 #else /* !GC_DEBUG */
860 # define GC_PTR_ADD3(x, n, type_of_result) ((x)+(n))
861 # define GC_PTR_ADD(x, n) ((x)+(n))
862 # define GC_PRE_INCR3(x, n, type_of_result) ((x) += (n))
863 # define GC_PRE_INCR(x, n) ((x) += (n))
864 # define GC_POST_INCR2(x, type_of_result) ((x)++)
865 # define GC_POST_INCR(x, n) ((x)++)
868 /* Safer assignment of a pointer to a nonstack location. */
870 # if defined(__STDC__) || defined(_AIX)
871 # define GC_PTR_STORE(p, q) \
872 (*(void **)GC_is_visible(p) = GC_is_valid_displacement(q))
874 # define GC_PTR_STORE(p, q) \
875 (*(char **)GC_is_visible(p) = GC_is_valid_displacement(q))
877 #else /* !GC_DEBUG */
878 # define GC_PTR_STORE(p, q) (*(p) = (q))
881 /* Functions called to report pointer checking errors */
882 GC_API void (*GC_same_obj_print_proc) GC_PROTO((GC_PTR p, GC_PTR q));
884 GC_API void (*GC_is_valid_displacement_print_proc)
885 GC_PROTO((GC_PTR p));
887 GC_API void (*GC_is_visible_print_proc)
888 GC_PROTO((GC_PTR p));
890 /* For pthread support, we generally need to intercept a number of */
891 /* thread library calls. We do that here by macro defining them. */
893 #if !defined(GC_USE_LD_WRAP) && \
894 (defined(GC_PTHREADS) || defined(GC_SOLARIS_THREADS) || defined(GC_DARWIN_THREADS) || defined(GC_MACOSX_THREADS))
895 #if defined(_IN_LIBGC) || defined(USE_INCLUDED_LIBGC)
896 # include "gc_pthread_redirects.h"
898 # include <gc/gc_pthread_redirects.h>
902 # if defined(PCR) || defined(GC_SOLARIS_THREADS) || \
903 defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
904 /* Any flavor of threads except SRC_M3. */
905 /* This returns a list of objects, linked through their first */
906 /* word. Its use can greatly reduce lock contention problems, since */
907 /* the allocation lock can be acquired and released many fewer times. */
908 /* lb must be large enough to hold the pointer field. */
909 /* It is used internally by gc_local_alloc.h, which provides a simpler */
910 /* programming interface on Linux. */
911 GC_PTR GC_malloc_many(size_t lb);
912 #define GC_NEXT(p) (*(GC_PTR *)(p)) /* Retrieve the next element */
913 /* in returned list. */
914 extern void GC_thr_init(void); /* Needed for Solaris/X86 */
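/*
 * An illustrative sketch only: carve a batch of equally sized objects
 * out of a single GC_malloc_many call, reducing allocation-lock
 * traffic as described above.  "struct node" and "use_node" are
 * hypothetical client names; the element size must be at least large
 * enough to hold the link pointer.
 *
 *      GC_PTR batch = GC_malloc_many(sizeof(struct node));
 *      while (batch != 0) {
 *          GC_PTR next = GC_NEXT(batch);
 *          GC_NEXT(batch) = 0;
 *          use_node((struct node *)batch);
 *          batch = next;
 *      }
 */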
916 #endif /* THREADS && !SRC_M3 */
918 #if defined(GC_WIN32_THREADS) && !defined(__CYGWIN32__) && !defined(__CYGWIN__)
919 # include <windows.h>
922 * All threads must be created using GC_CreateThread, so that they will be
923 * recorded in the thread table. For backwards compatibility, this is not
924 * technically true if the GC is built as a dynamic library, since it can
925 * and does then use DllMain to keep track of thread creations. But new code
926 * should be built to call GC_CreateThread.
928 GC_API HANDLE WINAPI GC_CreateThread(
929 LPSECURITY_ATTRIBUTES lpThreadAttributes,
930 DWORD dwStackSize, LPTHREAD_START_ROUTINE lpStartAddress,
931 LPVOID lpParameter, DWORD dwCreationFlags, LPDWORD lpThreadId );
933 # if defined(_WIN32_WCE)
935 * win32_threads.c implements the real WinMain, which will start a new thread
936 * to call GC_WinMain after initializing the garbage collector.
938 int WINAPI GC_WinMain(
940 HINSTANCE hPrevInstance,
945 # define WinMain GC_WinMain
946 # define CreateThread GC_CreateThread
948 # endif /* defined(_WIN32_WCE) */
950 #endif /* defined(GC_WIN32_THREADS) && !cygwin */
953 * Fully portable code should call GC_INIT() from the main program
954 * before making any other GC_ calls. On most platforms this is a
955 * no-op and the collector self-initializes. But a number of platforms
956 * make that too hard.
958 #if (defined(sparc) || defined(__sparc)) && defined(sun)
960 * If you are planning on putting
961 * the collector in a SunOS 5 dynamic library, you need to call GC_INIT()
962 * from the statically loaded program section.
963 * This circumvents a Solaris 2.X (X<=4) linker bug.
965 # define GC_INIT() { extern end, etext; \
966 GC_noop(&end, &etext); }
968 # if defined(__CYGWIN32__) || defined (_AIX)
970 * Similarly gnu-win32 DLLs need explicit initialization from
971 * the main program, as does AIX.
974 extern int _data_start__[];
975 extern int _data_end__[];
976 extern int _bss_start__[];
977 extern int _bss_end__[];
978 # define GC_MAX(x,y) ((x) > (y) ? (x) : (y))
979 # define GC_MIN(x,y) ((x) < (y) ? (x) : (y))
980 # define GC_DATASTART ((GC_PTR) GC_MIN(_data_start__, _bss_start__))
981 # define GC_DATAEND ((GC_PTR) GC_MAX(_data_end__, _bss_end__))
983 # define GC_INIT() { GC_add_roots(GC_DATASTART, GC_DATAEND); }
989 extern int _data[], _end[];
990 # define GC_DATASTART ((GC_PTR)((ulong)_data))
991 # define GC_DATAEND ((GC_PTR)((ulong)_end))
992 # define GC_INIT() { GC_add_roots(GC_DATASTART, GC_DATAEND); }
995 # if defined(__APPLE__) && defined(__MACH__) || defined(GC_WIN32_THREADS)
996 # define GC_INIT() { GC_init(); }
999 # endif /* !__MACH && !GC_WIN32_THREADS */
1000 # endif /* !AIX && !cygwin */
1003 #if !defined(_WIN32_WCE) \
1004 && ((defined(_MSDOS) || defined(_MSC_VER)) && (_M_IX86 >= 300) \
1005 || defined(_WIN32) && !defined(__CYGWIN32__) && !defined(__CYGWIN__))
1006 /* Win32s may not free all resources on process exit. */
1007 /* This explicitly deallocates the heap. */
1008 GC_API void GC_win32_free_heap ();
1011 #if ( defined(_AMIGA) && !defined(GC_AMIGA_MAKINGLIB) )
1012 /* Allocation really goes through GC_amiga_allocwrapper_do */
1013 # include "gc_amiga_redirects.h"
1016 #if defined(GC_REDIRECT_TO_LOCAL) && !defined(GC_LOCAL_ALLOC_H)
1017 # include "gc_local_alloc.h"
1021 } /* end of extern "C" */