-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2001 by Hewlett-Packard Company. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-
-# ifndef GC_PRIVATE_H
-# define GC_PRIVATE_H
+#ifndef GC_PRIVATE_H
+#define GC_PRIVATE_H
+
+#ifdef HAVE_CONFIG_H
+# include "private/config.h"
+#endif
+
+#ifndef GC_BUILD
+# define GC_BUILD
+#endif
-#if defined(mips) && defined(SYSTYPE_BSD) && defined(sony_news)
- /* sony RISC NEWS, NEWSOS 4 */
-# define BSD_TIME
-/* typedef long ptrdiff_t; -- necessary on some really old systems */
+#if (defined(__linux__) || defined(__GLIBC__) || defined(__GNU__)) \
+ && !defined(_GNU_SOURCE)
+ /* Can't test LINUX, since this must be defined before other includes. */
+# define _GNU_SOURCE
#endif
-#if defined(mips) && defined(SYSTYPE_BSD43)
- /* MIPS RISCOS 4 */
-# define BSD_TIME
+#if (defined(DGUX) && defined(GC_THREADS) || defined(DGUX386_THREADS) \
+ || defined(GC_DGUX386_THREADS)) && !defined(_USING_POSIX4A_DRAFT10)
+# define _USING_POSIX4A_DRAFT10 1
+#endif
+
+# if defined(NO_DEBUGGING) && !defined(GC_ASSERTIONS) && !defined(NDEBUG)
+ /* To turn off assertion checking (in atomic_ops.h). */
+# define NDEBUG 1
+# endif
+
+#ifndef GC_H
+# define GC_I_HIDE_POINTERS /* to get GC_HIDE_POINTER() and friends */
+# include "../gc.h"
+#endif
+
+#include <stdlib.h>
+#if !defined(sony_news)
+# include <stddef.h>
#endif
#ifdef DGUX
-# include <sys/types.h>
-# include <sys/time.h>
-# include <sys/resource.h>
+# include <sys/types.h>
+# include <sys/time.h>
+# include <sys/resource.h>
#endif /* DGUX */
#ifdef BSD_TIME
-# include <sys/types.h>
-# include <sys/time.h>
-# include <sys/resource.h>
+# include <sys/types.h>
+# include <sys/time.h>
+# include <sys/resource.h>
#endif /* BSD_TIME */
-# ifndef _GC_H
-# include "../gc.h"
+#ifdef PARALLEL_MARK
+# define AO_REQUIRE_CAS
+# if !defined(__GNUC__) && !defined(AO_ASSUME_WINDOWS98)
+# define AO_ASSUME_WINDOWS98
# endif
+#endif
-# ifndef GC_MARK_H
-# include "../gc_mark.h"
-# endif
+#ifndef GC_TINY_FL_H
+# include "../gc_tiny_fl.h"
+#endif
+
+#ifndef GC_MARK_H
+# include "../gc_mark.h"
+#endif
typedef GC_word word;
typedef GC_signed_word signed_word;
+typedef unsigned int unsigned32; /* assumed to be a 32-bit type */
typedef int GC_bool;
-# define TRUE 1
-# define FALSE 0
+#define TRUE 1
+#define FALSE 0
-typedef char * ptr_t; /* A generic pointer to which we can add */
- /* byte displacements. */
- /* Preferably identical to caddr_t, if it */
- /* exists. */
-
-# ifndef GCCONFIG_H
-# include "gcconfig.h"
-# endif
+typedef char * ptr_t; /* A generic pointer to which we can add */
+ /* byte displacements and which can be used */
+ /* for address comparisons. */
-# ifndef HEADERS_H
-# include "gc_hdrs.h"
-# endif
+#ifndef GCCONFIG_H
+# include "gcconfig.h"
+#endif
-#if defined(__STDC__)
-# include <stdlib.h>
-# if !(defined( sony_news ) )
-# include <stddef.h>
-# endif
-# define VOLATILE volatile
-#else
-# ifdef MSWIN32
-# include <stdlib.h>
+#ifndef GC_INNER
+ /* This tagging macro must be used at the start of every variable */
+  /* definition which is declared with GC_EXTERN. Should also be used */
+ /* for the GC-scope function definitions and prototypes. Must not be */
+ /* used in gcconfig.h. Shouldn't be used for the debugging-only */
+ /* functions. Currently, not used for the functions declared in or */
+ /* called from the "dated" source files (pcr_interface.c, specific.c */
+ /* and in the "extra" folder). */
+# if defined(GC_DLL) && defined(__GNUC__) && !defined(MSWIN32) \
+ && !defined(MSWINCE)
+# if __GNUC__ >= 4
+ /* See the corresponding GC_API definition. */
+# define GC_INNER __attribute__((__visibility__("hidden")))
+# else
+ /* The attribute is unsupported. */
+# define GC_INNER /* empty */
# endif
-# define VOLATILE
+# else
+# define GC_INNER /* empty */
+# endif
+
+# define GC_EXTERN extern GC_INNER
+ /* Used only for the GC-scope variables (prefixed with "GC_") */
+ /* declared in the header files. Must not be used for thread-local */
+ /* variables. Must not be used in gcconfig.h. Shouldn't be used for */
+ /* the debugging-only or profiling-only variables. Currently, not */
+ /* used for the variables accessed from the "dated" source files */
+ /* (pcr_interface.c, specific.c/h, and in the "extra" folder). */
+ /* The corresponding variable definition must start with GC_INNER. */
+#endif /* !GC_INNER */
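+/* Illustrative sketch (an assumption about usage, with hypothetical */
+/* names): a GC-internal function and variable would be declared as */
+/*   GC_INNER void GC_do_something_inner(int n); */
+/*   GC_EXTERN GC_bool GC_something_enabled; */
+/* in a private header, and the defining .c file repeats the GC_INNER */
+/* tag, so both symbols get hidden visibility in GC_DLL builds with */
+/* GCC 4+. */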
+
+#ifndef HEADERS_H
+# include "gc_hdrs.h"
#endif
-#if 0 /* defined(__GNUC__) doesn't work yet */
+#if __GNUC__ >= 3
# define EXPECT(expr, outcome) __builtin_expect(expr,outcome)
/* Equivalent to (expr), but predict that usually (expr)==outcome. */
#else
# define EXPECT(expr, outcome) (expr)
#endif /* __GNUC__ */
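+/* Illustrative use (an assumption, not from this patch): annotate a */
+/* rare branch so GCC 3+ lays out the common path as the fall-through: */
+/*   if (EXPECT(p == 0, FALSE)) return slow_path(); */
+/* where slow_path() is a hypothetical helper. On other compilers */
+/* EXPECT degrades to the plain condition. */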
-# ifndef GC_LOCKS_H
-# include "gc_locks.h"
-# endif
+#if defined(_MSC_VER) || defined(__INTEL_COMPILER) || defined(__DMC__) \
+ || defined(__WATCOMC__)
+# define GC_INLINE static __inline
+#elif (__GNUC__ >= 3) || defined(__sun)
+# define GC_INLINE static inline
+#else
+# define GC_INLINE static
+#endif
+
+#ifndef GC_API_PRIV
+# define GC_API_PRIV GC_API
+#endif
+
+#ifndef GC_LOCKS_H
+# include "gc_locks.h"
+#endif
# ifdef STACK_GROWS_DOWN
# define COOLER_THAN >
# define HOTTER_THAN <
-# define MAKE_COOLER(x,y) if ((word)(x)+(y) > (word)(x)) {(x) += (y);} \
- else {(x) = (word)ONES;}
+# define MAKE_COOLER(x,y) if ((x)+(y) > (x)) {(x) += (y);} \
+ else {(x) = (ptr_t)ONES;}
# define MAKE_HOTTER(x,y) (x) -= (y)
# else
# define COOLER_THAN <
# define HOTTER_THAN >
-# define MAKE_COOLER(x,y) if ((word)(x)-(y) < (word)(x)) {(x) -= (y);} else {(x) = 0;}
+# define MAKE_COOLER(x,y) if ((x)-(y) < (x)) {(x) -= (y);} else {(x) = 0;}
# define MAKE_HOTTER(x,y) (x) += (y)
# endif
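+/* Illustrative sketch: "cooler" means further from the hot (growing) */
+/* end of the stack. E.g., to widen a scan limit by 1 KB, with */
+/* saturation on address wrap-around (limit being some ptr_t): */
+/*   MAKE_COOLER(limit, 1024); */
+/* Afterwards the new limit is COOLER_THAN the old one, unless the */
+/* arithmetic wrapped, in which case it saturates to (ptr_t)ONES */
+/* (or to 0 for an upward-growing stack). */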
/*********************************/
/* #define STUBBORN_ALLOC */
- /* Enable stubborm allocation, and thus a limited */
- /* form of incremental collection w/o dirty bits. */
+ /* Enable stubborn allocation, and thus a limited */
+ /* form of incremental collection w/o dirty bits. */
/* #define ALL_INTERIOR_POINTERS */
- /* Forces all pointers into the interior of an */
- /* object to be considered valid. Also causes the */
- /* sizes of all objects to be inflated by at least */
- /* one byte. This should suffice to guarantee */
- /* that in the presence of a compiler that does */
- /* not perform garbage-collector-unsafe */
- /* optimizations, all portable, strictly ANSI */
- /* conforming C programs should be safely usable */
- /* with malloc replaced by GC_malloc and free */
- /* calls removed. There are several disadvantages: */
- /* 1. There are probably no interesting, portable, */
- /* strictly ANSI conforming C programs. */
- /* 2. This option makes it hard for the collector */
- /* to allocate space that is not ``pointed to'' */
- /* by integers, etc. Under SunOS 4.X with a */
- /* statically linked libc, we empiricaly */
- /* observed that it would be difficult to */
- /* allocate individual objects larger than 100K. */
- /* Even if only smaller objects are allocated, */
- /* more swap space is likely to be needed. */
- /* Fortunately, much of this will never be */
- /* touched. */
- /* If you can easily avoid using this option, do. */
- /* If not, try to keep individual objects small. */
- /* This is now really controlled at startup, */
- /* through GC_all_interior_pointers. */
-
-#define PRINTSTATS /* Print garbage collection statistics */
- /* For less verbose output, undefine in reclaim.c */
-
-#define PRINTTIMES /* Print the amount of time consumed by each garbage */
- /* collection. */
-
-#define PRINTBLOCKS /* Print object sizes associated with heap blocks, */
- /* whether the objects are atomic or composite, and */
- /* whether or not the block was found to be empty */
- /* during the reclaim phase. Typically generates */
- /* about one screenful per garbage collection. */
-#undef PRINTBLOCKS
-
-#ifdef SILENT
-# ifdef PRINTSTATS
-# undef PRINTSTATS
-# endif
-# ifdef PRINTTIMES
-# undef PRINTTIMES
-# endif
-# ifdef PRINTNBLOCKS
-# undef PRINTNBLOCKS
-# endif
-#endif
+ /* Forces all pointers into the interior of an */
+ /* object to be considered valid. Also causes the */
+ /* sizes of all objects to be inflated by at least */
+ /* one byte. This should suffice to guarantee */
+ /* that in the presence of a compiler that does */
+ /* not perform garbage-collector-unsafe */
+ /* optimizations, all portable, strictly ANSI */
+ /* conforming C programs should be safely usable */
+ /* with malloc replaced by GC_malloc and free */
+ /* calls removed. There are several disadvantages: */
+ /* 1. There are probably no interesting, portable, */
+ /* strictly ANSI conforming C programs. */
+ /* 2. This option makes it hard for the collector */
+ /* to allocate space that is not ``pointed to'' */
+ /* by integers, etc. Under SunOS 4.X with a */
+ /* statically linked libc, we empirically */
+ /* observed that it would be difficult to */
+ /* allocate individual objects larger than 100K. */
+ /* Even if only smaller objects are allocated, */
+ /* more swap space is likely to be needed. */
+ /* Fortunately, much of this will never be */
+ /* touched. */
+ /* If you can easily avoid using this option, do. */
+ /* If not, try to keep individual objects small. */
+ /* This is now really controlled at startup, */
+ /* through GC_all_interior_pointers. */
-#if defined(PRINTSTATS) && !defined(GATHERSTATS)
-# define GATHERSTATS
-#endif
-
-#if defined(PRINTSTATS) || !defined(SMALL_CONFIG)
-# define CONDPRINT /* Print some things if GC_print_stats is set */
-#endif
#define GC_INVOKE_FINALIZERS() GC_notify_or_invoke_finalizers()
-#define MERGE_SIZES /* Round up some object sizes, so that fewer distinct */
- /* free lists are actually maintained. This applies */
- /* only to the top level routines in misc.c, not to */
- /* user generated code that calls GC_allocobj and */
- /* GC_allocaobj directly. */
- /* Slows down average programs slightly. May however */
- /* substantially reduce fragmentation if allocation */
- /* request sizes are widely scattered. */
- /* May save significant amounts of space for obj_map */
- /* entries. */
-
-#if defined(USE_MARK_BYTES) && !defined(ALIGN_DOUBLE)
-# define ALIGN_DOUBLE
- /* We use one byte for every 2 words, which doesn't allow for */
- /* odd numbered words to have mark bits. */
-#endif
-
-#if defined(GC_GCJ_SUPPORT) && ALIGNMENT < 8 && !defined(ALIGN_DOUBLE)
- /* GCJ's Hashtable synchronization code requires 64-bit alignment. */
-# define ALIGN_DOUBLE
-#endif
-
-/* ALIGN_DOUBLE requires MERGE_SIZES at present. */
-# if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES)
-# define MERGE_SIZES
-# endif
-
#if !defined(DONT_ADD_BYTE_AT_END)
# define EXTRA_BYTES GC_all_interior_pointers
+# define MAX_EXTRA_BYTES 1
#else
# define EXTRA_BYTES 0
+# define MAX_EXTRA_BYTES 0
#endif
# ifndef LARGE_CONFIG
-# define MINHINCR 16 /* Minimum heap increment, in blocks of HBLKSIZE */
- /* Must be multiple of largest page size. */
+# define MINHINCR 16 /* Minimum heap increment, in blocks of HBLKSIZE */
+ /* Must be multiple of largest page size. */
# define MAXHINCR 2048 /* Maximum heap increment, in blocks */
# else
# define MINHINCR 64
# define MAXHINCR 4096
# endif
-# define TIME_LIMIT 50 /* We try to keep pause times from exceeding */
- /* this by much. In milliseconds. */
-
# define BL_LIMIT GC_black_list_spacing
- /* If we need a block of N bytes, and we have */
- /* a block of N + BL_LIMIT bytes available, */
- /* and N > BL_LIMIT, */
- /* but all possible positions in it are */
- /* blacklisted, we just use it anyway (and */
- /* print a warning, if warnings are enabled). */
- /* This risks subsequently leaking the block */
- /* due to a false reference. But not using */
- /* the block risks unreasonable immediate */
- /* heap growth. */
+ /* If we need a block of N bytes, and we have */
+ /* a block of N + BL_LIMIT bytes available, */
+ /* and N > BL_LIMIT, */
+ /* but all possible positions in it are */
+ /* blacklisted, we just use it anyway (and */
+ /* print a warning, if warnings are enabled). */
+ /* This risks subsequently leaking the block */
+ /* due to a false reference. But not using */
+ /* the block risks unreasonable immediate */
+ /* heap growth. */
/*********************************/
/* */
-/* Stack saving for debugging */
+/* Stack saving for debugging */
/* */
/*********************************/
#ifdef NEED_CALLINFO
struct callinfo {
- word ci_pc; /* Caller, not callee, pc */
-# if NARGS > 0
- word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
-# endif
-# if defined(ALIGN_DOUBLE) && (NFRAMES * (NARGS + 1)) % 2 == 1
- /* Likely alignment problem. */
- word ci_dummy;
-# endif
+ word ci_pc; /* Caller, not callee, pc */
+# if NARGS > 0
+ word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
+# endif
+# if (NFRAMES * (NARGS + 1)) % 2 == 1
+ /* Likely alignment problem. */
+ word ci_dummy;
+# endif
};
#endif
#ifdef SAVE_CALL_CHAIN
-
-/* Fill in the pc and argument information for up to NFRAMES of my */
-/* callers. Ignore my frame and my callers frame. */
-void GC_save_callers GC_PROTO((struct callinfo info[NFRAMES]));
-
-void GC_print_callers GC_PROTO((struct callinfo info[NFRAMES]));
-
+ /* Fill in the pc and argument information for up to NFRAMES of my */
+  /* callers. Ignore my frame and my caller's frame. */
+ GC_INNER void GC_save_callers(struct callinfo info[NFRAMES]);
+ GC_INNER void GC_print_callers(struct callinfo info[NFRAMES]);
#endif
/*********************************/
/* */
-/* OS interface routines */
+/* OS interface routines */
/* */
/*********************************/
# undef MS_TIME_DIFF
# define CLOCK_TYPE struct timeval
# define GET_TIME(x) { struct rusage rusage; \
- getrusage (RUSAGE_SELF, &rusage); \
- x = rusage.ru_utime; }
-# define MS_TIME_DIFF(a,b) ((double) (a.tv_sec - b.tv_sec) * 1000.0 \
- + (double) (a.tv_usec - b.tv_usec) / 1000.0)
+ getrusage (RUSAGE_SELF, &rusage); \
+ x = rusage.ru_utime; }
+# define MS_TIME_DIFF(a,b) \
+ ((unsigned long)((double) (a.tv_sec - b.tv_sec) * 1000.0 \
+ + (double) (a.tv_usec - b.tv_usec) / 1000.0))
#else /* !BSD_TIME */
# if defined(MSWIN32) || defined(MSWINCE)
# include <windows.h>
# else /* !MSWIN32, !MSWINCE, !BSD_TIME */
# include <time.h>
# if !defined(__STDC__) && defined(SPARC) && defined(SUNOS4)
- clock_t clock(); /* Not in time.h, where it belongs */
+ clock_t clock(void); /* Not in time.h, where it belongs */
# endif
# if defined(FREEBSD) && !defined(CLOCKS_PER_SEC)
# include <machine/limits.h>
* CLOCKS_PER_SEC be defined. But at least under SunOS4.1.1, it isn't.
* Also note that the combination of ANSI C and POSIX is incredibly gross
* here. The type clock_t is used by both clock() and times(). But on
- * some machines these use different notions of a clock tick, CLOCKS_PER_SEC
+ * some machines these use different notions of a clock tick; CLOCKS_PER_SEC
* seems to apply only to clock. Hence we use it here. On many machines,
* including SunOS, clock actually uses units of microseconds (which are
* not really clock ticks).
# define CLOCK_TYPE clock_t
# define GET_TIME(x) x = clock()
# define MS_TIME_DIFF(a,b) ((unsigned long) \
- (1000.0*(double)((a)-(b))/(double)CLOCKS_PER_SEC))
+ (1000.0*(double)((a)-(b))/(double)CLOCKS_PER_SEC))
# endif /* !MSWIN32 */
#endif /* !BSD_TIME */
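+/* Illustrative sketch of the intended use (BSD_TIME or clock() case): */
+/*   CLOCK_TYPE start_time, done_time; */
+/*   GET_TIME(start_time); */
+/*   ... perform a collection ... */
+/*   GET_TIME(done_time); */
+/*   printf("%lu ms\n", MS_TIME_DIFF(done_time, start_time)); */
+/* MS_TIME_DIFF yields the difference as an unsigned long number of */
+/* milliseconds. */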
-/* We use bzero and bcopy internally. They may not be available. */
+/* We use bzero and bcopy internally. They may not be available. */
# if defined(SPARC) && defined(SUNOS4)
# define BCOPY_EXISTS
# endif
# define BCOPY(x,y,n) memcpy(y, x, (size_t)(n))
# define BZERO(x,n) memset(x, 0, (size_t)(n))
# else
-# define BCOPY(x,y,n) bcopy((char *)(x),(char *)(y),(int)(n))
-# define BZERO(x,n) bzero((char *)(x),(int)(n))
-# endif
-
-/* Delay any interrupts or signals that may abort this thread. Data */
-/* structures are in a consistent state outside this pair of calls. */
-/* ANSI C allows both to be empty (though the standard isn't very */
-/* clear on that point). Standard malloc implementations are usually */
-/* neither interruptable nor thread-safe, and thus correspond to */
-/* empty definitions. */
-/* It probably doesn't make any sense to declare these to be nonempty */
-/* if the code is being optimized, since signal safety relies on some */
-/* ordering constraints that are typically not obeyed by optimizing */
-/* compilers. */
-# ifdef PCR
-# define DISABLE_SIGNALS() \
- PCR_Th_SetSigMask(PCR_allSigsBlocked,&GC_old_sig_mask)
-# define ENABLE_SIGNALS() \
- PCR_Th_SetSigMask(&GC_old_sig_mask, NIL)
-# else
-# if defined(THREADS) || defined(AMIGA) \
- || defined(MSWIN32) || defined(MSWINCE) || defined(MACOS) \
- || defined(DJGPP) || defined(NO_SIGNALS)
- /* Also useful for debugging. */
- /* Should probably use thr_sigsetmask for GC_SOLARIS_THREADS. */
-# define DISABLE_SIGNALS()
-# define ENABLE_SIGNALS()
-# else
-# define DISABLE_SIGNALS() GC_disable_signals()
- void GC_disable_signals();
-# define ENABLE_SIGNALS() GC_enable_signals()
- void GC_enable_signals();
-# endif
+# define BCOPY(x,y,n) bcopy((void *)(x),(void *)(y),(size_t)(n))
+# define BZERO(x,n) bzero((void *)(x),(size_t)(n))
# endif
/*
# ifdef PCR
# include "th/PCR_ThCtl.h"
# define STOP_WORLD() \
- PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_stopNormal, \
- PCR_allSigsBlocked, \
- PCR_waitForever)
+ PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_stopNormal, \
+ PCR_allSigsBlocked, \
+ PCR_waitForever)
# define START_WORLD() \
- PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_null, \
- PCR_allSigsBlocked, \
- PCR_waitForever);
+ PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_null, \
+ PCR_allSigsBlocked, \
+ PCR_waitForever);
# else
-# if defined(GC_SOLARIS_THREADS) || defined(GC_WIN32_THREADS) \
- || defined(GC_PTHREADS)
- void GC_stop_world();
- void GC_start_world();
+# if defined(GC_WIN32_THREADS) || defined(GC_PTHREADS)
+ GC_INNER void GC_stop_world(void);
+ GC_INNER void GC_start_world(void);
# define STOP_WORLD() GC_stop_world()
# define START_WORLD() GC_start_world()
# else
-# define STOP_WORLD()
+ /* Just do a sanity check: we are not inside GC_do_blocking(). */
+# define STOP_WORLD() GC_ASSERT(GC_blocked_sp == NULL)
# define START_WORLD()
# endif
# endif
# ifdef PCR
# define ABORT(s) PCR_Base_Panic(s)
# else
+# if defined(MSWINCE) && !defined(DebugBreak) \
+ && (!defined(UNDER_CE) || (defined(__MINGW32CE__) && !defined(ARM32)))
+ /* This simplifies linking for WinCE (and, probably, doesn't */
+ /* hurt debugging much); use -DDebugBreak=DebugBreak to override */
+ /* this behavior if really needed. This is also a workaround for */
+ /* x86mingw32ce toolchain (if it is still declaring DebugBreak() */
+ /* instead of defining it as a macro). */
+# define DebugBreak() _exit(-1) /* there is no abort() in WinCE */
+# endif
# ifdef SMALL_CONFIG
-# define ABORT(msg) abort();
+# if defined(MSWIN32) || defined(MSWINCE)
+# define ABORT(msg) DebugBreak()
+# else
+# define ABORT(msg) abort()
+# endif
# else
- GC_API void GC_abort GC_PROTO((GC_CONST char * msg));
-# define ABORT(msg) GC_abort(msg);
+ GC_API_PRIV void GC_abort(const char * msg);
+# define ABORT(msg) GC_abort(msg)
# endif
# endif
# define EXIT() (void)exit(1)
# endif
-/* Print warning message, e.g. almost out of memory. */
-# define WARN(msg,arg) (*GC_current_warn_proc)("GC Warning: " msg, (GC_word)(arg))
-extern GC_warn_proc GC_current_warn_proc;
+/* Print warning message, e.g. almost out of memory. */
+#define WARN(msg, arg) (*GC_current_warn_proc)("GC Warning: " msg, \
+ (GC_word)(arg))
+GC_EXTERN GC_warn_proc GC_current_warn_proc;
+
+/* Print format type macro for signed_word. Currently used for WARN() */
+/* only. This could be of use on Win64 but commented out since Win64 */
+/* is only a little-endian architecture (for now) and the WARN format */
+/* string is, possibly, processed on the client side, so non-standard */
+/* print type modifiers should be avoided (if possible). */
+#if defined(_MSC_VER) && defined(_WIN64) && !defined(GC_PRIdPTR)
+/* #define GC_PRIdPTR "I64d" */
+#endif
+
+#if !defined(GC_PRIdPTR) && (defined(_LLP64) || defined(__LLP64__) \
+ || defined(_WIN64))
+/* #include <inttypes.h> */
+/* #define GC_PRIdPTR PRIdPTR */
+#endif
+
+#ifndef GC_PRIdPTR
+ /* Assume sizeof(void *) == sizeof(long) (or a little-endian machine) */
+# define GC_PRIdPTR "ld"
+#endif
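+/* Illustrative use (an assumption about the call sites): the single */
+/* WARN argument is word-sized, so the format modifier must match: */
+/*   WARN("Failed to expand heap by %" GC_PRIdPTR " bytes\n", bytes); */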
/* Get environment entry */
#if !defined(NO_GETENV)
# if defined(EMPTY_GETENV_RESULTS)
- /* Workaround for a reputed Wine bug. */
- static inline char * fixed_getenv(const char *name)
- {
- char * tmp = getenv(name);
- if (tmp == 0 || strlen(tmp) == 0)
- return 0;
- return tmp;
- }
+ /* Workaround for a reputed Wine bug. */
+ GC_INLINE char * fixed_getenv(const char *name)
+ {
+ char * tmp = getenv(name);
+ if (tmp == 0 || strlen(tmp) == 0)
+ return 0;
+ return tmp;
+ }
# define GETENV(name) fixed_getenv(name)
# else
# define GETENV(name) getenv(name)
# define GETENV(name) 0
#endif
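+/* Illustrative use (not part of this change): tuning knobs are read */
+/* this way at start-up, so an unset and an empty variable behave */
+/* identically under EMPTY_GETENV_RESULTS: */
+/*   if (0 != GETENV("GC_PRINT_STATS")) ... enable statistics ... */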
+#if defined(DARWIN)
+# if defined(POWERPC)
+# if CPP_WORDSZ == 32
+# define GC_THREAD_STATE_T ppc_thread_state_t
+# define GC_MACH_THREAD_STATE PPC_THREAD_STATE
+# define GC_MACH_THREAD_STATE_COUNT PPC_THREAD_STATE_COUNT
+# define GC_MACH_HEADER mach_header
+# define GC_MACH_SECTION section
+# define GC_GETSECTBYNAME getsectbynamefromheader
+# else
+# define GC_THREAD_STATE_T ppc_thread_state64_t
+# define GC_MACH_THREAD_STATE PPC_THREAD_STATE64
+# define GC_MACH_THREAD_STATE_COUNT PPC_THREAD_STATE64_COUNT
+# define GC_MACH_HEADER mach_header_64
+# define GC_MACH_SECTION section_64
+# define GC_GETSECTBYNAME getsectbynamefromheader_64
+# endif
+# elif defined(I386) || defined(X86_64)
+# if CPP_WORDSZ == 32
+# define GC_THREAD_STATE_T x86_thread_state32_t
+# define GC_MACH_THREAD_STATE x86_THREAD_STATE32
+# define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE32_COUNT
+# define GC_MACH_HEADER mach_header
+# define GC_MACH_SECTION section
+# define GC_GETSECTBYNAME getsectbynamefromheader
+# else
+# define GC_THREAD_STATE_T x86_thread_state64_t
+# define GC_MACH_THREAD_STATE x86_THREAD_STATE64
+# define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE64_COUNT
+# define GC_MACH_HEADER mach_header_64
+# define GC_MACH_SECTION section_64
+# define GC_GETSECTBYNAME getsectbynamefromheader_64
+# endif
+# else
+# if defined(ARM32)
+# define GC_THREAD_STATE_T arm_thread_state_t
+# else
+# error define GC_THREAD_STATE_T
+# endif
+# define GC_MACH_THREAD_STATE MACHINE_THREAD_STATE
+# define GC_MACH_THREAD_STATE_COUNT MACHINE_THREAD_STATE_COUNT
+# endif
+/* Try to work out the right way to access thread state structure members.
+ The structure has changed its definition in different Darwin versions.
+   This now defaults to the (older) names without __, thus hopefully
+ not breaking any existing Makefile.direct builds. */
+# if defined (HAS_PPC_THREAD_STATE___R0) \
+ || defined (HAS_PPC_THREAD_STATE64___R0) \
+ || defined (HAS_X86_THREAD_STATE32___EAX) \
+ || defined (HAS_X86_THREAD_STATE64___RAX)
+# define THREAD_FLD(x) __ ## x
+# else
+# define THREAD_FLD(x) x
+# endif
+#endif
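+/* Illustrative sketch (assuming a PowerPC thread state, where r1 */
+/* conventionally holds the stack pointer): the stop-world code can */
+/* read registers portably across Darwin versions via THREAD_FLD: */
+/*   GC_THREAD_STATE_T state; */
+/*   ptr_t sp = (ptr_t)state.THREAD_FLD(r1); */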
+
/*********************************/
/* */
/* Word-size-dependent defines */
# define WORDS_TO_BYTES(x) ((x)<<2)
# define BYTES_TO_WORDS(x) ((x)>>2)
# define LOGWL ((word)5) /* log[2] of CPP_WORDSZ */
-# define modWORDSZ(n) ((n) & 0x1f) /* n mod size of word */
+# define modWORDSZ(n) ((n) & 0x1f) /* n mod size of word */
# if ALIGNMENT != 4
-# define UNALIGNED
+# define UNALIGNED_PTRS
# endif
#endif
# define WORDS_TO_BYTES(x) ((x)<<3)
# define BYTES_TO_WORDS(x) ((x)>>3)
# define LOGWL ((word)6) /* log[2] of CPP_WORDSZ */
-# define modWORDSZ(n) ((n) & 0x3f) /* n mod size of word */
+# define modWORDSZ(n) ((n) & 0x3f) /* n mod size of word */
# if ALIGNMENT != 8
-# define UNALIGNED
+# define UNALIGNED_PTRS
# endif
#endif
+/* The first TINY_FREELISTS free lists correspond to the first */
+/* TINY_FREELISTS multiples of GRANULE_BYTES, i.e. we keep */
+/* separate free lists for each multiple of GRANULE_BYTES */
+/* up to (TINY_FREELISTS-1) * GRANULE_BYTES. After that they */
+/* may be spread out further. */
+#include "../gc_tiny_fl.h"
+#define GRANULE_BYTES GC_GRANULE_BYTES
+#define TINY_FREELISTS GC_TINY_FREELISTS
+
#define WORDSZ ((word)CPP_WORDSZ)
#define SIGNB ((word)1 << (WORDSZ-1))
#define BYTES_PER_WORD ((word)(sizeof (word)))
#define ONES ((word)(signed_word)(-1))
-#define divWORDSZ(n) ((n) >> LOGWL) /* divide n by size of word */
+#define divWORDSZ(n) ((n) >> LOGWL) /* divide n by size of word */
+
+#if GRANULE_BYTES == 8
+# define BYTES_TO_GRANULES(n) ((n)>>3)
+# define GRANULES_TO_BYTES(n) ((n)<<3)
+# if CPP_WORDSZ == 64
+# define GRANULES_TO_WORDS(n) (n)
+# elif CPP_WORDSZ == 32
+# define GRANULES_TO_WORDS(n) ((n)<<1)
+# else
+# define GRANULES_TO_WORDS(n) BYTES_TO_WORDS(GRANULES_TO_BYTES(n))
+# endif
+#elif GRANULE_BYTES == 16
+# define BYTES_TO_GRANULES(n) ((n)>>4)
+# define GRANULES_TO_BYTES(n) ((n)<<4)
+# if CPP_WORDSZ == 64
+# define GRANULES_TO_WORDS(n) ((n)<<1)
+# elif CPP_WORDSZ == 32
+# define GRANULES_TO_WORDS(n) ((n)<<2)
+# else
+# define GRANULES_TO_WORDS(n) BYTES_TO_WORDS(GRANULES_TO_BYTES(n))
+# endif
+#else
+# error Bad GRANULE_BYTES value
+#endif
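+/* Worked example (illustrative): with GRANULE_BYTES == 16 and */
+/* CPP_WORDSZ == 64, BYTES_TO_GRANULES(48) == 3, GRANULES_TO_BYTES(3) */
+/* == 48, and GRANULES_TO_WORDS(3) == 6, since each granule holds two */
+/* 8-byte words. */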
/*********************/
/* */
/* */
/*********************/
-/* heap block size, bytes. Should be power of 2 */
+/* Heap block size, bytes. Should be a power of 2. */
+/* Incremental GC with MPROTECT_VDB currently requires the */
+/* page size to be a multiple of HBLKSIZE. Since most modern */
+/* architectures support variable page sizes down to 4K, and */
+/* X86 is generally 4K, we now default to 4K, except for */
+/* Alpha: Seems to be used with 8K pages. */
+/* SMALL_CONFIG: Want less block-level fragmentation. */
#ifndef HBLKSIZE
# ifdef SMALL_CONFIG
# define CPP_LOG_HBLKSIZE 10
# else
-# if (CPP_WORDSZ == 32) || (defined(HPUX) && defined(HP_PA))
- /* HPUX/PA seems to use 4K pages with the 64 bit ABI */
-# define CPP_LOG_HBLKSIZE 12
-# else
+# if defined(ALPHA)
# define CPP_LOG_HBLKSIZE 13
+# else
+# define CPP_LOG_HBLKSIZE 12
# endif
# endif
#else
# endif
# undef HBLKSIZE
#endif
+
# define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
-# define LOG_HBLKSIZE ((word)CPP_LOG_HBLKSIZE)
-# define HBLKSIZE ((word)CPP_HBLKSIZE)
+# define LOG_HBLKSIZE ((size_t)CPP_LOG_HBLKSIZE)
+# define HBLKSIZE ((size_t)CPP_HBLKSIZE)
-/* max size objects supported by freelist (larger objects may be */
-/* allocated, but less efficiently) */
+/* max size objects supported by freelist (larger objects are */
+/* allocated directly with allchblk(), by rounding to the next */
+/* multiple of HBLKSIZE). */
#define CPP_MAXOBJBYTES (CPP_HBLKSIZE/2)
-#define MAXOBJBYTES ((word)CPP_MAXOBJBYTES)
-#define CPP_MAXOBJSZ BYTES_TO_WORDS(CPP_MAXOBJBYTES)
-#define MAXOBJSZ ((word)CPP_MAXOBJSZ)
-
+#define MAXOBJBYTES ((size_t)CPP_MAXOBJBYTES)
+#define CPP_MAXOBJWORDS BYTES_TO_WORDS(CPP_MAXOBJBYTES)
+#define MAXOBJWORDS ((size_t)CPP_MAXOBJWORDS)
+#define CPP_MAXOBJGRANULES BYTES_TO_GRANULES(CPP_MAXOBJBYTES)
+#define MAXOBJGRANULES ((size_t)CPP_MAXOBJGRANULES)
+
# define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)
# define HBLK_PTR_DIFF(p,q) divHBLKSZ((ptr_t)p - (ptr_t)q)
- /* Equivalent to subtracting 2 hblk pointers. */
- /* We do it this way because a compiler should */
- /* find it hard to use an integer division */
- /* instead of a shift. The bundled SunOS 4.1 */
- /* o.w. sometimes pessimizes the subtraction to */
- /* involve a call to .div. */
-
+ /* Equivalent to subtracting 2 hblk pointers. */
+ /* We do it this way because a compiler should */
+ /* find it hard to use an integer division */
+ /* instead of a shift. The bundled SunOS 4.1 */
+ /* o.w. sometimes pessimizes the subtraction to */
+ /* involve a call to .div. */
+
# define modHBLKSZ(n) ((n) & (HBLKSIZE-1))
-
+
# define HBLKPTR(objptr) ((struct hblk *)(((word) (objptr)) & ~(HBLKSIZE-1)))
-# define HBLKDISPL(objptr) (((word) (objptr)) & (HBLKSIZE-1))
+# define HBLKDISPL(objptr) (((size_t) (objptr)) & (HBLKSIZE-1))
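+/* Illustrative identity (holds for any pointer p, since HBLKSIZE is */
+/* a power of 2): */
+/*   GC_ASSERT((ptr_t)HBLKPTR(p) + HBLKDISPL(p) == (ptr_t)(p)); */
+/* i.e. the two macros split an address into block base and offset. */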
/* Round up byte allocation requests to integral number of words, etc. */
-# define ROUNDED_UP_WORDS(n) \
- BYTES_TO_WORDS((n) + (WORDS_TO_BYTES(1) - 1 + EXTRA_BYTES))
-# ifdef ALIGN_DOUBLE
-# define ALIGNED_WORDS(n) \
- (BYTES_TO_WORDS((n) + WORDS_TO_BYTES(2) - 1 + EXTRA_BYTES) & ~1)
+# define ROUNDED_UP_GRANULES(n) \
+ BYTES_TO_GRANULES((n) + (GRANULE_BYTES - 1 + EXTRA_BYTES))
+# if MAX_EXTRA_BYTES == 0
+# define SMALL_OBJ(bytes) EXPECT((bytes) <= (MAXOBJBYTES), 1)
# else
-# define ALIGNED_WORDS(n) ROUNDED_UP_WORDS(n)
+# define SMALL_OBJ(bytes) \
+ (EXPECT((bytes) <= (MAXOBJBYTES - MAX_EXTRA_BYTES), 1) || \
+ (bytes) <= (MAXOBJBYTES - EXTRA_BYTES))
+ /* This really just tests bytes <= MAXOBJBYTES - EXTRA_BYTES. */
+ /* But we try to avoid looking up EXTRA_BYTES. */
# endif
-# define SMALL_OBJ(bytes) ((bytes) <= (MAXOBJBYTES - EXTRA_BYTES))
# define ADD_SLOP(bytes) ((bytes) + EXTRA_BYTES)
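+/* Illustrative sketch of an allocation fast-path test (an assumption */
+/* about usage, with hypothetical locals lb and lg): */
+/*   if (SMALL_OBJ(lb)) { */
+/*     size_t lg = ROUNDED_UP_GRANULES(lb); */
+/*     ... index the per-granule free lists with lg ... */
+/*   } else { */
+/*     ... allocate ADD_SLOP(lb) bytes in whole heap blocks ... */
+/*   } */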
# ifndef MIN_WORDS
- /* MIN_WORDS is the size of the smallest allocated object. */
- /* 1 and 2 are the only valid values. */
- /* 2 must be used if: */
- /* - GC_gcj_malloc can be used for objects of requested */
- /* size smaller than 2 words, or */
- /* - USE_MARK_BYTES is defined. */
-# if defined(USE_MARK_BYTES) || defined(GC_GCJ_SUPPORT)
-# define MIN_WORDS 2 /* Smallest allocated object. */
-# else
-# define MIN_WORDS 1
-# endif
+# define MIN_WORDS 2 /* FIXME: obsolete */
# endif
-
/*
- * Hash table representation of sets of pages. This assumes it is
- * OK to add spurious entries to sets.
+ * Hash table representation of sets of pages.
+ * Implements a map from aligned HBLKSIZE chunks of the address space to one
+ * bit each.
+ * This assumes it is OK to spuriously set bits, e.g. because multiple
+ * addresses are represented by a single location.
* Used by black-listing code, and perhaps by dirty bit maintenance code.
*/
-
+
# ifdef LARGE_CONFIG
-# define LOG_PHT_ENTRIES 20 /* Collisions likely at 1M blocks, */
- /* which is >= 4GB. Each table takes */
- /* 128KB, some of which may never be */
- /* touched. */
+# if CPP_WORDSZ == 32
+# define LOG_PHT_ENTRIES 20 /* Collisions likely at 1M blocks, */
+ /* which is >= 4GB. Each table takes */
+ /* 128KB, some of which may never be */
+ /* touched. */
+# else
+# define LOG_PHT_ENTRIES 21 /* Collisions likely at 2M blocks, */
+ /* which is >= 8GB. Each table takes */
+ /* 256KB, some of which may never be */
+ /* touched. */
+# endif
# else
# ifdef SMALL_CONFIG
-# define LOG_PHT_ENTRIES 14 /* Collisions are likely if heap grows */
- /* to more than 16K hblks = 64MB. */
- /* Each hash table occupies 2K bytes. */
+# define LOG_PHT_ENTRIES 15 /* Collisions are likely if heap grows */
+ /* to more than 32K hblks = 128MB. */
+ /* Each hash table occupies 4K bytes. */
# else /* default "medium" configuration */
-# define LOG_PHT_ENTRIES 16 /* Collisions are likely if heap grows */
- /* to more than 64K hblks >= 256MB. */
- /* Each hash table occupies 8K bytes. */
- /* Even for somewhat smaller heaps, */
- /* say half that, collisions may be an */
- /* issue because we blacklist */
- /* addresses outside the heap. */
+# define LOG_PHT_ENTRIES 18 /* Collisions are likely if heap grows */
+ /* to more than 256K hblks >= 1GB. */
+ /* Each hash table occupies 32K bytes. */
+ /* Even for somewhat smaller heaps, */
+ /* say half that, collisions may be an */
+ /* issue because we blacklist */
+ /* addresses outside the heap. */
# endif
# endif
# define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES)
# define PHT_HASH(addr) ((((word)(addr)) >> LOG_HBLKSIZE) & (PHT_ENTRIES - 1))
# define get_pht_entry_from_index(bl, index) \
- (((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
+ (((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
# define set_pht_entry_from_index(bl, index) \
- (bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index)
+ (bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index)
# define clear_pht_entry_from_index(bl, index) \
- (bl)[divWORDSZ(index)] &= ~((word)1 << modWORDSZ(index))
-/* And a dumb but thread-safe version of set_pht_entry_from_index. */
-/* This sets (many) extra bits. */
+ (bl)[divWORDSZ(index)] &= ~((word)1 << modWORDSZ(index))
+/* And a dumb but thread-safe version of set_pht_entry_from_index. */
+/* This sets (many) extra bits. */
# define set_pht_entry_from_index_safe(bl, index) \
- (bl)[divWORDSZ(index)] = ONES
-
+ (bl)[divWORDSZ(index)] = ONES
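+/* Illustrative use (assuming the page_hash_table typedef from the */
+/* surrounding, unchanged code): blacklisting an address addr in a */
+/* table bl, then testing it: */
+/*   word i = PHT_HASH(addr); */
+/*   set_pht_entry_from_index(bl, i); */
+/*   ... get_pht_entry_from_index(bl, i) now yields 1 ... */
+/* A spuriously set bit merely blacklists an extra page; it can never */
+/* cause incorrect collection. */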
/********************************************/
/* heap block header */
#define HBLKMASK (HBLKSIZE-1)
-#define BITS_PER_HBLK (CPP_HBLKSIZE * 8)
-
-#define MARK_BITS_PER_HBLK (BITS_PER_HBLK/CPP_WORDSZ)
- /* upper bound */
- /* We allocate 1 bit/word, unless USE_MARK_BYTES */
- /* is defined. Only the first word */
- /* in each object is actually marked. */
+#define MARK_BITS_PER_HBLK (HBLKSIZE/GRANULE_BYTES)
+ /* upper bound */
+ /* We allocate 1 bit per allocation granule. */
+ /* If MARK_BIT_PER_GRANULE is defined, we use */
+ /* every nth bit, where n is the number of */
+ /* allocation granules per object. If */
+ /* MARK_BIT_PER_OBJ is defined, we only use the */
+ /* initial group of mark bits, and it is safe */
+  /* to allocate a smaller header for large objects. */
# ifdef USE_MARK_BYTES
-# define MARK_BITS_SZ (MARK_BITS_PER_HBLK/2)
- /* Unlike the other case, this is in units of bytes. */
- /* We actually allocate only every second mark bit, since we */
- /* force all objects to be doubleword aligned. */
- /* However, each mark bit is allocated as a byte. */
+# define MARK_BITS_SZ (MARK_BITS_PER_HBLK + 1)
+ /* Unlike the other case, this is in units of bytes. */
+ /* Since we force doubleword alignment, we need at most one */
+ /* mark bit per 2 words. But we do allocate and set one */
+ /* extra mark bit to avoid an explicit check for the */
+ /* partial object at the end of each block. */
# else
-# define MARK_BITS_SZ (MARK_BITS_PER_HBLK/CPP_WORDSZ)
+# define MARK_BITS_SZ (MARK_BITS_PER_HBLK/CPP_WORDSZ + 1)
# endif
-/* We maintain layout maps for heap blocks containing objects of a given */
-/* size. Each entry in this map describes a byte offset and has the */
-/* following type. */
-typedef unsigned char map_entry_type;
+#ifdef PARALLEL_MARK
+# include "atomic_ops.h"
+ typedef AO_t counter_t;
+#else
+ typedef size_t counter_t;
+# if defined(THREADS) && defined(MPROTECT_VDB)
+# include "atomic_ops.h"
+# endif
+#endif
+/* We maintain layout maps for heap blocks containing objects of a given */
+/* size. Each entry in such a map gives, for a granule offset within */
+/* the block, that offset's remainder mod the object size in granules; */
+/* see the hb_map field below and GC_obj_map. */
struct hblkhdr {
- word hb_sz; /* If in use, size in words, of objects in the block. */
- /* if free, the size in bytes of the whole block */
- struct hblk * hb_next; /* Link field for hblk free list */
- /* and for lists of chunks waiting to be */
- /* reclaimed. */
- struct hblk * hb_prev; /* Backwards link for free list. */
- word hb_descr; /* object descriptor for marking. See */
- /* mark.h. */
- map_entry_type * hb_map;
- /* A pointer to a pointer validity map of the block. */
- /* See GC_obj_map. */
- /* Valid for all blocks with headers. */
- /* Free blocks point to GC_invalid_map. */
+ struct hblk * hb_next; /* Link field for hblk free list */
+ /* and for lists of chunks waiting to be */
+ /* reclaimed. */
+ struct hblk * hb_prev; /* Backwards link for free list. */
+ struct hblk * hb_block; /* The corresponding block. */
unsigned char hb_obj_kind;
- /* Kind of objects in the block. Each kind */
- /* identifies a mark procedure and a set of */
- /* list headers. Sometimes called regions. */
+ /* Kind of objects in the block. Each kind */
+ /* identifies a mark procedure and a set of */
+ /* list headers. Sometimes called regions. */
unsigned char hb_flags;
-# define IGNORE_OFF_PAGE 1 /* Ignore pointers that do not */
- /* point to the first page of */
- /* this object. */
-# define WAS_UNMAPPED 2 /* This is a free block, which has */
- /* been unmapped from the address */
- /* space. */
- /* GC_remap must be invoked on it */
- /* before it can be reallocated. */
- /* Only set with USE_MUNMAP. */
+# define IGNORE_OFF_PAGE 1 /* Ignore pointers that do not */
+ /* point to the first page of */
+ /* this object. */
+# define WAS_UNMAPPED 2 /* This is a free block, which has */
+ /* been unmapped from the address */
+ /* space. */
+ /* GC_remap must be invoked on it */
+ /* before it can be reallocated. */
+ /* Only set with USE_MUNMAP. */
+# define FREE_BLK 4 /* Block is free, i.e. not in use. */
unsigned short hb_last_reclaimed;
- /* Value of GC_gc_no when block was */
- /* last allocated or swept. May wrap. */
- /* For a free block, this is maintained */
- /* only for USE_MUNMAP, and indicates */
- /* when the header was allocated, or */
- /* when the size of the block last */
- /* changed. */
+ /* Value of GC_gc_no when block was */
+ /* last allocated or swept. May wrap. */
+ /* For a free block, this is maintained */
+ /* only for USE_MUNMAP, and indicates */
+ /* when the header was allocated, or */
+ /* when the size of the block last */
+ /* changed. */
+ size_t hb_sz; /* If in use, size in bytes, of objects in the block. */
+ /* if free, the size in bytes of the whole block */
+ /* We assume that this is convertible to signed_word */
+ /* without generating a negative result. We avoid */
+ /* generating free blocks larger than that. */
+ word hb_descr; /* object descriptor for marking. See */
+ /* mark.h. */
+# ifdef MARK_BIT_PER_OBJ
+ unsigned32 hb_inv_sz; /* A good upper bound for 2**32/hb_sz. */
+ /* For large objects, we use */
+ /* LARGE_INV_SZ. */
+# define LARGE_INV_SZ (1 << 16)
+# else
+ unsigned char hb_large_block;
+ short * hb_map; /* Essentially a table of remainders */
+ /* mod BYTES_TO_GRANULES(hb_sz), except */
+ /* for large blocks. See GC_obj_map. */
+# endif
+ counter_t hb_n_marks; /* Number of set mark bits, excluding */
+ /* the one always set at the end. */
+ /* Currently it is concurrently */
+ /* updated and hence only approximate. */
+ /* But a zero value does guarantee that */
+ /* the block contains no marked */
+ /* objects. */
+ /* Ensuring this property means that we */
+ /* never decrement it to zero during a */
+ /* collection, and hence the count may */
+ /* be one too high. Due to concurrent */
+ /* updates, an arbitrary number of */
+ /* increments, but not all of them (!) */
+ /* may be lost, hence it may in theory */
+ /* be much too low. */
+ /* The count may also be too high if */
+ /* multiple mark threads mark the */
+ /* same object due to a race. */
+ /* Without parallel marking, the count */
+ /* is accurate. */
# ifdef USE_MARK_BYTES
union {
char _hb_marks[MARK_BITS_SZ];
- /* The i'th byte is 1 if the object */
- /* starting at word 2i is marked, 0 o.w. */
- word dummy; /* Force word alignment of mark bytes. */
+ /* The i'th byte is 1 if the object */
+ /* starting at granule i or object i is */
+ /* marked, 0 o.w. */
+ /* The mark bit for the "one past the */
+ /* end" object is always set to avoid a */
+ /* special case test in the marker. */
+ word dummy; /* Force word alignment of mark bytes. */
} _mark_byte_union;
# define hb_marks _mark_byte_union._hb_marks
# else
word hb_marks[MARK_BITS_SZ];
- /* Bit i in the array refers to the */
- /* object starting at the ith word (header */
- /* INCLUDED) in the heap block. */
- /* The lsb of word 0 is numbered 0. */
- /* Unused bits are invalid, and are */
- /* occasionally set, e.g for uncollectable */
- /* objects. */
# endif /* !USE_MARK_BYTES */
};
+# define ANY_INDEX 23 /* "Random" mark bit index for assertions */
+
/* heap block body */
-# define BODY_SZ (HBLKSIZE/sizeof(word))
+# define HBLK_WORDS (HBLKSIZE/sizeof(word))
+# define HBLK_GRANULES (HBLKSIZE/GRANULE_BYTES)
+
+/* The number of objects in a block dedicated to a certain size. */
+/* It may erroneously yield zero (instead of one) for large objects. */
+# define HBLK_OBJS(sz_in_bytes) (HBLKSIZE/(sz_in_bytes))
struct hblk {
- word hb_body[BODY_SZ];
+ char hb_body[HBLKSIZE];
};
-# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
+# define HBLK_IS_FREE(hdr) (((hdr) -> hb_flags & FREE_BLK) != 0)
-# define OBJ_SZ_TO_BLOCKS(sz) \
- divHBLKSZ(WORDS_TO_BYTES(sz) + HBLKSIZE-1)
- /* Size of block (in units of HBLKSIZE) needed to hold objects of */
- /* given sz (in words). */
+# define OBJ_SZ_TO_BLOCKS(sz) divHBLKSZ((sz) + HBLKSIZE-1)
+ /* Size of block (in units of HBLKSIZE) needed to hold objects of */
+ /* given sz (in bytes). */
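+/* E.g. with 4K blocks, OBJ_SZ_TO_BLOCKS(4096) == 1 while */
+/* OBJ_SZ_TO_BLOCKS(4097) == 2: a large object always occupies a */
+/* whole number of blocks (illustrative arithmetic only). */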
/* Object free list link */
-# define obj_link(p) (*(ptr_t *)(p))
+# define obj_link(p) (*(void **)(p))
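+/* Illustrative sketch: free lists are threaded through the first */
+/* word of each free object, so popping the head is just (given */
+/* void **flh, a pointer to a free-list header): */
+/*   void *result = *flh; */
+/*   *flh = obj_link(result); */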
# define LOG_MAX_MARK_PROCS 6
# define MAX_MARK_PROCS (1 << LOG_MAX_MARK_PROCS)
-/* Root sets. Logically private to mark_rts.c. But we don't want the */
-/* tables scanned, so we put them here. */
-/* MAX_ROOT_SETS is the maximum number of ranges that can be */
-/* registered as static roots. */
+/* Root sets. Logically private to mark_rts.c. But we don't want the */
+/* tables scanned, so we put them here. */
+/* MAX_ROOT_SETS is the maximum number of ranges that can be */
+/* registered as static roots. */
# ifdef LARGE_CONFIG
-# define MAX_ROOT_SETS 4096
+# define MAX_ROOT_SETS 8192
# else
- /* GCJ LOCAL: MAX_ROOT_SETS increased to permit more shared */
- /* libraries to be loaded. */
-# define MAX_ROOT_SETS 1024
+# ifdef SMALL_CONFIG
+# define MAX_ROOT_SETS 512
+# else
+# define MAX_ROOT_SETS 2048
+# endif
# endif
# define MAX_EXCLUSIONS (MAX_ROOT_SETS/4)
-/* Maximum number of segments that can be excluded from root sets. */
+/* Maximum number of segments that can be excluded from root sets. */
/*
* Data structure for excluded static roots.
ptr_t e_end;
};
-/* Data structure for list of root sets. */
-/* We keep a hash table, so that we can filter out duplicate additions. */
-/* Under Win32, we need to do a better job of filtering overlaps, so */
-/* we resort to sequential search, and pay the price. */
+/* Data structure for list of root sets. */
+/* We keep a hash table, so that we can filter out duplicate additions. */
+/* Under Win32, we need to do a better job of filtering overlaps, so */
+/* we resort to sequential search, and pay the price. */
struct roots {
- ptr_t r_start;
- ptr_t r_end;
-# if !defined(MSWIN32) && !defined(MSWINCE)
- struct roots * r_next;
-# endif
- GC_bool r_tmp;
- /* Delete before registering new dynamic libraries */
+    ptr_t r_start; /* multiple of word size */
+ ptr_t r_end; /* multiple of word size and greater than r_start */
+# if !defined(MSWIN32) && !defined(MSWINCE)
+ struct roots * r_next;
+# endif
+ GC_bool r_tmp;
+ /* Delete before registering new dynamic libraries */
};
#if !defined(MSWIN32) && !defined(MSWINCE)
- /* Size of hash table index to roots. */
+ /* Size of hash table index to roots. */
# define LOG_RT_SIZE 6
# define RT_SIZE (1 << LOG_RT_SIZE) /* Power of 2, may be != MAX_ROOT_SETS */
#endif
-/* Lists of all heap blocks and free lists */
-/* as well as other random data structures */
-/* that should not be scanned by the */
-/* collector. */
-/* These are grouped together in a struct */
-/* so that they can be easily skipped by the */
-/* GC_mark routine. */
-/* The ordering is weird to make GC_malloc */
-/* faster by keeping the important fields */
-/* sufficiently close together that a */
-/* single load of a base register will do. */
-/* Scalars that could easily appear to */
-/* be pointers are also put here. */
-/* The main fields should precede any */
-/* conditionally included fields, so that */
-/* gc_inl.h will work even if a different set */
-/* of macros is defined when the client is */
-/* compiled. */
+/* Lists of all heap blocks and free lists */
+/* as well as other random data structures */
+/* that should not be scanned by the */
+/* collector. */
+/* These are grouped together in a struct */
+/* so that they can be easily skipped by the */
+/* GC_mark routine. */
+/* The ordering is weird to make GC_malloc */
+/* faster by keeping the important fields */
+/* sufficiently close together that a */
+/* single load of a base register will do. */
+/* Scalars that could easily appear to */
+/* be pointers are also put here. */
+/* The main fields should precede any */
+/* conditionally included fields, so that */
+/* gc_inl.h will work even if a different set */
+/* of macros is defined when the client is */
+/* compiled. */
struct _GC_arrays {
- word _heapsize;
+ word _heapsize; /* Heap size in bytes. */
word _max_heapsize;
- word _requested_heapsize; /* Heap size due to explicit expansion */
+ word _requested_heapsize; /* Heap size due to explicit expansion */
ptr_t _last_heap_addr;
ptr_t _prev_heap_addr;
word _large_free_bytes;
- /* Total bytes contained in blocks on large object free */
- /* list. */
+ /* Total bytes contained in blocks on large object free */
+ /* list. */
word _large_allocd_bytes;
- /* Total number of bytes in allocated large objects blocks. */
- /* For the purposes of this counter and the next one only, a */
- /* large object is one that occupies a block of at least */
- /* 2*HBLKSIZE. */
+ /* Total number of bytes in allocated large objects blocks. */
+ /* For the purposes of this counter and the next one only, a */
+ /* large object is one that occupies a block of at least */
+ /* 2*HBLKSIZE. */
word _max_large_allocd_bytes;
- /* Maximum number of bytes that were ever allocated in */
- /* large object blocks. This is used to help decide when it */
- /* is safe to split up a large block. */
- word _words_allocd_before_gc;
- /* Number of words allocated before this */
- /* collection cycle. */
+ /* Maximum number of bytes that were ever allocated in */
+ /* large object blocks. This is used to help decide when it */
+ /* is safe to split up a large block. */
+ word _bytes_allocd_before_gc;
+ /* Number of bytes allocated before this */
+ /* collection cycle. */
# ifndef SEPARATE_GLOBALS
- word _words_allocd;
- /* Number of words allocated during this collection cycle */
-# endif
- word _words_wasted;
- /* Number of words wasted due to internal fragmentation */
- /* in large objects, or due to dropping blacklisted */
- /* blocks, since last gc. Approximate. */
- word _words_finalized;
- /* Approximate number of words in objects (and headers) */
- /* That became ready for finalization in the last */
- /* collection. */
+ word _bytes_allocd;
+ /* Number of bytes allocated during this collection cycle */
+# endif
+ word _bytes_dropped;
+ /* Number of black-listed bytes dropped during GC cycle */
+ /* as a result of repeated scanning during allocation */
+ /* attempts. These are treated largely as allocated, */
+ /* even though they are not useful to the client. */
+ word _bytes_finalized;
+ /* Approximate number of bytes in objects (and headers) */
+ /* that became ready for finalization in the last */
+ /* collection. */
word _non_gc_bytes_at_gc;
- /* Number of explicitly managed bytes of storage */
- /* at last collection. */
- word _mem_freed;
- /* Number of explicitly deallocated words of memory */
- /* since last collection. */
- word _finalizer_mem_freed;
- /* Words of memory explicitly deallocated while */
- /* finalizers were running. Used to approximate mem. */
- /* explicitly deallocated by finalizers. */
+ /* Number of explicitly managed bytes of storage */
+ /* at last collection. */
+ word _bytes_freed;
+ /* Number of explicitly deallocated bytes of memory */
+ /* since last collection. */
+ word _finalizer_bytes_freed;
+ /* Bytes of memory explicitly deallocated while */
+ /* finalizers were running. Used to approximate mem. */
+ /* explicitly deallocated by finalizers. */
ptr_t _scratch_end_ptr;
ptr_t _scratch_last_end_ptr;
- /* Used by headers.c, and can easily appear to point to */
- /* heap. */
+ /* Used by headers.c, and can easily appear to point to */
+ /* heap. */
GC_mark_proc _mark_procs[MAX_MARK_PROCS];
- /* Table of user-defined mark procedures. There is */
- /* a small number of these, which can be referenced */
- /* by DS_PROC mark descriptors. See gc_mark.h. */
+ /* Table of user-defined mark procedures. There is */
+ /* a small number of these, which can be referenced */
+ /* by DS_PROC mark descriptors. See gc_mark.h. */
# ifndef SEPARATE_GLOBALS
- ptr_t _objfreelist[MAXOBJSZ+1];
- /* free list for objects */
- ptr_t _aobjfreelist[MAXOBJSZ+1];
- /* free list for atomic objs */
+ void *_objfreelist[MAXOBJGRANULES+1];
+ /* free list for objects */
+ void *_aobjfreelist[MAXOBJGRANULES+1];
+ /* free list for atomic objs */
# endif
- ptr_t _uobjfreelist[MAXOBJSZ+1];
- /* uncollectable but traced objs */
- /* objects on this and auobjfreelist */
- /* are always marked, except during */
- /* garbage collections. */
+ void *_uobjfreelist[MAXOBJGRANULES+1];
+ /* uncollectable but traced objs */
+ /* objects on this and auobjfreelist */
+ /* are always marked, except during */
+ /* garbage collections. */
# ifdef ATOMIC_UNCOLLECTABLE
- ptr_t _auobjfreelist[MAXOBJSZ+1];
+ void *_auobjfreelist[MAXOBJGRANULES+1];
# endif
- /* uncollectable but traced objs */
+ /* uncollectable but traced objs */
-# ifdef GATHERSTATS
word _composite_in_use;
- /* Number of words in accessible composite */
- /* objects. */
+ /* Number of words in accessible composite */
+ /* objects. */
word _atomic_in_use;
- /* Number of words in accessible atomic */
- /* objects. */
-# endif
+ /* Number of words in accessible atomic */
+ /* objects. */
# ifdef USE_MUNMAP
word _unmapped_bytes;
# endif
-# ifdef MERGE_SIZES
- unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
- /* Number of words to allocate for a given allocation request in */
- /* bytes. */
-# endif
+
+ size_t _size_map[MAXOBJBYTES+1];
+ /* Number of granules to allocate when asked for a certain */
+ /* number of bytes. */
# ifdef STUBBORN_ALLOC
- ptr_t _sobjfreelist[MAXOBJSZ+1];
+ ptr_t _sobjfreelist[MAXOBJGRANULES+1];
# endif
- /* free list for immutable objects */
- map_entry_type * _obj_map[MAXOBJSZ+1];
+ /* free list for immutable objects */
+# ifdef MARK_BIT_PER_GRANULE
+ short * _obj_map[MAXOBJGRANULES+1];
/* If not NIL, then a pointer to a map of valid */
- /* object addresses. _obj_map[sz][i] is j if the */
- /* address block_start+i is a valid pointer */
- /* to an object at block_start + */
- /* WORDS_TO_BYTES(BYTES_TO_WORDS(i) - j) */
- /* I.e. j is a word displacement from the */
- /* object beginning. */
- /* The entry is OBJ_INVALID if the corresponding */
- /* address is not a valid pointer. It is */
- /* OFFSET_TOO_BIG if the value j would be too */
- /* large to fit in the entry. (Note that the */
- /* size of these entries matters, both for */
- /* space consumption and for cache utilization.) */
-# define OFFSET_TOO_BIG 0xfe
-# define OBJ_INVALID 0xff
-# define MAP_ENTRY(map, bytes) (map)[bytes]
-# define MAP_ENTRIES HBLKSIZE
-# define MAP_SIZE MAP_ENTRIES
-# define CPP_MAX_OFFSET (OFFSET_TOO_BIG - 1)
-# define MAX_OFFSET ((word)CPP_MAX_OFFSET)
- /* The following are used only if GC_all_interior_ptrs != 0 */
-# define VALID_OFFSET_SZ \
- (CPP_MAX_OFFSET > WORDS_TO_BYTES(CPP_MAXOBJSZ)? \
- CPP_MAX_OFFSET+1 \
- : WORDS_TO_BYTES(CPP_MAXOBJSZ)+1)
- char _valid_offsets[VALID_OFFSET_SZ];
- /* GC_valid_offsets[i] == TRUE ==> i */
- /* is registered as a displacement. */
- char _modws_valid_offsets[sizeof(word)];
- /* GC_valid_offsets[i] ==> */
- /* GC_modws_valid_offsets[i%sizeof(word)] */
-# define OFFSET_VALID(displ) \
- (GC_all_interior_pointers || GC_valid_offsets[displ])
+ /* object addresses. */
+ /* _obj_map[sz_in_granules][i] is */
+ /* i % sz_in_granules. */
+ /* This is now used purely to replace a */
+ /* division in the marker by a table lookup. */
+ /* _obj_map[0] is used for large objects and */
+ /* contains all nonzero entries. This gets us */
+ /* out of the marker fast path without an extra */
+ /* test. */
+# define MAP_LEN BYTES_TO_GRANULES(HBLKSIZE)
+# endif
+# define VALID_OFFSET_SZ HBLKSIZE
+ char _valid_offsets[VALID_OFFSET_SZ];
+ /* GC_valid_offsets[i] == TRUE ==> i */
+ /* is registered as a displacement. */
+ char _modws_valid_offsets[sizeof(word)];
+ /* GC_valid_offsets[i] ==> */
+ /* GC_modws_valid_offsets[i%sizeof(word)] */
# ifdef STUBBORN_ALLOC
page_hash_table _changed_pages;
- /* Stubborn object pages that were changes since last call to */
- /* GC_read_changed. */
+ /* Stubborn object pages that were changed since last call to */
+ /* GC_read_changed. */
page_hash_table _prev_changed_pages;
- /* Stubborn object pages that were changes before last call to */
- /* GC_read_changed. */
+ /* Stubborn object pages that were changed before last call to */
+ /* GC_read_changed. */
# endif
-# if defined(PROC_VDB) || defined(MPROTECT_VDB)
- page_hash_table _grungy_pages; /* Pages that were dirty at last */
- /* GC_read_dirty. */
+# if defined(PROC_VDB) || defined(MPROTECT_VDB) || \
+ defined(GWW_VDB) || defined(MANUAL_VDB)
+ page_hash_table _grungy_pages; /* Pages that were dirty at last */
+ /* GC_read_dirty. */
# endif
-# ifdef MPROTECT_VDB
- VOLATILE page_hash_table _dirty_pages;
- /* Pages dirtied since last GC_read_dirty. */
+# if defined(MPROTECT_VDB) || defined(MANUAL_VDB)
+ volatile page_hash_table _dirty_pages;
+ /* Pages dirtied since last GC_read_dirty. */
# endif
-# ifdef PROC_VDB
- page_hash_table _written_pages; /* Pages ever dirtied */
+# if defined(PROC_VDB) || defined(GWW_VDB)
+ page_hash_table _written_pages; /* Pages ever dirtied */
# endif
+
+#ifndef MAX_HEAP_SECTS
# ifdef LARGE_CONFIG
# if CPP_WORDSZ > 32
-# define MAX_HEAP_SECTS 4096 /* overflows at roughly 64 GB */
+# define MAX_HEAP_SECTS 8192 /* overflows at roughly 128 GB */
# else
-# define MAX_HEAP_SECTS 768 /* Separately added heap sections. */
+# define MAX_HEAP_SECTS 768 /* Separately added heap sections. */
# endif
# else
# ifdef SMALL_CONFIG
-# define MAX_HEAP_SECTS 128 /* Roughly 256MB (128*2048*1K) */
+# define MAX_HEAP_SECTS 128 /* Roughly 256MB (128*2048*1K) */
# else
-# define MAX_HEAP_SECTS 384 /* Roughly 3GB */
+# if CPP_WORDSZ > 32
+# define MAX_HEAP_SECTS 1024 /* Roughly 8GB */
+# else
+# define MAX_HEAP_SECTS 512 /* Roughly 4GB */
+# endif
# endif
# endif
+#endif /* !MAX_HEAP_SECTS */
+
struct HeapSect {
- ptr_t hs_start; word hs_bytes;
- } _heap_sects[MAX_HEAP_SECTS];
+ ptr_t hs_start; size_t hs_bytes;
+ } _heap_sects[MAX_HEAP_SECTS]; /* Heap segments potentially */
+ /* containing client objects. */
+# if defined(USE_PROC_FOR_LIBRARIES)
+ struct HeapSect _our_memory[MAX_HEAP_SECTS];
+ /* All GET_MEM allocated */
+ /* memory. Includes block */
+ /* headers and the like. */
+# endif
# if defined(MSWIN32) || defined(MSWINCE)
ptr_t _heap_bases[MAX_HEAP_SECTS];
- /* Start address of memory regions obtained from kernel. */
+ /* Start address of memory regions obtained from kernel. */
# endif
# ifdef MSWINCE
word _heap_lengths[MAX_HEAP_SECTS];
- /* Commited lengths of memory regions obtained from kernel. */
+ /* Committed lengths of memory regions obtained from kernel. */
# endif
struct roots _static_roots[MAX_ROOT_SETS];
# if !defined(MSWIN32) && !defined(MSWINCE)
/* Block header index; see gc_headers.h */
bottom_index * _all_nils;
bottom_index * _top_index [TOP_SZ];
+#ifdef ENABLE_TRACE
+ ptr_t _trace_addr;
+#endif
#ifdef SAVE_CALL_CHAIN
- struct callinfo _last_stack[NFRAMES]; /* Stack at last garbage collection.*/
- /* Useful for debugging mysterious */
- /* object disappearances. */
- /* In the multithreaded case, we */
- /* currently only save the calling */
- /* stack. */
+ struct callinfo _last_stack[NFRAMES]; /* Stack at last garbage collection.*/
+ /* Useful for debugging mysterious */
+ /* object disappearances. */
+ /* In the multithreaded case, we */
+ /* currently only save the calling */
+ /* stack. */
#endif
};
-GC_API GC_FAR struct _GC_arrays GC_arrays;
+GC_API_PRIV GC_FAR struct _GC_arrays GC_arrays;
# ifndef SEPARATE_GLOBALS
# define GC_objfreelist GC_arrays._objfreelist
# define GC_aobjfreelist GC_arrays._aobjfreelist
-# define GC_words_allocd GC_arrays._words_allocd
+# define GC_bytes_allocd GC_arrays._bytes_allocd
# endif
# define GC_uobjfreelist GC_arrays._uobjfreelist
# ifdef ATOMIC_UNCOLLECTABLE
# define GC_changed_pages GC_arrays._changed_pages
# define GC_prev_changed_pages GC_arrays._prev_changed_pages
# endif
-# define GC_obj_map GC_arrays._obj_map
+# ifdef MARK_BIT_PER_GRANULE
+# define GC_obj_map GC_arrays._obj_map
+# endif
# define GC_last_heap_addr GC_arrays._last_heap_addr
# define GC_prev_heap_addr GC_arrays._prev_heap_addr
-# define GC_words_wasted GC_arrays._words_wasted
# define GC_large_free_bytes GC_arrays._large_free_bytes
# define GC_large_allocd_bytes GC_arrays._large_allocd_bytes
# define GC_max_large_allocd_bytes GC_arrays._max_large_allocd_bytes
-# define GC_words_finalized GC_arrays._words_finalized
+# define GC_bytes_dropped GC_arrays._bytes_dropped
+# define GC_bytes_finalized GC_arrays._bytes_finalized
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
-# define GC_mem_freed GC_arrays._mem_freed
-# define GC_finalizer_mem_freed GC_arrays._finalizer_mem_freed
+# define GC_bytes_freed GC_arrays._bytes_freed
+# define GC_finalizer_bytes_freed GC_arrays._finalizer_bytes_freed
# define GC_scratch_end_ptr GC_arrays._scratch_end_ptr
# define GC_scratch_last_end_ptr GC_arrays._scratch_last_end_ptr
# define GC_mark_procs GC_arrays._mark_procs
# define GC_heapsize GC_arrays._heapsize
# define GC_max_heapsize GC_arrays._max_heapsize
# define GC_requested_heapsize GC_arrays._requested_heapsize
-# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
+# define GC_bytes_allocd_before_gc GC_arrays._bytes_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
+# ifdef USE_PROC_FOR_LIBRARIES
+# define GC_our_memory GC_arrays._our_memory
+# endif
# define GC_last_stack GC_arrays._last_stack
+#ifdef ENABLE_TRACE
+#define GC_trace_addr GC_arrays._trace_addr
+#endif
# ifdef USE_MUNMAP
# define GC_unmapped_bytes GC_arrays._unmapped_bytes
+# else
+# define GC_unmapped_bytes 0
# endif
# if defined(MSWIN32) || defined(MSWINCE)
# define GC_heap_bases GC_arrays._heap_bases
# define GC_excl_table GC_arrays._excl_table
# define GC_all_nils GC_arrays._all_nils
# define GC_top_index GC_arrays._top_index
-# if defined(PROC_VDB) || defined(MPROTECT_VDB)
+# if defined(PROC_VDB) || defined(MPROTECT_VDB) || \
+ defined(GWW_VDB) || defined(MANUAL_VDB)
# define GC_grungy_pages GC_arrays._grungy_pages
# endif
-# ifdef MPROTECT_VDB
+# if defined(MPROTECT_VDB) || defined(MANUAL_VDB)
# define GC_dirty_pages GC_arrays._dirty_pages
# endif
-# ifdef PROC_VDB
+# if defined(PROC_VDB) || defined(GWW_VDB)
# define GC_written_pages GC_arrays._written_pages
# endif
-# ifdef GATHERSTATS
-# define GC_composite_in_use GC_arrays._composite_in_use
-# define GC_atomic_in_use GC_arrays._atomic_in_use
-# endif
-# ifdef MERGE_SIZES
-# define GC_size_map GC_arrays._size_map
-# endif
+# define GC_composite_in_use GC_arrays._composite_in_use
+# define GC_atomic_in_use GC_arrays._atomic_in_use
+# define GC_size_map GC_arrays._size_map
# define beginGC_arrays ((ptr_t)(&GC_arrays))
# define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays))
/* Object kinds: */
# define MAXOBJKINDS 16
-extern struct obj_kind {
- ptr_t *ok_freelist; /* Array of free listheaders for this kind of object */
- /* Point either to GC_arrays or to storage allocated */
- /* with GC_scratch_alloc. */
+GC_EXTERN struct obj_kind {
+ void **ok_freelist; /* Array of free list headers for this kind of object */
+ /* Point either to GC_arrays or to storage allocated */
+ /* with GC_scratch_alloc. */
struct hblk **ok_reclaim_list;
- /* List headers for lists of blocks waiting to be */
- /* swept. */
- word ok_descriptor; /* Descriptor template for objects in this */
- /* block. */
+ /* List headers for lists of blocks waiting to be */
+ /* swept. */
+ /* Indexed by object size in granules. */
+ word ok_descriptor; /* Descriptor template for objects in this */
+ /* block. */
GC_bool ok_relocate_descr;
- /* Add object size in bytes to descriptor */
- /* template to obtain descriptor. Otherwise */
- /* template is used as is. */
+ /* Add object size in bytes to descriptor */
+ /* template to obtain descriptor. Otherwise */
+ /* template is used as is. */
GC_bool ok_init; /* Clear objects before putting them on the free list. */
} GC_obj_kinds[MAXOBJKINDS];
# define beginGC_obj_kinds ((ptr_t)(&GC_obj_kinds))
# define endGC_obj_kinds (beginGC_obj_kinds + (sizeof GC_obj_kinds))
-/* Variables that used to be in GC_arrays, but need to be accessed by */
-/* inline allocation code. If they were in GC_arrays, the inlined */
-/* allocation code would include GC_arrays offsets (as it did), which */
-/* introduce maintenance problems. */
+/* Variables that used to be in GC_arrays, but need to be accessed by */
+/* inline allocation code. If they were in GC_arrays, the inlined */
+/* allocation code would include GC_arrays offsets (as it did), which */
+/* introduce maintenance problems. */
#ifdef SEPARATE_GLOBALS
- word GC_words_allocd;
- /* Number of words allocated during this collection cycle */
- ptr_t GC_objfreelist[MAXOBJSZ+1];
- /* free list for NORMAL objects */
+ extern word GC_bytes_allocd;
+ /* Number of bytes allocated during this collection cycle. */
+ extern ptr_t GC_objfreelist[MAXOBJGRANULES+1];
+ /* free list for NORMAL objects */
# define beginGC_objfreelist ((ptr_t)(&GC_objfreelist))
# define endGC_objfreelist (beginGC_objfreelist + sizeof(GC_objfreelist))
- ptr_t GC_aobjfreelist[MAXOBJSZ+1];
- /* free list for atomic (PTRFREE) objs */
+ extern ptr_t GC_aobjfreelist[MAXOBJGRANULES+1];
+ /* free list for atomic (PTRFREE) objs */
# define beginGC_aobjfreelist ((ptr_t)(&GC_aobjfreelist))
# define endGC_aobjfreelist (beginGC_aobjfreelist + sizeof(GC_aobjfreelist))
#endif
# define IS_UNCOLLECTABLE(k) ((k) == UNCOLLECTABLE)
# endif
-extern int GC_n_kinds;
+GC_EXTERN unsigned GC_n_kinds;
-GC_API word GC_fo_entries;
+GC_EXTERN word GC_n_heap_sects; /* Number of separately added heap */
+ /* sections. */
-extern word GC_n_heap_sects; /* Number of separately added heap */
- /* sections. */
+#ifdef USE_PROC_FOR_LIBRARIES
+ GC_EXTERN word GC_n_memory; /* Number of GET_MEM allocated memory */
+ /* sections. */
+#endif
-extern word GC_page_size;
+GC_EXTERN word GC_page_size;
-# if defined(MSWIN32) || defined(MSWINCE)
+#if defined(MSWIN32) || defined(MSWINCE)
struct _SYSTEM_INFO;
- extern struct _SYSTEM_INFO GC_sysinfo;
- extern word GC_n_heap_bases; /* See GC_heap_bases. */
-# endif
-
-extern word GC_total_stack_black_listed;
- /* Number of bytes on stack blacklist. */
-
-extern word GC_black_list_spacing;
- /* Average number of bytes between blacklisted */
- /* blocks. Approximate. */
- /* Counts only blocks that are */
- /* "stack-blacklisted", i.e. that are */
- /* problematic in the interior of an object. */
-
-extern map_entry_type * GC_invalid_map;
- /* Pointer to the nowhere valid hblk map */
- /* Blocks pointing to this map are free. */
-
-extern struct hblk * GC_hblkfreelist[];
- /* List of completely empty heap blocks */
- /* Linked through hb_next field of */
- /* header structure associated with */
- /* block. */
+ GC_EXTERN struct _SYSTEM_INFO GC_sysinfo;
+#endif
-extern GC_bool GC_objects_are_marked; /* There are marked objects in */
- /* the heap. */
+GC_EXTERN word GC_black_list_spacing;
+ /* Average number of bytes between blacklisted */
+ /* blocks. Approximate. */
+ /* Counts only blocks that are */
+ /* "stack-blacklisted", i.e. that are */
+ /* problematic in the interior of an object. */
-#ifndef SMALL_CONFIG
- extern GC_bool GC_incremental;
- /* Using incremental/generational collection. */
-# define TRUE_INCREMENTAL \
- (GC_incremental && GC_time_limit != GC_TIME_UNLIMITED)
- /* True incremental, not just generational, mode */
-#else
+#ifdef GC_DISABLE_INCREMENTAL
# define GC_incremental FALSE
- /* Hopefully allow optimizer to remove some code. */
+ /* Hopefully allow optimizer to remove some code. */
# define TRUE_INCREMENTAL FALSE
-#endif
-
-extern GC_bool GC_dirty_maintained;
- /* Dirty bits are being maintained, */
- /* either for incremental collection, */
- /* or to limit the root set. */
+#else
+ GC_EXTERN GC_bool GC_incremental;
+ /* Using incremental/generational collection. */
+# define TRUE_INCREMENTAL \
+ (GC_incremental && GC_time_limit != GC_TIME_UNLIMITED)
+ /* True incremental, not just generational, mode */
+#endif /* !GC_DISABLE_INCREMENTAL */
-extern word GC_root_size; /* Total size of registered root sections */
+GC_EXTERN GC_bool GC_dirty_maintained;
+ /* Dirty bits are being maintained, */
+ /* either for incremental collection, */
+ /* or to limit the root set. */
-extern GC_bool GC_debugging_started; /* GC_debug_malloc has been called. */
+GC_EXTERN word GC_root_size; /* Total size of registered root sections. */
-extern long GC_large_alloc_warn_interval;
- /* Interval between unsuppressed warnings. */
+GC_EXTERN GC_bool GC_debugging_started;
+ /* GC_debug_malloc has been called. */
-extern long GC_large_alloc_warn_suppressed;
- /* Number of warnings suppressed so far. */
+/* This is used by GC_do_blocking[_inner](). */
+struct blocking_data {
+ GC_fn_type fn;
+ void * client_data; /* and result */
+};
-#ifdef THREADS
- extern GC_bool GC_world_stopped;
+/* This is used by GC_call_with_gc_active(), GC_push_all_stack_frames(). */
+struct GC_activation_frame_s {
+ ptr_t saved_stack_ptr;
+#ifdef IA64
+ ptr_t saved_backing_store_ptr;
+ ptr_t backing_store_end;
#endif
+ struct GC_activation_frame_s *prev;
+};
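+
+/* A sketch of the intended protocol (an assumption drawn from the */
+/* comments above, not a definitive description): each nested */
+/* GC_call_with_gc_active() plants one of these frames on its own */
+/* stack and links it to the previous one through prev, so that the */
+/* stack-pushing code can walk the chain and skip the stack ranges */
+/* belonging to GC_do_blocking() clients. */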
-/* Operations */
-# ifndef abs
-# define abs(x) ((x) < 0? (-(x)) : (x))
-# endif
+#ifdef THREADS
+/* Process all activation "frames" - scan entire stack except for */
+/* frames belonging to the user functions invoked by GC_do_blocking(). */
+ GC_INNER void GC_push_all_stack_frames(ptr_t lo, ptr_t hi,
+ struct GC_activation_frame_s *activation_frame);
+ GC_EXTERN word GC_total_stacksize; /* updated on every push_all_stacks */
+#else
+ GC_EXTERN ptr_t GC_blocked_sp;
+ GC_EXTERN struct GC_activation_frame_s *GC_activation_frame;
+ /* Points to the "frame" data held in stack by */
+ /* the innermost GC_call_with_gc_active(). */
+ /* NULL if no such "frame" active. */
+#endif /* !THREADS */
+#ifdef IA64
+ /* Similar to GC_push_all_stack_frames() but for the IA-64 register */
+ /* backing store. */
+ GC_INNER void GC_push_all_register_frames(ptr_t bs_lo, ptr_t bs_hi,
+ int eager, struct GC_activation_frame_s *activation_frame);
+#endif
/* Marks are in a reserved area in */
/* each heap block. Each word has one mark bit associated */
/* with it. Only those corresponding to the beginning of an */
/* object are used. */
-/* Set mark bit correctly, even if mark bits may be concurrently */
-/* accessed. */
+/* Set mark bit correctly, even if mark bits may be concurrently */
+/* accessed. */
#ifdef PARALLEL_MARK
# define OR_WORD(addr, bits) \
- { word old; \
- do { \
- old = *((volatile word *)addr); \
- } while (!GC_compare_and_exchange((addr), old, old | (bits))); \
- }
-# define OR_WORD_EXIT_IF_SET(addr, bits, exit_label) \
- { word old; \
- word my_bits = (bits); \
- do { \
- old = *((volatile word *)addr); \
- if (old & my_bits) goto exit_label; \
- } while (!GC_compare_and_exchange((addr), old, old | my_bits)); \
- }
+ { AO_or((volatile AO_t *)(addr), (AO_t)(bits)); }
#else
# define OR_WORD(addr, bits) *(addr) |= (bits)
-# define OR_WORD_EXIT_IF_SET(addr, bits, exit_label) \
- { \
- word old = *(addr); \
- word my_bits = (bits); \
- if (old & my_bits) goto exit_label; \
- *(addr) = (old | my_bits); \
- }
#endif
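+
+/* Illustrative use (not collector code): to set bit k in a word w */
+/* that concurrent markers may also update, write */
+/* OR_WORD(&w, (word)1 << k); */
+/* Under PARALLEL_MARK this expands to an atomic AO_or(); otherwise */
+/* it is a plain, unsynchronized read-modify-write. */
+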
/* Mark bit operations */
/*
- * Retrieve, set, clear the mark bit corresponding
- * to the nth word in a given heap block.
+ * Retrieve, set, clear the nth mark bit in a given heap block.
*
- * (Recall that bit n corresponds to object beginning at word n
+ * (Recall that bit n corresponds to nth object or allocation granule
* relative to the beginning of the block, including unused words)
*/
#ifdef USE_MARK_BYTES
-# define mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[(n) >> 1])
-# define set_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[(n)>>1]) = 1
-# define clear_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[(n)>>1]) = 0
+# define mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n])
+# define set_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n]) = 1
+# define clear_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n]) = 0
#else /* !USE_MARK_BYTES */
# define mark_bit_from_hdr(hhdr,n) (((hhdr)->hb_marks[divWORDSZ(n)] \
- >> (modWORDSZ(n))) & (word)1)
+ >> (modWORDSZ(n))) & (word)1)
# define set_mark_bit_from_hdr(hhdr,n) \
- OR_WORD((hhdr)->hb_marks+divWORDSZ(n), \
- (word)1 << modWORDSZ(n))
+ OR_WORD((hhdr)->hb_marks+divWORDSZ(n), \
+ (word)1 << modWORDSZ(n))
# define clear_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
- &= ~((word)1 << modWORDSZ(n))
+ &= ~((word)1 << modWORDSZ(n))
#endif /* !USE_MARK_BYTES */
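+
+/* Example (illustrative only): a test-and-set of the nth mark bit in */
+/* the block described by hhdr can be written as */
+/* if (!mark_bit_from_hdr(hhdr, n)) set_mark_bit_from_hdr(hhdr, n); */
+/* and behaves the same whether marks are kept as bytes or as bits. */
+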
+#ifdef MARK_BIT_PER_OBJ
+# define MARK_BIT_NO(offset, sz) (((unsigned)(offset))/(sz))
+ /* Get the mark bit index corresponding to the given byte */
+ /* offset and size (in bytes). */
+# define MARK_BIT_OFFSET(sz) 1
+ /* Spacing between useful mark bits. */
+# define IF_PER_OBJ(x) x
+# define FINAL_MARK_BIT(sz) ((sz) > MAXOBJBYTES? 1 : HBLK_OBJS(sz))
+ /* Position of final, always set, mark bit. */
+#else /* MARK_BIT_PER_GRANULE */
+# define MARK_BIT_NO(offset, sz) BYTES_TO_GRANULES((unsigned)(offset))
+# define MARK_BIT_OFFSET(sz) BYTES_TO_GRANULES(sz)
+# define IF_PER_OBJ(x)
+# define FINAL_MARK_BIT(sz) \
+ ((sz) > MAXOBJBYTES? MARK_BITS_PER_HBLK \
+ : BYTES_TO_GRANULES((sz) * HBLK_OBJS(sz)))
+#endif
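+
+/* Putting the above together (a sketch, assuming hhdr -> hb_sz holds */
+/* the object size in bytes, as the comments above suggest): the mark */
+/* bit for the object at byte offset off within its block is bit */
+/* MARK_BIT_NO(off, hhdr -> hb_sz); useful bits are spaced */
+/* MARK_BIT_OFFSET(hhdr -> hb_sz) apart, ending at the always-set */
+/* FINAL_MARK_BIT(hhdr -> hb_sz). */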
+
/* Important internal collector routines */
-ptr_t GC_approx_sp GC_PROTO((void));
-
-GC_bool GC_should_collect GC_PROTO((void));
-
-void GC_apply_to_all_blocks GC_PROTO(( \
- void (*fn) GC_PROTO((struct hblk *h, word client_data)), \
- word client_data));
- /* Invoke fn(hbp, client_data) for each */
- /* allocated heap block. */
-struct hblk * GC_next_used_block GC_PROTO((struct hblk * h));
- /* Return first in-use block >= h */
-struct hblk * GC_prev_block GC_PROTO((struct hblk * h));
- /* Return last block <= h. Returned block */
- /* is managed by GC, but may or may not be in */
- /* use. */
-void GC_mark_init GC_PROTO((void));
-void GC_clear_marks GC_PROTO((void)); /* Clear mark bits for all heap objects. */
-void GC_invalidate_mark_state GC_PROTO((void));
- /* Tell the marker that marked */
- /* objects may point to unmarked */
- /* ones, and roots may point to */
- /* unmarked objects. */
- /* Reset mark stack. */
-GC_bool GC_mark_stack_empty GC_PROTO((void));
-GC_bool GC_mark_some GC_PROTO((ptr_t cold_gc_frame));
- /* Perform about one pages worth of marking */
- /* work of whatever kind is needed. Returns */
- /* quickly if no collection is in progress. */
- /* Return TRUE if mark phase finished. */
-void GC_initiate_gc GC_PROTO((void));
- /* initiate collection. */
- /* If the mark state is invalid, this */
- /* becomes full colleection. Otherwise */
- /* it's partial. */
-void GC_push_all GC_PROTO((ptr_t bottom, ptr_t top));
- /* Push everything in a range */
- /* onto mark stack. */
-void GC_push_selected GC_PROTO(( \
- ptr_t bottom, \
- ptr_t top, \
- int (*dirty_fn) GC_PROTO((struct hblk *h)), \
- void (*push_fn) GC_PROTO((ptr_t bottom, ptr_t top)) ));
- /* Push all pages h in [b,t) s.t. */
- /* select_fn(h) != 0 onto mark stack. */
-#ifndef SMALL_CONFIG
- void GC_push_conditional GC_PROTO((ptr_t b, ptr_t t, GC_bool all));
+GC_INNER ptr_t GC_approx_sp(void);
+
+GC_INNER GC_bool GC_should_collect(void);
+
+void GC_apply_to_all_blocks(void (*fn)(struct hblk *h, word client_data),
+ word client_data);
+ /* Invoke fn(hbp, client_data) for each */
+ /* allocated heap block. */
+GC_INNER struct hblk * GC_next_used_block(struct hblk * h);
+ /* Return first in-use block >= h */
+GC_INNER struct hblk * GC_prev_block(struct hblk * h);
+ /* Return last block <= h. Returned block */
+ /* is managed by GC, but may or may not be in */
+ /* use. */
+GC_INNER void GC_mark_init(void);
+GC_INNER void GC_clear_marks(void);
+ /* Clear mark bits for all heap objects. */
+GC_INNER void GC_invalidate_mark_state(void);
+ /* Tell the marker that marked */
+ /* objects may point to unmarked */
+ /* ones, and roots may point to */
+ /* unmarked objects. Reset mark stack. */
+GC_INNER GC_bool GC_mark_stack_empty(void);
+GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame);
+ /* Perform about one page's worth of marking */
+ /* work of whatever kind is needed. Returns */
+ /* quickly if no collection is in progress. */
+ /* Return TRUE if mark phase finished. */
+GC_INNER void GC_initiate_gc(void);
+ /* initiate collection. */
+ /* If the mark state is invalid, this */
+ /* becomes full collection. Otherwise */
+ /* it's partial. */
+
+GC_INNER GC_bool GC_collection_in_progress(void);
+ /* Collection is in progress, or was abandoned. */
+
+GC_INNER void GC_push_all(ptr_t bottom, ptr_t top);
+ /* Push everything in a range */
+ /* onto mark stack. */
+#ifndef GC_DISABLE_INCREMENTAL
+ GC_INNER void GC_push_conditional(ptr_t b, ptr_t t, GC_bool all);
#else
# define GC_push_conditional(b, t, all) GC_push_all(b, t)
#endif
- /* Do either of the above, depending */
- /* on the third arg. */
-void GC_push_all_stack GC_PROTO((ptr_t b, ptr_t t));
- /* As above, but consider */
- /* interior pointers as valid */
-void GC_push_all_eager GC_PROTO((ptr_t b, ptr_t t));
- /* Same as GC_push_all_stack, but */
- /* ensures that stack is scanned */
- /* immediately, not just scheduled */
- /* for scanning. */
-#ifndef THREADS
- void GC_push_all_stack_partially_eager GC_PROTO(( \
- ptr_t bottom, ptr_t top, ptr_t cold_gc_frame ));
- /* Similar to GC_push_all_eager, but only the */
- /* part hotter than cold_gc_frame is scanned */
- /* immediately. Needed to ensure that callee- */
- /* save registers are not missed. */
-#else
- /* In the threads case, we push part of the current thread stack */
+ /* Do either of the above, depending */
+ /* on the third arg. */
+GC_INNER void GC_push_all_stack(ptr_t b, ptr_t t);
+ /* As above, but consider */
+ /* interior pointers as valid */
+GC_INNER void GC_push_all_eager(ptr_t b, ptr_t t);
+ /* Same as GC_push_all_stack, but */
+ /* ensures that stack is scanned */
+ /* immediately, not just scheduled */
+ /* for scanning. */
+
+ /* In the threads case, we push part of the current thread stack */
/* with GC_push_all_eager when we push the registers. This gets the */
- /* callee-save registers that may disappear. The remainder of the */
- /* stacks are scheduled for scanning in *GC_push_other_roots, which */
- /* is thread-package-specific. */
-#endif
-void GC_push_current_stack GC_PROTO((ptr_t cold_gc_frame));
- /* Push enough of the current stack eagerly to */
- /* ensure that callee-save registers saved in */
- /* GC frames are scanned. */
- /* In the non-threads case, schedule entire */
- /* stack for scanning. */
-void GC_push_roots GC_PROTO((GC_bool all, ptr_t cold_gc_frame));
- /* Push all or dirty roots. */
-extern void (*GC_push_other_roots) GC_PROTO((void));
- /* Push system or application specific roots */
- /* onto the mark stack. In some environments */
- /* (e.g. threads environments) this is */
- /* predfined to be non-zero. A client supplied */
- /* replacement should also call the original */
- /* function. */
-extern void GC_push_gc_structures GC_PROTO((void));
- /* Push GC internal roots. These are normally */
- /* included in the static data segment, and */
- /* Thus implicitly pushed. But we must do this */
- /* explicitly if normal root processing is */
- /* disabled. Calls the following: */
- extern void GC_push_finalizer_structures GC_PROTO((void));
- extern void GC_push_stubborn_structures GC_PROTO((void));
-# ifdef THREADS
- extern void GC_push_thread_structures GC_PROTO((void));
-# endif
-extern void (*GC_start_call_back) GC_PROTO((void));
- /* Called at start of full collections. */
- /* Not called if 0. Called with allocation */
- /* lock held. */
- /* 0 by default. */
-# if defined(USE_GENERIC_PUSH_REGS)
- void GC_generic_push_regs GC_PROTO((ptr_t cold_gc_frame));
-# else
- void GC_push_regs GC_PROTO((void));
-# endif
-# if defined(SPARC) || defined(IA64)
- /* Cause all stacked registers to be saved in memory. Return a */
- /* pointer to the top of the corresponding memory stack. */
- word GC_save_regs_in_stack GC_PROTO((void));
-# endif
- /* Push register contents onto mark stack. */
- /* If NURSERY is defined, the default push */
- /* action can be overridden with GC_push_proc */
+ /* callee-save registers that may disappear. The remainder of the */
+ /* stacks are scheduled for scanning in *GC_push_other_roots, which */
+ /* is thread-package-specific. */
+
+GC_INNER void GC_push_roots(GC_bool all, ptr_t cold_gc_frame);
+ /* Push all or dirty roots. */
+
+GC_EXTERN void (*GC_push_other_roots)(void);
+ /* Push system or application specific roots */
+ /* onto the mark stack. In some environments */
+ /* (e.g. threads environments) this is */
+ /* predefined to be non-zero. A client */
+ /* supplied replacement should also call the */
+ /* original function. */
+
+GC_INNER void GC_push_finalizer_structures(void);
+#ifdef THREADS
+ void GC_push_thread_structures(void);
+#endif
+GC_EXTERN void (*GC_push_typed_structures)(void);
+ /* A pointer such that we can avoid linking in */
+ /* the typed allocation support if unused. */
-# ifdef NURSERY
- extern void (*GC_push_proc)(ptr_t);
-# endif
-# if defined(MSWIN32) || defined(MSWINCE)
- void __cdecl GC_push_one GC_PROTO((word p));
-# else
- void GC_push_one GC_PROTO((word p));
- /* If p points to an object, mark it */
+GC_INNER void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *),
+ ptr_t arg);
+
+#if defined(SPARC) || defined(IA64)
+ /* Cause all stacked registers to be saved in memory. Return a */
+ /* pointer to the top of the corresponding memory stack. */
+ ptr_t GC_save_regs_in_stack(void);
+#endif
+ /* Push register contents onto mark stack. */
+
+#if defined(MSWIN32) || defined(MSWINCE)
+ void __cdecl GC_push_one(word p);
+#else
+ void GC_push_one(word p);
+ /* If p points to an object, mark it */
/* and push contents on the mark stack */
- /* Pointer recognition test always */
- /* accepts interior pointers, i.e. this */
- /* is appropriate for pointers found on */
- /* stack. */
-# endif
-# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
- void GC_mark_and_push_stack GC_PROTO((word p, ptr_t source));
- /* Ditto, omits plausibility test */
-# else
- void GC_mark_and_push_stack GC_PROTO((word p));
-# endif
-void GC_push_marked GC_PROTO((struct hblk * h, hdr * hhdr));
- /* Push contents of all marked objects in h onto */
- /* mark stack. */
-#ifdef SMALL_CONFIG
-# define GC_push_next_marked_dirty(h) GC_push_next_marked(h)
+ /* Pointer recognition test always */
+ /* accepts interior pointers, i.e. this */
+ /* is appropriate for pointers found on */
+ /* stack. */
+#endif
+
+#if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
+ GC_INNER void GC_mark_and_push_stack(ptr_t p, ptr_t source);
+ /* Ditto, omits plausibility test */
#else
- struct hblk * GC_push_next_marked_dirty GC_PROTO((struct hblk * h));
- /* Invoke GC_push_marked on next dirty block above h. */
- /* Return a pointer just past the end of this block. */
-#endif /* !SMALL_CONFIG */
-struct hblk * GC_push_next_marked GC_PROTO((struct hblk * h));
- /* Ditto, but also mark from clean pages. */
-struct hblk * GC_push_next_marked_uncollectable GC_PROTO((struct hblk * h));
- /* Ditto, but mark only from uncollectable pages. */
-GC_bool GC_stopped_mark GC_PROTO((GC_stop_func stop_func));
- /* Stop world and mark from all roots */
- /* and rescuers. */
-void GC_clear_hdr_marks GC_PROTO((hdr * hhdr));
- /* Clear the mark bits in a header */
-void GC_set_hdr_marks GC_PROTO((hdr * hhdr));
- /* Set the mark bits in a header */
-void GC_set_fl_marks GC_PROTO((ptr_t p));
- /* Set all mark bits associated with */
- /* a free list. */
-void GC_add_roots_inner GC_PROTO((char * b, char * e, GC_bool tmp));
-void GC_remove_roots_inner GC_PROTO((char * b, char * e));
-GC_bool GC_is_static_root GC_PROTO((ptr_t p));
- /* Is the address p in one of the registered static */
- /* root sections? */
-# if defined(MSWIN32) || defined(_WIN32_WCE_EMULATION)
-GC_bool GC_is_tmp_root GC_PROTO((ptr_t p));
- /* Is the address p in one of the temporary static */
- /* root sections? */
-# endif
-void GC_register_dynamic_libraries GC_PROTO((void));
- /* Add dynamic library data sections to the root set. */
-
-GC_bool GC_register_main_static_data GC_PROTO((void));
- /* We need to register the main data segment. Returns */
- /* TRUE unless this is done implicitly as part of */
- /* dynamic library registration. */
-
+ GC_INNER void GC_mark_and_push_stack(ptr_t p);
+#endif
+
+GC_INNER void GC_clear_hdr_marks(hdr * hhdr);
+ /* Clear the mark bits in a header */
+GC_INNER void GC_set_hdr_marks(hdr * hhdr);
+ /* Set the mark bits in a header */
+GC_INNER void GC_set_fl_marks(ptr_t p);
+ /* Set all mark bits associated with */
+ /* a free list. */
+#ifdef GC_ASSERTIONS
+ void GC_check_fl_marks(ptr_t p);
+ /* Check that all mark bits */
+ /* associated with a free list are */
+ /* set. Abort if not. */
+#endif
+void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp);
+GC_INNER void GC_exclude_static_roots_inner(void *start, void *finish);
+GC_INNER void GC_register_dynamic_libraries(void);
+ /* Add dynamic library data sections to the root set. */
+GC_INNER void GC_cond_register_dynamic_libraries(void);
+ /* Remove and reregister dynamic libraries if we're */
+ /* configured to do that at each GC. */
+
/* Machine dependent startup routines */
-ptr_t GC_get_stack_base GC_PROTO((void)); /* Cold end of stack */
+ptr_t GC_get_main_stack_base(void); /* Cold end of stack. */
#ifdef IA64
- ptr_t GC_get_register_stack_base GC_PROTO((void));
- /* Cold end of register stack. */
+ GC_INNER ptr_t GC_get_register_stack_base(void);
+ /* Cold end of register stack. */
+#endif
+void GC_register_data_segments(void);
+
+#ifdef THREADS
+ GC_INNER void GC_thr_init(void);
+ GC_INNER void GC_init_parallel(void);
+#else
+ GC_INNER GC_bool GC_is_static_root(ptr_t p);
+ /* Is the address p in one of the registered static */
+ /* root sections? */
#endif
-void GC_register_data_segments GC_PROTO((void));
-
+
/* Black listing: */
-void GC_bl_init GC_PROTO((void));
+GC_INNER void GC_bl_init(void);
# ifdef PRINT_BLACK_LIST
- void GC_add_to_black_list_normal GC_PROTO((word p, ptr_t source));
- /* Register bits as a possible future false */
- /* reference from the heap or static data */
+ GC_INNER void GC_add_to_black_list_normal(word p, ptr_t source);
+ /* Register bits as a possible future false */
+ /* reference from the heap or static data */
# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
- if (GC_all_interior_pointers) { \
- GC_add_to_black_list_stack(bits, (ptr_t)(source)); \
- } else { \
- GC_add_to_black_list_normal(bits, (ptr_t)(source)); \
- }
+ if (GC_all_interior_pointers) { \
+ GC_add_to_black_list_stack((word)(bits), (source)); \
+ } else { \
+ GC_add_to_black_list_normal((word)(bits), (source)); \
+ }
# else
- void GC_add_to_black_list_normal GC_PROTO((word p));
+ GC_INNER void GC_add_to_black_list_normal(word p);
# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
- if (GC_all_interior_pointers) { \
- GC_add_to_black_list_stack(bits); \
- } else { \
- GC_add_to_black_list_normal(bits); \
- }
+ if (GC_all_interior_pointers) { \
+ GC_add_to_black_list_stack((word)(bits)); \
+ } else { \
+ GC_add_to_black_list_normal((word)(bits)); \
+ }
# endif
# ifdef PRINT_BLACK_LIST
- void GC_add_to_black_list_stack GC_PROTO((word p, ptr_t source));
+ GC_INNER void GC_add_to_black_list_stack(word p, ptr_t source);
+# define GC_ADD_TO_BLACK_LIST_STACK(bits, source) \
+ GC_add_to_black_list_stack((word)(bits), (source))
# else
- void GC_add_to_black_list_stack GC_PROTO((word p));
-# endif
-struct hblk * GC_is_black_listed GC_PROTO((struct hblk * h, word len));
- /* If there are likely to be false references */
- /* to a block starting at h of the indicated */
- /* length, then return the next plausible */
- /* starting location for h that might avoid */
- /* these false references. */
-void GC_promote_black_lists GC_PROTO((void));
- /* Declare an end to a black listing phase. */
-void GC_unpromote_black_lists GC_PROTO((void));
- /* Approximately undo the effect of the above. */
- /* This actually loses some information, but */
- /* only in a reasonably safe way. */
-word GC_number_stack_black_listed GC_PROTO(( \
- struct hblk *start, struct hblk *endp1));
- /* Return the number of (stack) blacklisted */
- /* blocks in the range for statistical */
- /* purposes. */
-
-ptr_t GC_scratch_alloc GC_PROTO((word bytes));
- /* GC internal memory allocation for */
- /* small objects. Deallocation is not */
- /* possible. */
-
-/* Heap block layout maps: */
-void GC_invalidate_map GC_PROTO((hdr * hhdr));
- /* Remove the object map associated */
- /* with the block. This identifies */
- /* the block as invalid to the mark */
- /* routines. */
-GC_bool GC_add_map_entry GC_PROTO((word sz));
- /* Add a heap block map for objects of */
- /* size sz to obj_map. */
- /* Return FALSE on failure. */
-void GC_register_displacement_inner GC_PROTO((word offset));
- /* Version of GC_register_displacement */
- /* that assumes lock is already held */
- /* and signals are already disabled. */
-
-/* hblk allocation: */
-void GC_new_hblk GC_PROTO((word size_in_words, int kind));
- /* Allocate a new heap block, and build */
- /* a free list in it. */
-
-ptr_t GC_build_fl GC_PROTO((struct hblk *h, word sz,
- GC_bool clear, ptr_t list));
- /* Build a free list for objects of */
- /* size sz in block h. Append list to */
- /* end of the free lists. Possibly */
- /* clear objects on the list. Normally */
- /* called by GC_new_hblk, but also */
- /* called explicitly without GC lock. */
-
-struct hblk * GC_allochblk GC_PROTO(( \
- word size_in_words, int kind, unsigned flags));
- /* Allocate a heap block, inform */
- /* the marker that block is valid */
- /* for objects of indicated size. */
-
-ptr_t GC_alloc_large GC_PROTO((word lw, int k, unsigned flags));
- /* Allocate a large block of size lw words. */
- /* The block is not cleared. */
- /* Flags is 0 or IGNORE_OFF_PAGE. */
- /* Calls GC_allchblk to do the actual */
- /* allocation, but also triggers GC and/or */
- /* heap expansion as appropriate. */
- /* Does not update GC_words_allocd, but does */
- /* other accounting. */
-
-ptr_t GC_alloc_large_and_clear GC_PROTO((word lw, int k, unsigned flags));
- /* As above, but clear block if appropriate */
- /* for kind k. */
-
-void GC_freehblk GC_PROTO((struct hblk * p));
- /* Deallocate a heap block and mark it */
- /* as invalid. */
-
+ GC_INNER void GC_add_to_black_list_stack(word p);
+# define GC_ADD_TO_BLACK_LIST_STACK(bits, source) \
+ GC_add_to_black_list_stack((word)(bits))
+# endif
+GC_INNER struct hblk * GC_is_black_listed(struct hblk * h, word len);
+ /* If there are likely to be false references */
+ /* to a block starting at h of the indicated */
+ /* length, then return the next plausible */
+ /* starting location for h that might avoid */
+ /* these false references. */
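+ /* Illustrative caller pattern (hypothetical): a block */
+ /* allocator checks next = GC_is_black_listed(h, len); */
+ /* whenever next is non-NULL it moves its candidate to */
+ /* next and rechecks, accepting h only once the call */
+ /* returns NULL. */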
+GC_INNER void GC_promote_black_lists(void);
+ /* Declare an end to a black listing phase. */
+GC_INNER void GC_unpromote_black_lists(void);
+ /* Approximately undo the effect of the above. */
+ /* This actually loses some information, but */
+ /* only in a reasonably safe way. */
+
+GC_INNER ptr_t GC_scratch_alloc(size_t bytes);
+ /* GC internal memory allocation for */
+ /* small objects. Deallocation is not */
+ /* possible. */
+
+/* Heap block layout maps: */
+GC_INNER GC_bool GC_add_map_entry(size_t sz);
+ /* Add a heap block map for objects of */
+ /* size sz to obj_map. */
+ /* Return FALSE on failure. */
+GC_INNER void GC_register_displacement_inner(size_t offset);
+ /* Version of GC_register_displacement */
+ /* that assumes lock is already held. */
+
+GC_INNER void GC_initialize_offsets(void);
+ /* Initialize GC_valid_offsets, */
+ /* depending on current */
+ /* GC_all_interior_pointers settings. */
+
+/* hblk allocation: */
+GC_INNER void GC_new_hblk(size_t size_in_granules, int kind);
+ /* Allocate a new heap block, and build */
+ /* a free list in it. */
+
+GC_INNER ptr_t GC_build_fl(struct hblk *h, size_t words, GC_bool clear,
+ ptr_t list);
+ /* Build a free list for objects of */
+ /* size words in block h. Append list to */
+ /* end of the free lists. Possibly */
+ /* clear objects on the list. Normally */
+ /* called by GC_new_hblk, but also */
+ /* called explicitly without GC lock. */
+
+GC_INNER struct hblk * GC_allochblk(size_t size_in_bytes, int kind,
+ unsigned flags);
+ /* Allocate a heap block, inform */
+ /* the marker that block is valid */
+ /* for objects of indicated size. */
+
+GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags);
+ /* Allocate a large block of size lb bytes. */
+ /* The block is not cleared. */
+ /* Flags is 0 or IGNORE_OFF_PAGE. */
+ /* Calls GC_allochblk to do the actual */
+ /* allocation, but also triggers GC and/or */
+ /* heap expansion as appropriate. */
+ /* Does not update GC_bytes_allocd, but does */
+ /* other accounting. */
+
+GC_INNER void GC_freehblk(struct hblk * p);
+ /* Deallocate a heap block and mark it */
+ /* as invalid. */
+
/* Misc GC: */
-void GC_init_inner GC_PROTO((void));
-GC_bool GC_expand_hp_inner GC_PROTO((word n));
-void GC_start_reclaim GC_PROTO((int abort_if_found));
- /* Restore unmarked objects to free */
- /* lists, or (if abort_if_found is */
- /* TRUE) report them. */
- /* Sweeping of small object pages is */
- /* largely deferred. */
-void GC_continue_reclaim GC_PROTO((word sz, int kind));
- /* Sweep pages of the given size and */
- /* kind, as long as possible, and */
- /* as long as the corr. free list is */
- /* empty. */
-void GC_reclaim_or_delete_all GC_PROTO((void));
- /* Arrange for all reclaim lists to be */
- /* empty. Judiciously choose between */
- /* sweeping and discarding each page. */
-GC_bool GC_reclaim_all GC_PROTO((GC_stop_func stop_func, GC_bool ignore_old));
- /* Reclaim all blocks. Abort (in a */
- /* consistent state) if f returns TRUE. */
-GC_bool GC_block_empty GC_PROTO((hdr * hhdr));
- /* Block completely unmarked? */
-GC_bool GC_never_stop_func GC_PROTO((void));
- /* Returns FALSE. */
-GC_bool GC_try_to_collect_inner GC_PROTO((GC_stop_func f));
-
- /* Collect; caller must have acquired */
- /* lock and disabled signals. */
- /* Collection is aborted if f returns */
- /* TRUE. Returns TRUE if it completes */
- /* successfully. */
+GC_INNER GC_bool GC_expand_hp_inner(word n);
+GC_INNER void GC_start_reclaim(int abort_if_found);
+ /* Restore unmarked objects to free */
+ /* lists, or (if abort_if_found is */
+ /* TRUE) report them. */
+ /* Sweeping of small object pages is */
+ /* largely deferred. */
+GC_INNER void GC_continue_reclaim(size_t sz, int kind);
+ /* Sweep pages of the given size and */
+ /* kind, as long as possible, and */
+ /* as long as the corresponding free list is */
+ /* empty. Sz is in granules. */
+
+GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old);
+ /* Reclaim all blocks. Abort (in a */
+ /* consistent state) if stop_func returns TRUE. */
+GC_INNER ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
+ GC_bool init, ptr_t list,
+ signed_word *count);
+ /* Rebuild free list in hbp with */
+ /* header hhdr, with objects of size sz */
+ /* bytes. Add list to the end of the */
+ /* free list. Add the number of */
+ /* reclaimed bytes to *count. */
+GC_INNER GC_bool GC_block_empty(hdr * hhdr);
+ /* Block completely unmarked? */
+GC_INNER int GC_CALLBACK GC_never_stop_func(void);
+ /* Always returns 0 (FALSE). */
+GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func f);
+
+ /* Collect; caller must have acquired */
+ /* lock. Collection is aborted if f */
+ /* returns TRUE. Returns TRUE if it */
+ /* completes successfully. */
# define GC_gcollect_inner() \
- (void) GC_try_to_collect_inner(GC_never_stop_func)
-void GC_finish_collection GC_PROTO((void));
- /* Finish collection. Mark bits are */
- /* consistent and lock is still held. */
-GC_bool GC_collect_or_expand GC_PROTO(( \
- word needed_blocks, GC_bool ignore_off_page));
- /* Collect or expand heap in an attempt */
- /* make the indicated number of free */
- /* blocks available. Should be called */
- /* until the blocks are available or */
- /* until it fails by returning FALSE. */
-
-extern GC_bool GC_is_initialized; /* GC_init() has been run. */
+ (void)GC_try_to_collect_inner(GC_never_stop_func)
+
+GC_EXTERN GC_bool GC_is_initialized; /* GC_init() has been run. */
#if defined(MSWIN32) || defined(MSWINCE)
- void GC_deinit GC_PROTO((void));
+ void GC_deinit(void);
/* Free any resources allocated by */
/* GC_init */
#endif
-void GC_collect_a_little_inner GC_PROTO((int n));
- /* Do n units worth of garbage */
- /* collection work, if appropriate. */
- /* A unit is an amount appropriate for */
- /* HBLKSIZE bytes of allocation. */
-/* ptr_t GC_generic_malloc GC_PROTO((word lb, int k)); */
- /* Allocate an object of the given */
- /* kind. By default, there are only */
- /* a few kinds: composite(pointerfree), */
- /* atomic, uncollectable, etc. */
- /* We claim it's possible for clever */
- /* client code that understands GC */
- /* internals to add more, e.g. to */
- /* communicate object layout info */
- /* to the collector. */
- /* The actual decl is in gc_mark.h. */
-ptr_t GC_generic_malloc_ignore_off_page GC_PROTO((size_t b, int k));
- /* As above, but pointers past the */
- /* first page of the resulting object */
- /* are ignored. */
-ptr_t GC_generic_malloc_inner GC_PROTO((word lb, int k));
- /* Ditto, but I already hold lock, etc. */
-ptr_t GC_generic_malloc_words_small_inner GC_PROTO((word lw, int k));
- /* Analogous to the above, but assumes */
- /* a small object size, and bypasses */
- /* MERGE_SIZES mechanism. */
-ptr_t GC_generic_malloc_words_small GC_PROTO((size_t lw, int k));
- /* As above, but size in units of words */
- /* Bypasses MERGE_SIZES. Assumes */
- /* words <= MAXOBJSZ. */
-ptr_t GC_generic_malloc_inner_ignore_off_page GC_PROTO((size_t lb, int k));
- /* Allocate an object, where */
- /* the client guarantees that there */
- /* will always be a pointer to the */
- /* beginning of the object while the */
- /* object is live. */
-ptr_t GC_allocobj GC_PROTO((word sz, int kind));
- /* Make the indicated */
- /* free list nonempty, and return its */
- /* head. */
-
-void GC_free_inner(GC_PTR p);
-
-void GC_init_headers GC_PROTO((void));
-struct hblkhdr * GC_install_header GC_PROTO((struct hblk *h));
- /* Install a header for block h. */
- /* Return 0 on failure, or the header */
- /* otherwise. */
-GC_bool GC_install_counts GC_PROTO((struct hblk * h, word sz));
- /* Set up forwarding counts for block */
- /* h of size sz. */
- /* Return FALSE on failure. */
-void GC_remove_header GC_PROTO((struct hblk * h));
- /* Remove the header for block h. */
-void GC_remove_counts GC_PROTO((struct hblk * h, word sz));
- /* Remove forwarding counts for h. */
-hdr * GC_find_header GC_PROTO((ptr_t h)); /* Debugging only. */
-
-void GC_finalize GC_PROTO((void));
- /* Perform all indicated finalization actions */
- /* on unmarked objects. */
- /* Unreachable finalizable objects are enqueued */
- /* for processing by GC_invoke_finalizers. */
- /* Invoked with lock. */
-
-void GC_notify_or_invoke_finalizers GC_PROTO((void));
- /* If GC_finalize_on_demand is not set, invoke */
- /* eligible finalizers. Otherwise: */
- /* Call *GC_finalizer_notifier if there are */
- /* finalizers to be run, and we haven't called */
- /* this procedure yet this GC cycle. */
-
-GC_API GC_PTR GC_make_closure GC_PROTO((GC_finalization_proc fn, GC_PTR data));
-GC_API void GC_debug_invoke_finalizer GC_PROTO((GC_PTR obj, GC_PTR data));
- /* Auxiliary fns to make finalization work */
- /* correctly with displaced pointers introduced */
- /* by the debugging allocators. */
-
-void GC_add_to_heap GC_PROTO((struct hblk *p, word bytes));
- /* Add a HBLKSIZE aligned chunk to the heap. */
-
-void GC_print_obj GC_PROTO((ptr_t p));
- /* P points to somewhere inside an object with */
- /* debugging info. Print a human readable */
- /* description of the object to stderr. */
-extern void (*GC_check_heap) GC_PROTO((void));
- /* Check that all objects in the heap with */
- /* debugging info are intact. */
- /* Add any that are not to GC_smashed list. */
-extern void (*GC_print_all_smashed) GC_PROTO((void));
- /* Print GC_smashed if it's not empty. */
- /* Clear GC_smashed list. */
-extern void GC_print_all_errors GC_PROTO((void));
- /* Print smashed and leaked objects, if any. */
- /* Clear the lists of such objects. */
-extern void (*GC_print_heap_obj) GC_PROTO((ptr_t p));
- /* If possible print s followed by a more */
- /* detailed description of the object */
- /* referred to by p. */
+GC_INNER void GC_collect_a_little_inner(int n);
+ /* Do n units worth of garbage */
+ /* collection work, if appropriate. */
+ /* A unit is an amount appropriate for */
+ /* HBLKSIZE bytes of allocation. */
+/* void * GC_generic_malloc(size_t lb, int k); */
+ /* Allocate an object of the given */
+ /* kind. By default, there are only */
+ /* a few kinds: composite (pointerful), */
+ /* atomic, uncollectable, etc. */
+ /* We claim it's possible for clever */
+ /* client code that understands GC */
+ /* internals to add more, e.g. to */
+ /* communicate object layout info */
+ /* to the collector. */
+ /* The actual decl is in gc_mark.h. */
+GC_INNER void * GC_generic_malloc_ignore_off_page(size_t b, int k);
+ /* As above, but pointers past the */
+ /* first page of the resulting object */
+ /* are ignored. */
+GC_INNER void * GC_generic_malloc_inner(size_t lb, int k);
+ /* Ditto, but I already hold lock, etc. */
+GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k);
+ /* Allocate an object, where */
+ /* the client guarantees that there */
+ /* will always be a pointer to the */
+ /* beginning of the object while the */
+ /* object is live. */
+
+GC_INNER ptr_t GC_allocobj(size_t sz, int kind);
+ /* Make the indicated */
+ /* free list nonempty, and return its */
+ /* head. Sz is in granules. */
+
+GC_INNER void * GC_clear_stack(void *);
+ /* in misc.c, behaves like identity. */
+
+/* We make the GC_clear_stack() call a tail one, hoping to get more of */
+/* the stack. */
+#define GENERAL_MALLOC(lb,k) \
+ GC_clear_stack(GC_generic_malloc(lb, k))
+#define GENERAL_MALLOC_IOP(lb,k) \
+ GC_clear_stack(GC_generic_malloc_ignore_off_page(lb, k))
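+/* For instance, GENERAL_MALLOC(lb, k) becomes */
+/* GC_clear_stack(GC_generic_malloc(lb, k)), so the fresh pointer is */
+/* returned through the GC_clear_stack() call in tail position. */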
+
+/* Allocation routines that bypass the thread local cache. */
+#ifdef THREAD_LOCAL_ALLOC
+ GC_INNER void * GC_core_malloc(size_t);
+ GC_INNER void * GC_core_malloc_atomic(size_t);
+# ifdef GC_GCJ_SUPPORT
+ GC_INNER void * GC_core_gcj_malloc(size_t, void *);
+# endif
+#endif /* THREAD_LOCAL_ALLOC */
+
+GC_INNER void GC_init_headers(void);
+GC_INNER struct hblkhdr * GC_install_header(struct hblk *h);
+ /* Install a header for block h. */
+ /* Return 0 on failure, or the header */
+ /* otherwise. */
+GC_INNER GC_bool GC_install_counts(struct hblk * h, size_t sz);
+ /* Set up forwarding counts for block */
+ /* h of size sz. */
+ /* Return FALSE on failure. */
+GC_INNER void GC_remove_header(struct hblk * h);
+ /* Remove the header for block h. */
+GC_INNER void GC_remove_counts(struct hblk * h, size_t sz);
+ /* Remove forwarding counts for h. */
+GC_INNER hdr * GC_find_header(ptr_t h);
+
+GC_INNER void GC_finalize(void);
+ /* Perform all indicated finalization actions */
+ /* on unmarked objects. */
+ /* Unreachable finalizable objects are enqueued */
+ /* for processing by GC_invoke_finalizers. */
+ /* Invoked with lock. */
+
+GC_INNER void GC_notify_or_invoke_finalizers(void);
+ /* If GC_finalize_on_demand is not set, invoke */
+ /* eligible finalizers. Otherwise: */
+ /* Call *GC_finalizer_notifier if there are */
+ /* finalizers to be run, and we haven't called */
+ /* this procedure yet this GC cycle. */
+
+GC_INNER void GC_add_to_heap(struct hblk *p, size_t bytes);
+ /* Add a HBLKSIZE aligned chunk to the heap. */
+
+#ifdef USE_PROC_FOR_LIBRARIES
+ GC_INNER void GC_add_to_our_memory(ptr_t p, size_t bytes);
+ /* Add a chunk to GC_our_memory. */
+ /* If p == 0, do nothing. */
+#else
+# define GC_add_to_our_memory(p, bytes)
+#endif
+
+GC_INNER void GC_print_all_errors(void);
+ /* Print smashed and leaked objects, if any. */
+ /* Clear the lists of such objects. */
+
+GC_EXTERN void (*GC_check_heap)(void);
+ /* Check that all objects in the heap with */
+ /* debugging info are intact. */
+ /* Add any that are not to GC_smashed list. */
+GC_EXTERN void (*GC_print_all_smashed)(void);
+ /* Print GC_smashed if it's not empty. */
+ /* Clear GC_smashed list. */
+GC_EXTERN void (*GC_print_heap_obj)(ptr_t p);
+ /* If possible, print a more */
+ /* detailed description of the object */
+ /* referred to by p. */
+
#if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
- void GC_print_address_map GC_PROTO((void));
- /* Print an address map of the process. */
+ void GC_print_address_map(void);
+ /* Print an address map of the process. */
#endif
-extern GC_bool GC_have_errors; /* We saw a smashed or leaked object. */
- /* Call error printing routine */
- /* occasionally. */
-extern GC_bool GC_print_stats; /* Produce at least some logging output */
- /* Set from environment variable. */
+GC_EXTERN GC_bool GC_have_errors; /* We saw a smashed or leaked object. */
+ /* Call error printing routine */
+ /* occasionally. */
+
+#ifndef SMALL_CONFIG
+ /* GC_print_stats should be visible outside the GC in some cases. */
+ extern int GC_print_stats; /* Nonzero generates basic GC log. */
+ /* VERBOSE generates add'l messages. */
+#else
+# define GC_print_stats 0
+ /* Will this remove the message character strings from the executable? */
+ /* With a particular level of optimizations, it should... */
+#endif
+#define VERBOSE 2
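+/* Typical use inside the collector (illustrative; sz is a */
+/* hypothetical variable, GC_log_printf is declared below): */
+/* if (GC_print_stats == VERBOSE) */
+/* GC_log_printf("Grew heap to %lu bytes\n", (unsigned long)sz); */
+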
#ifndef NO_DEBUGGING
- extern GC_bool GC_dump_regularly; /* Generate regular debugging dumps. */
+ GC_EXTERN GC_bool GC_dump_regularly;
+ /* Generate regular debugging dumps. */
# define COND_DUMP if (GC_dump_regularly) GC_dump();
#else
# define COND_DUMP
#endif
#ifdef KEEP_BACK_PTRS
- extern long GC_backtraces;
- void GC_generate_random_backtrace_no_gc(void);
+ GC_EXTERN long GC_backtraces;
+ GC_INNER void GC_generate_random_backtrace_no_gc(void);
#endif
-extern GC_bool GC_print_back_height;
+GC_EXTERN GC_bool GC_print_back_height;
#ifdef MAKE_BACK_GRAPH
void GC_print_back_graph_stats(void);
#endif
-/* Macros used for collector internal allocation. */
-/* These assume the collector lock is held. */
+#ifdef THREADS
+ GC_INNER void GC_free_inner(void * p);
+#endif
+
+/* Macros used for collector internal allocation. */
+/* These assume the collector lock is held. */
#ifdef DBG_HDRS_ALL
- extern GC_PTR GC_debug_generic_malloc_inner(size_t lb, int k);
- extern GC_PTR GC_debug_generic_malloc_inner_ignore_off_page(size_t lb,
- int k);
+ GC_INNER void * GC_debug_generic_malloc_inner(size_t lb, int k);
+ GC_INNER void * GC_debug_generic_malloc_inner_ignore_off_page(size_t lb,
+ int k);
# define GC_INTERNAL_MALLOC GC_debug_generic_malloc_inner
# define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE \
- GC_debug_generic_malloc_inner_ignore_off_page
+ GC_debug_generic_malloc_inner_ignore_off_page
# ifdef THREADS
+ GC_INNER void GC_debug_free_inner(void * p);
# define GC_INTERNAL_FREE GC_debug_free_inner
# else
#   define GC_INTERNAL_FREE GC_debug_free
# endif
#else
# define GC_INTERNAL_MALLOC GC_generic_malloc_inner
# define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE \
- GC_generic_malloc_inner_ignore_off_page
+ GC_generic_malloc_inner_ignore_off_page
# ifdef THREADS
# define GC_INTERNAL_FREE GC_free_inner
# else
#   define GC_INTERNAL_FREE GC_free
# endif
#endif
-/* Memory unmapping: */
#ifdef USE_MUNMAP
- void GC_unmap_old(void);
- void GC_merge_unmapped(void);
- void GC_unmap(ptr_t start, word bytes);
- void GC_remap(ptr_t start, word bytes);
- void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2);
-#endif
-
-/* Virtual dirty bit implementation: */
-/* Each implementation exports the following: */
-void GC_read_dirty GC_PROTO((void));
- /* Retrieve dirty bits. */
-GC_bool GC_page_was_dirty GC_PROTO((struct hblk *h));
- /* Read retrieved dirty bits. */
-GC_bool GC_page_was_ever_dirty GC_PROTO((struct hblk *h));
- /* Could the page contain valid heap pointers? */
-void GC_is_fresh GC_PROTO((struct hblk *h, word n));
- /* Assert the region currently contains no */
- /* valid pointers. */
-void GC_remove_protection GC_PROTO((struct hblk *h, word nblocks,
- GC_bool pointerfree));
- /* h is about to be writteni or allocated. Ensure */
- /* that it's not write protected by the virtual */
- /* dirty bit implementation. */
-
-void GC_dirty_init GC_PROTO((void));
-
+ /* Memory unmapping: */
+ GC_INNER void GC_unmap_old(void);
+ GC_INNER void GC_merge_unmapped(void);
+ GC_INNER void GC_unmap(ptr_t start, size_t bytes);
+ GC_INNER void GC_remap(ptr_t start, size_t bytes);
+ GC_INNER void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2,
+ size_t bytes2);
+#endif
+
+/* Virtual dirty bit implementation: */
+/* Each implementation exports the following: */
+GC_INNER void GC_read_dirty(void);
+ /* Retrieve dirty bits. */
+GC_INNER GC_bool GC_page_was_dirty(struct hblk *h);
+ /* Read retrieved dirty bits. */
+GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
+ GC_bool pointerfree);
+ /* h is about to be written or allocated. Ensure */
+ /* that it's not write protected by the virtual */
+ /* dirty bit implementation. */
+
+GC_INNER void GC_dirty_init(void);
+
/* Slow/general mark bit manipulation: */
-GC_API GC_bool GC_is_marked GC_PROTO((ptr_t p));
-void GC_clear_mark_bit GC_PROTO((ptr_t p));
-void GC_set_mark_bit GC_PROTO((ptr_t p));
-
+GC_API_PRIV GC_bool GC_is_marked(ptr_t p);
+GC_INNER void GC_clear_mark_bit(ptr_t p);
+GC_INNER void GC_set_mark_bit(ptr_t p);
+
/* Stubborn objects: */
-void GC_read_changed GC_PROTO((void)); /* Analogous to GC_read_dirty */
-GC_bool GC_page_was_changed GC_PROTO((struct hblk * h));
- /* Analogous to GC_page_was_dirty */
-void GC_clean_changing_list GC_PROTO((void));
- /* Collect obsolete changing list entries */
-void GC_stubborn_init GC_PROTO((void));
-
+void GC_read_changed(void); /* Analogous to GC_read_dirty */
+GC_bool GC_page_was_changed(struct hblk * h);
+ /* Analogous to GC_page_was_dirty */
+void GC_clean_changing_list(void);
+ /* Collect obsolete changing list entries */
+void GC_stubborn_init(void);
+
/* Debugging print routines: */
-void GC_print_block_list GC_PROTO((void));
-void GC_print_hblkfreelist GC_PROTO((void));
-void GC_print_heap_sects GC_PROTO((void));
-void GC_print_static_roots GC_PROTO((void));
-void GC_print_finalization_stats GC_PROTO((void));
-void GC_dump GC_PROTO((void));
+void GC_print_block_list(void);
+void GC_print_hblkfreelist(void);
+void GC_print_heap_sects(void);
+void GC_print_static_roots(void);
+GC_INNER void GC_print_finalization_stats(void);
+/* void GC_dump(void); - declared in gc.h */
#ifdef KEEP_BACK_PTRS
- void GC_store_back_pointer(ptr_t source, ptr_t dest);
- void GC_marked_for_finalization(ptr_t dest);
+ GC_INNER void GC_store_back_pointer(ptr_t source, ptr_t dest);
+ GC_INNER void GC_marked_for_finalization(ptr_t dest);
# define GC_STORE_BACK_PTR(source, dest) GC_store_back_pointer(source, dest)
# define GC_MARKED_FOR_FINALIZATION(dest) GC_marked_for_finalization(dest)
#else
-# define GC_STORE_BACK_PTR(source, dest)
+# define GC_STORE_BACK_PTR(source, dest)
# define GC_MARKED_FOR_FINALIZATION(dest)
#endif
/* Make arguments appear live to compiler */
-# ifdef __WATCOMC__
+# if defined(__BORLANDC__) || defined(__WATCOMC__)
void GC_noop(void*, ...);
# else
# ifdef __DMC__
- GC_API void GC_noop(...);
+ void GC_noop(...);
# else
- GC_API void GC_noop();
+ void GC_noop();
# endif
# endif
-void GC_noop1 GC_PROTO((word));
-
-/* Logging and diagnostic output: */
-GC_API void GC_printf GC_PROTO((GC_CONST char * format, long, long, long, long, long, long));
- /* A version of printf that doesn't allocate, */
- /* is restricted to long arguments, and */
- /* (unfortunately) doesn't use varargs for */
- /* portability. Restricted to 6 args and */
- /* 1K total output length. */
- /* (We use sprintf. Hopefully that doesn't */
- /* allocate for long arguments.) */
-# define GC_printf0(f) GC_printf(f, 0l, 0l, 0l, 0l, 0l, 0l)
-# define GC_printf1(f,a) GC_printf(f, (long)a, 0l, 0l, 0l, 0l, 0l)
-# define GC_printf2(f,a,b) GC_printf(f, (long)a, (long)b, 0l, 0l, 0l, 0l)
-# define GC_printf3(f,a,b,c) GC_printf(f, (long)a, (long)b, (long)c, 0l, 0l, 0l)
-# define GC_printf4(f,a,b,c,d) GC_printf(f, (long)a, (long)b, (long)c, \
- (long)d, 0l, 0l)
-# define GC_printf5(f,a,b,c,d,e) GC_printf(f, (long)a, (long)b, (long)c, \
- (long)d, (long)e, 0l)
-# define GC_printf6(f,a,b,c,d,e,g) GC_printf(f, (long)a, (long)b, (long)c, \
- (long)d, (long)e, (long)g)
-
-GC_API void GC_err_printf GC_PROTO((GC_CONST char * format, long, long, long, long, long, long));
-# define GC_err_printf0(f) GC_err_puts(f)
-# define GC_err_printf1(f,a) GC_err_printf(f, (long)a, 0l, 0l, 0l, 0l, 0l)
-# define GC_err_printf2(f,a,b) GC_err_printf(f, (long)a, (long)b, 0l, 0l, 0l, 0l)
-# define GC_err_printf3(f,a,b,c) GC_err_printf(f, (long)a, (long)b, (long)c, \
- 0l, 0l, 0l)
-# define GC_err_printf4(f,a,b,c,d) GC_err_printf(f, (long)a, (long)b, \
- (long)c, (long)d, 0l, 0l)
-# define GC_err_printf5(f,a,b,c,d,e) GC_err_printf(f, (long)a, (long)b, \
- (long)c, (long)d, \
- (long)e, 0l)
-# define GC_err_printf6(f,a,b,c,d,e,g) GC_err_printf(f, (long)a, (long)b, \
- (long)c, (long)d, \
- (long)e, (long)g)
- /* Ditto, writes to stderr. */
-
-void GC_err_puts GC_PROTO((GC_CONST char *s));
- /* Write s to stderr, don't buffer, don't add */
- /* newlines, don't ... */
+GC_API void GC_CALL GC_noop1(word);
+
+#ifndef GC_ATTR_FORMAT_PRINTF
+# if defined(__GNUC__) && __GNUC__ >= 3
+# define GC_ATTR_FORMAT_PRINTF(spec_argnum, first_checked) \
+ __attribute__((__format__(__printf__, spec_argnum, first_checked)))
+# else
+# define GC_ATTR_FORMAT_PRINTF(spec_argnum, first_checked)
+# endif
+#endif
+
+/* Logging and diagnostic output: */
+GC_API_PRIV void GC_printf(const char * format, ...)
+ GC_ATTR_FORMAT_PRINTF(1, 2);
+ /* A version of printf that doesn't allocate, */
+ /* and is restricted to 1K total output length. */
+ /* (We use sprintf. Hopefully that doesn't */
+ /* allocate for long arguments.) */
+GC_API_PRIV void GC_err_printf(const char * format, ...)
+ GC_ATTR_FORMAT_PRINTF(1, 2);
+GC_API_PRIV void GC_log_printf(const char * format, ...)
+ GC_ATTR_FORMAT_PRINTF(1, 2);
+void GC_err_puts(const char *s);
+ /* Write s to stderr, don't buffer, don't add */
+ /* newlines, don't ... */
#if defined(LINUX) && !defined(SMALL_CONFIG)
- void GC_err_write GC_PROTO((GC_CONST char *buf, size_t len));
- /* Write buf to stderr, don't buffer, don't add */
- /* newlines, don't ... */
+ GC_INNER void GC_err_write(const char *buf, size_t len);
+ /* Write buf to stderr, don't buffer, don't add */
+ /* newlines, don't ... */
+#endif
+
+GC_EXTERN unsigned GC_fail_count;
+ /* How many consecutive GC/expansion failures? */
+ /* Reset by GC_allochblk(); defined in alloc.c. */
+
+GC_EXTERN long GC_large_alloc_warn_interval; /* defined in misc.c */
+
+GC_EXTERN signed_word GC_bytes_found;
+ /* Number of reclaimed bytes after garbage collection; */
+ /* protected by GC lock; defined in reclaim.c. */
+
+#ifdef USE_MUNMAP
+ GC_EXTERN int GC_unmap_threshold; /* defined in allchblk.c */
+ GC_EXTERN GC_bool GC_force_unmap_on_gcollect; /* defined in misc.c */
#endif
+#ifdef MSWIN32
+ GC_EXTERN GC_bool GC_no_win32_dlls; /* defined in os_dep.c */
+ GC_EXTERN GC_bool GC_wnt; /* Is Windows NT derivative; */
+ /* defined and set in os_dep.c. */
+#endif
+
+#ifdef THREADS
+# if defined(MSWIN32) || defined(MSWINCE)
+ GC_EXTERN CRITICAL_SECTION GC_write_cs; /* defined in misc.c */
+# ifdef GC_ASSERTIONS
+ GC_EXTERN GC_bool GC_write_disabled;
+ /* defined in win32_threads.c; */
+ /* protected by GC_write_cs. */
+# endif
+# endif
+# ifdef MPROTECT_VDB
+ GC_EXTERN volatile AO_TS_t GC_fault_handler_lock;
+ /* defined in os_dep.c */
+# endif
+# ifdef MSWINCE
+ GC_EXTERN GC_bool GC_dont_query_stack_min;
+ /* Defined and set in os_dep.c. */
+# endif
+#elif defined(IA64)
+ GC_EXTERN ptr_t GC_save_regs_ret_val; /* defined in mach_dep.c. */
+ /* Previously set to backing store pointer. */
+#endif /* !THREADS */
+
+#ifdef THREAD_LOCAL_ALLOC
+ GC_EXTERN GC_bool GC_world_stopped; /* defined in alloc.c */
+#endif
+
+#ifdef GC_GCJ_SUPPORT
+ GC_EXTERN GC_bool GC_gcj_malloc_initialized; /* defined in gcj_mlc.c */
+ GC_EXTERN ptr_t * GC_gcjobjfreelist;
+#endif
# ifdef GC_ASSERTIONS
-# define GC_ASSERT(expr) if(!(expr)) {\
- GC_err_printf2("Assertion failure: %s:%ld\n", \
- __FILE__, (unsigned long)__LINE__); \
- ABORT("assertion failure"); }
-# else
-# define GC_ASSERT(expr)
+# define GC_ASSERT(expr) if(!(expr)) {\
+ GC_err_printf("Assertion failure: %s:%ld\n", \
+ __FILE__, (unsigned long)__LINE__); \
+ ABORT("assertion failure"); }
+# else
+# define GC_ASSERT(expr)
# endif
-/* Check a compile time assertion at compile time. The error */
-/* message for failure is a bit baroque, but ... */
+/* Check a compile time assertion at compile time. The error */
+/* message for failure is a bit baroque, but ... */
#if defined(mips) && !defined(__GNUC__)
-/* DOB: MIPSPro C gets an internal error taking the sizeof an array type.
+/* DOB: MIPSPro C gets an internal error taking the sizeof an array type.
This code works correctly (ugliness is to avoid "unused var" warnings) */
-# define GC_STATIC_ASSERT(expr) do { if (0) { char j[(expr)? 1 : -1]; j[0]='\0'; j[0]=j[0]; } } while(0)
+# define GC_STATIC_ASSERT(expr) \
+ do { if (0) { char j[(expr)? 1 : -1]; j[0]='\0'; j[0]=j[0]; } } while(0)
#else
-# define GC_STATIC_ASSERT(expr) sizeof(char[(expr)? 1 : -1])
-#endif
-
-# if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
- /* We need additional synchronization facilities from the thread */
- /* support. We believe these are less performance critical */
- /* than the main garbage collector lock; standard pthreads-based */
- /* implementations should be sufficient. */
-
- /* The mark lock and condition variable. If the GC lock is also */
- /* acquired, the GC lock must be acquired first. The mark lock is */
- /* used to both protect some variables used by the parallel */
- /* marker, and to protect GC_fl_builder_count, below. */
- /* GC_notify_all_marker() is called when */
- /* the state of the parallel marker changes */
- /* in some significant way (see gc_mark.h for details). The */
- /* latter set of events includes incrementing GC_mark_no. */
- /* GC_notify_all_builder() is called when GC_fl_builder_count */
- /* reaches 0. */
-
- extern void GC_acquire_mark_lock();
- extern void GC_release_mark_lock();
- extern void GC_notify_all_builder();
- /* extern void GC_wait_builder(); */
- extern void GC_wait_for_reclaim();
-
- extern word GC_fl_builder_count; /* Protected by mark lock. */
-# endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
-# ifdef PARALLEL_MARK
- extern void GC_notify_all_marker();
- extern void GC_wait_marker();
- extern word GC_mark_no; /* Protected by mark lock. */
-
- extern void GC_help_marker(word my_mark_no);
- /* Try to help out parallel marker for mark cycle */
- /* my_mark_no. Returns if the mark cycle finishes or */
- /* was already done, or there was nothing to do for */
- /* some other reason. */
+# define GC_STATIC_ASSERT(expr) (void)sizeof(char[(expr)? 1 : -1])
+#endif
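+
+/* A false condition yields a negative array size and thus a compile */
+/* error. Example (a sketch; similar checks run at GC startup): */
+/*   GC_STATIC_ASSERT(sizeof(ptr_t) == sizeof(word)); */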
+
+# if defined(PARALLEL_MARK)
+ /* We need additional synchronization facilities from the thread */
+ /* support. We believe these are less performance critical */
+ /* than the main garbage collector lock; standard pthreads-based */
+ /* implementations should be sufficient. */
+
+ GC_EXTERN long GC_markers; /* Number of mark threads we would like */
+ /* to have. Includes the initiating */
+ /* thread. Defined in mark.c. */
+
+ /* The mark lock and condition variable. If the GC lock is also */
+ /* acquired, the GC lock must be acquired first. The mark lock */
+ /* is used both to protect some variables used by the parallel */
+ /* marker and to protect GC_fl_builder_count, below. */
+ /* GC_notify_all_marker() is called when the state of the parallel */
+ /* marker changes in some significant way (see gc_mark.h for */
+ /* details); these events include incrementing GC_mark_no. */
+ /* GC_notify_all_builder() is called when GC_fl_builder_count */
+ /* reaches 0. */
+
+ GC_INNER void GC_acquire_mark_lock(void);
+ GC_INNER void GC_release_mark_lock(void);
+ GC_INNER void GC_notify_all_builder(void);
+ GC_INNER void GC_wait_for_reclaim(void);
+
+ GC_EXTERN word GC_fl_builder_count; /* Protected by mark lock. */
+
+ GC_INNER void GC_notify_all_marker(void);
+ GC_INNER void GC_wait_marker(void);
+ GC_EXTERN word GC_mark_no; /* Protected by mark lock. */
+
+ GC_INNER void GC_help_marker(word my_mark_no);
+ /* Try to help out parallel marker for mark cycle */
+ /* my_mark_no. Returns if the mark cycle finishes or */
+ /* was already done, or there was nothing to do for */
+ /* some other reason. */
# endif /* PARALLEL_MARK */
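+
+/* Sketch of the locking discipline described above (patterned on */
+/* the free-list builder code; details vary across files): */
+/*   GC_acquire_mark_lock(); */
+/*   ... update shared marker state, e.g. --GC_fl_builder_count ... */
+/*   if (GC_fl_builder_count == 0) GC_notify_all_builder(); */
+/*   GC_release_mark_lock(); */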
-# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS)
- /* We define the thread suspension signal here, so that we can refer */
- /* to it in the dirty bit implementation, if necessary. Ideally we */
+# if defined(GC_PTHREADS)
+ /* We define the thread suspension signal here, so that we can refer */
+ /* to it in the dirty bit implementation, if necessary. Ideally we */
/* would allocate a (real-time ?) signal using the standard mechanism.*/
- /* unfortunately, there is no standard mechanism. (There is one */
- /* in Linux glibc, but it's not exported.) Thus we continue to use */
- /* the same hard-coded signals we've always used. */
+ /* Unfortunately, there is no standard mechanism. (There is one */
+ /* in Linux glibc, but it's not exported.) Thus we continue to use */
+ /* the same hard-coded signals we've always used. */
# if !defined(SIG_SUSPEND)
# if defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS)
# if defined(SPARC) && !defined(SIGPWR)
/* SPARC/Linux doesn't properly define SIGPWR in <signal.h>.
- * It is aliased to SIGLOST in asm/signal.h, though. */
+ * It is aliased to SIGLOST in asm/signal.h, though. */
# define SIG_SUSPEND SIGLOST
# else
- /* Linuxthreads itself uses SIGUSR1 and SIGUSR2. */
+ /* Linuxthreads itself uses SIGUSR1 and SIGUSR2. */
# define SIG_SUSPEND SIGPWR
# endif
# else /* !GC_LINUX_THREADS */
#  if defined(_SIGRTMIN)
#   define SIG_SUSPEND _SIGRTMIN + 6
# else
# define SIG_SUSPEND SIGRTMIN + 6
-# endif
+# endif
# endif
# endif /* !SIG_SUSPEND */
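+
+/* E.g., with GC_LINUX_THREADS on a non-SPARC target the logic above */
+/* resolves SIG_SUSPEND to SIGPWR. */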
-
+
# endif
+/* Some macros for setjmp that work across signal handlers */
+/* where possible, and a couple of routines to facilitate */
+/* catching accesses to bad addresses when that's */
+/* possible/needed. */
+#if defined(UNIX_LIKE) || (defined(NEED_FIND_LIMIT) && defined(CYGWIN32))
+# include <setjmp.h>
+# if defined(SUNOS5SIGS) && !defined(FREEBSD)
+# include <sys/siginfo.h>
+# endif
+ /* Define SETJMP and friends to be the version that restores */
+ /* the signal mask. */
+# define SETJMP(env) sigsetjmp(env, 1)
+# define LONGJMP(env, val) siglongjmp(env, val)
+# define JMP_BUF sigjmp_buf
+#else
+# ifdef ECOS
+# define SETJMP(env) hal_setjmp(env)
+# else
+# define SETJMP(env) setjmp(env)
+# endif
+# define LONGJMP(env, val) longjmp(env, val)
+# define JMP_BUF jmp_buf
+#endif
+
+/* Do we need the GC_find_limit machinery to find the end of a */
+/* data segment? */
+# if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START)
+# define NEED_FIND_LIMIT
+# endif
+
+# if !defined(STACKBOTTOM) && defined(HEURISTIC2)
+# define NEED_FIND_LIMIT
+# endif
+
+# if (defined(SVR4) || defined(AUX) || defined(DGUX) \
+ || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
+# define NEED_FIND_LIMIT
+# endif
+
+#if defined(FREEBSD) && (defined(I386) || defined(X86_64) || defined(powerpc) \
+ || defined(__powerpc__))
+# include <machine/trap.h>
+# if !defined(PCR)
+# define NEED_FIND_LIMIT
+# endif
+#endif
+
+#if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__) \
+ && !defined(NEED_FIND_LIMIT)
+ /* Used by GC_init_netbsd_elf() in os_dep.c. */
+# define NEED_FIND_LIMIT
+#endif
+
+#if defined(IA64) && !defined(NEED_FIND_LIMIT)
+# define NEED_FIND_LIMIT
+ /* May be needed for register backing store base. */
+#endif
+
+# if defined(NEED_FIND_LIMIT) \
+     || (defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS))
+GC_EXTERN JMP_BUF GC_jmp_buf; /* defined in os_dep.c */
+
+/* Set up a handler for address faults which will longjmp to */
+/* GC_jmp_buf. */
+GC_INNER void GC_setup_temporary_fault_handler(void);
+
+/* Undo the effect of GC_setup_temporary_fault_handler. */
+GC_INNER void GC_reset_fault_handler(void);
+
+# endif /* Need to handle address faults. */
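+
+/* Sketch of the intended probe pattern (the real code is in */
+/* os_dep.c; the volatile read is illustrative only): */
+/*   GC_setup_temporary_fault_handler(); */
+/*   if (SETJMP(GC_jmp_buf) == 0) { */
+/*     (void)(*(volatile word *)addr);   -- this read may fault */
+/*   }   -- on a fault, the handler LONGJMPs back to the SETJMP */
+/*   GC_reset_fault_handler(); */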
+
+/* Some convenience macros for cancellation support. */
+# if defined(CANCEL_SAFE)
+# if defined(GC_ASSERTIONS) && (defined(USE_COMPILER_TLS) \
+     || (defined(LINUX) && !defined(ARM32) \
+         && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3))) \
+     || defined(HPUX) /* and probably others ... */)
+ extern __thread unsigned char GC_cancel_disable_count;
+# define NEED_CANCEL_DISABLE_COUNT
+# define INCR_CANCEL_DISABLE() ++GC_cancel_disable_count
+# define DECR_CANCEL_DISABLE() --GC_cancel_disable_count
+# define ASSERT_CANCEL_DISABLED() GC_ASSERT(GC_cancel_disable_count > 0)
+# else
+# define INCR_CANCEL_DISABLE()
+# define DECR_CANCEL_DISABLE()
+# define ASSERT_CANCEL_DISABLED()
+# endif /* GC_ASSERTIONS & ... */
+# define DISABLE_CANCEL(state) \
+ { pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &state); \
+ INCR_CANCEL_DISABLE(); }
+# define RESTORE_CANCEL(state) \
+ { ASSERT_CANCEL_DISABLED(); \
+ pthread_setcancelstate(state, NULL); \
+ DECR_CANCEL_DISABLE(); }
+# else /* !CANCEL_SAFE */
+# define DISABLE_CANCEL(state)
+# define RESTORE_CANCEL(state)
+# define ASSERT_CANCEL_DISABLED()
+# endif /* !CANCEL_SAFE */
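+
+/* Typical usage around a blocking call that is a POSIX cancellation */
+/* point (a sketch; mirrors the pthread support code): */
+/*   int cancel_state; */
+/*   DISABLE_CANCEL(cancel_state); */
+/*   ... e.g. sem_wait(&sem) must not be cancelled here ... */
+/*   RESTORE_CANCEL(cancel_state); */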
+
# endif /* GC_PRIVATE_H */