#if __GNUC__ >= 3
# define EXPECT(expr, outcome) __builtin_expect(expr,outcome)
/* Equivalent to (expr), but predict that usually (expr)==outcome. */
-# define INLINE inline
#else
# define EXPECT(expr, outcome) (expr)
+#endif /* __GNUC__ */
+
+#if __GNUC__ >= 3
+# define INLINE inline
+#else
# define INLINE
#endif /* __GNUC__ */
/*********************************/
/* #define STUBBORN_ALLOC */
- /* Enable stubborm allocation, and thus a limited */
+ /* Enable stubborn allocation, and thus a limited */
/* form of incremental collection w/o dirty bits. */
/* #define ALL_INTERIOR_POINTERS */
/* 2. This option makes it hard for the collector */
/* to allocate space that is not ``pointed to'' */
/* by integers, etc. Under SunOS 4.X with a */
- /* statically linked libc, we empiricaly */
+ /* statically linked libc, we empirically */
/* observed that it would be difficult to */
/* allocate individual objects larger than 100K. */
/* Even if only smaller objects are allocated, */
# define MAXHINCR 4096
# endif
-# define TIME_LIMIT 50 /* We try to keep pause times from exceeding */
- /* this by much. In milliseconds. */
-
# define BL_LIMIT GC_black_list_spacing
/* If we need a block of N bytes, and we have */
/* a block of N + BL_LIMIT bytes available, */
# define GET_TIME(x) { struct rusage rusage; \
getrusage (RUSAGE_SELF, &rusage); \
x = rusage.ru_utime; }
-# define MS_TIME_DIFF(a,b) ((double) (a.tv_sec - b.tv_sec) * 1000.0 \
- + (double) (a.tv_usec - b.tv_usec) / 1000.0)
+# define MS_TIME_DIFF(a,b) \
+ ((unsigned long)((double) (a.tv_sec - b.tv_sec) * 1000.0 \
+ + (double) (a.tv_usec - b.tv_usec) / 1000.0))
#else /* !BSD_TIME */
# if defined(MSWIN32) || defined(MSWINCE)
# include <windows.h>
# else /* !MSWIN32, !MSWINCE, !BSD_TIME */
# include <time.h>
# if !defined(__STDC__) && defined(SPARC) && defined(SUNOS4)
- clock_t clock(); /* Not in time.h, where it belongs */
+ clock_t clock(void); /* Not in time.h, where it belongs */
# endif
# if defined(FREEBSD) && !defined(CLOCKS_PER_SEC)
# include <machine/limits.h>
PCR_allSigsBlocked, \
PCR_waitForever);
# else
-# if defined(GC_SOLARIS_THREADS) || defined(GC_WIN32_THREADS) \
- || defined(GC_PTHREADS)
- void GC_stop_world();
- void GC_start_world();
+# if defined(GC_WIN32_THREADS) || defined(GC_PTHREADS)
+ void GC_stop_world(void);
+ void GC_start_world(void);
# define STOP_WORLD() GC_stop_world()
# define START_WORLD() GC_start_world()
# else
# define ABORT(s) PCR_Base_Panic(s)
# else
# ifdef SMALL_CONFIG
-# define ABORT(msg) abort()
+# if defined(MSWIN32) || defined(MSWINCE)
+# define ABORT(msg) DebugBreak()
+# else
+# define ABORT(msg) abort()
+# endif
# else
GC_API void GC_abort(const char * msg);
# define ABORT(msg) GC_abort(msg)
# define GC_MACH_THREAD_STATE_COUNT PPC_THREAD_STATE_COUNT
# define GC_MACH_HEADER mach_header
# define GC_MACH_SECTION section
+# define GC_GETSECTBYNAME getsectbynamefromheader
# else
# define GC_THREAD_STATE_T ppc_thread_state64_t
# define GC_MACH_THREAD_STATE PPC_THREAD_STATE64
# define GC_MACH_THREAD_STATE_COUNT PPC_THREAD_STATE64_COUNT
# define GC_MACH_HEADER mach_header_64
# define GC_MACH_SECTION section_64
+# define GC_GETSECTBYNAME getsectbynamefromheader_64
# endif
# elif defined(I386) || defined(X86_64)
# if CPP_WORDSZ == 32
# define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE32_COUNT
# define GC_MACH_HEADER mach_header
# define GC_MACH_SECTION section
+# define GC_GETSECTBYNAME getsectbynamefromheader
# else
# define GC_THREAD_STATE_T x86_thread_state64_t
# define GC_MACH_THREAD_STATE x86_THREAD_STATE64
# define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE64_COUNT
# define GC_MACH_HEADER mach_header_64
# define GC_MACH_SECTION section_64
+# define GC_GETSECTBYNAME getsectbynamefromheader_64
# endif
# else
-# error define GC_THREAD_STATE_T
+# if defined(ARM32)
+# define GC_THREAD_STATE_T arm_thread_state_t
+# else
+# error define GC_THREAD_STATE_T
+# endif
# define GC_MACH_THREAD_STATE MACHINE_THREAD_STATE
# define GC_MACH_THREAD_STATE_COUNT MACHINE_THREAD_STATE_COUNT
# endif
/* */
/*********************/
-/* heap block size, bytes. Should be power of 2 */
+/* Heap block size, bytes. Should be power of 2. */
+/* Incremental GC with MPROTECT_VDB currently requires the */
+/* page size to be a multiple of HBLKSIZE. Since most modern */
+/* architectures support variable page sizes down to 4K, and */
+/* X86 is generally 4K, we now default to 4K, except for */
+/* Alpha: Seems to be used with 8K pages. */
+/* SMALL_CONFIG: Want less block-level fragmentation. */
#ifndef HBLKSIZE
# ifdef SMALL_CONFIG
# define CPP_LOG_HBLKSIZE 10
# else
-# if (CPP_WORDSZ == 32) || (defined(HPUX) && defined(HP_PA))
- /* HPUX/PA seems to use 4K pages with the 64 bit ABI */
-# define CPP_LOG_HBLKSIZE 12
-# else
+# if defined(ALPHA)
# define CPP_LOG_HBLKSIZE 13
+# else
+# define CPP_LOG_HBLKSIZE 12
# endif
# endif
#else
# endif
# undef HBLKSIZE
#endif
+
# define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
# define LOG_HBLKSIZE ((size_t)CPP_LOG_HBLKSIZE)
# define HBLKSIZE ((size_t)CPP_HBLKSIZE)
# define HBLKDISPL(objptr) (((size_t) (objptr)) & (HBLKSIZE-1))
/* Round up byte allocation requests to integral number of words, etc. */
-# define ROUNDED_UP_WORDS(n) \
- BYTES_TO_WORDS((n) + (WORDS_TO_BYTES(1) - 1 + EXTRA_BYTES))
# define ROUNDED_UP_GRANULES(n) \
BYTES_TO_GRANULES((n) + (GRANULE_BYTES - 1 + EXTRA_BYTES))
# if MAX_EXTRA_BYTES == 0
*/
# ifdef LARGE_CONFIG
-# define LOG_PHT_ENTRIES 20 /* Collisions likely at 1M blocks, */
+# if CPP_WORDSZ == 32
+# define LOG_PHT_ENTRIES 20 /* Collisions likely at 1M blocks, */
/* which is >= 4GB. Each table takes */
/* 128KB, some of which may never be */
/* touched. */
+# else
+# define LOG_PHT_ENTRIES 21 /* Collisions likely at 2M blocks, */
+ /* which is >= 8GB. Each table takes */
+ /* 256KB, some of which may never be */
+ /* touched. */
+# endif
# else
# ifdef SMALL_CONFIG
-# define LOG_PHT_ENTRIES 14 /* Collisions are likely if heap grows */
- /* to more than 16K hblks = 64MB. */
- /* Each hash table occupies 2K bytes. */
+# define LOG_PHT_ENTRIES 15 /* Collisions are likely if heap grows */
+ /* to more than 32K hblks = 128MB. */
+ /* Each hash table occupies 4K bytes. */
# else /* default "medium" configuration */
-# define LOG_PHT_ENTRIES 16 /* Collisions are likely if heap grows */
- /* to more than 64K hblks >= 256MB. */
- /* Each hash table occupies 8K bytes. */
+# define LOG_PHT_ENTRIES 18 /* Collisions are likely if heap grows */
+ /* to more than 256K hblks >= 1GB. */
+ /* Each hash table occupies 32K bytes. */
/* Even for somewhat smaller heaps, */
/* say half that, collisions may be an */
/* issue because we blacklist */
/* changed. */
size_t hb_sz; /* If in use, size in bytes, of objects in the block. */
/* if free, the size in bytes of the whole block */
+ /* We assume that this is convertible to signed_word */
+ /* without generating a negative result. We avoid */
+ /* generating free blocks larger than that. */
word hb_descr; /* object descriptor for marking. See */
/* mark.h. */
# ifdef MARK_BIT_PER_OBJ
# define HBLK_IS_FREE(hdr) (((hdr) -> hb_flags & FREE_BLK) != 0)
-# define OBJ_SZ_TO_BLOCKS(sz) divHBLKSZ(sz + HBLKSIZE-1)
+# define OBJ_SZ_TO_BLOCKS(sz) divHBLKSZ((sz) + HBLKSIZE-1)
/* Size of block (in units of HBLKSIZE) needed to hold objects of */
/* given sz (in bytes). */
/* MAX_ROOT_SETS is the maximum number of ranges that can be */
/* registered as static roots. */
# ifdef LARGE_CONFIG
-# define MAX_ROOT_SETS 4096
+# define MAX_ROOT_SETS 8192
# else
- /* GCJ LOCAL: MAX_ROOT_SETS increased to permit more shared */
- /* libraries to be loaded. */
-# define MAX_ROOT_SETS 1024
+# ifdef SMALL_CONFIG
+# define MAX_ROOT_SETS 512
+# else
+# define MAX_ROOT_SETS 2048
+# endif
# endif
# define MAX_EXCLUSIONS (MAX_ROOT_SETS/4)
word _bytes_allocd;
/* Number of words allocated during this collection cycle */
# endif
+ word _bytes_dropped;
+ /* Number of black-listed bytes dropped during GC cycle */
+ /* as a result of repeated scanning during allocation */
+ /* attempts. These are treated largely as allocated, */
+ /* even though they are not useful to the client. */
word _bytes_finalized;
/* Approximate number of bytes in objects (and headers) */
- /* That became ready for finalization in the last */
+ /* that became ready for finalization in the last */
/* collection. */
word _non_gc_bytes_at_gc;
/* Number of explicitly managed bytes of storage */
# endif
size_t _size_map[MAXOBJBYTES+1];
- /* Number of words to allocate for a given allocation request in */
- /* bytes. */
+ /* Number of granules to allocate when asked for a certain */
+ /* number of bytes. */
# ifdef STUBBORN_ALLOC
ptr_t _sobjfreelist[MAXOBJGRANULES+1];
# endif
# ifdef LARGE_CONFIG
# if CPP_WORDSZ > 32
-# define MAX_HEAP_SECTS 4096 /* overflows at roughly 64 GB */
+# define MAX_HEAP_SECTS 8192 /* overflows at roughly 128 GB */
# else
# define MAX_HEAP_SECTS 768 /* Separately added heap sections. */
# endif
# ifdef SMALL_CONFIG
# define MAX_HEAP_SECTS 128 /* Roughly 256MB (128*2048*1K) */
# else
-# define MAX_HEAP_SECTS 384 /* Roughly 3GB */
+# if CPP_WORDSZ > 32
+# define MAX_HEAP_SECTS 1024 /* Roughly 8GB */
+# else
+# define MAX_HEAP_SECTS 512 /* Roughly 4GB */
+# endif
# endif
# endif
struct HeapSect {
ptr_t hs_start; size_t hs_bytes;
- } _heap_sects[MAX_HEAP_SECTS];
+ } _heap_sects[MAX_HEAP_SECTS]; /* Heap segments potentially */
+                                        /* containing client objects. */
+# if defined(USE_PROC_FOR_LIBRARIES)
+ struct HeapSect _our_memory[MAX_HEAP_SECTS];
+ /* All GET_MEM allocated */
+ /* memory. Includes block */
+ /* headers and the like. */
+# endif
# if defined(MSWIN32) || defined(MSWINCE)
ptr_t _heap_bases[MAX_HEAP_SECTS];
/* Start address of memory regions obtained from kernel. */
# endif
# ifdef MSWINCE
word _heap_lengths[MAX_HEAP_SECTS];
- /* Commited lengths of memory regions obtained from kernel. */
+ /* Committed lengths of memory regions obtained from kernel. */
# endif
struct roots _static_roots[MAX_ROOT_SETS];
# if !defined(MSWIN32) && !defined(MSWINCE)
# define GC_large_free_bytes GC_arrays._large_free_bytes
# define GC_large_allocd_bytes GC_arrays._large_allocd_bytes
# define GC_max_large_allocd_bytes GC_arrays._max_large_allocd_bytes
+# define GC_bytes_dropped GC_arrays._bytes_dropped
# define GC_bytes_finalized GC_arrays._bytes_finalized
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
# define GC_bytes_freed GC_arrays._bytes_freed
# define GC_requested_heapsize GC_arrays._requested_heapsize
# define GC_bytes_allocd_before_gc GC_arrays._bytes_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
+# ifdef USE_PROC_FOR_LIBRARIES
+# define GC_our_memory GC_arrays._our_memory
+# endif
# define GC_last_stack GC_arrays._last_stack
#ifdef ENABLE_TRACE
#define GC_trace_addr GC_arrays._trace_addr
#endif
# ifdef USE_MUNMAP
# define GC_unmapped_bytes GC_arrays._unmapped_bytes
+# else
+# define GC_unmapped_bytes 0
# endif
# if defined(MSWIN32) || defined(MSWINCE)
# define GC_heap_bases GC_arrays._heap_bases
extern word GC_n_heap_sects; /* Number of separately added heap */
/* sections. */
+#ifdef USE_PROC_FOR_LIBRARIES
+ extern word GC_n_memory; /* Number of GET_MEM allocated memory */
+ /* sections. */
+#endif
+
extern word GC_page_size;
# if defined(MSWIN32) || defined(MSWINCE)
extern GC_bool GC_world_stopped;
#endif
-/* Operations */
-# ifndef abs
-# define abs(x) ((x) < 0? (-(x)) : (x))
-# endif
-
-
/* Marks are in a reserved area in */
/* each heap block. Each word has one mark bit associated */
/* with it. Only those corresponding to the beginning of an */
void GC_initiate_gc(void);
/* initiate collection. */
/* If the mark state is invalid, this */
- /* becomes full colleection. Otherwise */
+ /* becomes full collection. Otherwise */
/* it's partial. */
void GC_push_all(ptr_t bottom, ptr_t top);
/* Push everything in a range */
/* stacks are scheduled for scanning in *GC_push_other_roots, which */
/* is thread-package-specific. */
#endif
-void GC_push_current_stack(ptr_t cold_gc_frame, void *context);
- /* Push enough of the current stack eagerly to */
- /* ensure that callee-save registers saved in */
- /* GC frames are scanned. */
- /* In the non-threads case, schedule entire */
- /* stack for scanning. */
- /* The second argument is a pointer to the */
- /* (possibly null) thread context, for */
- /* (currently hypothetical) more precise */
- /* stack scanning. */
void GC_push_roots(GC_bool all, ptr_t cold_gc_frame);
/* Push all or dirty roots. */
extern void (*GC_push_other_roots)(void);
/* Push system or application specific roots */
/* onto the mark stack. In some environments */
/* (e.g. threads environments) this is */
- /* predfined to be non-zero. A client supplied */
- /* replacement should also call the original */
- /* function. */
-extern void GC_push_gc_structures(void);
- /* Push GC internal roots. These are normally */
- /* included in the static data segment, and */
- /* Thus implicitly pushed. But we must do this */
- /* explicitly if normal root processing is */
- /* disabled. Calls the following: */
+ /* predefined to be non-zero. A client */
+ /* supplied replacement should also call the */
+ /* original function. */
+
extern void GC_push_finalizer_structures(void);
extern void GC_push_stubborn_structures (void);
# ifdef THREADS
extern void GC_push_thread_structures (void);
# endif
+ extern void (*GC_push_typed_structures) (void);
+ /* A pointer such that we can avoid linking in */
+ /* the typed allocation support if unused. */
extern void (*GC_start_call_back) (void);
/* Called at start of full collections. */
/* Not called if 0. Called with allocation */
/* lock held. */
/* 0 by default. */
-void GC_push_regs_and_stack(ptr_t cold_gc_frame);
void GC_push_regs(void);
/* Ditto, but also mark from clean pages. */
struct hblk * GC_push_next_marked_uncollectable(struct hblk * h);
/* Ditto, but mark only from uncollectable pages. */
-GC_bool GC_stopped_mark(GC_stop_func stop_func);
- /* Stop world and mark from all roots */
- /* and rescuers. */
void GC_clear_hdr_marks(hdr * hhdr);
/* Clear the mark bits in a header */
void GC_set_hdr_marks(hdr * hhdr);
/* Return FALSE on failure. */
void GC_register_displacement_inner(size_t offset);
/* Version of GC_register_displacement */
- /* that assumes lock is already held */
- /* and signals are already disabled. */
+ /* that assumes lock is already held. */
void GC_initialize_offsets(void);
/* Initialize GC_valid_offsets, */
/* reclaimed bytes to *count. */
GC_bool GC_block_empty(hdr * hhdr);
/* Block completely unmarked? */
-GC_bool GC_never_stop_func(void);
+GC_bool GC_CALLBACK GC_never_stop_func(void);
/* Returns FALSE. */
GC_bool GC_try_to_collect_inner(GC_stop_func f);
/* Collect; caller must have acquired */
- /* lock and disabled signals. */
- /* Collection is aborted if f returns */
- /* TRUE. Returns TRUE if it completes */
- /* successfully. */
+ /* lock. Collection is aborted if f */
+ /* returns TRUE. Returns TRUE if it */
+ /* completes successfully. */
# define GC_gcollect_inner() \
(void) GC_try_to_collect_inner(GC_never_stop_func)
-void GC_finish_collection(void);
- /* Finish collection. Mark bits are */
- /* consistent and lock is still held. */
GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page);
/* Collect or expand heap in an attempt */
/* make the indicated number of free */
/* this procedure yet this GC cycle. */
GC_API void * GC_make_closure(GC_finalization_proc fn, void * data);
-GC_API void GC_debug_invoke_finalizer(void * obj, void * data);
+GC_API void GC_CALLBACK GC_debug_invoke_finalizer(void * obj, void * data);
/* Auxiliary fns to make finalization work */
/* correctly with displaced pointers introduced */
/* by the debugging allocators. */
void GC_add_to_heap(struct hblk *p, size_t bytes);
/* Add a HBLKSIZE aligned chunk to the heap. */
+
+#ifdef USE_PROC_FOR_LIBRARIES
+ void GC_add_to_our_memory(ptr_t p, size_t bytes);
+ /* Add a chunk to GC_our_memory. */
+ /* If p == 0, do nothing. */
+#else
+# define GC_add_to_our_memory(p, bytes)
+#endif
void GC_print_obj(ptr_t p);
/* P points to somewhere inside an object with */
/* Could the page contain valid heap pointers? */
void GC_remove_protection(struct hblk *h, word nblocks,
GC_bool pointerfree);
- /* h is about to be writteni or allocated. Ensure */
+ /* h is about to be written or allocated. Ensure */
/* that it's not write protected by the virtual */
/* dirty bit implementation. */
#endif
/* Make arguments appear live to compiler */
-# ifdef __WATCOMC__
+# if defined(__BORLANDC__) || defined(__WATCOMC__)
void GC_noop(void*, ...);
# else
# ifdef __DMC__
# endif
# endif
-void GC_noop1(word);
+GC_API void GC_CALL GC_noop1(word);
/* Logging and diagnostic output: */
GC_API void GC_printf (const char * format, ...);
This code works correctly (ugliness is to avoid "unused var" warnings) */
# define GC_STATIC_ASSERT(expr) do { if (0) { char j[(expr)? 1 : -1]; j[0]='\0'; j[0]=j[0]; } } while(0)
#else
-# define GC_STATIC_ASSERT(expr) sizeof(char[(expr)? 1 : -1])
+# define GC_STATIC_ASSERT(expr) (void)sizeof(char[(expr)? 1 : -1])
#endif
-# if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
+# if defined(PARALLEL_MARK)
/* We need additional synchronization facilities from the thread */
/* support. We believe these are less performance critical */
/* than the main garbage collector lock; standard pthreads-based */
/* GC_notify_all_builder() is called when GC_fl_builder_count */
/* reaches 0. */
- extern void GC_acquire_mark_lock();
- extern void GC_release_mark_lock();
- extern void GC_notify_all_builder();
- /* extern void GC_wait_builder(); */
- extern void GC_wait_for_reclaim();
+ void GC_acquire_mark_lock(void);
+ void GC_release_mark_lock(void);
+ void GC_notify_all_builder(void);
+ void GC_wait_for_reclaim(void);
extern word GC_fl_builder_count; /* Protected by mark lock. */
-# endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
-# ifdef PARALLEL_MARK
- extern void GC_notify_all_marker();
- extern void GC_wait_marker();
+
+ void GC_notify_all_marker(void);
+ void GC_wait_marker(void);
extern word GC_mark_no; /* Protected by mark lock. */
extern void GC_help_marker(word my_mark_no);
/* were possible, and a couple of routines to facilitate */
/* catching accesses to bad addresses when that's */
/* possible/needed. */
-#ifdef UNIX_LIKE
+#if defined(UNIX_LIKE) || (defined(NEED_FIND_LIMIT) && defined(CYGWIN32))
# include <setjmp.h>
# if defined(SUNOS5SIGS) && !defined(FREEBSD)
# include <sys/siginfo.h>