diff --git a/src/mm/boehm-gc/include/private/gc_priv.h b/src/mm/boehm-gc/include/private/gc_priv.h
index ec93ffea9..520c79108 100644
--- a/src/mm/boehm-gc/include/private/gc_priv.h
+++ b/src/mm/boehm-gc/include/private/gc_priv.h
@@ -75,9 +75,13 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
#if __GNUC__ >= 3
# define EXPECT(expr, outcome) __builtin_expect(expr,outcome)
/* Equivalent to (expr), but predict that usually (expr)==outcome. */
-# define INLINE inline
#else
# define EXPECT(expr, outcome) (expr)
+#endif /* __GNUC__ */
+
+#if __GNUC__ >= 3
+# define INLINE inline
+#else
# define INLINE
#endif /* __GNUC__ */

@@ -119,7 +123,7 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
/*********************************/

/* #define STUBBORN_ALLOC */
- /* Enable stubborm allocation, and thus a limited */
+ /* Enable stubborn allocation, and thus a limited */
/* form of incremental collection w/o dirty bits. */

/* #define ALL_INTERIOR_POINTERS */
@@ -138,7 +142,7 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
/* 2. This option makes it hard for the collector */
/* to allocate space that is not ``pointed to'' */
/* by integers, etc. Under SunOS 4.X with a */
- /* statically linked libc, we empiricaly */
+ /* statically linked libc, we empirically */
/* observed that it would be difficult to */
/* allocate individual objects larger than 100K. */
/* Even if only smaller objects are allocated, */
@@ -171,9 +175,6 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# define MAXHINCR 4096
# endif

-# define TIME_LIMIT 50 /* We try to keep pause times from exceeding */
- /* this by much. In milliseconds. */
-
# define BL_LIMIT GC_black_list_spacing
/* If we need a block of N bytes, and we have */
/* a block of N + BL_LIMIT bytes available, */
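The EXPECT macro above compiles down to GCC's __builtin_expect branch hint, and degrades to plain (expr) elsewhere. A minimal stand-alone sketch of the intended use, with a hypothetical cached-lookup fast path (the names are illustrative, not from the collector):

#if __GNUC__ >= 3
# define EXPECT(expr, outcome) __builtin_expect(expr, outcome)
#else
# define EXPECT(expr, outcome) (expr)
#endif

static int cached_key, cached_value;

int lookup(int key)
{
    if (EXPECT(key != cached_key, 0)) {   /* predicted rare */
        return -1;                        /* slow path */
    }
    return cached_value;                  /* fall-through fast path */
}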
@@ -230,8 +231,9 @@ void GC_print_callers(struct callinfo info[NFRAMES]);
# define GET_TIME(x) { struct rusage rusage; \
getrusage (RUSAGE_SELF, &rusage); \
x = rusage.ru_utime; }
-# define MS_TIME_DIFF(a,b) ((double) (a.tv_sec - b.tv_sec) * 1000.0 \
- + (double) (a.tv_usec - b.tv_usec) / 1000.0)
+# define MS_TIME_DIFF(a,b) \
+ ((unsigned long)((double) (a.tv_sec - b.tv_sec) * 1000.0 \
+ + (double) (a.tv_usec - b.tv_usec) / 1000.0))
#else /* !BSD_TIME */
# if defined(MSWIN32) || defined(MSWINCE)
# include <windows.h>
@@ -242,7 +244,7 @@ void GC_print_callers(struct callinfo info[NFRAMES]);
# else /* !MSWIN32, !MSWINCE, !BSD_TIME */
# include <time.h>
# if !defined(__STDC__) && defined(SPARC) && defined(SUNOS4)
- clock_t clock(); /* Not in time.h, where it belongs */
+ clock_t clock(void); /* Not in time.h, where it belongs */
# endif
# if defined(FREEBSD) && !defined(CLOCKS_PER_SEC)
# include <machine/limits.h>
@@ -313,10 +315,9 @@ void GC_print_callers(struct callinfo info[NFRAMES]);
PCR_allSigsBlocked, \
PCR_waitForever);
# else
-# if defined(GC_SOLARIS_THREADS) || defined(GC_WIN32_THREADS) \
- || defined(GC_PTHREADS)
- void GC_stop_world();
- void GC_start_world();
+# if defined(GC_WIN32_THREADS) || defined(GC_PTHREADS)
+ void GC_stop_world(void);
+ void GC_start_world(void);
# define STOP_WORLD() GC_stop_world()
# define START_WORLD() GC_start_world()
# else
@@ -330,7 +331,11 @@ void GC_print_callers(struct callinfo info[NFRAMES]);
# define ABORT(s) PCR_Base_Panic(s)
# else
# ifdef SMALL_CONFIG
-# define ABORT(msg) abort()
+# if defined(MSWIN32) || defined(MSWINCE)
+# define ABORT(msg) DebugBreak()
+# else
+# define ABORT(msg) abort()
+# endif
# else
GC_API void GC_abort(const char * msg);
# define ABORT(msg) GC_abort(msg)
@@ -375,12 +380,14 @@ extern GC_warn_proc GC_current_warn_proc;
# define GC_MACH_THREAD_STATE_COUNT PPC_THREAD_STATE_COUNT
# define GC_MACH_HEADER mach_header
# define GC_MACH_SECTION section
+# define GC_GETSECTBYNAME getsectbynamefromheader
# else
# define GC_THREAD_STATE_T ppc_thread_state64_t
# define GC_MACH_THREAD_STATE PPC_THREAD_STATE64
# define GC_MACH_THREAD_STATE_COUNT PPC_THREAD_STATE64_COUNT
# define GC_MACH_HEADER mach_header_64
# define GC_MACH_SECTION section_64
+# define GC_GETSECTBYNAME getsectbynamefromheader_64
# endif
# elif defined(I386) || defined(X86_64)
# if CPP_WORDSZ == 32
@@ -389,15 +396,21 @@ extern GC_warn_proc GC_current_warn_proc;
# define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE32_COUNT
# define GC_MACH_HEADER mach_header
# define GC_MACH_SECTION section
+# define GC_GETSECTBYNAME getsectbynamefromheader
# else
# define GC_THREAD_STATE_T x86_thread_state64_t
# define GC_MACH_THREAD_STATE x86_THREAD_STATE64
# define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE64_COUNT
# define GC_MACH_HEADER mach_header_64
# define GC_MACH_SECTION section_64
+# define GC_GETSECTBYNAME getsectbynamefromheader_64
# endif
# else
-# error define GC_THREAD_STATE_T
+# if defined(ARM32)
+# define GC_THREAD_STATE_T arm_thread_state_t
+# else
+# error define GC_THREAD_STATE_T
+# endif
# define GC_MACH_THREAD_STATE MACHINE_THREAD_STATE
# define GC_MACH_THREAD_STATE_COUNT MACHINE_THREAD_STATE_COUNT
# endif
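The BSD_TIME branch above pairs GET_TIME with the now-truncating MS_TIME_DIFF. A self-contained sketch of how these collector-style timing macros measure a pause, assuming POSIX getrusage() (the report_pause wrapper is illustrative):

#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>

#define CLOCK_TYPE struct timeval
#define GET_TIME(x) { struct rusage rusage; \
                      getrusage(RUSAGE_SELF, &rusage); \
                      x = rusage.ru_utime; }
#define MS_TIME_DIFF(a,b) \
        ((unsigned long)((double) (a.tv_sec - b.tv_sec) * 1000.0 \
                         + (double) (a.tv_usec - b.tv_usec) / 1000.0))

void report_pause(void (*work)(void))
{
    CLOCK_TYPE start_time, done_time;
    GET_TIME(start_time);
    work();
    GET_TIME(done_time);
    /* The cast added by the patch makes the result printable */
    /* with %lu rather than a double conversion.              */
    printf("pause: %lu ms\n", MS_TIME_DIFF(done_time, start_time));
}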
@@ -486,17 +499,22 @@ extern GC_warn_proc GC_current_warn_proc;
/* */
/*********************/

-/* heap block size, bytes. Should be power of 2 */
+/* Heap block size, bytes. Should be power of 2. */
+/* Incremental GC with MPROTECT_VDB currently requires the */
+/* page size to be a multiple of HBLKSIZE. Since most modern */
+/* architectures support variable page sizes down to 4K, and */
+/* X86 is generally 4K, we now default to 4K, except for */
+/* Alpha: Seems to be used with 8K pages. */
+/* SMALL_CONFIG: Want less block-level fragmentation. */
#ifndef HBLKSIZE
# ifdef SMALL_CONFIG
# define CPP_LOG_HBLKSIZE 10
# else
-# if (CPP_WORDSZ == 32) || (defined(HPUX) && defined(HP_PA))
- /* HPUX/PA seems to use 4K pages with the 64 bit ABI */
-# define CPP_LOG_HBLKSIZE 12
-# else
+# if defined(ALPHA)
# define CPP_LOG_HBLKSIZE 13
+# else
+# define CPP_LOG_HBLKSIZE 12
# endif
# endif
#else
@@ -523,6 +541,7 @@ extern GC_warn_proc GC_current_warn_proc;
# endif
# undef HBLKSIZE
#endif
+
# define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
# define LOG_HBLKSIZE ((size_t)CPP_LOG_HBLKSIZE)
# define HBLKSIZE ((size_t)CPP_HBLKSIZE)
@@ -556,8 +575,6 @@ extern GC_warn_proc GC_current_warn_proc;
# define HBLKDISPL(objptr) (((size_t) (objptr)) & (HBLKSIZE-1))

/* Round up byte allocation requests to integral number of words, etc. */
-# define ROUNDED_UP_WORDS(n) \
- BYTES_TO_WORDS((n) + (WORDS_TO_BYTES(1) - 1 + EXTRA_BYTES))
# define ROUNDED_UP_GRANULES(n) \
BYTES_TO_GRANULES((n) + (GRANULE_BYTES - 1 + EXTRA_BYTES))
# if MAX_EXTRA_BYTES == 0
@@ -585,19 +602,26 @@ extern GC_warn_proc GC_current_warn_proc;
*/

# ifdef LARGE_CONFIG
-# define LOG_PHT_ENTRIES 20 /* Collisions likely at 1M blocks, */
+# if CPP_WORDSZ == 32
+# define LOG_PHT_ENTRIES 20 /* Collisions likely at 1M blocks, */
/* which is >= 4GB. Each table takes */
/* 128KB, some of which may never be */
/* touched. */
+# else
+# define LOG_PHT_ENTRIES 21 /* Collisions likely at 2M blocks, */
+ /* which is >= 8GB. Each table takes */
+ /* 256KB, some of which may never be */
+ /* touched. */
+# endif
# else
# ifdef SMALL_CONFIG
-# define LOG_PHT_ENTRIES 14 /* Collisions are likely if heap grows */
- /* to more than 16K hblks = 64MB. */
- /* Each hash table occupies 2K bytes. */
+# define LOG_PHT_ENTRIES 15 /* Collisions are likely if heap grows */
+ /* to more than 32K hblks = 128MB. */
+ /* Each hash table occupies 4K bytes. */
# else /* default "medium" configuration */
-# define LOG_PHT_ENTRIES 16 /* Collisions are likely if heap grows */
- /* to more than 64K hblks >= 256MB. */
- /* Each hash table occupies 8K bytes. */
+# define LOG_PHT_ENTRIES 18 /* Collisions are likely if heap grows */
+ /* to more than 256K hblks >= 1GB. */
+ /* Each hash table occupies 32K bytes. */
/* Even for somewhat smaller heaps, */
/* say half that, collisions may be an */
/* issue because we blacklist */
@@ -694,6 +718,9 @@ struct hblkhdr {
/* changed. */
size_t hb_sz; /* If in use, size in bytes, of objects in the block. */
/* if free, the size in bytes of the whole block */
+ /* We assume that this is convertible to signed_word */
+ /* without generating a negative result. We avoid */
+ /* generating free blocks larger than that. */
word hb_descr; /* object descriptor for marking. See */
/* mark.h. */
# ifdef MARK_BIT_PER_OBJ
@@ -761,7 +788,7 @@ struct hblk {

# define HBLK_IS_FREE(hdr) (((hdr) -> hb_flags & FREE_BLK) != 0)

-# define OBJ_SZ_TO_BLOCKS(sz) divHBLKSZ(sz + HBLKSIZE-1)
+# define OBJ_SZ_TO_BLOCKS(sz) divHBLKSZ((sz) + HBLKSIZE-1)
/* Size of block (in units of HBLKSIZE) needed to hold objects of */
/* given sz (in bytes). */
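The OBJ_SZ_TO_BLOCKS change above only adds parentheses around the argument, but that is exactly what makes the macro safe for expression arguments. A stand-alone sketch, assuming HBLKSIZE is 4096 and divHBLKSZ is the shift defined locally here:

#include <assert.h>
#define LOG_HBLKSIZE 12
#define HBLKSIZE (1u << LOG_HBLKSIZE)
#define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)
#define OBJ_SZ_TO_BLOCKS_OLD(sz) divHBLKSZ(sz + HBLKSIZE-1)
#define OBJ_SZ_TO_BLOCKS(sz)     divHBLKSZ((sz) + HBLKSIZE-1)

int main(void)
{
    assert(OBJ_SZ_TO_BLOCKS(4096) == 1);        /* exact fit */
    assert(OBJ_SZ_TO_BLOCKS(4097) == 2);        /* rounds up */
    /* With an expression argument the old form breaks:         */
    /* OBJ_SZ_TO_BLOCKS_OLD(1u << 13) expands so that '+' binds */
    /* tighter than '<<', shifting by 13 + 4095 instead.        */
    assert(OBJ_SZ_TO_BLOCKS(1u << 13) == 2);
    return 0;
}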
@@ -776,11 +803,13 @@ struct hblk {

/* MAX_ROOT_SETS is the maximum number of ranges that can be */
/* registered as static roots. */
# ifdef LARGE_CONFIG
-# define MAX_ROOT_SETS 4096
+# define MAX_ROOT_SETS 8192
# else
- /* GCJ LOCAL: MAX_ROOT_SETS increased to permit more shared */
- /* libraries to be loaded. */
-# define MAX_ROOT_SETS 1024
+# ifdef SMALL_CONFIG
+# define MAX_ROOT_SETS 512
+# else
+# define MAX_ROOT_SETS 2048
+# endif
# endif

# define MAX_EXCLUSIONS (MAX_ROOT_SETS/4)
@@ -858,9 +887,14 @@ struct _GC_arrays {
word _bytes_allocd;
/* Number of words allocated during this collection cycle */
# endif
+ word _bytes_dropped;
+ /* Number of black-listed bytes dropped during GC cycle */
+ /* as a result of repeated scanning during allocation */
+ /* attempts. These are treated largely as allocated, */
+ /* even though they are not useful to the client. */
word _bytes_finalized;
/* Approximate number of bytes in objects (and headers) */
- /* That became ready for finalization in the last */
+ /* that became ready for finalization in the last */
/* collection. */
word _non_gc_bytes_at_gc;
/* Number of explicitly managed bytes of storage */
@@ -909,8 +943,8 @@ struct _GC_arrays {
# endif

size_t _size_map[MAXOBJBYTES+1];
- /* Number of words to allocate for a given allocation request in */
- /* bytes. */
+ /* Number of granules to allocate when asked for a certain */
+ /* number of bytes. */

# ifdef STUBBORN_ALLOC
ptr_t _sobjfreelist[MAXOBJGRANULES+1];
@@ -959,7 +993,7 @@ struct _GC_arrays {
# endif
# ifdef LARGE_CONFIG
# if CPP_WORDSZ > 32
-# define MAX_HEAP_SECTS 4096 /* overflows at roughly 64 GB */
+# define MAX_HEAP_SECTS 8192 /* overflows at roughly 128 GB */
# else
# define MAX_HEAP_SECTS 768 /* Separately added heap sections. */
# endif
@@ -967,19 +1001,30 @@ struct _GC_arrays {
# ifdef SMALL_CONFIG
# define MAX_HEAP_SECTS 128 /* Roughly 256MB (128*2048*1K) */
# else
-# define MAX_HEAP_SECTS 384 /* Roughly 3GB */
+# if CPP_WORDSZ > 32
+# define MAX_HEAP_SECTS 1024 /* Roughly 8GB */
+# else
+# define MAX_HEAP_SECTS 512 /* Roughly 4GB */
+# endif
# endif
# endif
struct HeapSect {
ptr_t hs_start; size_t hs_bytes;
- } _heap_sects[MAX_HEAP_SECTS];
+ } _heap_sects[MAX_HEAP_SECTS]; /* Heap segments potentially */
+ /* containing client objects. */
+# if defined(USE_PROC_FOR_LIBRARIES)
+ struct HeapSect _our_memory[MAX_HEAP_SECTS];
+ /* All GET_MEM allocated */
+ /* memory. Includes block */
+ /* headers and the like. */
+# endif
# if defined(MSWIN32) || defined(MSWINCE)
ptr_t _heap_bases[MAX_HEAP_SECTS];
/* Start address of memory regions obtained from kernel. */
# endif
# ifdef MSWINCE
word _heap_lengths[MAX_HEAP_SECTS];
- /* Commited lengths of memory regions obtained from kernel. */
+ /* Committed lengths of memory regions obtained from kernel. */
# endif
struct roots _static_roots[MAX_ROOT_SETS];
# if !defined(MSWIN32) && !defined(MSWINCE)
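The HeapSect table above is how the collector records separately added heap segments. A sketch of the kind of traversal this layout supports, assuming the GC_heap_sects and GC_n_heap_sects accessors defined further down in this header (the function itself is illustrative, not collector code):

/* Sum the sizes of all registered heap sections. */
static size_t total_heap_bytes(void)
{
    size_t i, total = 0;
    for (i = 0; i < GC_n_heap_sects; ++i)
        total += GC_heap_sects[i].hs_bytes;
    return total;
}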
@@ -1028,6 +1073,7 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define GC_large_free_bytes GC_arrays._large_free_bytes
# define GC_large_allocd_bytes GC_arrays._large_allocd_bytes
# define GC_max_large_allocd_bytes GC_arrays._max_large_allocd_bytes
+# define GC_bytes_dropped GC_arrays._bytes_dropped
# define GC_bytes_finalized GC_arrays._bytes_finalized
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
# define GC_bytes_freed GC_arrays._bytes_freed
@@ -1040,12 +1086,17 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define GC_requested_heapsize GC_arrays._requested_heapsize
# define GC_bytes_allocd_before_gc GC_arrays._bytes_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
+# ifdef USE_PROC_FOR_LIBRARIES
+# define GC_our_memory GC_arrays._our_memory
+# endif
# define GC_last_stack GC_arrays._last_stack
#ifdef ENABLE_TRACE
#define GC_trace_addr GC_arrays._trace_addr
#endif
# ifdef USE_MUNMAP
# define GC_unmapped_bytes GC_arrays._unmapped_bytes
+# else
+# define GC_unmapped_bytes 0
# endif
# if defined(MSWIN32) || defined(MSWINCE)
# define GC_heap_bases GC_arrays._heap_bases
@@ -1139,6 +1190,11 @@ GC_API word GC_fo_entries;
extern word GC_n_heap_sects; /* Number of separately added heap */
/* sections. */

+#ifdef USE_PROC_FOR_LIBRARIES
+ extern word GC_n_memory; /* Number of GET_MEM allocated memory */
+ /* sections. */
+#endif
+
extern word GC_page_size;

# if defined(MSWIN32) || defined(MSWINCE)
@@ -1197,12 +1253,6 @@ extern long GC_large_alloc_warn_suppressed;
extern GC_bool GC_world_stopped;
#endif

-/* Operations */
-# ifndef abs
-# define abs(x) ((x) < 0? (-(x)) : (x))
-# endif
-
-
/* Marks are in a reserved area in */
/* each heap block. Each word has one mark bit associated */
/* with it. Only those corresponding to the beginning of an */
@@ -1291,7 +1341,7 @@ GC_bool GC_mark_some(ptr_t cold_gc_frame);

void GC_initiate_gc(void); /* initiate collection. */
/* If the mark state is invalid, this */
- /* becomes full colleection. Otherwise */
+ /* becomes full collection. Otherwise */
/* it's partial. */

void GC_push_all(ptr_t bottom, ptr_t top);
/* Push everything in a range */
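The comment block above describes per-word mark bits kept in a reserved area of each heap block. The usual bitmap arithmetic behind such a scheme, as an illustrative stand-alone sketch (the collector's real accessors are the mark-bit macros in its mark headers, not this function):

/* Test the mark bit for the object starting at word index i. */
static int mark_bit_set(const unsigned long *marks, size_t i)
{
    const size_t bits = 8 * sizeof(unsigned long);
    return (int)((marks[i / bits] >> (i % bits)) & 1);
}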
@@ -1330,42 +1380,29 @@ void GC_push_all_eager (ptr_t b, ptr_t t);
/* stacks are scheduled for scanning in *GC_push_other_roots, which */
/* is thread-package-specific. */
#endif
-void GC_push_current_stack(ptr_t cold_gc_frame, void *context);
- /* Push enough of the current stack eagerly to */
- /* ensure that callee-save registers saved in */
- /* GC frames are scanned. */
- /* In the non-threads case, schedule entire */
- /* stack for scanning. */
- /* The second argument is a pointer to the */
- /* (possibly null) thread context, for */
- /* (currently hypothetical) more precise */
- /* stack scanning. */
void GC_push_roots(GC_bool all, ptr_t cold_gc_frame);
/* Push all or dirty roots. */
extern void (*GC_push_other_roots)(void);
/* Push system or application specific roots */
/* onto the mark stack. In some environments */
/* (e.g. threads environments) this is */
- /* predfined to be non-zero. A client supplied */
- /* replacement should also call the original */
- /* function. */
-extern void GC_push_gc_structures(void);
- /* Push GC internal roots. These are normally */
- /* included in the static data segment, and */
- /* Thus implicitly pushed. But we must do this */
- /* explicitly if normal root processing is */
- /* disabled. Calls the following: */
+ /* predefined to be non-zero. A client */
+ /* supplied replacement should also call the */
+ /* original function. */
+
extern void GC_push_finalizer_structures(void);
extern void GC_push_stubborn_structures (void);
# ifdef THREADS
extern void GC_push_thread_structures (void);
# endif
+ extern void (*GC_push_typed_structures) (void);
+ /* A pointer such that we can avoid linking in */
+ /* the typed allocation support if unused. */
extern void (*GC_start_call_back) (void);
/* Called at start of full collections. */
/* Not called if 0. Called with allocation */
/* lock held. */
/* 0 by default. */
-void GC_push_regs_and_stack(ptr_t cold_gc_frame);

void GC_push_regs(void);
@@ -1415,9 +1452,6 @@ struct hblk * GC_push_next_marked(struct hblk * h);
/* Ditto, but also mark from clean pages. */
struct hblk * GC_push_next_marked_uncollectable(struct hblk * h);
/* Ditto, but mark only from uncollectable pages. */
-GC_bool GC_stopped_mark(GC_stop_func stop_func);
- /* Stop world and mark from all roots */
- /* and rescuers. */
void GC_clear_hdr_marks(hdr * hhdr);
/* Clear the mark bits in a header */
void GC_set_hdr_marks(hdr * hhdr);
@@ -1520,8 +1554,7 @@ GC_bool GC_add_map_entry(size_t sz);
/* Return FALSE on failure. */
void GC_register_displacement_inner(size_t offset);
/* Version of GC_register_displacement */
- /* that assumes lock is already held */
- /* and signals are already disabled. */
+ /* that assumes lock is already held. */

void GC_initialize_offsets(void);
/* Initialize GC_valid_offsets, */
@@ -1595,20 +1628,16 @@ ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
/* reclaimed bytes to *count. */
GC_bool GC_block_empty(hdr * hhdr);
/* Block completely unmarked? */
-GC_bool GC_never_stop_func(void);
+GC_bool GC_CALLBACK GC_never_stop_func(void);
/* Returns FALSE. */
GC_bool GC_try_to_collect_inner(GC_stop_func f);
/* Collect; caller must have acquired */
- /* lock and disabled signals. */
- /* Collection is aborted if f returns */
- /* TRUE. Returns TRUE if it completes */
- /* successfully. */
+ /* lock. Collection is aborted if f */
+ /* returns TRUE. Returns TRUE if it */
+ /* completes successfully. */
# define GC_gcollect_inner() \
(void) GC_try_to_collect_inner(GC_never_stop_func)
-void GC_finish_collection(void);
- /* Finish collection. Mark bits are */
- /* consistent and lock is still held. */
GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page);
/* Collect or expand heap in an attempt */
/* make the indicated number of free */
@@ -1706,13 +1735,21 @@ void GC_notify_or_invoke_finalizers(void);
/* this procedure yet this GC cycle. */

GC_API void * GC_make_closure(GC_finalization_proc fn, void * data);
-GC_API void GC_debug_invoke_finalizer(void * obj, void * data);
+GC_API void GC_CALLBACK GC_debug_invoke_finalizer(void * obj, void * data);
/* Auxiliary fns to make finalization work */
/* correctly with displaced pointers introduced */
/* by the debugging allocators. */

void GC_add_to_heap(struct hblk *p, size_t bytes);
/* Add a HBLKSIZE aligned chunk to the heap. */
+
+#ifdef USE_PROC_FOR_LIBRARIES
+ void GC_add_to_our_memory(ptr_t p, size_t bytes);
+ /* Add a chunk to GC_our_memory. */
+ /* If p == 0, do nothing. */
+#else
+# define GC_add_to_our_memory(p, bytes)
+#endif

void GC_print_obj(ptr_t p);
/* P points to somewhere inside an object with */
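GC_try_to_collect_inner above aborts the collection whenever its GC_stop_func argument returns TRUE, with GC_never_stop_func as the trivial instance. A sketch of a client-level deadline function of the same shape (the public GC_try_to_collect takes the same callback type; the 5 ms budget is arbitrary):

#include <time.h>

static clock_t stop_deadline;

static int deadline_reached(void)   /* a GC_stop_func: TRUE aborts */
{
    return clock() >= stop_deadline;
}

/* Usage sketch:
     stop_deadline = clock() + 5 * CLOCKS_PER_SEC / 1000;
     (void) GC_try_to_collect(deadline_reached);
*/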
@@ -1813,7 +1850,7 @@ GC_bool GC_page_was_ever_dirty(struct hblk *h);
/* Could the page contain valid heap pointers? */
void GC_remove_protection(struct hblk *h, word nblocks,
GC_bool pointerfree);
- /* h is about to be writteni or allocated. Ensure */
+ /* h is about to be written or allocated. Ensure */
/* that it's not write protected by the virtual */
/* dirty bit implementation. */
@@ -1851,7 +1888,7 @@ void GC_print_finalization_stats(void);
#endif

/* Make arguments appear live to compiler */
-# ifdef __WATCOMC__
+# if defined(__BORLANDC__) || defined(__WATCOMC__)
void GC_noop(void*, ...);
# else
# ifdef __DMC__
@@ -1861,7 +1898,7 @@ void GC_print_finalization_stats(void);
# endif
# endif

-void GC_noop1(word);
+GC_API void GC_CALL GC_noop1(word);

/* Logging and diagnostic output: */
GC_API void GC_printf (const char * format, ...);
@@ -1898,10 +1935,10 @@ void GC_err_puts(const char *s);
This code works correctly (ugliness is to avoid "unused var" warnings) */
# define GC_STATIC_ASSERT(expr) do { if (0) { char j[(expr)? 1 : -1]; j[0]='\0'; j[0]=j[0]; } } while(0)
#else
-# define GC_STATIC_ASSERT(expr) sizeof(char[(expr)? 1 : -1])
+# define GC_STATIC_ASSERT(expr) (void)sizeof(char[(expr)? 1 : -1])
#endif

-# if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
+# if defined(PARALLEL_MARK)
/* We need additional synchronization facilities from the thread */
/* support. We believe these are less performance critical */
/* than the main garbage collector lock; standard pthreads-based */
@@ -1918,17 +1955,15 @@ void GC_err_puts(const char *s);
/* GC_notify_all_builder() is called when GC_fl_builder_count */
/* reaches 0. */

- extern void GC_acquire_mark_lock();
- extern void GC_release_mark_lock();
- extern void GC_notify_all_builder();
- /* extern void GC_wait_builder(); */
- extern void GC_wait_for_reclaim();
+ void GC_acquire_mark_lock(void);
+ void GC_release_mark_lock(void);
+ void GC_notify_all_builder(void);
+ void GC_wait_for_reclaim(void);

extern word GC_fl_builder_count; /* Protected by mark lock. */
-# endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
-# ifdef PARALLEL_MARK
- extern void GC_notify_all_marker();
- extern void GC_wait_marker();
+
+ void GC_notify_all_marker(void);
+ void GC_wait_marker(void);
extern word GC_mark_no; /* Protected by mark lock. */

extern void GC_help_marker(word my_mark_no);
@@ -1970,7 +2005,7 @@ void GC_err_puts(const char *s);
/* were possible, and a couple of routines to facilitate */
/* catching accesses to bad addresses when that's */
/* possible/needed. */
-#ifdef UNIX_LIKE
+#if defined(UNIX_LIKE) || (defined(NEED_FIND_LIMIT) && defined(CYGWIN32))
# include <setjmp.h>
# if defined(SUNOS5SIGS) && !defined(FREEBSD)
# include <sys/siginfo.h>
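The GC_STATIC_ASSERT change above adds a (void) cast so the sizeof form can stand alone as an expression statement without unused-value warnings. A stand-alone sketch of how the negative-array-size idiom fails at compile time (MY_STATIC_ASSERT is a local stand-in, not the collector's macro):

#define MY_STATIC_ASSERT(expr) (void)sizeof(char[(expr) ? 1 : -1])

void sanity_checks(void)
{
    MY_STATIC_ASSERT(sizeof(char) == 1);   /* true: compiles          */
    /* MY_STATIC_ASSERT(0); would be rejected by the compiler:        */
    /* char[-1] is an invalid array type.                             */
}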