2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
17 # include "private/gc_priv.h"
19 # if defined(LINUX) && !defined(POWERPC)
20 # include <linux/version.h>
21 # if (LINUX_VERSION_CODE <= 0x10400)
22 /* Ugly hack to get struct sigcontext_struct definition. Required */
23 /* for some early 1.3.X releases. Will hopefully go away soon. */
24 /* in some later Linux releases, asm/sigcontext.h may have to */
25 /* be included instead. */
27 # include <asm/signal.h>
30 /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
31 /* struct sigcontext. libc6 (glibc2) uses "struct sigcontext" in */
32 /* prototypes, so we have to include the top-level sigcontext.h to */
33 /* make sure the former gets defined to be the latter if appropriate. */
34 # include <features.h>
36 # if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
37 /* glibc 2.1 no longer has sigcontext.h. But signal.h */
38 /* has the right declaration for glibc 2.1. */
39 # include <sigcontext.h>
40 # endif /* 0 == __GLIBC_MINOR__ */
41 # else /* not 2 <= __GLIBC__ */
42 /* libc5 doesn't have <sigcontext.h>: go directly with the kernel */
43 /* one. Check LINUX_VERSION_CODE to see which we should reference. */
44 # include <asm/sigcontext.h>
45 # endif /* 2 <= __GLIBC__ */
48 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
50 # include <sys/types.h>
51 # if !defined(MSWIN32) && !defined(SUNOS4)
58 # define SIGSEGV 0 /* value is irrelevant */
63 /* Blatantly OS dependent routines, except for those that are related */
64 /* to dynamic loading. */
66 # if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START)
67 # define NEED_FIND_LIMIT
70 # if !defined(STACKBOTTOM) && defined(HEURISTIC2)
71 # define NEED_FIND_LIMIT
74 # if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
75 # define NEED_FIND_LIMIT
78 # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
79 || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
80 # define NEED_FIND_LIMIT
83 #if defined(FREEBSD) && defined(I386)
84 # include <machine/trap.h>
86 # define NEED_FIND_LIMIT
90 #if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__) \
91 && !defined(NEED_FIND_LIMIT)
92 /* Used by GC_init_netbsd_elf() below. */
93 # define NEED_FIND_LIMIT
96 #ifdef NEED_FIND_LIMIT
101 # define GC_AMIGA_DEF
102 # include "AmigaOS.c"
106 #if defined(MSWIN32) || defined(MSWINCE)
107 # define WIN32_LEAN_AND_MEAN
109 # include <windows.h>
113 # include <Processes.h>
117 # include <sys/uio.h>
118 # include <malloc.h> /* for locking */
120 #if defined(USE_MMAP) || defined(USE_MUNMAP)
122 --> USE_MUNMAP requires USE_MMAP
124 # include <sys/types.h>
125 # include <sys/mman.h>
126 # include <sys/stat.h>
134 #if (defined(SUNOS5SIGS) || defined (HURD) || defined(LINUX) || defined(NETBSD)) && !defined(FREEBSD)
136 # include <sys/siginfo.h>
138 /* Define SETJMP and friends to be the version that restores */
139 /* the signal mask. */
140 # define SETJMP(env) sigsetjmp(env, 1)
141 # define LONGJMP(env, val) siglongjmp(env, val)
142 # define JMP_BUF sigjmp_buf
144 # define SETJMP(env) setjmp(env)
145 # define LONGJMP(env, val) longjmp(env, val)
146 # define JMP_BUF jmp_buf
150 /* for get_etext and friends */
151 #include <mach-o/getsect.h>
155 /* Apparently necessary for djgpp 2.01. May cause problems with */
156 /* other versions. */
157 typedef long unsigned int caddr_t;
161 # include "il/PCR_IL.h"
162 # include "th/PCR_ThCtl.h"
163 # include "mm/PCR_MM.h"
166 #if !defined(NO_EXECUTE_PERMISSION)
167 # define OPT_PROT_EXEC PROT_EXEC
169 # define OPT_PROT_EXEC 0
172 #if defined(LINUX) && \
173 (defined(USE_PROC_FOR_LIBRARIES) || defined(IA64) || !defined(SMALL_CONFIG))
175 /* We need to parse /proc/self/maps, either to find dynamic libraries, */
176 /* and/or to find the register backing store base (IA64). Do it once */
181 /* Repeatedly perform a read call until the buffer is filled or */
182 /* we encounter EOF. */
183 ssize_t GC_repeat_read(int fd, char *buf, size_t count)
185 ssize_t num_read = 0;
188 while (num_read < count) {
189 result = READ(fd, buf + num_read, count - num_read);
190 if (result < 0) return result;
191 if (result == 0) break;
198 * Apply fn to a buffer containing the contents of /proc/self/maps.
199 * Return the result of fn or, if we failed, 0.
200 * We currently do nothing to /proc/self/maps other than simply read
201 * it. This code could be simplified if we could determine its size
205 word GC_apply_to_maps(word (*fn)(char *))
209 size_t maps_size = 4000; /* Initial guess. */
210 static char init_buf[1];
211 static char *maps_buf = init_buf;
212 static size_t maps_buf_sz = 1;
214 /* Read /proc/self/maps, growing maps_buf as necessary. */
215 /* Note that we may not allocate conventionally, and */
216 /* thus can't use stdio. */
218 if (maps_size >= maps_buf_sz) {
219 /* Grow only by powers of 2, since we leak "too small" buffers. */
220 while (maps_size >= maps_buf_sz) maps_buf_sz *= 2;
221 maps_buf = GC_scratch_alloc(maps_buf_sz);
222 if (maps_buf == 0) return 0;
224 f = open("/proc/self/maps", O_RDONLY);
225 if (-1 == f) return 0;
228 result = GC_repeat_read(f, maps_buf, maps_buf_sz-1);
229 if (result <= 0) return 0;
231 } while (result == maps_buf_sz-1);
233 } while (maps_size >= maps_buf_sz);
234 maps_buf[maps_size] = '\0';
236 /* Apply fn to result. */
240 #endif /* Need GC_apply_to_maps */
242 #if defined(LINUX) && (defined(USE_PROC_FOR_LIBRARIES) || defined(IA64))
244 // GC_parse_map_entry parses an entry from /proc/self/maps so we can
245 // locate all writable data segments that belong to shared libraries.
246 // The format of one of these entries and the fields we care about
248 // XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537 name of mapping...\n
249 // ^^^^^^^^ ^^^^^^^^ ^^^^ ^^
250 // start end prot maj_dev
256 // The parser is called with a pointer to the entry and the return value
// is either NULL or is advanced to the next entry (the byte after the
261 # define OFFSET_MAP_START 0
262 # define OFFSET_MAP_END 9
263 # define OFFSET_MAP_PROT 18
264 # define OFFSET_MAP_MAJDEV 32
265 # define ADDR_WIDTH 8
269 # define OFFSET_MAP_START 0
270 # define OFFSET_MAP_END 17
271 # define OFFSET_MAP_PROT 34
272 # define OFFSET_MAP_MAJDEV 56
273 # define ADDR_WIDTH 16
277 * Assign various fields of the first line in buf_ptr to *start, *end,
278 * *prot_buf and *maj_dev. Only *prot_buf may be set for unwritable maps.
280 char *GC_parse_map_entry(char *buf_ptr, word *start, word *end,
281 char *prot_buf, unsigned int *maj_dev)
286 if (buf_ptr == NULL || *buf_ptr == '\0') {
290 memcpy(prot_buf, buf_ptr+OFFSET_MAP_PROT, 4);
291 /* do the protections first. */
294 if (prot_buf[1] == 'w') {/* we can skip all of this if it's not writable. */
297 buf_ptr[OFFSET_MAP_START+ADDR_WIDTH] = '\0';
298 *start = strtoul(tok, NULL, 16);
300 tok = buf_ptr+OFFSET_MAP_END;
301 buf_ptr[OFFSET_MAP_END+ADDR_WIDTH] = '\0';
302 *end = strtoul(tok, NULL, 16);
304 buf_ptr += OFFSET_MAP_MAJDEV;
306 while (*buf_ptr != ':') buf_ptr++;
308 *maj_dev = strtoul(tok, NULL, 16);
311 while (*buf_ptr && *buf_ptr++ != '\n');
316 #endif /* Need to parse /proc/self/maps. */
318 #if defined(SEARCH_FOR_DATA_START)
319 /* The I386 case can be handled without a search. The Alpha case */
320 /* used to be handled differently as well, but the rules changed */
321 /* for recent Linux versions. This seems to be the easiest way to */
322 /* cover all versions. */
325 /* Some Linux distributions arrange to define __data_start. Some */
326 /* define data_start as a weak symbol. The latter is technically */
327 /* broken, since the user program may define data_start, in which */
/* case we lose.  Nonetheless, we try both, preferring __data_start.  */
329 /* We assume gcc-compatible pragmas. */
330 # pragma weak __data_start
331 extern int __data_start[];
332 # pragma weak data_start
333 extern int data_start[];
339 void GC_init_linux_data_start()
341 extern ptr_t GC_find_limit();
344 /* Try the easy approaches first: */
345 if ((ptr_t)__data_start != 0) {
346 GC_data_start = (ptr_t)(__data_start);
349 if ((ptr_t)data_start != 0) {
350 GC_data_start = (ptr_t)(data_start);
354 GC_data_start = GC_find_limit((ptr_t)(_end), FALSE);
360 # ifndef ECOS_GC_MEMORY_SIZE
361 # define ECOS_GC_MEMORY_SIZE (448 * 1024)
362 # endif /* ECOS_GC_MEMORY_SIZE */
364 // setjmp() function, as described in ANSI para 7.6.1.1
366 #define SETJMP( __env__ ) hal_setjmp( __env__ )
368 // FIXME: This is a simple way of allocating memory which is
369 // compatible with ECOS early releases. Later releases use a more
370 // sophisticated means of allocating memory than this simple static
371 // allocator, but this method is at least bound to work.
372 static char memory[ECOS_GC_MEMORY_SIZE];
373 static char *brk = memory;
375 static void *tiny_sbrk(ptrdiff_t increment)
381 if (brk > memory + sizeof memory)
389 #define sbrk tiny_sbrk
392 #if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__)
395 void GC_init_netbsd_elf()
397 extern ptr_t GC_find_limit();
398 extern char **environ;
399 /* This may need to be environ, without the underscore, for */
401 GC_data_start = GC_find_limit((ptr_t)&environ, FALSE);
409 # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
412 unsigned short magic_number;
413 unsigned short padding[29];
417 #define E_MAGIC(x) (x).magic_number
418 #define EMAGIC 0x5A4D
419 #define E_LFANEW(x) (x).new_exe_offset
422 unsigned char magic_number[2];
423 unsigned char byte_order;
424 unsigned char word_order;
425 unsigned long exe_format_level;
428 unsigned long padding1[13];
429 unsigned long object_table_offset;
430 unsigned long object_count;
431 unsigned long padding2[31];
434 #define E32_MAGIC1(x) (x).magic_number[0]
435 #define E32MAGIC1 'L'
436 #define E32_MAGIC2(x) (x).magic_number[1]
437 #define E32MAGIC2 'X'
438 #define E32_BORDER(x) (x).byte_order
440 #define E32_WORDER(x) (x).word_order
442 #define E32_CPU(x) (x).cpu
444 #define E32_OBJTAB(x) (x).object_table_offset
445 #define E32_OBJCNT(x) (x).object_count
451 unsigned long pagemap;
452 unsigned long mapsize;
453 unsigned long reserved;
456 #define O32_FLAGS(x) (x).flags
457 #define OBJREAD 0x0001L
458 #define OBJWRITE 0x0002L
459 #define OBJINVALID 0x0080L
460 #define O32_SIZE(x) (x).size
461 #define O32_BASE(x) (x).base
463 # else /* IBM's compiler */
465 /* A kludge to get around what appears to be a header file bug */
467 # define WORD unsigned short
470 # define DWORD unsigned long
477 # endif /* __IBMC__ */
479 # define INCL_DOSEXCEPTIONS
480 # define INCL_DOSPROCESS
481 # define INCL_DOSERRORS
482 # define INCL_DOSMODULEMGR
483 # define INCL_DOSMEMMGR
487 /* Disable and enable signals during nontrivial allocations */
489 void GC_disable_signals(void)
493 DosEnterMustComplete(&nest);
494 if (nest != 1) ABORT("nested GC_disable_signals");
497 void GC_enable_signals(void)
501 DosExitMustComplete(&nest);
502 if (nest != 0) ABORT("GC_enable_signals");
508 # if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
509 && !defined(MSWINCE) \
510 && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW) \
511 && !defined(NOSYS) && !defined(ECOS)
513 # if defined(sigmask) && !defined(UTS4) && !defined(HURD)
514 /* Use the traditional BSD interface */
515 # define SIGSET_T int
516 # define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
517 # define SIG_FILL(set) (set) = 0x7fffffff
518 /* Setting the leading bit appears to provoke a bug in some */
519 /* longjmp implementations. Most systems appear not to have */
521 # define SIGSETMASK(old, new) (old) = sigsetmask(new)
523 /* Use POSIX/SYSV interface */
524 # define SIGSET_T sigset_t
525 # define SIG_DEL(set, signal) sigdelset(&(set), (signal))
526 # define SIG_FILL(set) sigfillset(&set)
527 # define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
530 static GC_bool mask_initialized = FALSE;
532 static SIGSET_T new_mask;
534 static SIGSET_T old_mask;
536 static SIGSET_T dummy;
538 #if defined(PRINTSTATS) && !defined(THREADS)
539 # define CHECK_SIGNALS
540 int GC_sig_disabled = 0;
543 void GC_disable_signals()
545 if (!mask_initialized) {
548 SIG_DEL(new_mask, SIGSEGV);
549 SIG_DEL(new_mask, SIGILL);
550 SIG_DEL(new_mask, SIGQUIT);
552 SIG_DEL(new_mask, SIGBUS);
555 SIG_DEL(new_mask, SIGIOT);
558 SIG_DEL(new_mask, SIGEMT);
561 SIG_DEL(new_mask, SIGTRAP);
563 mask_initialized = TRUE;
565 # ifdef CHECK_SIGNALS
566 if (GC_sig_disabled != 0) ABORT("Nested disables");
569 SIGSETMASK(old_mask,new_mask);
572 void GC_enable_signals()
574 # ifdef CHECK_SIGNALS
575 if (GC_sig_disabled != 1) ABORT("Unmatched enable");
578 SIGSETMASK(dummy,old_mask);
585 /* Ivan Demakov: simplest way (to me) */
587 void GC_disable_signals() { }
588 void GC_enable_signals() { }
591 /* Find the page size */
594 # if defined(MSWIN32) || defined(MSWINCE)
595 void GC_setpagesize()
597 GetSystemInfo(&GC_sysinfo);
598 GC_page_size = GC_sysinfo.dwPageSize;
602 # if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
603 || defined(USE_MUNMAP)
604 void GC_setpagesize()
606 GC_page_size = GETPAGESIZE();
609 /* It's acceptable to fake it. */
610 void GC_setpagesize()
612 GC_page_size = HBLKSIZE;
618 * Find the base of the stack.
619 * Used only in single-threaded environment.
620 * With threads, GC_mark_roots needs to know how to do this.
621 * Called with allocator lock held.
623 # if defined(MSWIN32) || defined(MSWINCE)
624 # define is_writable(prot) ((prot) == PAGE_READWRITE \
625 || (prot) == PAGE_WRITECOPY \
626 || (prot) == PAGE_EXECUTE_READWRITE \
627 || (prot) == PAGE_EXECUTE_WRITECOPY)
628 /* Return the number of bytes that are writable starting at p. */
629 /* The pointer p is assumed to be page aligned. */
630 /* If base is not 0, *base becomes the beginning of the */
631 /* allocation region containing p. */
632 word GC_get_writable_length(ptr_t p, ptr_t *base)
634 MEMORY_BASIC_INFORMATION buf;
638 result = VirtualQuery(p, &buf, sizeof(buf));
639 if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
640 if (base != 0) *base = (ptr_t)(buf.AllocationBase);
641 protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
642 if (!is_writable(protect)) {
645 if (buf.State != MEM_COMMIT) return(0);
646 return(buf.RegionSize);
649 ptr_t GC_get_stack_base()
652 ptr_t sp = (ptr_t)(&dummy);
653 ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
654 word size = GC_get_writable_length(trunc_sp, 0);
656 return(trunc_sp + size);
660 # endif /* MS Windows */
663 # include <kernel/OS.h>
664 ptr_t GC_get_stack_base(){
666 get_thread_info(find_thread(NULL),&th);
674 ptr_t GC_get_stack_base()
679 if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
680 GC_err_printf0("DosGetInfoBlocks failed\n");
681 ABORT("DosGetInfoBlocks failed\n");
683 return((ptr_t)(ptib -> tib_pstacklimit));
690 # include "AmigaOS.c"
694 # if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)
697 typedef void (*handler)(int);
699 typedef void (*handler)();
702 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
703 || defined(HURD) || defined(NETBSD)
704 static struct sigaction old_segv_act;
705 # if defined(_sigargs) /* !Irix6.x */ || defined(HPUX) \
706 || defined(HURD) || defined(NETBSD)
707 static struct sigaction old_bus_act;
710 static handler old_segv_handler, old_bus_handler;
714 void GC_set_and_save_fault_handler(handler h)
716 void GC_set_and_save_fault_handler(h)
720 # if defined(SUNOS5SIGS) || defined(IRIX5) \
721 || defined(OSF1) || defined(HURD) || defined(NETBSD)
722 struct sigaction act;
725 # if 0 /* Was necessary for Solaris 2.3 and very temporary */
727 act.sa_flags = SA_RESTART | SA_NODEFER;
729 act.sa_flags = SA_RESTART;
732 (void) sigemptyset(&act.sa_mask);
733 # ifdef GC_IRIX_THREADS
734 /* Older versions have a bug related to retrieving and */
735 /* and setting a handler at the same time. */
736 (void) sigaction(SIGSEGV, 0, &old_segv_act);
737 (void) sigaction(SIGSEGV, &act, 0);
739 (void) sigaction(SIGSEGV, &act, &old_segv_act);
740 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
741 || defined(HPUX) || defined(HURD) || defined(NETBSD)
742 /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
743 /* Pthreads doesn't exist under Irix 5.x, so we */
744 /* don't have to worry in the threads case. */
745 (void) sigaction(SIGBUS, &act, &old_bus_act);
747 # endif /* GC_IRIX_THREADS */
749 old_segv_handler = signal(SIGSEGV, h);
751 old_bus_handler = signal(SIGBUS, h);
755 # endif /* NEED_FIND_LIMIT || UNIX_LIKE */
757 # ifdef NEED_FIND_LIMIT
758 /* Some tools to implement HEURISTIC2 */
759 # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
760 /* static */ JMP_BUF GC_jmp_buf;
763 void GC_fault_handler(sig)
766 LONGJMP(GC_jmp_buf, 1);
769 void GC_setup_temporary_fault_handler()
771 GC_set_and_save_fault_handler(GC_fault_handler);
774 void GC_reset_fault_handler()
776 # if defined(SUNOS5SIGS) || defined(IRIX5) \
777 || defined(OSF1) || defined(HURD) || defined(NETBSD)
778 (void) sigaction(SIGSEGV, &old_segv_act, 0);
779 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
780 || defined(HPUX) || defined(HURD) || defined(NETBSD)
781 (void) sigaction(SIGBUS, &old_bus_act, 0);
784 (void) signal(SIGSEGV, old_segv_handler);
786 (void) signal(SIGBUS, old_bus_handler);
791 /* Return the first nonaddressible location > p (up) or */
792 /* the smallest location q s.t. [q,p) is addressable (!up). */
793 /* We assume that p (up) or p-1 (!up) is addressable. */
794 ptr_t GC_find_limit(p, up)
798 static VOLATILE ptr_t result;
799 /* Needs to be static, since otherwise it may not be */
800 /* preserved across the longjmp. Can safely be */
801 /* static since it's only called once, with the */
802 /* allocation lock held. */
805 GC_setup_temporary_fault_handler();
806 if (SETJMP(GC_jmp_buf) == 0) {
807 result = (ptr_t)(((word)(p))
808 & ~(MIN_PAGE_SIZE-1));
811 result += MIN_PAGE_SIZE;
813 result -= MIN_PAGE_SIZE;
815 GC_noop1((word)(*result));
818 GC_reset_fault_handler();
820 result += MIN_PAGE_SIZE;
826 #if defined(ECOS) || defined(NOSYS)
827 ptr_t GC_get_stack_base()
833 #ifdef HPUX_STACKBOTTOM
835 #include <sys/param.h>
836 #include <sys/pstat.h>
838 ptr_t GC_get_register_stack_base(void)
840 struct pst_vm_status vm_status;
843 while (pstat_getprocvm(&vm_status, sizeof(vm_status), 0, i++) == 1) {
844 if (vm_status.pst_type == PS_RSESTACK) {
845 return (ptr_t) vm_status.pst_vaddr;
849 /* old way to get the register stackbottom */
850 return (ptr_t)(((word)GC_stackbottom - BACKING_STORE_DISPLACEMENT - 1)
851 & ~(BACKING_STORE_ALIGNMENT - 1));
854 #endif /* HPUX_STACK_BOTTOM */
856 #ifdef LINUX_STACKBOTTOM
858 #include <sys/types.h>
859 #include <sys/stat.h>
862 # define STAT_SKIP 27 /* Number of fields preceding startstack */
863 /* field in /proc/self/stat */
865 # pragma weak __libc_stack_end
866 extern ptr_t __libc_stack_end;
869 /* Try to read the backing store base from /proc/self/maps. */
870 /* We look for the writable mapping with a 0 major device, */
871 /* which is as close to our frame as possible, but below it.*/
872 static word backing_store_base_from_maps(char *maps)
875 char *buf_ptr = maps;
877 unsigned int maj_dev;
878 word current_best = 0;
882 buf_ptr = GC_parse_map_entry(buf_ptr, &start, &end, prot_buf, &maj_dev);
883 if (buf_ptr == NULL) return current_best;
884 if (prot_buf[1] == 'w' && maj_dev == 0) {
885 if (end < (word)(&dummy) && start > current_best) current_best = start;
891 static word backing_store_base_from_proc(void)
893 return GC_apply_to_maps(backing_store_base_from_maps);
896 # pragma weak __libc_ia64_register_backing_store_base
897 extern ptr_t __libc_ia64_register_backing_store_base;
899 ptr_t GC_get_register_stack_base(void)
901 if (0 != &__libc_ia64_register_backing_store_base
902 && 0 != __libc_ia64_register_backing_store_base) {
903 /* Glibc 2.2.4 has a bug such that for dynamically linked */
904 /* executables __libc_ia64_register_backing_store_base is */
905 /* defined but uninitialized during constructor calls. */
906 /* Hence we check for both nonzero address and value. */
907 return __libc_ia64_register_backing_store_base;
909 word result = backing_store_base_from_proc();
911 /* Use dumb heuristics. Works only for default configuration. */
912 result = (word)GC_stackbottom - BACKING_STORE_DISPLACEMENT;
913 result += BACKING_STORE_ALIGNMENT - 1;
914 result &= ~(BACKING_STORE_ALIGNMENT - 1);
915 /* Verify that it's at least readable. If not, we goofed. */
916 GC_noop1(*(word *)result);
918 return (ptr_t)result;
923 ptr_t GC_linux_stack_base(void)
925 /* We read the stack base value from /proc/self/stat. We do this */
926 /* using direct I/O system calls in order to avoid calling malloc */
927 /* in case REDIRECT_MALLOC is defined. */
928 # define STAT_BUF_SIZE 4096
929 # define STAT_READ read
930 /* Should probably call the real read, if read is wrapped. */
931 char stat_buf[STAT_BUF_SIZE];
935 size_t i, buf_offset = 0;
937 /* First try the easy way. This should work for glibc 2.2 */
938 /* This fails in a prelinked ("prelink" command) executable */
939 /* since the correct value of __libc_stack_end never */
940 /* becomes visible to us. The second test works around */
942 if (0 != &__libc_stack_end && 0 != __libc_stack_end ) {
944 /* Some versions of glibc set the address 16 bytes too */
945 /* low while the initialization code is running. */
946 if (((word)__libc_stack_end & 0xfff) + 0x10 < 0x1000) {
947 return __libc_stack_end + 0x10;
948 } /* Otherwise it's not safe to add 16 bytes and we fall */
949 /* back to using /proc. */
951 return __libc_stack_end;
954 f = open("/proc/self/stat", O_RDONLY);
955 if (f < 0 || STAT_READ(f, stat_buf, STAT_BUF_SIZE) < 2 * STAT_SKIP) {
956 ABORT("Couldn't read /proc/self/stat");
958 c = stat_buf[buf_offset++];
959 /* Skip the required number of fields. This number is hopefully */
960 /* constant across all Linux implementations. */
961 for (i = 0; i < STAT_SKIP; ++i) {
962 while (isspace(c)) c = stat_buf[buf_offset++];
963 while (!isspace(c)) c = stat_buf[buf_offset++];
965 while (isspace(c)) c = stat_buf[buf_offset++];
969 c = stat_buf[buf_offset++];
972 if (result < 0x10000000) ABORT("Absurd stack bottom value");
973 return (ptr_t)result;
976 #endif /* LINUX_STACKBOTTOM */
978 #ifdef FREEBSD_STACKBOTTOM
980 /* This uses an undocumented sysctl call, but at least one expert */
981 /* believes it will stay. */
984 #include <sys/types.h>
985 #include <sys/sysctl.h>
987 ptr_t GC_freebsd_stack_base(void)
989 int nm[2] = {CTL_KERN, KERN_USRSTACK};
991 size_t len = sizeof(ptr_t);
992 int r = sysctl(nm, 2, &base, &len, NULL, 0);
994 if (r) ABORT("Error getting stack base");
999 #endif /* FREEBSD_STACKBOTTOM */
1001 #if !defined(BEOS) && !defined(AMIGA) && !defined(MSWIN32) \
1002 && !defined(MSWINCE) && !defined(OS2) && !defined(NOSYS) && !defined(ECOS)
1004 ptr_t GC_get_stack_base()
1006 # if defined(HEURISTIC1) || defined(HEURISTIC2) || \
1007 defined(LINUX_STACKBOTTOM) || defined(FREEBSD_STACKBOTTOM)
1012 # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
1015 return(STACKBOTTOM);
1018 # ifdef STACK_GROWS_DOWN
1019 result = (ptr_t)((((word)(&dummy))
1020 + STACKBOTTOM_ALIGNMENT_M1)
1021 & ~STACKBOTTOM_ALIGNMENT_M1);
1023 result = (ptr_t)(((word)(&dummy))
1024 & ~STACKBOTTOM_ALIGNMENT_M1);
1026 # endif /* HEURISTIC1 */
1027 # ifdef LINUX_STACKBOTTOM
1028 result = GC_linux_stack_base();
1030 # ifdef FREEBSD_STACKBOTTOM
1031 result = GC_freebsd_stack_base();
1034 # ifdef STACK_GROWS_DOWN
1035 result = GC_find_limit((ptr_t)(&dummy), TRUE);
1036 # ifdef HEURISTIC2_LIMIT
1037 if (result > HEURISTIC2_LIMIT
1038 && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
1039 result = HEURISTIC2_LIMIT;
1043 result = GC_find_limit((ptr_t)(&dummy), FALSE);
1044 # ifdef HEURISTIC2_LIMIT
1045 if (result < HEURISTIC2_LIMIT
1046 && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
1047 result = HEURISTIC2_LIMIT;
1052 # endif /* HEURISTIC2 */
1053 # ifdef STACK_GROWS_DOWN
1054 if (result == 0) result = (ptr_t)(signed_word)(-sizeof(ptr_t));
1057 # endif /* STACKBOTTOM */
1060 # endif /* ! AMIGA, !OS 2, ! MS Windows, !BEOS, !NOSYS, !ECOS */
1063 * Register static data segment(s) as roots.
1064 * If more data segments are added later then they need to be registered
1065 * add that point (as we do with SunOS dynamic loading),
1066 * or GC_mark_roots needs to check for them (as we do with PCR).
1067 * Called with allocator lock held.
1072 void GC_register_data_segments()
1076 HMODULE module_handle;
1077 # define PBUFSIZ 512
1078 UCHAR path[PBUFSIZ];
1080 struct exe_hdr hdrdos; /* MSDOS header. */
1081 struct e32_exe hdr386; /* Real header for my executable */
1082 struct o32_obj seg; /* Currrent segment */
1086 if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
1087 GC_err_printf0("DosGetInfoBlocks failed\n");
1088 ABORT("DosGetInfoBlocks failed\n");
1090 module_handle = ppib -> pib_hmte;
1091 if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
1092 GC_err_printf0("DosQueryModuleName failed\n");
1093 ABORT("DosGetInfoBlocks failed\n");
1095 myexefile = fopen(path, "rb");
1096 if (myexefile == 0) {
1097 GC_err_puts("Couldn't open executable ");
1098 GC_err_puts(path); GC_err_puts("\n");
1099 ABORT("Failed to open executable\n");
1101 if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
1102 GC_err_puts("Couldn't read MSDOS header from ");
1103 GC_err_puts(path); GC_err_puts("\n");
1104 ABORT("Couldn't read MSDOS header");
1106 if (E_MAGIC(hdrdos) != EMAGIC) {
1107 GC_err_puts("Executable has wrong DOS magic number: ");
1108 GC_err_puts(path); GC_err_puts("\n");
1109 ABORT("Bad DOS magic number");
1111 if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
1112 GC_err_puts("Seek to new header failed in ");
1113 GC_err_puts(path); GC_err_puts("\n");
1114 ABORT("Bad DOS magic number");
1116 if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
1117 GC_err_puts("Couldn't read MSDOS header from ");
1118 GC_err_puts(path); GC_err_puts("\n");
1119 ABORT("Couldn't read OS/2 header");
1121 if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
1122 GC_err_puts("Executable has wrong OS/2 magic number:");
1123 GC_err_puts(path); GC_err_puts("\n");
1124 ABORT("Bad OS/2 magic number");
1126 if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
1127 GC_err_puts("Executable %s has wrong byte order: ");
1128 GC_err_puts(path); GC_err_puts("\n");
1129 ABORT("Bad byte order");
1131 if ( E32_CPU(hdr386) == E32CPU286) {
1132 GC_err_puts("GC can't handle 80286 executables: ");
1133 GC_err_puts(path); GC_err_puts("\n");
1136 if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
1138 GC_err_puts("Seek to object table failed: ");
1139 GC_err_puts(path); GC_err_puts("\n");
1140 ABORT("Seek to object table failed");
1142 for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
1144 if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
1145 GC_err_puts("Couldn't read obj table entry from ");
1146 GC_err_puts(path); GC_err_puts("\n");
1147 ABORT("Couldn't read obj table entry");
1149 flags = O32_FLAGS(seg);
1150 if (!(flags & OBJWRITE)) continue;
1151 if (!(flags & OBJREAD)) continue;
1152 if (flags & OBJINVALID) {
1153 GC_err_printf0("Object with invalid pages?\n");
1156 GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
1162 # if defined(MSWIN32) || defined(MSWINCE)
1165 /* Unfortunately, we have to handle win32s very differently from NT, */
1166 /* Since VirtualQuery has very different semantics. In particular, */
1167 /* under win32s a VirtualQuery call on an unmapped page returns an */
1168 /* invalid result. Under NT, GC_register_data_segments is a noop and */
1169 /* all real work is done by GC_register_dynamic_libraries. Under */
1170 /* win32s, we cannot find the data segments associated with dll's. */
1171 /* We register the main data segment here. */
1172 GC_bool GC_no_win32_dlls = FALSE;
1173 /* This used to be set for gcc, to avoid dealing with */
1174 /* the structured exception handling issues. But we now have */
1175 /* assembly code to do that right. */
1177 void GC_init_win32()
1179 /* if we're running under win32s, assume that no DLLs will be loaded */
1180 DWORD v = GetVersion();
1181 GC_no_win32_dlls |= ((v & 0x80000000) && (v & 0xff) <= 3);
1184 /* Return the smallest address a such that VirtualQuery */
1185 /* returns correct results for all addresses between a and start. */
1186 /* Assumes VirtualQuery returns correct information for start. */
1187 ptr_t GC_least_described_address(ptr_t start)
1189 MEMORY_BASIC_INFORMATION buf;
1195 limit = GC_sysinfo.lpMinimumApplicationAddress;
1196 p = (ptr_t)((word)start & ~(GC_page_size - 1));
1198 q = (LPVOID)(p - GC_page_size);
1199 if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
1200 result = VirtualQuery(q, &buf, sizeof(buf));
1201 if (result != sizeof(buf) || buf.AllocationBase == 0) break;
1202 p = (ptr_t)(buf.AllocationBase);
1208 # ifndef REDIRECT_MALLOC
1209 /* We maintain a linked list of AllocationBase values that we know */
1210 /* correspond to malloc heap sections. Currently this is only called */
1211 /* during a GC. But there is some hope that for long running */
1212 /* programs we will eventually see most heap sections. */
1214 /* In the long run, it would be more reliable to occasionally walk */
1215 /* the malloc heap with HeapWalk on the default heap. But that */
1216 /* apparently works only for NT-based Windows. */
1218 /* In the long run, a better data structure would also be nice ... */
1219 struct GC_malloc_heap_list {
1220 void * allocation_base;
1221 struct GC_malloc_heap_list *next;
1222 } *GC_malloc_heap_l = 0;
1224 /* Is p the base of one of the malloc heap sections we already know */
1226 GC_bool GC_is_malloc_heap_base(ptr_t p)
1228 struct GC_malloc_heap_list *q = GC_malloc_heap_l;
1231 if (q -> allocation_base == p) return TRUE;
1237 void *GC_get_allocation_base(void *p)
1239 MEMORY_BASIC_INFORMATION buf;
1240 DWORD result = VirtualQuery(p, &buf, sizeof(buf));
1241 if (result != sizeof(buf)) {
1242 ABORT("Weird VirtualQuery result");
1244 return buf.AllocationBase;
1247 size_t GC_max_root_size = 100000; /* Appr. largest root size. */
1249 void GC_add_current_malloc_heap()
1251 struct GC_malloc_heap_list *new_l =
1252 malloc(sizeof(struct GC_malloc_heap_list));
1253 void * candidate = GC_get_allocation_base(new_l);
1255 if (new_l == 0) return;
1256 if (GC_is_malloc_heap_base(candidate)) {
1257 /* Try a little harder to find malloc heap. */
1258 size_t req_size = 10000;
1260 void *p = malloc(req_size);
1261 if (0 == p) { free(new_l); return; }
1262 candidate = GC_get_allocation_base(p);
1265 } while (GC_is_malloc_heap_base(candidate)
1266 && req_size < GC_max_root_size/10 && req_size < 500000);
1267 if (GC_is_malloc_heap_base(candidate)) {
1268 free(new_l); return;
1273 GC_printf1("Found new system malloc AllocationBase at 0x%lx\n",
1276 new_l -> allocation_base = candidate;
1277 new_l -> next = GC_malloc_heap_l;
1278 GC_malloc_heap_l = new_l;
1280 # endif /* REDIRECT_MALLOC */
1282 /* Is p the start of either the malloc heap, or of one of our */
1283 /* heap sections? */
1284 GC_bool GC_is_heap_base (ptr_t p)
1289 # ifndef REDIRECT_MALLOC
1290 static word last_gc_no = -1;
/* Re-probe the system malloc heap at most once per collection, */
/* keyed on the collection counter GC_gc_no. */
1292 if (last_gc_no != GC_gc_no) {
1293 GC_add_current_malloc_heap();
1294 last_gc_no = GC_gc_no;
/* Track the largest root size seen, for the probe bound used in */
/* GC_add_current_malloc_heap. */
1296 if (GC_root_size > GC_max_root_size) GC_max_root_size = GC_root_size;
1297 if (GC_is_malloc_heap_base(p)) return TRUE;
/* Also match any section the collector allocated itself. */
1299 for (i = 0; i < GC_n_heap_bases; i++) {
1300 if (GC_heap_bases[i] == p) return TRUE;
/* Scan forward from static_root through adjacent VirtualAlloc */
/* regions, registering maximal runs of committed, writable pages */
/* as static data roots. Stops at unqueryable memory or at a */
/* region recognized as a GC heap section. */
1306 void GC_register_root_section(ptr_t static_root)
1308 MEMORY_BASIC_INFORMATION buf;
1313 char * limit, * new_limit;
/* Only needed when win32 DLL scanning is disabled; otherwise the */
/* DLL machinery registers data segments for us. */
1315 if (!GC_no_win32_dlls) return;
1316 p = base = limit = GC_least_described_address(static_root);
1317 while (p < GC_sysinfo.lpMaximumApplicationAddress) {
1318 result = VirtualQuery(p, &buf, sizeof(buf));
1319 if (result != sizeof(buf) || buf.AllocationBase == 0
1320 || GC_is_heap_base(buf.AllocationBase)) break;
1321 new_limit = (char *)p + buf.RegionSize;
1322 protect = buf.Protect;
1323 if (buf.State == MEM_COMMIT
1324 && is_writable(protect)) {
/* Contiguous with the current run: it is extended (the */
/* extension itself is not visible in this excerpt). */
1325 if ((char *)p == limit) {
/* Non-writable gap: flush the accumulated run, if any. */
1328 if (base != limit) GC_add_roots_inner(base, limit, FALSE);
1333 if (p > (LPVOID)new_limit /* overflow */) break;
1334 p = (LPVOID)new_limit;
/* Flush the final pending run, if any. */
1336 if (base != limit) GC_add_roots_inner(base, limit, FALSE);
/* Win32 entry point: register the program's static data as roots, */
/* scanning outward from the address of a local object. */
/* NOTE(review): 'dummy' is presumably a static variable declared */
/* just above, outside this excerpt -- confirm in the full source. */
1340 void GC_register_data_segments()
1344 GC_register_root_section((ptr_t)(&dummy));
1348 # else /* !OS2 && !Windows */
1350 # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
1351 || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
/* Heuristically locate the start of the writable data segment on */
/* SVR4-like systems, given the address of the etext symbol and an */
/* upper bound on the page size. Probes with a write under a */
/* temporary fault handler; on a fault, falls back to searching */
/* backward from DATAEND via GC_find_limit. */
1352 ptr_t GC_SysVGetDataStart(max_page_size, etext_addr)
1356 word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1357 & ~(sizeof(word) - 1);
1358 /* etext rounded to word boundary */
/* Plan A: the data segment starts at the same page offset as */
/* etext, one max_page_size-aligned page later. */
1359 word next_page = ((text_end + (word)max_page_size - 1)
1360 & ~((word)max_page_size - 1));
1361 word page_offset = (text_end & ((word)max_page_size - 1));
1362 VOLATILE char * result = (char *)(next_page + page_offset);
1363 /* Note that this isn't equivalent to just adding */
1364 /* max_page_size to &etext if &etext is at a page boundary */
1366 GC_setup_temporary_fault_handler();
1367 if (SETJMP(GC_jmp_buf) == 0) {
1368 /* Try writing to the address. */
1370 GC_reset_fault_handler();
1372 GC_reset_fault_handler();
1373 /* We got here via a longjmp. The address is not readable. */
1374 /* This is known to happen under Solaris 2.4 + gcc, which place */
1375 /* string constants in the text segment, but after etext. */
1376 /* Use plan B. Note that we now know there is a gap between */
1377 /* text and data segments, so plan A bought us something. */
1378 result = (char *)GC_find_limit((ptr_t)(DATAEND), FALSE);
1380 return((ptr_t)result);
1384 # if defined(FREEBSD) && defined(I386) && !defined(PCR)
1385 /* It's unclear whether this should be identical to the above, or */
1386 /* whether it should apply to non-X86 architectures. */
1387 /* For now we don't assume that there is always an empty page after */
1388 /* etext. But in some cases there actually seems to be slightly more. */
1389 /* This also deals with holes between read-only data and writable data. */
/* Probe forward, page by page, from the rounded etext address up */
/* to DATAEND; a read fault drops into the fallback search below. */
1390 ptr_t GC_FreeBSDGetDataStart(max_page_size, etext_addr)
1394 word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1395 & ~(sizeof(word) - 1);
1396 /* etext rounded to word boundary */
1397 VOLATILE word next_page = (text_end + (word)max_page_size - 1)
1398 & ~((word)max_page_size - 1);
1399 VOLATILE ptr_t result = (ptr_t)text_end;
1400 GC_setup_temporary_fault_handler();
1401 if (SETJMP(GC_jmp_buf) == 0) {
1402 /* Try reading at the address. */
1403 /* This should happen before there is another thread. */
1404 for (; next_page < (word)(DATAEND); next_page += (word)max_page_size)
1405 *(VOLATILE char *)next_page;
1406 GC_reset_fault_handler();
1408 GC_reset_fault_handler();
1409 /* As above, we go to plan B */
1410 result = GC_find_limit((ptr_t)(DATAEND), FALSE);
1420 # define GC_AMIGA_DS
1421 # include "AmigaOS.c"
1424 #else /* !OS2 && !Windows && !AMIGA */
/* Generic (non-OS/2, non-Windows, non-Amiga) registration of the */
/* program's static data segments as GC roots, with per-platform */
/* special cases for Solaris threads and classic MacOS compilers. */
1426 void GC_register_data_segments()
1428 # if !defined(PCR) && !defined(SRC_M3) && !defined(MACOS)
1429 # if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
1430 /* As of Solaris 2.3, the Solaris threads implementation */
1431 /* allocates the data structure for the initial thread with */
1432 /* sbrk at process startup. It needs to be scanned, so that */
1433 /* we don't lose some malloc allocated data structures */
1434 /* hanging from it. We're on thin ice here ... */
1435 extern caddr_t sbrk();
/* Root region extends to the current break, not just DATAEND. */
1437 GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
1439 GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
1440 # if defined(DATASTART2)
/* Some platforms have a second, discontiguous data segment. */
1441 GC_add_roots_inner(DATASTART2, (char *)(DATAEND2), FALSE);
1447 # if defined(THINK_C)
1448 extern void* GC_MacGetDataStart(void);
1449 /* globals begin above stack and end at a5. */
1450 GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1451 (ptr_t)LMGetCurrentA5(), FALSE);
1453 # if defined(__MWERKS__)
1455 extern void* GC_MacGetDataStart(void);
1456 /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
1457 # if __option(far_data)
1458 extern void* GC_MacGetDataEnd(void);
1460 /* globals begin above stack and end at a5. */
1461 GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1462 (ptr_t)LMGetCurrentA5(), FALSE);
1463 /* MATTHEW: Handle Far Globals */
1464 # if __option(far_data)
1465 /* Far globals follow the QD globals: */
1466 GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
1467 (ptr_t)GC_MacGetDataEnd(), FALSE);
/* PowerPC CodeWarrior: linker-provided section bounds. */
1470 extern char __data_start__[], __data_end__[];
1471 GC_add_roots_inner((ptr_t)&__data_start__,
1472 (ptr_t)&__data_end__, FALSE);
1473 # endif /* __POWERPC__ */
1474 # endif /* __MWERKS__ */
1475 # endif /* !THINK_C */
1479 /* Dynamic libraries are added at every collection, since they may */
1483 # endif /* ! AMIGA */
1484 # endif /* ! MSWIN32 && ! MSWINCE*/
1488 * Auxiliary routines for obtaining memory from OS.
1491 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
1492 && !defined(MSWIN32) && !defined(MSWINCE) \
1493 && !defined(MACOS) && !defined(DOS4GW)
1496 extern caddr_t sbrk();
1499 # define SBRK_ARG_T ptrdiff_t
1501 # define SBRK_ARG_T int
1506 /* The compiler seems to generate speculative reads one past the end of */
1507 /* an allocated object. Hence we need to make sure that the page */
1508 /* following the last heap page is also mapped. */
/* sbrk-based allocator (RS/6000 variant): grows the break by one */
/* extra page so the page after the heap is always mapped, and */
/* reuses that spare page when the next call is contiguous. */
1509 ptr_t GC_unix_get_mem(bytes)
1512 caddr_t cur_brk = (caddr_t)sbrk(0);
1514 SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1515 static caddr_t my_brk_val = 0; /* Break value after our last call. */
1517 if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
/* Page-align the current break first, if necessary. */
1519 if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
1521 if (cur_brk == my_brk_val) {
1522 /* Use the extra block we allocated last time. */
1523 result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1524 if (result == (caddr_t)(-1)) return(0);
/* Back up over the spare page we reserved previously. */
1525 result -= GC_page_size;
/* Non-contiguous: allocate the requested amount plus a fresh */
/* spare guard page. */
1527 result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
1528 if (result == (caddr_t)(-1)) return(0);
1530 my_brk_val = result + bytes + GC_page_size; /* Always page aligned */
1531 return((ptr_t)result);
1534 #else /* Not RS6000 */
1536 #if defined(USE_MMAP) || defined(USE_MUNMAP)
1538 #ifdef USE_MMAP_FIXED
1539 # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
1540 /* Seems to yield better performance on Solaris 2, but can */
1541 /* be unreliable if something is already mapped at the address. */
1543 # define GC_MMAP_FLAGS MAP_PRIVATE
1546 #ifdef USE_MMAP_ANON
1548 # if defined(MAP_ANONYMOUS)
1549 # define OPT_MAP_ANON MAP_ANONYMOUS
1551 # define OPT_MAP_ANON MAP_ANON
1555 # define OPT_MAP_ANON 0
1558 #endif /* defined(USE_MMAP) || defined(USE_MUNMAP) */
1560 #if defined(USE_MMAP)
1561 /* Tested only under Linux, IRIX5 and Solaris 2 */
1564 # define HEAP_START 0
/* mmap-based allocator: maps 'bytes' (must be a page multiple) of */
/* zero-filled memory, hinting each request just past the previous */
/* mapping so the heap tends to stay contiguous. */
1567 ptr_t GC_unix_get_mem(bytes)
1571 static ptr_t last_addr = HEAP_START; /* Hint for the next mapping. */
1573 # ifndef USE_MMAP_ANON
1574 static GC_bool initialized = FALSE;
/* Without anonymous mappings, lazily open /dev/zero once and */
/* keep it from leaking across exec. */
1577 zero_fd = open("/dev/zero", O_RDONLY);
1578 fcntl(zero_fd, F_SETFD, FD_CLOEXEC);
1583 if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg");
1584 result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1585 GC_MMAP_FLAGS | OPT_MAP_ANON, zero_fd, 0/* offset */);
1586 if (result == MAP_FAILED) return(0);
/* Round the hint up to the next page boundary past this mapping. */
1587 last_addr = (ptr_t)result + bytes + GC_page_size - 1;
1588 last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
1589 # if !defined(LINUX)
1590 if (last_addr == 0) {
1591 /* Oops. We got the end of the address space. This isn't */
1592 /* usable by arbitrary C code, since one-past-end pointers */
1593 /* don't work, so we discard it and try again. */
1594 munmap(result, (size_t)(-GC_page_size) - (size_t)result);
1595 /* Leave last page mapped, so we can't repeat. */
1596 return GC_unix_get_mem(bytes);
/* On Linux the kernel is assumed never to return the very top */
/* of the address space. */
1599 GC_ASSERT(last_addr != 0);
1601 return((ptr_t)result);
1604 #else /* Not RS6000, not USE_MMAP */
/* Plain sbrk-based allocator for the remaining Unix systems. */
1605 ptr_t GC_unix_get_mem(bytes)
1610 /* Bare sbrk isn't thread safe. Play by malloc rules. */
1611 /* The equivalent may be needed on other systems as well. */
1615 ptr_t cur_brk = (ptr_t)sbrk(0);
1616 SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1618 if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
/* Page-align the current break before extending it. */
1620 if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) return(0);
1622 result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1623 if (result == (ptr_t)(-1)) result = 0;
1631 #endif /* Not USE_MMAP */
1632 #endif /* Not RS6000 */
/* OS/2 allocator: commit 'bytes' of read/write/execute memory via */
/* DosAllocMem. */
1638 void * os2_alloc(size_t bytes)
1642 if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
1643 PAG_WRITE | PAG_COMMIT)
/* NOTE(review): retries recursively on a zero result; the error */
/* branch between these lines is not visible in this excerpt, so */
/* confirm the retry cannot loop forever. */
1647 if (result == 0) return(os2_alloc(bytes));
1654 # if defined(MSWIN32) || defined(MSWINCE)
1655 SYSTEM_INFO GC_sysinfo; /* Cached system information. */
1660 # ifdef USE_GLOBAL_ALLOC
1661 # define GLOBAL_ALLOC_TEST 1
1663 # define GLOBAL_ALLOC_TEST GC_no_win32_dlls
1666 word GC_n_heap_bases = 0; /* Number of entries in GC_heap_bases. */
/* Win32 heap allocator. Uses GlobalAlloc when DLL-based data */
/* segment tracking is off (or USE_GLOBAL_ALLOC forces it); */
/* otherwise VirtualAlloc. Each section base is recorded in */
/* GC_heap_bases so GC_is_heap_base can recognize it. */
1668 ptr_t GC_win32_get_mem(bytes)
1673 if (GLOBAL_ALLOC_TEST) {
1674 /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
1675 /* There are also unconfirmed rumors of other */
1676 /* problems, so we dodge the issue. */
/* Over-allocate by HBLKSIZE, then round up to an HBLKSIZE */
/* boundary, since GlobalAlloc gives no alignment guarantee. */
1677 result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
1678 result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1));
1680 /* VirtualProtect only works on regions returned by a */
1681 /* single VirtualAlloc call. Thus we allocate one */
1682 /* extra page, which will prevent merging of blocks */
1683 /* in separate regions, and eliminate any temptation */
1684 /* to call VirtualProtect on a range spanning regions. */
1685 /* This wastes a small amount of memory, and risks */
1686 /* increased fragmentation. But better alternatives */
1687 /* would require effort. */
1688 result = (ptr_t) VirtualAlloc(NULL, bytes + 1,
1689 MEM_COMMIT | MEM_RESERVE,
1690 PAGE_EXECUTE_READWRITE);
1692 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1693 /* If I read the documentation correctly, this can */
1694 /* only happen if HBLKSIZE > 64k or not a power of 2. */
1695 if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
1696 GC_heap_bases[GC_n_heap_bases++] = result;
/* Release all heap sections previously obtained via GlobalAlloc. */
/* Only the GC_no_win32_dlls path allocated with GlobalAlloc, so */
/* only that case frees here. */
1700 void GC_win32_free_heap ()
1702 if (GC_no_win32_dlls) {
1703 while (GC_n_heap_bases > 0) {
1704 GlobalFree (GC_heap_bases[--GC_n_heap_bases]);
1705 GC_heap_bases[GC_n_heap_bases] = 0;
1712 # define GC_AMIGA_AM
1713 # include "AmigaOS.c"
1719 word GC_n_heap_bases = 0; /* Number of entries in GC_heap_bases. */
/* WinCE allocator: VirtualAlloc in two phases -- reserve address */
/* space in allocation-granularity chunks, then commit pages from */
/* a reserved chunk as requests arrive. GC_heap_lengths[i] tracks */
/* how much of section i is committed so far. */
1721 ptr_t GC_wince_get_mem(bytes)
1727 /* Round up allocation size to multiple of page size */
1728 bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);
1730 /* Try to find reserved, uncommitted pages */
1731 for (i = 0; i < GC_n_heap_bases; i++) {
/* Section i still has uncommitted space if its committed */
/* length is not a whole number of granularity units. */
1732 if (((word)(-(signed_word)GC_heap_lengths[i])
1733 & (GC_sysinfo.dwAllocationGranularity-1))
1735 result = GC_heap_bases[i] + GC_heap_lengths[i];
1740 if (i == GC_n_heap_bases) {
1741 /* Reserve more pages */
1742 word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
1743 & ~(GC_sysinfo.dwAllocationGranularity-1);
1744 /* If we ever support MPROTECT_VDB here, we will probably need to */
1745 /* ensure that res_bytes is strictly > bytes, so that VirtualProtect */
1746 /* never spans regions. It seems to be OK for a VirtualFree argument */
1747 /* to span regions, so we should be OK for now. */
1748 result = (ptr_t) VirtualAlloc(NULL, res_bytes,
1749 MEM_RESERVE | MEM_TOP_DOWN,
1750 PAGE_EXECUTE_READWRITE);
1751 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1752 /* If I read the documentation correctly, this can */
1753 /* only happen if HBLKSIZE > 64k or not a power of 2. */
1754 if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
1755 GC_heap_bases[GC_n_heap_bases] = result;
1756 GC_heap_lengths[GC_n_heap_bases] = 0;
/* Commit the requested pages within the chosen reservation. */
1761 result = (ptr_t) VirtualAlloc(result, bytes,
1763 PAGE_EXECUTE_READWRITE);
1764 if (result != NULL) {
1765 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1766 GC_heap_lengths[i] += bytes;
1775 /* For now, this only works on Win32/WinCE and some Unix-like */
1776 /* systems. If you have something else, don't define */
1778 /* We assume ANSI C to support this feature. */
1780 #if !defined(MSWIN32) && !defined(MSWINCE)
1783 #include <sys/mman.h>
1784 #include <sys/stat.h>
1785 #include <sys/types.h>
1789 /* Compute a page aligned starting address for the unmap */
1790 /* operation on a block of size bytes starting at start. */
1791 /* Return 0 if the block is too small to make this feasible. */
1792 ptr_t GC_unmap_start(ptr_t start, word bytes)
1794 ptr_t result = start;
1795 /* Round start to next page boundary. */
1796 result += GC_page_size - 1;
1797 result = (ptr_t)((word)result & ~(GC_page_size - 1));
/* Need at least one whole page inside [start, start+bytes). */
1798 if (result + GC_page_size > start + bytes) return 0;
1802 /* Compute end address for an unmap operation on the indicated */
/* block: the last page boundary at or below start+bytes. */
1804 ptr_t GC_unmap_end(ptr_t start, word bytes)
1806 ptr_t end_addr = start + bytes;
/* Round down to a page boundary. */
1807 end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
1811 /* Under Win32/WinCE we commit (map) and decommit (unmap) */
1812 /* memory using VirtualAlloc and VirtualFree. These functions */
1813 /* work on individual allocations of virtual memory, made */
1814 /* previously using VirtualAlloc with the MEM_RESERVE flag. */
1815 /* The ranges we need to (de)commit may span several of these */
1816 /* allocations; therefore we use VirtualQuery to check */
1817 /* allocation lengths, and split up the range as necessary. */
1819 /* We assume that GC_remap is called on exactly the same range */
1820 /* as a previous call to GC_unmap. It is safe to consistently */
1821 /* round the endpoints in both places. */
/* Return the page-aligned interior of [start, start+bytes) to the */
/* OS. A no-op if the block contains no whole page. */
1822 void GC_unmap(ptr_t start, word bytes)
1824 ptr_t start_addr = GC_unmap_start(start, bytes);
1825 ptr_t end_addr = GC_unmap_end(start, bytes);
1826 word len = end_addr - start_addr;
1827 if (0 == start_addr) return;
1828 # if defined(MSWIN32) || defined(MSWINCE)
/* Decommit region by region, since VirtualFree cannot span */
/* separate VirtualAlloc reservations. */
1830 MEMORY_BASIC_INFORMATION mem_info;
1832 if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
1833 != sizeof(mem_info))
1834 ABORT("Weird VirtualQuery result");
1835 free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
1836 if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
1837 ABORT("VirtualFree failed");
1838 GC_unmapped_bytes += free_len;
1839 start_addr += free_len;
1843 /* We immediately remap it to prevent an intervening mmap from */
1844 /* accidentally grabbing the same address space. */
1847 result = mmap(start_addr, len, PROT_NONE,
1848 MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
1849 zero_fd, 0/* offset */);
1850 if (result != (void *)start_addr) ABORT("mmap(...PROT_NONE...) failed");
1852 GC_unmapped_bytes += len;
/* Make the interior of [start, start+bytes) accessible again. */
/* Must be called with the same range as the matching GC_unmap, */
/* since both round the endpoints identically. */
1857 void GC_remap(ptr_t start, word bytes)
1859 ptr_t start_addr = GC_unmap_start(start, bytes);
1860 ptr_t end_addr = GC_unmap_end(start, bytes);
1861 word len = end_addr - start_addr;
1863 # if defined(MSWIN32) || defined(MSWINCE)
1866 if (0 == start_addr) return;
/* Recommit region by region, since VirtualAlloc cannot span */
/* separate reservations either. */
1868 MEMORY_BASIC_INFORMATION mem_info;
1870 if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
1871 != sizeof(mem_info))
1872 ABORT("Weird VirtualQuery result");
1873 alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
1874 result = VirtualAlloc(start_addr, alloc_len,
1876 PAGE_EXECUTE_READWRITE);
1877 if (result != start_addr) {
1878 ABORT("VirtualAlloc remapping failed");
1880 GC_unmapped_bytes -= alloc_len;
1881 start_addr += alloc_len;
1885 /* It was already remapped with PROT_NONE. */
1888 if (0 == start_addr) return;
/* On Unix only the protection needs to change back. */
1889 result = mprotect(start_addr, len,
1890 PROT_READ | PROT_WRITE | OPT_PROT_EXEC);
1893 "Mprotect failed at 0x%lx (length %ld) with errno %ld\n",
1894 start_addr, len, errno);
1895 ABORT("Mprotect remapping failed");
1897 GC_unmapped_bytes -= len;
1901 /* Two adjacent blocks have already been unmapped and are about to */
1902 /* be merged. Unmap the whole block. This typically requires */
1903 /* that we unmap a small section in the middle that was not previously */
1904 /* unmapped due to alignment constraints. */
1905 void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
1907 ptr_t start1_addr = GC_unmap_start(start1, bytes1);
1908 ptr_t end1_addr = GC_unmap_end(start1, bytes1);
1909 ptr_t start2_addr = GC_unmap_start(start2, bytes2);
1910 ptr_t end2_addr = GC_unmap_end(start2, bytes2);
/* The gap to unmap lies between the first block's unmapped end */
/* and the second block's unmapped start. */
1911 ptr_t start_addr = end1_addr;
1912 ptr_t end_addr = start2_addr;
1914 GC_ASSERT(start1 + bytes1 == start2);
/* If either block was too small to unmap on its own, recompute */
/* the boundary from the merged block instead. */
1915 if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
1916 if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
1917 if (0 == start_addr) return;
1918 len = end_addr - start_addr;
1919 # if defined(MSWIN32) || defined(MSWINCE)
/* As in GC_unmap: decommit region by region. */
1921 MEMORY_BASIC_INFORMATION mem_info;
1923 if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
1924 != sizeof(mem_info))
1925 ABORT("Weird VirtualQuery result");
1926 free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
1927 if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
1928 ABORT("VirtualFree failed");
1929 GC_unmapped_bytes += free_len;
1930 start_addr += free_len;
1934 if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
1935 GC_unmapped_bytes += len;
1939 #endif /* USE_MUNMAP */
1941 /* Routine for pushing any additional roots. In THREADS */
1942 /* environment, this is also responsible for marking from */
1943 /* thread stacks. */
1945 void (*GC_push_other_roots)() = 0;
/* PCR thread-enumeration callback: query one thread's stack */
/* bounds and push the stack contents for marking. */
1949 PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
1951 struct PCR_ThCtl_TInfoRep info;
/* Zero the bounds first so a failed query pushes nothing. */
1954 info.ti_stkLow = info.ti_stkHi = 0;
1955 result = PCR_ThCtl_GetInfo(t, &info);
1956 GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
1960 /* Push the contents of an old object. We treat this as stack */
1961 /* data only because that makes it robust against mark stack */
/* overflow. Used to scan objects left by previous allocators. */
1963 PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
1965 GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
1966 return(PCR_ERes_okay);
/* PCR implementation of GC_push_other_roots: push objects left by */
/* previous memory managers, then every thread's stack (including */
/* the current one). */
1970 void GC_default_push_other_roots GC_PROTO((void))
1972 /* Traverse data allocated by previous memory managers. */
1974 extern struct PCR_MM_ProcsRep * GC_old_allocator;
1976 if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
1979 ABORT("Old object enumeration failed");
1982 /* Traverse all thread stacks. */
1984 PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
1985 || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
1986 ABORT("Thread stack marking failed\n");
1994 # ifdef ALL_INTERIOR_POINTERS
/* Under SRC Modula-3, thread data structures are managed by the */
/* M3 runtime, so the collector has nothing extra to push here. */
1998 void GC_push_thread_structures GC_PROTO((void))
2000 /* Not our responsibility. */
2003 extern void ThreadF__ProcessStacks();
/* Callback handed to ThreadF__ProcessStacks: push one stack. */
2005 void GC_push_thread_stack(start, stop)
/* The + sizeof(word) presumably compensates for an inclusive */
/* upper stack bound in the M3 runtime -- TODO confirm against */
/* the ThreadF interface. */
2008 GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
2011 /* Push routine with M3 specific calling convention. */
2012 GC_m3_push_root(dummy1, p, dummy2, dummy3)
2014 ptr_t dummy1, dummy2;
2019 GC_PUSH_ONE_STACK(q, p);
2022 /* M3 set equivalent to RTHeap.TracedRefTypes */
2023 typedef struct { int elts[1]; } RefTypeSet;
2024 RefTypeSet GC_TracedRefTypes = {{0x1}};
/* M3 implementation of GC_push_other_roots. */
2026 void GC_default_push_other_roots GC_PROTO((void))
2028 /* Use the M3 provided routine for finding static roots. */
2029 /* This is a bit dubious, since it presumes no C roots. */
2030 /* We handle the collector roots explicitly in GC_push_roots */
2031 RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
/* Skip stack scanning before the first allocation, to dodge the */
/* startup ordering problem noted below. */
2032 if (GC_words_allocd > 0) {
2033 ThreadF__ProcessStacks(GC_push_thread_stack);
2035 /* Otherwise this isn't absolutely necessary, and we have */
2036 /* startup ordering problems. */
2039 # endif /* SRC_M3 */
2041 # if defined(GC_SOLARIS_THREADS) || defined(GC_PTHREADS) || \
2042 defined(GC_WIN32_THREADS)
2044 extern void GC_push_all_stacks();
/* For real thread packages, pushing other roots simply means */
/* pushing every registered thread's stack. */
2046 void GC_default_push_other_roots GC_PROTO((void))
2048 GC_push_all_stacks();
2051 # endif /* GC_SOLARIS_THREADS || GC_PTHREADS */
/* Hook the client may override; defaults to the variant selected */
/* by the thread-package conditionals above. */
2053 void (*GC_push_other_roots) GC_PROTO((void)) = GC_default_push_other_roots;
2058 * Routines for accessing dirty bits on virtual pages.
2059 * We plan to eventually implement four strategies for doing so:
2060 * DEFAULT_VDB: A simple dummy implementation that treats every page
2061 * as possibly dirty. This makes incremental collection
2062 * useless, but the implementation is still correct.
2063 * PCR_VDB: Use PCR's virtual dirty bit facility.
2064 * PROC_VDB: Use the /proc facility for reading dirty bits. Only
2065 * works under some SVR4 variants. Even then, it may be
2066 * too slow to be entirely satisfactory. Requires reading
2067 * dirty bits for entire address space. Implementations tend
2068 * to assume that the client is a (slow) debugger.
2069 * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
2070 * dirtied pages. The implementation (and implementability)
2071 * is highly system dependent. This usually fails when system
2072 * calls write to a protected page. We prevent the read system
2073 * call from doing so. It is the client's responsibility to
2074 * make sure that other system calls are similarly protected
2075 * or write only to the stack.
2077 GC_bool GC_dirty_maintained = FALSE; /* TRUE once a VDB is initialized. */
2081 /* All of the following assume the allocation lock is held, and */
2082 /* signals are disabled. */
2084 /* The client asserts that unallocated pages in the heap are never */
2087 /* Initialize virtual dirty bit implementation. */
/* DEFAULT_VDB: trivial implementation that treats every page as */
/* dirty. Correct, but makes incremental collection useless. */
2088 void GC_dirty_init()
2091 GC_printf0("Initializing DEFAULT_VDB...\n");
2093 GC_dirty_maintained = TRUE;
2096 /* Retrieve system dirty bits for heap to a local buffer. */
2097 /* Restore the systems notion of which pages are dirty. */
/* No-op here: there is no real dirty-bit state to read. */
2098 void GC_read_dirty()
2101 /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
2102 /* If the actual page size is different, this returns TRUE if any */
2103 /* of the pages overlapping h are dirty. This routine may err on the */
2104 /* side of labelling pages as dirty (and this implementation does). */
2106 GC_bool GC_page_was_dirty(h)
2113 * The following two routines are typically less crucial. They matter
2114 * most with large dynamic libraries, or if we can't accurately identify
2115 * stacks, e.g. under Solaris 2.X. Otherwise the following default
2116 * versions are adequate.
2119 /* Could any valid GC heap pointer ever have been written to this page? */
2121 GC_bool GC_page_was_ever_dirty(h)
2127 /* Reset the n pages starting at h to "was never dirty" status. */
2128 void GC_is_fresh(h, n)
2135 /* I) hints that [h, h+nblocks) is about to be written. */
2136 /* II) guarantees that protection is removed. */
2137 /* (I) may speed up some dirty bit implementations. */
2138 /* (II) may be essential if we need to ensure that */
2139 /* pointer-free system call buffers in the heap are */
2140 /* not protected. */
/* No-op under DEFAULT_VDB: pages are never protected here. */
2142 void GC_remove_protection(h, nblocks, is_ptrfree)
2149 # endif /* DEFAULT_VDB */
2152 # ifdef MPROTECT_VDB
2155 * See DEFAULT_VDB for interface descriptions.
2159 * This implementation maintains dirty bits itself by catching write
2160 * faults and keeping track of them. We assume nobody else catches
2161 * SIGBUS or SIGSEGV. We assume no write faults occur in system calls.
2162 * This means that clients must ensure that system calls don't write
2163 * to the write-protected heap. Probably the best way to do this is to
2164 * ensure that system calls write at most to POINTERFREE objects in the
2165 * heap, and do even that only if we are on a platform on which those
2166 * are not protected. Another alternative is to wrap system calls
2167 * (see example for read below), but the current implementation holds
2168 * a lock across blocking calls, making it problematic for multithreaded
2170 * We assume the page size is a multiple of HBLKSIZE.
2171 * We prefer them to be the same. We avoid protecting POINTERFREE
2172 * objects only if they are the same.
2175 # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(DARWIN)
2177 # include <sys/mman.h>
2178 # include <signal.h>
2179 # include <sys/syscall.h>
2181 # define PROTECT(addr, len) \
2182 if (mprotect((caddr_t)(addr), (size_t)(len), \
2183 PROT_READ | OPT_PROT_EXEC) < 0) { \
2184 ABORT("mprotect failed"); \
2186 # define UNPROTECT(addr, len) \
2187 if (mprotect((caddr_t)(addr), (size_t)(len), \
2188 PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
2189 ABORT("un-mprotect failed"); \
2195 /* Using vm_protect (mach syscall) over mprotect (BSD syscall) seems to
2196 decrease the likelihood of some of the problems described below. */
2197 #include <mach/vm_map.h>
2198 static mach_port_t GC_task_self;
2199 #define PROTECT(addr,len) \
2200 if(vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
2201 FALSE,VM_PROT_READ) != KERN_SUCCESS) { \
2202 ABORT("vm_portect failed"); \
2204 #define UNPROTECT(addr,len) \
2205 if(vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
2206 FALSE,VM_PROT_READ|VM_PROT_WRITE) != KERN_SUCCESS) { \
2207 ABORT("vm_portect failed"); \
2212 # include <signal.h>
2215 static DWORD protect_junk;
2216 # define PROTECT(addr, len) \
2217 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
2219 DWORD last_error = GetLastError(); \
2220 GC_printf1("Last error code: %lx\n", last_error); \
2221 ABORT("VirtualProtect failed"); \
2223 # define UNPROTECT(addr, len) \
2224 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
2226 ABORT("un-VirtualProtect failed"); \
2228 # endif /* !DARWIN */
2229 # endif /* MSWIN32 || MSWINCE || DARWIN */
2231 #if defined(SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
2232 typedef void (* SIG_PF)();
2233 #endif /* SUNOS4 || (FREEBSD && !SUNOS5SIGS) */
2235 #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX) \
2238 typedef void (* SIG_PF)(int);
2240 typedef void (* SIG_PF)();
2242 #endif /* SUNOS5SIGS || OSF1 || LINUX || HURD */
2244 #if defined(MSWIN32)
2245 typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
2247 # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
2249 #if defined(MSWINCE)
2250 typedef LONG (WINAPI *SIG_PF)(struct _EXCEPTION_POINTERS *);
2252 # define SIG_DFL (SIG_PF) (-1)
2255 #if defined(IRIX5) || defined(OSF1) || defined(HURD)
2256 typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
2257 #endif /* IRIX5 || OSF1 || HURD */
2259 #if defined(SUNOS5SIGS)
2260 # if defined(HPUX) || defined(FREEBSD)
2261 # define SIGINFO_T siginfo_t
2263 # define SIGINFO_T struct siginfo
2266 typedef void (* REAL_SIG_PF)(int, SIGINFO_T *, void *);
2268 typedef void (* REAL_SIG_PF)();
2270 #endif /* SUNOS5SIGS */
2273 # if __GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 2
2274 typedef struct sigcontext s_c;
2275 # else /* glibc < 2.2 */
2276 # include <linux/version.h>
2277 # if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(ARM32)
2278 typedef struct sigcontext s_c;
2280 typedef struct sigcontext_struct s_c;
2282 # endif /* glibc < 2.2 */
2283 # if defined(ALPHA) || defined(M68K)
2284 typedef void (* REAL_SIG_PF)(int, int, s_c *);
2286 # if defined(IA64) || defined(HP_PA)
2287 typedef void (* REAL_SIG_PF)(int, siginfo_t *, s_c *);
2289 typedef void (* REAL_SIG_PF)(int, s_c);
2293 /* Retrieve fault address from sigcontext structure by decoding */
/* the faulting Alpha load/store instruction: base register value */
/* plus the sign-extended 16-bit displacement. */
2295 char * get_fault_addr(s_c *sc) {
2299 instr = *((unsigned *)(sc->sc_pc));
/* Bits 20..16 select the base register. */
2300 faultaddr = sc->sc_regs[(instr >> 16) & 0x1f];
/* Low 16 bits are the signed displacement; shift pair performs */
/* the sign extension. */
2301 faultaddr += (word) (((int)instr << 16) >> 16);
2302 return (char *)faultaddr;
2304 # endif /* !ALPHA */
2308 SIG_PF GC_old_bus_handler;
2309 SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
2310 #endif /* !DARWIN */
2312 #if defined(THREADS)
2313 /* We need to lock around the bitmap update in the write fault handler */
2314 /* in order to avoid the risk of losing a bit. We do this with a */
2315 /* test-and-set spin lock if we know how to do that. Otherwise we */
2316 /* check whether we are already in the handler and use the dumb but */
2317 /* safe fallback algorithm of setting all bits in the word. */
2318 /* Contention should be very rare, so we do the minimum to handle it */
2320 #ifdef GC_TEST_AND_SET_DEFINED
2321 static VOLATILE unsigned int fault_handler_lock = 0;
/* Set a page-hash-table bit from the write-fault handler, guarded */
/* by a test-and-set spin lock so concurrent faults cannot lose an */
/* update. */
2322 void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
2323 while (GC_test_and_set(&fault_handler_lock)) {}
2324 /* Could also revert to set_pht_entry_from_index_safe if initial */
2325 /* GC_test_and_set fails. */
2326 set_pht_entry_from_index(db, index);
2327 GC_clear(&fault_handler_lock);
2329 #else /* !GC_TEST_AND_SET_DEFINED */
2330 /* THIS IS INCORRECT! The dirty bit vector may be temporarily wrong, */
2331 /* just before we notice the conflict and correct it. We may end up */
2332 /* looking at it while it's wrong. But this requires contention */
2333 /* exactly when a GC is triggered, which seems far less likely to */
2334 /* fail than the old code, which had no reported failures. Thus we */
2335 /* leave it this way while we think of something better, or support */
2336 /* GC_test_and_set on the remaining platforms. */
2337 static VOLATILE word currently_updating = 0;
/* Fallback (no test-and-set available): detect a concurrent */
/* update via the stack-address fingerprint in currently_updating */
/* and repair with the conservative all-bits-set variant. */
2338 void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
2339 unsigned int update_dummy;
/* &update_dummy is unique per active invocation, so a change */
/* to currently_updating signals an interleaved updater. */
2340 currently_updating = (word)(&update_dummy);
2341 set_pht_entry_from_index(db, index);
2342 /* If we get contention in the 10 or so instruction window here, */
2343 /* and we get stopped by a GC between the two updates, we lose! */
2344 if (currently_updating != (word)(&update_dummy)) {
2345 set_pht_entry_from_index_safe(db, index);
2346 /* We claim that if two threads concurrently try to update the */
2347 /* dirty bit vector, the first one to execute UPDATE_START */
2348 /* will see it changed when UPDATE_END is executed. (Note that */
2349 /* &update_dummy must differ in two distinct threads.) It */
2350 /* will then execute set_pht_entry_from_index_safe, thus */
2351 /* returning us to a safe state, though not soon enough. */
2354 #endif /* !GC_TEST_AND_SET_DEFINED */
2355 #else /* !THREADS */
2356 # define async_set_pht_entry_from_index(db, index) \
2357 set_pht_entry_from_index(db, index)
2358 #endif /* !THREADS */
/* GC_write_fault_handler: the MPROTECT_VDB write-barrier trap handler. */
/* On a write fault inside a GC-managed page it marks the spanned       */
/* page(s) dirty in GC_dirty_pages, unprotects them, and resumes the    */
/* faulting write; faults outside the heap are forwarded to the         */
/* previously saved handler.  Each #if arm below selects the platform-  */
/* specific handler signature, the SIG_OK/CODE_OK fault predicates, and */
/* the fault-address extraction.                                        */
/* NOTE(review): this region appears to have lines elided by the        */
/* extraction that produced this file; code left byte-identical.        */
2361 #if !defined(DARWIN)
2362 # if defined (SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
2363 void GC_write_fault_handler(sig, code, scp, addr)
2365 struct sigcontext *scp;
2368 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2369 # define CODE_OK (FC_CODE(code) == FC_PROT \
2370 || (FC_CODE(code) == FC_OBJERR \
2371 && FC_ERRNO(code) == FC_PROT))
2374 # define SIG_OK (sig == SIGBUS)
2375 # define CODE_OK (code == BUS_PAGE_FAULT)
2377 # endif /* SUNOS4 || (FREEBSD && !SUNOS5SIGS) */
2379 # if defined(IRIX5) || defined(OSF1) || defined(HURD)
2381 void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
2383 # define SIG_OK (sig == SIGSEGV)
2384 # define CODE_OK (code == 2 /* experimentally determined */)
2387 # define SIG_OK (sig == SIGSEGV)
2388 # define CODE_OK (code == EACCES)
2391 # define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
2392 # define CODE_OK TRUE
2394 # endif /* IRIX5 || OSF1 || HURD */
2397 # if defined(ALPHA) || defined(M68K)
2398 void GC_write_fault_handler(int sig, int code, s_c * sc)
2400 # if defined(IA64) || defined(HP_PA)
2401 void GC_write_fault_handler(int sig, siginfo_t * si, s_c * scp)
2404 void GC_write_fault_handler(int sig, int a2, int a3, int a4, s_c sc)
2406 void GC_write_fault_handler(int sig, s_c sc)
2410 # define SIG_OK (sig == SIGSEGV)
2411 # define CODE_OK TRUE
2412 /* Empirically c.trapno == 14, on IA32, but is that useful? */
2413 /* Should probably consider alignment issues on other */
2414 /* architectures. */
2417 # if defined(SUNOS5SIGS)
2419 void GC_write_fault_handler(int sig, SIGINFO_T *scp, void * context)
2421 void GC_write_fault_handler(sig, scp, context)
2427 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2428 # define CODE_OK (scp -> si_code == SEGV_ACCERR) \
2429 || (scp -> si_code == BUS_ADRERR) \
2430 || (scp -> si_code == BUS_UNKNOWN) \
2431 || (scp -> si_code == SEGV_UNKNOWN) \
2432 || (scp -> si_code == BUS_OBJERR)
2435 # define SIG_OK (sig == SIGBUS)
2436 # define CODE_OK (scp -> si_code == BUS_PAGE_FAULT)
2438 # define SIG_OK (sig == SIGSEGV)
2439 # define CODE_OK (scp -> si_code == SEGV_ACCERR)
2442 # endif /* SUNOS5SIGS */
2444 # if defined(MSWIN32) || defined(MSWINCE)
2445 LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
2446 # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
2447 STATUS_ACCESS_VIOLATION)
2448 # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
2450 # endif /* MSWIN32 || MSWINCE */
/* Per-platform extraction of the faulting address into "addr". */
2452 register unsigned i;
2454 char *addr = (char *) code;
2457 char * addr = (char *) (size_t) (scp -> sc_badvaddr);
2459 # if defined(OSF1) && defined(ALPHA)
2460 char * addr = (char *) (scp -> sc_traparg_a0);
2463 char * addr = (char *) (scp -> si_addr);
2466 # if defined(I386) || defined (X86_64)
2467 char * addr = (char *) (sc.cr2);
/* M68K: decode the exception frame format to find the effective address. */
2472 struct sigcontext *scp = (struct sigcontext *)(sc);
2474 int format = (scp->sc_formatvec >> 12) & 0xf;
2475 unsigned long *framedata = (unsigned long *)(scp + 1);
2478 if (format == 0xa || format == 0xb) {
2481 } else if (format == 7) {
2484 if (framedata[1] & 0x08000000) {
2485 /* correct addr on misaligned access */
2486 ea = (ea+4095)&(~4095);
2488 } else if (format == 4) {
2491 if (framedata[1] & 0x08000000) {
2492 /* correct addr on misaligned access */
2493 ea = (ea+4095)&(~4095);
2499 char * addr = get_fault_addr(sc);
2501 # if defined(IA64) || defined(HP_PA)
2502 char * addr = si -> si_addr;
2503 /* I believe this is claimed to work on all platforms for */
2504 /* Linux 2.3.47 and later. Hopefully we don't have to */
2505 /* worry about earlier kernels on IA64. */
2507 # if defined(POWERPC)
2508 char * addr = (char *) (sc.regs->dar);
2511 char * addr = (char *)sc.fault_address;
2513 --> architecture not supported
2521 # if defined(MSWIN32) || defined(MSWINCE)
2522 char * addr = (char *) (exc_info -> ExceptionRecord
2523 -> ExceptionInformation[1]);
2524 # define sig SIGSEGV
/* Recognized write fault: locate the page and check whether any part */
/* of it belongs to an allocated heap block.                          */
2527 if (SIG_OK && CODE_OK) {
2528 register struct hblk * h =
2529 (struct hblk *)((word)addr & ~(GC_page_size-1));
2530 GC_bool in_allocd_block;
2533 /* Address is only within the correct physical page. */
2534 in_allocd_block = FALSE;
2535 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
2536 if (HDR(h+i) != 0) {
2537 in_allocd_block = TRUE;
2541 in_allocd_block = (HDR(addr) != 0);
/* Not our page: forward the fault to the previously saved handler, */
/* using the platform's handler-calling convention.                 */
2543 if (!in_allocd_block) {
2544 /* FIXME - We should make sure that we invoke the */
2545 /* old handler with the appropriate calling */
2546 /* sequence, which often depends on SA_SIGINFO. */
2548 /* Heap blocks now begin and end on page boundaries */
2551 if (sig == SIGSEGV) {
2552 old_handler = GC_old_segv_handler;
2554 old_handler = GC_old_bus_handler;
2556 if (old_handler == SIG_DFL) {
2557 # if !defined(MSWIN32) && !defined(MSWINCE)
2558 GC_err_printf1("Segfault at 0x%lx\n", addr);
2559 ABORT("Unexpected bus error or segmentation fault");
2561 return(EXCEPTION_CONTINUE_SEARCH);
2564 # if defined (SUNOS4) \
2565 || (defined(FREEBSD) && !defined(SUNOS5SIGS))
2566 (*old_handler) (sig, code, scp, addr);
2569 # if defined (SUNOS5SIGS)
2571 * FIXME: For FreeBSD, this code should check if the
2572 * old signal handler used the traditional BSD style and
2573 * if so call it using that style.
2575 (*(REAL_SIG_PF)old_handler) (sig, scp, context);
2578 # if defined (LINUX)
2579 # if defined(ALPHA) || defined(M68K)
2580 (*(REAL_SIG_PF)old_handler) (sig, code, sc);
2582 # if defined(IA64) || defined(HP_PA)
2583 (*(REAL_SIG_PF)old_handler) (sig, si, scp);
2585 (*(REAL_SIG_PF)old_handler) (sig, sc);
2590 # if defined (IRIX5) || defined(OSF1) || defined(HURD)
2591 (*(REAL_SIG_PF)old_handler) (sig, code, scp);
2595 return((*old_handler)(exc_info));
/* Our page: make it writable again and record it as dirty. */
2599 UNPROTECT(h, GC_page_size);
2600 /* We need to make sure that no collection occurs between */
2601 /* the UNPROTECT and the setting of the dirty bit. Otherwise */
2602 /* a write by a third thread might go unnoticed. Reversing */
2603 /* the order is just as bad, since we would end up unprotecting */
2604 /* a page in a GC cycle during which it's not marked. */
2605 /* Currently we do this by disabling the thread stopping */
2606 /* signals while this handler is running. An alternative might */
2607 /* be to record the fact that we're about to unprotect, or */
2608 /* have just unprotected a page in the GC's thread structure, */
2609 /* and then to have the thread stopping code set the dirty */
2610 /* flag, if necessary. */
2611 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
2612 register int index = PHT_HASH(h+i);
2614 async_set_pht_entry_from_index(GC_dirty_pages, index);
2617 /* These reset the signal handler each time by default. */
2618 signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
2620 /* The write may not take place before dirty bits are read. */
2621 /* But then we'll fault again ... */
2622 # if defined(MSWIN32) || defined(MSWINCE)
2623 return(EXCEPTION_CONTINUE_EXECUTION);
2628 #if defined(MSWIN32) || defined(MSWINCE)
2629 return EXCEPTION_CONTINUE_SEARCH;
2631 GC_err_printf1("Segfault at 0x%lx\n", addr);
2632 ABORT("Unexpected bus error or segmentation fault");
2635 #endif /* !DARWIN */
2638 * We hold the allocation lock. We expect block h to be written
2639 * shortly. Ensure that all pages containing any part of the n hblks
2640 * starting at h are no longer protected. If is_ptrfree is false,
2641 * also ensure that they will subsequently appear to be dirty.
/* Make every page overlapping the nblocks heap blocks starting at h   */
/* writable again.  Unless is_ptrfree, also pre-mark those pages dirty */
/* so the pending writes are not lost; for pointer-free blocks only    */
/* the partially overlapped boundary pages are dirtied.                */
2643 void GC_remove_protection(h, nblocks, is_ptrfree)
2648 struct hblk * h_trunc; /* Truncated to page boundary */
2649 struct hblk * h_end; /* Page boundary following block end */
2650 struct hblk * current;
2651 GC_bool found_clean;
2653 if (!GC_dirty_maintained) return;
2654 h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
2655 h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size-1)
2656 & ~(GC_page_size-1));
2657 found_clean = FALSE;
2658 for (current = h_trunc; current < h_end; ++current) {
2659 int index = PHT_HASH(current);
/* Dirty everything except the interior of a pointer-free block. */
2661 if (!is_ptrfree || current < h || current >= h + nblocks) {
2662 async_set_pht_entry_from_index(GC_dirty_pages, index);
2665 UNPROTECT(h_trunc, (ptr_t)h_end - (ptr_t)h_trunc);
2668 #if !defined(DARWIN)
/* Install GC_write_fault_handler for the platform's write-fault       */
/* signal(s) (SIGSEGV and/or SIGBUS, or the Win32 unhandled-exception  */
/* filter), saving any previously installed handler so unrelated       */
/* faults can be forwarded to it.                                      */
2669 void GC_dirty_init()
2671 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) || \
2672 defined(OSF1) || defined(HURD)
2673 struct sigaction act, oldact;
2674 /* We should probably specify SA_SIGINFO for Linux, and handle */
2675 /* the different architectures more uniformly. */
2676 # if defined(IRIX5) || defined(LINUX) || defined(OSF1) || defined(HURD)
2677 act.sa_flags = SA_RESTART;
2678 act.sa_handler = (SIG_PF)GC_write_fault_handler;
2680 act.sa_flags = SA_RESTART | SA_SIGINFO;
2681 act.sa_sigaction = GC_write_fault_handler;
2683 (void)sigemptyset(&act.sa_mask);
2685 /* Arrange to postpone SIG_SUSPEND while we're in a write fault */
2686 /* handler. This effectively makes the handler atomic w.r.t. */
2687 /* stopping the world for GC. */
2688 (void)sigaddset(&act.sa_mask, SIG_SUSPEND);
2689 # endif /* SIG_SUSPEND */
/* NOTE(review): "Inititalizing" is misspelled in the message below;   */
/* left unchanged because this is a comment-only edit.                 */
2692 GC_printf0("Inititalizing mprotect virtual dirty bit implementation\n");
2694 GC_dirty_maintained = TRUE;
2695 if (GC_page_size % HBLKSIZE != 0) {
2696 GC_err_printf0("Page size not multiple of HBLKSIZE\n");
2697 ABORT("Page size not multiple of HBLKSIZE");
2699 # if defined(SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
2700 GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
2701 if (GC_old_bus_handler == SIG_IGN) {
2702 GC_err_printf0("Previously ignored bus error!?");
2703 GC_old_bus_handler = SIG_DFL;
2705 if (GC_old_bus_handler != SIG_DFL) {
2707 GC_err_printf0("Replaced other SIGBUS handler\n");
2711 # if defined(SUNOS4)
2712 GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
2713 if (GC_old_segv_handler == SIG_IGN) {
2714 GC_err_printf0("Previously ignored segmentation violation!?");
2715 GC_old_segv_handler = SIG_DFL;
2717 if (GC_old_segv_handler != SIG_DFL) {
2719 GC_err_printf0("Replaced other SIGSEGV handler\n");
2723 # if (defined(SUNOS5SIGS) && !defined(FREEBSD)) || defined(IRIX5) \
2724 || defined(LINUX) || defined(OSF1) || defined(HURD)
2725 /* SUNOS5SIGS includes HPUX */
2726 # if defined(GC_IRIX_THREADS)
2727 sigaction(SIGSEGV, 0, &oldact);
2728 sigaction(SIGSEGV, &act, 0);
2731 int res = sigaction(SIGSEGV, &act, &oldact);
2732 if (res != 0) ABORT("Sigaction failed");
/* Recover the old handler from whichever union member sigaction used. */
2735 # if defined(_sigargs) || defined(HURD) || !defined(SA_SIGINFO)
2736 /* This is Irix 5.x, not 6.x. Irix 5.x does not have */
2738 GC_old_segv_handler = oldact.sa_handler;
2739 # else /* Irix 6.x or SUNOS5SIGS or LINUX */
2740 if (oldact.sa_flags & SA_SIGINFO) {
2741 GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
2743 GC_old_segv_handler = oldact.sa_handler;
2746 if (GC_old_segv_handler == SIG_IGN) {
2747 GC_err_printf0("Previously ignored segmentation violation!?");
2748 GC_old_segv_handler = SIG_DFL;
2750 if (GC_old_segv_handler != SIG_DFL) {
2752 GC_err_printf0("Replaced other SIGSEGV handler\n");
2755 # endif /* (SUNOS5SIGS && !FREEBSD) || IRIX5 || LINUX || OSF1 || HURD */
2756 # if defined(HPUX) || defined(LINUX) || defined(HURD) \
2757 || (defined(FREEBSD) && defined(SUNOS5SIGS))
2758 sigaction(SIGBUS, &act, &oldact);
2759 GC_old_bus_handler = oldact.sa_handler;
2760 if (GC_old_bus_handler == SIG_IGN) {
2761 GC_err_printf0("Previously ignored bus error!?");
2762 GC_old_bus_handler = SIG_DFL;
2764 if (GC_old_bus_handler != SIG_DFL) {
2766 GC_err_printf0("Replaced other SIGBUS handler\n");
2769 # endif /* HPUX || LINUX || HURD || (FREEBSD && SUNOS5SIGS) */
2770 # if defined(MSWIN32)
2771 GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
2772 if (GC_old_segv_handler != NULL) {
2774 GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
2777 GC_old_segv_handler = SIG_DFL;
2781 #endif /* !DARWIN */
/* Report which heap sections this implementation write-protects.      */
/* When a VM page equals a heap block, pointer-free blocks can be left */
/* unprotected; otherwise a page may span both kinds of blocks, so the */
/* pointer-free heap is protected too and clients must tolerate that.  */
2783 int GC_incremental_protection_needs()
2785 if (GC_page_size == HBLKSIZE) {
2786 return GC_PROTECTS_POINTER_HEAP;
2788 return GC_PROTECTS_POINTER_HEAP | GC_PROTECTS_PTRFREE_HEAP;
2792 #define HAVE_INCREMENTAL_PROTECTION_NEEDS
2794 #define IS_PTRFREE(hhdr) ((hhdr)->hb_descr == 0)
2796 #define PAGE_ALIGNED(x) !((word)(x) & (GC_page_size - 1))
/* Write-protect the heap so future writes fault into                  */
/* GC_write_fault_handler.  If pointer-free blocks need not be         */
/* protected (see GC_incremental_protection_needs), walk each section  */
/* block by block and protect only runs of pointer-containing blocks;  */
/* otherwise protect each whole section in one call.                   */
2797 void GC_protect_heap()
2801 struct hblk * current;
2802 struct hblk * current_start; /* Start of block to be protected. */
2803 struct hblk * limit;
2805 GC_bool protect_all =
2806 (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP));
2807 for (i = 0; i < GC_n_heap_sects; i++) {
2808 start = GC_heap_sects[i].hs_start;
2809 len = GC_heap_sects[i].hs_bytes;
2811 PROTECT(start, len);
2813 GC_ASSERT(PAGE_ALIGNED(len))
2814 GC_ASSERT(PAGE_ALIGNED(start))
2815 current_start = current = (struct hblk *)start;
2816 limit = (struct hblk *)(start + len);
2817 while (current < limit) {
2822 GC_ASSERT(PAGE_ALIGNED(current));
2823 GET_HDR(current, hhdr);
2824 if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
2825 /* This can happen only if we're at the beginning of a */
2826 /* heap segment, and a block spans heap segments. */
2827 /* We will handle that block as part of the preceding */
2829 GC_ASSERT(current_start == current);
2830 current_start = ++current;
2833 if (HBLK_IS_FREE(hhdr)) {
2834 GC_ASSERT(PAGE_ALIGNED(hhdr -> hb_sz));
2835 nhblks = divHBLKSZ(hhdr -> hb_sz);
2836 is_ptrfree = TRUE; /* dirty on alloc */
2838 nhblks = OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
2839 is_ptrfree = IS_PTRFREE(hhdr);
/* Pointer-free block ends a protectable run: flush it. */
2842 if (current_start < current) {
2843 PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
2845 current_start = (current += nhblks);
/* Protect the trailing run, if any. */
2850 if (current_start < current) {
2851 PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
2857 /* We assume that either the world is stopped or it's OK to lose dirty */
2858 /* bits while this is happening (as in GC_enable_incremental). */
/* Snapshot the dirty-page set accumulated by the fault handler into */
/* GC_grungy_pages and clear it for the next cycle.                  */
2859 void GC_read_dirty()
2861 BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
2862 (sizeof GC_dirty_pages));
2863 BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
/* True if page h was written since the last GC_read_dirty, or if h */
/* has no header (headerless pages are conservatively dirty).       */
2867 GC_bool GC_page_was_dirty(h)
2870 register word index = PHT_HASH(h);
2872 return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
2876 * Acquiring the allocation lock here is dangerous, since this
2877 * can be called from within GC_call_with_alloc_lock, and the cord
2878 * package does so. On systems that allow nested lock acquisition, this
2880 * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
2883 static GC_bool syscall_acquired_lock = FALSE; /* Protected by GC lock. */
/* Acquire the GC lock around a wrapped system call, recording in      */
/* syscall_acquired_lock whether we took it here so GC_end_syscall     */
/* releases it only in that case.                                      */
2885 void GC_begin_syscall()
2887 if (!I_HOLD_LOCK()) {
2889 syscall_acquired_lock = TRUE;
/* Release the GC lock iff GC_begin_syscall acquired it. */
2893 void GC_end_syscall()
2895 if (syscall_acquired_lock) {
2896 syscall_acquired_lock = FALSE;
/* Make the pages spanning [addr, addr+len) writable and mark them     */
/* dirty, so a system call may safely write into the range.  The range */
/* must lie within a single heap object (verified via GC_base); a      */
/* range outside the GC heap is silently ignored.                      */
2901 void GC_unprotect_range(addr, len)
2905 struct hblk * start_block;
2906 struct hblk * end_block;
2907 register struct hblk *h;
2910 if (!GC_dirty_maintained) return;
2911 obj_start = GC_base(addr);
2912 if (obj_start == 0) return;
2913 if (GC_base(addr + len - 1) != obj_start) {
2914 ABORT("GC_unprotect_range(range bigger than object)");
2916 start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
2917 end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
/* Advance end_block to the last heap block of its page. */
2918 end_block += GC_page_size/HBLKSIZE - 1;
2919 for (h = start_block; h <= end_block; h++) {
2920 register word index = PHT_HASH(h);
2922 async_set_pht_entry_from_index(GC_dirty_pages, index);
2924 UNPROTECT(start_block,
2925 ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
2930 /* We no longer wrap read by default, since that was causing too many */
2931 /* problems. It is preferred that the client instead avoids writing */
2932 /* to the write-protected heap with a system call. */
2933 /* This still serves as sample code if you do want to wrap system calls.*/
2935 #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(GC_USE_LD_WRAP)
2936 /* Replacement for UNIX system call. */
2937 /* Other calls that write to the heap should be handled similarly. */
2938 /* Note that this doesn't work well for blocking reads: It will hold */
2939 /* the allocation lock for the entire duration of the call. Multithreaded */
2940 /* clients should really ensure that it won't block, either by setting */
2941 /* the descriptor nonblocking, or by calling select or poll first, to */
2942 /* make sure that input is available. */
2943 /* Another, preferred alternative is to ensure that system calls never */
2944 /* write to the protected heap (see above). */
/* Sample wrapped read(): dirty/unprotect the destination buffer, then */
/* perform the actual read via whichever low-level mechanism the       */
/* platform offers without recursing into this wrapper (readv, __read, */
/* or a raw syscall).  NOTE(review): several lines of this definition  */
/* appear elided by the extraction; code left byte-identical.          */
2945 # if defined(__STDC__) && !defined(SUNOS4)
2946 # include <unistd.h>
2947 # include <sys/uio.h>
2948 ssize_t read(int fd, void *buf, size_t nbyte)
2951 int read(fd, buf, nbyte)
2953 int GC_read(fd, buf, nbyte)
2963 GC_unprotect_range(buf, (word)nbyte);
2964 # if defined(IRIX5) || defined(GC_LINUX_THREADS)
2965 /* Indirect system call may not always be easily available. */
2966 /* We could call _read, but that would interfere with the */
2967 /* libpthread interception of read. */
2968 /* On Linux, we have to be careful with the linuxthreads */
2969 /* read interception. */
2974 iov.iov_len = nbyte;
2975 result = readv(fd, &iov, 1);
2979 result = __read(fd, buf, nbyte);
2981 /* The two zero args at the end of this list are because one
2982 IA-64 syscall() implementation actually requires six args
2983 to be passed, even though they aren't always used. */
2984 result = syscall(SYS_read, fd, buf, nbyte, 0, 0);
2990 #endif /* !MSWIN32 && !MSWINCE && !GC_LINUX_THREADS */
2992 #if defined(GC_USE_LD_WRAP) && !defined(THREADS)
2993 /* We use the GNU ld call wrapping facility. */
2994 /* This requires that the linker be invoked with "--wrap read". */
2995 /* This can be done by passing -Wl,"--wrap read" to gcc. */
2996 /* I'm not sure that this actually wraps whatever version of read */
2997 /* is called by stdio. That code also mentions __read. */
2998 # include <unistd.h>
/* GNU ld "--wrap read" interposition: dirty/unprotect the buffer,     */
/* then delegate to __real_read (the genuine libc read).               */
2999 ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
3004 GC_unprotect_range(buf, (word)nbyte);
3005 result = __real_read(fd, buf, nbyte);
3010 /* We should probably also do this for __read, or whatever stdio */
3011 /* actually calls. */
/* MPROTECT_VDB keeps no "ever dirty" history; these are trivial       */
/* stubs.  NOTE(review): the stub bodies appear elided by the          */
/* extraction; code left byte-identical.                               */
3017 GC_bool GC_page_was_ever_dirty(h)
3023 /* Reset the n pages starting at h to "was never dirty" status. */
3025 void GC_is_fresh(h, n)
3031 # endif /* MPROTECT_VDB */
3036 * See DEFAULT_VDB for interface descriptions.
3040 * This implementation assumes a Solaris 2.X like /proc pseudo-file-system
3041 * from which we can read page modified bits. This facility is far from
3042 * optimal (e.g. we would like to get the info for only some of the
3043 * address space), but it avoids intercepting system calls.
3047 #include <sys/types.h>
3048 #include <sys/signal.h>
3049 #include <sys/fault.h>
3050 #include <sys/syscall.h>
3051 #include <sys/procfs.h>
3052 #include <sys/stat.h>
3054 #define INITIAL_BUF_SZ 4096
3055 word GC_proc_buf_size = INITIAL_BUF_SZ;
3058 #ifdef GC_SOLARIS_THREADS
3059 /* We don't have exact sp values for threads. So we count on */
3060 /* occasionally declaring stack pages to be fresh. Thus we */
3061 /* need a real implementation of GC_is_fresh. We can't clear */
3062 /* entries in GC_written_pages, since that would declare all */
3063 /* pages with the given hash address to be fresh. */
3064 # define MAX_FRESH_PAGES 8*1024 /* Must be power of 2 */
3065 struct hblk ** GC_fresh_pages; /* A direct mapped cache. */
3066 /* Collisions are dropped. */
3068 # define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
3069 # define ADD_FRESH_PAGE(h) \
3070 GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
3071 # define PAGE_IS_FRESH(h) \
3072 (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
3075 /* Add all pages in pht2 to pht1 */
/* i.e. pht1 |= pht2, one hash-table word at a time. */
3076 void GC_or_pages(pht1, pht2)
3077 page_hash_table pht1, pht2;
3081 for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
/* PROC_VDB initialization: open this process's /proc entry and obtain */
/* a page-data descriptor (PIOCOPENPD) from which GC_read_dirty reads  */
/* per-page modified bits.  Anything allocated before this point may   */
/* already have been written, so GC_written_pages is set to all-ones   */
/* in that case.                                                       */
3086 void GC_dirty_init()
3091 GC_dirty_maintained = TRUE;
3092 if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
3095 for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
3097 GC_printf1("Allocated words:%lu:all pages may have been written\n",
3099 (GC_words_allocd + GC_words_allocd_before_gc));
3102 sprintf(buf, "/proc/%d", getpid());
3103 fd = open(buf, O_RDONLY);
3105 ABORT("/proc open failed");
3107 GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
/* Keep the descriptor from leaking into exec'd children. */
3109 syscall(SYS_fcntl, GC_proc_fd, F_SETFD, FD_CLOEXEC);
3110 if (GC_proc_fd < 0) {
3111 ABORT("/proc ioctl failed");
3113 GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
3114 # ifdef GC_SOLARIS_THREADS
3115 GC_fresh_pages = (struct hblk **)
3116 GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *))
3117 if (GC_fresh_pages == 0) {
3118 GC_err_printf0("No space for fresh pages\n");
3121 BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
3125 /* Ignore write hints. They don't help us here. */
/* PROC_VDB never protects pages, so there is nothing to remove. */
3127 void GC_remove_protection(h, nblocks, is_ptrfree)
/* Under Solaris threads, read via a raw syscall — presumably to       */
/* bypass the thread package's wrapped read() (NOTE(review): verify).  */
3134 #ifdef GC_SOLARIS_THREADS
3135 # define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
3137 # define READ(fd,buf,nbytes) read(fd, buf, nbytes)
/* Read per-page "modified" bits from the /proc page-data descriptor   */
/* into GC_grungy_pages.  On a short read, retry once with a doubled   */
/* buffer; if that also fails, conservatively mark every page dirty.   */
/* The result is also OR-ed into GC_written_pages, and (with Solaris   */
/* threads) dirty pages are evicted from the fresh-page cache.         */
3140 void GC_read_dirty()
3142 unsigned long ps, np;
3145 struct prasmap * map;
3147 ptr_t current_addr, limit;
3151 BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));
3154 if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
3156 GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
3160 /* Retry with larger buffer. */
3161 word new_size = 2 * GC_proc_buf_size;
3162 char * new_buf = GC_scratch_alloc(new_size);
3165 GC_proc_buf = bufp = new_buf;
3166 GC_proc_buf_size = new_size;
3168 if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
3169 WARN("Insufficient space for /proc read\n", 0);
/* Give up: treat all pages as both grungy and written. */
3171 memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
3172 memset(GC_written_pages, 0xff, sizeof(page_hash_table));
3173 # ifdef GC_SOLARIS_THREADS
3174 BZERO(GC_fresh_pages,
3175 MAX_FRESH_PAGES * sizeof (struct hblk *));
3181 /* Copy dirty bits into GC_grungy_pages */
3182 nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
3183 /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
3184 nmaps, PG_REFERENCED, PG_MODIFIED); */
3185 bufp = bufp + sizeof(struct prpageheader);
3186 for (i = 0; i < nmaps; i++) {
3187 map = (struct prasmap *)bufp;
3188 vaddr = (ptr_t)(map -> pr_vaddr);
3189 ps = map -> pr_pagesize;
3190 np = map -> pr_npage;
3191 /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
3192 limit = vaddr + ps * np;
3193 bufp += sizeof (struct prasmap);
3194 for (current_addr = vaddr;
3195 current_addr < limit; current_addr += ps){
3196 if ((*bufp++) & PG_MODIFIED) {
3197 register struct hblk * h = (struct hblk *) current_addr;
/* Mark every heap block within this VM page. */
3199 while ((ptr_t)h < current_addr + ps) {
3200 register word index = PHT_HASH(h);
3202 set_pht_entry_from_index(GC_grungy_pages, index);
3203 # ifdef GC_SOLARIS_THREADS
3205 register int slot = FRESH_PAGE_SLOT(h);
3207 if (GC_fresh_pages[slot] == h) {
3208 GC_fresh_pages[slot] = 0;
/* Re-align bufp to a long boundary before the next map entry. */
3216 bufp += sizeof(long) - 1;
3217 bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
3219 /* Update GC_written_pages. */
3220 GC_or_pages(GC_written_pages, GC_grungy_pages);
3221 # ifdef GC_SOLARIS_THREADS
3222 /* Make sure that old stacks are considered completely clean */
3223 /* unless written again. */
3224 GC_old_stacks_are_fresh();
/* Was h written since the last GC_read_dirty?  Pages currently in the */
/* fresh-page cache are reported clean (rationale in comment below).   */
3230 GC_bool GC_page_was_dirty(h)
3233 register word index = PHT_HASH(h);
3234 register GC_bool result;
3236 result = get_pht_entry_from_index(GC_grungy_pages, index);
3237 # ifdef GC_SOLARIS_THREADS
3238 if (result && PAGE_IS_FRESH(h)) result = FALSE;
3239 /* This happens only if page was declared fresh since */
3240 /* the read_dirty call, e.g. because it's in an unused */
3241 /* thread stack. It's OK to treat it as clean, in */
3242 /* that case. And it's consistent with */
3243 /* GC_page_was_ever_dirty. */
/* Was h ever written since tracking began (per GC_written_pages),     */
/* again treating cached-fresh pages as clean?                         */
3248 GC_bool GC_page_was_ever_dirty(h)
3251 register word index = PHT_HASH(h);
3252 register GC_bool result;
3254 result = get_pht_entry_from_index(GC_written_pages, index);
3255 # ifdef GC_SOLARIS_THREADS
3256 if (result && PAGE_IS_FRESH(h)) result = FALSE;
3261 /* Caller holds allocation lock. */
/* Declare the n pages starting at h fresh by inserting them into the  */
/* direct-mapped fresh-page cache (Solaris threads only).              */
3262 void GC_is_fresh(h, n)
3267 register word index;
3269 # ifdef GC_SOLARIS_THREADS
3272 if (GC_fresh_pages != 0) {
3273 for (i = 0; i < n; i++) {
3274 ADD_FRESH_PAGE(h + i);
3280 # endif /* PROC_VDB */
3285 # include "vd/PCR_VD.h"
3287 # define NPAGES (32*1024) /* 128 MB */
3289 PCR_VD_DB GC_grungy_bits[NPAGES];
3291 ptr_t GC_vd_base; /* Address corresponding to GC_grungy_bits[0] */
3292 /* HBLKSIZE aligned. */
/* PCR_VDB initialization: start PCR virtual-dirty tracking over a     */
/* fixed window of NPAGES blocks anchored at the first heap section.   */
3294 void GC_dirty_init()
3296 GC_dirty_maintained = TRUE;
3297 /* For the time being, we assume the heap generally grows up */
3298 GC_vd_base = GC_heap_sects[0].hs_start;
3299 if (GC_vd_base == 0) {
3300 ABORT("Bad initial heap segment");
3302 if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
3304 ABORT("dirty bit initialization failed");
/* Enable write-protect tracking on any heap sections added since the  */
/* last call, then read-and-clear the dirty bits into GC_grungy_bits.  */
3308 void GC_read_dirty()
3310 /* lazily enable dirty bits on newly added heap sects */
3312 static int onhs = 0;
3313 int nhs = GC_n_heap_sects;
3314 for( ; onhs < nhs; onhs++ ) {
3315 PCR_VD_WriteProtectEnable(
3316 GC_heap_sects[onhs].hs_start,
3317 GC_heap_sects[onhs].hs_bytes );
3322 if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
3324 ABORT("dirty bit read failed");
/* Dirty query against GC_grungy_bits.  NOTE(review): the return for   */
/* blocks outside the tracked [GC_vd_base, +NPAGES) window appears     */
/* elided here; code left byte-identical.                              */
3328 GC_bool GC_page_was_dirty(h)
3331 if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
3334 return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
/* Toggle write protection off and back on for the nblocks blocks at   */
/* h.  NOTE(review): presumably the disable/enable pair makes PCR      */
/* record the blocks as dirty — verify against PCR_VD documentation.   */
3338 void GC_remove_protection(h, nblocks, is_ptrfree)
3343 PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE);
3344 PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE);
3347 # endif /* PCR_VDB */
3349 #if defined(MPROTECT_VDB) && defined(DARWIN)
3350 /* The following sources were used as a *reference* for this exception handling
3352 1. Apple's mach/xnu documentation
3353 2. Timothy J. Wood's "Mach Exception Handlers 101" post to the
3354 omnigroup's macosx-dev list.
3355 www.omnigroup.com/mailman/archive/macosx-dev/2000-June/002030.html
3356 3. macosx-nat.c from Apple's GDB source code.
3359 /* The bug that caused all this trouble should now be fixed. This should
3360 eventually be removed if all goes well. */
3361 /* define BROKEN_EXCEPTION_HANDLING */
3363 #include <mach/mach.h>
3364 #include <mach/mach_error.h>
3365 #include <mach/thread_status.h>
3366 #include <mach/exception.h>
3367 #include <mach/task.h>
3368 #include <pthread.h>
3370 /* These are not defined in any header, although they are documented */
3371 extern boolean_t exc_server(mach_msg_header_t *,mach_msg_header_t *);
3372 extern kern_return_t exception_raise(
3373 mach_port_t,mach_port_t,mach_port_t,
3374 exception_type_t,exception_data_t,mach_msg_type_number_t);
3375 extern kern_return_t exception_raise_state(
3376 mach_port_t,mach_port_t,mach_port_t,
3377 exception_type_t,exception_data_t,mach_msg_type_number_t,
3378 thread_state_flavor_t*,thread_state_t,mach_msg_type_number_t,
3379 thread_state_t,mach_msg_type_number_t*);
3380 extern kern_return_t exception_raise_state_identity(
3381 mach_port_t,mach_port_t,mach_port_t,
3382 exception_type_t,exception_data_t,mach_msg_type_number_t,
3383 thread_state_flavor_t*,thread_state_t,mach_msg_type_number_t,
3384 thread_state_t,mach_msg_type_number_t*);
3387 #define MAX_EXCEPTION_PORTS 16
3389 static mach_port_t GC_task_self;
3392 mach_msg_type_number_t count;
3393 exception_mask_t masks[MAX_EXCEPTION_PORTS];
3394 exception_handler_t ports[MAX_EXCEPTION_PORTS];
3395 exception_behavior_t behaviors[MAX_EXCEPTION_PORTS];
3396 thread_state_flavor_t flavors[MAX_EXCEPTION_PORTS];
3400 mach_port_t exception;
3401 #if defined(THREADS)
3407 mach_msg_header_t head;
3411 GC_MP_NORMAL, GC_MP_DISCARDING, GC_MP_STOPPED
3412 } GC_mprotect_state_t;
3414 /* FIXME: 1 and 2 seem to be safe to use in the msgh_id field,
3415 but it isn't documented. Use the source and see if they
3420 /* These values are only used on the reply port */
3423 #if defined(THREADS)
3425 GC_mprotect_state_t GC_mprotect_state;
3427 /* The following should ONLY be called when the world is stopped */
/* Send a control message (e.g. ID_STOP/ID_RESUME) to the mprotect     */
/* handler thread via GC_ports.exception and block until its ID_ACK    */
/* reply arrives.  Must only be called with the world stopped (see     */
/* the comment preceding this function).                               */
3428 static void GC_mprotect_thread_notify(mach_msg_id_t id) {
3431 mach_msg_trailer_t trailer;
3433 mach_msg_return_t r;
3435 buf.msg.head.msgh_bits =
3436 MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,0);
3437 buf.msg.head.msgh_size = sizeof(buf.msg);
3438 buf.msg.head.msgh_remote_port = GC_ports.exception;
3439 buf.msg.head.msgh_local_port = MACH_PORT_NULL;
3440 buf.msg.head.msgh_id = id;
/* Combined send of the request and receive of the ack. */
3444 MACH_SEND_MSG|MACH_RCV_MSG|MACH_RCV_LARGE,
3448 MACH_MSG_TIMEOUT_NONE,
3450 if(r != MACH_MSG_SUCCESS)
3451 ABORT("mach_msg failed in GC_mprotect_thread_notify");
3452 if(buf.msg.head.msgh_id != ID_ACK)
3453 ABORT("invalid ack in GC_mprotect_thread_notify");
3456 /* Should only be called by the mprotect thread */
/* Send the ID_ACK acknowledgement back on GC_ports.reply. */
3457 static void GC_mprotect_thread_reply() {
3459 mach_msg_return_t r;
3461 msg.head.msgh_bits =
3462 MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,0);
3463 msg.head.msgh_size = sizeof(msg);
3464 msg.head.msgh_remote_port = GC_ports.reply;
3465 msg.head.msgh_local_port = MACH_PORT_NULL;
3466 msg.head.msgh_id = ID_ACK;
3474 MACH_MSG_TIMEOUT_NONE,
3476 if(r != MACH_MSG_SUCCESS)
3477 ABORT("mach_msg failed in GC_mprotect_thread_reply");
/* World-stopped entry points: park and unpark the mprotect handler */
/* thread via the notify/ack protocol implemented above.            */
3480 void GC_mprotect_stop() {
3481 GC_mprotect_thread_notify(ID_STOP);
3483 void GC_mprotect_resume() {
3484 GC_mprotect_thread_notify(ID_RESUME);
3487 #else /* !THREADS */
3488 /* The compiler should optimize away any GC_mprotect_state computations */
3489 #define GC_mprotect_state GC_MP_NORMAL
/* Main loop of the dedicated Mach exception-handling thread: receive  */
/* exception and control messages, run the STOP/DISCARDING/RESUME      */
/* state machine under THREADS, dispatch genuine exceptions through    */
/* exc_server (which calls catch_exception_raise), and send replies.   */
/* NOTE(review): several lines (mach_msg argument lists, ID dispatch)  */
/* appear elided by the extraction; code left byte-identical.          */
3492 static void *GC_mprotect_thread(void *arg) {
3493 mach_msg_return_t r;
3494 /* These two structures contain some private kernel data. We don't need to
3495 access any of it so we don't bother defining a proper struct. The
3496 correct definitions are in the xnu source code. */
3498 mach_msg_header_t head;
3502 mach_msg_header_t head;
3503 mach_msg_body_t msgh_body;
3509 GC_darwin_register_mach_handler_thread(mach_thread_self());
/* While discarding, poll with a zero timeout instead of blocking. */
3514 MACH_RCV_MSG|MACH_RCV_LARGE|
3515 (GC_mprotect_state == GC_MP_DISCARDING ? MACH_RCV_TIMEOUT : 0),
3519 GC_mprotect_state == GC_MP_DISCARDING ? 0 : MACH_MSG_TIMEOUT_NONE,
3522 id = r == MACH_MSG_SUCCESS ? msg.head.msgh_id : -1;
3524 #if defined(THREADS)
3525 if(GC_mprotect_state == GC_MP_DISCARDING) {
3526 if(r == MACH_RCV_TIMED_OUT) {
3527 GC_mprotect_state = GC_MP_STOPPED;
3528 GC_mprotect_thread_reply();
3531 if(r == MACH_MSG_SUCCESS && (id == ID_STOP || id == ID_RESUME))
3532 ABORT("out of order mprotect thread request");
3536 if(r != MACH_MSG_SUCCESS) {
3537 GC_err_printf2("mach_msg failed with %d %s\n",
3538 (int)r,mach_error_string(r));
3539 ABORT("mach_msg failed");
3543 #if defined(THREADS)
3545 if(GC_mprotect_state != GC_MP_NORMAL)
3546 ABORT("Called mprotect_stop when state wasn't normal");
3547 GC_mprotect_state = GC_MP_DISCARDING;
3550 if(GC_mprotect_state != GC_MP_STOPPED)
3551 ABORT("Called mprotect_resume when state wasn't stopped");
3552 GC_mprotect_state = GC_MP_NORMAL;
3553 GC_mprotect_thread_reply();
3555 #endif /* THREADS */
3557 /* Handle the message (calls catch_exception_raise) */
3558 if(!exc_server(&msg.head,&reply.head))
3559 ABORT("exc_server failed");
3560 /* Send the reply */
3564 reply.head.msgh_size,
3567 MACH_MSG_TIMEOUT_NONE,
3569 if(r != MACH_MSG_SUCCESS) {
3570 /* This will fail if the thread dies, but the thread shouldn't
3572 #ifdef BROKEN_EXCEPTION_HANDLING
3574 "mach_msg failed with %d %s while sending exc reply\n",
3575 (int)r,mach_error_string(r));
3577 ABORT("mach_msg failed while sending exception reply");
3586 /* All this SIGBUS code shouldn't be necessary. All protection faults should
3587 be going through the mach exception handler. However, it seems a SIGBUS is
3588 occasionally sent for some unknown reason. Even more odd, it seems to be
3589 meaningless and safe to ignore. */
3590 #ifdef BROKEN_EXCEPTION_HANDLING
3592 typedef void (* SIG_PF)();
3593 static SIG_PF GC_old_bus_handler;
3595 /* Updates to this aren't atomic, but the SIGBUSs seem pretty rare.
3596 Even if this doesn't get updated properly, it isn't really a problem */
3597 static int GC_sigbus_count;
/* SIGBUS band-aid for the BROKEN_EXCEPTION_HANDLING workaround:       */
/* ignore isolated spurious SIGBUSes (with a warning) but abort once   */
/* GC_sigbus_count reaches 8 in a row.                                 */
3599 static void GC_darwin_sigbus(int num,siginfo_t *sip,void *context) {
3600 if(num != SIGBUS) ABORT("Got a non-sigbus signal in the sigbus handler");
3602 /* Ugh... some seem safe to ignore, but too many in a row probably means
3603 trouble. GC_sigbus_count is reset for each mach exception that is
3605 if(GC_sigbus_count >= 8) {
3606 ABORT("Got more than 8 SIGBUSs in a row!");
3609 GC_err_printf0("GC: WARNING: Ignoring SIGBUS.\n");
3612 #endif /* BROKEN_EXCEPTION_HANDLING */
/* Darwin MPROTECT_VDB initialization: allocate a Mach receive port,   */
/* register it for EXC_MASK_BAD_ACCESS on this task (saving the        */
/* previously installed ports in GC_old_exc_ports for forwarding),     */
/* and spawn the detached handler thread running GC_mprotect_thread.   */
3614 void GC_dirty_init() {
3618 pthread_attr_t attr;
3619 exception_mask_t mask;
/* NOTE(review): "Inititalizing" is misspelled in the message below;   */
/* left unchanged because this is a comment-only edit.                 */
3622 GC_printf0("Inititalizing mach/darwin mprotect virtual dirty bit "
3623 "implementation\n");
3625 # ifdef BROKEN_EXCEPTION_HANDLING
3626 GC_err_printf0("GC: WARNING: Enabling workarounds for various darwin "
3627 "exception handling bugs.\n");
3629 GC_dirty_maintained = TRUE;
3630 if (GC_page_size % HBLKSIZE != 0) {
3631 GC_err_printf0("Page size not multiple of HBLKSIZE\n");
3632 ABORT("Page size not multiple of HBLKSIZE");
3635 GC_task_self = me = mach_task_self();
3637 r = mach_port_allocate(me,MACH_PORT_RIGHT_RECEIVE,&GC_ports.exception);
3638 if(r != KERN_SUCCESS) ABORT("mach_port_allocate failed (exception port)");
3640 r = mach_port_insert_right(me,GC_ports.exception,GC_ports.exception,
3641 MACH_MSG_TYPE_MAKE_SEND);
3642 if(r != KERN_SUCCESS)
3643 ABORT("mach_port_insert_right failed (exception port)");
3645 #if defined(THREADS)
3646 r = mach_port_allocate(me,MACH_PORT_RIGHT_RECEIVE,&GC_ports.reply);
3647 if(r != KERN_SUCCESS) ABORT("mach_port_allocate failed (reply port)");
3650 /* The exceptions we want to catch */
3651 mask = EXC_MASK_BAD_ACCESS;
/* Save the old ports so catch_exception_raise can forward unrelated */
/* exceptions to whoever was registered before us.                   */
3653 r = task_get_exception_ports(
3656 GC_old_exc_ports.masks,
3657 &GC_old_exc_ports.count,
3658 GC_old_exc_ports.ports,
3659 GC_old_exc_ports.behaviors,
3660 GC_old_exc_ports.flavors
3662 if(r != KERN_SUCCESS) ABORT("task_get_exception_ports failed");
3664 r = task_set_exception_ports(
3669 MACHINE_THREAD_STATE
3671 if(r != KERN_SUCCESS) ABORT("task_set_exception_ports failed");
3673 if(pthread_attr_init(&attr) != 0) ABORT("pthread_attr_init failed");
3674 if(pthread_attr_setdetachstate(&attr,PTHREAD_CREATE_DETACHED) != 0)
3675 ABORT("pthread_attr_setdetachedstate failed");
3677 # undef pthread_create
3678 /* This will call the real pthread function, not our wrapper */
3679 if(pthread_create(&thread,&attr,GC_mprotect_thread,NULL) != 0)
3680 ABORT("pthread_create failed");
3681 pthread_attr_destroy(&attr);
3683 /* Setup the sigbus handler for ignoring the meaningless SIGBUSs */
3684 #ifdef BROKEN_EXCEPTION_HANDLING
3686 struct sigaction sa, oldsa;
3687 sa.sa_handler = (SIG_PF)GC_darwin_sigbus;
3688 sigemptyset(&sa.sa_mask);
3689 sa.sa_flags = SA_RESTART|SA_SIGINFO;
3690 if(sigaction(SIGBUS,&sa,&oldsa) < 0) ABORT("sigaction");
3691 GC_old_bus_handler = (SIG_PF)oldsa.sa_handler;
3692 if (GC_old_bus_handler != SIG_DFL) {
3694 GC_err_printf0("Replaced other SIGBUS handler\n");
3698 #endif /* BROKEN_EXCEPTION_HANDLING */
3701 /* The source code for Apple's GDB was used as a reference for the exception
3702 forwarding code. This code is similar to the GDB code only because there is
3703 only one way to do it. */
/* Forward an exception we are not interested in to whatever handler     */
/* the task had registered before GC_dirty_init replaced it (recorded in */
/* GC_old_exc_ports).  Looks up the old port/behavior/flavor matching    */
/* the exception, fetches the thread state if the old behavior needs it, */
/* re-raises with the matching exception_raise* call, and writes any     */
/* (possibly modified) state back to the thread.                         */
/* NOTE(review): the original numbering is non-contiguous; the remaining */
/* parameters, local declarations, braces and the switch(behavior) head  */
/* appear to have been elided from this listing.                         */
3704 static kern_return_t GC_forward_exception(
3707 exception_type_t exception,
3708 exception_data_t data,
3709 mach_msg_type_number_t data_count
3714 exception_behavior_t behavior;
3715 thread_state_flavor_t flavor;
3717 thread_state_data_t thread_state;
3718 mach_msg_type_number_t thread_state_count = THREAD_STATE_MAX;
/* Find the first saved handler whose mask covers this exception. */
3720 for(i=0;i<GC_old_exc_ports.count;i++)
3721 if(GC_old_exc_ports.masks[i] & (1 << exception))
3723 if(i==GC_old_exc_ports.count) ABORT("No handler for exception!");
3725 port = GC_old_exc_ports.ports[i];
3726 behavior = GC_old_exc_ports.behaviors[i];
3727 flavor = GC_old_exc_ports.flavors[i];
/* State-carrying behaviors need the current thread state passed in. */
3729 if(behavior != EXCEPTION_DEFAULT) {
3730 r = thread_get_state(thread,flavor,thread_state,&thread_state_count);
3731 if(r != KERN_SUCCESS)
3732 ABORT("thread_get_state failed in forward_exception");
/* Dispatch on the old handler's registered behavior. */
3736 case EXCEPTION_DEFAULT:
3737 r = exception_raise(port,thread,task,exception,data,data_count);
3739 case EXCEPTION_STATE:
3740 r = exception_raise_state(port,thread,task,exception,data,
3741 data_count,&flavor,thread_state,thread_state_count,
3742 thread_state,&thread_state_count);
3744 case EXCEPTION_STATE_IDENTITY:
3745 r = exception_raise_state_identity(port,thread,task,exception,data,
3746 data_count,&flavor,thread_state,thread_state_count,
3747 thread_state,&thread_state_count);
3750 r = KERN_FAILURE; /* make gcc happy */
3751 ABORT("forward_exception: unknown behavior");
/* The old handler may have modified the thread state; apply it. */
3755 if(behavior != EXCEPTION_DEFAULT) {
3756 r = thread_set_state(thread,flavor,thread_state,thread_state_count);
3757 if(r != KERN_SUCCESS)
3758 ABORT("thread_set_state failed in forward_exception");
/* Shorthand for delegating the current exception to the pre-existing */
/* handler saved in GC_old_exc_ports.                                 */
3764 #define FWD() GC_forward_exception(thread,task,exception,code,code_count)
3766 /* This violates the namespace rules but there isn't anything that can be done
3767 about it. The exception handling stuff is hard coded to call this */
/* Mach exception callback invoked (via exc_server demultiplexing) for  */
/* each exception message on our port.  We handle EXC_BAD_ACCESS /      */
/* KERN_PROTECTION_FAILURE on pages the GC write-protected: unprotect   */
/* the page and mark its heap blocks dirty.  Everything else is         */
/* forwarded to the previously installed handler via FWD().             */
/* NOTE(review): the original numbering is non-contiguous; the return   */
/* type line, several locals (r, addr, h, i) and various braces appear  */
/* to have been elided from this listing.                               */
3769 catch_exception_raise(
3770 mach_port_t exception_port,mach_port_t thread,mach_port_t task,
3771 exception_type_t exception,exception_data_t code,
3772 mach_msg_type_number_t code_count
/* PPC-only: the faulting address is recovered from the PPC exception */
/* state (the DAR register, read below).                              */
3779 thread_state_flavor_t flavor = PPC_EXCEPTION_STATE;
3780 mach_msg_type_number_t exc_state_count = PPC_EXCEPTION_STATE_COUNT;
3781 ppc_exception_state_t exc_state;
3783 # error FIXME for non-ppc darwin
/* Not a protection fault: not ours, pass it along. */
3787 if(exception != EXC_BAD_ACCESS || code[0] != KERN_PROTECTION_FAILURE) {
3788 #ifdef DEBUG_EXCEPTION_HANDLING
3789 /* We aren't interested, pass it on to the old handler */
3790 GC_printf3("Exception: 0x%x Code: 0x%x 0x%x in catch....\n",
3792 code_count > 0 ? code[0] : -1,
3793 code_count > 1 ? code[1] : -1);
3798 r = thread_get_state(thread,flavor,
3799 (natural_t*)&exc_state,&exc_state_count);
3800 if(r != KERN_SUCCESS) {
3801 /* The thread is supposed to be suspended while the exception handler
3802 is called. This shouldn't fail. */
3803 #ifdef BROKEN_EXCEPTION_HANDLING
3804 GC_err_printf0("thread_get_state failed in "
3805 "catch_exception_raise\n");
3806 return KERN_SUCCESS;
3808 ABORT("thread_get_state failed in catch_exception_raise");
3812 /* This is the address that caused the fault */
3813 addr = (char*) exc_state.dar;
/* No GC header for this address => the fault is not on a heap page */
/* that we protected.                                               */
3815 if((HDR(addr)) == 0) {
3816 /* Ugh... just like the SIGBUS problem above, it seems we get a bogus
3817 KERN_PROTECTION_FAILURE every once in a while. We wait till we get
3818 a bunch in a row before doing anything about it. If a "real" fault
3819 ever occurs it'll just keep faulting over and over and we'll hit
3820 the limit pretty quickly. */
3821 #ifdef BROKEN_EXCEPTION_HANDLING
3822 static char *last_fault;
3823 static int last_fault_count;
3825 if(addr != last_fault) {
3827 last_fault_count = 0;
/* Tolerate up to 32 consecutive bogus faults at the same address; */
/* warn only on the first one.                                     */
3829 if(++last_fault_count < 32) {
3830 if(last_fault_count == 1)
3832 "GC: WARNING: Ignoring KERN_PROTECTION_FAILURE at %p\n",
3834 return KERN_SUCCESS;
3837 GC_err_printf1("Unexpected KERN_PROTECTION_FAILURE at %p\n",addr);
3838 /* Can't pass it along to the signal handler because that is
3839 ignoring SIGBUS signals. We also shouldn't call ABORT here as
3840 signals don't always work too well from the exception handler. */
3841 GC_err_printf0("Aborting\n");
3843 #else /* BROKEN_EXCEPTION_HANDLING */
3844 /* Pass it along to the next exception handler
3845 (which should call SIGBUS/SIGSEGV) */
3847 #endif /* !BROKEN_EXCEPTION_HANDLING */
3850 #ifdef BROKEN_EXCEPTION_HANDLING
3851 /* Reset the number of consecutive SIGBUSs */
3852 GC_sigbus_count = 0;
/* Normal incremental-GC operation: unprotect the faulting page and */
/* record all of its heap blocks as dirty.                          */
3855 if(GC_mprotect_state == GC_MP_NORMAL) { /* common case */
3856 h = (struct hblk*)((word)addr & ~(GC_page_size-1));
3857 UNPROTECT(h, GC_page_size);
3858 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
3859 register int index = PHT_HASH(h+i);
3860 async_set_pht_entry_from_index(GC_dirty_pages, index);
3862 } else if(GC_mprotect_state == GC_MP_DISCARDING) {
3863 /* Lie to the thread for now. No sense UNPROTECT()ing the memory
3864 when we're just going to PROTECT() it again later. The thread
3865 will just fault again once it resumes */
3867 /* Shouldn't happen, I don't think */
3868 GC_printf0("KERN_PROTECTION_FAILURE while world is stopped\n");
3871 return KERN_SUCCESS;
3875 /* These should never be called, but just in case... */
/* Required by the exception-message demuxer's interface but not       */
/* expected to be reached (see the comment above); aborts if it is.    */
3876 kern_return_t catch_exception_raise_state(mach_port_name_t exception_port,
3877 int exception, exception_data_t code, mach_msg_type_number_t codeCnt,
3878 int flavor, thread_state_t old_state, int old_stateCnt,
3879 thread_state_t new_state, int new_stateCnt)
3881 ABORT("catch_exception_raise_state");
3882 return(KERN_INVALID_ARGUMENT);
/* Like catch_exception_raise_state: interface stub that should never  */
/* be invoked; aborts if it is.                                        */
3884 kern_return_t catch_exception_raise_state_identity(
3885 mach_port_name_t exception_port, mach_port_t thread, mach_port_t task,
3886 int exception, exception_data_t code, mach_msg_type_number_t codeCnt,
3887 int flavor, thread_state_t old_state, int old_stateCnt,
3888 thread_state_t new_state, int new_stateCnt)
3890 ABORT("catch_exception_raise_state_identity");
3891 return(KERN_INVALID_ARGUMENT);
3895 #endif /* DARWIN && MPROTECT_VDB */
3897 # ifndef HAVE_INCREMENTAL_PROTECTION_NEEDS
/* Default implementation for ports that did not supply one above:    */
/* report that incremental collection protects no pages.              */
3898 int GC_incremental_protection_needs()
3900 return GC_PROTECTS_NONE;
3902 # endif /* !HAVE_INCREMENTAL_PROTECTION_NEEDS */
3905 * Call stack save code for debugging.
3906 * Should probably be in mach_dep.c, but that requires reorganization.
3909 /* I suspect the following works for most X86 *nix variants, so */
3910 /* long as the frame pointer is explicitly stored. In the case of gcc, */
3911 /* compiler flags (e.g. -fomit-frame-pointer) determine whether it is. */
3912 #if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
/* Per-platform definition (or system header) of struct frame, used by */
/* the frame-pointer-walking GC_save_callers below.                    */
/* NOTE(review): the original numbering is non-contiguous; the struct  */
/* headers and some members appear to have been elided from this       */
/* listing.                                                            */
3913 # include <features.h>
3916 struct frame *fr_savfp;
3918 long fr_arg[NARGS]; /* All the arguments go here. */
3924 # include <features.h>
3929 struct frame *fr_savfp;
/* SPARC variants: use the system-provided struct frame where one exists. */
3938 # if defined(SUNOS4)
3939 # include <machine/frame.h>
3941 # if defined (DRSNX)
3942 # include <sys/sparc/frame.h>
3944 # if defined(OPENBSD) || defined(NETBSD)
3947 # include <sys/frame.h>
3953 --> We only know how to get the first 6 arguments
3957 #ifdef NEED_CALLINFO
3958 /* Fill in the pc and argument information for up to NFRAMES of my */
3959 /* callers. Ignore my frame and my callers frame. */
3962 # include <unistd.h>
3965 #endif /* NEED_CALLINFO */
3967 #if defined(GC_HAVE_BUILTIN_BACKTRACE)
3968 # include <execinfo.h>
3971 #ifdef SAVE_CALL_CHAIN
3973 #if NARGS == 0 && NFRAMES % 2 == 0 /* No padding */ \
3974 && defined(GC_HAVE_BUILTIN_BACKTRACE)
/* Fill info[] with the return pcs of up to NFRAMES of our callers,  */
/* using the compiler/libc builtin backtrace().  K&R-style           */
/* definition; npcs and i are declared on elided lines.              */
3976 void GC_save_callers (info)
3977 struct callinfo info[NFRAMES];
3979 void * tmp_info[NFRAMES + 1];
3981 # define IGNORE_FRAMES 1
3983 /* We retrieve NFRAMES+1 pc values, but discard the first, since it */
3984 /* points to our own frame. */
3985 GC_ASSERT(sizeof(struct callinfo) == sizeof(void *));
3986 npcs = backtrace((void **)tmp_info, NFRAMES + IGNORE_FRAMES);
3987 BCOPY(tmp_info+IGNORE_FRAMES, info, (npcs - IGNORE_FRAMES) * sizeof(void *));
/* Zero-terminate: clear any entries backtrace() did not fill. */
3988 for (i = npcs - IGNORE_FRAMES; i < NFRAMES; ++i) info[i].ci_pc = 0;
3991 #else /* No builtin backtrace; do it ourselves */
3993 #if (defined(OPENBSD) || defined(NETBSD)) && defined(SPARC)
3994 # define FR_SAVFP fr_fp
3995 # define FR_SAVPC fr_pc
3997 # define FR_SAVFP fr_savfp
3998 # define FR_SAVPC fr_savpc
4001 #if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
/* Fill info[] with up to NFRAMES caller frames by walking saved frame */
/* pointers, starting from %ebp (x86/gcc) or the flushed register      */
/* windows (SPARC), stopping at GC_stackbottom.                        */
/* NOTE(review): the original numbering is non-contiguous; locals such */
/* as fp and nframes and several braces appear to have been elided.    */
4007 void GC_save_callers (info)
4008 struct callinfo info[NFRAMES];
4010 struct frame *frame;
4014 /* We assume this is turned on only with gcc as the compiler. */
4015 asm("movl %%ebp,%0" : "=r"(frame));
4018 frame = (struct frame *) GC_save_regs_in_stack ();
4019 fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
/* Walk toward colder frames until the stack bottom is reached or */
/* NFRAMES entries have been collected.                           */
4022 for (; (!(fp HOTTER_THAN frame) && !(GC_stackbottom HOTTER_THAN (ptr_t)fp)
4023 && (nframes < NFRAMES));
4024 fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
4027 info[nframes].ci_pc = fp->FR_SAVPC;
/* Arguments are stored bit-complemented; GC_print_callers applies */
/* the matching ~ when printing — presumably so the saved values   */
/* aren't mistaken for pointers by the collector (TODO confirm).   */
4029 for (i = 0; i < NARGS; i++) {
4030 info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
4032 # endif /* NARGS > 0 */
/* Zero-terminate the list if it is shorter than NFRAMES. */
4034 if (nframes < NFRAMES) info[nframes].ci_pc = 0;
4037 #endif /* No builtin backtrace */
4039 #endif /* SAVE_CALL_CHAIN */
4041 #ifdef NEED_CALLINFO
4043 /* Print info to stderr. We do NOT hold the allocation lock */
/* Print a call chain previously captured by GC_save_callers to the GC */
/* error stream.  For each pc it tries backtrace_symbols() for a name, */
/* and on Linux shells out to addr2line for a file:line; falls back to */
/* a raw "##PC##= 0x..." form.  Guards against reentrancy (we may      */
/* allocate while printing) via reentry_count.                         */
/* NOTE(review): the original numbering is non-contiguous; locals      */
/* (i, j, buf, sym_name, pipe, ret_code, result_len, old_preload),     */
/* the reentry_count increment/decrement and many braces appear to     */
/* have been elided from this listing.                                 */
4044 void GC_print_callers (info)
4045 struct callinfo info[NFRAMES];
4048 static int reentry_count = 0;
4049 GC_bool stop = FALSE;
4051 /* FIXME: This should probably use a different lock, so that we */
4052 /* become callable with or without the allocation lock. */
4058 GC_err_printf0("\tCaller at allocation:\n");
4060 GC_err_printf0("\tCall chain at allocation:\n");
4062 for (i = 0; i < NFRAMES && !stop ; i++) {
4063 if (info[i].ci_pc == 0) break;
/* Print the saved arguments; they were stored complemented by */
/* GC_save_callers, so undo that here.                         */
4068 GC_err_printf0("\t\targs: ");
4069 for (j = 0; j < NARGS; j++) {
4070 if (j != 0) GC_err_printf0(", ");
4071 GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]),
4072 ~(info[i].ci_arg[j]));
4074 GC_err_printf0("\n");
4077 if (reentry_count > 1) {
4078 /* We were called during an allocation during */
4079 /* a previous GC_print_callers call; punt. */
4080 GC_err_printf1("\t\t##PC##= 0x%lx\n", info[i].ci_pc);
/* Preferred path: ask the runtime for a symbolic name. */
4087 # if defined(GC_HAVE_BUILTIN_BACKTRACE) \
4088 && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
4090 backtrace_symbols((void **)(&(info[i].ci_pc)), 1);
4091 char *name = sym_name[0];
/* Fallback: raw pc value. */
4095 sprintf(buf, "##PC##= 0x%lx", info[i].ci_pc);
4097 # if defined(LINUX) && !defined(SMALL_CONFIG)
4098 /* Try for a line number. */
4101 static char exe_name[EXE_SZ];
4103 char cmd_buf[CMD_SZ];
4104 # define RESULT_SZ 200
4105 static char result_buf[RESULT_SZ];
4108 # define PRELOAD_SZ 200
4109 char preload_buf[PRELOAD_SZ];
4110 static GC_bool found_exe_name = FALSE;
4111 static GC_bool will_fail = FALSE;
4113 /* Try to get it via a hairy and expensive scheme. */
4114 /* First we get the name of the executable: */
4115 if (will_fail) goto out;
4116 if (!found_exe_name) {
4117 ret_code = readlink("/proc/self/exe", exe_name, EXE_SZ);
4118 if (ret_code < 0 || ret_code >= EXE_SZ
4119 || exe_name[0] != '/') {
4120 will_fail = TRUE; /* Don't try again. */
4123 exe_name[ret_code] = '\0';
4124 found_exe_name = TRUE;
4126 /* Then we use popen to start addr2line -e <exe> <addr> */
4127 /* There are faster ways to do this, but hopefully this */
4128 /* isn't time critical. */
4129 sprintf(cmd_buf, "/usr/bin/addr2line -f -e %s 0x%lx", exe_name,
4130 (unsigned long)info[i].ci_pc);
/* Temporarily drop LD_PRELOAD so the child (addr2line) isn't run */
/* with our preloaded GC; restore it afterwards.                  */
4131 old_preload = getenv ("LD_PRELOAD");
4132 if (0 != old_preload) {
4133 if (strlen (old_preload) >= PRELOAD_SZ) {
4137 strcpy (preload_buf, old_preload);
4138 unsetenv ("LD_PRELOAD");
4140 pipe = popen(cmd_buf, "r");
4141 if (0 != old_preload
4142 && 0 != setenv ("LD_PRELOAD", preload_buf, 0)) {
4143 WARN("Failed to reset LD_PRELOAD\n", 0);
4146 || (result_len = fread(result_buf, 1, RESULT_SZ - 1, pipe))
4148 if (pipe != NULL) pclose(pipe);
/* Strip a trailing newline and NUL-terminate the addr2line output. */
4152 if (result_buf[result_len - 1] == '\n') --result_len;
4153 result_buf[result_len] = 0;
/* "??" or a ":0" suffix means addr2line could not resolve the pc. */
4154 if (result_buf[0] == '?'
4155 || result_buf[result_len-2] == ':'
4156 && result_buf[result_len-1] == '0') {
4160 /* Get rid of embedded newline, if any. Test for "main" */
4162 char * nl = strchr(result_buf, '\n');
4163 if (nl != NULL && nl < result_buf + result_len) {
4166 if (strncmp(result_buf, "main", nl - result_buf) == 0) {
4170 if (result_len < RESULT_SZ - 25) {
4171 /* Add in hex address */
4172 sprintf(result_buf + result_len, " [0x%lx]",
4173 (unsigned long)info[i].ci_pc);
4180 GC_err_printf1("\t\t%s\n", name);
4181 # if defined(GC_HAVE_BUILTIN_BACKTRACE) \
4182 && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
4183 free(sym_name); /* May call GC_free; that's OK */
4192 #endif /* NEED_CALLINFO */
4196 #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
4198 /* Dump /proc/self/maps to GC_stderr, to enable looking up names for
4199 addresses in FIND_LEAK output. */
/* Callback for GC_apply_to_maps: write the /proc/self/maps buffer to */
/* the GC error stream.                                               */
4201 static word dump_maps(char *maps)
4203 GC_err_write(maps, strlen(maps));
4207 void GC_print_address_map()
4209 GC_err_printf0("---------- Begin address map ----------\n");
4210 GC_apply_to_maps(dump_maps);
4211 GC_err_printf0("---------- End address map ----------\n");