2 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
3 * Copyright (c) 1997 by Silicon Graphics. All rights reserved.
5 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
6 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
8 * Permission is hereby granted to use or copy this program
9 * for any purpose, provided the above notices are retained on all copies.
10 * Permission to modify the code and to distribute modified code is granted,
11 * provided the above notices are retained, and a notice that the code was
12 * modified is included with the above copyright notice.
14 * Original author: Bill Janssen
15 * Heavily modified by Hans Boehm and others
19 * This is incredibly OS specific code for tracking down data sections in
20 * dynamic libraries. There appears to be no way of doing this quickly
21 * without groveling through undocumented data structures. We would argue
22 * that this is a bug in the design of the dlopen interface. THIS CODE
23 * MAY BREAK IN FUTURE OS RELEASES. If this matters to you, don't hesitate
24 * to let your vendor know ...
26 * None of this is safe with dlclose and incremental collection.
27 * But then not much of anything is safe in the presence of dlclose.
29 #if (defined(__linux__) || defined(__GLIBC__) || defined(__GNU__)) \
30 && !defined(_GNU_SOURCE)
31 /* Can't test LINUX, since this must be defined before other includes */
34 #if !defined(MACOS) && !defined(_WIN32_WCE)
35 # include <sys/types.h>
37 #include "private/gc_priv.h"
39 /* BTL: avoid circular redefinition of dlopen if GC_SOLARIS_THREADS defined */
40 # if (defined(GC_PTHREADS) || defined(GC_SOLARIS_THREADS)) \
41 && defined(dlopen) && !defined(GC_USE_LD_WRAP)
42 /* To support threads in Solaris, gc.h interposes on dlopen by */
43 /* defining "dlopen" to be "GC_dlopen", which is implemented below. */
44 /* However, both GC_FirstDLOpenedLinkMap() and GC_dlopen() use the */
45 /* real system dlopen() in their implementation. We first remove */
46 /* gc.h's dlopen definition and restore it later, after GC_dlopen(). */
48 # define GC_must_restore_redefined_dlopen
50 # undef GC_must_restore_redefined_dlopen
53 #if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE)) \
55 #if !defined(SOLARISDL) && !defined(IRIX5) && \
56 !defined(MSWIN32) && !defined(MSWINCE) && \
57 !(defined(ALPHA) && defined(OSF1)) && \
58 !defined(HPUX) && !(defined(LINUX) && defined(__ELF__)) && \
59 !defined(AIX) && !defined(SCO_ELF) && !defined(DGUX) && \
60 !(defined(FREEBSD) && defined(__ELF__)) && \
61 !(defined(NETBSD) && defined(__ELF__)) && !defined(HURD) && \
62 !defined(DARWIN) && !defined(CYGWIN32)
63 --> We only know how to find data segments of dynamic libraries for the
64 --> above. Additional SVR4 variants might not be too
76 # include <machine/elf_machdep.h>
77 # define ELFSIZE ARCH_ELFSIZE
80 #if defined(LINUX) && defined(__ELF__) || defined(SCO_ELF) || \
81 (defined(FREEBSD) && defined(__ELF__)) || defined(DGUX) || \
82 (defined(NETBSD) && defined(__ELF__)) || defined(HURD)
88 /* Newer versions of GNU/Linux define this macro. We
89 * define it similarly for any ELF systems that don't. */
92 # if __ELF_WORD_SIZE == 32
93 # define ElfW(type) Elf32_##type
95 # define ElfW(type) Elf64_##type
97 # elif defined(NETBSD)
99 # define ElfW(type) Elf32_##type
101 # define ElfW(type) Elf64_##type
104 # if !defined(ELF_CLASS) || ELF_CLASS == ELFCLASS32
105 # define ElfW(type) Elf32_##type
107 # define ElfW(type) Elf64_##type
112 #if defined(SOLARISDL) && !defined(USE_PROC_FOR_LIBRARIES)
/* NOTE(review): excerpted listing -- leading tokens are original source */
/* line numbers and several intermediate lines are missing here.         */
/* Return the link_map for the first dlopen()ed object, by scanning the  */
/* _DYNAMIC array for DT_DEBUG and following r_debug.r_map.  The result  */
/* is cached in a function-static; presumably safe because the head of   */
/* the link map chain does not move -- TODO confirm on target ld.so.     */
118 static struct link_map *
119 GC_FirstDLOpenedLinkMap(void)
121 extern ElfW(Dyn) _DYNAMIC;
124 static struct link_map * cachedResult = 0;
125 static ElfW(Dyn) *dynStructureAddr = 0;
126 /* BTL: added to avoid Solaris 5.3 ld.so _DYNAMIC bug */
128 # ifdef SUNOS53_SHARED_LIB
129 /* BTL: Avoid the Solaris 5.3 bug that _DYNAMIC isn't being set */
130 /* up properly in dynamically linked .so's. This means we have */
131 /* to use its value in the set of original object files loaded */
132 /* at program startup. */
133 if( dynStructureAddr == 0 ) {
/* Resolve _DYNAMIC through the startup objects (dlopen(0)) instead of  */
/* relying on the link-time symbol.                                      */
134 void* startupSyms = dlopen(0, RTLD_LAZY);
135 dynStructureAddr = (ElfW(Dyn)*)dlsym(startupSyms, "_DYNAMIC");
138 dynStructureAddr = &_DYNAMIC;
141 if( dynStructureAddr == 0) {
144 if( cachedResult == 0 ) {
/* Scan the dynamic section until the terminating DT_NULL (tag == 0).   */
146 for( dp = ((ElfW(Dyn) *)(&_DYNAMIC)); (tag = dp->d_tag) != 0; dp++ ) {
147 if( tag == DT_DEBUG ) {
149 = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
/* Skip the entry for the main executable itself (lm); its successor    */
/* is the first dynamically opened library.                              */
150 if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
158 #endif /* SOLARISDL ... */
160 /* BTL: added to fix circular dlopen definition if GC_SOLARIS_THREADS defined */
161 # if defined(GC_must_restore_redefined_dlopen)
162 # define dlopen GC_dlopen
165 # if defined(SOLARISDL)
166 /* Add dynamic library data sections to the root set. */
167 # if !defined(PCR) && !defined(GC_SOLARIS_THREADS) && defined(THREADS)
168 --> fix mutual exclusion with dlopen
171 # ifndef USE_PROC_FOR_LIBRARIES
/* NOTE(review): excerpted listing -- intermediate lines are missing.    */
/* Solaris/ELF: walk the dlopen link map and register every writable    */
/* PT_LOAD segment of each library as a GC root.                        */
172 void GC_register_dynamic_libraries(void)
174 struct link_map *lm = GC_FirstDLOpenedLinkMap();
177 for (lm = GC_FirstDLOpenedLinkMap();
178 lm != (struct link_map *) 0; lm = lm->l_next)
182 unsigned long offset;
/* l_addr points at the library's ELF header; program headers follow    */
/* at e_phoff.                                                           */
186 e = (ElfW(Ehdr) *) lm->l_addr;
187 p = ((ElfW(Phdr) *)(((char *)(e)) + e->e_phoff));
188 offset = ((unsigned long)(lm->l_addr));
189 for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
190 switch( p->p_type ) {
/* Read-only segments cannot contain heap pointers; skip them.          */
193 if( !(p->p_flags & PF_W) ) break;
194 start = ((char *)(p->p_vaddr)) + offset;
209 # endif /* !USE_PROC ... */
210 # endif /* SOLARISDL */
212 #if defined(LINUX) && defined(__ELF__) || defined(SCO_ELF) || \
213 (defined(FREEBSD) && defined(__ELF__)) || defined(DGUX) || \
214 (defined(NETBSD) && defined(__ELF__)) || defined(HURD)
217 #ifdef USE_PROC_FOR_LIBRARIES
221 #include <sys/stat.h>
225 #define MAPS_BUF_SIZE (32*1024)
227 extern ssize_t GC_repeat_read(int fd, char *buf, size_t count);
228 /* Repeatedly read until buffer is filled, or EOF is encountered */
229 /* Defined in os_dep.c. */
231 char *GC_parse_map_entry(char *buf_ptr, ptr_t *start, ptr_t *end,
232 char **prot, unsigned int *maj_dev,
233 char **mapping_name);
234 char *GC_get_maps(void);
237 /* Sort an array of HeapSects by start address. */
238 /* Unfortunately at least some versions of */
239 /* Linux qsort end up calling malloc by way of sysconf, and hence can't */
240 /* be used in the collector. Hence we roll our own. Should be */
241 /* reasonably fast if the array is already mostly sorted, as we expect */
/* NOTE(review): excerpted listing -- the swap body and index updates   */
/* are among the missing lines.                                          */
/* Insertion-style sort of HeapSects by hs_start; hand-rolled because   */
/* qsort may allocate (see comment above).  Fast when mostly sorted.    */
243 static void sort_heap_sects(struct HeapSect *base, size_t number_of_elements)
245 signed_word n = (signed_word)number_of_elements;
246 signed_word nsorted = 1;
249 while (nsorted < n) {
/* Advance past the already-sorted prefix.                               */
250 while (nsorted < n &&
251 base[nsorted-1].hs_start < base[nsorted].hs_start)
253 if (nsorted == n) break;
254 GC_ASSERT(base[nsorted-1].hs_start > base[nsorted].hs_start);
/* Bubble the out-of-place element left to its position.                 */
256 while (i >= 0 && base[i].hs_start > base[i+1].hs_start) {
257 struct HeapSect tmp = base[i];
262 GC_ASSERT(base[nsorted-1].hs_start < base[nsorted].hs_start);
/* NOTE(review): excerpted listing -- intermediate lines are missing.    */
/* Parse /proc/self/maps text and register every writable mapping as a  */
/* root, excluding thread stacks and memory the collector itself owns   */
/* (GC_our_memory).  Returns nonzero on success (1 at end of input).    */
/* Caller must hold the GC lock (asserted below).                        */
267 STATIC word GC_register_map_entries(char *maps)
270 char *buf_ptr = maps;
273 unsigned int maj_dev;
274 ptr_t least_ha, greatest_ha;
276 ptr_t datastart = (ptr_t)(DATASTART);
278 GC_ASSERT(I_HOLD_LOCK());
/* Sort our own sections so the exclusion scan below can walk them       */
/* linearly; least_ha/greatest_ha bound everything we allocated.         */
279 sort_heap_sects(GC_our_memory, GC_n_memory);
280 least_ha = GC_our_memory[0].hs_start;
281 greatest_ha = GC_our_memory[GC_n_memory-1].hs_start
282 + GC_our_memory[GC_n_memory-1].hs_bytes;
285 buf_ptr = GC_parse_map_entry(buf_ptr, &start, &end, &prot, &maj_dev, 0);
286 if (buf_ptr == NULL) return 1;
/* prot is the "rwxp" field; prot[1] == 'w' means writable.              */
287 if (prot[1] == 'w') {
288 /* This is a writable mapping. Add it to */
289 /* the root set unless it is already otherwise */
291 if (start <= GC_stackbottom && end >= GC_stackbottom) {
292 /* Stack mapping; discard */
296 /* This may fail, since a thread may already be */
297 /* unregistered, but its thread stack may still be there. */
298 /* That can fail because the stack may disappear while */
299 /* we're marking. Thus the marker is, and has to be */
300 /* prepared to recover from segmentation faults. */
302 if (GC_segment_is_thread_stack(start, end)) continue;
304 /* FIXME: NPTL squirrels */
305 /* away pointers in pieces of the stack segment that we */
306 /* don't scan. We work around this */
307 /* by treating anything allocated by libpthread as */
308 /* uncollectable, as we do in some other cases. */
309 /* A specifically identified problem is that */
310 /* thread stacks contain pointers to dynamic thread */
311 /* vectors, which may be reused due to thread caching. */
312 /* They may not be marked if the thread is still live. */
313 /* This specific instance should be addressed by */
314 /* INCLUDE_LINUX_THREAD_DESCR, but that doesn't quite */
315 /* seem to suffice. */
316 /* We currently trace entire thread stacks, if they are */
317 /* are currently cached but unused. This is */
318 /* very suboptimal for performance reasons. */
320 /* We no longer exclude the main data segment. */
321 if (end <= least_ha || start >= greatest_ha) {
322 /* The easy case; just trace entire segment */
323 GC_add_roots_inner((char *)start, (char *)end, TRUE);
/* Otherwise the mapping overlaps collector-owned memory: register only */
/* the gaps between GC_our_memory sections.                              */
326 /* Add sections that dont belong to us. */
328 while (GC_our_memory[i].hs_start + GC_our_memory[i].hs_bytes
331 GC_ASSERT(i < GC_n_memory);
332 if (GC_our_memory[i].hs_start <= start) {
333 start = GC_our_memory[i].hs_start
334 + GC_our_memory[i].hs_bytes;
337 while (i < GC_n_memory && GC_our_memory[i].hs_start < end
339 if ((char *)start < GC_our_memory[i].hs_start)
340 GC_add_roots_inner((char *)start,
341 GC_our_memory[i].hs_start, TRUE);
342 start = GC_our_memory[i].hs_start
343 + GC_our_memory[i].hs_bytes;
/* Register the tail past the last overlapping section.                  */
347 GC_add_roots_inner((char *)start, (char *)end, TRUE);
/* NOTE(review): excerpted listing -- function bodies are incomplete.    */
/* /proc-based variant: everything comes from the maps file.             */
353 void GC_register_dynamic_libraries(void)
355 if (!GC_register_map_entries(GC_get_maps()))
356 ABORT("Failed to read /proc for library registration.");
359 /* We now take care of the main data segment ourselves: */
/* Returns FALSE here: the maps walk above already covered the main     */
/* data segment -- TODO confirm against missing body.                    */
360 GC_bool GC_register_main_static_data(void)
365 # define HAVE_REGISTER_MAIN_STATIC_DATA
367 #endif /* USE_PROC_FOR_LIBRARIES */
369 #if !defined(USE_PROC_FOR_LIBRARIES)
370 /* The following is the preferred way to walk dynamic libraries */
371 /* For glibc 2.2.4+. Unfortunately, it doesn't work for older */
372 /* versions. Thanks to Jakub Jelinek for most of the code. */
374 # if (defined(LINUX) || defined (__GLIBC__)) /* Are others OK here, too? */ \
375 && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 2) \
376 || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 2 && defined(DT_CONFIG)))
377 /* We have the header files for a glibc that includes dl_iterate_phdr. */
378 /* It may still not be available in the library on the target system. */
379 /* Thus we also treat it as a weak symbol. */
380 #define HAVE_DL_ITERATE_PHDR
381 #pragma weak dl_iterate_phdr
384 # if (defined(FREEBSD) && __FreeBSD__ >= 7)
385 /* On the FreeBSD system, any target system at major version 7 shall */
386 /* have dl_iterate_phdr; therefore, we need not make it weak as above. */
387 #define HAVE_DL_ITERATE_PHDR
390 #if defined(HAVE_DL_ITERATE_PHDR)
394 /* Instead of registering PT_LOAD sections directly, we keep them */
395 /* in a temporary list, and filter them by excluding PT_GNU_RELRO */
396 /* segments. Processing PT_GNU_RELRO sections with */
397 /* GC_exclude_static_roots instead would be superficially cleaner. But */
398 /* it runs into trouble if a client registers an overlapping segment, */
399 /* which unfortunately seems quite possible. */
401 #define MAX_LOAD_SEGS MAX_ROOT_SETS
/* NOTE(review): excerpted listing -- the start/end/start2/end2 member  */
/* declarations are among the missing lines.                             */
/* Temporary list of PT_LOAD segments, filtered against PT_GNU_RELRO    */
/* before being registered as roots (see comment above).                 */
403 static struct load_segment {
406 /* Room for a second segment if we remove a RELRO segment */
407 /* from the middle. */
410 } load_segs[MAX_LOAD_SEGS];
412 static int n_load_segs;
414 # endif /* PT_GNU_RELRO */
416 /* A user-supplied routine that is called to determine if a DSO must
417 be scanned by the gc. */
/* Set via GC_register_has_static_roots_callback(); may stay NULL.      */
418 static int (GC_CALLBACK * GC_has_static_roots)(const char *, void *, size_t);
/* NOTE(review): excerpted listing -- intermediate lines are missing.    */
/* dl_iterate_phdr callback: collects writable PT_LOAD segments (into   */
/* load_segs when RELRO filtering is compiled in, else registered        */
/* directly) and carves PT_GNU_RELRO ranges out of recorded segments.    */
/* K&R-style parameter list is in the original; kept byte-identical.     */
420 static int GC_register_dynlib_callback(info, size, ptr)
421 struct dl_phdr_info * info;
425 const ElfW(Phdr) * p;
429 /* Make sure struct dl_phdr_info is at least as big as we need. */
430 if (size < offsetof (struct dl_phdr_info, dlpi_phnum)
431 + sizeof (info->dlpi_phnum))
435 for( i = 0; i < (int)(info->dlpi_phnum); ((i++),(p++)) ) {
436 switch( p->p_type ) {
439 /* This entry is known to be constant and will eventually be remapped
440 read-only. However, the address range covered by this entry is
441 typically a subset of a previously encountered `LOAD' segment, so
442 we need to exclude it. */
/* Locate the recorded LOAD segment containing this RELRO range and     */
/* split it: [start2,end2] keeps the tail beyond the RELRO region.      */
446 start = ((ptr_t)(p->p_vaddr)) + info->dlpi_addr;
447 end = start + p->p_memsz;
448 for (j = n_load_segs; --j >= 0; ) {
449 if (start >= load_segs[j].start && start < load_segs[j].end) {
450 if (load_segs[j].start2 != 0) {
451 WARN("More than one GNU_RELRO segment per load seg\n",0);
453 GC_ASSERT(end <= load_segs[j].end);
454 /* Remove from the existing load segment */
455 load_segs[j].end2 = load_segs[j].end;
456 load_segs[j].end = start;
457 load_segs[j].start2 = end;
461 if (j == 0) WARN("Failed to find PT_GNU_RELRO segment"
462 " inside PT_LOAD region", 0);
/* PT_LOAD case: only writable segments can hold pointers.              */
471 if( !(p->p_flags & PF_W) ) break;
472 start = ((char *)(p->p_vaddr)) + info->dlpi_addr;
473 end = start + p->p_memsz;
/* Give the client filter a chance to exclude this DSO entirely.        */
475 if (GC_has_static_roots
476 && !GC_has_static_roots(info->dlpi_name, start, p->p_memsz))
479 if (n_load_segs >= MAX_LOAD_SEGS) ABORT("Too many PT_LOAD segs");
480 load_segs[n_load_segs].start = start;
481 load_segs[n_load_segs].end = end;
482 load_segs[n_load_segs].start2 = 0;
483 load_segs[n_load_segs].end2 = 0;
486 GC_add_roots_inner(start, end, TRUE);
487 # endif /* PT_GNU_RELRO */
/* Tell the caller (via ptr) that the callback actually ran.            */
495 * (int *)ptr = 1; /* Signal that we were called */
499 /* Return TRUE if we succeed, FALSE if dl_iterate_phdr wasn't there. */
/* NOTE(review): excerpted listing -- intermediate lines are missing.    */
/* Preferred registration path: returns TRUE if dl_iterate_phdr exists  */
/* (it is a weak symbol) and was used, FALSE otherwise so the caller    */
/* can fall back to the link-map walk.                                   */
501 GC_bool GC_register_dynamic_libraries_dl_iterate_phdr(void)
503 if (dl_iterate_phdr) {
504 int did_something = 0;
507 static GC_bool excluded_segs = FALSE;
/* Keep load_segs itself out of the root set, once.                      */
509 if (!excluded_segs) {
510 GC_exclude_static_roots((ptr_t)load_segs,
511 (ptr_t)load_segs + sizeof(load_segs));
512 excluded_segs = TRUE;
515 dl_iterate_phdr(GC_register_dynlib_callback, &did_something);
/* Now register the RELRO-filtered pieces collected by the callback.    */
520 for (i = 0; i < n_load_segs; ++i) {
521 if (load_segs[i].end > load_segs[i].start) {
522 GC_add_roots_inner(load_segs[i].start, load_segs[i].end, TRUE);
524 if (load_segs[i].end2 > load_segs[i].start2) {
525 GC_add_roots_inner(load_segs[i].start2, load_segs[i].end2, TRUE);
530 /* dl_iterate_phdr may forget the static data segment in */
531 /* statically linked executables. */
532 GC_add_roots_inner(DATASTART, (char *)(DATAEND), TRUE);
533 # if defined(DATASTART2)
534 GC_add_roots_inner(DATASTART2, (char *)(DATAEND2), TRUE);
544 /* Do we need to separately register the main static data segment? */
/* Only when the (weak) dl_iterate_phdr symbol is absent at run time.   */
545 GC_bool GC_register_main_static_data(void)
547 return (dl_iterate_phdr == 0);
550 #define HAVE_REGISTER_MAIN_STATIC_DATA
552 # else /* !LINUX || version(glibc) < 2.2.4 */
554 /* Dynamic loading code for Linux running ELF. Somewhat tested on
555 * Linux/x86, untested but hopefully should work on Linux/Alpha.
556 * This code was derived from the Solaris/ELF support. Thanks to
557 * whatever kind soul wrote that. - Patrick Bridges */
559 /* This doesn't necessarily work in all cases, e.g. with preloaded
560 * dynamic libraries. */
563 # include <sys/exec_elf.h>
564 /* for compatibility with 1.4.x */
/* _DYNAMIC is weak so statically linked executables (where it is       */
/* absent) still link; callers must tolerate a null/empty array.         */
582 # pragma weak _DYNAMIC
584 extern ElfW(Dyn) _DYNAMIC[];
/* NOTE(review): excerpted listing -- intermediate lines are missing.    */
/* Generic ELF version of the Solaris routine above: find DT_DEBUG in   */
/* _DYNAMIC, follow r_debug.r_map, cache the first dlopen()ed entry.     */
586 static struct link_map *
587 GC_FirstDLOpenedLinkMap(void)
590 static struct link_map *cachedResult = 0;
595 if( cachedResult == 0 ) {
597 for( dp = _DYNAMIC; (tag = dp->d_tag) != 0; dp++ ) {
598 if( tag == DT_DEBUG ) {
600 = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
601 if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
/* NOTE(review): excerpted listing -- intermediate lines are missing.    */
/* ELF fallback: prefer dl_iterate_phdr when available, otherwise walk  */
/* the link map and register writable PT_LOAD segments.                  */
610 void GC_register_dynamic_libraries(void)
615 # ifdef HAVE_DL_ITERATE_PHDR
/* If the modern path worked we are done (its return path is among the  */
/* missing lines).                                                       */
616 if (GC_register_dynamic_libraries_dl_iterate_phdr()) {
620 lm = GC_FirstDLOpenedLinkMap();
621 for (lm = GC_FirstDLOpenedLinkMap();
622 lm != (struct link_map *) 0; lm = lm->l_next)
626 unsigned long offset;
630 e = (ElfW(Ehdr) *) lm->l_addr;
631 p = ((ElfW(Phdr) *)(((char *)(e)) + e->e_phoff));
632 offset = ((unsigned long)(lm->l_addr));
633 for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
634 switch( p->p_type ) {
/* Skip non-writable segments; register the rest as roots.              */
637 if( !(p->p_flags & PF_W) ) break;
638 start = ((char *)(p->p_vaddr)) + offset;
639 GC_add_roots_inner(start, start + p->p_memsz, TRUE);
649 #endif /* !USE_PROC_FOR_LIBRARIES */
653 #if defined(IRIX5) || (defined(USE_PROC_FOR_LIBRARIES) && !defined(LINUX))
655 #include <sys/procfs.h>
656 #include <sys/stat.h>
660 #include <signal.h> /* Only for the following test. */
665 extern void * GC_roots_present(ptr_t);
666 /* The type is a lie, since the real type doesn't make sense here, */
667 /* and we only test for NULL. */
670 /* We use /proc to track down all parts of the address space that are */
671 /* mapped by the process, and throw out regions we know we shouldn't */
672 /* worry about. This may also work under other SVR4 variants. */
/* NOTE(review): excerpted listing -- intermediate lines are missing.    */
/* IRIX / generic SVR4: enumerate all mappings via /proc PIOC* ioctls   */
/* and register readable+writable regions that are not already roots,   */
/* heap, stacks, or apparent text segments.                              */
673 void GC_register_dynamic_libraries(void)
677 static prmap_t * addr_map = 0;
678 static int current_sz = 0; /* Number of records currently in addr_map */
679 static int needed_sz; /* Required size of addr_map */
684 ptr_t heap_start = HEAP_START;
685 ptr_t heap_end = heap_start;
689 # endif /* SOLARISDL */
692 sprintf(buf, "/proc/%ld", (long)getpid());
693 /* The above generates a lint complaint, since pid_t varies. */
694 /* It's unclear how to improve this. */
695 fd = open(buf, O_RDONLY);
697 ABORT("/proc open failed");
/* First ask how many map records exist, then fetch them.               */
700 if (ioctl(fd, PIOCNMAP, &needed_sz) < 0) {
701 GC_err_printf("fd = %d, errno = %d\n", fd, errno);
702 ABORT("/proc PIOCNMAP ioctl failed");
704 if (needed_sz >= current_sz) {
/* Grow geometrically; the extra slot holds the zero terminator record. */
705 current_sz = needed_sz * 2 + 1;
706 /* Expansion, plus room for 0 record */
707 addr_map = (prmap_t *)GC_scratch_alloc((word)
708 (current_sz * sizeof(prmap_t)));
710 if (ioctl(fd, PIOCMAP, addr_map) < 0) {
711 GC_err_printf("fd = %d, errno = %d, needed_sz = %d, addr_map = %p\n",
712 fd, errno, needed_sz, addr_map);
713 ABORT("/proc PIOCMAP ioctl failed");
715 if (GC_n_heap_sects > 0) {
716 heap_end = GC_heap_sects[GC_n_heap_sects-1].hs_start
717 + GC_heap_sects[GC_n_heap_sects-1].hs_bytes;
718 if (heap_end < GC_scratch_last_end_ptr) heap_end = GC_scratch_last_end_ptr;
720 for (i = 0; i < needed_sz; i++) {
721 flags = addr_map[i].pr_mflags;
/* Break (sbrk heap), stacks, physical and uncached mappings are never  */
/* library data.                                                         */
722 if ((flags & (MA_BREAK | MA_STACK | MA_PHYS
723 | MA_FETCHOP | MA_NOTCACHED)) != 0) goto irrelevant;
724 if ((flags & (MA_READ | MA_WRITE)) != (MA_READ | MA_WRITE))
726 /* The latter test is empirically useless in very old Irix */
727 /* versions. Other than the */
728 /* main data and stack segments, everything appears to be */
729 /* mapped readable, writable, executable, and shared(!!). */
730 /* This makes no sense to me. - HB */
731 start = (ptr_t)(addr_map[i].pr_vaddr);
732 if (GC_roots_present(start)) goto irrelevant;
733 if (start < heap_end && start >= heap_start)
736 if (GC_is_thread_stack(start)) goto irrelevant;
737 # endif /* MMAP_STACKS */
739 limit = start + addr_map[i].pr_size;
740 /* The following seemed to be necessary for very old versions */
741 /* of Irix, but it has been reported to discard relevant */
742 /* segments under Irix 6.5. */
744 if (addr_map[i].pr_off == 0 && strncmp(start, ELFMAG, 4) == 0) {
745 /* Discard text segments, i.e. 0-offset mappings against */
746 /* executable files which appear to have ELF headers. */
749 # define MAP_IRR_SZ 10
750 static ptr_t map_irr[MAP_IRR_SZ];
751 /* Known irrelevant map entries */
752 static int n_irr = 0;
/* Cache of start addresses already proven irrelevant, to avoid          */
/* repeating the PIOCOPENM/stat probe below.                             */
756 for (i = 0; i < n_irr; i++) {
757 if (map_irr[i] == start) goto irrelevant;
759 arg = (caddr_t)start;
760 obj = ioctl(fd, PIOCOPENM, &arg);
/* Executable backing file (any x bit set) => treat as text, remember.  */
764 if ((buf.st_mode & 0111) != 0) {
765 if (n_irr < MAP_IRR_SZ) {
766 map_irr[n_irr++] = start;
773 GC_add_roots_inner(start, limit, TRUE);
776 /* Dont keep cached descriptor, for now. Some kernels don't like us */
777 /* to keep a /proc file descriptor around during kill -9. */
778 if (close(fd) < 0) ABORT("Couldnt close /proc file");
782 # endif /* USE_PROC || IRIX5 */
784 # if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
786 # define WIN32_LEAN_AND_MEAN
788 # include <windows.h>
791 /* We traverse the entire address space and register all segments */
792 /* that could possibly have been written to. */
794 extern GC_bool GC_is_heap_base (ptr_t p);
796 # ifdef GC_WIN32_THREADS
797 extern void GC_get_next_stack(char *start, char * limit, char **lo,
/* NOTE(review): excerpted listing -- the loop construct enclosing the  */
/* break below is among the missing lines.                               */
/* Win32 threads: register [base,limit) as roots, skipping any thread   */
/* stacks that fall inside the range.                                    */
800 void GC_cond_add_roots(char *base, char * limit)
802 char * curr_base = base;
803 char * next_stack_lo;
804 char * next_stack_hi;
806 if (base == limit) return;
/* Find the next stack at or after curr_base; add the gap before it.    */
808 GC_get_next_stack(curr_base, limit, &next_stack_lo, &next_stack_hi);
809 if (next_stack_lo >= limit) break;
810 if (next_stack_lo > curr_base)
811 GC_add_roots_inner(curr_base, next_stack_lo, TRUE);
812 curr_base = next_stack_hi;
/* Register whatever remains past the last stack.                       */
814 if (curr_base < limit) GC_add_roots_inner(curr_base, limit, TRUE);
/* NOTE(review): excerpted listing -- declarations preceding stack_top  */
/* are among the missing lines.                                          */
/* Single-threaded Win32 variant: skip only the main stack region       */
/* (between an aligned address near the current SP and GC_stackbottom). */
817 void GC_cond_add_roots(char *base, char * limit)
821 = (char *) ((word)(&dummy) & ~(GC_sysinfo.dwAllocationGranularity-1));
822 if (base == limit) return;
823 if (limit > stack_top && base < GC_stackbottom) {
824 /* Part of the stack; ignore it. */
827 GC_add_roots_inner(base, limit, TRUE);
832 /* Do we need to separately register the main static data segment? */
/* NOTE(review): two conditional variants; the selecting #if/#else      */
/* lines and one return are among the missing lines.                     */
833 GC_bool GC_register_main_static_data(void)
838 extern GC_bool GC_no_win32_dlls;
840 GC_bool GC_register_main_static_data(void)
/* When win32 DLL tracking is disabled, the main data segment must be   */
/* registered separately.                                                */
842 return GC_no_win32_dlls;
846 # define HAVE_REGISTER_MAIN_STATIC_DATA
848 # ifdef DEBUG_VIRTUALQUERY
/* Debug-only: print the interesting fields of a VirtualQuery result.   */
849 void GC_dump_meminfo(MEMORY_BASIC_INFORMATION *buf)
851 GC_printf("BaseAddress = %lx, AllocationBase = %lx, RegionSize = %lx(%lu)\n",
852 buf -> BaseAddress, buf -> AllocationBase, buf -> RegionSize,
854 GC_printf("\tAllocationProtect = %lx, State = %lx, Protect = %lx, "
856 buf -> AllocationProtect, buf -> State, buf -> Protect,
859 # endif /* DEBUG_VIRTUALQUERY */
861 extern GC_bool GC_wnt; /* Is Windows NT derivative. */
862 /* Defined and set in os_dep.c. */
/* NOTE(review): excerpted listing -- intermediate lines are missing.    */
/* Win32: VirtualQuery over the whole application address range and     */
/* register committed, writable, non-heap MEM_IMAGE regions (plus       */
/* MEM_PRIVATE on Win9x/ME), merging adjacent regions via base/limit.   */
864 void GC_register_dynamic_libraries(void)
866 MEMORY_BASIC_INFORMATION buf;
871 char * limit, * new_limit;
874 if (GC_no_win32_dlls) return;
876 base = limit = p = GC_sysinfo.lpMinimumApplicationAddress;
877 # if defined(MSWINCE) && !defined(_WIN32_WCE_EMULATION)
878 /* Only the first 32 MB of address space belongs to the current process */
879 while (p < (LPVOID)0x02000000) {
880 result = VirtualQuery(p, &buf, sizeof(buf));
882 /* Page is free; advance to the next possible allocation base */
884 (((DWORD) p + GC_sysinfo.dwAllocationGranularity)
885 & ~(GC_sysinfo.dwAllocationGranularity-1));
888 while (p < GC_sysinfo.lpMaximumApplicationAddress) {
889 result = VirtualQuery(p, &buf, sizeof(buf));
892 if (result != sizeof(buf)) {
893 ABORT("Weird VirtualQuery result");
895 new_limit = (char *)p + buf.RegionSize;
896 protect = buf.Protect;
897 if (buf.State == MEM_COMMIT
898 && (protect == PAGE_EXECUTE_READWRITE
899 || protect == PAGE_READWRITE)
900 && !GC_is_heap_base(buf.AllocationBase)
901 /* There is some evidence that we cannot always
902 * ignore MEM_PRIVATE sections under Windows ME
903 * and predecessors. Hence we now also check for
905 && (buf.Type == MEM_IMAGE ||
906 (!GC_wnt && buf.Type == MEM_PRIVATE))) {
907 # ifdef DEBUG_VIRTUALQUERY
908 GC_dump_meminfo(&buf);
/* Non-contiguous with the accumulated run: flush it and start a new    */
/* one (the base/limit updates are among the missing lines).             */
910 if ((char *)p != limit) {
911 GC_cond_add_roots(base, limit);
917 if (p > (LPVOID)new_limit /* overflow */) break;
918 p = (LPVOID)new_limit;
/* Flush the final accumulated region.                                   */
920 GC_cond_add_roots(base, limit);
923 #endif /* MSWIN32 || MSWINCE || CYGWIN32 */
925 #if defined(ALPHA) && defined(OSF1)
/* NOTE(review): excerpted listing -- intermediate lines are missing.    */
/* Tru64/OSF1: use the loader (ldr_*) API to enumerate modules and      */
/* register each writable region of every non-main module.               */
929 void GC_register_dynamic_libraries(void)
935 ldr_module_t moduleid = LDR_NULL_MODULE;
936 ldr_module_info_t moduleinfo;
937 size_t moduleinfosize = sizeof(moduleinfo);
938 size_t modulereturnsize;
942 ldr_region_info_t regioninfo;
943 size_t regioninfosize = sizeof(regioninfo);
944 size_t regionreturnsize;
946 /* Obtain id of this process */
947 mypid = ldr_my_process();
949 /* For each module */
952 /* Get the next (first) module */
953 status = ldr_next_module(mypid, &moduleid);
955 /* Any more modules? */
956 if (moduleid == LDR_NULL_MODULE)
957 break; /* No more modules */
959 /* Check status AFTER checking moduleid because */
960 /* of a bug in the non-shared ldr_next_module stub */
962 GC_printf("dynamic_load: status = %d\n", status);
964 extern char *sys_errlist[];
967 if (errno <= sys_nerr) {
968 GC_printf("dynamic_load: %s\n", sys_errlist[errno]);
970 GC_printf("dynamic_load: %d\n", errno);
973 ABORT("ldr_next_module failed");
976 /* Get the module information */
977 status = ldr_inq_module(mypid, moduleid, &moduleinfo,
978 moduleinfosize, &modulereturnsize);
980 ABORT("ldr_inq_module failed");
982 /* is module for the main program (i.e. nonshared portion)? */
983 if (moduleinfo.lmi_flags & LDR_MAIN)
984 continue; /* skip the main module */
987 GC_printf("---Module---\n");
988 GC_printf("Module ID = %16ld\n", moduleinfo.lmi_modid);
989 GC_printf("Count of regions = %16d\n", moduleinfo.lmi_nregion);
990 GC_printf("flags for module = %16lx\n", moduleinfo.lmi_flags);
991 GC_printf("pathname of module = \"%s\"\n", moduleinfo.lmi_name);
994 /* For each region in this module */
995 for (region = 0; region < moduleinfo.lmi_nregion; region++) {
997 /* Get the region information */
998 status = ldr_inq_region(mypid, moduleid, region, &regioninfo,
999 regioninfosize, &regionreturnsize);
1001 ABORT("ldr_inq_region failed");
1003 /* only process writable (data) regions */
1004 if (! (regioninfo.lri_prot & LDR_W))
1008 GC_printf("--- Region ---\n");
1009 GC_printf("Region number = %16ld\n",
1010 regioninfo.lri_region_no);
1011 GC_printf("Protection flags = %016x\n", regioninfo.lri_prot);
1012 GC_printf("Virtual address = %16p\n", regioninfo.lri_vaddr);
1013 GC_printf("Mapped address = %16p\n", regioninfo.lri_mapaddr);
1014 GC_printf("Region size = %16ld\n", regioninfo.lri_size);
1015 GC_printf("Region name = \"%s\"\n", regioninfo.lri_name);
1018 /* register region as a garbage collection root */
1019 GC_add_roots_inner (
1020 (char *)regioninfo.lri_mapaddr,
1021 (char *)regioninfo.lri_mapaddr + regioninfo.lri_size,
1034 extern char *sys_errlist[];
1035 extern int sys_nerr;
/* NOTE(review): excerpted listing -- intermediate lines are missing.    */
/* HP/UX: iterate shl_get() by ordinal index and register each shared   */
/* library's data segment [dstart,dend) as roots.                        */
1037 void GC_register_dynamic_libraries()
1040 int index = 1; /* Ordinal position in shared library search list */
1041 struct shl_descriptor *shl_desc; /* Shared library info, see dl.h */
1043 /* For each dynamic library loaded */
1046 /* Get info about next shared library */
1047 status = shl_get(index, &shl_desc);
1049 /* Check if this is the end of the list or if some error occured */
1051 # ifdef GC_HPUX_THREADS
1052 /* I've seen errno values of 0. The man page is not clear */
1053 /* as to whether errno should get set on a -1 return. */
1056 if (errno == EINVAL) {
1057 break; /* Moved past end of shared library list --> finished */
1059 if (errno <= sys_nerr) {
1060 GC_printf("dynamic_load: %s\n", sys_errlist[errno]);
1062 GC_printf("dynamic_load: %d\n", errno);
1064 ABORT("shl_get failed");
1070 GC_printf("---Shared library---\n");
1071 GC_printf("\tfilename = \"%s\"\n", shl_desc->filename);
1072 GC_printf("\tindex = %d\n", index);
1073 GC_printf("\thandle = %08x\n",
1074 (unsigned long) shl_desc->handle);
1075 GC_printf("\ttext seg. start = %08x\n", shl_desc->tstart);
1076 GC_printf("\ttext seg. end = %08x\n", shl_desc->tend);
1077 GC_printf("\tdata seg. start = %08x\n", shl_desc->dstart);
1078 GC_printf("\tdata seg. end = %08x\n", shl_desc->dend);
1079 GC_printf("\tref. count = %lu\n", shl_desc->ref_count);
1082 /* register shared library's data segment as a garbage collection root */
1083 GC_add_roots_inner((char *) shl_desc->dstart,
1084 (char *) shl_desc->dend, TRUE);
1093 #include <sys/ldr.h>
1094 #include <sys/errno.h>
/* NOTE(review): excerpted listing -- intermediate lines are missing.    */
/* AIX: loadquery(L_GETINFO) into an alloca'd buffer (doubling on       */
/* ENOMEM), then walk the ld_info chain registering each data segment.  */
1095 void GC_register_dynamic_libraries(void)
1100 struct ld_info *ldi;
1102 ldibuf = alloca(ldibuflen = 8192);
1104 while ( (len = loadquery(L_GETINFO,ldibuf,ldibuflen)) < 0) {
1105 if (errno != ENOMEM) {
1106 ABORT("loadquery failed");
1108 ldibuf = alloca(ldibuflen *= 2);
1111 ldi = (struct ld_info *)ldibuf;
/* ldinfo_next is the byte offset to the next record; 0 terminates.     */
1113 len = ldi->ldinfo_next;
1115 ldi->ldinfo_dataorg,
1116 (ptr_t)(unsigned long)ldi->ldinfo_dataorg
1117 + ldi->ldinfo_datasize,
1119 ldi = len ? (struct ld_info *)((char *)ldi + len) : 0;
1126 /* __private_extern__ hack required for pre-3.4 gcc versions. */
1127 #ifndef __private_extern__
1128 # define __private_extern__ extern
1129 # include <mach-o/dyld.h>
1130 # undef __private_extern__
1132 # include <mach-o/dyld.h>
1134 #include <mach-o/getsect.h>
1136 /*#define DARWIN_DEBUG*/
/* Mach-O sections within __DATA that may contain heap pointers:        */
/* __data, __bss and __common.                                           */
1138 const static struct {
1141 } GC_dyld_sections[] = {
1142 { SEG_DATA, SECT_DATA },
1143 { SEG_DATA, SECT_BSS },
1144 { SEG_DATA, SECT_COMMON }
/* NOTE(review): excerpted listing -- local declarations and the final  */
/* "not found" return of this helper are among the missing lines.        */
/* Map a mach_header back to its image path by scanning dyld's image    */
/* list; used only for debug output below.                               */
1148 static const char *GC_dyld_name_for_hdr(const struct GC_MACH_HEADER *hdr) {
1150 c = _dyld_image_count();
1151 for(i=0;i<c;i++) if(_dyld_get_image_header(i) == hdr)
1152 return _dyld_get_image_name(i);
1157 /* This should never be called by a thread holding the lock */
/* dyld add-image callback: register the writable __DATA sections of    */
/* the newly loaded image (addresses adjusted by the VM slide).          */
1158 static void GC_dyld_image_add(const struct GC_MACH_HEADER *hdr, intptr_t slide)
1160 unsigned long start,end,i;
1161 const struct GC_MACH_SECTION *sec;
1162 if (GC_no_dls) return;
1163 for(i=0;i<sizeof(GC_dyld_sections)/sizeof(GC_dyld_sections[0]);i++) {
1164 sec = GC_GETSECTBYNAME(hdr, GC_dyld_sections[i].seg,
1165 GC_dyld_sections[i].sect);
1166 if(sec == NULL || sec->size == 0) continue;
1167 start = slide + sec->addr;
1168 end = start + sec->size;
1169 # ifdef DARWIN_DEBUG
1170 GC_printf("Adding section at %p-%p (%lu bytes) from image %s\n",
1171 start,end,sec->size,GC_dyld_name_for_hdr(hdr))
1173 GC_add_roots((char*)start,(char*)end);
1175 # ifdef DARWIN_DEBUG
1176 GC_print_static_roots();
1180 /* This should never be called by a thread holding the lock */
/* dyld remove-image callback: mirror of GC_dyld_image_add -- drop the  */
/* same __DATA section ranges from the root set on image unload.         */
1181 static void GC_dyld_image_remove(const struct GC_MACH_HEADER *hdr,
1184 unsigned long start,end,i;
1185 const struct GC_MACH_SECTION *sec;
1186 for(i=0;i<sizeof(GC_dyld_sections)/sizeof(GC_dyld_sections[0]);i++) {
1187 sec = GC_GETSECTBYNAME(hdr, GC_dyld_sections[i].seg,
1188 GC_dyld_sections[i].sect);
1189 if(sec == NULL || sec->size == 0) continue;
1190 start = slide + sec->addr;
1191 end = start + sec->size;
1192 # ifdef DARWIN_DEBUG
1193 GC_printf("Removing section at %p-%p (%lu bytes) from image %s\n",
1194 start,end,sec->size,GC_dyld_name_for_hdr(hdr));
1196 GC_remove_roots((char*)start,(char*)end);
1198 # ifdef DARWIN_DEBUG
1199 GC_print_static_roots();
/* Darwin: intentionally empty -- registration happens via the dyld     */
/* callbacks installed by GC_init_dyld() below.                          */
1203 void GC_register_dynamic_libraries() {
1204 /* Currently does nothing. The callbacks are setup by GC_init_dyld()
1205 The dyld library takes it from there. */
1208 /* The _dyld_* functions have an internal lock so no _dyld functions
1209 can be called while the world is stopped without the risk of a deadlock.
1210 Because of this we MUST setup callbacks BEFORE we ever stop the world.
1211 This should be called BEFORE any thread in created and WITHOUT the
1212 allocation lock held. */
/* NOTE(review): excerpted listing -- intermediate lines are missing.    */
/* One-time setup of dyld add/remove-image callbacks.  Must run before  */
/* any world-stop and without the allocation lock (see comment above).  */
1214 void GC_init_dyld() {
1215 static GC_bool initialized = FALSE;
1216 char *bind_fully_env = NULL;
1218 if(initialized) return;
1220 # ifdef DARWIN_DEBUG
1221 GC_printf("Registering dyld callbacks...\n");
1224 /* Apple's Documentation:
1225 When you call _dyld_register_func_for_add_image, the dynamic linker runtime
1226 calls the specified callback (func) once for each of the images that is
1227 currently loaded into the program. When a new image is added to the program,
1228 your callback is called again with the mach_header for the new image, and the
1229 virtual memory slide amount of the new image.
1231 This WILL properly register already linked libraries and libraries
1232 linked in the future
1235 _dyld_register_func_for_add_image(GC_dyld_image_add);
1236 _dyld_register_func_for_remove_image(GC_dyld_image_remove);
1238 /* Set this early to avoid reentrancy issues. */
1241 bind_fully_env = getenv("DYLD_BIND_AT_LAUNCH");
/* Unless the user already forced full binding at launch, bind the GC's */
/* own image fully so the collector never faults in lazy stubs while    */
/* the world is stopped.                                                 */
1243 if (bind_fully_env == NULL) {
1244 # ifdef DARWIN_DEBUG
1245 GC_printf("Forcing full bind of GC code...\n");
1248 if(!_dyld_bind_fully_image_containing_address((unsigned long*)GC_malloc))
1249 ABORT("_dyld_bind_fully_image_containing_address failed");
1254 #define HAVE_REGISTER_MAIN_STATIC_DATA
/* Returns FALSE here: the add-image callback already covered the main  */
/* image -- TODO confirm against missing body.                           */
1255 GC_bool GC_register_main_static_data(void)
1257 /* Already done through dyld callbacks */
1263 #else /* !DYNAMIC_LOADING */
1267 # include "il/PCR_IL.h"
1268 # include "th/PCR_ThCtl.h"
1269 # include "mm/PCR_MM.h"
/* NOTE(review): excerpted listing -- intermediate lines are missing.    */
/* PCR: walk the loaded-file list (newest first), skip files whose load */
/* has not been committed, and register every traced segment.            */
1271 void GC_register_dynamic_libraries(void)
1273 /* Add new static data areas of dynamically loaded modules. */
1275 PCR_IL_LoadedFile * p = PCR_IL_GetLastLoadedFile();
1276 PCR_IL_LoadedSegment * q;
1278 /* Skip uncommitted files */
1279 while (p != NIL && !(p -> lf_commitPoint)) {
1280 /* The loading of this file has not yet been committed */
1281 /* Hence its description could be inconsistent. */
1282 /* Furthermore, it hasn't yet been run. Hence its data */
1283 /* segments can't possibly reference heap allocated */
1287 for (; p != NIL; p = p -> lf_prev) {
1288 for (q = p -> lf_ls; q != NIL; q = q -> ls_next) {
/* Only segments marked Traced_on participate in collection.            */
1289 if ((q -> ls_flags & PCR_IL_SegFlags_Traced_MASK)
1290 == PCR_IL_SegFlags_Traced_on) {
1292 ((char *)(q -> ls_addr),
1293 (char *)(q -> ls_addr) + q -> ls_bytes,
/* Fallback for platforms with no dynamic-loading support: there are   */
/* no library data segments to register, so this is a no-op.  Uses a   */
/* proper (void) prototype for consistency with the other definitions  */
/* of this function in the file (the original used an old-style empty  */
/* parameter list).                                                     */
void GC_register_dynamic_libraries(void) {}
1308 #endif /* !DYNAMIC_LOADING */
1310 #ifndef HAVE_REGISTER_MAIN_STATIC_DATA
1312 /* Do we need to separately register the main static data segment? */
/* NOTE(review): body missing from this excerpt; default presumably     */
/* returns TRUE since no platform path registered the main segment --   */
/* TODO confirm.                                                         */
1313 GC_bool GC_register_main_static_data(void)
1318 /* Register a routine to filter dynamic library registration. */
/* Install the user callback consulted by GC_register_dynlib_callback;  */
/* only effective on the dl_iterate_phdr path.                           */
1319 GC_API void GC_CALL GC_register_has_static_roots_callback
1320 (int (GC_CALLBACK * callback)(const char *, void *, size_t)) {
1321 # ifdef HAVE_DL_ITERATE_PHDR
1322 GC_has_static_roots = callback;
1326 #endif /* HAVE_REGISTER_MAIN_STATIC_DATA */