2 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
3 * Copyright (c) 1997 by Silicon Graphics. All rights reserved.
5 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
6 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
8 * Permission is hereby granted to use or copy this program
9 * for any purpose, provided the above notices are retained on all copies.
10 * Permission to modify the code and to distribute modified code is granted,
11 * provided the above notices are retained, and a notice that the code was
12 * modified is included with the above copyright notice.
14 * Original author: Bill Janssen
15 * Heavily modified by Hans Boehm and others
19 * This is incredibly OS specific code for tracking down data sections in
20 * dynamic libraries. There appears to be no way of doing this quickly
21 * without groveling through undocumented data structures. We would argue
22 * that this is a bug in the design of the dlopen interface. THIS CODE
23 * MAY BREAK IN FUTURE OS RELEASES. If this matters to you, don't hesitate
24 * to let your vendor know ...
26 * None of this is safe with dlclose and incremental collection.
27 * But then not much of anything is safe in the presence of dlclose.
32 #if defined(__linux__) && !defined(_GNU_SOURCE)
33 /* Can't test LINUX, since this must be define before other includes */
36 #if !defined(MACOS) && !defined(_WIN32_WCE)
37 # include <sys/types.h>
39 #include "private/gc_priv.h"
41 /* BTL: avoid circular redefinition of dlopen if GC_SOLARIS_THREADS defined */
42 # if (defined(GC_PTHREADS) || defined(GC_SOLARIS_THREADS)) \
43 && defined(dlopen) && !defined(GC_USE_LD_WRAP)
44 /* To support threads in Solaris, gc.h interposes on dlopen by */
45 /* defining "dlopen" to be "GC_dlopen", which is implemented below. */
46 /* However, both GC_FirstDLOpenedLinkMap() and GC_dlopen() use the */
47 /* real system dlopen() in their implementation. We first remove */
48 /* gc.h's dlopen definition and restore it later, after GC_dlopen(). */
50 # define GC_must_restore_redefined_dlopen
52 # undef GC_must_restore_redefined_dlopen
55 #if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE)) \
57 #if !defined(SUNOS4) && !defined(SUNOS5DL) && !defined(IRIX5) && \
58 !defined(MSWIN32) && !defined(MSWINCE) && \
59 !(defined(ALPHA) && defined(OSF1)) && \
60 !defined(HPUX) && !(defined(LINUX) && defined(__ELF__)) && \
61 !defined(RS6000) && !defined(SCO_ELF) && !defined(DGUX) && \
62 !(defined(FREEBSD) && defined(__ELF__)) && \
63 !(defined(NETBSD) && defined(__ELF__)) && !defined(HURD) && \
65 --> We only know how to find data segments of dynamic libraries for the
66 --> above. Additional SVR4 variants might not be too
80 /* struct link_map field overrides */
81 # define l_next lm_next
82 # define l_addr lm_addr
83 # define l_name lm_name
87 # include <machine/elf_machdep.h>
88 # define ELFSIZE ARCH_ELFSIZE
91 #if defined(LINUX) && defined(__ELF__) || defined(SCO_ELF) || \
92 (defined(FREEBSD) && defined(__ELF__)) || defined(DGUX) || \
93 (defined(NETBSD) && defined(__ELF__)) || defined(HURD)
99 /* Newer versions of GNU/Linux define this macro. We
100 * define it similarly for any ELF systems that don't. */
102 # if defined(FREEBSD)
103 # if __ELF_WORD_SIZE == 32
104 # define ElfW(type) Elf32_##type
106 # define ElfW(type) Elf64_##type
111 # define ElfW(type) Elf32_##type
113 # define ElfW(type) Elf64_##type
116 # if !defined(ELF_CLASS) || ELF_CLASS == ELFCLASS32
117 # define ElfW(type) Elf32_##type
119 # define ElfW(type) Elf64_##type
125 #if defined(SUNOS5DL) && !defined(USE_PROC_FOR_LIBRARIES)
/* Return the head of the list of link_map entries for dlopen'ed       */
/* objects (skipping the main program's own entry via lm->l_next).     */
/* Works by scanning _DYNAMIC for the DT_DEBUG tag and following       */
/* r_debug.r_map.                                                      */
/* NOTE(review): interior lines of this function appear to have been   */
/* elided in this extraction (embedded line numbers jump); do not      */
/* treat the fragment below as compilable as-is.                       */
131 static struct link_map *
132 GC_FirstDLOpenedLinkMap()
134     extern ElfW(Dyn) _DYNAMIC;
137     static struct link_map * cachedResult = 0;
138     static ElfW(Dyn) *dynStructureAddr = 0;
139     			/* BTL: added to avoid Solaris 5.3 ld.so _DYNAMIC bug */
141 #   ifdef SUNOS53_SHARED_LIB
142 	/* BTL: Avoid the Solaris 5.3 bug that _DYNAMIC isn't being set	*/
143 	/* up properly in dynamically linked .so's. This means we have	*/
144 	/* to use its value in the set of original object files loaded	*/
145 	/* at program startup.						*/
146 	if( dynStructureAddr == 0 ) {
            /* RTLD_LAZY handle for the whole process image; used only  */
            /* to look up the real _DYNAMIC symbol.                     */
147 	  void* startupSyms = dlopen(0, RTLD_LAZY);
148 	  dynStructureAddr = (ElfW(Dyn)*)dlsym(startupSyms, "_DYNAMIC");
151 	dynStructureAddr = &_DYNAMIC;
154     if( dynStructureAddr == 0) {
        /* Result is cached: the r_map list head does not move once     */
        /* ld.so has published DT_DEBUG.                                */
157     if( cachedResult == 0 ) {
159 	for( dp = ((ElfW(Dyn) *)(&_DYNAMIC)); (tag = dp->d_tag) != 0; dp++ ) {
160 	    if( tag == DT_DEBUG ) {
162 		    = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
163 		if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
171 #endif /* SUNOS5DL ... */
173 /* BTL: added to fix circular dlopen definition if GC_SOLARIS_THREADS defined */
174 # if defined(GC_must_restore_redefined_dlopen)
175 # define dlopen GC_dlopen
178 #if defined(SUNOS4) && !defined(USE_PROC_FOR_LIBRARIES)
181 struct link_dynamic _DYNAMIC;
/* SunOS 4 (a.out) variant: the loaded-module list hangs directly off  */
/* the link editor's _DYNAMIC structure.                               */
/* NOTE(review): fragmentary — lines elided by extraction.             */
184 static struct link_map *
185 GC_FirstDLOpenedLinkMap()
187     extern struct link_dynamic _DYNAMIC;
        /* &_DYNAMIC == 0 iff the program was statically linked; then   */
        /* there are no dynamic libraries to walk.                      */
189     if( &_DYNAMIC == 0) {
192     return(_DYNAMIC.ld_un.ld_1->ld_loaded);
195 /* Return the address of the ld.so allocated common symbol	*/
196 /* with the least address, or 0 if none.			*/
/* Scans the rtc_symb chain hanging off _DYNAMIC.ldd->ldd_cp and       */
/* keeps the minimum n_value seen.                                     */
/* NOTE(review): fragmentary — declaration of `result` and closing     */
/* lines were elided by the extraction.                                */
197 static ptr_t GC_first_common()
200     extern struct link_dynamic _DYNAMIC;
201     struct rtc_symb * curr_symbol;
203     if( &_DYNAMIC == 0) {
206     curr_symbol = _DYNAMIC.ldd -> ldd_cp;
207     for (; curr_symbol != 0; curr_symbol = curr_symbol -> rtc_next) {
209 	    || (ptr_t)(curr_symbol -> rtc_sp -> n_value) < result) {
210 	    result = (ptr_t)(curr_symbol -> rtc_sp -> n_value);
216 #endif /* SUNOS4 ... */
218 # if defined(SUNOS4) || defined(SUNOS5DL)
219 /* Add dynamic library data sections to the root set. */
220 # if !defined(PCR) && !defined(GC_SOLARIS_THREADS) && defined(THREADS)
222 --> fix mutual exclusion with dlopen
223 # endif /* We assume M3 programs don't call dlopen for now */
226 # ifndef USE_PROC_FOR_LIBRARIES
/* SunOS: walk the link_map chain and register each shared object's    */
/* writable data (a.out data+bss on SUNOS4, PT_LOAD writable segments  */
/* on SUNOS5DL), then register ld.so-allocated commons.                */
/* NOTE(review): fragmentary — #ifdef arms, brace structure and some   */
/* declarations were elided by the extraction.                         */
227 void GC_register_dynamic_libraries()
229     struct link_map *lm = GC_FirstDLOpenedLinkMap();
232     for (lm = GC_FirstDLOpenedLinkMap();
233 	 lm != (struct link_map *) 0;  lm = lm->l_next)
            /* a.out path: data segment runs from N_DATOFF to the end   */
            /* of bss, both relative to the mapping base lm_addr.       */
238 	    e = (struct exec *) lm->lm_addr;
240 		((char *) (N_DATOFF(*e) + lm->lm_addr)),
241 		((char *) (N_BSSADDR(*e) + e->a_bss + lm->lm_addr)),
            /* ELF path: iterate program headers, picking writable      */
            /* PT_LOAD segments (p_flags & PF_W).                       */
247 	    unsigned long offset;
251 	    e = (ElfW(Ehdr) *) lm->l_addr;
252 	    p = ((ElfW(Phdr) *)(((char *)(e)) + e->e_phoff));
253 	    offset = ((unsigned long)(lm->l_addr));
254 	    for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
255 		switch( p->p_type ) {
258 		    if( !(p->p_flags & PF_W) ) break;
259 		    start = ((char *)(p->p_vaddr)) + offset;
            /* Register common symbols allocated by ld.so; end found    */
            /* by probing with GC_find_limit.  Result cached across     */
            /* calls in common_start.                                   */
275 	static ptr_t common_start = 0;
277 	extern ptr_t GC_find_limit();
279 	if (common_start == 0) common_start = GC_first_common();
280 	if (common_start != 0) {
281 	    common_end = GC_find_limit(common_start, TRUE);
282 	    GC_add_roots_inner((char *)common_start, (char *)common_end, TRUE);
288 # endif /* !USE_PROC ... */
291 #if defined(LINUX) && defined(__ELF__) || defined(SCO_ELF) || \
292 (defined(FREEBSD) && defined(__ELF__)) || defined(DGUX) || \
293 (defined(NETBSD) && defined(__ELF__)) || defined(HURD)
296 #ifdef USE_PROC_FOR_LIBRARIES
300 #include <sys/stat.h>
304 #define MAPS_BUF_SIZE (32*1024)
306 extern ssize_t GC_repeat_read(int fd, char *buf, size_t count);
307 /* Repeatedly read until buffer is filled, or EOF is encountered */
308 /* Defined in os_dep.c. */
310 char *GC_parse_map_entry(char *buf_ptr, word *start, word *end,
311 char *prot_buf, unsigned int *maj_dev);
312 word GC_apply_to_maps(word (*fn)(char *));
/* Callback for GC_apply_to_maps: parse the /proc/self/maps buffer     */
/* and add every writable mapping to the root set, except thread       */
/* stacks, the main stack, and anything inside the GC heap itself.     */
/* Returns 1 when the whole buffer has been consumed.                  */
/* NOTE(review): fragmentary — several declarations (start, end, i,    */
/* prot_buf) and brace structure were elided by the extraction.        */
315 word GC_register_map_entries(char *maps)
318     char *buf_ptr = maps;
321     unsigned int maj_dev;
322     word least_ha, greatest_ha;
324     word datastart = (word)(DATASTART);
326     /* Compute heap bounds. FIXME: Should be done by add_to_heap?	*/
        /* least_ha/greatest_ha bracket all current heap sections so    */
        /* heap-internal mappings can be excluded below.                */
327       least_ha = (word)(-1);
329       for (i = 0; i < GC_n_heap_sects; ++i) {
330 	word sect_start = (word)GC_heap_sects[i].hs_start;
331 	word sect_end = sect_start + GC_heap_sects[i].hs_bytes;
332 	if (sect_start < least_ha) least_ha = sect_start;
333 	if (sect_end > greatest_ha) greatest_ha = sect_end;
335       if (greatest_ha < (word)GC_scratch_last_end_ptr)
336 	greatest_ha = (word)GC_scratch_last_end_ptr;
339 	buf_ptr = GC_parse_map_entry(buf_ptr, &start, &end, prot_buf, &maj_dev);
340 	if (buf_ptr == NULL) return 1;
        /* prot_buf is the "rwxp" permission field; [1]=='w' means      */
        /* the mapping is writable and hence a candidate root.          */
341 	if (prot_buf[1] == 'w') {
342 	    /* This is a writable mapping.  Add it to	*/
343 	    /* the root set unless it is already otherwise	*/
345 	    if (start <= (word)GC_stackbottom && end >= (word)GC_stackbottom) {
346 		/* Stack mapping; discard	*/
350 	    if (GC_segment_is_thread_stack(start, end)) continue;
352 	    /* We no longer exclude the main data segment.		*/
            /* Mappings straddling a heap boundary are presumably       */
            /* clipped in elided code; fully-contained ones skipped.    */
353 	    if (start < least_ha && end > least_ha) {
356 	    if (start < greatest_ha && end > greatest_ha) {
359 	    if (start >= least_ha && end <= greatest_ha) continue;
360 	    GC_add_roots_inner((char *)start, (char *)end, TRUE);
/* /proc-maps flavour: delegate all root discovery to                  */
/* GC_register_map_entries via GC_apply_to_maps.                       */
366 void GC_register_dynamic_libraries()
368     if (!GC_apply_to_maps(GC_register_map_entries))
369 	ABORT("Failed to read /proc for library registration.");
372 /* We now take care of the main data segment ourselves: */
/* NOTE(review): body elided; presumably returns FALSE here since the  */
/* maps walk already covers the main data segment — confirm upstream.  */
373 GC_bool GC_register_main_static_data()
378 # define HAVE_REGISTER_MAIN_STATIC_DATA
380 #endif /* USE_PROC_FOR_LIBRARIES */
382 #if !defined(USE_PROC_FOR_LIBRARIES)
383 /* The following is the preferred way to walk dynamic libraries */
384 /* For glibc 2.2.4+. Unfortunately, it doesn't work for older */
385 /* versions. Thanks to Jakub Jelinek for most of the code. */
387 # if defined(LINUX) /* Are others OK here, too? */ \
388 && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 2) \
389 || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 2 && defined(DT_CONFIG)))
391 /* We have the header files for a glibc that includes dl_iterate_phdr. */
392 /* It may still not be available in the library on the target system. */
393 /* Thus we also treat it as a weak symbol. */
394 #define HAVE_DL_ITERATE_PHDR
/* dl_iterate_phdr callback: for each loaded object, register every    */
/* writable PT_LOAD segment as a root, and set *(int*)ptr = 1 so the   */
/* caller knows the callback actually ran.                             */
/* NOTE(review): fragmentary — K&R parameter declarations for size/ptr */
/* and some brace structure were elided by the extraction.             */
396 static int GC_register_dynlib_callback(info, size, ptr)
397      struct dl_phdr_info * info;
401   const ElfW(Phdr) * p;
405   /* Make sure struct dl_phdr_info is at least as big as we need.  */
406   if (size < offsetof (struct dl_phdr_info, dlpi_phnum)
407       + sizeof (info->dlpi_phnum))
411   for( i = 0; i < (int)(info->dlpi_phnum); ((i++),(p++)) ) {
412     switch( p->p_type ) {
415 	  if( !(p->p_flags & PF_W) ) break;
416 	  start = ((char *)(p->p_vaddr)) + info->dlpi_addr;
417 	  GC_add_roots_inner(start, start + p->p_memsz, TRUE);
425   * (int *)ptr = 1;	/* Signal that we were called */
429 /* Return TRUE if we succeed, FALSE if dl_iterate_phdr wasn't there. */
/* dl_iterate_phdr is declared weak so this compiles and links even    */
/* against a glibc whose runtime lacks the function; at run time we    */
/* test the symbol's address before calling it.                        */
431 #pragma weak dl_iterate_phdr
433 GC_bool GC_register_dynamic_libraries_dl_iterate_phdr()
435   if (dl_iterate_phdr) {
436     int did_something = 0;
437     dl_iterate_phdr(GC_register_dynlib_callback, &did_something);
438     if (!did_something) {
439 	/* dl_iterate_phdr may forget the static data segment in	*/
440 	/* statically linked executables.				*/
441 	GC_add_roots_inner(DATASTART, (char *)(DATAEND), TRUE);
442 #       if defined(DATASTART2)
443 	  GC_add_roots_inner(DATASTART2, (char *)(DATAEND2), TRUE);
453 /* Do we need to separately register the main static data segment? */
/* TRUE only when the weak dl_iterate_phdr is absent at run time.      */
454 GC_bool GC_register_main_static_data()
456   return (dl_iterate_phdr == 0);
459 #define HAVE_REGISTER_MAIN_STATIC_DATA
461 # else /* !LINUX || version(glibc) < 2.2.4 */
463 /* Dynamic loading code for Linux running ELF. Somewhat tested on
464 * Linux/x86, untested but hopefully should work on Linux/Alpha.
465 * This code was derived from the Solaris/ELF support. Thanks to
466 * whatever kind soul wrote that. - Patrick Bridges */
468 /* This doesn't necessarily work in all cases, e.g. with preloaded
469 * dynamic libraries. */
472 # include <sys/exec_elf.h>
473 /* for compatibility with 1.4.x */
491 #pragma weak _DYNAMIC
493 extern ElfW(Dyn) _DYNAMIC[];
/* Linux-ELF fallback (pre-dl_iterate_phdr glibc): locate the link_map */
/* list via DT_DEBUG in _DYNAMIC, exactly as the Solaris variant does. */
/* NOTE(review): fragmentary — declarations of dp/tag/lm and closing   */
/* braces were elided by the extraction.                               */
495 static struct link_map *
496 GC_FirstDLOpenedLinkMap()
499     static struct link_map *cachedResult = 0;
504     if( cachedResult == 0 ) {
506 	for( dp = _DYNAMIC; (tag = dp->d_tag) != 0; dp++ ) {
507 	    /* FIXME: The DT_DEBUG header is not mandated by the	*/
508 	    /* ELF spec.  This code appears to be dependent on		*/
509 	    /* idiosynchracies of older GNU tool chains.  If this code	*/
510 	    /* fails for you, the real problem is probably that it is	*/
511 	    /* being used at all.  You should be getting the		*/
512 	    /* dl_iterate_phdr version.					*/
513 	    if( tag == DT_DEBUG ) {
515 		    = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
516 		if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
/* Linux-ELF: prefer the dl_iterate_phdr walk; if unavailable, fall    */
/* back to traversing the link_map list and registering each object's  */
/* writable PT_LOAD segments.                                          */
/* NOTE(review): fragmentary — variable declarations (lm, e, p, i,     */
/* start) and brace structure were elided by the extraction.           */
525 void GC_register_dynamic_libraries()
530 # ifdef HAVE_DL_ITERATE_PHDR
531     if (GC_register_dynamic_libraries_dl_iterate_phdr()) {
535     lm = GC_FirstDLOpenedLinkMap();
536     for (lm = GC_FirstDLOpenedLinkMap();
537 	 lm != (struct link_map *) 0;  lm = lm->l_next)
541 	    unsigned long offset;
545 	    e = (ElfW(Ehdr) *) lm->l_addr;
546 	    p = ((ElfW(Phdr) *)(((char *)(e)) + e->e_phoff));
547 	    offset = ((unsigned long)(lm->l_addr));
548 	    for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
549 		switch( p->p_type ) {
552 		    if( !(p->p_flags & PF_W) ) break;
553 		    start = ((char *)(p->p_vaddr)) + offset;
554 		    GC_add_roots_inner(start, start + p->p_memsz, TRUE);
564 #endif /* !USE_PROC_FOR_LIBRARIES */
568 #if defined(IRIX5) || (defined(USE_PROC_FOR_LIBRARIES) && !defined(LINUX))
570 #include <sys/procfs.h>
571 #include <sys/stat.h>
575 #include <signal.h> /* Only for the following test. */
580 extern void * GC_roots_present();
581 /* The type is a lie, since the real type doesn't make sense here, */
582 /* and we only test for NULL. */
585 /* We use /proc to track down all parts of the address space that are */
586 /* mapped by the process, and throw out regions we know we shouldn't */
587 /* worry about. This may also work under other SVR4 variants. */
/* IRIX5 / generic SVR4: enumerate all mappings of the current process */
/* via the /proc PIOC* ioctls, then register every readable+writable   */
/* region that is not a stack, heap section, already-known root, or    */
/* (heuristically) a text segment / known-irrelevant executable        */
/* mapping.                                                            */
/* NOTE(review): fragmentary — declarations (fd, buf, i, flags, obj,   */
/* arg), some #ifdef arms, and brace structure were elided by the      */
/* extraction.                                                         */
588 void GC_register_dynamic_libraries()
592     static prmap_t * addr_map = 0;
593     static int current_sz = 0;	/* Number of records currently in addr_map */
594     static int needed_sz;	/* Required size of addr_map		*/
597     register ptr_t start;
598     register ptr_t limit;
599     ptr_t heap_start = (ptr_t)HEAP_START;
600     ptr_t heap_end = heap_start;
604 #   endif /* SUNOS5DL */
607     sprintf(buf, "/proc/%d", getpid());
608 	/* The above generates a lint complaint, since pid_t varies.	*/
609 	/* It's unclear how to improve this.				*/
610     fd = open(buf, O_RDONLY);
612 	ABORT("/proc open failed");
615     if (ioctl(fd, PIOCNMAP, &needed_sz) < 0) {
616 	GC_err_printf2("fd = %d, errno = %d\n", fd, errno);
617 	ABORT("/proc PIOCNMAP ioctl failed");
        /* Grow the (scratch-allocated, never freed) map buffer with    */
        /* headroom plus space for the terminating zero record.         */
619     if (needed_sz >= current_sz) {
620 	current_sz = needed_sz * 2 + 1;
621 			/* Expansion, plus room for 0 record */
622 	addr_map = (prmap_t *)GC_scratch_alloc((word)
623 						(current_sz * sizeof(prmap_t)));
625     if (ioctl(fd, PIOCMAP, addr_map) < 0) {
626         GC_err_printf4("fd = %d, errno = %d, needed_sz = %d, addr_map = 0x%X\n",
627                         fd, errno, needed_sz, addr_map);
628 	ABORT("/proc PIOCMAP ioctl failed");
630     if (GC_n_heap_sects > 0) {
631 	heap_end = GC_heap_sects[GC_n_heap_sects-1].hs_start
632 			+ GC_heap_sects[GC_n_heap_sects-1].hs_bytes;
633 	if (heap_end < GC_scratch_last_end_ptr) heap_end = GC_scratch_last_end_ptr;
635     for (i = 0; i < needed_sz; i++) {
636         flags = addr_map[i].pr_mflags;
            /* Break (sbrk heap), stacks, physical and special memory   */
            /* are never roots.                                         */
637         if ((flags & (MA_BREAK | MA_STACK | MA_PHYS
638 		      | MA_FETCHOP | MA_NOTCACHED)) != 0) goto irrelevant;
639         if ((flags & (MA_READ | MA_WRITE)) != (MA_READ | MA_WRITE))
641 	    /* The latter test is empirically useless in very old Irix	*/
642 	    /* versions.  Other than the			*/
643 	    /* main data and stack segments, everything appears to be	*/
644 	    /* mapped readable, writable, executable, and shared(!!).	*/
645 	    /* This makes no sense to me.	- HB			*/
646         start = (ptr_t)(addr_map[i].pr_vaddr);
647         if (GC_roots_present(start)) goto irrelevant;
648         if (start < heap_end && start >= heap_start)
651 	if (GC_is_thread_stack(start)) goto irrelevant;
652 #	endif /* MMAP_STACKS */
654         limit = start + addr_map[i].pr_size;
655 	/* The following seemed to be necessary for very old versions 	*/
656 	/* of Irix, but it has been reported to discard relevant	*/
657 	/* segments under Irix 6.5.  					*/
659 	if (addr_map[i].pr_off == 0 && strncmp(start, ELFMAG, 4) == 0) {
660 	    /* Discard text segments, i.e. 0-offset mappings against	*/
661 	    /* executable files which appear to have ELF headers.	*/
            /* Small static cache of start addresses already proven     */
            /* irrelevant, to avoid repeated PIOCOPENM/fstat probing.   */
664 #	    define MAP_IRR_SZ 10
665 	    static ptr_t map_irr[MAP_IRR_SZ];
666 				        /* Known irrelevant map entries	*/
667 	    static int n_irr = 0;
671 	    for (i = 0; i < n_irr; i++) {
672 	        if (map_irr[i] == start) goto irrelevant;
674 	    arg = (caddr_t)start;
675 	    obj = ioctl(fd, PIOCOPENM, &arg);
            /* Executable backing file (mode & 0111) => remember as     */
            /* irrelevant.                                              */
679 	    if ((buf.st_mode & 0111) != 0) {
680 	        if (n_irr < MAP_IRR_SZ) {
681 	            map_irr[n_irr++] = start;
688 	GC_add_roots_inner(start, limit, TRUE);
691     /* Dont keep cached descriptor, for now.  Some kernels don't like us */
692     /* to keep a /proc file descriptor around during kill -9.		 */
693     	if (close(fd) < 0) ABORT("Couldnt close /proc file");
697 # endif /* USE_PROC || IRIX5 */
699 # if defined(MSWIN32) || defined(MSWINCE)
701 # define WIN32_LEAN_AND_MEAN
703 # include <windows.h>
706 /* We traverse the entire address space and register all segments */
707 /* that could possibly have been written to. */
709 extern GC_bool GC_is_heap_base (ptr_t p);
711 # ifdef GC_WIN32_THREADS
712 extern void GC_get_next_stack(char *start, char **lo, char **hi);
/* Win32 (threaded): add [base,limit) as roots, but carve out any      */
/* thread stacks lying inside the range using GC_get_next_stack.       */
/* NOTE(review): fragmentary — the enclosing loop header and closing   */
/* braces were elided by the extraction.                               */
713   void GC_cond_add_roots(char *base, char * limit)
715     char * curr_base = base;
716     char * next_stack_lo;
717     char * next_stack_hi;
719     if (base == limit) return;
721       GC_get_next_stack(curr_base, &next_stack_lo, &next_stack_hi);
722       if (next_stack_lo >= limit) break;
723       GC_add_roots_inner(curr_base, next_stack_lo, TRUE);
724       curr_base = next_stack_hi;
726     if (curr_base < limit) GC_add_roots_inner(curr_base, limit, TRUE);
/* Win32 (single-threaded) variant: only the main stack needs to be    */
/* excluded; stack_top is the dummy local's address rounded down to    */
/* allocation granularity.                                             */
729   void GC_cond_add_roots(char *base, char * limit)
733 	= (char *) ((word)(&dummy) & ~(GC_sysinfo.dwAllocationGranularity-1));
734     if (base == limit) return;
735     if (limit > stack_top && base < GC_stackbottom) {
736   	  /* Part of the stack; ignore it. */
739     GC_add_roots_inner(base, limit, TRUE);
744   /* Do we need to separately register the main static data segment? */
745   GC_bool GC_register_main_static_data()
750   extern GC_bool GC_no_win32_dlls;
/* Main static data must be registered separately only when the win32  */
/* DLL walk is disabled.                                               */
752   GC_bool GC_register_main_static_data()
754     return GC_no_win32_dlls;
758 # define HAVE_REGISTER_MAIN_STATIC_DATA
760 /* The frame buffer testing code is dead in this version. */
761 /* We leave it here temporarily in case the switch to just */
762 /* testing for MEM_IMAGE sections causes un expected */
764 GC_bool GC_warn_fb = TRUE; /* Warn about traced likely */
765 /* graphics memory. */
766 GC_bool GC_disallow_ignore_fb = FALSE;
767 int GC_ignore_fb_mb; /* Ignore mappings bigger than the */
768 /* specified number of MB. */
769 GC_bool GC_ignore_fb = FALSE; /* Enable frame buffer */
772 /* Issue warning if tracing apparent framebuffer. */
773 /* This limits us to one warning, and it's a back door to */
776 /* Should [start, start+len) be treated as a frame buffer */
778 /* Unfortunately, we currently are not quite sure how to tell */
779 /* this automatically, and rely largely on user input. */
780 /* We expect that any mapping with type MEM_MAPPED (which */
781 /* apparently excludes library data sections) can be safely */
782 /* ignored. But we're too chicken to do that in this */
784 /* Based on a very limited sample, it appears that: */
785 /* - Frame buffer mappings appear as mappings of large */
786 /* length, usually a bit less than a power of two. */
787 /* - The definition of "a bit less" in the above cannot */
788 /* be made more precise. */
789 /* - Have a starting address at best 64K aligned. */
790 /* - Have type == MEM_MAPPED. */
/* Heuristic: should [start, start+len) be treated as a frame buffer   */
/* (and hence excluded from the root set)?  Controlled by the          */
/* GC_IGNORE_FB environment variable (optional MB threshold).          */
/* Marked dead in this version per the comment block above; kept in    */
/* case the MEM_IMAGE-only test regresses.                             */
/* NOTE(review): fragmentary — initialization guard, else-arms and     */
/* closing braces were elided by the extraction.                       */
791 static GC_bool is_frame_buffer(ptr_t start, size_t len, DWORD tp)
793     static GC_bool initialized = FALSE;
794 #   define MB (1024*1024)
795 #   define DEFAULT_FB_MB 15
798     if (GC_disallow_ignore_fb || tp != MEM_MAPPED) return FALSE;
800 	char * ignore_fb_string = GETENV("GC_IGNORE_FB");
802 	if (0 != ignore_fb_string) {
            /* Skip leading blanks; empty value means "use default".    */
803 	    while (*ignore_fb_string == ' ' || *ignore_fb_string == '\t')
805 	    if (*ignore_fb_string == '\0') {
806 		GC_ignore_fb_mb = DEFAULT_FB_MB;
808 		GC_ignore_fb_mb = atoi(ignore_fb_string);
809 		if (GC_ignore_fb_mb < MIN_FB_MB) {
810 		    WARN("Bad GC_IGNORE_FB value.  Using %ld\n", DEFAULT_FB_MB);
811 		    GC_ignore_fb_mb = DEFAULT_FB_MB;
816 	    GC_ignore_fb_mb = DEFAULT_FB_MB; /* For warning */
820     if (len >= ((size_t)GC_ignore_fb_mb << 20)) {
825 	    WARN("Possible frame buffer mapping at 0x%lx: \n"
826 		 "\tConsider setting GC_IGNORE_FB to improve performance.\n",
837 # ifdef DEBUG_VIRTUALQUERY
/* Debug helper: print the interesting fields of a VirtualQuery        */
/* result.  Compiled only under DEBUG_VIRTUALQUERY.                    */
838   void GC_dump_meminfo(MEMORY_BASIC_INFORMATION *buf)
840     GC_printf4("BaseAddress = %lx, AllocationBase = %lx, RegionSize = %lx(%lu)\n",
841 	       buf -> BaseAddress, buf -> AllocationBase, buf -> RegionSize,
843     GC_printf4("\tAllocationProtect = %lx, State = %lx, Protect = %lx, "
845 	       buf -> AllocationProtect, buf -> State, buf -> Protect,
848 # endif /* DEBUG_VIRTUALQUERY */
/* Win32/WinCE: VirtualQuery-walk the whole application address space  */
/* and register every committed, writable, MEM_IMAGE region that is    */
/* not part of the GC heap.  Contiguous qualifying regions are merged  */
/* and flushed through GC_cond_add_roots (which excludes stacks).      */
/* NOTE(review): fragmentary — declarations of p/base/protect/result   */
/* and several flush/advance lines were elided by the extraction.      */
850   void GC_register_dynamic_libraries()
852     MEMORY_BASIC_INFORMATION buf;
857     char * limit, * new_limit;
860     if (GC_no_win32_dlls) return;
862     base = limit = p = GC_sysinfo.lpMinimumApplicationAddress;
863 #   if defined(MSWINCE) && !defined(_WIN32_WCE_EMULATION)
864     /* Only the first 32 MB of address space belongs to the current process */
865     while (p < (LPVOID)0x02000000) {
866         result = VirtualQuery(p, &buf, sizeof(buf));
868             /* Page is free; advance to the next possible allocation base */
870                 (((DWORD) p + GC_sysinfo.dwAllocationGranularity)
871                  & ~(GC_sysinfo.dwAllocationGranularity-1));
874     while (p < GC_sysinfo.lpMaximumApplicationAddress) {
875         result = VirtualQuery(p, &buf, sizeof(buf));
878         if (result != sizeof(buf)) {
879             ABORT("Weird VirtualQuery result");
881         new_limit = (char *)p + buf.RegionSize;
882         protect = buf.Protect;
883         if (buf.State == MEM_COMMIT
884             && (protect == PAGE_EXECUTE_READWRITE
885                 || protect == PAGE_READWRITE)
886             && !GC_is_heap_base(buf.AllocationBase)
887 	    /* This used to check for
888 	     * !is_frame_buffer(p, buf.RegionSize, buf.Type)
889 	     * instead of just checking for MEM_IMAGE.
890 	     * If something breaks, change it back. */
891 	    && buf.Type == MEM_IMAGE) {
892 #	    ifdef DEBUG_VIRTUALQUERY
893 	      GC_dump_meminfo(&buf);
            /* Non-contiguous with the accumulated run: flush it and    */
            /* start a new [base, limit) run.                           */
895             if ((char *)p != limit) {
896                 GC_cond_add_roots(base, limit);
902         if (p > (LPVOID)new_limit /* overflow */) break;
903         p = (LPVOID)new_limit;
905     GC_cond_add_roots(base, limit);
908 #endif /* MSWIN32 || MSWINCE */
910 #if defined(ALPHA) && defined(OSF1)
/* Tru64/OSF1 on Alpha: use the ldr_* loader API to enumerate every    */
/* loaded module (skipping the main program) and register each of its  */
/* writable (LDR_W) regions as roots.                                  */
/* NOTE(review): fragmentary — status checks, #ifdef VERBOSE arms and  */
/* brace structure were elided by the extraction.                      */
914 void GC_register_dynamic_libraries()
920     ldr_module_t moduleid = LDR_NULL_MODULE;
921     ldr_module_info_t moduleinfo;
922     size_t moduleinfosize = sizeof(moduleinfo);
923     size_t modulereturnsize;
927     ldr_region_info_t regioninfo;
928     size_t regioninfosize = sizeof(regioninfo);
929     size_t regionreturnsize;
931   /* Obtain id of this process */
932     mypid = ldr_my_process();
934   /* For each module */
937       /* Get the next (first) module */
938         status = ldr_next_module(mypid, &moduleid);
940       /* Any more modules? */
941         if (moduleid == LDR_NULL_MODULE)
942             break;    /* No more modules */
944       /* Check status AFTER checking moduleid because */
945       /* of a bug in the non-shared ldr_next_module stub */
947             GC_printf1("dynamic_load: status = %ld\n", (long)status);
949                 extern char *sys_errlist[];
952                 if (errno <= sys_nerr) {
953                     GC_printf1("dynamic_load: %s\n", (long)sys_errlist[errno]);
955                     GC_printf1("dynamic_load: %d\n", (long)errno);
958             ABORT("ldr_next_module failed");
961       /* Get the module information */
962         status = ldr_inq_module(mypid, moduleid, &moduleinfo,
963                                 moduleinfosize, &modulereturnsize);
965             ABORT("ldr_inq_module failed");
967       /* is module for the main program (i.e. nonshared portion)? */
968         if (moduleinfo.lmi_flags & LDR_MAIN)
969             continue;    /* skip the main module */
972         GC_printf("---Module---\n");
973         GC_printf("Module ID            = %16ld\n", moduleinfo.lmi_modid);
974         GC_printf("Count of regions     = %16d\n", moduleinfo.lmi_nregion);
975         GC_printf("flags for module     = %16lx\n", moduleinfo.lmi_flags);
976         GC_printf("pathname of module   = \"%s\"\n", moduleinfo.lmi_name);
979       /* For each region in this module */
980         for (region = 0; region < moduleinfo.lmi_nregion; region++) {
982           /* Get the region information */
983             status = ldr_inq_region(mypid, moduleid, region, &regioninfo,
984                                     regioninfosize, &regionreturnsize);
986                 ABORT("ldr_inq_region failed");
988           /* only process writable (data) regions */
989             if (! (regioninfo.lri_prot & LDR_W))
993             GC_printf("--- Region ---\n");
994             GC_printf("Region number    = %16ld\n",
995                       regioninfo.lri_region_no);
996             GC_printf("Protection flags = %016x\n",  regioninfo.lri_prot);
997             GC_printf("Virtual address  = %16p\n",   regioninfo.lri_vaddr);
998             GC_printf("Mapped address   = %16p\n",   regioninfo.lri_mapaddr);
999             GC_printf("Region size      = %16ld\n",  regioninfo.lri_size);
1000             GC_printf("Region name      = \"%s\"\n", regioninfo.lri_name);
1003           /* register region as a garbage collection root */
1004             GC_add_roots_inner (
1005                 (char *)regioninfo.lri_mapaddr,
1006                 (char *)regioninfo.lri_mapaddr + regioninfo.lri_size,
1020 extern char *sys_errlist[];
1021 extern int sys_nerr;
/* HP/UX: walk the shared-library search list with shl_get() and       */
/* register each library's data segment [dstart, dend) as roots.       */
/* Iteration ends when shl_get returns -1 with errno == EINVAL.        */
/* NOTE(review): fragmentary — the loop header, status declaration and */
/* index increment were elided by the extraction.                      */
1023 void GC_register_dynamic_libraries()
1026   int index = 1; /* Ordinal position in shared library search list */
1027   struct shl_descriptor *shl_desc; /* Shared library info, see dl.h */
1029   /* For each dynamic library loaded */
1032       /* Get info about next shared library */
1033         status = shl_get(index, &shl_desc);
1035       /* Check if this is the end of the list or if some error occured */
1037 #	 ifdef GC_HPUX_THREADS
1038 	   /* I've seen errno values of 0.  The man page is not clear	*/
1039 	   /* as to whether errno should get set on a -1 return.	*/
1042 	   if (errno == EINVAL) {
1043 	      break; /* Moved past end of shared library list --> finished */
1045 	      if (errno <= sys_nerr) {
1046                 GC_printf1("dynamic_load: %s\n", (long) sys_errlist[errno]);
1048                GC_printf1("dynamic_load: %d\n", (long) errno);
1050               ABORT("shl_get failed");
1056           GC_printf0("---Shared library---\n");
1057           GC_printf1("\tfilename        = \"%s\"\n", shl_desc->filename);
1058           GC_printf1("\tindex           = %d\n", index);
1059           GC_printf1("\thandle          = %08x\n",
1060 					(unsigned long) shl_desc->handle);
1061           GC_printf1("\ttext seg. start = %08x\n", shl_desc->tstart);
1062           GC_printf1("\ttext seg. end   = %08x\n", shl_desc->tend);
1063           GC_printf1("\tdata seg. start = %08x\n", shl_desc->dstart);
1064           GC_printf1("\tdata seg. end   = %08x\n", shl_desc->dend);
1065           GC_printf1("\tref. count      = %lu\n", shl_desc->ref_count);
1068       /* register shared library's data segment as a garbage collection root */
1069         GC_add_roots_inner((char *) shl_desc->dstart,
1070 			   (char *) shl_desc->dend, TRUE);
1079 #include <sys/ldr.h>
1080 #include <sys/errno.h>
/* AIX: use loadquery(L_GETINFO) into a stack buffer (doubling on      */
/* ENOMEM) and register each loaded module's data region               */
/* [ldinfo_dataorg, ldinfo_dataorg + ldinfo_datasize).                 */
/* NOTE(review): fragmentary — declarations of len/ldibuf/ldibuflen,   */
/* the do-while shell and GC_add_roots_inner call line were elided.    */
1081 void GC_register_dynamic_libraries()
1086 	struct ld_info *ldi;
1088 	ldibuf = alloca(ldibuflen = 8192);
1090 	while ( (len = loadquery(L_GETINFO,ldibuf,ldibuflen)) < 0) {
1091 		if (errno != ENOMEM) {
1092 			ABORT("loadquery failed");
1094 		ldibuf = alloca(ldibuflen *= 2);
1097 	ldi = (struct ld_info *)ldibuf;
                /* ldinfo_next is the byte offset of the next record;   */
                /* 0 terminates the list.                               */
1099 		len = ldi->ldinfo_next;
1101 			ldi->ldinfo_dataorg,
1102 			(ptr_t)(unsigned long)ldi->ldinfo_dataorg
1103 			+ ldi->ldinfo_datasize,
1105 		ldi = len ? (struct ld_info *)((char *)ldi + len) : 0;
1112 /* __private_extern__ hack required for pre-3.4 gcc versions. */
1113 #ifndef __private_extern__
1114 # define __private_extern__ extern
1115 # include <mach-o/dyld.h>
1116 # undef __private_extern__
1118 # include <mach-o/dyld.h>
1120 #include <mach-o/getsect.h>
1122 /*#define DARWIN_DEBUG*/
1124 const static struct {
1127 } GC_dyld_sections[] = {
1128 { SEG_DATA, SECT_DATA },
1129 { SEG_DATA, SECT_BSS },
1130 { SEG_DATA, SECT_COMMON }
/* Darwin: map a mach_header back to its image path by linear search   */
/* over the dyld image list (used only for DARWIN_DEBUG output).       */
/* NOTE(review): declarations of i/c and the fall-through return were  */
/* elided by the extraction.                                           */
1134 static const char *GC_dyld_name_for_hdr(struct mach_header *hdr) {
1136     c = _dyld_image_count();
1137     for(i=0;i<c;i++) if(_dyld_get_image_header(i) == hdr)
1138         return _dyld_get_image_name(i);
1143 /* This should never be called by a thread holding the lock */
/* dyld add-image callback: register the __data, __bss and __common    */
/* sections of the newly loaded image (addresses adjusted by the VM    */
/* slide) as GC roots.  No-op when dynamic-library scanning is         */
/* disabled via GC_no_dls.                                             */
/* This should never be called by a thread holding the lock.           */
1144 static void GC_dyld_image_add(struct mach_header* hdr, unsigned long slide) {
1145     unsigned long start,end,i;
1146     const struct section *sec;
1147     if (GC_no_dls) return;
1148     for(i=0;i<sizeof(GC_dyld_sections)/sizeof(GC_dyld_sections[0]);i++) {
1149         sec = getsectbynamefromheader(
1150             hdr,GC_dyld_sections[i].seg,GC_dyld_sections[i].sect);
1151         if(sec == NULL || sec->size == 0) continue;
1152         start = slide + sec->addr;
1153         end = start + sec->size;
1154 #	ifdef DARWIN_DEBUG
1155         GC_printf4("Adding section at %p-%p (%lu bytes) from image %s\n",
1156             start,end,sec->size,GC_dyld_name_for_hdr(hdr));
1158         GC_add_roots((char*)start,(char*)end);
1160 #   ifdef DARWIN_DEBUG
1161     GC_print_static_roots();
1165 /* This should never be called by a thread holding the lock */
/* dyld remove-image callback: the exact mirror of GC_dyld_image_add — */
/* un-register the same slid section ranges from the root set.         */
1166 static void GC_dyld_image_remove(struct mach_header* hdr, unsigned long slide) {
1167     unsigned long start,end,i;
1168     const struct section *sec;
1169     for(i=0;i<sizeof(GC_dyld_sections)/sizeof(GC_dyld_sections[0]);i++) {
1170         sec = getsectbynamefromheader(
1171             hdr,GC_dyld_sections[i].seg,GC_dyld_sections[i].sect);
1172         if(sec == NULL || sec->size == 0) continue;
1173         start = slide + sec->addr;
1174         end = start + sec->size;
1175 #	ifdef DARWIN_DEBUG
1176         GC_printf4("Removing section at %p-%p (%lu bytes) from image %s\n",
1177             start,end,sec->size,GC_dyld_name_for_hdr(hdr));
1179         GC_remove_roots((char*)start,(char*)end);
1181 #   ifdef DARWIN_DEBUG
1182     GC_print_static_roots();
/* Darwin: intentionally empty — root registration is event-driven via */
/* the dyld add/remove callbacks installed by GC_init_dyld().          */
1186 void GC_register_dynamic_libraries() {
1187     /* Currently does nothing. The callbacks are setup by GC_init_dyld()
1188     The dyld library takes it from there. */
1191 /* The _dyld_* functions have an internal lock so no _dyld functions
1192 can be called while the world is stopped without the risk of a deadlock.
1193 Because of this we MUST setup callbacks BEFORE we ever stop the world.
1194 This should be called BEFORE any thread in created and WITHOUT the
1195 allocation lock held. */
/* Install the dyld add/remove-image callbacks (which immediately      */
/* replay all already-loaded images), then force full binding of the   */
/* image containing the GC unless DYLD_BIND_AT_LAUNCH is set, so that  */
/* no lazy binding can occur while the world is stopped.               */
/* Must run before any thread is created and WITHOUT the allocation    */
/* lock held (see comment block above).                                */
/* NOTE(review): fragmentary — the initialized=TRUE assignment and     */
/* closing braces were elided by the extraction.                       */
1197 void GC_init_dyld() {
1198   static GC_bool initialized = FALSE;
1199   char *bind_fully_env = NULL;
1201   if(initialized) return;
1203 #ifdef DARWIN_DEBUG
1204   GC_printf0("Registering dyld callbacks...\n");
1207   /* Apple's Documentation:
1208      When you call _dyld_register_func_for_add_image, the dynamic linker runtime
1209      calls the specified callback (func) once for each of the images that is
1210      currently loaded into the program. When a new image is added to the program,
1211      your callback is called again with the mach_header for the new image, and the
1212      virtual memory slide amount of the new image.
1214      This WILL properly register already linked libraries and libraries
1215      linked in the future
1218   _dyld_register_func_for_add_image(GC_dyld_image_add);
1219   _dyld_register_func_for_remove_image(GC_dyld_image_remove);
1221   /* Set this early to avoid reentrancy issues. */
1224   bind_fully_env = getenv("DYLD_BIND_AT_LAUNCH");
1226   if (bind_fully_env == NULL) {
1227 #ifdef DARWIN_DEBUG
1228     GC_printf0("Forcing full bind of GC code...\n");
1231     if(!_dyld_bind_fully_image_containing_address((unsigned long*)GC_malloc))
1232       GC_abort("_dyld_bind_fully_image_containing_address failed");
1237 #define HAVE_REGISTER_MAIN_STATIC_DATA
/* Main static data is already covered through the dyld callbacks;     */
/* the elided body presumably returns FALSE — confirm upstream.        */
1238 GC_bool GC_register_main_static_data()
1240   /* Already done through dyld callbacks */
1246 #else /* !DYNAMIC_LOADING */
1250 # include "il/PCR_IL.h"
1251 # include "th/PCR_ThCtl.h"
1252 # include "mm/PCR_MM.h"
/* PCR: walk the loaded-file list newest-first, skip files whose load  */
/* has not reached its commit point (their descriptions may be         */
/* inconsistent and their data cannot yet reference the heap), then    */
/* register every segment flagged Traced_on.                           */
/* NOTE(review): fragmentary — the GC_add_roots_inner call line and    */
/* closing braces were elided by the extraction.                       */
1254 void GC_register_dynamic_libraries()
1256     /* Add new static data areas of dynamically loaded modules.	*/
1258         PCR_IL_LoadedFile * p = PCR_IL_GetLastLoadedFile();
1259         PCR_IL_LoadedSegment * q;
1261         /* Skip uncommited files */
1262         while (p != NIL && !(p -> lf_commitPoint)) {
1263             /* The loading of this file has not yet been committed	*/
1264             /* Hence its description could be inconsistent.  		*/
1265             /* Furthermore, it hasn't yet been run.  Hence its data	*/
1266             /* segments can't possibly reference heap allocated	*/
1270 	for (; p != NIL; p = p -> lf_prev) {
1271             for (q = p -> lf_ls; q != NIL; q = q -> ls_next) {
1272                 if ((q -> ls_flags & PCR_IL_SegFlags_Traced_MASK)
1273                     == PCR_IL_SegFlags_Traced_on) {
1275                       ((char *)(q -> ls_addr),
1276                        (char *)(q -> ls_addr) + q -> ls_bytes,
/* Default stub for platforms with no dynamic-loading support: there   */
/* is nothing to register.                                             */
1287 void GC_register_dynamic_libraries(){}
/* Referenced from elsewhere (presumably misc.c) to flag that the      */
/* collector was built without dynamic-loading support — confirm.      */
1289 int GC_no_dynamic_loading;
1293 #endif /* !DYNAMIC_LOADING */
1295 #ifndef HAVE_REGISTER_MAIN_STATIC_DATA
1297 /* Do we need to separately register the main static data segment? */
/* Fallback for ports that did not define HAVE_REGISTER_MAIN_STATIC_   */
/* DATA above; the elided body presumably returns TRUE — confirm.      */
1298 GC_bool GC_register_main_static_data()
1302 #endif /* HAVE_REGISTER_MAIN_STATIC_DATA */