2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
6 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 * Permission is hereby granted to use or copy this program
10 * for any purpose, provided the above notices are retained on all copies.
11 * Permission to modify the code and to distribute modified code is granted,
12 * provided the above notices are retained, and a notice that the code was
13 * modified is included with the above copyright notice.
15 /* An incomplete test for the garbage collector. */
16 /* Some more obscure entry points are not tested at all. */
17 /* This must be compiled with the same flags used to build the */
18 /* GC. It uses GC internals to allow more precise results */
19 /* checking for some of the tests. */
22 # include "private/config.h"
27 #if (defined(DBG_HDRS_ALL) || defined(MAKE_BACK_GRAPH)) && !defined(GC_DEBUG)
33 #ifndef NTHREADS /* Number of additional threads to fork. */
34 # define NTHREADS 5 /* excludes main thread, which also runs a test. */
35 /* Not respected by PCR test. */
38 # if defined(mips) && defined(SYSTYPE_BSD43)
44 # if defined(_WIN32_WCE) && !defined(__GNUC__)
46 /* # define assert ASSERT */
48 # include <assert.h> /* Not normally used, but handy for debugging. */
51 # include "gc_typed.h"
52 # include "private/gc_priv.h" /* For output, locking, MIN_WORDS, */
53 /* some statistics and gcconfig.h. */
55 # if defined(MSWIN32) || defined(MSWINCE)
59 #ifdef GC_PRINT_VERBOSE_STATS
60 # define print_stats VERBOSE
61 # define INIT_PRINT_STATS /* empty */
63 /* Use own variable as GC_print_stats might not be exported. */
64 static int print_stats = 0;
65 # ifdef GC_READ_ENV_FILE
66 /* GETENV uses GC internal function in this case. */
67 # define INIT_PRINT_STATS /* empty */
69 # define INIT_PRINT_STATS \
71 if (0 != GETENV("GC_PRINT_VERBOSE_STATS")) \
72 print_stats = VERBOSE; \
73 else if (0 != GETENV("GC_PRINT_STATS")) \
77 #endif /* !GC_PRINT_VERBOSE_STATS */
80 # include "th/PCR_ThCrSec.h"
81 # include "th/PCR_Th.h"
82 # define GC_printf printf
85 # if defined(GC_PTHREADS)
89 # if (!defined(THREADS) || !defined(HANDLE_FORK) \
90 || (defined(DARWIN) && defined(MPROTECT_VDB) \
91 && !defined(NO_INCREMENTAL) && !defined(MAKE_BACK_GRAPH))) \
92 && !defined(NO_TEST_HANDLE_FORK)
93 # define NO_TEST_HANDLE_FORK
96 # ifndef NO_TEST_HANDLE_FORK
98 # define INIT_FORK_SUPPORT GC_set_handle_fork(1)
100 # define INIT_FORK_SUPPORT /* empty */
103 # if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
104 static CRITICAL_SECTION incr_cs;
109 #ifndef GC_ALPHA_VERSION
110 # define GC_ALPHA_VERSION GC_TMP_ALPHA_VERSION
113 #define CHECH_GCLIB_VERSION \
114 if (GC_get_version() != ((GC_VERSION_MAJOR<<16) \
115 | (GC_VERSION_MINOR<<8) \
116 | GC_ALPHA_VERSION)) { \
117 GC_printf("libgc version mismatch\n"); \
121 /* Call GC_INIT only on platforms on which we think we really need it, */
122 /* so that we can test automatic initialization on the rest. */
123 #if defined(CYGWIN32) || defined (AIX) || defined(DARWIN) \
124 || defined(THREAD_LOCAL_ALLOC) \
125 || (defined(MSWINCE) && !defined(GC_WINMAIN_REDIRECT))
126 # define GC_OPT_INIT GC_INIT()
128 # define GC_OPT_INIT /* empty */
131 #define GC_COND_INIT() \
132 INIT_FORK_SUPPORT; GC_OPT_INIT; CHECH_GCLIB_VERSION; INIT_PRINT_STATS
134 #define CHECK_OUT_OF_MEMORY(p) \
136 GC_printf("Out of memory\n"); \
140 /* Allocation Statistics. Incremented without synchronization. */
141 /* FIXME: We should be using synchronization. */
142 int stubborn_count = 0;
143 int uncollectable_count = 0;
144 int collectable_count = 0;
145 int atomic_count = 0;
146 int realloc_count = 0;
148 #if defined(GC_AMIGA_FASTALLOC) && defined(AMIGA)
150 void GC_amiga_free_all_mem(void);
151 void Amiga_Fail(void){GC_amiga_free_all_mem();abort();}
152 # define FAIL (void)Amiga_Fail()
153 void *GC_amiga_gctest_malloc_explicitly_typed(size_t lb, GC_descr d){
154 void *ret=GC_malloc_explicitly_typed(lb,d);
158 ret=GC_malloc_explicitly_typed(lb,d);
161 GC_printf("Out of memory, (typed allocations are not directly "
162 "supported with the GC_AMIGA_FASTALLOC option.)\n");
168 void *GC_amiga_gctest_calloc_explicitly_typed(size_t a,size_t lb, GC_descr d){
169 void *ret=GC_calloc_explicitly_typed(a,lb,d);
173 ret=GC_calloc_explicitly_typed(a,lb,d);
176 GC_printf("Out of memory, (typed allocations are not directly "
177 "supported with the GC_AMIGA_FASTALLOC option.)\n");
183 # define GC_malloc_explicitly_typed(a,b) GC_amiga_gctest_malloc_explicitly_typed(a,b)
184 # define GC_calloc_explicitly_typed(a,b,c) GC_amiga_gctest_calloc_explicitly_typed(a,b,c)
186 #else /* !AMIGA_FASTALLOC */
188 # if defined(PCR) || defined(LINT2)
189 # define FAIL (void)abort()
191 # define FAIL ABORT("Test failed")
194 #endif /* !AMIGA_FASTALLOC */
196 /* AT_END may be defined to exercise the interior pointer test */
197 /* if the collector is configured with ALL_INTERIOR_POINTERS. */
198 /* As it stands, this test should succeed with either */
199 /* configuration. In the FIND_LEAK configuration, it should */
200 /* find lots of leaks, since we free almost nothing. */
203 struct SEXPR * sexpr_car;
204 struct SEXPR * sexpr_cdr;
208 typedef struct SEXPR * sexpr;
210 # define INT_TO_SEXPR(x) ((sexpr)(GC_word)(x))
211 # define SEXPR_TO_INT(x) ((int)(GC_word)(x))
214 # define nil (INT_TO_SEXPR(0))
215 # define car(x) ((x) -> sexpr_car)
216 # define cdr(x) ((x) -> sexpr_cdr)
217 # define is_nil(x) ((x) == nil)
220 int extra_count = 0; /* Amount of space wasted in cons node */
222 /* Silly implementation of Lisp cons. Intentionally wastes lots of space */
223 /* to test collector. */
224 # ifdef VERY_SMALL_CONFIG
225 # define cons small_cons
/* Lisp-style cons.  Deliberately wastes extra_count bytes per node to */
/* stress the collector.  NOTE(review): interior lines are missing     */
/* from this excerpt; comments below describe only the visible code.   */
227 sexpr cons (sexpr x, sexpr y)
231 int my_extra = extra_count;  /* local snapshot of the waste amount */
234 r = (sexpr) GC_MALLOC_STUBBORN(sizeof(struct SEXPR) + my_extra);
235 CHECK_OUT_OF_MEMORY(r);
/* Scan the fresh object: any nonzero value means the allocator failed */
/* to return zeroed memory; then fill it with a tagged pattern.        */
237 ((char *)p) < ((char *)r) + my_extra + sizeof(struct SEXPR); p++) {
239 GC_printf("Found nonzero at %p - allocator is broken\n", p);
242 *p = (int)((13 << 12) + ((p - (int *)r) & 0xfff));
/* Advance r by the 8-aligned part of my_extra (exercises interior-    */
/* pointer recognition).                                               */
245 r = (sexpr)((char *)r + (my_extra & ~7));
250 if ( my_extra >= 5000 ) {  /* presumably wraps the waste counter */
253 extra_count = my_extra;
255 GC_END_STUBBORN_CHANGE((char *)r);  /* done mutating stubborn object */
260 #ifdef GC_GCJ_SUPPORT
265 /* The following struct emulates the vtable in gcj. */
266 /* This assumes the default value of MARK_DESCR_OFFSET. */
268 void * dummy; /* class pointer in real gcj. */
272 struct fake_vtable gcj_class_struct1 = { 0, sizeof(struct SEXPR)
273 + sizeof(struct fake_vtable *) };
274 /* length based descriptor. */
275 struct fake_vtable gcj_class_struct2 =
276 { 0, ((GC_word)3 << (CPP_WORDSZ - 3)) | GC_DS_BITMAP};
277 /* Bitmap based descriptor. */
/* Mark procedure emulating what gcj would use for SEXPR-like objects: */
/* pushes the car and cdr fields onto the GC mark stack and returns    */
/* the updated mark stack pointer.                                     */
279 struct GC_ms_entry * fake_gcj_mark_proc(word * addr,
280 struct GC_ms_entry *mark_stack_ptr,
281 struct GC_ms_entry *mark_stack_limit,
286 /* Object allocated with debug allocator. */
287 addr = (word *)GC_USR_PTR_FROM_BASE(addr);
289 x = (sexpr)(addr + 1); /* Skip the vtable pointer. */
/* Push cdr first, then car, threading mark_stack_ptr through both.    */
290 mark_stack_ptr = GC_MARK_AND_PUSH(
291 (void *)(x -> sexpr_cdr), mark_stack_ptr,
292 mark_stack_limit, (void * *)&(x -> sexpr_cdr));
293 mark_stack_ptr = GC_MARK_AND_PUSH(
294 (void *)(x -> sexpr_car), mark_stack_ptr,
295 mark_stack_limit, (void * *)&(x -> sexpr_car));
296 return(mark_stack_ptr);
299 #endif /* GC_GCJ_SUPPORT */
/* Minimal cons: allocate a plain collectable SEXPR node.              */
/* NOTE(review): the field assignments and return are missing from     */
/* this excerpt.                                                       */
302 sexpr small_cons (sexpr x, sexpr y)
307 r = (sexpr) GC_MALLOC(sizeof(struct SEXPR));
308 CHECK_OUT_OF_MEMORY(r);
/* Like small_cons, but the node is uncollectable and its cdr is       */
/* stored disguised (bitwise complement) so the collector cannot see   */
/* the pointer; UNCOLLECTABLE_CDR later undoes the disguise.           */
314 sexpr small_cons_uncollectable (sexpr x, sexpr y)
318 uncollectable_count++;
319 r = (sexpr) GC_MALLOC_UNCOLLECTABLE(sizeof(struct SEXPR));
320 CHECK_OUT_OF_MEMORY(r);
322 r -> sexpr_cdr = (sexpr)(~(GC_word)y);  /* disguised cdr pointer */
326 #ifdef GC_GCJ_SUPPORT
/* Cons using the gcj-style allocator: the object carries a fake       */
/* vtable pointer in its first word, so the sexpr payload starts at    */
/* r + 1.                                                              */
329 sexpr gcj_cons(sexpr x, sexpr y)
334 r = (GC_word *) GC_GCJ_MALLOC(sizeof(struct SEXPR)
335 + sizeof(struct fake_vtable*),
337 CHECK_OUT_OF_MEMORY(r);
338 result = (sexpr)(r + 1);  /* skip the (fake) vtable slot */
339 result -> sexpr_car = x;
340 result -> sexpr_cdr = y;
345 /* Return reverse(x) concatenated with y */
/* Recursive helper: conses each element of x onto the accumulator y.  */
346 sexpr reverse1(sexpr x, sexpr y)
351 return( reverse1(cdr(x), cons(car(x), y)) );
/* Reverse a list via reverse1; a TEST_WITH_SYSTEM_MALLOC-only path    */
/* (not visible in this excerpt) is conditionally compiled first.      */
355 sexpr reverse(sexpr x)
357 # ifdef TEST_WITH_SYSTEM_MALLOC
360 return( reverse1(x, nil) );
/* Build the list ((low) (low+1) ... (up)); each element is itself a   */
/* one-entry list whose car is the integer disguised as a pointer.     */
363 sexpr ints(int low, int up)
368 return(small_cons(small_cons(INT_TO_SEXPR(low), nil), ints(low+1, up)));
372 #ifdef GC_GCJ_SUPPORT
373 /* Return reverse(x) concatenated with y */
/* gcj-allocated variant of reverse1.                                  */
374 sexpr gcj_reverse1(sexpr x, sexpr y)
379 return( gcj_reverse1(cdr(x), gcj_cons(car(x), y)) );
/* Reverse a gcj-allocated list.                                       */
383 sexpr gcj_reverse(sexpr x)
385 return( gcj_reverse1(x, nil) );
/* Build ((low) ... (up)) out of gcj-style nodes.                      */
388 sexpr gcj_ints(int low, int up)
393 return(gcj_cons(gcj_cons(INT_TO_SEXPR(low), nil), gcj_ints(low+1, up)));
396 #endif /* GC_GCJ_SUPPORT */
398 /* To check uncollectable allocation we build lists with disguised cdr */
399 /* pointers, and make sure they don't go away. */
/* Spine nodes are uncollectable; the per-element sublists are         */
/* ordinary collectable nodes.                                         */
400 sexpr uncollectable_ints(int low, int up)
405 return(small_cons_uncollectable(small_cons(INT_TO_SEXPR(low), nil),
406 uncollectable_ints(low+1, up)));
/* Verify that list holds low..up in order, reporting corruption.      */
/* NOTE(review): the error/termination branches are partly missing     */
/* from this excerpt.                                                  */
410 void check_ints(sexpr list, int low, int up)
412 if (SEXPR_TO_INT(car(car(list))) != low) {
414 "List reversal produced incorrect list - collector is broken\n");
418 if (cdr(list) != nil) {
419 GC_printf("List too long - collector is broken\n");
423 check_ints(cdr(list), low+1, up);  /* recurse down the tail */
/* Undo the pointer disguise applied by small_cons_uncollectable.      */
427 # define UNCOLLECTABLE_CDR(x) (sexpr)(~(GC_word)(cdr(x)))
/* Same check as check_ints, but following disguised cdr pointers.     */
429 void check_uncollectable_ints(sexpr list, int low, int up)
431 if (SEXPR_TO_INT(car(car(list))) != low) {
432 GC_printf("Uncollectable list corrupted - collector is broken\n");
436 if (UNCOLLECTABLE_CDR(list) != nil) {
437 GC_printf("Uncollectable list too long - collector is broken\n");
441 check_uncollectable_ints(UNCOLLECTABLE_CDR(list), low+1, up);
445 /* Not used, but useful for debugging: */
/* Print each element of an ints()-style list as "(n)".                */
446 void print_int_list(sexpr x)
451 GC_printf("(%d)", SEXPR_TO_INT(car(car(x))));
452 if (!is_nil(cdr(x))) {
454 print_int_list(cdr(x));
/* Debugging aid: walk a list printing the mark status of each node    */
/* and of its car.                                                     */
462 void check_marks_int_list(sexpr x)
464 if (!GC_is_marked((ptr_t)x))
465 GC_printf("[unm:%p]", x);   /* node itself is unmarked */
467 GC_printf("[mkd:%p]", x);
471 if (!GC_is_marked((ptr_t)car(x)))
472 GC_printf("[unm car:%p]", car(x));
473 GC_printf("(%d)", SEXPR_TO_INT(car(car(x))));
474 if (!is_nil(cdr(x))) {
476 check_marks_int_list(cdr(x));
484 * A tiny list reversal test to check thread creation.
488 # ifdef VERY_SMALL_CONFIG
489 # define TINY_REVERSE_UPPER_VALUE 4
491 # define TINY_REVERSE_UPPER_VALUE 10
/* Thread body: a small list-reversal check; the signature depends on  */
/* which threading API is in use.                                      */
494 # if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
495 DWORD __stdcall tiny_reverse_test(void * arg)
497 void * tiny_reverse_test(void * arg)
/* Five rounds: a double reversal must reproduce the original list.    */
501 for (i = 0; i < 5; ++i) {
502 check_ints(reverse(reverse(ints(1, TINY_REVERSE_UPPER_VALUE))),
503 1, TINY_REVERSE_UPPER_VALUE);
508 # if defined(GC_PTHREADS)
/* Create a short-lived pthread running tiny_reverse_test and join it, */
/* reporting any pthread error code.                                   */
509 void fork_a_thread(void)
513 if ((code = pthread_create(&t, 0, tiny_reverse_test, 0)) != 0) {
514 GC_printf("Small thread creation failed %d\n", code);
517 if ((code = pthread_join(t, 0)) != 0) {
518 GC_printf("Small thread join failed %d\n", code);
523 # elif defined(GC_WIN32_THREADS)
/* Win32 variant: create the thread via GC_CreateThread and wait for   */
/* it to finish, reporting GetLastError on failure.                    */
524 void fork_a_thread(void)
528 h = GC_CreateThread(NULL, 0, tiny_reverse_test, 0, 0, &thread_id);
529 if (h == (HANDLE)NULL) {
530 GC_printf("Small thread creation failed %d\n",
531 (int)GetLastError());
534 if (WaitForSingleObject(h, INFINITE) != WAIT_OBJECT_0) {
535 GC_printf("Small thread wait failed %d\n",
536 (int)GetLastError());
545 /* Try to force a to be strangely aligned */
553 * Repeatedly reverse lists built out of very different sized cons cells.
554 * Check that we didn't lose anything.
556 void *GC_CALLBACK reverse_test_inner(void *data)
566 /* This stack frame is not guaranteed to be scanned. */
567 return GC_call_with_gc_active(reverse_test_inner, (void*)(word)1);
570 # if /*defined(MSWIN32) ||*/ defined(MACOS)
571 /* Win32S only allows 128K stacks */
574 /* PCR default stack is 100K. Stack frames are up to 120 bytes. */
576 # elif defined(MSWINCE) || defined(RTEMS)
577 /* WinCE only allows 64K stacks */
580 /* OSF has limited stack space by default, and large frames. */
582 # elif defined(__MACH__) && defined(__ppc64__)
592 d = uncollectable_ints(1, 100);
593 e = uncollectable_ints(1, 1);
594 /* Check that realloc updates object descriptors correctly */
596 f = (sexpr *)GC_MALLOC(4 * sizeof(sexpr));
598 f = (sexpr *)GC_REALLOC((void *)f, 6 * sizeof(sexpr));
599 CHECK_OUT_OF_MEMORY(f);
602 g = (sexpr *)GC_MALLOC(513 * sizeof(sexpr));
604 g = (sexpr *)GC_REALLOC((void *)g, 800 * sizeof(sexpr));
605 CHECK_OUT_OF_MEMORY(g);
608 h = (sexpr *)GC_MALLOC(1025 * sizeof(sexpr));
610 h = (sexpr *)GC_REALLOC((void *)h, 2000 * sizeof(sexpr));
611 CHECK_OUT_OF_MEMORY(h);
612 # ifdef GC_GCJ_SUPPORT
613 h[1999] = gcj_ints(1,200);
614 for (i = 0; i < 51; ++i)
615 h[1999] = gcj_reverse(h[1999]);
616 /* Leave it as the reversed list for now. */
618 h[1999] = ints(1,200);
620 /* Try to force some collections and reuse of small list elements */
621 for (i = 0; i < 10; i++) {
624 /* Superficially test interior pointer recognition on stack */
625 c = (sexpr)((char *)c + sizeof(char *));
626 d = (sexpr)((char *)d + sizeof(char *));
632 for (i = 0; i < 50; i++) {
634 b = reverse(reverse(b));
638 for (i = 0; i < 60; i++) {
639 # if defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
640 if (i % 10 == 0) fork_a_thread();
642 /* This maintains the invariant that a always points to a list of */
643 /* 49 integers. Thus this is thread safe without locks, */
644 /* assuming atomic pointer assignments. */
645 a = reverse(reverse(a));
646 # if !defined(AT_END) && !defined(THREADS)
647 /* This is not thread safe, since realloc explicitly deallocates */
649 a = (sexpr)GC_REALLOC((void *)a, 500);
651 a = (sexpr)GC_REALLOC((void *)a, 8200);
658 /* Restore c and d values. */
659 c = (sexpr)((char *)c - sizeof(char *));
660 d = (sexpr)((char *)d - sizeof(char *));
663 check_uncollectable_ints(d, 1, 100);
664 check_ints(f[5], 1,17);
665 check_ints(g[799], 1,18);
666 # ifdef GC_GCJ_SUPPORT
667 h[1999] = gcj_reverse(h[1999]);
669 check_ints(h[1999], 1,200);
673 *(sexpr volatile *)&b = 0;
674 *(sexpr volatile *)&c = 0;
/* Entry point: run reverse_test_inner through GC_do_blocking so both  */
/* the blocking and (via the inner function) gc-active code paths are  */
/* exercised.                                                          */
678 void reverse_test(void)
680 /* Test GC_do_blocking/GC_call_with_gc_active. */
681 (void)GC_do_blocking(reverse_test_inner, 0);
687 * The rest of this builds balanced binary trees, checks that they don't
688 * disappear, and tests finalization.
690 typedef struct treenode {
692 struct treenode * lchild;
693 struct treenode * rchild;
696 int finalizable_count = 0;
697 int finalized_count = 0;
698 volatile int dropped_something = 0;
/* Finalizer registered for tree nodes: validates client_data against  */
/* the node's level and updates finalization counters under whichever  */
/* lock the configured threading package provides.                     */
700 void GC_CALLBACK finalizer(void * obj, void * client_data)
705 PCR_ThCrSec_EnterSys();
707 # if defined(GC_PTHREADS)
708 static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
709 pthread_mutex_lock(&incr_lock);
710 # elif defined(GC_WIN32_THREADS)
711 EnterCriticalSection(&incr_cs);
713 if ((int)(GC_word)client_data != t -> level) {
714 GC_printf("Wrong finalization data - collector is broken\n");
718 t -> level = -1; /* detect duplicate finalization immediately */
720 PCR_ThCrSec_ExitSys();
722 # if defined(GC_PTHREADS)
723 pthread_mutex_unlock(&incr_lock);
724 # elif defined(GC_WIN32_THREADS)
725 LeaveCriticalSection(&incr_cs);
731 # define MAX_FINALIZED (NTHREADS*4000)
734 GC_FAR GC_word live_indicators[MAX_FINALIZED] = {0};
736 /* Too big for THINK_C. have to allocate it dynamically. */
737 GC_word *live_indicators = 0;
740 int live_indicators_count = 0;
744 tn * result = (tn *)GC_MALLOC(sizeof(tn));
748 /* get around static data limitations. */
749 if (!live_indicators) {
751 (GC_word*)NewPtrClear(MAX_FINALIZED * sizeof(GC_word));
752 CHECK_OUT_OF_MEMORY(live_indicators);
755 if (n == 0) return(0);
756 CHECK_OUT_OF_MEMORY(result);
758 result -> lchild = mktree(n-1);
759 result -> rchild = mktree(n-1);
760 if (counter++ % 17 == 0 && n >= 2) {
763 CHECK_OUT_OF_MEMORY(result->lchild);
764 tmp = result -> lchild -> rchild;
765 CHECK_OUT_OF_MEMORY(result->rchild);
766 result -> lchild -> rchild = result -> rchild -> lchild;
767 result -> rchild -> lchild = tmp;
769 if (counter++ % 119 == 0) {
774 PCR_ThCrSec_EnterSys();
776 # if defined(GC_PTHREADS)
777 static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
778 pthread_mutex_lock(&incr_lock);
779 # elif defined(GC_WIN32_THREADS)
780 EnterCriticalSection(&incr_cs);
782 /* Losing a count here causes erroneous report of failure. */
784 my_index = live_indicators_count++;
786 PCR_ThCrSec_ExitSys();
788 # if defined(GC_PTHREADS)
789 pthread_mutex_unlock(&incr_lock);
790 # elif defined(GC_WIN32_THREADS)
791 LeaveCriticalSection(&incr_cs);
795 GC_REGISTER_FINALIZER((void *)result, finalizer, (void *)(GC_word)n,
796 (GC_finalization_proc *)0, (void * *)0);
797 if (my_index >= MAX_FINALIZED) {
798 GC_printf("live_indicators overflowed\n");
801 live_indicators[my_index] = 13;
802 if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
803 (void * *)(&(live_indicators[my_index])),
804 (void *)result) != 0) {
805 GC_printf("GC_general_register_disappearing_link failed\n");
808 if (GC_unregister_disappearing_link(
810 (&(live_indicators[my_index]))) == 0) {
811 GC_printf("GC_unregister_disappearing_link failed\n");
814 if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
815 (void * *)(&(live_indicators[my_index])),
816 (void *)result) != 0) {
817 GC_printf("GC_general_register_disappearing_link failed 2\n");
820 GC_reachable_here(result);
/* Verify a balanced tree of height n built by mktree, allocating      */
/* throwaway garbage along the way to provoke collections.             */
825 void chktree(tn *t, int n)
827 if (n == 0 && t != 0) {
828 GC_printf("Clobbered a leaf - collector is broken\n");
832 if (t -> level != n) {
833 GC_printf("Lost a node at level %d - collector is broken\n", n);
836 if (counter++ % 373 == 0) {
838 (void) GC_MALLOC(counter%5001);  /* noise allocation */
840 chktree(t -> lchild, n-1);
841 if (counter++ % 73 == 0) {
843 (void) GC_MALLOC(counter%373);   /* more noise */
845 chktree(t -> rchild, n-1);
849 #if defined(GC_PTHREADS)
850 pthread_key_t fl_key;
/* Allocate 8 bytes from a per-thread free list refilled via           */
/* GC_malloc_many; falls back to plain GC_MALLOC in small/debug        */
/* configurations.                                                     */
852 void * alloc8bytes(void)
854 # if defined(SMALL_CONFIG) || defined(GC_DEBUG)
856 return(GC_MALLOC(8));
858 void ** my_free_list_ptr;
/* Lazily create the thread-specific free-list head; it is allocated   */
/* uncollectable so the collector cannot reclaim it.                   */
861 my_free_list_ptr = (void **)pthread_getspecific(fl_key);
862 if (my_free_list_ptr == 0) {
863 uncollectable_count++;
864 my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
865 CHECK_OUT_OF_MEMORY(my_free_list_ptr);
866 if (pthread_setspecific(fl_key, my_free_list_ptr) != 0) {
867 GC_printf("pthread_setspecific failed\n");
/* Refill from GC_malloc_many when the local list is exhausted.        */
871 my_free_list = *my_free_list_ptr;
872 if (my_free_list == 0) {
873 my_free_list = GC_malloc_many(8);
874 CHECK_OUT_OF_MEMORY(my_free_list);
/* Unlink the head element, clear its link, and return it.             */
876 *my_free_list_ptr = GC_NEXT(my_free_list);
877 GC_NEXT(my_free_list) = 0;
879 return(my_free_list);
884 # define alloc8bytes() GC_MALLOC_ATOMIC(8)
/* Allocate roughly n bytes in 8-byte chunks via alloc8bytes,          */
/* reporting out-of-memory.                                            */
887 void alloc_small(int n)
891 for (i = 0; i < n; i += 8) {
893 if (alloc8bytes() == 0) {
894 GC_printf("Out of memory\n");
900 # if defined(THREADS) && defined(GC_DEBUG)
901 # ifdef VERY_SMALL_CONFIG
902 # define TREE_HEIGHT 12
904 # define TREE_HEIGHT 15
907 # ifdef VERY_SMALL_CONFIG
908 # define TREE_HEIGHT 13
910 # define TREE_HEIGHT 16
918 root = mktree(TREE_HEIGHT);
919 # ifndef VERY_SMALL_CONFIG
920 alloc_small(5000000);
922 chktree(root, TREE_HEIGHT);
923 if (finalized_count && ! dropped_something) {
924 GC_printf("Premature finalization - collector is broken\n");
927 dropped_something = 1;
928 GC_noop1((word)root); /* Root needs to remain live until */
929 /* dropped_something is set. */
930 root = mktree(TREE_HEIGHT);
931 chktree(root, TREE_HEIGHT);
932 for (i = TREE_HEIGHT; i >= 0; i--) {
936 # ifndef VERY_SMALL_CONFIG
937 alloc_small(5000000);
941 unsigned n_tests = 0;
943 GC_word bm_huge[10] = {
956 /* A very simple test of explicitly typed allocation */
957 void typed_test(void)
959 GC_word * old, * new;
962 GC_word bm_large = 0xf7ff7fff;
963 GC_descr d1 = GC_make_descriptor(&bm3, 2);
964 GC_descr d2 = GC_make_descriptor(&bm2, 2);
965 GC_descr d3 = GC_make_descriptor(&bm_large, 32);
966 GC_descr d4 = GC_make_descriptor(bm_huge, 320);
967 GC_word * x = (GC_word *)GC_malloc_explicitly_typed(2000, d4);
971 (void)GC_make_descriptor(&bm_large, 32);
975 for (i = 0; i < 4000; i++) {
977 new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d1);
978 CHECK_OUT_OF_MEMORY(new);
979 if (0 != new[0] || 0 != new[1]) {
980 GC_printf("Bad initialization by GC_malloc_explicitly_typed\n");
984 new[1] = (GC_word)old;
987 new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d2);
988 CHECK_OUT_OF_MEMORY(new);
990 new[1] = (GC_word)old;
993 new = (GC_word *) GC_malloc_explicitly_typed(33 * sizeof(GC_word), d3);
994 CHECK_OUT_OF_MEMORY(new);
996 new[1] = (GC_word)old;
999 new = (GC_word *) GC_calloc_explicitly_typed(4, 2 * sizeof(GC_word),
1001 CHECK_OUT_OF_MEMORY(new);
1003 new[1] = (GC_word)old;
1005 collectable_count++;
1007 new = (GC_word *) GC_calloc_explicitly_typed(7, 3 * sizeof(GC_word),
1010 new = (GC_word *) GC_calloc_explicitly_typed(1001,
1011 3 * sizeof(GC_word),
1013 if (new && (0 != new[0] || 0 != new[1])) {
1014 GC_printf("Bad initialization by GC_malloc_explicitly_typed\n");
1018 CHECK_OUT_OF_MEMORY(new);
1020 new[1] = (GC_word)old;
1023 for (i = 0; i < 20000; i++) {
1025 GC_printf("typed alloc failed at %lu\n", (unsigned long)i);
1030 new = (GC_word *)(old[1]);
1039 void GC_CALLBACK fail_proc1(void * x)
/* Check that the NULL-terminated varargs pointers are pairwise        */
/* distinct; used to prove that function arguments remain visible to   */
/* the collector (duplicates would mean an argument was collected and  */
/* its space reused).                                                  */
1044 static void uniq(void *p, ...) {
1050 for (;(q[n] = va_arg(a,void *)) != NULL;n++) ;
1056 "Apparently failed to mark from some function arguments.\n"
1057 "Perhaps GC_push_regs was configured incorrectly?\n"
1064 # define TEST_FAIL_COUNT(n) 1
1066 # define TEST_FAIL_COUNT(n) (fail_count >= (n))
/* Callback for GC_call_with_alloc_lock: increment the int pointed to  */
/* by pcounter.                                                        */
1069 void * GC_CALLBACK inc_int_counter(void *pcounter)
1071 ++(*(int *)pcounter);
1075 void run_one_test(void)
1077 # ifndef DBG_HDRS_ALL
1083 char *y = (char *)(GC_word)fail_proc1;
1085 CLOCK_TYPE typed_time;
1087 CLOCK_TYPE start_time;
1088 CLOCK_TYPE reverse_time;
1089 CLOCK_TYPE tree_time;
1090 unsigned long time_diff;
1094 "This test program is not designed for leak detection mode\n");
1095 GC_printf("Expect lots of problems\n");
1098 # ifndef DBG_HDRS_ALL
1099 collectable_count += 3;
1100 if ((GC_size(GC_malloc(7)) != 8 &&
1101 GC_size(GC_malloc(7)) != MIN_WORDS * sizeof(GC_word))
1102 || GC_size(GC_malloc(15)) != 16) {
1103 GC_printf("GC_size produced unexpected results\n");
1106 collectable_count += 1;
1107 if (GC_size(GC_malloc(0)) != MIN_WORDS * sizeof(GC_word)) {
1108 GC_printf("GC_malloc(0) failed: GC_size returns %ld\n",
1109 (unsigned long)GC_size(GC_malloc(0)));
1112 collectable_count += 1;
1113 if (GC_size(GC_malloc_uncollectable(0)) != MIN_WORDS * sizeof(GC_word)) {
1114 GC_printf("GC_malloc_uncollectable(0) failed\n");
1117 GC_is_valid_displacement_print_proc = fail_proc1;
1118 GC_is_visible_print_proc = fail_proc1;
1119 collectable_count += 1;
1121 if (GC_base(GC_PTR_ADD(x, 13)) != x) {
1122 GC_printf("GC_base(heap ptr) produced incorrect result\n");
1125 (void)GC_PRE_INCR(x, 0);
1126 (void)GC_POST_INCR(x);
1127 (void)GC_POST_DECR(x);
1128 if (GC_base(x) != x) {
1129 GC_printf("Bad INCR/DECR result\n");
1133 if (GC_base(y) != 0) {
1134 GC_printf("GC_base(fn_ptr) produced incorrect result\n");
1138 if (GC_same_obj(x+5, x) != x + 5) {
1139 GC_printf("GC_same_obj produced incorrect result\n");
1142 if (GC_is_visible(y) != y || GC_is_visible(x) != x) {
1143 GC_printf("GC_is_visible produced incorrect result\n");
1149 GC_printf("GC_PTR_STORE failed: %p != %p\n", *z, x);
1152 if (!TEST_FAIL_COUNT(1)) {
1153 # if!(defined(POWERPC) || defined(IA64)) || defined(M68K)
1154 /* On POWERPCs function pointers point to a descriptor in the */
1155 /* data segment, so there should have been no failures. */
1156 /* The same applies to IA64. Something similar seems to */
1157 /* be going on with NetBSD/M68K. */
1158 GC_printf("GC_is_visible produced wrong failure indication\n");
1162 if (GC_is_valid_displacement(y) != y
1163 || GC_is_valid_displacement(x) != x
1164 || GC_is_valid_displacement(x + 3) != x + 3) {
1165 GC_printf("GC_is_valid_displacement produced incorrect result\n");
1172 for (i = sizeof(GC_word); i < 512; i *= 2) {
1173 GC_word result = (GC_word) GC_memalign(i, 17);
1174 if (result % i != 0 || result == 0 || *(int *)result != 0) FAIL;
1177 # ifndef ALL_INTERIOR_POINTERS
1178 # if defined(RS6000) || defined(POWERPC)
1179 if (!TEST_FAIL_COUNT(1))
1181 if (!TEST_FAIL_COUNT(GC_get_all_interior_pointers() ? 1 : 2))
1185 "GC_is_valid_displacement produced wrong failure indication\n");
1189 # endif /* DBG_HDRS_ALL */
1190 /* Test floating point alignment */
1191 collectable_count += 2;
1193 double *dp = GC_MALLOC(sizeof(double));
1194 CHECK_OUT_OF_MEMORY(dp);
1196 dp = GC_MALLOC(sizeof(double));
1197 CHECK_OUT_OF_MEMORY(dp);
1200 /* Test size 0 allocation a bit more */
1203 for (i = 0; i < 10000; ++i) {
1205 GC_FREE(GC_MALLOC(0));
1206 GC_MALLOC_ATOMIC(0);
1207 GC_FREE(GC_MALLOC_ATOMIC(0));
1210 # ifdef GC_GCJ_SUPPORT
1211 GC_REGISTER_DISPLACEMENT(sizeof(struct fake_vtable *));
1212 GC_init_gcj_malloc(0, (void *)(GC_word)fake_gcj_mark_proc);
1214 /* Make sure that fn arguments are visible to the collector. */
1216 GC_malloc(12), GC_malloc(12), GC_malloc(12),
1217 (GC_gcollect(),GC_malloc(12)),
1218 GC_malloc(12), GC_malloc(12), GC_malloc(12),
1219 (GC_gcollect(),GC_malloc(12)),
1220 GC_malloc(12), GC_malloc(12), GC_malloc(12),
1221 (GC_gcollect(),GC_malloc(12)),
1222 GC_malloc(12), GC_malloc(12), GC_malloc(12),
1223 (GC_gcollect(),GC_malloc(12)),
1224 GC_malloc(12), GC_malloc(12), GC_malloc(12),
1225 (GC_gcollect(),GC_malloc(12)),
1227 /* GC_malloc(0) must return NULL or something we can deallocate. */
1228 GC_free(GC_malloc(0));
1229 GC_free(GC_malloc_atomic(0));
1230 GC_free(GC_malloc(0));
1231 GC_free(GC_malloc_atomic(0));
1232 /* Repeated list reversal test. */
1233 GET_TIME(start_time);
1236 GET_TIME(reverse_time);
1237 time_diff = MS_TIME_DIFF(reverse_time, start_time);
1238 GC_log_printf("-------------Finished reverse_test at time %u (%p)\n",
1239 (unsigned) time_diff, &start_time);
1241 # ifndef DBG_HDRS_ALL
1244 GET_TIME(typed_time);
1245 time_diff = MS_TIME_DIFF(typed_time, start_time);
1246 GC_log_printf("-------------Finished typed_test at time %u (%p)\n",
1247 (unsigned) time_diff, &start_time);
1249 # endif /* DBG_HDRS_ALL */
1252 GET_TIME(tree_time);
1253 time_diff = MS_TIME_DIFF(tree_time, start_time);
1254 GC_log_printf("-------------Finished tree_test at time %u (%p)\n",
1255 (unsigned) time_diff, &start_time);
1257 /* Run reverse_test a second time, so we hopefully notice corruption. */
1260 GET_TIME(reverse_time);
1261 time_diff = MS_TIME_DIFF(reverse_time, start_time);
1263 "-------------Finished second reverse_test at time %u (%p)\n",
1264 (unsigned)time_diff, &start_time);
1266 /* GC_allocate_ml and GC_need_to_lock are no longer exported, and */
1267 /* AO_fetch_and_add1() may be unavailable to update a counter. */
1268 (void)GC_call_with_alloc_lock(inc_int_counter, &n_tests);
1269 # ifndef NO_TEST_HANDLE_FORK
1272 tiny_reverse_test(0);
1275 GC_log_printf("Finished a child process\n");
1280 GC_log_printf("Finished %p\n", &start_time);
1283 #define NUMBER_ROUND_UP(v, bound) ((((v) + (bound) - 1) / (bound)) * (bound))
1285 void check_heap_stats(void)
1290 # ifdef FINALIZE_ON_DEMAND
1291 int late_finalize_count = 0;
1294 # ifdef VERY_SMALL_CONFIG
1295 /* The upper bounds are a guess, which has been empirically */
1296 /* adjusted. On low end uniprocessors with incremental GC */
1297 /* these may be particularly dubious, since empirically the */
1298 /* heap tends to grow largely as a result of the GC not */
1299 /* getting enough cycles. */
1300 # if CPP_WORDSZ == 64
1301 max_heap_sz = 4500000;
1303 max_heap_sz = 2800000;
1306 # if CPP_WORDSZ == 64
1307 max_heap_sz = 23000000;
1309 max_heap_sz = 16000000;
1314 # ifdef SAVE_CALL_CHAIN
1316 # ifdef SAVE_CALL_COUNT
1317 max_heap_sz += max_heap_sz * SAVE_CALL_COUNT/4;
1321 max_heap_sz *= n_tests;
1322 # if defined(USE_MMAP) || defined(MSWIN32)
1323 max_heap_sz = NUMBER_ROUND_UP(max_heap_sz, 4 * 1024 * 1024);
1325 /* Garbage collect repeatedly so that all inaccessible objects */
1326 /* can be finalized. */
1327 while (GC_collect_a_little()) { }
1328 for (i = 0; i < 16; i++) {
1330 # ifdef FINALIZE_ON_DEMAND
1331 late_finalize_count +=
1333 GC_invoke_finalizers();
1336 GC_log_printf("Primordial thread stack bottom: %p\n",
1339 GC_printf("Completed %u tests\n", n_tests);
1340 GC_printf("Allocated %d collectable objects\n", collectable_count);
1341 GC_printf("Allocated %d uncollectable objects\n",
1342 uncollectable_count);
1343 GC_printf("Allocated %d atomic objects\n", atomic_count);
1344 GC_printf("Allocated %d stubborn objects\n", stubborn_count);
1345 GC_printf("Finalized %d/%d objects - ",
1346 finalized_count, finalizable_count);
1347 # ifdef FINALIZE_ON_DEMAND
1348 if (finalized_count != late_finalize_count) {
1349 GC_printf("Demand finalization error\n");
1353 if (finalized_count > finalizable_count
1354 || finalized_count < finalizable_count/2) {
1355 GC_printf("finalization is probably broken\n");
1358 GC_printf("finalization is probably ok\n");
1361 for (i = 0; i < MAX_FINALIZED; i++) {
1362 if (live_indicators[i] != 0) {
1366 i = finalizable_count - finalized_count - still_live;
1368 GC_printf("%d disappearing links remain and %d more objects "
1369 "were not finalized\n", still_live, i);
1371 GC_printf("\tVery suspicious!\n");
1373 GC_printf("\tSlightly suspicious, but probably OK\n");
1376 GC_printf("Total number of bytes allocated is %lu\n",
1377 (unsigned long)GC_get_total_bytes());
1378 GC_printf("Final heap size is %lu bytes\n",
1379 (unsigned long)GC_get_heap_size());
1380 if (GC_get_total_bytes() < n_tests *
1381 # ifdef VERY_SMALL_CONFIG
1387 GC_printf("Incorrect execution - missed some allocations\n");
1390 if (GC_get_heap_size() + GC_get_unmapped_bytes() > max_heap_sz) {
1391 GC_printf("Unexpected heap growth - collector may be broken"
1392 " (heapsize: %lu, expected: %lu)\n",
1393 (unsigned long)(GC_get_heap_size() + GC_get_unmapped_bytes()),
1394 (unsigned long)max_heap_sz);
1398 GC_unregister_my_thread(); /* just to check it works (for main) */
1400 GC_printf("Collector appears to work\n");
/* Classic-MacOS helper: lower ApplLimit so that at least minSize      */
/* bytes of stack are available.                                       */
1404 void SetMinimumStack(long minSize)
1408 if (minSize > LMGetDefltStack())
1410 newApplLimit = (long) GetApplLimit()
1411 - (minSize - LMGetDefltStack());
1412 SetApplLimit((Ptr) newApplLimit);
1417 #define cMinStackSpace (512L * 1024L)
/* GC warning hook: forward the collector's warning (msg is the GC's   */
/* own format string taking one numeric argument) to GC_printf.        */
1421 void GC_CALLBACK warn_proc(char *msg, GC_word p)
1423 GC_printf(msg, (unsigned long)p);
/* WinMain takes LPWSTR on genuine Windows CE, LPSTR elsewhere.       */
1427 #if defined(MSWINCE) && defined(UNDER_CE)
1428 # define WINMAIN_LPTSTR LPWSTR
1430 # define WINMAIN_LPTSTR LPSTR
/* Single-threaded test driver: used when no thread package (PCR,     */
/* Win32 threads, pthreads) is selected.  The entry point is WinMain  */
/* on Windows, Init on RTEMS, and (presumably) plain main otherwise.  */
1433 #if !defined(PCR) && !defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS) \
1435 #if defined(MSWIN32) && !defined(__MINGW32__) || defined(MSWINCE)
1436 int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev,
1437 WINMAIN_LPTSTR cmd, int n)
1438 #elif defined(RTEMS)
/* Minimal RTEMS application configuration so the test links as a     */
/* standalone RTEMS image with a single 64 KB-stack init task.        */
1440 # define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
1441 # define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
1442 # define CONFIGURE_RTEMS_INIT_TASKS_TABLE
1443 # define CONFIGURE_MAXIMUM_TASKS 1
1444 # define CONFIGURE_INIT
1445 # define CONFIGURE_INIT_TASK_STACK_SIZE (64*1024)
1446 # include <rtems/confdefs.h>
1447 rtems_task Init(rtems_task_argument ignord)
/* Classic Mac OS setup: grow the stack before any deep recursion.    */
1454 /* Make sure we have lots and lots of stack space. */
1455 SetMinimumStack(cMinStackSpace);
1456 /* Cheat and let stdio initialize toolbox for us. */
1457 printf("Testing GC Macintosh port\n");
1460 GC_set_warn_proc(warn_proc);
/* Switch to incremental collection when a virtual-dirty-bit          */
/* implementation is available and nothing forbids it; report which   */
/* VDB mechanism is in use so test logs identify the configuration.   */
1461 # if (defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(GWW_VDB)) \
1462 && !defined(MAKE_BACK_GRAPH) && !defined(NO_INCREMENTAL)
1463 GC_enable_incremental();
1464 GC_printf("Switched to incremental mode\n");
1465 # if defined(MPROTECT_VDB)
1466 GC_printf("Emulating dirty bits with mprotect/signals\n");
1469 GC_printf("Reading dirty bits from /proc\n");
1470 # elif defined(GWW_VDB)
1471 GC_printf("Using GetWriteWatch-based implementation\n");
1473 GC_printf("Using DEFAULT_VDB dirty bit implementation\n");
/* GC_noop merely takes the addresses of these entry points so the    */
/* linker pulls them in; none of them is actually exercised here.     */
1483 /* Entry points we should be testing, but aren't. */
1484 /* Some can be tested by defining GC_DEBUG at the top of this file */
1485 /* This is a bit SunOS4 specific. */
1486 GC_noop(GC_expand_hp, GC_add_roots, GC_clear_roots,
1487 GC_register_disappearing_link,
1488 GC_register_finalizer_ignore_self,
1489 GC_debug_register_displacement, GC_debug_change_stubborn,
1490 GC_debug_end_stubborn_change, GC_debug_malloc_uncollectable,
1491 GC_debug_free, GC_debug_realloc,
1492 GC_generic_malloc_words_small, GC_init,
1493 GC_malloc_ignore_off_page, GC_malloc_atomic_ignore_off_page,
1494 GC_set_max_heap_size, GC_get_bytes_since_gc,
1495 GC_get_total_bytes, GC_pre_incr, GC_post_incr);
/* Release the collector's heap before exit (Win32-specific cleanup). */
1498 GC_win32_free_heap();
/* Win32-threads test driver (native threads, not pthreads-on-win32). */
1508 #if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
/* Worker thread body: presumably runs run_one_test(); arg is unused. */
/* TODO(review): confirm against the elided body.                     */
1510 DWORD __stdcall thr_run_one_test(void *arg)
/* Event signaled by thr_window once its test window exists, so       */
/* WinMain can wait for window creation before proceeding.            */
1517 HANDLE win_created_h;
/* Window procedure for the GC test window.  Two messages are handled */
/* specially; everything else falls through to DefWindowProc.         */
1520 LRESULT CALLBACK window_proc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam)
/* WM_HIBERNATE (low-memory notification on CE): collect and return   */
/* as much memory to the OS as possible.                              */
1525 GC_printf("Received WM_HIBERNATE, calling GC_gcollect\n");
1526 /* Force "unmap as much memory as possible" mode. */
1527 GC_gcollect_and_unmap();
/* WM_CLOSE: tear the window down, which ends the message loop.       */
1530 GC_printf("Received WM_CLOSE, closing window\n");
1531 DestroyWindow(hwnd);
/* Default processing for all unhandled messages.                     */
1537 ret = DefWindowProc(hwnd, uMsg, wParam, lParam);
/* Dedicated GUI thread: registers a window class, creates the test   */
/* window, signals win_created_h, then pumps messages until the       */
/* window is destroyed (GetMessage returns 0 after WM_QUIT).          */
1543 DWORD __stdcall thr_window(void *arg)
1545 WNDCLASS win_class = {
1550 GetModuleHandle(NULL),
1553 (HBRUSH)(COLOR_APPWORKSPACE+1),
1555 TEXT("GCtestWindow")
1559 if (!RegisterClass(&win_class))
1562 win_handle = CreateWindowEx(
1564 TEXT("GCtestWindow"),
1567 CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT,
1570 GetModuleHandle(NULL),
1573 if (win_handle == NULL)
/* Tell WinMain the window exists before entering the message loop.   */
1576 SetEvent(win_created_h);
1578 ShowWindow(win_handle, SW_SHOW);
1579 UpdateWindow(win_handle);
/* Standard Win32 message pump; exits when the window is destroyed.   */
1581 while (GetMessage(&msg, NULL, 0, 0)) {
1582 TranslateMessage(&msg);
1583 DispatchMessage(&msg);
/* Win32-threads entry point: optionally enables DllMain-based thread */
/* discovery and incremental mode, spawns the GUI thread plus         */
/* NTHREADS worker threads, waits for all of them, then shuts the     */
/* test window down.                                                  */
1590 int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev,
1591 WINMAIN_LPTSTR cmd, int n)
/* Implicit thread registration via DllMain is only testable when     */
/* built as a DLL and the features that conflict with it are off.     */
1601 # if defined(GC_DLL) && !defined(GC_NO_THREADS_DISCOVERY) \
1602 && !defined(MSWINCE) && !defined(THREAD_LOCAL_ALLOC) \
1603 && !defined(PARALLEL_MARK)
1604 GC_use_threads_discovery();
1605 /* Test with implicit thread registration if possible. */
1606 GC_printf("Using DllMain to track threads\n");
1609 # if !defined(MAKE_BACK_GRAPH) && !defined(NO_INCREMENTAL)
1610 GC_enable_incremental();
1612 InitializeCriticalSection(&incr_cs);
1613 GC_set_warn_proc(warn_proc);
/* Auto-reset, initially-unsignaled event; thr_window sets it once    */
/* the test window has been created.                                  */
1615 win_created_h = CreateEvent(NULL, FALSE, FALSE, NULL);
1616 if (win_created_h == (HANDLE)NULL) {
1617 GC_printf("Event creation failed %d\n", (int)GetLastError());
/* GC_CreateThread (not raw CreateThread) so the collector registers  */
/* the new thread's stack.                                            */
1620 win_thr_h = GC_CreateThread(NULL, 0, thr_window, 0, 0, &thread_id);
1621 if (win_thr_h == (HANDLE)NULL) {
1622 GC_printf("Thread creation failed %d\n", (int)GetLastError());
/* Block until the window exists; the event is single-use thereafter. */
1625 if (WaitForSingleObject(win_created_h, INFINITE) != WAIT_OBJECT_0)
1627 CloseHandle(win_created_h);
/* Spawn the worker threads that each run one copy of the test.       */
1630 for (i = 0; i < NTHREADS; i++) {
1631 h[i] = GC_CreateThread(NULL, 0, thr_run_one_test, 0, 0, &thread_id);
1632 if (h[i] == (HANDLE)NULL) {
1633 GC_printf("Thread creation failed %d\n", (int)GetLastError());
1637 # endif /* NTHREADS > 0 */
/* Join all workers before checking results.                          */
1640 for (i = 0; i < NTHREADS; i++) {
1641 if (WaitForSingleObject(h[i], INFINITE) != WAIT_OBJECT_0) {
1642 GC_printf("Thread wait failed %d\n", (int)GetLastError());
1646 # endif /* NTHREADS > 0 */
/* Ask the GUI thread to close its window, then wait for it to exit.  */
1648 PostMessage(win_handle, WM_CLOSE, 0, 0);
1649 if (WaitForSingleObject(win_thr_h, INFINITE) != WAIT_OBJECT_0)
1656 #endif /* GC_WIN32_THREADS */
/* PCR (Portable Common Runtime) driver fragment: fork two copies of  */
/* run_one_test and join each, treating a non-okay join status or a   */
/* nonzero exit code as failure.                                      */
1667 /* GC_enable_incremental(); */
1668 GC_set_warn_proc(warn_proc);
1669 th1 = PCR_Th_Fork(run_one_test, 0);
1670 th2 = PCR_Th_Fork(run_one_test, 0);
/* Join with all signals blocked and no timeout.                      */
1672 if (PCR_Th_T_Join(th1, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
1673 != PCR_ERes_okay || code != 0) {
1674 GC_printf("Thread 1 failed\n");
1676 if (PCR_Th_T_Join(th2, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
1677 != PCR_ERes_okay || code != 0) {
1678 GC_printf("Thread 2 failed\n");
/* Pthreads test driver.                                              */
1685 #if defined(GC_PTHREADS)
/* Worker thread body passed to pthread_create below; presumably runs */
/* run_one_test().  arg is unused.  TODO(review): confirm against the */
/* elided body.                                                       */
1686 void * thr_run_one_test(void * arg)
/* In debug builds, route the test's GC_free calls to the checked     */
/* debug variant.                                                     */
1693 # define GC_free GC_debug_free
/* Pthreads entry point: platform stack-size workarounds, optional    */
/* incremental mode, then NTHREADS workers created and joined.        */
1698 pthread_t th[NTHREADS];
1699 pthread_attr_t attr;
/* IRIX: touch memory 1 MB below the current stack frame to force     */
/* preallocation, since the initial stack can't always grow later.    */
1702 # ifdef GC_IRIX_THREADS
1703 /* Force a larger stack to be preallocated */
1704 /* Since the initial can't always grow later. */
1705 *((volatile char *)&code - 1024*1024) = 0; /* Require 1 MB */
1706 # endif /* GC_IRIX_THREADS */
/* HP-UX: raise the default thread stack size to 1 MB.                */
1707 # if defined(GC_HPUX_THREADS)
1708 /* Default stack size is too small, especially with the 64 bit ABI */
1710 if (pthread_default_stacksize_np(1024*1024, 0) != 0) {
1711 GC_printf("pthread_default_stacksize_np failed\n");
1713 # endif /* GC_HPUX_THREADS */
/* pthreads-win32 static library needs explicit attach/detach calls.  */
1714 # ifdef PTW32_STATIC_LIB
1715 pthread_win32_process_attach_np ();
1716 pthread_win32_thread_attach_np ();
1718 # if defined(GC_DARWIN_THREADS) && !defined(GC_NO_THREADS_DISCOVERY) \
1719 && !defined(DARWIN_DONT_PARSE_STACK) && !defined(THREAD_LOCAL_ALLOC)
1720 /* Test with the Darwin implicit thread registration. */
1721 GC_use_threads_discovery();
1722 GC_printf("Using Darwin task-threads-based world stop and push\n");
1726 pthread_attr_init(&attr);
/* Platforms whose default pthread stack is too small for the test.   */
1727 # if defined(GC_IRIX_THREADS) || defined(GC_FREEBSD_THREADS) \
1728 || defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS) \
1729 || defined(GC_OPENBSD_THREADS)
1730 pthread_attr_setstacksize(&attr, 1000000);
/* Incremental mode is only exercised with mprotect-based dirty bits  */
/* and when no conflicting feature is configured.                     */
1733 # if (defined(MPROTECT_VDB)) && !defined(REDIRECT_MALLOC) \
1734 && !defined(MAKE_BACK_GRAPH) && !defined(USE_PROC_FOR_LIBRARIES) \
1735 && !defined(NO_INCREMENTAL)
1736 GC_enable_incremental();
1737 GC_printf("Switched to incremental mode\n");
1738 # if defined(MPROTECT_VDB)
1739 GC_printf("Emulating dirty bits with mprotect/signals\n");
1742 GC_printf("Reading dirty bits from /proc\n");
1744 GC_printf("Using DEFAULT_VDB dirty bit implementation\n");
1748 GC_set_warn_proc(warn_proc);
/* fl_key is a TLS key; presumably used by the per-thread free-list   */
/* machinery in the elided test body — TODO confirm.                  */
1749 if ((code = pthread_key_create(&fl_key, 0)) != 0) {
1750 GC_printf("Key creation failed %d\n", code);
/* Spawn the worker threads, then join them all.                      */
1753 for (i = 0; i < NTHREADS; ++i) {
1754 if ((code = pthread_create(th+i, &attr, thr_run_one_test, 0)) != 0) {
1755 GC_printf("Thread %d creation failed %d\n", i, code);
1760 for (i = 0; i < NTHREADS; ++i) {
1761 if ((code = pthread_join(th[i], 0)) != 0) {
1762 GC_printf("Thread %d failed %d\n", i, code);
/* Final reporting and cleanup.                                       */
1767 (void)fflush(stdout);
1768 pthread_attr_destroy(&attr);
1769 GC_printf("Completed %u collections\n", (unsigned)GC_get_gc_no());
1770 # ifdef PTW32_STATIC_LIB
1771 pthread_win32_thread_detach_np ();
1772 pthread_win32_process_detach_np ();
1776 #endif /* GC_PTHREADS */