/* System + project includes. (NOTE(review): this excerpt is missing lines —
   the embedded original line numbers are gapped — so surrounding includes and
   #endif lines are not visible here.) */
2 #include <unistd.h> /* getpagesize, mmap, ... */
11 #include "threads/thread.h"
12 #include "threads/locks.h"
/* Fallback for platforms whose <sys/mman.h> does not define MAP_FAILED. */
17 #if !defined(HAVE_MAP_FAILED)
18 #define MAP_FAILED ((void*) -1)
/* Used to round mmap sizes up to a whole number of pages. */
22 #define PAGESIZE_MINUS_ONE (getpagesize() - 1)
/* Which collection-threshold heuristic to compile in (0, 1 or 2), and its
   tuning parameter (a shift count for heuristics 0/1, a byte count for 2). */
27 #define HEURISTIC_SEL 0
28 #define HEURISTIC_PARAM 2UL
/* First-collection threshold: heap_top plus 1/16 of the remaining heap. */
31 #define next_collection_heuristic_init() \
32 (void*)((long)heap_top + (((long)heap_limit - (long)heap_top) >> 4))
/* Steady-state threshold, selected at compile time:
   0: heap_top + (remaining space >> HEURISTIC_PARAM)
   1: heap_top + (used space << HEURISTIC_PARAM)
   2: heap_top + a fixed byte increment */
34 #if HEURISTIC_SEL == 0
35 #define next_collection_heuristic() \
36 (void*)((long)heap_top + (((long)heap_limit - (long)heap_top) >> HEURISTIC_PARAM))
37 #elif HEURISTIC_SEL == 1
38 #define next_collection_heuristic() \
39 (void*)((long)heap_top + (((long)heap_top - (long)heap_base) << HEURISTIC_PARAM))
40 #elif HEURISTIC_SEL == 2
41 #define next_collection_heuristic() \
42 (void*)((long)heap_top + HEURISTIC_PARAM)
/* Compile-time switches for the various measurement/instrumentation modes. */
45 //#define COLLECT_LIFESPAN
46 //#define NEW_COLLECT_LIFESPAN
47 #define COLLECT_FRAGMENTATION
48 //#define COLLECT_SIZES
50 #define GC_COLLECT_STATISTICS
51 //#define FINALIZER_COUNTING
/* When set, the reference/finalizer bookkeeping lists live on the GC heap
   itself instead of in malloc'd memory. */
53 #undef STRUCTURES_ON_HEAP
54 //#define STRUCTURES_ON_HEAP
59 #include "allocator.h" /* rev. 1 allocator */
60 #include "bitmap2.h" /* rev. 2 bitmap management */
/* Round size down to an ALIGN-bit alignment boundary; callers add
   (1 << ALIGN) - 1 first to get round-up behaviour. */
68 #define align_size(size) ((size) & ~((1 << ALIGN) - 1))
69 #define MAP_ADDRESS (void*) 0x10000000
71 /* --- file-wide variables */
/* Heap geometry: base..limit is the mapped region; top is the bump pointer
   (first byte past the last allocated object); next_collection is the
   threshold at which allocation triggers a GC. */
73 static void* heap_base = NULL;
74 static SIZE heap_size = 0;
75 static void* heap_top = NULL;
76 static void* heap_limit = NULL;
77 static void* heap_next_collection = NULL;
/* Three parallel bitmaps over the heap: object-start bits, reference bits
   (object contains pointers), and mark bits for the current collection.
   The *_bits aliases cache each bitmap's raw block array. */
79 static bitmap_t* start_bitmap = NULL;
80 static BITBLOCK* start_bits = NULL;
81 static bitmap_t* reference_bitmap = NULL;
82 static BITBLOCK* reference_bits = NULL;
83 static bitmap_t* mark_bitmap = NULL;
84 static BITBLOCK* mark_bits = NULL;
/* Bottom of the conservatively-scanned C stack, supplied by heap_init. */
86 static void** stackbottom = NULL;
/* Sorted doubly-linked list node holding one tracked address
   (NOTE(review): the `address` field and the closing of this typedef are
   outside this excerpt). */
88 typedef struct address_list_node {
90 struct address_list_node* prev;
91 struct address_list_node* next;
/* Global reference locations, and objects that have a finalizer. */
94 static address_list_node* references = NULL;
95 static address_list_node* finalizers = NULL;
97 #ifdef GC_COLLECT_STATISTICS
/* Counters dumped at shutdown; see the statistics section of heap_close. */
99 static unsigned long gc_collections_count = 0;
101 static unsigned long gc_alloc_total = 0;
102 static unsigned long gc_alloc_count = 0;
104 static unsigned long gc_mark_heapblocks_visited = 0;
105 static unsigned long gc_mark_not_aligned = 0;
106 static unsigned long gc_mark_not_inheap = 0;
107 static unsigned long gc_mark_not_object = 0;
108 static unsigned long gc_mark_objects_marked = 0;
109 static unsigned long gc_mark_already_marked = 0;
111 static unsigned long gc_mark_null_pointer = 0;
115 #ifdef FINALIZER_COUNTING
117 static unsigned long gc_finalizers_executed = 0;
118 static unsigned long gc_finalizers_detected = 0;
/* Serialises heap_allocate (and the collections it triggers). */
123 static iMux alloc_mutex;
126 #ifdef COLLECT_LIFESPAN
127 static FILE* tracefile;
130 #ifdef COLLECT_FRAGMENTATION
/* Pipes into gzip; written by gc_reclaim, closed in the shutdown path. */
131 static FILE* fragfile;
132 static FILE* fragsizefile;
/*
 * heap_init: create and initialise the collected heap.
 *
 *   size           requested heap size in bytes (rounded up to alignment)
 *   startsize      intended first-collection size (NOTE(review): not
 *                  referenced in the visible body — confirm it is unused)
 *   in_stackbottom bottom of the C stack, for conservative stack scanning
 *
 * Maps the heap (mmap where MAP_ANONYMOUS exists, malloc otherwise),
 * allocates the three bitmaps, seeds the first object-start bit, sets the
 * bump pointer and collection threshold, and opens the optional
 * instrumentation output streams.
 */
138 heap_init (SIZE size,
139 SIZE startsize, /* when should we collect for the first time ? */
140 void **in_stackbottom)
142 /* 1. Initialise the freelists & reset the allocator's state */
145 /* 2. Allocate at least (alignment!) size bytes of memory for the heap */
146 heap_size = align_size(size + ((1 << ALIGN) - 1));
148 #if !(defined(HAVE_MAP_ANONYMOUS))
149 heap_base = malloc(heap_size);
151 heap_base = (void*) mmap (NULL,
152 ((size_t)heap_size + PAGESIZE_MINUS_ONE) & ~PAGESIZE_MINUS_ONE,
153 PROT_READ | PROT_WRITE,
154 MAP_PRIVATE | MAP_ANONYMOUS,
159 if (heap_base == (void*)MAP_FAILED) {
160 /* unable to allocate the requested amount of memory */
161 fprintf(stderr, "heap2.c: The queen, mylord, is dead! (mmap failed)\n");
165 /* 3. Allocate the bitmaps */
166 start_bitmap = bitmap_allocate(heap_base, heap_size);
167 reference_bitmap = bitmap_allocate(heap_base, heap_size);
168 mark_bitmap = bitmap_allocate(heap_base, heap_size);
170 start_bits = start_bitmap->bitmap;
171 reference_bits = reference_bitmap->bitmap;
172 mark_bits = mark_bitmap->bitmap;
174 /* 4. Mark the first free-area as an object-start */
175 bitmap_setbit(start_bits, heap_base);
177 /* 5. Initialise the heap's state (heap_top, etc.) */
178 stackbottom = in_stackbottom; /* copy the stackbottom */
180 heap_top = heap_base; /* the current end of the heap (just behind the last allocated object) */
181 heap_limit = (void*)((long)heap_base + heap_size); /* points just behind the last accessible block of the heap */
183 /* 6. calculate a useful first collection limit */
184 /* This is extremly primitive at this point...
185 we should replace it with something more useful -- phil. */
186 heap_next_collection = next_collection_heuristic_init();
188 /* 7. Init the global reference lists & finalizer addresses */
192 #ifdef STRUCTURES_ON_HEAP
193 heap_addreference(&references);
194 heap_addreference(&finalizers);
198 /* 8. Init the mutexes for synchronization */
199 alloc_mutex.holder = 0;
202 /* 9. Set up collection of lifespan data */
203 #ifdef COLLECT_LIFESPAN
205 tracefile = fopen("heap.trace", "w");
207 tracefile = popen("gzip -9 >heap.trace.gz", "w");
210 fprintf(stderr, "heap2.c: Radio Ga Ga! (fopen failed)\n");
/* NOTE(review): %lx with pointer arguments relies on long == pointer size;
   %p would be portable — confirm target platforms. */
214 fprintf(tracefile, "heap_base\t0x%lx\n", heap_base);
215 fprintf(tracefile, "heap_limit\t0x%lx\n", heap_limit);
216 fprintf(tracefile, "heap_top\t0x%lx\n", heap_top);
219 #if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
220 lifespan_init(heap_base, heap_size);
223 /* 10. Set up collection of fragmentation data */
224 #ifdef COLLECT_FRAGMENTATION
/* NOTE(review): popen results are not checked for NULL here, unlike the
   tracefile above — a missing gzip would crash the first fprintf. */
225 fragfile = popen("gzip -9 >fragmentation.gz", "w");
226 fragsizefile = popen("gzip -9 >freeblocks.gz", "w");
/*
 * heap_call_finalizer_for_object_at: invoke the Java finalizer of the object
 * at object_addr (looked up through its vftbl/class), and bump the executed-
 * finalizer counter when FINALIZER_COUNTING is enabled.
 */
233 heap_call_finalizer_for_object_at(java_objectheader* object_addr)
235 asm_calljavamethod(object_addr->vftbl->class->finalizer, object_addr, NULL, NULL, NULL);
236 #ifdef FINALIZER_COUNTING
237 ++gc_finalizers_executed;
/* NOTE(review): the function header for this block is outside this excerpt;
   from the body (finalize everything, free bitmaps, munmap, dump statistics)
   this is the heap shutdown routine — presumably heap_close. Confirm. */
244 address_list_node* curr = finalizers;
246 /* 0. clean up lifespan module */
247 #ifdef COLLECT_LIFESPAN
255 #if defined(NEW_COLLECT_LIFESPAN)
259 #ifdef COLLECT_FRAGMENTATION
261 pclose(fragsizefile);
264 /* 1. Clean up on the heap... finalize all remaining objects */
/* Walk the finalizer list; only objects whose start bit is still set
   (i.e. still live on the heap) get their finalizer run. */
267 address_list_node* prev = curr;
268 java_objectheader* addr = (java_objectheader*)(curr->address);
270 if (addr && bitmap_testbit(start_bits, addr))
271 heap_call_finalizer_for_object_at(addr);
278 /* 2. Release the bitmaps */
279 bitmap_release(start_bitmap);
280 bitmap_release(reference_bitmap);
281 bitmap_release(mark_bitmap);
283 /* 3. Release the memory allocated to the heap */
285 munmap(heap_base, heap_size);
287 /* 4. emit statistical data */
288 #ifdef GC_COLLECT_STATISTICS
/* Each sprintf into logtext is presumably followed by a log call on the
   original (elided) lines — confirm against the full source. */
289 sprintf(logtext, "%ld bytes for %ld objects allocated.",
290 gc_alloc_total, gc_alloc_count);
292 sprintf(logtext, "%ld garbage collections performed.", gc_collections_count);
294 sprintf(logtext, "%ld heapblocks visited, %ld objects marked",
295 gc_mark_heapblocks_visited, gc_mark_objects_marked);
297 sprintf(logtext, " %ld null pointers.", gc_mark_null_pointer);
299 sprintf(logtext, " %ld out of heap.", gc_mark_not_inheap);
301 sprintf(logtext, " %ld visits to objects already marked.", gc_mark_already_marked);
303 sprintf(logtext, " %ld not an object.", gc_mark_not_object);
305 sprintf(logtext, " %ld potential references not aligned.", gc_mark_not_aligned);
309 #ifdef FINALIZER_COUNTING
310 sprintf(logtext, "%ld objects with a finalizer", gc_finalizers_detected);
313 if (gc_finalizers_detected == gc_finalizers_executed)
314 sprintf(logtext, " all finalizers executed.");
316 sprintf(logtext, " only %ld finalizers executed.", gc_finalizers_executed);
320 #if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
/*
 * heap_add_address_to_address_list: insert `address` into the singly-walked
 * address list at *list, keeping the list sorted ascending by address so that
 * finalization can scan it in heap order.
 * NOTE(review): the malloc result is not checked for NULL, and the `prev`
 * field of address_list_node is not set in the visible lines — confirm
 * against the elided portion of the body.
 */
328 heap_add_address_to_address_list(address_list_node** list, void* address)
330 /* Note: address lists are kept sorted to simplify finalization */
332 address_list_node* new_node = malloc(sizeof(address_list_node));
333 new_node->address = address;
334 new_node->next = NULL;
/* Advance to the insertion point: the first node whose successor's
   address is >= the new address. */
336 while (*list && (*list)->next) {
337 if ((*list)->next->address < address)
338 list = &(*list)->next;
340 new_node->next = *list;
346 new_node->next = *list;
/*
 * heap_add_finalizer_for_object_at: record that the object at addr has a
 * finalizer, by appending its address to the sorted `finalizers` list
 * (and tracing the event when COLLECT_LIFESPAN is enabled).
 */
354 heap_add_finalizer_for_object_at(void* addr)
356 /* Finalizers seem to be very rare... for this reason, I keep a linked
357 list of object addresses, which have a finalizer attached. This list
358 is kept in ascending order according to the order garbage is freed.
359 This list is currently kept separate from the heap, but should be
360 moved onto it, but some JIT-marker code to handle these special
361 objects will need to be added first. -- phil. */
363 heap_add_address_to_address_list(&finalizers, addr);
365 #ifdef COLLECT_LIFESPAN
366 fprintf(tracefile, "finalizer\t0x%lx\n", addr);
/*
 * heap_allocate: allocate an aligned block of at least in_length bytes.
 *
 * Strategy (under alloc_mutex):
 *   1. try the allocator freelists;
 *   2. if that fails and the collection threshold would be crossed,
 *      run a collection and retry the freelists;
 *   3. otherwise bump heap_top, provided heap_limit is not exceeded.
 * On success the object-start bit is set, the reference bit records whether
 * the object may contain pointers, and a finalizer hint is stored if the
 * class has one.
 */
371 heap_allocate (SIZE in_length,
373 methodinfo *finalizer)
375 SIZE length = align_size(in_length + ((1 << ALIGN) - 1));
376 void* free_chunk = NULL;
379 /* check for misaligned in_length parameter */
380 if (length != in_length)
382 "heap2.c: heap_allocate was passed unaligned in_length parameter: %ld, \n aligned to %ld. (mistrust)\n",
386 #ifdef FINALIZER_COUNTING
388 ++gc_finalizers_detected;
391 #if defined(COLLECT_LIFESPAN) || defined(NEW_COLLECT_LIFESPAN)
392 /* perform garbage collection to collect data for lifespan analysis */
393 if (heap_top > heap_base)
398 lock_mutex(&alloc_mutex);
401 /* 1. attempt to get a free block with size >= length from the freelists */
402 free_chunk = allocator_alloc(length);
404 /* 2. if unsuccessful, try alternative allocation strategies */
406 /* 2.a if the collection threshold would be exceeded, collect the heap */
407 if ((long)heap_top + length > (long)heap_next_collection) {
408 /* 2.a.1. collect if the next_collection threshold would be exceeded */
411 /* 2.a.2. we just ran a collection, recheck the freelists */
412 free_chunk = allocator_alloc(length);
416 /* 2.a.3. we can't satisfy the request from the freelists, check
417 against the heap_limit whether growing the heap is possible */
418 if ((long)heap_top + length > (long)heap_limit)
422 /* 2.b. grow the heap */
423 free_chunk = heap_top;
424 heap_top = (void*)((long)heap_top + length);
428 /* 3.a. mark all necessary bits, store the finalizer & return the newly allocated block */
430 /* I don't mark the object-start anymore, as it always is at the beginning of a free-block,
431 which already is marked (Note: The first free-block gets marked in heap_init). -- phil. */
432 bitmap_setbit(start_bits, free_chunk); /* mark the new object */
434 #ifndef SIZE_FROM_CLASSINFO
435 bitmap_setbit(start_bits, (void*)((long)free_chunk + (long)length)); /* mark the freespace behind the new object */
/* Reference bit: set when the object may contain pointers, cleared when it
   is pointer-free (the guarding condition is on an elided line). */
439 bitmap_setbit(reference_bits, free_chunk);
441 bitmap_clearbit(reference_bits, free_chunk);
443 /* store a hint, that there's a finalizer for this address */
445 heap_add_finalizer_for_object_at(free_chunk);
447 #ifdef GC_COLLECT_STATISTICS
448 gc_alloc_total += length;
452 #ifdef COLLECT_LIFESPAN
453 fprintf(tracefile, "alloc\t0x%lx\t0x%lx\n",
454 free_chunk, (long)free_chunk + length);
457 #if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
458 lifespan_alloc(free_chunk, length);
463 unlock_mutex(&alloc_mutex);
/*
 * heap_addreference: register a global reference location (a slot the GC must
 * treat as a root) by adding it to the sorted `references` list.
 */
469 heap_addreference (void **reflocation)
471 /* I currently use a separate linked list (as in the original code) to hold
472 the global reference locations, but I'll change this to allocate these
473 in blocks on the heap; we'll have to add JIT-Marker code for those Java
474 objects then. -- phil. */
476 heap_add_address_to_address_list(&references, reflocation);
/*
 * gc_finalize: after marking, run the finalizer of every registered object
 * whose mark bit is NOT set (i.e. that is about to be reclaimed).
 * Walks the sorted `finalizers` list; list-node unlinking happens on lines
 * elided from this excerpt.
 */
481 void gc_finalize (void)
483 /* This will have to be slightly rewritten as soon the JIT-marked heap-based lists are used. -- phil. */
485 address_list_node* curr = finalizers;
486 address_list_node* prev;
489 /* FIXME: new code, please! */
493 if (!bitmap_testbit(mark_bits, curr->address)) {
495 #ifdef FINALIZER_COUNTING
496 ++gc_finalizers_executed;
498 asm_calljavamethod(((java_objectheader*)curr->address)->vftbl->class->finalizer,
499 curr->address, NULL, NULL, NULL);
/*
 * advance_address: step from one heap block to the next.
 * A block whose first word has bit 0 set is a free block whose size is
 * encoded in that word (low two bits masked off); otherwise it is an object
 * and its size comes from the array header or the class info.
 * NOTE(review): `*(u8)addr` on the size line is missing a `*` — it casts the
 * pointer to an integer instead of dereferencing; `*(u8*)addr` (as used in
 * the condition above it) looks intended. Confirm against the full source.
 */
512 void* advance_address(void* addr)
514 if (*(u8*)addr & 0x1)
515 return addr + (*(u8)addr & ~0x3);
517 if (((java_objectheader*)addr)->vftbl == class_array->vftbl)
518 addr = (void**)((long)addr + (long)((java_arrayheader*)addr)->alignedsize);
520 addr = (void**)((long)addr + (long)((java_objectheader*)addr)->vftbl->class->alignedsize);
/*
 * get_type: classify the heap block at addr for gc_reclaim's state machine
 * (marked object / free block / unmarked object, by the visible tests).
 * NOTE(review): `markbits` here does not match the file-scope `mark_bits`
 * variable — likely a typo or a local alias declared on an elided line;
 * confirm against the full source.
 */
529 int get_type(void* addr)
531 if (bitmap_testbit(markbits, addr))
534 if (*(u8*)addr & 0x2)
/*
 * gc_reclaim: sweep phase. Walks the heap with advance_address/get_type,
 * coalesces runs of unmarked blocks into free chunks handed to the allocator
 * (allocator_free / _prejoin / _blockjoin / _postjoin variants pick how the
 * chunk joins its neighbours), then swaps the mark and start bitmaps (this
 * collection's mark bits become the next cycle's object-start bits), lowers
 * heap_top past any trailing free space, updates the collection threshold,
 * and emits fragmentation statistics.
 * NOTE(review): the loop structure and several case labels fall on lines
 * elided from this excerpt; the switch bodies below are fragments.
 */
542 void gc_reclaim (void)
545 void* free_end = heap_base;
554 bitmap_t* temp_bitmap;
556 #ifdef COLLECT_FRAGMENTATION
557 unsigned long free_size = 0;
558 unsigned long free_fragments = 0;
561 /* 2. reclaim unmarked objects */
/* State machine over heap blocks: accumulate `size` across adjacent
   free/unmarked blocks, then release the run to the allocator. */
566 switch (get_type(addr)) {
569 size = *(u8*)addr & ~0x3;
570 addr = advance_address(addr);
575 addr = advance_address(addr);
579 addr = advance_address(addr);
584 switch (get_type(addr)) {
586 size += *(u8*)addr & ~0x3;
587 addr = advance_address(addr);
591 allocator_free_prejoin(addr1, addr, size);
592 addr = advance_address(addr);
596 allocator_free(addr1, size);
598 addr = advance_address(addr);
603 switch (get_type(addr)) {
605 size = *(u8*)addr & ~0x3;
607 addr = advance_address(addr);
611 fprintf(stderr, "error!");
615 addr = advance_address(addr);
620 switch (get_type(addr)) {
622 size += *(u8*)addr & ~0x3;
623 addr = advance_address(addr);
627 allocator_free_blockjoin(addr1, addr, size);
628 addr = advance_address(addr);
632 allocator_free_postjoin(addr1, addr, size);
633 addr = advance_address(addr);
637 /* 3.1. swap mark & start bitmaps */
638 temp_bits = mark_bits;
639 mark_bits = start_bits;
640 start_bits = temp_bits;
642 temp_bitmap = mark_bitmap;
643 mark_bitmap = start_bitmap;
644 start_bitmap = temp_bitmap;
646 /* 3.3. update heap_top */
647 if (free_start < heap_top) {
648 heap_top = free_start;
649 #ifdef NEW_COLLECT_LIFESPAN
650 lifespan_free(free_start, free_end);
/* Re-mark the free area at the (possibly lowered) heap_top as a block start. */
655 if (heap_top < heap_limit)
656 bitmap_setbit(start_bits, heap_top);
659 /* 3.4. emit fragmentation info */
660 #ifdef COLLECT_FRAGMENTATION
662 unsigned long heap_full = (unsigned long)heap_top - (unsigned long)heap_base;
663 unsigned long heap_life = (unsigned long)heap_top - (unsigned long)heap_base - free_size;
666 "%ld\t%ld\t%ld\t%ld\t%f\t%f\t%f\n",
/* Percentages guard against division by zero via the ?: fallbacks. */
671 100*(float)free_size/(free_fragments ? free_fragments : 1),
672 100*(float)heap_life/(heap_full ? heap_full : 1),
673 100*(float)free_size/(heap_full ? heap_full : 1)
678 allocator_dump_to_file(fragsizefile);
681 /* 4. adjust the collection threshold */
682 heap_next_collection = next_collection_heuristic();
683 if (heap_next_collection > heap_limit)
684 heap_next_collection = heap_limit;
686 #ifdef COLLECT_LIFESPAN
687 fprintf(tracefile, "heap_top\t0x%lx\n", heap_top);
/*
 * gc_mark_object_at: conservative mark step for one candidate pointer.
 * Rejects the candidate cheaply (out of heap -> already marked -> not an
 * object start -> misaligned, in that measured order), then sets its mark
 * bit and, if the reference bit says the object may contain pointers,
 * recurses over every word within the object's extent.
 */
694 gc_mark_object_at (void** addr)
697 * A note concerning the order of the tests:
699 * Statistics collected during a test run, where alignment
700 * was tested before checking whether the addr points into
702 * >> LOG: 9301464 bytes for 196724 objects allocated.
703 * >> LOG: 15 garbage collections performed.
704 * >> LOG: 6568440 heapblocks visited, 469249 objects marked
705 * >> LOG: 1064447 visits to objects already marked.
706 * >> LOG: 988270 potential references not aligned.
707 * >> LOG: 4049446 out of heap.
708 * >> LOG: 5236 not an object.
710 * These results show, that only about 1/4 of all heapblocks
711 * point to objects; The single most important reason why a
712 * heapblock can not point at an object is, that it's value
713 * doesn't fall within the heap area (this test was performed
716 * From the results, the various tests have to be conducted
717 * in the following order for maximum efficiency:
719 * 2. already marked ?
723 * The results after reordering:
724 * >> LOG: 9301464 bytes for 196724 objects allocated.
725 * >> LOG: 15 garbage collections performed.
726 * >> LOG: 6568440 heapblocks visited, 469249 objects marked
727 * >> LOG: 1064447 visits to objects already marked.
728 * >> LOG: 350 potential references not aligned.
729 * >> LOG: 5037366 out of heap.
730 * >> LOG: 5236 not an object.
734 * 2. already marked ?
738 * >> LOG: 9301464 bytes for 196724 objects allocated.
739 * >> LOG: 15 garbage collections performed.
740 * >> LOG: 6568440 heapblocks visited, 469249 objects marked
741 * >> LOG: 5037366 out of heap.
742 * >> LOG: 1064456 visits to objects already marked.
743 * >> LOG: 5539 not an object.
744 * >> LOG: 38 potential references not aligned.
746 * Apparently, most unaligned values will already be eliminated
747 * when checking against the bounds of the heap. Checking this
748 * property first, should thus improve collection times.
751 /* 1.a. if addr doesn't point into the heap, return. */
/* Single unsigned compare covers both below-base and above-top. */
752 if ((unsigned long)addr - (unsigned long)heap_base >=
753 ((long)heap_top - (long)heap_base)) {
754 #ifdef GC_COLLECT_STATISTICS
756 ++gc_mark_null_pointer;
758 ++gc_mark_not_inheap;
763 /* 1.b. if align(addr) has already been marked during this collection, return. */
764 if (bitmap_testbit(mark_bits, (void*)addr)) {
765 #ifdef GC_COLLECT_STATISTICS
766 ++gc_mark_already_marked;
771 /* 1.c. if align(addr) doesn't point to the start of an object, return. */
772 if (!bitmap_testbit(start_bits, (void*)addr)) {
773 #ifdef GC_COLLECT_STATISTICS
774 ++gc_mark_not_object;
779 /* 1.d. if addr is not properly aligned, return. */
780 if ((long)addr & ((1 << ALIGN) - 1)) {
781 #ifdef GC_COLLECT_STATISTICS
782 ++gc_mark_not_aligned;
787 /* 2. Mark the object at addr */
788 bitmap_setbit(mark_bits, (void*)addr);
789 #ifdef GC_COLLECT_STATISTICS
790 ++gc_mark_objects_marked;
793 #ifdef JIT_MARKER_SUPPORT
794 asm_calljavamethod(addr->vftbl->class->marker, addr, NULL, NULL, NULL);
797 /* 3. mark the references contained within the extents of the object at addr */
798 if (bitmap_testbit(reference_bits, addr)) {
799 /* 3.1. find the end of the object */
802 #ifdef SIZE_FROM_CLASSINFO
803 if (((java_objectheader*)addr)->vftbl == class_array->vftbl)
804 end = (void**)((long)addr + (long)((java_arrayheader*)addr)->alignedsize);
806 end = (void**)((long)addr + (long)((java_objectheader*)addr)->vftbl->class->alignedsize);
808 end = (void**)bitmap_find_next_setbit(start_bitmap, addr + 1); /* points just behind the object */
811 /* 3.2. mark the references within the object at addr */
812 #ifdef GC_COLLECT_STATISTICS
813 gc_mark_heapblocks_visited += ((long)end - (long)addr) >> ALIGN;
/* Recurse on each word of the object (loop header is on an elided line). */
816 gc_mark_object_at(*(addr++));
/*
 * gc_mark_references: mark the objects pointed to by every registered global
 * reference location (the `references` list built by heap_addreference).
 */
826 void gc_mark_references (void)
828 address_list_node* curr = references;
831 #ifdef GC_COLLECT_STATISTICS
832 ++gc_mark_heapblocks_visited;
/* curr->address is a void** root slot; mark what it currently points at. */
834 gc_mark_object_at(*((void**)(curr->address)));
/*
 * markreferences: conservatively treat every word in [start, end) as a
 * potential object pointer and feed it to gc_mark_object_at.
 */
842 markreferences(void** start, void** end)
844 while (start < end) {
845 #ifdef GC_COLLECT_STATISTICS
846 ++gc_mark_heapblocks_visited;
848 gc_mark_object_at(*(start++));
/*
 * gc_mark_stack: conservatively scan the C stack(s) for references.
 * Single-threaded case: scan between a local dummy (current top of stack)
 * and the saved stackbottom, in whichever order the stack grows.
 * Threaded case: scan every live thread's stack region plus the thread
 * priority queue heads, and mark each thread object itself.
 */
854 void gc_mark_stack (void)
861 if (currentThread == NULL) {
862 void **top_of_stack = &dummy;
864 if (top_of_stack > stackbottom)
865 markreferences(stackbottom, top_of_stack);
867 markreferences(top_of_stack, stackbottom);
870 for (aThread = liveThreads; aThread != 0;
871 aThread = CONTEXT(aThread).nextlive) {
872 gc_mark_object_at((void*)aThread);
873 if (CONTEXT(aThread).usedStackTop > CONTEXT(aThread).stackEnd)
874 markreferences((void**)CONTEXT(aThread).stackEnd,
875 (void**)CONTEXT(aThread).usedStackTop);
877 markreferences((void**)CONTEXT(aThread).usedStackTop,
878 (void**)CONTEXT(aThread).stackEnd);
881 markreferences((void**)&threadQhead[0],
882 (void**)&threadQhead[MAX_THREAD_PRIO]);
885 void **top_of_stack = &dummy;
887 if (top_of_stack > stackbottom)
888 markreferences(stackbottom, top_of_stack);
890 markreferences(top_of_stack, stackbottom);
/* NOTE(review): the function header for this block is outside this excerpt;
   from the body (re-entrancy guard, clear marks, mark roots, count the
   collection) this is the collector driver — presumably gc_run. Confirm. */
/* Re-entrancy guard: set while a collection is in progress so a
   finalizer-triggered allocation cannot start a nested collection. */
898 static int armageddon_is_near = 0;
900 if (armageddon_is_near) {
901 /* armageddon_is_here! */
902 fprintf(stderr, "Oops, seems like there's a slight problem here: gc_run() called while still running?!\n");
906 armageddon_is_near = true;
907 heap_next_collection = heap_limit; /* try to avoid finalizer-induced collections */
909 bitmap_clear(mark_bitmap);
/* Spill registers to the stack, then mark from stack and global roots. */
911 asm_dumpregistersandcall(gc_mark_stack);
912 gc_mark_references();
916 armageddon_is_near = false;
918 #ifdef GC_COLLECT_STATISTICS
919 ++gc_collections_count;
924 /************************* Function: gc_init **********************************
926 Initializes anything that must be initialized to call the gc on the right
929 ******************************************************************************/
936 /************************** Function: gc_call ********************************
938 Calls the garbage collector. The garbage collector should always be called
939 using this function since it ensures that enough stack space is available.
941 ******************************************************************************/
/* NOTE(review): only fragments of gc_call's body are visible below; it
   records the current stack top and switches to the main thread's stack
   before running the collector. */
949 assert(blockInts == 0);
952 if (currentThread == NULL || currentThread == mainThread) {
953 CONTEXT(mainThread).usedStackTop = &dummy;
957 asm_switchstackandcall(CONTEXT(mainThread).usedStackTop, gc_run,
958 (void**)&(CONTEXT(currentThread).usedStackTop));
968 * These are local overrides for various environment variables in Emacs.
969 * Please do not remove this and leave it at the end of the file, where
970 * Emacs will automagically detect them.
971 * ---------------------------------------------------------------------
974 * indent-tabs-mode: t