Changed the makefile system to autoconf/automake.
[cacao.git] / mm / heap2.c
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>             /* malloc, free, exit */
#include <assert.h>

#include <unistd.h>             /* getpagesize, mmap, ... */
#include <sys/types.h>
#include <sys/mman.h>

#include "asmpart.h"
#include "callargs.h"
#include "threads/thread.h"
#include "threads/locks.h"

#include "lifespan.h"
#include "mm.h"
16
/* MAP_FAILED is the POSIX mmap error return; provide it for old headers */
#if !defined(HAVE_MAP_FAILED)
#define MAP_FAILED ((void*) -1)
#endif


/* used to round the mmap'ed region up to a whole number of pages */
#define PAGESIZE_MINUS_ONE      (getpagesize() - 1)

/* ALIGN/OFFSET are (re)defined by the allocator/bitmap headers below */
#undef ALIGN
#undef OFFSET

/* selects which next_collection_heuristic() variant is compiled in */
#define HEURISTIC_SEL    0
#define HEURISTIC_PARAM  2UL


/* first collection threshold: heap_top + 1/16 of the remaining heap */
#define next_collection_heuristic_init()   \
             (void*)((long)heap_top + (((long)heap_limit - (long)heap_top) >> 4))

#if HEURISTIC_SEL == 0
/* threshold grows with the remaining free heap, shifted down */
#define next_collection_heuristic()   \
             (void*)((long)heap_top + (((long)heap_limit - (long)heap_top) >> HEURISTIC_PARAM))
#elif HEURISTIC_SEL == 1
/* threshold proportional to the already used part of the heap */
#define next_collection_heuristic()   \
             (void*)((long)heap_top + (((long)heap_top - (long)heap_base) << HEURISTIC_PARAM))
#elif HEURISTIC_SEL == 2
/* fixed-increment threshold */
#define next_collection_heuristic()   \
             (void*)((long)heap_top + HEURISTIC_PARAM)   
#endif

/* optional measurement / debugging features (all off by default) */
//#define PSEUDO_GENERATIONAL
//#define COLLECT_LIFESPAN
//#define NEW_COLLECT_LIFESPAN
//#define COLLECT_FRAGMENTATION
//#define COLLECT_SIZES

//#define GC_COLLECT_STATISTICS
//#define FINALIZER_COUNTING

/* if defined, the reference/finalizer lists themselves live on the heap
   (see heap_init step 7) */
#undef STRUCTURES_ON_HEAP
//#define STRUCTURES_ON_HEAP

#define false 0
#define true 1

#include "allocator.h" /* rev. 1 allocator */
#include "bitmap2.h"   /* rev. 2 bitmap management */

bool collectverbose;

void gc_call (void);

/* --- macros */

/* rounds size DOWN to a multiple of the allocation unit (1 << ALIGN);
   callers add (1 << ALIGN) - 1 beforehand to get round-up behaviour */
#define align_size(size)        ((size) & ~((1 << ALIGN) - 1))
#define MAP_ADDRESS                     (void*) 0x10000000

/* --- file-wide variables */

static void*    heap_base = NULL;            /* first byte of the heap */
static SIZE             heap_size = 0;               /* heap size in bytes */
static void*    heap_top = NULL;             /* just behind the last allocated object */
static void*    heap_limit = NULL;           /* just behind the last accessible byte */
static void*    heap_next_collection = NULL; /* collect when heap_top would pass this */

/* three parallel bitmaps over the heap (presumably one bit per alignment
   unit -- see bitmap2.h): object-start bits, has-references bits, mark bits */
static bitmap_t* start_bitmap = NULL;
static BITBLOCK* start_bits = NULL;
static bitmap_t* reference_bitmap = NULL;
static BITBLOCK* reference_bits = NULL;
static bitmap_t* mark_bitmap = NULL;
static BITBLOCK* mark_bits = NULL;

static void**   stackbottom = NULL;  /* bottom of the stack scanned for roots */

/* node of the global reference / finalizer address lists.
   NOTE(review): 'prev' is never written or read anywhere in this file --
   confirm it is dead before relying on it. */
typedef struct address_list_node {
	void* address;
	struct address_list_node* prev;
	struct address_list_node* next;
} address_list_node;

static address_list_node* references = NULL; /* locations holding global refs */
static address_list_node* finalizers = NULL; /* objects with a finalizer */

#ifdef GC_COLLECT_STATISTICS

static unsigned long gc_collections_count = 0;

static unsigned long gc_alloc_total = 0;
static unsigned long gc_alloc_count = 0;

static unsigned long gc_mark_heapblocks_visited = 0;
static unsigned long gc_mark_not_aligned = 0;
static unsigned long gc_mark_not_inheap = 0;
static unsigned long gc_mark_not_object = 0;
static unsigned long gc_mark_objects_marked = 0;
static unsigned long gc_mark_already_marked = 0;

static unsigned long gc_mark_null_pointer = 0;

#endif

#ifdef FINALIZER_COUNTING

static unsigned long gc_finalizers_executed = 0;
static unsigned long gc_finalizers_detected = 0;

#endif

#ifdef USE_THREADS
static iMux  alloc_mutex;    /* serialises heap_allocate */
#endif

#ifdef COLLECT_LIFESPAN
static FILE* tracefile;      /* allocation/free trace (gzip pipe) */
#endif

#ifdef COLLECT_FRAGMENTATION
static FILE* fragfile;       /* per-collection fragmentation summary */
static FILE* fragsizefile;   /* free-block size dump */
#endif
135
136 /* --- implementation */
137
138 void 
139 heap_init (SIZE size, 
140                    SIZE startsize, /* when should we collect for the first time ? */
141                    void **in_stackbottom)
142 {
143         /* 1. Initialise the freelists & reset the allocator's state */
144         allocator_init(); 
145
146         /* 2. Allocate at least (alignment!) size bytes of memory for the heap */
147         heap_size = align_size(size + ((1 << ALIGN) - 1));
148
149 #if !(defined(HAVE_MAP_ANONYMOUS))
150         heap_base = malloc(heap_size);
151 #else
152         heap_base = (void*) mmap (NULL, 
153                                                           ((size_t)heap_size + PAGESIZE_MINUS_ONE) & ~PAGESIZE_MINUS_ONE,
154                                                           PROT_READ | PROT_WRITE, 
155                                                           MAP_PRIVATE | MAP_ANONYMOUS, 
156                                                           -1, 
157                                                           (off_t) 0);
158 #endif
159
160         if (heap_base == (void*)MAP_FAILED) {
161                 /* unable to allocate the requested amount of memory */
162                 fprintf(stderr, "heap2.c: The queen, mylord, is dead! (mmap failed)\n");
163                 exit(-1);
164         }
165
166         /* 3. Allocate the bitmaps */
167         start_bitmap = bitmap_allocate(heap_base, heap_size);
168         reference_bitmap = bitmap_allocate(heap_base, heap_size);
169         mark_bitmap = bitmap_allocate(heap_base, heap_size);
170
171         start_bits = start_bitmap->bitmap;
172     reference_bits = reference_bitmap->bitmap;
173     mark_bits = mark_bitmap->bitmap;
174
175         /* 4. Mark the first free-area as an object-start */
176         bitmap_setbit(start_bits, heap_base);
177
178         /* 5. Initialise the heap's state (heap_top, etc.) */
179         stackbottom = in_stackbottom; /* copy the stackbottom */
180
181         heap_top = heap_base; /* the current end of the heap (just behind the last allocated object) */
182         heap_limit = (void*)((long)heap_base + heap_size); /* points just behind the last accessible block of the heap */
183
184         /* 6. calculate a useful first collection limit */
185         /* This is extremly primitive at this point... 
186            we should replace it with something more useful -- phil. */
187         heap_next_collection = next_collection_heuristic_init();
188
189         /* 7. Init the global reference lists & finalizer addresses */
190         references = NULL;
191         finalizers = NULL;
192
193 #ifdef STRUCTURES_ON_HEAP
194         heap_addreference(&references);
195         heap_addreference(&finalizers);
196 #endif
197
198 #ifdef USE_THREADS
199         /* 8. Init the mutexes for synchronization */
200         alloc_mutex.holder = 0;
201 #endif
202
203         /* 9. Set up collection of lifespan data */
204 #ifdef COLLECT_LIFESPAN
205 #if 0
206         tracefile = fopen("heap.trace", "w");
207 #else
208         tracefile = popen("gzip -9 >heap.trace.gz", "w");
209 #endif
210         if (!tracefile) {
211                 fprintf(stderr, "heap2.c: Radio Ga Ga! (fopen failed)\n");
212                 exit(-2);
213         }
214
215         fprintf(tracefile, "heap_base\t0x%lx\n", heap_base);    
216         fprintf(tracefile, "heap_limit\t0x%lx\n", heap_limit);
217         fprintf(tracefile, "heap_top\t0x%lx\n", heap_top);
218 #endif
219
220 #if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
221         lifespan_init(heap_base, heap_size);
222 #endif
223
224         /* 10. Set up collection of fragmentation data */
225 #ifdef COLLECT_FRAGMENTATION
226         fragfile = popen("gzip -9 >fragmentation.gz", "w");
227         fragsizefile = popen("gzip -9 >freeblocks.gz", "w");
228 #endif
229 }
230
inline
static
void
heap_call_finalizer_for_object_at(java_objectheader* object_addr)
{
	/* Invoke the Java finalizer of the object at object_addr through the
	   JIT call helper; the finalizer method is taken from the object's
	   vftbl/class. Caller must ensure the object actually has one. */
	asm_calljavamethod(object_addr->vftbl->class->finalizer, object_addr, NULL, NULL, NULL);
#ifdef FINALIZER_COUNTING
	++gc_finalizers_executed;
#endif
}
241
242 void 
243 heap_close (void)
244 {
245         address_list_node* curr = finalizers;
246
247         /* 0. clean up lifespan module */
248 #ifdef COLLECT_LIFESPAN
249 #if 0
250         fclose(tracefile);
251 #else
252         pclose(tracefile);
253 #endif
254 #endif
255
256 #if defined(NEW_COLLECT_LIFESPAN)
257         lifespan_close();
258 #endif
259
260 #ifdef COLLECT_FRAGMENTATION
261         pclose(fragfile);
262         pclose(fragsizefile);
263 #endif
264
265         /* 1. Clean up on the heap... finalize all remaining objects */
266 #if 1
267         while (curr) {
268                 address_list_node* prev = curr;
269                 java_objectheader* addr = (java_objectheader*)(curr->address);
270                 
271                 if (addr && bitmap_testbit(start_bits, addr))
272                         heap_call_finalizer_for_object_at(addr);
273                 
274                 curr = curr->next;
275                 free(prev);
276         }
277 #endif
278
279         /* 2. Release the bitmaps */
280         bitmap_release(start_bitmap);
281         bitmap_release(reference_bitmap);
282         bitmap_release(mark_bitmap);
283
284         /* 3. Release the memory allocated to the heap */
285         if (heap_base)
286                 munmap(heap_base, heap_size);
287
288         /* 4. emit statistical data */
289 #ifdef GC_COLLECT_STATISTICS
290         sprintf(logtext, "%ld bytes for %ld objects allocated.", 
291                         gc_alloc_total, gc_alloc_count); 
292         dolog();
293         sprintf(logtext, "%ld garbage collections performed.", gc_collections_count);
294         dolog();
295         sprintf(logtext, "%ld heapblocks visited, %ld objects marked", 
296                         gc_mark_heapblocks_visited, gc_mark_objects_marked);
297         dolog();
298         sprintf(logtext, "    %ld null pointers.", gc_mark_null_pointer);
299         dolog();
300         sprintf(logtext, "    %ld out of heap.", gc_mark_not_inheap);
301         dolog();
302         sprintf(logtext, "    %ld visits to objects already marked.", gc_mark_already_marked);
303         dolog();
304         sprintf(logtext, "    %ld not an object.", gc_mark_not_object);
305         dolog();
306         sprintf(logtext, "    %ld potential references not aligned.", gc_mark_not_aligned);
307         dolog();
308 #endif
309
310 #ifdef FINALIZER_COUNTING
311         sprintf(logtext, "%ld objects with a finalizer", gc_finalizers_detected);
312         dolog();
313
314         if (gc_finalizers_detected == gc_finalizers_executed)
315                 sprintf(logtext, "    all finalizers executed.");
316         else
317                 sprintf(logtext, "    only %ld finalizers executed.", gc_finalizers_executed);
318         dolog();
319 #endif
320
321 #if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
322         lifespan_emit();
323 #endif
324 }
325
326 inline
327 static
328 void 
329 heap_add_address_to_address_list(address_list_node** list, void* address)
330 {
331         /* Note: address lists are kept sorted to simplify finalization */
332
333         address_list_node* new_node = malloc(sizeof(address_list_node));
334         new_node->address = address;
335         new_node->next = NULL;
336
337         while (*list && (*list)->next) {
338                 if ((*list)->next->address < address)
339                         list = &(*list)->next;
340                 else {
341                         new_node->next = *list;
342                         *list = new_node;
343                         return;
344                 }                       
345         }
346
347         new_node->next = *list;
348         *list = new_node;
349 }
350
351
inline
static
void
heap_add_finalizer_for_object_at(void* addr)
{
	/* Record that the object at addr has a finalizer which must run before
	   its storage is reused (see gc_finalize / heap_close). */

	/* Finalizers seem to be very rare... for this reason, I keep a linked
	   list of object addresses, which have a finalizer attached. This list
	   is kept in ascending order according to the order garbage is freed.
	   This list is currently kept separate from the heap, but should be 
	   moved onto it, but some JIT-marker code to handle these special
	   objects will need to be added first. -- phil. */

	heap_add_address_to_address_list(&finalizers, addr);

#ifdef COLLECT_LIFESPAN
	fprintf(tracefile, "finalizer\t0x%lx\n", addr);
#endif
}
370
/* heap_allocate *************************************************************

   Allocates 'in_length' bytes (rounded up to the allocation unit) on the
   garbage-collected heap and returns a pointer to the new block, or NULL
   if neither the freelists, a collection, nor growing the heap up to
   heap_limit could satisfy the request.

   references ... true if the new object may contain references (sets the
                  reference bit so the marker scans its interior)
   finalizer  ... non-NULL if the object's class has a finalizer; the
                  address is then remembered for gc_finalize/heap_close

*******************************************************************************/

void* 
heap_allocate (SIZE 		  in_length, 
			   bool 	  references, 
			   methodinfo *finalizer)
{
	SIZE	length = align_size(in_length + ((1 << ALIGN) - 1)); 
	void*	free_chunk = NULL;

#if 0
	/* check for misaligned in_length parameter */
	if (length != in_length)
		fprintf(stderr,
				"heap2.c: heap_allocate was passed unaligned in_length parameter: %ld, \n         aligned to %ld. (mistrust)\n",
				in_length, length);
#endif

#ifdef FINALIZER_COUNTING
	if (finalizer)
		++gc_finalizers_detected;
#endif

#if defined(COLLECT_LIFESPAN) || defined(NEW_COLLECT_LIFESPAN)
	/* perform garbage collection to collect data for lifespan analysis */
	if (heap_top > heap_base)
		gc_call();
#endif

#ifdef USE_THREADS
	lock_mutex(&alloc_mutex);
#endif	
 retry:	/* NOTE(review): this label is never jumped to -- looks vestigial */
	/* 1. attempt to get a free block with size >= length from the freelists */
	free_chunk = allocator_alloc(length);

	/* 2. if unsuccessful, try alternative allocation strategies */
	if (!free_chunk) {
		/* 2.a if the collection threshold would be exceeded, collect the heap */
		if ((long)heap_top + length > (long)heap_next_collection) {
			/* 2.a.1. collect if the next_collection threshold would be exceeded */
			gc_call();

			/* 2.a.2. we just ran a collection, recheck the freelists */
			free_chunk = allocator_alloc(length);
			if (free_chunk)
				goto success;

			/* 2.a.3. we can't satisfy the request from the freelists, check
				  against the heap_limit whether growing the heap is possible */
			if ((long)heap_top + length > (long)heap_limit)
				goto failure;	/* free_chunk is still NULL here */
		}

		/* 2.b. grow the heap (bump heap_top) */
		free_chunk = heap_top;
		heap_top = (void*)((long)heap_top + length);
	}

 success:
	/* 3.a. mark all necessary bits, store the finalizer & return the newly allocated block */

	/* I don't mark the object-start anymore, as it always is at the beginning of a free-block,
	   which already is marked (Note: The first free-block gets marked in heap_init). -- phil. */
	bitmap_setbit(start_bits, free_chunk); /* mark the new object */

#ifndef SIZE_FROM_CLASSINFO
	bitmap_setbit(start_bits, (void*)((long)free_chunk + (long)length)); /* mark the freespace behind the new object */
#endif

	if (references)
		bitmap_setbit(reference_bits, free_chunk);
	else 
		bitmap_clearbit(reference_bits, free_chunk);

	/* store a hint, that there's a finalizer for this address */
	if (finalizer)
		heap_add_finalizer_for_object_at(free_chunk);

#ifdef GC_COLLECT_STATISTICS
	gc_alloc_total += length;
	++gc_alloc_count;
#endif

#ifdef COLLECT_LIFESPAN
	fprintf(tracefile, "alloc\t0x%lx\t0x%lx\n", 
			free_chunk, (long)free_chunk + length);
#endif

#if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
	lifespan_alloc(free_chunk, length);
#endif

 failure:
	/* the success path falls through here on purpose: unlock & return */
#ifdef USE_THREADS
	unlock_mutex(&alloc_mutex);
#endif	
	return free_chunk;
}
468
void 
heap_addreference (void **reflocation)
{
	/* Registers a memory location holding a global (root) reference; on
	   every collection the object that location points to gets marked. */

	/* I currently use a separate linked list (as in the original code) to hold
	   the global reference locations, but I'll change this to allocate these
	   in blocks on the heap; we'll have to add JIT-Marker code for those Java
	   objects then. -- phil. */

	heap_add_address_to_address_list(&references, reflocation);
}
479
static
inline
void gc_finalize (void)
{
	/* Runs the Java finalizer of every registered object that did NOT get
	   marked in the current collection (i.e. is about to be reclaimed),
	   then clears its list entry so it is finalized only once. */

	/* This will have to be slightly rewritten as soon the JIT-marked heap-based lists are used. -- phil. */

	address_list_node*  curr = finalizers;
	address_list_node*  prev;	/* NOTE(review): unused -- looks vestigial */

#if 0
	/* FIXME: new code, please! */
#else
	while (curr) {
		if (curr->address) {
			/* unmarked => unreachable => finalize now */
			if (!bitmap_testbit(mark_bits, curr->address)) {

#ifdef FINALIZER_COUNTING
				++gc_finalizers_executed;
#endif
				asm_calljavamethod(((java_objectheader*)curr->address)->vftbl->class->finalizer, 
								   curr->address, NULL, NULL, NULL);
				curr->address = 0;	/* never finalize twice */
			}
		}

		curr = curr->next;
	}
#endif
}
509
510
511 inline
512 static 
513 void gc_reclaim (void)
514 {
515 #ifdef PSEUDO_GENERATIONAL
516         static void*  generation_start = 0;
517         static int    generation_num = 0;
518         void* addr = heap_base;
519 #endif
520         void* free_start;
521         void* free_end = heap_base;
522         BITBLOCK* temp_bits;
523         bitmap_t* temp_bitmap;
524
525 #ifdef COLLECT_FRAGMENTATION
526         unsigned long  free_size = 0;
527         unsigned long  free_fragments = 0;
528 #endif
529
530 #ifdef PSEUDO_GENERATIONAL
531         if (!generation_start || !(generation_num % 5))
532                 generation_start = heap_base;
533
534         ++generation_num;
535 #endif
536
537         /* 1. reset the freelists */
538
539 #if 0
540         allocator_mark_free_kludge(start_bits); /* this line will be kicked out, when
541                                                                                            the SIZE_FROM_CLASSINFO reclaim
542                                                                                            is implemented (very soon!!) */
543 #endif
544
545 #ifdef PSEUDO_GENERATIONAL
546         for (addr = heap_base; addr <= generation_start; ++addr) {
547                 if (bitmap_testbit(start_bits, addr))
548                         bitmap_setbit(mark_bits, addr);
549         }
550
551         allocator_mark_free_kludge(start_bits); /* this line will be kicked out, when
552                                                                                            the SIZE_FROM_CLASSINFO reclaim
553                                                                                            is implemented (very soon!!) */
554 #endif
555
556         allocator_reset();
557
558         /* 2. reclaim unmarked objects */
559 #if 0
560         if (!testbit(start_bits, heap_base))
561                 free_start = heap_base;
562         else
563                 free_start = bitmap_find_next_combination_set_unset(start_bitmap,
564                                                                                                                         mark_bitmap,
565                                                                                                                         free_start);
566
567 #else
568         while (free_end < heap_top) {
569                 free_start = bitmap_find_next_combination_set_unset(start_bitmap,
570                                                                                                                         mark_bitmap,
571                                                                                                                         free_end);
572
573                 if (free_start < heap_top) {
574                         free_end = bitmap_find_next_setbit(mark_bitmap, (void*)((long)free_start + 8)); /* FIXME: constant used */
575
576                         if (free_end < heap_top) {
577                                 allocator_free(free_start, (long)free_end - (long)free_start);
578
579 #ifdef COLLECT_FRAGMENTATION
580                                 free_size += (long)free_end - (long)free_start;
581                                 free_fragments++;
582 #endif
583
584 #ifdef COLLECT_LIFESPAN
585                                 fprintf(tracefile, 
586                                                 "free\t0x%lx\t0x%lx\n", 
587                                                 free_start,
588                                                 (long)free_end);
589 #endif
590
591 #ifdef NEW_COLLECT_LIFESPAN
592                                 lifespan_free(free_start, free_end);
593 #endif
594
595 #ifndef SIZE_FROM_CLASSINFO
596                                 /* would make trouble with JIT-Marker support. The Marker for unused blocks 
597                                    might be called, leading to a bad dereference. -- phil. */
598                                 bitmap_setbit(mark_bits, free_start); /* necessary to calculate obj-size bitmap based. */
599 #endif
600                         }
601                 } else {
602                         free_end = heap_top;    
603                 }
604         }
605 #endif
606
607         /* 3.1. swap mark & start bitmaps */
608         temp_bits = mark_bits;
609         mark_bits = start_bits;
610         start_bits = temp_bits;
611
612         temp_bitmap = mark_bitmap;
613         mark_bitmap = start_bitmap;
614         start_bitmap = temp_bitmap;
615
616 #if 0 /* operation already handled in allocate */
617         /* 3.2. mask reference bitmap */
618         bitmap_mask_with_bitmap(reference_bitmap, start_bitmap);
619 #endif
620
621         /* 3.3. update heap_top */
622         if (free_start < heap_top) {
623                 heap_top = free_start;
624 #ifdef NEW_COLLECT_LIFESPAN
625                         lifespan_free(free_start, free_end);
626 #endif
627         }
628
629 #if 0
630         if (heap_top < heap_limit)
631                 bitmap_setbit(start_bits, heap_top);
632 #endif
633
634         /* 3.4. emit fragmentation info */
635 #ifdef COLLECT_FRAGMENTATION
636         {
637                 unsigned long heap_full = (unsigned long)heap_top - (unsigned long)heap_base;
638                 unsigned long heap_life = (unsigned long)heap_top - (unsigned long)heap_base - free_size;
639
640                 fprintf(fragfile, 
641                                 "%ld\t%ld\t%ld\t%ld\t%f\t%f\t%f\n", 
642                                 heap_full,
643                                 heap_life,
644                                 free_size, 
645                                 free_fragments,
646                                 100*(float)free_size/(free_fragments ? free_fragments : 1),
647                                 100*(float)heap_life/(heap_full ? heap_full : 1),
648                                 100*(float)free_size/(heap_full ? heap_full : 1)
649                                 );
650         }
651         fflush(fragfile);
652
653         allocator_dump_to_file(fragsizefile);
654 #endif
655
656         /* 4. adjust the collection threshold */
657         heap_next_collection = next_collection_heuristic();
658         if (heap_next_collection > heap_limit)
659                 heap_next_collection = heap_limit;
660
661 #ifdef COLLECT_LIFESPAN
662         fprintf(tracefile, "heap_top\t0x%lx\n", heap_top);
663 #endif
664
665 #ifdef PSEUDO_GENERATIONAL
666         generation_start = heap_top;
667 #endif
668 }
669
670 inline
671 static
672 void 
673 gc_mark_object_at (void** addr)
674 {
675         /*
676          * A note concerning the order of the tests:
677          *
678          * Statistics collected during a test run, where alignment
679          * was tested before checking whether the addr points into 
680          * the heap:
681          * >> LOG: 9301464 bytes for 196724 objects allocated.
682          * >> LOG: 15 garbage collections performed.
683          * >> LOG: 6568440 heapblocks visited, 469249 objects marked
684          * >> LOG:     1064447 visits to objects already marked.
685          * >> LOG:     988270 potential references not aligned.
686          * >> LOG:     4049446 out of heap.
687          * >> LOG:     5236 not an object.
688          *
689          * These results show, that only about 1/4 of all heapblocks 
690          * point to objects; The single most important reason why a 
691          * heapblock can not point at an object is, that it's value 
692          * doesn't fall within the heap area (this test was performed
693          * with a 3MB heap).
694          *
695          * From the results, the various tests have to be conducted 
696          * in the following order for maximum efficiency:
697          *    1. addr in heap?
698          *    2. already marked ?
699          *    3. aligned ?
700          *    4. object ?
701          *
702          * The results after reordering:
703          * >> LOG: 9301464 bytes for 196724 objects allocated.
704          * >> LOG: 15 garbage collections performed.
705          * >> LOG: 6568440 heapblocks visited, 469249 objects marked
706          * >> LOG:     1064447 visits to objects already marked.
707          * >> LOG:     350 potential references not aligned.
708          * >> LOG:     5037366 out of heap.
709          * >> LOG:     5236 not an object.
710          *
711          * And using:
712          *    1. addr in heap?
713          *    2. already marked ?
714          *    3. object ?
715          *    4. aligned ?
716          * 
717          * >> LOG: 9301464 bytes for 196724 objects allocated.
718          * >> LOG: 15 garbage collections performed.
719          * >> LOG: 6568440 heapblocks visited, 469249 objects marked
720          * >> LOG:     5037366 out of heap.
721          * >> LOG:     1064456 visits to objects already marked.
722          * >> LOG:     5539 not an object.
723          * >> LOG:     38 potential references not aligned.
724          *
725          * Apparently, most unaligned values will already be eliminated
726          * when checking against the bounds of the heap. Checking this
727          * property first, should thus improve collection times.
728          */
729
730         /* 1.a. if addr doesn't point into the heap, return. */
731         if ((unsigned long)addr - (unsigned long)heap_base >= 
732                 ((long)heap_top - (long)heap_base)) {
733 #ifdef GC_COLLECT_STATISTICS
734                 if (addr == NULL)
735                         ++gc_mark_null_pointer;
736                 else
737                         ++gc_mark_not_inheap;
738 #endif
739                 return;
740         }
741
742         /* 1.b. if align(addr) has already been marked during this collection, return. */
743         if (bitmap_testbit(mark_bits, (void*)addr)) {
744 #ifdef GC_COLLECT_STATISTICS
745                 ++gc_mark_already_marked;
746 #endif
747                 return;
748         }
749
750         /* 1.c. if align(addr) doesn't point to the start of an object, return. */
751         if (!bitmap_testbit(start_bits, (void*)addr)) {
752 #ifdef GC_COLLECT_STATISTICS
753                 ++gc_mark_not_object;
754 #endif
755                 return;
756         }
757
758         /* 1.d. if addr is not properly aligned, return. */
759         if ((long)addr & ((1 << ALIGN) - 1)) {
760 #ifdef GC_COLLECT_STATISTICS
761                 ++gc_mark_not_aligned;
762 #endif
763                 return;
764         }
765
766         /* 2. Mark the object at addr */
767         bitmap_setbit(mark_bits, (void*)addr); 
768 #ifdef GC_COLLECT_STATISTICS
769         ++gc_mark_objects_marked;
770 #endif
771
772 #ifdef JIT_MARKER_SUPPORT
773         asm_calljavamethod(addr->vftbl->class->marker, addr, NULL, NULL, NULL);
774 #else
775
776         /* 3. mark the references contained within the extents of the object at addr */
777         if (bitmap_testbit(reference_bits, addr)) {
778                 /* 3.1. find the end of the object */
779                 void** end;
780
781 #ifdef SIZE_FROM_CLASSINFO
782                 if (((java_objectheader*)addr)->vftbl == class_array->vftbl)
783                         end = (void**)((long)addr + (long)((java_arrayheader*)addr)->alignedsize);
784                 else
785                         end = (void**)((long)addr + (long)((java_objectheader*)addr)->vftbl->class->alignedsize);
786 #else
787                 end = (void**)bitmap_find_next_setbit(start_bitmap, addr + 1); /* points just behind the object */
788 #endif
789
790                 /* 3.2. mark the references within the object at addr */
791 #ifdef GC_COLLECT_STATISTICS
792                 gc_mark_heapblocks_visited += ((long)end - (long)addr) >> ALIGN;
793 #endif
794                 while (addr < end)
795                         gc_mark_object_at(*(addr++));
796 #endif  
797         }
798         
799         return;
800 }
801
802
803 inline
804 static
805 void gc_mark_references (void)
806 {
807         address_list_node* curr = references;
808
809         while (curr) {
810 #ifdef GC_COLLECT_STATISTICS
811                 ++gc_mark_heapblocks_visited;
812 #endif          
813                 gc_mark_object_at(*((void**)(curr->address)));
814                 curr = curr->next;
815         }
816 }
817
inline
static
void 
markreferences(void** start, void** end)
{
	/* Treat every word in [start, end) as a potential reference and
	   hand it to the conservative marker. */
	void** slot;

	for (slot = start; slot < end; ++slot) {
#ifdef GC_COLLECT_STATISTICS
		++gc_mark_heapblocks_visited;
#endif		
		gc_mark_object_at(*slot);
	}
}
830
inline
static
void gc_mark_stack (void)
{
	/* Conservatively scan the machine stack(s) for references.
	   'dummy' marks the current top of stack; markreferences is called
	   with the two bounds ordered either way because the stack growth
	   direction is not assumed. */
	void *dummy;

#ifdef USE_THREADS 
    thread *aThread;
	
	if (currentThread == NULL) {
		/* threading not started yet: scan only the primordial stack */
		void **top_of_stack = &dummy;
		
		if (top_of_stack > stackbottom)
			markreferences(stackbottom, top_of_stack);
		else
			markreferences(top_of_stack, stackbottom);
	}
	else {
		/* scan every live thread's stack and mark the thread object itself */
		for (aThread = liveThreads; aThread != 0;
			 aThread = CONTEXT(aThread).nextlive) {
			gc_mark_object_at((void*)aThread);
			if (CONTEXT(aThread).usedStackTop > CONTEXT(aThread).stackEnd)
				markreferences((void**)CONTEXT(aThread).stackEnd,
							   (void**)CONTEXT(aThread).usedStackTop);
			else	
				markreferences((void**)CONTEXT(aThread).usedStackTop,
							   (void**)CONTEXT(aThread).stackEnd);
            }

		/* the scheduler's run queues hold thread references as well */
		markreferences((void**)&threadQhead[0],
					   (void**)&threadQhead[MAX_THREAD_PRIO]);
	}
#else
    void **top_of_stack = &dummy;

    if (top_of_stack > stackbottom)
        markreferences(stackbottom, top_of_stack);
	else
        markreferences(top_of_stack, stackbottom);
#endif
}
872
873
874 static 
875 void gc_run (void)
876 {
877         static int armageddon_is_near = 0;
878
879         if (armageddon_is_near) {
880                 /* armageddon_is_here! */
881                 fprintf(stderr, "Oops, seems like there's a slight problem here: gc_run() called while still running?!\n");
882                 return;
883         }
884
885         armageddon_is_near = true;
886         heap_next_collection = heap_limit;  /* try to avoid finalizer-induced collections */
887
888         bitmap_clear(mark_bitmap);
889
890         asm_dumpregistersandcall(gc_mark_stack);
891         gc_mark_references();
892         gc_finalize();
893         gc_reclaim();
894
895         armageddon_is_near = false;
896
897 #ifdef GC_COLLECT_STATISTICS
898         ++gc_collections_count;
899 #endif
900 }
901
902
/************************* Function: gc_init **********************************

  Hook for any set-up that has to happen before the collector may be
  invoked on the proper stack. Nothing to do at the moment.

******************************************************************************/

void
gc_init (void)
{
}
914
/************************** Function: gc_call ********************************

  Calls the garbage collector. The garbage collector should always be called
  using this function since it ensures that enough stack space is available.

******************************************************************************/

void
gc_call (void)
{
#ifdef USE_THREADS
	u1 dummy;

	/* must not be called with thread switching already blocked */
	assert(blockInts == 0);

	intsDisable();
	if (currentThread == NULL || currentThread == mainThread) {
		/* record the current stack top so gc_mark_stack scans up to here */
		CONTEXT(mainThread).usedStackTop = &dummy;
		gc_run();
		}
	else
		/* run the collection on the main thread's (large) stack, saving
		   this thread's stack top for the scan */
		asm_switchstackandcall(CONTEXT(mainThread).usedStackTop, gc_run,
							   (void**)&(CONTEXT(currentThread).usedStackTop));
	intsRestore();
#else
	gc_run();
#endif
}
943
944
945
946 /*
947  * These are local overrides for various environment variables in Emacs.
948  * Please do not remove this and leave it at the end of the file, where
949  * Emacs will automagically detect them.
950  * ---------------------------------------------------------------------
951  * Local variables:
952  * mode: c
953  * indent-tabs-mode: t
954  * c-basic-offset: 4
955  * tab-width: 4
956  * End:
957  */
958