97a5158682dd306a818f1e8ab2e6fbd0839101db
[cacao.git] / mm / heap2.c
#include <stddef.h>
#include <stdlib.h>             /* malloc, free, exit */
#include <unistd.h>             /* getpagesize, mmap, ... */
#include <sys/mman.h>

#include <sys/types.h>
#include <stdio.h>
#include <assert.h>

#include "asmpart.h"
#include "callargs.h"
#include "threads/thread.h"
#include "threads/locks.h"

#include "lifespan.h"
#include "mm.h"
16
17 #if !defined(HAVE_MAP_FAILED)
18 #define MAP_FAILED ((void*) -1)
19 #endif
20
21
22 #define PAGESIZE_MINUS_ONE      (getpagesize() - 1)
23
24 #undef ALIGN
25 #undef OFFSET
26
27 #define HEURISTIC_SEL    0
28 #define HEURISTIC_PARAM  2UL
29
30
31 #define next_collection_heuristic_init()   \
32              (void*)((long)heap_top + (((long)heap_limit - (long)heap_top) >> 4))
33
34 #if HEURISTIC_SEL == 0
35 #define next_collection_heuristic()   \
36              (void*)((long)heap_top + (((long)heap_limit - (long)heap_top) >> HEURISTIC_PARAM))
37 #elif HEURISTIC_SEL == 1
38 #define next_collection_heuristic()   \
39              (void*)((long)heap_top + (((long)heap_top - (long)heap_base) << HEURISTIC_PARAM))
40 #elif HEURISTIC_SEL == 2
41 #define next_collection_heuristic()   \
42              (void*)((long)heap_top + HEURISTIC_PARAM)   
43 #endif
44
45          /*
46 //#define PSEUDO_GENERATIONAL
47 //#define COLLECT_LIFESPAN
48 //#define NEW_COLLECT_LIFESPAN
49 //#define COLLECT_FRAGMENTATION
50 //#define COLLECT_SIZES
51
52 //#define GC_COLLECT_STATISTICS
53 //#define FINALIZER_COUNTING
54 */
55
56 #undef STRUCTURES_ON_HEAP
57          /*
58 //#define STRUCTURES_ON_HEAP
59 */
60
61 #define false 0
62 #define true 1
63
64 #include "allocator.h" /* rev. 1 allocator */
65 #include "bitmap2.h"   /* rev. 2 bitmap management */
66
67 bool collectverbose;
68
69 void gc_call (void);
70
71 /* --- macros */
72
73 #define align_size(size)        ((size) & ~((1 << ALIGN) - 1))
74 #define MAP_ADDRESS                     (void*) 0x10000000
75
76 /* --- file-wide variables */
77
78 static void*    heap_base = NULL;
79 static SIZE             heap_size = 0;
80 static void*    heap_top = NULL;
81 static void*    heap_limit = NULL;
82 static void*    heap_next_collection = NULL;
83
84 static bitmap_t* start_bitmap = NULL;
85 static BITBLOCK* start_bits = NULL;
86 static bitmap_t* reference_bitmap = NULL;
87 static BITBLOCK* reference_bits = NULL;
88 static bitmap_t* mark_bitmap = NULL;
89 static BITBLOCK* mark_bits = NULL;
90
91 static void**   stackbottom = NULL;
92
/* Node of a singly-linked list of addresses, used for the global
   reference locations and for objects that carry a finalizer. */
typedef struct address_list_node {
	void* address;                    /* the recorded address (NULL once consumed) */
	struct address_list_node* prev;   /* NOTE(review): never assigned anywhere in
	                                     this file -- the list is effectively
	                                     singly linked; confirm before relying
	                                     on backward traversal */
	struct address_list_node* next;   /* next node, or NULL at the tail */
} address_list_node;
98
99 static address_list_node* references = NULL;
100 static address_list_node* finalizers = NULL;
101
102 #ifdef GC_COLLECT_STATISTICS
103
104 static unsigned long gc_collections_count = 0;
105
106 static unsigned long gc_alloc_total = 0;
107 static unsigned long gc_alloc_count = 0;
108
109 static unsigned long gc_mark_heapblocks_visited = 0;
110 static unsigned long gc_mark_not_aligned = 0;
111 static unsigned long gc_mark_not_inheap = 0;
112 static unsigned long gc_mark_not_object = 0;
113 static unsigned long gc_mark_objects_marked = 0;
114 static unsigned long gc_mark_already_marked = 0;
115
116 static unsigned long gc_mark_null_pointer = 0;
117
118 #endif
119
120 #ifdef FINALIZER_COUNTING
121
122 static unsigned long gc_finalizers_executed = 0;
123 static unsigned long gc_finalizers_detected = 0;
124
125 #endif
126
127 #ifdef USE_THREADS
128 static iMux  alloc_mutex;
129 #endif
130
131 #ifdef COLLECT_LIFESPAN
132 static FILE* tracefile;
133 #endif
134
135 #ifdef COLLECT_FRAGMENTATION
136 static FILE* fragfile;
137 static FILE* fragsizefile;
138 #endif
139
140 /* --- implementation */
141
142 void 
143 heap_init (SIZE size, 
144                    SIZE startsize, /* when should we collect for the first time ? */
145                    void **in_stackbottom)
146 {
147         /* 1. Initialise the freelists & reset the allocator's state */
148         allocator_init(); 
149
150         /* 2. Allocate at least (alignment!) size bytes of memory for the heap */
151         heap_size = align_size(size + ((1 << ALIGN) - 1));
152
153 #if !(defined(HAVE_MAP_ANONYMOUS))
154         heap_base = malloc(heap_size);
155 #else
156         heap_base = (void*) mmap (NULL, 
157                                                           ((size_t)heap_size + PAGESIZE_MINUS_ONE) & ~PAGESIZE_MINUS_ONE,
158                                                           PROT_READ | PROT_WRITE, 
159                                                           MAP_PRIVATE | MAP_ANONYMOUS, 
160                                                           -1, 
161                                                           (off_t) 0);
162 #endif
163
164         if (heap_base == (void*)MAP_FAILED) {
165                 /* unable to allocate the requested amount of memory */
166                 fprintf(stderr, "heap2.c: The queen, mylord, is dead! (mmap failed)\n");
167                 exit(-1);
168         }
169
170         /* 3. Allocate the bitmaps */
171         start_bitmap = bitmap_allocate(heap_base, heap_size);
172         reference_bitmap = bitmap_allocate(heap_base, heap_size);
173         mark_bitmap = bitmap_allocate(heap_base, heap_size);
174
175         start_bits = start_bitmap->bitmap;
176     reference_bits = reference_bitmap->bitmap;
177     mark_bits = mark_bitmap->bitmap;
178
179         /* 4. Mark the first free-area as an object-start */
180         bitmap_setbit(start_bits, heap_base);
181
182         /* 5. Initialise the heap's state (heap_top, etc.) */
183         stackbottom = in_stackbottom; /* copy the stackbottom */
184
185         heap_top = heap_base; /* the current end of the heap (just behind the last allocated object) */
186         heap_limit = (void*)((long)heap_base + heap_size); /* points just behind the last accessible block of the heap */
187
188         /* 6. calculate a useful first collection limit */
189         /* This is extremly primitive at this point... 
190            we should replace it with something more useful -- phil. */
191         heap_next_collection = next_collection_heuristic_init();
192
193         /* 7. Init the global reference lists & finalizer addresses */
194         references = NULL;
195         finalizers = NULL;
196
197 #ifdef STRUCTURES_ON_HEAP
198         heap_addreference(&references);
199         heap_addreference(&finalizers);
200 #endif
201
202 #ifdef USE_THREADS
203         /* 8. Init the mutexes for synchronization */
204         alloc_mutex.holder = 0;
205 #endif
206
207         /* 9. Set up collection of lifespan data */
208 #ifdef COLLECT_LIFESPAN
209 #if 0
210         tracefile = fopen("heap.trace", "w");
211 #else
212         tracefile = popen("gzip -9 >heap.trace.gz", "w");
213 #endif
214         if (!tracefile) {
215                 fprintf(stderr, "heap2.c: Radio Ga Ga! (fopen failed)\n");
216                 exit(-2);
217         }
218
219         fprintf(tracefile, "heap_base\t0x%lx\n", heap_base);    
220         fprintf(tracefile, "heap_limit\t0x%lx\n", heap_limit);
221         fprintf(tracefile, "heap_top\t0x%lx\n", heap_top);
222 #endif
223
224 #if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
225         lifespan_init(heap_base, heap_size);
226 #endif
227
228         /* 10. Set up collection of fragmentation data */
229 #ifdef COLLECT_FRAGMENTATION
230         fragfile = popen("gzip -9 >fragmentation.gz", "w");
231         fragsizefile = popen("gzip -9 >freeblocks.gz", "w");
232 #endif
233 }
234
/* Invoke the Java finalizer of the object at object_addr by calling
   straight into the finalizer method recorded in the object's vftbl
   class descriptor.  Caller must ensure the object actually has one. */
inline
static
void
heap_call_finalizer_for_object_at(java_objectheader* object_addr)
{
	asm_calljavamethod(object_addr->vftbl->class->finalizer, object_addr, NULL, NULL, NULL);
#ifdef FINALIZER_COUNTING
	++gc_finalizers_executed;	/* statistics: one more finalizer executed */
#endif
}
245
/* Shut the heap down: close statistics channels, run the finalizers of
   all objects still alive on the heap, release the bitmaps, unmap the
   heap memory, and emit the collected statistics. */
void 
heap_close (void)
{
	address_list_node* curr = finalizers;

	/* 0. clean up lifespan module */
#ifdef COLLECT_LIFESPAN
#if 0
	fclose(tracefile);
#else
	pclose(tracefile);
#endif
#endif

#if defined(NEW_COLLECT_LIFESPAN)
	lifespan_close();
#endif

#ifdef COLLECT_FRAGMENTATION
	pclose(fragfile);
	pclose(fragsizefile);
#endif

	/* 1. Clean up on the heap... finalize all remaining objects.
	      The start bit is tested so only addresses that still hold a
	      live object get finalized; list nodes are freed as we go. */
#if 1
	while (curr) {
		address_list_node* prev = curr;
		java_objectheader* addr = (java_objectheader*)(curr->address);
		
		if (addr && bitmap_testbit(start_bits, addr))
			heap_call_finalizer_for_object_at(addr);
		
		curr = curr->next;
		free(prev);
	}
#endif

	/* 2. Release the bitmaps */
	bitmap_release(start_bitmap);
	bitmap_release(reference_bitmap);
	bitmap_release(mark_bitmap);

	/* 3. Release the memory allocated to the heap.
	      NOTE(review): munmap is used unconditionally, but heap_init falls
	      back to malloc when HAVE_MAP_ANONYMOUS is unset -- confirm that
	      configuration never occurs, or free() should be used there. */
	if (heap_base)
		munmap(heap_base, heap_size);

	/* 4. emit statistical data */
#ifdef GC_COLLECT_STATISTICS
	sprintf(logtext, "%ld bytes for %ld objects allocated.", 
			gc_alloc_total, gc_alloc_count); 
	dolog();
	sprintf(logtext, "%ld garbage collections performed.", gc_collections_count);
	dolog();
	sprintf(logtext, "%ld heapblocks visited, %ld objects marked", 
			gc_mark_heapblocks_visited, gc_mark_objects_marked);
	dolog();
	sprintf(logtext, "    %ld null pointers.", gc_mark_null_pointer);
	dolog();
	sprintf(logtext, "    %ld out of heap.", gc_mark_not_inheap);
	dolog();
	sprintf(logtext, "    %ld visits to objects already marked.", gc_mark_already_marked);
	dolog();
	sprintf(logtext, "    %ld not an object.", gc_mark_not_object);
	dolog();
	sprintf(logtext, "    %ld potential references not aligned.", gc_mark_not_aligned);
	dolog();
#endif

#ifdef FINALIZER_COUNTING
	sprintf(logtext, "%ld objects with a finalizer", gc_finalizers_detected);
	dolog();

	if (gc_finalizers_detected == gc_finalizers_executed)
		sprintf(logtext, "    all finalizers executed.");
	else
		sprintf(logtext, "    only %ld finalizers executed.", gc_finalizers_executed);
	dolog();
#endif

#if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
	lifespan_emit();
#endif
}
329
330 inline
331 static
332 void 
333 heap_add_address_to_address_list(address_list_node** list, void* address)
334 {
335         /* Note: address lists are kept sorted to simplify finalization */
336
337         address_list_node* new_node = malloc(sizeof(address_list_node));
338         new_node->address = address;
339         new_node->next = NULL;
340
341         while (*list && (*list)->next) {
342                 if ((*list)->next->address < address)
343                         list = &(*list)->next;
344                 else {
345                         new_node->next = *list;
346                         *list = new_node;
347                         return;
348                 }                       
349         }
350
351         new_node->next = *list;
352         *list = new_node;
353 }
354
355
356 inline
357 static
358 void 
359 heap_add_address_to_address_list_unsorted(address_list_node** list, 
360                                                                                   void* address)
361 {
362         address_list_node* new_node = malloc(sizeof(address_list_node));
363         new_node->address = address;
364         new_node->next = *list;
365         *list = new_node;
366 }
367
368
/* Record that the object at addr has a finalizer, by adding its address
   to the sorted finalizer list (consumed by gc_finalize / heap_close). */
inline
static
void
heap_add_finalizer_for_object_at(void* addr)
{
	/* Finalizers seem to be very rare... for this reason, I keep a linked
	   list of object addresses, which have a finalizer attached. This list
	   is kept in ascending order according to the order garbage is freed.
	   This list is currently kept separate from the heap, but should be 
	   moved onto it, but some JIT-marker code to handle these special
	   objects will need to be added first. -- phil. */

	heap_add_address_to_address_list(&finalizers, addr);

#ifdef COLLECT_LIFESPAN
	fprintf(tracefile, "finalizer\t0x%lx\n", addr);
#endif
}
387
/* Allocate a block of in_length bytes on the garbage-collected heap.
 *
 *   in_length   requested size in bytes (rounded up to the alignment)
 *   references  true if the new object may contain references and must
 *               therefore be scanned during marking
 *   finalizer   non-NULL if the object's class has a finalizer; the new
 *               address is then recorded on the finalizer list
 *
 * Returns the new block, or NULL when neither the freelists, a garbage
 * collection, nor growing the heap could satisfy the request.
 *
 * NOTE(review): the parameter `references` shadows the file-scope list of
 * global reference locations with the same name. */
void* 
heap_allocate (SIZE		  in_length, 
			   bool		  references, 
			   methodinfo *finalizer)
{
	SIZE	length = align_size(in_length + ((1 << ALIGN) - 1)); 
	void*	free_chunk = NULL;

#if 0
	/* check for misaligned in_length parameter */
	if (length != in_length)
		fprintf(stderr,
				"heap2.c: heap_allocate was passed unaligned in_length parameter: %ld, \n         aligned to %ld. (mistrust)\n",
				in_length, length);
#endif

#ifdef FINALIZER_COUNTING
	if (finalizer)
		++gc_finalizers_detected;	/* statistics only */
#endif

#if defined(COLLECT_LIFESPAN) || defined(NEW_COLLECT_LIFESPAN)
	/* perform garbage collection to collect data for lifespan analysis */
	if (heap_top > heap_base)
		gc_call();
#endif

#ifdef USE_THREADS
	lock_mutex(&alloc_mutex);	/* serialise all heap allocation */
#endif	
 retry:
	/* NOTE(review): nothing jumps to `retry` -- the label looks like a
	   leftover of an earlier allocation strategy. */
	/* 1. attempt to get a free block with size >= length from the freelists */
	free_chunk = allocator_alloc(length);

	/* 2. if unsuccessful, try alternative allocation strategies */
	if (!free_chunk) {
		/* 2.a if the collection threshold would be exceeded, collect the heap */
		if ((long)heap_top + length > (long)heap_next_collection) {
			/* 2.a.1. collect if the next_collection threshold would be exceeded */
			gc_call();

			/* 2.a.2. we just ran a collection, recheck the freelists */
			free_chunk = allocator_alloc(length);
			if (free_chunk)
				goto success;

			/* 2.a.3. we can't satisfy the request from the freelists, check
				  against the heap_limit whether growing the heap is possible */
			if ((long)heap_top + length > (long)heap_limit)
				goto failure;
		}

		/* 2.b. grow the heap (bump-allocate at heap_top; safe because
		        heap_next_collection is always clamped to heap_limit) */
		free_chunk = heap_top;
		heap_top = (void*)((long)heap_top + length);
	}

 success:
	/* 3.a. mark all necessary bits, store the finalizer & return the newly allocated block */

	/* I don't mark the object-start anymore, as it always is at the beginning of a free-block,
	   which already is marked (Note: The first free-block gets marked in heap_init). -- phil. */
	bitmap_setbit(start_bits, free_chunk); /* mark the new object */

#ifndef SIZE_FROM_CLASSINFO
	bitmap_setbit(start_bits, (void*)((long)free_chunk + (long)length)); /* mark the freespace behind the new object */
#endif

	if (references)
		bitmap_setbit(reference_bits, free_chunk);
	else 
		bitmap_clearbit(reference_bits, free_chunk);

	/* store a hint, that there's a finalizer for this address */
	if (finalizer)
		heap_add_finalizer_for_object_at(free_chunk);

#ifdef GC_COLLECT_STATISTICS
	gc_alloc_total += length;
	++gc_alloc_count;
#endif

#ifdef COLLECT_LIFESPAN
	fprintf(tracefile, "alloc\t0x%lx\t0x%lx\n", 
			free_chunk, (long)free_chunk + length);
#endif

#if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
	lifespan_alloc(free_chunk, length);
#endif

 failure:
	/* reached both by falling through from the success path and via the
	   goto above (free_chunk is then still NULL) */
#ifdef USE_THREADS
	unlock_mutex(&alloc_mutex);
#endif	
	return free_chunk;
}
485
/* Register a global reference location; the word at *reflocation is
   treated as a GC root and scanned on every collection. */
void 
heap_addreference (void **reflocation)
{
	/* I currently use a separate linked list (as in the original code) to hold
	   the global reference locations, but I'll change this to allocate these
	   in blocks on the heap; we'll have to add JIT-Marker code for those Java
	   objects then. -- phil. */

	heap_add_address_to_address_list_unsorted(&references, reflocation);
}
496
497 static
498 inline
499 void gc_finalize (void)
500 {
501         /* This will have to be slightly rewritten as soon the JIT-marked heap-based lists are used. -- phil. */
502
503         address_list_node*  curr = finalizers;
504         address_list_node*  prev;
505
506 #if 0
507         /* FIXME: new code, please! */
508 #else
509         while (curr) {
510                 if (curr->address) {
511                         if (!bitmap_testbit(mark_bits, curr->address)) {
512
513 #ifdef FINALIZER_COUNTING
514                                 ++gc_finalizers_executed;
515 #endif
516                                 asm_calljavamethod(((java_objectheader*)curr->address)->vftbl->class->finalizer, 
517                                                                    curr->address, NULL, NULL, NULL);
518                                 curr->address = 0;
519                         }
520                 }
521
522                 curr = curr->next;
523         }
524 #endif
525 }
526
527
528 inline
529 static 
530 void gc_reclaim (void)
531 {
532 #ifdef PSEUDO_GENERATIONAL
533         static void*  generation_start = 0;
534         static int    generation_num = 0;
535         void* addr = heap_base;
536 #endif
537         void* free_start;
538         void* free_end = heap_base;
539         BITBLOCK* temp_bits;
540         bitmap_t* temp_bitmap;
541
542 #ifdef COLLECT_FRAGMENTATION
543         unsigned long  free_size = 0;
544         unsigned long  free_fragments = 0;
545 #endif
546
547 #ifdef PSEUDO_GENERATIONAL
548         if (!generation_start || !(generation_num % 5))
549                 generation_start = heap_base;
550
551         ++generation_num;
552 #endif
553
554         /* 1. reset the freelists */
555
556 #if 0
557         allocator_mark_free_kludge(start_bits); /* this line will be kicked out, when
558                                                                                            the SIZE_FROM_CLASSINFO reclaim
559                                                                                            is implemented (very soon!!) */
560 #endif
561
562 #ifdef PSEUDO_GENERATIONAL
563         for (addr = heap_base; addr <= generation_start; ++addr) {
564                 if (bitmap_testbit(start_bits, addr))
565                         bitmap_setbit(mark_bits, addr);
566         }
567
568         allocator_mark_free_kludge(start_bits); /* this line will be kicked out, when
569                                                                                            the SIZE_FROM_CLASSINFO reclaim
570                                                                                            is implemented (very soon!!) */
571 #endif
572
573         allocator_reset();
574
575         /* 2. reclaim unmarked objects */
576 #if 0
577         if (!testbit(start_bits, heap_base))
578                 free_start = heap_base;
579         else
580                 free_start = bitmap_find_next_combination_set_unset(start_bitmap,
581                                                                                                                         mark_bitmap,
582                                                                                                                         free_start);
583
584 #else
585         while (free_end < heap_top) {
586                 free_start = bitmap_find_next_combination_set_unset(start_bitmap,
587                                                                                                                         mark_bitmap,
588                                                                                                                         free_end);
589
590                 if (free_start < heap_top) {
591                         free_end = bitmap_find_next_setbit(mark_bitmap, (void*)((long)free_start + 8)); /* FIXME: constant used */
592
593                         if (free_end < heap_top) {
594                                 allocator_free(free_start, (long)free_end - (long)free_start);
595
596 #ifdef COLLECT_FRAGMENTATION
597                                 free_size += (long)free_end - (long)free_start;
598                                 free_fragments++;
599 #endif
600
601 #ifdef COLLECT_LIFESPAN
602                                 fprintf(tracefile, 
603                                                 "free\t0x%lx\t0x%lx\n", 
604                                                 free_start,
605                                                 (long)free_end);
606 #endif
607
608 #ifdef NEW_COLLECT_LIFESPAN
609                                 lifespan_free(free_start, free_end);
610 #endif
611
612 #ifndef SIZE_FROM_CLASSINFO
613                                 /* would make trouble with JIT-Marker support. The Marker for unused blocks 
614                                    might be called, leading to a bad dereference. -- phil. */
615                                 bitmap_setbit(mark_bits, free_start); /* necessary to calculate obj-size bitmap based. */
616 #endif
617                         }
618                 } else {
619                         free_end = heap_top;    
620                 }
621         }
622 #endif
623
624         /* 3.1. swap mark & start bitmaps */
625         temp_bits = mark_bits;
626         mark_bits = start_bits;
627         start_bits = temp_bits;
628
629         temp_bitmap = mark_bitmap;
630         mark_bitmap = start_bitmap;
631         start_bitmap = temp_bitmap;
632
633 #if 0 /* operation already handled in allocate */
634         /* 3.2. mask reference bitmap */
635         bitmap_mask_with_bitmap(reference_bitmap, start_bitmap);
636 #endif
637
638         /* 3.3. update heap_top */
639         if (free_start < heap_top) {
640                 heap_top = free_start;
641 #ifdef NEW_COLLECT_LIFESPAN
642                         lifespan_free(free_start, free_end);
643 #endif
644         }
645
646 #if 0
647         if (heap_top < heap_limit)
648                 bitmap_setbit(start_bits, heap_top);
649 #endif
650
651         /* 3.4. emit fragmentation info */
652 #ifdef COLLECT_FRAGMENTATION
653         {
654                 unsigned long heap_full = (unsigned long)heap_top - (unsigned long)heap_base;
655                 unsigned long heap_life = (unsigned long)heap_top - (unsigned long)heap_base - free_size;
656
657                 fprintf(fragfile, 
658                                 "%ld\t%ld\t%ld\t%ld\t%f\t%f\t%f\n", 
659                                 heap_full,
660                                 heap_life,
661                                 free_size, 
662                                 free_fragments,
663                                 100*(float)free_size/(free_fragments ? free_fragments : 1),
664                                 100*(float)heap_life/(heap_full ? heap_full : 1),
665                                 100*(float)free_size/(heap_full ? heap_full : 1)
666                                 );
667         }
668         fflush(fragfile);
669
670         allocator_dump_to_file(fragsizefile);
671 #endif
672
673         /* 4. adjust the collection threshold */
674         heap_next_collection = next_collection_heuristic();
675         if (heap_next_collection > heap_limit)
676                 heap_next_collection = heap_limit;
677
678 #ifdef COLLECT_LIFESPAN
679         fprintf(tracefile, "heap_top\t0x%lx\n", heap_top);
680 #endif
681
682 #ifdef PSEUDO_GENERATIONAL
683         generation_start = heap_top;
684 #endif
685 }
686
/* Conservatively mark the object a potential reference points at, then
 * recursively mark the references contained within it.
 *
 * addr is an arbitrary word picked up from a stack, a register, a global
 * reference location or an object body; it is only treated as an object
 * reference once it passes the four plausibility tests below. */
inline
static
void 
gc_mark_object_at (void** addr)
{
	/*
	 * A note concerning the order of the tests:
	 *
	 * Statistics collected during a test run, where alignment
	 * was tested before checking whether the addr points into 
	 * the heap:
	 * >> LOG: 9301464 bytes for 196724 objects allocated.
	 * >> LOG: 15 garbage collections performed.
	 * >> LOG: 6568440 heapblocks visited, 469249 objects marked
	 * >> LOG:     1064447 visits to objects already marked.
	 * >> LOG:     988270 potential references not aligned.
	 * >> LOG:     4049446 out of heap.
	 * >> LOG:     5236 not an object.
	 *
	 * These results show, that only about 1/4 of all heapblocks 
	 * point to objects; The single most important reason why a 
	 * heapblock can not point at an object is, that it's value 
	 * doesn't fall within the heap area (this test was performed
	 * with a 3MB heap).
	 *
	 * From the results, the various tests have to be conducted 
	 * in the following order for maximum efficiency:
	 *    1. addr in heap?
	 *    2. already marked ?
	 *    3. aligned ?
	 *    4. object ?
	 *
	 * The results after reordering:
	 * >> LOG: 9301464 bytes for 196724 objects allocated.
	 * >> LOG: 15 garbage collections performed.
	 * >> LOG: 6568440 heapblocks visited, 469249 objects marked
	 * >> LOG:     1064447 visits to objects already marked.
	 * >> LOG:     350 potential references not aligned.
	 * >> LOG:     5037366 out of heap.
	 * >> LOG:     5236 not an object.
	 *
	 * And using:
	 *    1. addr in heap?
	 *    2. already marked ?
	 *    3. object ?
	 *    4. aligned ?
	 * 
	 * >> LOG: 9301464 bytes for 196724 objects allocated.
	 * >> LOG: 15 garbage collections performed.
	 * >> LOG: 6568440 heapblocks visited, 469249 objects marked
	 * >> LOG:     5037366 out of heap.
	 * >> LOG:     1064456 visits to objects already marked.
	 * >> LOG:     5539 not an object.
	 * >> LOG:     38 potential references not aligned.
	 *
	 * Apparently, most unaligned values will already be eliminated
	 * when checking against the bounds of the heap. Checking this
	 * property first, should thus improve collection times.
	 */

	/* 1.a. if addr doesn't point into the heap, return.
	        (the unsigned subtraction folds lower- and upper-bound
	        checks into a single comparison) */
	if ((unsigned long)addr - (unsigned long)heap_base >= 
		((long)heap_top - (long)heap_base)) {
#ifdef GC_COLLECT_STATISTICS
		if (addr == NULL)
			++gc_mark_null_pointer;
		else
			++gc_mark_not_inheap;
#endif
		return;
	}

	/* 1.b. if align(addr) has already been marked during this collection, return. */
	if (bitmap_testbit(mark_bits, (void*)addr)) {
#ifdef GC_COLLECT_STATISTICS
		++gc_mark_already_marked;
#endif
		return;
	}

	/* 1.c. if align(addr) doesn't point to the start of an object, return. */
	if (!bitmap_testbit(start_bits, (void*)addr)) {
#ifdef GC_COLLECT_STATISTICS
		++gc_mark_not_object;
#endif
		return;
	}

	/* 1.d. if addr is not properly aligned, return. */
	if ((long)addr & ((1 << ALIGN) - 1)) {
#ifdef GC_COLLECT_STATISTICS
		++gc_mark_not_aligned;
#endif
		return;
	}

	/* 2. Mark the object at addr */
	bitmap_setbit(mark_bits, (void*)addr); 
#ifdef GC_COLLECT_STATISTICS
	++gc_mark_objects_marked;
#endif

	/* NOTE(review): if JIT_MARKER_SUPPORT were ever defined, the #else /
	   #endif placement below would leave the braces of the `if` statement
	   unbalanced -- verify before enabling that option. */
#ifdef JIT_MARKER_SUPPORT
	asm_calljavamethod(addr->vftbl->class->marker, addr, NULL, NULL, NULL);
#else

	/* 3. mark the references contained within the extents of the object at addr */
	if (bitmap_testbit(reference_bits, addr)) {
		/* 3.1. find the end of the object */
		void** end;

#ifdef SIZE_FROM_CLASSINFO
		if (((java_objectheader*)addr)->vftbl == class_array->vftbl)
			end = (void**)((long)addr + (long)((java_arrayheader*)addr)->alignedsize);
		else
			end = (void**)((long)addr + (long)((java_objectheader*)addr)->vftbl->class->alignedsize);
#else
		end = (void**)bitmap_find_next_setbit(start_bitmap, addr + 1); /* points just behind the object */
#endif

		/* 3.2. mark the references within the object at addr */
#ifdef GC_COLLECT_STATISTICS
		gc_mark_heapblocks_visited += ((long)end - (long)addr) >> ALIGN;
#endif
		while (addr < end)
			gc_mark_object_at(*(addr++));	/* recursive depth-first marking */
#endif	
	}
	
	return;
}
818
819
820 inline
821 static
822 void gc_mark_references (void)
823 {
824         address_list_node* curr = references;
825
826         while (curr) {
827 #ifdef GC_COLLECT_STATISTICS
828                 ++gc_mark_heapblocks_visited;
829 #endif          
830                 gc_mark_object_at(*((void**)(curr->address)));
831                 curr = curr->next;
832         }
833 }
834
/* Treat every word in the half-open range [start, end) as a potential
   reference and hand it to the conservative marker. */
inline
static
void 
markreferences(void** start, void** end)
{
	void** cursor;

	for (cursor = start; cursor < end; ++cursor) {
#ifdef GC_COLLECT_STATISTICS
		++gc_mark_heapblocks_visited;
#endif		
		gc_mark_object_at(*cursor);
	}
}
847
/* Conservatively scan the mutator stack(s) for references.
 *
 * Without threads: scans between stackbottom (recorded by heap_init) and
 * the current stack top (the address of a local variable).  With threads:
 * scans every live thread's stack region plus the thread queue heads.
 * The comparisons handle stacks growing in either direction. */
inline
static
void gc_mark_stack (void)
{
	void *dummy;	/* its address marks the current top of stack */

#ifdef USE_THREADS 
    thread *aThread;
	
	if (currentThread == NULL) {
		void **top_of_stack = &dummy;
		
		if (top_of_stack > stackbottom)
			markreferences(stackbottom, top_of_stack);
		else
			markreferences(top_of_stack, stackbottom);
	}
	else {
		for (aThread = liveThreads; aThread != 0;
			 aThread = CONTEXT(aThread).nextlive) {
			gc_mark_object_at((void*)aThread);	/* the thread object itself is a root */
			if (CONTEXT(aThread).usedStackTop > CONTEXT(aThread).stackEnd)
				markreferences((void**)CONTEXT(aThread).stackEnd,
							   (void**)CONTEXT(aThread).usedStackTop);
			else	
				markreferences((void**)CONTEXT(aThread).usedStackTop,
							   (void**)CONTEXT(aThread).stackEnd);
	    }

		/* the queue heads hold thread references as well */
		markreferences((void**)&threadQhead[0],
					   (void**)&threadQhead[MAX_THREAD_PRIO]);
	}
#else
    void **top_of_stack = &dummy;

    if (top_of_stack > stackbottom)
        markreferences(stackbottom, top_of_stack);
	else
        markreferences(top_of_stack, stackbottom);
#endif
}
889
890
891 static 
892 void gc_run (void)
893 {
894         static int armageddon_is_near = 0;
895
896         if (armageddon_is_near) {
897                 /* armageddon_is_here! */
898                 fprintf(stderr, "Oops, seems like there's a slight problem here: gc_run() called while still running?!\n");
899                 return;
900         }
901
902         armageddon_is_near = true;
903         heap_next_collection = heap_limit;  /* try to avoid finalizer-induced collections */
904
905         bitmap_clear(mark_bitmap);
906
907         asm_dumpregistersandcall(gc_mark_stack);
908         gc_mark_references();
909         gc_finalize();
910         gc_reclaim();
911
912         armageddon_is_near = false;
913
914 #ifdef GC_COLLECT_STATISTICS
915         ++gc_collections_count;
916 #endif
917 }
918
919
920 /************************* Function: gc_init **********************************
921
922   Initializes anything that must be initialized to call the gc on the right
923   stack.
924
925 ******************************************************************************/
926
void
gc_init (void)
{
	/* Intentionally empty: heap_init performs all heap-related setup.
	   Kept so callers have a stable initialisation entry point. */
}
931
932 /************************** Function: gc_call ********************************
933
934   Calls the garbage collector. The garbage collector should always be called
935   using this function since it ensures that enough stack space is available.
936
937 ******************************************************************************/
938
void
gc_call (void)
{
#ifdef USE_THREADS
	u1 dummy;	/* its address marks the caller's current stack top */

	assert(blockInts == 0);

	intsDisable();			/* no thread switching while collecting */
	if (currentThread == NULL || currentThread == mainThread) {
		CONTEXT(mainThread).usedStackTop = &dummy;
		gc_run();
		}
	else
		/* switch to the main thread's stack before collecting, so the
		   collector is guaranteed enough stack space */
		asm_switchstackandcall(CONTEXT(mainThread).usedStackTop, gc_run,
							   (void**)&(CONTEXT(currentThread).usedStackTop));
	intsRestore();
#else
	gc_run();
#endif
}
960
961
962
963 /*
964  * These are local overrides for various environment variables in Emacs.
965  * Please do not remove this and leave it at the end of the file, where
966  * Emacs will automagically detect them.
967  * ---------------------------------------------------------------------
968  * Local variables:
969  * mode: c
970  * indent-tabs-mode: t
971  * c-basic-offset: 4
972  * tab-width: 4
973  * End:
974  */
975