9264c204fdc434f7e6c83b8f32461e28f229e5ac
[cacao.git] / mm / heap2.c
#include <stddef.h>
#include <stdlib.h>             /* malloc, free, exit */
#include <unistd.h>             /* getpagesize, mmap, ... */
#include <sys/mman.h>

#include <sys/types.h>
#include <stdio.h>
#include <assert.h>

#include "asmpart.h"
#include "callargs.h"
#include "threads/thread.h"
#include "threads/locks.h"

#include "lifespan.h"
#include "mm.h"
16
17 #if !defined(HAVE_MAP_FAILED)
18 #define MAP_FAILED ((void*) -1)
19 #endif
20
21
22 #define PAGESIZE_MINUS_ONE      (getpagesize() - 1)
23
24 #undef ALIGN
25 #undef OFFSET
26
27 #define HEURISTIC_SEL    0
28 #define HEURISTIC_PARAM  2UL
29
30
31 #define next_collection_heuristic_init()   \
32              (void*)((long)heap_top + (((long)heap_limit - (long)heap_top) >> 4))
33
34 #if HEURISTIC_SEL == 0
35 #define next_collection_heuristic()   \
36              (void*)((long)heap_top + (((long)heap_limit - (long)heap_top) >> HEURISTIC_PARAM))
37 #elif HEURISTIC_SEL == 1
38 #define next_collection_heuristic()   \
39              (void*)((long)heap_top + (((long)heap_top - (long)heap_base) << HEURISTIC_PARAM))
40 #elif HEURISTIC_SEL == 2
41 #define next_collection_heuristic()   \
42              (void*)((long)heap_top + HEURISTIC_PARAM)   
43 #endif
44
45          /*
46 //#define PSEUDO_GENERATIONAL
47 //#define COLLECT_LIFESPAN
48 //#define NEW_COLLECT_LIFESPAN
49 //#define COLLECT_FRAGMENTATION
50 //#define COLLECT_SIZES
51
52 //#define GC_COLLECT_STATISTICS
53 //#define FINALIZER_COUNTING
54 */
55
56 #undef STRUCTURES_ON_HEAP
57          /*
58 //#define STRUCTURES_ON_HEAP
59 */
60
61 #define false 0
62 #define true 1
63
64 #include "allocator.h" /* rev. 1 allocator */
65 #include "bitmap2.h"   /* rev. 2 bitmap management */
66
67 bool collectverbose;
68
69 void gc_call (void);
70
71 /* --- macros */
72
73 #define align_size(size)        ((size) & ~((1 << ALIGN) - 1))
74 #define MAP_ADDRESS                     (void*) 0x10000000
75
76 /* --- file-wide variables */
77
78 static void*    heap_base = NULL;
79 static SIZE             heap_size = 0;
80 static void*    heap_top = NULL;
81 static void*    heap_limit = NULL;
82 static void*    heap_next_collection = NULL;
83
84 static bitmap_t* start_bitmap = NULL;
85 static BITBLOCK* start_bits = NULL;
86 static bitmap_t* reference_bitmap = NULL;
87 static BITBLOCK* reference_bits = NULL;
88 static bitmap_t* mark_bitmap = NULL;
89 static BITBLOCK* mark_bits = NULL;
90
91 static void**   stackbottom = NULL;
92
93 typedef struct address_list_node {
94         void* address;
95         struct address_list_node* prev;
96         struct address_list_node* next;
97 } address_list_node;
98
99 static address_list_node* references = NULL;
100 static address_list_node* finalizers = NULL;
101
102 #ifdef GC_COLLECT_STATISTICS
103
104 static unsigned long gc_collections_count = 0;
105
106 static unsigned long gc_alloc_total = 0;
107 static unsigned long gc_alloc_count = 0;
108
109 static unsigned long gc_mark_heapblocks_visited = 0;
110 static unsigned long gc_mark_not_aligned = 0;
111 static unsigned long gc_mark_not_inheap = 0;
112 static unsigned long gc_mark_not_object = 0;
113 static unsigned long gc_mark_objects_marked = 0;
114 static unsigned long gc_mark_already_marked = 0;
115
116 static unsigned long gc_mark_null_pointer = 0;
117
118 #endif
119
120 #ifdef FINALIZER_COUNTING
121
122 static unsigned long gc_finalizers_executed = 0;
123 static unsigned long gc_finalizers_detected = 0;
124
125 #endif
126
127 #ifdef USE_THREADS
128 static iMux  alloc_mutex;
129 #endif
130
131 #ifdef COLLECT_LIFESPAN
132 static FILE* tracefile;
133 #endif
134
135 #ifdef COLLECT_FRAGMENTATION
136 static FILE* fragfile;
137 static FILE* fragsizefile;
138 #endif
139
140 /* --- implementation */
141
142 void 
143 heap_init (SIZE size, 
144                    SIZE startsize, /* when should we collect for the first time ? */
145                    void **in_stackbottom)
146 {
147         /* 1. Initialise the freelists & reset the allocator's state */
148         allocator_init(); 
149
150         /* 2. Allocate at least (alignment!) size bytes of memory for the heap */
151         heap_size = align_size(size + ((1 << ALIGN) - 1));
152
153 #if !(defined(HAVE_MAP_ANONYMOUS))
154         heap_base = malloc(heap_size);
155 #else
156         heap_base = (void*) mmap (NULL, 
157                                                           ((size_t)heap_size + PAGESIZE_MINUS_ONE) & ~PAGESIZE_MINUS_ONE,
158                                                           PROT_READ | PROT_WRITE, 
159                                                           MAP_PRIVATE | MAP_ANONYMOUS, 
160                                                           -1, 
161                                                           (off_t) 0);
162 #endif
163
164         if (heap_base == (void*)MAP_FAILED) {
165                 /* unable to allocate the requested amount of memory */
166                 fprintf(stderr, "heap2.c: The queen, mylord, is dead! (mmap failed)\n");
167                 exit(-1);
168         }
169
170         /* 3. Allocate the bitmaps */
171         start_bitmap = bitmap_allocate(heap_base, heap_size);
172         reference_bitmap = bitmap_allocate(heap_base, heap_size);
173         mark_bitmap = bitmap_allocate(heap_base, heap_size);
174
175         start_bits = start_bitmap->bitmap;
176     reference_bits = reference_bitmap->bitmap;
177     mark_bits = mark_bitmap->bitmap;
178
179         /* 4. Mark the first free-area as an object-start */
180         bitmap_setbit(start_bits, heap_base);
181
182         /* 5. Initialise the heap's state (heap_top, etc.) */
183         stackbottom = in_stackbottom; /* copy the stackbottom */
184
185         heap_top = heap_base; /* the current end of the heap (just behind the last allocated object) */
186         heap_limit = (void*)((long)heap_base + heap_size); /* points just behind the last accessible block of the heap */
187
188         /* 6. calculate a useful first collection limit */
189         /* This is extremly primitive at this point... 
190            we should replace it with something more useful -- phil. */
191         heap_next_collection = next_collection_heuristic_init();
192
193         /* 7. Init the global reference lists & finalizer addresses */
194         references = NULL;
195         finalizers = NULL;
196
197 #ifdef STRUCTURES_ON_HEAP
198         heap_addreference(&references);
199         heap_addreference(&finalizers);
200 #endif
201
202 #ifdef USE_THREADS
203         /* 8. Init the mutexes for synchronization */
204         alloc_mutex.holder = 0;
205 #endif
206
207         /* 9. Set up collection of lifespan data */
208 #ifdef COLLECT_LIFESPAN
209 #if 0
210         tracefile = fopen("heap.trace", "w");
211 #else
212         tracefile = popen("gzip -9 >heap.trace.gz", "w");
213 #endif
214         if (!tracefile) {
215                 fprintf(stderr, "heap2.c: Radio Ga Ga! (fopen failed)\n");
216                 exit(-2);
217         }
218
219         fprintf(tracefile, "heap_base\t0x%lx\n", heap_base);    
220         fprintf(tracefile, "heap_limit\t0x%lx\n", heap_limit);
221         fprintf(tracefile, "heap_top\t0x%lx\n", heap_top);
222 #endif
223
224 #if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
225         lifespan_init(heap_base, heap_size);
226 #endif
227
228         /* 10. Set up collection of fragmentation data */
229 #ifdef COLLECT_FRAGMENTATION
230         fragfile = popen("gzip -9 >fragmentation.gz", "w");
231         fragsizefile = popen("gzip -9 >freeblocks.gz", "w");
232 #endif
233 }
234
inline
static
void
heap_call_finalizer_for_object_at(java_objectheader* object_addr)
{
	/* Invoke the Java finalizer of the object at object_addr by calling
	   into compiled code.  No NULL checks are performed here: the caller
	   must guarantee that object_addr points at a live object whose class
	   actually declares a finalizer. */
	asm_calljavamethod(object_addr->vftbl->class->finalizer, object_addr, NULL, NULL, NULL);
#ifdef FINALIZER_COUNTING
	++gc_finalizers_executed;
#endif
}
245
246 void 
247 heap_close (void)
248 {
249         address_list_node* curr = finalizers;
250
251         /* 0. clean up lifespan module */
252 #ifdef COLLECT_LIFESPAN
253 #if 0
254         fclose(tracefile);
255 #else
256         pclose(tracefile);
257 #endif
258 #endif
259
260 #if defined(NEW_COLLECT_LIFESPAN)
261         lifespan_close();
262 #endif
263
264 #ifdef COLLECT_FRAGMENTATION
265         pclose(fragfile);
266         pclose(fragsizefile);
267 #endif
268
269         /* 1. Clean up on the heap... finalize all remaining objects */
270 #if 1
271         while (curr) {
272                 address_list_node* prev = curr;
273                 java_objectheader* addr = (java_objectheader*)(curr->address);
274                 
275                 if (addr && bitmap_testbit(start_bits, addr))
276                         heap_call_finalizer_for_object_at(addr);
277                 
278                 curr = curr->next;
279                 free(prev);
280         }
281 #endif
282
283         /* 2. Release the bitmaps */
284         bitmap_release(start_bitmap);
285         bitmap_release(reference_bitmap);
286         bitmap_release(mark_bitmap);
287
288         /* 3. Release the memory allocated to the heap */
289         if (heap_base)
290                 munmap(heap_base, heap_size);
291
292         /* 4. emit statistical data */
293 #ifdef GC_COLLECT_STATISTICS
294         sprintf(logtext, "%ld bytes for %ld objects allocated.", 
295                         gc_alloc_total, gc_alloc_count); 
296         dolog();
297         sprintf(logtext, "%ld garbage collections performed.", gc_collections_count);
298         dolog();
299         sprintf(logtext, "%ld heapblocks visited, %ld objects marked", 
300                         gc_mark_heapblocks_visited, gc_mark_objects_marked);
301         dolog();
302         sprintf(logtext, "    %ld null pointers.", gc_mark_null_pointer);
303         dolog();
304         sprintf(logtext, "    %ld out of heap.", gc_mark_not_inheap);
305         dolog();
306         sprintf(logtext, "    %ld visits to objects already marked.", gc_mark_already_marked);
307         dolog();
308         sprintf(logtext, "    %ld not an object.", gc_mark_not_object);
309         dolog();
310         sprintf(logtext, "    %ld potential references not aligned.", gc_mark_not_aligned);
311         dolog();
312 #endif
313
314 #ifdef FINALIZER_COUNTING
315         sprintf(logtext, "%ld objects with a finalizer", gc_finalizers_detected);
316         dolog();
317
318         if (gc_finalizers_detected == gc_finalizers_executed)
319                 sprintf(logtext, "    all finalizers executed.");
320         else
321                 sprintf(logtext, "    only %ld finalizers executed.", gc_finalizers_executed);
322         dolog();
323 #endif
324
325 #if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
326         lifespan_emit();
327 #endif
328 }
329
330 inline
331 static
332 void 
333 heap_add_address_to_address_list(address_list_node** list, void* address)
334 {
335         /* Note: address lists are kept sorted to simplify finalization */
336
337         address_list_node* new_node = malloc(sizeof(address_list_node));
338         new_node->address = address;
339         new_node->next = NULL;
340
341         while (*list && (*list)->next) {
342                 if ((*list)->next->address < address)
343                         list = &(*list)->next;
344                 else {
345                         new_node->next = *list;
346                         *list = new_node;
347                         return;
348                 }                       
349         }
350
351         new_node->next = *list;
352         *list = new_node;
353 }
354
355
inline
static
void
heap_add_finalizer_for_object_at(void* addr)
{
	/* Record that the object at addr has a finalizer, by appending its
	   address to the global `finalizers` list (consulted by gc_finalize
	   and heap_close). */

	/* Finalizers seem to be very rare... for this reason, I keep a linked
	   list of object addresses, which have a finalizer attached. This list
	   is kept in ascending order according to the order garbage is freed.
	   This list is currently kept separate from the heap, but should be 
	   moved onto it, but some JIT-marker code to handle these special
	   objects will need to be added first. -- phil. */

	heap_add_address_to_address_list(&finalizers, addr);

#ifdef COLLECT_LIFESPAN
	fprintf(tracefile, "finalizer\t0x%lx\n", addr);
#endif
}
374
/* heap_allocate *************************************************************

   Allocates a block of `in_length` bytes (rounded up to the heap's
   alignment) on the garbage-collected heap and returns it, or NULL when
   the request cannot be satisfied even after a collection.

   in_length  ... requested size in bytes
   references ... true if the new object may contain references (sets the
                  reference bit so the marker scans the object's body)
   finalizer  ... non-NULL if the object's class has a finalizer; only
                  used as a flag here, the address is recorded in the
                  global finalizer list

   Strategy: freelists first; if that fails, collect when the growth would
   cross the collection threshold, retry the freelists, and finally grow
   the heap by bumping heap_top (bounded by heap_limit).

   NOTE(review): the `retry:` label is never targeted by any goto -- dead
   label, kept for byte-compatibility.  Both the success and the failure
   path exit through the `failure:` label, which only releases the mutex
   and returns free_chunk (NULL on failure).

*******************************************************************************/

void* 
heap_allocate (SIZE		  in_length, 
			   bool		  references, 
			   methodinfo *finalizer)
{
	SIZE	length = align_size(in_length + ((1 << ALIGN) - 1)); 
	void*	free_chunk = NULL;

#if 0
	/* check for misaligned in_length parameter */
	if (length != in_length)
		fprintf(stderr,
				"heap2.c: heap_allocate was passed unaligned in_length parameter: %ld, \n         aligned to %ld. (mistrust)\n",
				in_length, length);
#endif

#ifdef FINALIZER_COUNTING
	if (finalizer)
		++gc_finalizers_detected;
#endif

#if defined(COLLECT_LIFESPAN) || defined(NEW_COLLECT_LIFESPAN)
	/* perform garbage collection to collect data for lifespan analysis */
	if (heap_top > heap_base)
		gc_call();
#endif

#ifdef USE_THREADS
	lock_mutex(&alloc_mutex);
#endif	
 retry:
	/* 1. attempt to get a free block with size >= length from the freelists */
	free_chunk = allocator_alloc(length);

	/* 2. if unsuccessful, try alternative allocation strategies */
	if (!free_chunk) {
		/* 2.a if the collection threshold would be exceeded, collect the heap */
		if ((long)heap_top + length > (long)heap_next_collection) {
			/* 2.a.1. collect if the next_collection threshold would be exceeded */
			gc_call();

			/* 2.a.2. we just ran a collection, recheck the freelists */
			free_chunk = allocator_alloc(length);
			if (free_chunk)
				goto success;

			/* 2.a.3. we can't satisfy the request from the freelists, check
					  against the heap_limit whether growing the heap is possible */
			if ((long)heap_top + length > (long)heap_limit)
				goto failure;
		}

		/* 2.b. grow the heap: bump-allocate from the unused top region */
		free_chunk = heap_top;
		heap_top = (void*)((long)heap_top + length);
	}

 success:
	/* 3.a. mark all necessary bits, store the finalizer & return the newly allocated block */

	/* I don't mark the object-start anymore, as it always is at the beginning of a free-block,
	   which already is marked (Note: The first free-block gets marked in heap_init). -- phil. */
	bitmap_setbit(start_bits, free_chunk); /* mark the new object */

#ifndef SIZE_FROM_CLASSINFO
	bitmap_setbit(start_bits, (void*)((long)free_chunk + (long)length)); /* mark the freespace behind the new object */
#endif

	/* record whether the marker needs to scan this object's body */
	if (references)
		bitmap_setbit(reference_bits, free_chunk);
	else 
		bitmap_clearbit(reference_bits, free_chunk);

	/* store a hint, that there's a finalizer for this address */
	if (finalizer)
		heap_add_finalizer_for_object_at(free_chunk);

#ifdef GC_COLLECT_STATISTICS
	gc_alloc_total += length;
	++gc_alloc_count;
#endif

#ifdef COLLECT_LIFESPAN
	fprintf(tracefile, "alloc\t0x%lx\t0x%lx\n", 
			free_chunk, (long)free_chunk + length);
#endif

#if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
	lifespan_alloc(free_chunk, length);
#endif

 failure:
	/* shared exit: on the failure path free_chunk is still NULL */
#ifdef USE_THREADS
	unlock_mutex(&alloc_mutex);
#endif	
	return free_chunk;
}
472
/* heap_addreference: register a global reference location (a root); every
   collection will mark whatever object *reflocation points to. */

void 
heap_addreference (void **reflocation)
{
	/* I currently use a separate linked list (as in the original code) to hold
	   the global reference locations, but I'll change this to allocate these
	   in blocks on the heap; we'll have to add JIT-Marker code for those Java
	   objects then. -- phil. */

	heap_add_address_to_address_list(&references, reflocation);
}
483
static
inline
void gc_finalize (void)
{
	/* Run the finalizer of every registered object that was NOT marked
	   reachable in the current collection (its mark bit is clear), then
	   null out its list entry so it is finalized at most once.  List
	   nodes themselves are only reclaimed in heap_close. */

	/* This will have to be slightly rewritten as soon the JIT-marked heap-based lists are used. -- phil. */

	address_list_node*  curr = finalizers;
	address_list_node*  prev; /* NOTE(review): unused variable, kept as-is */

#if 0
	/* FIXME: new code, please! */
#else
	while (curr) {
		if (curr->address) {
			if (!bitmap_testbit(mark_bits, curr->address)) {

#ifdef FINALIZER_COUNTING
				++gc_finalizers_executed;
#endif
				/* call the Java finalizer via compiled code */
				asm_calljavamethod(((java_objectheader*)curr->address)->vftbl->class->finalizer, 
								   curr->address, NULL, NULL, NULL);
				curr->address = 0;
			}
		}

		curr = curr->next;
	}
#endif
}
513
514
515 inline
516 static 
517 void gc_reclaim (void)
518 {
519 #ifdef PSEUDO_GENERATIONAL
520         static void*  generation_start = 0;
521         static int    generation_num = 0;
522         void* addr = heap_base;
523 #endif
524         void* free_start;
525         void* free_end = heap_base;
526         BITBLOCK* temp_bits;
527         bitmap_t* temp_bitmap;
528
529 #ifdef COLLECT_FRAGMENTATION
530         unsigned long  free_size = 0;
531         unsigned long  free_fragments = 0;
532 #endif
533
534 #ifdef PSEUDO_GENERATIONAL
535         if (!generation_start || !(generation_num % 5))
536                 generation_start = heap_base;
537
538         ++generation_num;
539 #endif
540
541         /* 1. reset the freelists */
542
543 #if 0
544         allocator_mark_free_kludge(start_bits); /* this line will be kicked out, when
545                                                                                            the SIZE_FROM_CLASSINFO reclaim
546                                                                                            is implemented (very soon!!) */
547 #endif
548
549 #ifdef PSEUDO_GENERATIONAL
550         for (addr = heap_base; addr <= generation_start; ++addr) {
551                 if (bitmap_testbit(start_bits, addr))
552                         bitmap_setbit(mark_bits, addr);
553         }
554
555         allocator_mark_free_kludge(start_bits); /* this line will be kicked out, when
556                                                                                            the SIZE_FROM_CLASSINFO reclaim
557                                                                                            is implemented (very soon!!) */
558 #endif
559
560         allocator_reset();
561
562         /* 2. reclaim unmarked objects */
563 #if 0
564         if (!testbit(start_bits, heap_base))
565                 free_start = heap_base;
566         else
567                 free_start = bitmap_find_next_combination_set_unset(start_bitmap,
568                                                                                                                         mark_bitmap,
569                                                                                                                         free_start);
570
571 #else
572         while (free_end < heap_top) {
573                 free_start = bitmap_find_next_combination_set_unset(start_bitmap,
574                                                                                                                         mark_bitmap,
575                                                                                                                         free_end);
576
577                 if (free_start < heap_top) {
578                         free_end = bitmap_find_next_setbit(mark_bitmap, (void*)((long)free_start + 8)); /* FIXME: constant used */
579
580                         if (free_end < heap_top) {
581                                 allocator_free(free_start, (long)free_end - (long)free_start);
582
583 #ifdef COLLECT_FRAGMENTATION
584                                 free_size += (long)free_end - (long)free_start;
585                                 free_fragments++;
586 #endif
587
588 #ifdef COLLECT_LIFESPAN
589                                 fprintf(tracefile, 
590                                                 "free\t0x%lx\t0x%lx\n", 
591                                                 free_start,
592                                                 (long)free_end);
593 #endif
594
595 #ifdef NEW_COLLECT_LIFESPAN
596                                 lifespan_free(free_start, free_end);
597 #endif
598
599 #ifndef SIZE_FROM_CLASSINFO
600                                 /* would make trouble with JIT-Marker support. The Marker for unused blocks 
601                                    might be called, leading to a bad dereference. -- phil. */
602                                 bitmap_setbit(mark_bits, free_start); /* necessary to calculate obj-size bitmap based. */
603 #endif
604                         }
605                 } else {
606                         free_end = heap_top;    
607                 }
608         }
609 #endif
610
611         /* 3.1. swap mark & start bitmaps */
612         temp_bits = mark_bits;
613         mark_bits = start_bits;
614         start_bits = temp_bits;
615
616         temp_bitmap = mark_bitmap;
617         mark_bitmap = start_bitmap;
618         start_bitmap = temp_bitmap;
619
620 #if 0 /* operation already handled in allocate */
621         /* 3.2. mask reference bitmap */
622         bitmap_mask_with_bitmap(reference_bitmap, start_bitmap);
623 #endif
624
625         /* 3.3. update heap_top */
626         if (free_start < heap_top) {
627                 heap_top = free_start;
628 #ifdef NEW_COLLECT_LIFESPAN
629                         lifespan_free(free_start, free_end);
630 #endif
631         }
632
633 #if 0
634         if (heap_top < heap_limit)
635                 bitmap_setbit(start_bits, heap_top);
636 #endif
637
638         /* 3.4. emit fragmentation info */
639 #ifdef COLLECT_FRAGMENTATION
640         {
641                 unsigned long heap_full = (unsigned long)heap_top - (unsigned long)heap_base;
642                 unsigned long heap_life = (unsigned long)heap_top - (unsigned long)heap_base - free_size;
643
644                 fprintf(fragfile, 
645                                 "%ld\t%ld\t%ld\t%ld\t%f\t%f\t%f\n", 
646                                 heap_full,
647                                 heap_life,
648                                 free_size, 
649                                 free_fragments,
650                                 100*(float)free_size/(free_fragments ? free_fragments : 1),
651                                 100*(float)heap_life/(heap_full ? heap_full : 1),
652                                 100*(float)free_size/(heap_full ? heap_full : 1)
653                                 );
654         }
655         fflush(fragfile);
656
657         allocator_dump_to_file(fragsizefile);
658 #endif
659
660         /* 4. adjust the collection threshold */
661         heap_next_collection = next_collection_heuristic();
662         if (heap_next_collection > heap_limit)
663                 heap_next_collection = heap_limit;
664
665 #ifdef COLLECT_LIFESPAN
666         fprintf(tracefile, "heap_top\t0x%lx\n", heap_top);
667 #endif
668
669 #ifdef PSEUDO_GENERATIONAL
670         generation_start = heap_top;
671 #endif
672 }
673
/* gc_mark_object_at: conservative marker.  `addr` is a *potential*
   reference picked up from the stack, a register dump, a root location
   or another object's body.  It is validated (in-heap, not yet marked,
   object start, aligned), marked, and -- if the object was allocated
   with references == true -- every word within the object's extent is
   recursively treated as a potential reference. */

inline
static
void 
gc_mark_object_at (void** addr)
{
	/*
	 * A note concerning the order of the tests:
	 *
	 * Statistics collected during a test run, where alignment
	 * was tested before checking whether the addr points into 
	 * the heap:
	 * >> LOG: 9301464 bytes for 196724 objects allocated.
	 * >> LOG: 15 garbage collections performed.
	 * >> LOG: 6568440 heapblocks visited, 469249 objects marked
	 * >> LOG:     1064447 visits to objects already marked.
	 * >> LOG:     988270 potential references not aligned.
	 * >> LOG:     4049446 out of heap.
	 * >> LOG:     5236 not an object.
	 *
	 * These results show, that only about 1/4 of all heapblocks 
	 * point to objects; The single most important reason why a 
	 * heapblock can not point at an object is, that it's value 
	 * doesn't fall within the heap area (this test was performed
	 * with a 3MB heap).
	 *
	 * From the results, the various tests have to be conducted 
	 * in the following order for maximum efficiency:
	 *    1. addr in heap?
	 *    2. already marked ?
	 *    3. aligned ?
	 *    4. object ?
	 *
	 * The results after reordering:
	 * >> LOG: 9301464 bytes for 196724 objects allocated.
	 * >> LOG: 15 garbage collections performed.
	 * >> LOG: 6568440 heapblocks visited, 469249 objects marked
	 * >> LOG:     1064447 visits to objects already marked.
	 * >> LOG:     350 potential references not aligned.
	 * >> LOG:     5037366 out of heap.
	 * >> LOG:     5236 not an object.
	 *
	 * And using:
	 *    1. addr in heap?
	 *    2. already marked ?
	 *    3. object ?
	 *    4. aligned ?
	 * 
	 * >> LOG: 9301464 bytes for 196724 objects allocated.
	 * >> LOG: 15 garbage collections performed.
	 * >> LOG: 6568440 heapblocks visited, 469249 objects marked
	 * >> LOG:     5037366 out of heap.
	 * >> LOG:     1064456 visits to objects already marked.
	 * >> LOG:     5539 not an object.
	 * >> LOG:     38 potential references not aligned.
	 *
	 * Apparently, most unaligned values will already be eliminated
	 * when checking against the bounds of the heap. Checking this
	 * property first, should thus improve collection times.
	 */

	/* 1.a. if addr doesn't point into the heap, return. */
	/* (single unsigned comparison covers both below-base and above-top,
	   and also rejects NULL) */
	if ((unsigned long)addr - (unsigned long)heap_base >= 
		((long)heap_top - (long)heap_base)) {
#ifdef GC_COLLECT_STATISTICS
		if (addr == NULL)
			++gc_mark_null_pointer;
		else
			++gc_mark_not_inheap;
#endif
		return;
	}

	/* 1.b. if align(addr) has already been marked during this collection, return. */
	if (bitmap_testbit(mark_bits, (void*)addr)) {
#ifdef GC_COLLECT_STATISTICS
		++gc_mark_already_marked;
#endif
		return;
	}

	/* 1.c. if align(addr) doesn't point to the start of an object, return. */
	if (!bitmap_testbit(start_bits, (void*)addr)) {
#ifdef GC_COLLECT_STATISTICS
		++gc_mark_not_object;
#endif
		return;
	}

	/* 1.d. if addr is not properly aligned, return. */
	if ((long)addr & ((1 << ALIGN) - 1)) {
#ifdef GC_COLLECT_STATISTICS
		++gc_mark_not_aligned;
#endif
		return;
	}

	/* 2. Mark the object at addr */
	bitmap_setbit(mark_bits, (void*)addr); 
#ifdef GC_COLLECT_STATISTICS
	++gc_mark_objects_marked;
#endif

#ifdef JIT_MARKER_SUPPORT
	asm_calljavamethod(addr->vftbl->class->marker, addr, NULL, NULL, NULL);
#else

	/* 3. mark the references contained within the extents of the object at addr */
	/* NOTE(review): the #endif below sits *inside* this if-block, so with
	   JIT_MARKER_SUPPORT defined the trailing brace would be unmatched --
	   fragile preprocessor structure, left untouched. */
	if (bitmap_testbit(reference_bits, addr)) {
		/* 3.1. find the end of the object */
		void** end;

#ifdef SIZE_FROM_CLASSINFO
		if (((java_objectheader*)addr)->vftbl == class_array->vftbl)
			end = (void**)((long)addr + (long)((java_arrayheader*)addr)->alignedsize);
		else
			end = (void**)((long)addr + (long)((java_objectheader*)addr)->vftbl->class->alignedsize);
#else
		end = (void**)bitmap_find_next_setbit(start_bitmap, addr + 1); /* points just behind the object */
#endif

		/* 3.2. mark the references within the object at addr */
		/* (recurse on each stored word; the word's *value* is the
		   candidate pointer passed down) */
#ifdef GC_COLLECT_STATISTICS
		gc_mark_heapblocks_visited += ((long)end - (long)addr) >> ALIGN;
#endif
		while (addr < end)
			gc_mark_object_at(*(addr++));
#endif	
	}
	
	return;
}
805
806
807 inline
808 static
809 void gc_mark_references (void)
810 {
811         address_list_node* curr = references;
812
813         while (curr) {
814 #ifdef GC_COLLECT_STATISTICS
815                 ++gc_mark_heapblocks_visited;
816 #endif          
817                 gc_mark_object_at(*((void**)(curr->address)));
818                 curr = curr->next;
819         }
820 }
821
inline
static
void 
markreferences(void** start, void** end)
{
	/* Conservatively scan the half-open word range [start, end): each
	   stored word is treated as a potential reference and handed to the
	   marker. */

	void** cursor;

	for (cursor = start; cursor < end; ++cursor) {
#ifdef GC_COLLECT_STATISTICS
		++gc_mark_heapblocks_visited;
#endif		
		gc_mark_object_at(*cursor);
	}
}
834
inline
static
void gc_mark_stack (void)
{
	/* Conservatively scan the C stack(s) for references.  Without threads
	   this is the single region between `stackbottom` (recorded in
	   heap_init) and the current stack position (&dummy).  With threads,
	   every live thread's saved stack extent is scanned, plus the thread
	   objects and the scheduler's priority queue heads. */

	void *dummy;  /* its address marks the current top of this stack */

#ifdef USE_THREADS 
    thread *aThread;
	
	if (currentThread == NULL) {
		void **top_of_stack = &dummy;
		
		/* stack growth direction is unknown here, so order the bounds */
		if (top_of_stack > stackbottom)
			markreferences(stackbottom, top_of_stack);
		else
			markreferences(top_of_stack, stackbottom);
	}
	else {
		for (aThread = liveThreads; aThread != 0;
			 aThread = CONTEXT(aThread).nextlive) {
			/* the thread object itself is a root */
			gc_mark_object_at((void*)aThread);
			if (CONTEXT(aThread).usedStackTop > CONTEXT(aThread).stackEnd)
				markreferences((void**)CONTEXT(aThread).stackEnd,
							   (void**)CONTEXT(aThread).usedStackTop);
			else	
				markreferences((void**)CONTEXT(aThread).usedStackTop,
							   (void**)CONTEXT(aThread).stackEnd);
            }

		/* scheduler queue heads may also hold thread references */
		markreferences((void**)&threadQhead[0],
					   (void**)&threadQhead[MAX_THREAD_PRIO]);
	}
#else
    void **top_of_stack = &dummy;

    if (top_of_stack > stackbottom)
        markreferences(stackbottom, top_of_stack);
	else
        markreferences(top_of_stack, stackbottom);
#endif
}
876
877
/* gc_run: performs one full collection cycle (clear marks, mark from
   registers/stack/roots, finalize unreachable objects, sweep).  Must be
   entered through gc_call(), which guarantees sufficient stack space;
   the reentrancy flag guards against finalizer-triggered recursion. */

static 
void gc_run (void)
{
	static int armageddon_is_near = 0;

	if (armageddon_is_near) {
		/* armageddon_is_here! */
		fprintf(stderr, "Oops, seems like there's a slight problem here: gc_run() called while still running?!\n");
		return;
	}

	armageddon_is_near = true;
	heap_next_collection = heap_limit;  /* try to avoid finalizer-induced collections */

	/* start every cycle with a clean mark bitmap */
	bitmap_clear(mark_bitmap);

	/* dump registers onto the stack so they are scanned as well */
	asm_dumpregistersandcall(gc_mark_stack);
	gc_mark_references();
	gc_finalize();
	gc_reclaim();

	armageddon_is_near = false;

#ifdef GC_COLLECT_STATISTICS
	++gc_collections_count;
#endif
}
905
906
907 /************************* Function: gc_init **********************************
908
909   Initializes anything that must be initialized to call the gc on the right
910   stack.
911
912 ******************************************************************************/
913
void
gc_init (void)
{
	/* Intentionally empty: this collector needs no per-stack setup, but
	   the function is kept so all GC implementations share an interface. */
}
918
919 /************************** Function: gc_call ********************************
920
921   Calls the garbage collector. The garbage collector should always be called
922   using this function since it ensures that enough stack space is available.
923
924 ******************************************************************************/
925
void
gc_call (void)
{
#ifdef USE_THREADS
	u1 dummy;  /* its address records the caller's current stack top */

	/* interrupts must not already be blocked when we block them here */
	assert(blockInts == 0);

	intsDisable();
	if (currentThread == NULL || currentThread == mainThread) {
		CONTEXT(mainThread).usedStackTop = &dummy;
		gc_run();
		}
	else
		/* run the collector on the main thread's (larger) stack, saving
		   this thread's stack top so its stack gets scanned too */
		asm_switchstackandcall(CONTEXT(mainThread).usedStackTop, gc_run,
							   (void**)&(CONTEXT(currentThread).usedStackTop));
	intsRestore();
#else
	gc_run();
#endif
}
947
948
949
950 /*
951  * These are local overrides for various environment variables in Emacs.
952  * Please do not remove this and leave it at the end of the file, where
953  * Emacs will automagically detect them.
954  * ---------------------------------------------------------------------
955  * Local variables:
956  * mode: c
957  * indent-tabs-mode: t
958  * c-basic-offset: 4
959  * tab-width: 4
960  * End:
961  */
962