/* Changed the makefile system to autoconf/automake. */
/* [cacao.git] / mm / heap3.c */
1 #include <stddef.h>
2 #include <unistd.h>             /* getpagesize, mmap, ... */
3 #include <sys/mman.h>
4
5 #include <sys/types.h>
6 #include <stdio.h>
7 #include <assert.h>
8
9 #include "asmpart.h"
10 #include "callargs.h"
11 #include "threads/thread.h"
12 #include "threads/locks.h"
13
14 #include "lifespan.h"
15 #include "mm.h"
16
17 #if !defined(HAVE_MAP_FAILED)
18 #define MAP_FAILED ((void*) -1)
19 #endif
20
21
22 #define PAGESIZE_MINUS_ONE      (getpagesize() - 1)
23
24 #undef ALIGN
25 #undef OFFSET
26
27 #define HEURISTIC_SEL    0
28 #define HEURISTIC_PARAM  2UL
29
30
31 #define next_collection_heuristic_init()   \
32              (void*)((long)heap_top + (((long)heap_limit - (long)heap_top) >> 4))
33
34 #if HEURISTIC_SEL == 0
35 #define next_collection_heuristic()   \
36              (void*)((long)heap_top + (((long)heap_limit - (long)heap_top) >> HEURISTIC_PARAM))
37 #elif HEURISTIC_SEL == 1
38 #define next_collection_heuristic()   \
39              (void*)((long)heap_top + (((long)heap_top - (long)heap_base) << HEURISTIC_PARAM))
40 #elif HEURISTIC_SEL == 2
41 #define next_collection_heuristic()   \
42              (void*)((long)heap_top + HEURISTIC_PARAM)   
43 #endif
44
45 //#define COLLECT_LIFESPAN
46 //#define NEW_COLLECT_LIFESPAN
47 #define COLLECT_FRAGMENTATION
48 //#define COLLECT_SIZES
49
50 #define GC_COLLECT_STATISTICS
51 //#define FINALIZER_COUNTING
52
53 #undef STRUCTURES_ON_HEAP
54 //#define STRUCTURES_ON_HEAP
55
56 #define false 0
57 #define true 1
58
59 #include "allocator.h" /* rev. 1 allocator */
60 #include "bitmap2.h"   /* rev. 2 bitmap management */
61
62 bool collectverbose;
63
64 void gc_call (void);
65
66 /* --- macros */
67
68 #define align_size(size)        ((size) & ~((1 << ALIGN) - 1))
69 #define MAP_ADDRESS                     (void*) 0x10000000
70
71 /* --- file-wide variables */
72
73 static void*    heap_base = NULL;
74 static SIZE             heap_size = 0;
75 static void*    heap_top = NULL;
76 static void*    heap_limit = NULL;
77 static void*    heap_next_collection = NULL;
78
79 static bitmap_t* start_bitmap = NULL;
80 static BITBLOCK* start_bits = NULL;
81 static bitmap_t* reference_bitmap = NULL;
82 static BITBLOCK* reference_bits = NULL;
83 static bitmap_t* mark_bitmap = NULL;
84 static BITBLOCK* mark_bits = NULL;
85
86 static void**   stackbottom = NULL;
87
/* Node of a sorted linked list of addresses; used both for global
   reference locations and for objects that carry a finalizer. */
typedef struct address_list_node {
	void* address;                  /* the stored address */
	struct address_list_node* prev; /* NOTE(review): never assigned anywhere in this file --
	                                   the list is effectively singly linked; confirm before use */
	struct address_list_node* next; /* next node; lists are kept in ascending address order */
} address_list_node;

/* global reference locations registered via heap_addreference() */
static address_list_node* references = NULL;
/* addresses of heap objects that have a finalizer attached */
static address_list_node* finalizers = NULL;
96
97 #ifdef GC_COLLECT_STATISTICS
98
99 static unsigned long gc_collections_count = 0;
100
101 static unsigned long gc_alloc_total = 0;
102 static unsigned long gc_alloc_count = 0;
103
104 static unsigned long gc_mark_heapblocks_visited = 0;
105 static unsigned long gc_mark_not_aligned = 0;
106 static unsigned long gc_mark_not_inheap = 0;
107 static unsigned long gc_mark_not_object = 0;
108 static unsigned long gc_mark_objects_marked = 0;
109 static unsigned long gc_mark_already_marked = 0;
110
111 static unsigned long gc_mark_null_pointer = 0;
112
113 #endif
114
115 #ifdef FINALIZER_COUNTING
116
117 static unsigned long gc_finalizers_executed = 0;
118 static unsigned long gc_finalizers_detected = 0;
119
120 #endif
121
122 #ifdef USE_THREADS
123 static iMux  alloc_mutex;
124 #endif
125
126 #ifdef COLLECT_LIFESPAN
127 static FILE* tracefile;
128 #endif
129
130 #ifdef COLLECT_FRAGMENTATION
131 static FILE* fragfile;
132 static FILE* fragsizefile;
133 #endif
134
135 /* --- implementation */
136
137 void 
138 heap_init (SIZE size, 
139                    SIZE startsize, /* when should we collect for the first time ? */
140                    void **in_stackbottom)
141 {
142         /* 1. Initialise the freelists & reset the allocator's state */
143         allocator_init(); 
144
145         /* 2. Allocate at least (alignment!) size bytes of memory for the heap */
146         heap_size = align_size(size + ((1 << ALIGN) - 1));
147
148 #if !(defined(HAVE_MAP_ANONYMOUS))
149         heap_base = malloc(heap_size);
150 #else
151         heap_base = (void*) mmap (NULL, 
152                                                           ((size_t)heap_size + PAGESIZE_MINUS_ONE) & ~PAGESIZE_MINUS_ONE,
153                                                           PROT_READ | PROT_WRITE, 
154                                                           MAP_PRIVATE | MAP_ANONYMOUS, 
155                                                           -1, 
156                                                           (off_t) 0);
157 #endif
158
159         if (heap_base == (void*)MAP_FAILED) {
160                 /* unable to allocate the requested amount of memory */
161                 fprintf(stderr, "heap2.c: The queen, mylord, is dead! (mmap failed)\n");
162                 exit(-1);
163         }
164
165         /* 3. Allocate the bitmaps */
166         start_bitmap = bitmap_allocate(heap_base, heap_size);
167         reference_bitmap = bitmap_allocate(heap_base, heap_size);
168         mark_bitmap = bitmap_allocate(heap_base, heap_size);
169
170         start_bits = start_bitmap->bitmap;
171     reference_bits = reference_bitmap->bitmap;
172     mark_bits = mark_bitmap->bitmap;
173
174         /* 4. Mark the first free-area as an object-start */
175         bitmap_setbit(start_bits, heap_base);
176
177         /* 5. Initialise the heap's state (heap_top, etc.) */
178         stackbottom = in_stackbottom; /* copy the stackbottom */
179
180         heap_top = heap_base; /* the current end of the heap (just behind the last allocated object) */
181         heap_limit = (void*)((long)heap_base + heap_size); /* points just behind the last accessible block of the heap */
182
183         /* 6. calculate a useful first collection limit */
184         /* This is extremly primitive at this point... 
185            we should replace it with something more useful -- phil. */
186         heap_next_collection = next_collection_heuristic_init();
187
188         /* 7. Init the global reference lists & finalizer addresses */
189         references = NULL;
190         finalizers = NULL;
191
192 #ifdef STRUCTURES_ON_HEAP
193         heap_addreference(&references);
194         heap_addreference(&finalizers);
195 #endif
196
197 #ifdef USE_THREADS
198         /* 8. Init the mutexes for synchronization */
199         alloc_mutex.holder = 0;
200 #endif
201
202         /* 9. Set up collection of lifespan data */
203 #ifdef COLLECT_LIFESPAN
204 #if 0
205         tracefile = fopen("heap.trace", "w");
206 #else
207         tracefile = popen("gzip -9 >heap.trace.gz", "w");
208 #endif
209         if (!tracefile) {
210                 fprintf(stderr, "heap2.c: Radio Ga Ga! (fopen failed)\n");
211                 exit(-2);
212         }
213
214         fprintf(tracefile, "heap_base\t0x%lx\n", heap_base);    
215         fprintf(tracefile, "heap_limit\t0x%lx\n", heap_limit);
216         fprintf(tracefile, "heap_top\t0x%lx\n", heap_top);
217 #endif
218
219 #if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
220         lifespan_init(heap_base, heap_size);
221 #endif
222
223         /* 10. Set up collection of fragmentation data */
224 #ifdef COLLECT_FRAGMENTATION
225         fragfile = popen("gzip -9 >fragmentation.gz", "w");
226         fragsizefile = popen("gzip -9 >freeblocks.gz", "w");
227 #endif
228 }
229
230 inline
231 static
232 void
233 heap_call_finalizer_for_object_at(java_objectheader* object_addr)
234 {
235         asm_calljavamethod(object_addr->vftbl->class->finalizer, object_addr, NULL, NULL, NULL);
236 #ifdef FINALIZER_COUNTING
237         ++gc_finalizers_executed;
238 #endif
239 }
240
241 void 
242 heap_close (void)
243 {
244         address_list_node* curr = finalizers;
245
246         /* 0. clean up lifespan module */
247 #ifdef COLLECT_LIFESPAN
248 #if 0
249         fclose(tracefile);
250 #else
251         pclose(tracefile);
252 #endif
253 #endif
254
255 #if defined(NEW_COLLECT_LIFESPAN)
256         lifespan_close();
257 #endif
258
259 #ifdef COLLECT_FRAGMENTATION
260         pclose(fragfile);
261         pclose(fragsizefile);
262 #endif
263
264         /* 1. Clean up on the heap... finalize all remaining objects */
265 #if 1
266         while (curr) {
267                 address_list_node* prev = curr;
268                 java_objectheader* addr = (java_objectheader*)(curr->address);
269                 
270                 if (addr && bitmap_testbit(start_bits, addr))
271                         heap_call_finalizer_for_object_at(addr);
272                 
273                 curr = curr->next;
274                 free(prev);
275         }
276 #endif
277
278         /* 2. Release the bitmaps */
279         bitmap_release(start_bitmap);
280         bitmap_release(reference_bitmap);
281         bitmap_release(mark_bitmap);
282
283         /* 3. Release the memory allocated to the heap */
284         if (heap_base)
285                 munmap(heap_base, heap_size);
286
287         /* 4. emit statistical data */
288 #ifdef GC_COLLECT_STATISTICS
289         sprintf(logtext, "%ld bytes for %ld objects allocated.", 
290                         gc_alloc_total, gc_alloc_count); 
291         dolog();
292         sprintf(logtext, "%ld garbage collections performed.", gc_collections_count);
293         dolog();
294         sprintf(logtext, "%ld heapblocks visited, %ld objects marked", 
295                         gc_mark_heapblocks_visited, gc_mark_objects_marked);
296         dolog();
297         sprintf(logtext, "    %ld null pointers.", gc_mark_null_pointer);
298         dolog();
299         sprintf(logtext, "    %ld out of heap.", gc_mark_not_inheap);
300         dolog();
301         sprintf(logtext, "    %ld visits to objects already marked.", gc_mark_already_marked);
302         dolog();
303         sprintf(logtext, "    %ld not an object.", gc_mark_not_object);
304         dolog();
305         sprintf(logtext, "    %ld potential references not aligned.", gc_mark_not_aligned);
306         dolog();
307 #endif
308
309 #ifdef FINALIZER_COUNTING
310         sprintf(logtext, "%ld objects with a finalizer", gc_finalizers_detected);
311         dolog();
312
313         if (gc_finalizers_detected == gc_finalizers_executed)
314                 sprintf(logtext, "    all finalizers executed.");
315         else
316                 sprintf(logtext, "    only %ld finalizers executed.", gc_finalizers_executed);
317         dolog();
318 #endif
319
320 #if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
321         lifespan_emit();
322 #endif
323 }
324
325 inline
326 static
327 void 
328 heap_add_address_to_address_list(address_list_node** list, void* address)
329 {
330         /* Note: address lists are kept sorted to simplify finalization */
331
332         address_list_node* new_node = malloc(sizeof(address_list_node));
333         new_node->address = address;
334         new_node->next = NULL;
335
336         while (*list && (*list)->next) {
337                 if ((*list)->next->address < address)
338                         list = &(*list)->next;
339                 else {
340                         new_node->next = *list;
341                         *list = new_node;
342                         return;
343                 }                       
344         }
345
346         new_node->next = *list;
347         *list = new_node;
348 }
349
350
351 inline
352 static
353 void
354 heap_add_finalizer_for_object_at(void* addr)
355 {
356         /* Finalizers seem to be very rare... for this reason, I keep a linked
357            list of object addresses, which have a finalizer attached. This list
358            is kept in ascending order according to the order garbage is freed.
359            This list is currently kept separate from the heap, but should be 
360            moved onto it, but some JIT-marker code to handle these special
361            objects will need to be added first. -- phil. */
362
363         heap_add_address_to_address_list(&finalizers, addr);
364
365 #ifdef COLLECT_LIFESPAN
366         fprintf(tracefile, "finalizer\t0x%lx\n", addr);
367 #endif
368 }
369
370 void* 
371 heap_allocate (SIZE               in_length, 
372                            bool           references, 
373                            methodinfo *finalizer)
374 {
375         SIZE    length = align_size(in_length + ((1 << ALIGN) - 1)); 
376         void*   free_chunk = NULL;
377
378 #if 0
379         /* check for misaligned in_length parameter */
380         if (length != in_length)
381                 fprintf(stderr,
382                                 "heap2.c: heap_allocate was passed unaligned in_length parameter: %ld, \n         aligned to %ld. (mistrust)\n",
383                                 in_length, length);
384 #endif
385
386 #ifdef FINALIZER_COUNTING
387         if (finalizer)
388                 ++gc_finalizers_detected;
389 #endif
390
391 #if defined(COLLECT_LIFESPAN) || defined(NEW_COLLECT_LIFESPAN)
392         /* perform garbage collection to collect data for lifespan analysis */
393         if (heap_top > heap_base)
394                 gc_call();
395 #endif
396
397 #ifdef USE_THREADS
398         lock_mutex(&alloc_mutex);
399 #endif  
400  retry:
401         /* 1. attempt to get a free block with size >= length from the freelists */
402         free_chunk = allocator_alloc(length);
403
404         /* 2. if unsuccessful, try alternative allocation strategies */
405         if (!free_chunk) {
406                 /* 2.a if the collection threshold would be exceeded, collect the heap */
407                 if ((long)heap_top + length > (long)heap_next_collection) {
408                         /* 2.a.1. collect if the next_collection threshold would be exceeded */
409                         gc_call();
410
411                         /* 2.a.2. we just ran a collection, recheck the freelists */
412                         free_chunk = allocator_alloc(length);
413                         if (free_chunk)
414                                 goto success;
415
416                         /* 2.a.3. we can't satisfy the request from the freelists, check
417                                   against the heap_limit whether growing the heap is possible */
418                         if ((long)heap_top + length > (long)heap_limit)
419                                 goto failure;
420                 }
421
422                 /* 2.b. grow the heap */
423                 free_chunk = heap_top;
424                 heap_top = (void*)((long)heap_top + length);
425         }
426
427  success:
428         /* 3.a. mark all necessary bits, store the finalizer & return the newly allocated block */
429
430         /* I don't mark the object-start anymore, as it always is at the beginning of a free-block,
431            which already is marked (Note: The first free-block gets marked in heap_init). -- phil. */
432         bitmap_setbit(start_bits, free_chunk); /* mark the new object */
433
434 #ifndef SIZE_FROM_CLASSINFO
435         bitmap_setbit(start_bits, (void*)((long)free_chunk + (long)length)); /* mark the freespace behind the new object */
436 #endif
437
438         if (references)
439                 bitmap_setbit(reference_bits, free_chunk);
440         else 
441                 bitmap_clearbit(reference_bits, free_chunk);
442
443         /* store a hint, that there's a finalizer for this address */
444         if (finalizer)
445                 heap_add_finalizer_for_object_at(free_chunk);
446
447 #ifdef GC_COLLECT_STATISTICS
448         gc_alloc_total += length;
449         ++gc_alloc_count;
450 #endif
451
452 #ifdef COLLECT_LIFESPAN
453         fprintf(tracefile, "alloc\t0x%lx\t0x%lx\n", 
454                         free_chunk, (long)free_chunk + length);
455 #endif
456
457 #if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
458         lifespan_alloc(free_chunk, length);
459 #endif
460
461  failure:
462 #ifdef USE_THREADS
463         unlock_mutex(&alloc_mutex);
464 #endif  
465         return free_chunk;
466 }
467
468 void 
469 heap_addreference (void **reflocation)
470 {
471         /* I currently use a separate linked list (as in the original code) to hold
472            the global reference locations, but I'll change this to allocate these
473            in blocks on the heap; we'll have to add JIT-Marker code for those Java
474            objects then. -- phil. */
475
476         heap_add_address_to_address_list(&references, reflocation);
477 }
478
479 static
480 inline
481 void gc_finalize (void)
482 {
483         /* This will have to be slightly rewritten as soon the JIT-marked heap-based lists are used. -- phil. */
484
485         address_list_node*  curr = finalizers;
486         address_list_node*  prev;
487
488 #if 0
489         /* FIXME: new code, please! */
490 #else
491         while (curr) {
492                 if (curr->address) {
493                         if (!bitmap_testbit(mark_bits, curr->address)) {
494
495 #ifdef FINALIZER_COUNTING
496                                 ++gc_finalizers_executed;
497 #endif
498                                 asm_calljavamethod(((java_objectheader*)curr->address)->vftbl->class->finalizer, 
499                                                                    curr->address, NULL, NULL, NULL);
500                                 curr->address = 0;
501                         }
502                 }
503
504                 curr = curr->next;
505         }
506 #endif
507 }
508
509
510 inline
511 static
512 void* advance_address(void* addr)
513 {
514         if (*(u8*)addr & 0x1)
515                 return addr + (*(u8)addr & ~0x3);
516                 
517         if (((java_objectheader*)addr)->vftbl == class_array->vftbl)
518                 addr = (void**)((long)addr + (long)((java_arrayheader*)addr)->alignedsize);
519         else
520                 addr = (void**)((long)addr + (long)((java_objectheader*)addr)->vftbl->class->alignedsize);
521 }
522
523 #define FREE     0
524 #define USED     1
525 #define UNMARKED 2
526
527 inline
528 static
529 int get_type(void* addr)
530 {
531         if (bitmap_testbit(markbits, addr))
532                 return USED;
533
534         if (*(u8*)addr & 0x2)
535                 return FREE;
536
537         return UNMARKED;
538 }
539
540 inline
541 static 
542 void gc_reclaim (void)
543 {
544         void* free_start;
545         void* free_end = heap_base;
546
547         void* addr;
548         void* addr1 = NULL;
549         void* addr2 = NULL;
550         u8    size = 0;
551         int   type;
552
553         BITBLOCK* temp_bits;
554         bitmap_t* temp_bitmap;
555
556 #ifdef COLLECT_FRAGMENTATION
557         unsigned long  free_size = 0;
558         unsigned long  free_fragments = 0;
559 #endif
560
561         /* 2. reclaim unmarked objects */
562
563         addr = heap_start;
564
565  state0:
566         switch (get_type(addr)) {
567         UNMARKED:
568                 addr1 = addr;
569                 size = *(u8*)addr & ~0x3;
570                 addr = advance_address(addr);
571                 goto state1;
572
573         FREE:
574                 addr1 = addr;
575                 addr = advance_address(addr);
576                 goto state2;
577
578         USED:
579                 addr = advance_address(addr);
580                 goto state0;
581         }
582
583  state1:
584         switch (get_type(addr)) {
585         UNMARKED:
586                 size += *(u8*)addr & ~0x3;
587                 addr = advance_address(addr);
588                 goto state1;
589
590         FREE:
591                 allocator_free_prejoin(addr1, addr, size);
592                 addr = advance_address(addr);
593                 goto state2;
594
595         USED:
596                 allocator_free(addr1, size);
597                 size = 0;
598                 addr = advance_address(addr);
599                 goto state0;
600         }
601
602  state2:
603         switch (get_type(addr)) {
604         UNMARKED:
605                 size = *(u8*)addr & ~0x3;
606                 addr2 = addr;
607                 addr = advance_address(addr);
608                 goto state3;
609
610         FREE:
611                 fprintf(stderr, "error!");
612                 exit(-2);
613
614         USED:
615                 addr = advance_address(addr);
616                 goto state0;
617         }
618
619  state3:
620         switch (get_type(addr)) {
621         UNMARKED:
622                 size += *(u8*)addr & ~0x3;
623                 addr = advance_address(addr);
624                 goto state3;
625
626         FREE:
627                 allocator_free_blockjoin(addr1, addr, size);
628                 addr = advance_address(addr);
629                 goto state2;
630
631         USED:
632                 allocator_free_postjoin(addr1, addr, size);
633                 addr = advance_address(addr);
634                 goto state0;
635         }
636
637         /* 3.1. swap mark & start bitmaps */
638         temp_bits = mark_bits;
639         mark_bits = start_bits;
640         start_bits = temp_bits;
641
642         temp_bitmap = mark_bitmap;
643         mark_bitmap = start_bitmap;
644         start_bitmap = temp_bitmap;
645
646         /* 3.3. update heap_top */
647         if (free_start < heap_top) {
648                 heap_top = free_start;
649 #ifdef NEW_COLLECT_LIFESPAN
650                         lifespan_free(free_start, free_end);
651 #endif
652         }
653
654 #if 0
655         if (heap_top < heap_limit)
656                 bitmap_setbit(start_bits, heap_top);
657 #endif
658
659         /* 3.4. emit fragmentation info */
660 #ifdef COLLECT_FRAGMENTATION
661         {
662                 unsigned long heap_full = (unsigned long)heap_top - (unsigned long)heap_base;
663                 unsigned long heap_life = (unsigned long)heap_top - (unsigned long)heap_base - free_size;
664
665                 fprintf(fragfile, 
666                                 "%ld\t%ld\t%ld\t%ld\t%f\t%f\t%f\n", 
667                                 heap_full,
668                                 heap_life,
669                                 free_size, 
670                                 free_fragments,
671                                 100*(float)free_size/(free_fragments ? free_fragments : 1),
672                                 100*(float)heap_life/(heap_full ? heap_full : 1),
673                                 100*(float)free_size/(heap_full ? heap_full : 1)
674                                 );
675         }
676         fflush(fragfile);
677
678         allocator_dump_to_file(fragsizefile);
679 #endif
680
681         /* 4. adjust the collection threshold */
682         heap_next_collection = next_collection_heuristic();
683         if (heap_next_collection > heap_limit)
684                 heap_next_collection = heap_limit;
685
686 #ifdef COLLECT_LIFESPAN
687         fprintf(tracefile, "heap_top\t0x%lx\n", heap_top);
688 #endif
689 }
690
691 inline
692 static
693 void 
694 gc_mark_object_at (void** addr)
695 {
696         /*
697          * A note concerning the order of the tests:
698          *
699          * Statistics collected during a test run, where alignment
700          * was tested before checking whether the addr points into 
701          * the heap:
702          * >> LOG: 9301464 bytes for 196724 objects allocated.
703          * >> LOG: 15 garbage collections performed.
704          * >> LOG: 6568440 heapblocks visited, 469249 objects marked
705          * >> LOG:     1064447 visits to objects already marked.
706          * >> LOG:     988270 potential references not aligned.
707          * >> LOG:     4049446 out of heap.
708          * >> LOG:     5236 not an object.
709          *
710          * These results show, that only about 1/4 of all heapblocks 
711          * point to objects; The single most important reason why a 
712          * heapblock can not point at an object is, that it's value 
713          * doesn't fall within the heap area (this test was performed
714          * with a 3MB heap).
715          *
716          * From the results, the various tests have to be conducted 
717          * in the following order for maximum efficiency:
718          *    1. addr in heap?
719          *    2. already marked ?
720          *    3. aligned ?
721          *    4. object ?
722          *
723          * The results after reordering:
724          * >> LOG: 9301464 bytes for 196724 objects allocated.
725          * >> LOG: 15 garbage collections performed.
726          * >> LOG: 6568440 heapblocks visited, 469249 objects marked
727          * >> LOG:     1064447 visits to objects already marked.
728          * >> LOG:     350 potential references not aligned.
729          * >> LOG:     5037366 out of heap.
730          * >> LOG:     5236 not an object.
731          *
732          * And using:
733          *    1. addr in heap?
734          *    2. already marked ?
735          *    3. object ?
736          *    4. aligned ?
737          * 
738          * >> LOG: 9301464 bytes for 196724 objects allocated.
739          * >> LOG: 15 garbage collections performed.
740          * >> LOG: 6568440 heapblocks visited, 469249 objects marked
741          * >> LOG:     5037366 out of heap.
742          * >> LOG:     1064456 visits to objects already marked.
743          * >> LOG:     5539 not an object.
744          * >> LOG:     38 potential references not aligned.
745          *
746          * Apparently, most unaligned values will already be eliminated
747          * when checking against the bounds of the heap. Checking this
748          * property first, should thus improve collection times.
749          */
750
751         /* 1.a. if addr doesn't point into the heap, return. */
752         if ((unsigned long)addr - (unsigned long)heap_base >= 
753                 ((long)heap_top - (long)heap_base)) {
754 #ifdef GC_COLLECT_STATISTICS
755                 if (addr == NULL)
756                         ++gc_mark_null_pointer;
757                 else
758                         ++gc_mark_not_inheap;
759 #endif
760                 return;
761         }
762
763         /* 1.b. if align(addr) has already been marked during this collection, return. */
764         if (bitmap_testbit(mark_bits, (void*)addr)) {
765 #ifdef GC_COLLECT_STATISTICS
766                 ++gc_mark_already_marked;
767 #endif
768                 return;
769         }
770
771         /* 1.c. if align(addr) doesn't point to the start of an object, return. */
772         if (!bitmap_testbit(start_bits, (void*)addr)) {
773 #ifdef GC_COLLECT_STATISTICS
774                 ++gc_mark_not_object;
775 #endif
776                 return;
777         }
778
779         /* 1.d. if addr is not properly aligned, return. */
780         if ((long)addr & ((1 << ALIGN) - 1)) {
781 #ifdef GC_COLLECT_STATISTICS
782                 ++gc_mark_not_aligned;
783 #endif
784                 return;
785         }
786
787         /* 2. Mark the object at addr */
788         bitmap_setbit(mark_bits, (void*)addr); 
789 #ifdef GC_COLLECT_STATISTICS
790         ++gc_mark_objects_marked;
791 #endif
792
793 #ifdef JIT_MARKER_SUPPORT
794         asm_calljavamethod(addr->vftbl->class->marker, addr, NULL, NULL, NULL);
795 #else
796
797         /* 3. mark the references contained within the extents of the object at addr */
798         if (bitmap_testbit(reference_bits, addr)) {
799                 /* 3.1. find the end of the object */
800                 void** end;
801
802 #ifdef SIZE_FROM_CLASSINFO
803                 if (((java_objectheader*)addr)->vftbl == class_array->vftbl)
804                         end = (void**)((long)addr + (long)((java_arrayheader*)addr)->alignedsize);
805                 else
806                         end = (void**)((long)addr + (long)((java_objectheader*)addr)->vftbl->class->alignedsize);
807 #else
808                 end = (void**)bitmap_find_next_setbit(start_bitmap, addr + 1); /* points just behind the object */
809 #endif
810
811                 /* 3.2. mark the references within the object at addr */
812 #ifdef GC_COLLECT_STATISTICS
813                 gc_mark_heapblocks_visited += ((long)end - (long)addr) >> ALIGN;
814 #endif
815                 while (addr < end)
816                         gc_mark_object_at(*(addr++));
817 #endif  
818         }
819         
820         return;
821 }
822
823
824 inline
825 static
826 void gc_mark_references (void)
827 {
828         address_list_node* curr = references;
829
830         while (curr) {
831 #ifdef GC_COLLECT_STATISTICS
832                 ++gc_mark_heapblocks_visited;
833 #endif          
834                 gc_mark_object_at(*((void**)(curr->address)));
835                 curr = curr->next;
836         }
837 }
838
inline
static 
void 
markreferences(void** start, void** end)
{
	/* Conservatively treat every word in [start, end) as a potential
	   reference and try to mark it. */
	void** cursor;

	for (cursor = start; cursor < end; ++cursor) {
#ifdef GC_COLLECT_STATISTICS
		++gc_mark_heapblocks_visited;
#endif		
		gc_mark_object_at(*cursor);
	}
}
851
inline
static
void gc_mark_stack (void)
{
	/* Conservatively scan the C stack(s) for references.  &dummy serves
	   as the current top-of-stack; the two-way comparison against
	   stackbottom handles both stack growth directions. */
	void *dummy;

#ifdef USE_THREADS 
    thread *aThread;
	
	/* before the thread system is up, scan only the primordial stack */
	if (currentThread == NULL) {
		void **top_of_stack = &dummy;
		
		if (top_of_stack > stackbottom)
			markreferences(stackbottom, top_of_stack);
		else
			markreferences(top_of_stack, stackbottom);
	}
	else {
		/* scan every live thread: the thread object itself plus the
		   used portion of its stack (usedStackTop is recorded in
		   gc_call before the collection starts) */
		for (aThread = liveThreads; aThread != 0;
			 aThread = CONTEXT(aThread).nextlive) {
			gc_mark_object_at((void*)aThread);
			if (CONTEXT(aThread).usedStackTop > CONTEXT(aThread).stackEnd)
				markreferences((void**)CONTEXT(aThread).stackEnd,
							   (void**)CONTEXT(aThread).usedStackTop);
			else	
				markreferences((void**)CONTEXT(aThread).usedStackTop,
							   (void**)CONTEXT(aThread).stackEnd);
	    }

		/* the scheduler queues hold thread references, too */
		markreferences((void**)&threadQhead[0],
					   (void**)&threadQhead[MAX_THREAD_PRIO]);
	}
#else
    void **top_of_stack = &dummy;

    if (top_of_stack > stackbottom)
        markreferences(stackbottom, top_of_stack);
	else
        markreferences(top_of_stack, stackbottom);
#endif
}
893
894
static 
void gc_run (void)
{
	/* Collection driver: mark from registers/stacks and global references,
	   finalize dead objects, then sweep.  Call only via gc_call(), which
	   guarantees sufficient stack space.  Statement order matters:
	   heap_next_collection is raised to heap_limit *before* gc_finalize
	   runs, so allocating finalizers do not trigger a nested collection. */

	/* re-entrancy guard (finalizers may allocate and call back in) */
	static int armageddon_is_near = 0;

	if (armageddon_is_near) {
		/* armageddon_is_here! */
		fprintf(stderr, "Oops, seems like there's a slight problem here: gc_run() called while still running?!\n");
		return;
	}

	armageddon_is_near = true;
	heap_next_collection = heap_limit;  /* try to avoid finalizer-induced collections */

	/* start from a clean mark bitmap */
	bitmap_clear(mark_bitmap);

	/* spill registers so the conservative scan sees them as well */
	asm_dumpregistersandcall(gc_mark_stack);
	gc_mark_references();
	gc_finalize();
	gc_reclaim();

	armageddon_is_near = false;

#ifdef GC_COLLECT_STATISTICS
	++gc_collections_count;
#endif
}
922
923
924 /************************* Function: gc_init **********************************
925
926   Initializes anything that must be initialized to call the gc on the right
927   stack.
928
929 ******************************************************************************/
930
void
gc_init (void)
{
	/* Intentionally empty: nothing needs set-up yet; gc_call() handles
	   the stack-switching itself when a collection is requested. */
}
935
936 /************************** Function: gc_call ********************************
937
938   Calls the garbage collector. The garbage collector should always be called
939   using this function since it ensures that enough stack space is available.
940
941 ******************************************************************************/
942
void
gc_call (void)
{
	/* Public entry point for a collection.  With threads enabled, the
	   collection always runs on the main thread's stack (switching over
	   with asm_switchstackandcall when called from another thread) so
	   enough stack space is available; without threads, gc_run() is
	   called directly. */
#ifdef USE_THREADS
	u1 dummy;

	assert(blockInts == 0);

	intsDisable();			/* no thread switches during the collection */
	if (currentThread == NULL || currentThread == mainThread) {
		/* record the current stack top so gc_mark_stack can scan it */
		CONTEXT(mainThread).usedStackTop = &dummy;
		gc_run();
		}
	else
		asm_switchstackandcall(CONTEXT(mainThread).usedStackTop, gc_run,
							   (void**)&(CONTEXT(currentThread).usedStackTop));
	intsRestore();
#else
	gc_run();
#endif
}
964
965
966
967 /*
968  * These are local overrides for various environment variables in Emacs.
969  * Please do not remove this and leave it at the end of the file, where
970  * Emacs will automagically detect them.
971  * ---------------------------------------------------------------------
972  * Local variables:
973  * mode: c
974  * indent-tabs-mode: t
975  * c-basic-offset: 4
976  * tab-width: 4
977  * End:
978  */
979