/* mm/heap2.c */
#include <stddef.h>
#include <unistd.h>             /* getpagesize, mmap, ... */
#include <sys/mman.h>
#ifndef MAP_FAILED
#define MAP_FAILED ((void*) -1)
#endif
#include <sys/types.h>
#include <stdio.h>
#include "../asmpart.h"
#include "../callargs.h"
#include "../threads/thread.h"
#include "../threads/locks.h"
#include "../sysdep/threads.h"
#include <assert.h>
#include "lifespan.h"

#include "mm.h"

#define PAGESIZE_MINUS_ONE      (getpagesize() - 1)

#undef ALIGN
#undef OFFSET

#define HEURISTIC_SEL    0
#define HEURISTIC_PARAM  2UL


#define next_collection_heuristic_init()   \
             (void*)((long)heap_top + (((long)heap_limit - (long)heap_top) >> 4))

#if HEURISTIC_SEL == 0
#define next_collection_heuristic()   \
             (void*)((long)heap_top + (((long)heap_limit - (long)heap_top) >> HEURISTIC_PARAM))
#elif HEURISTIC_SEL == 1
#define next_collection_heuristic()   \
             (void*)((long)heap_top + (((long)heap_top - (long)heap_base) << HEURISTIC_PARAM))
#elif HEURISTIC_SEL == 2
#define next_collection_heuristic()   \
             (void*)((long)heap_top + HEURISTIC_PARAM)
#endif
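
/* Worked example (illustrative numbers only): with HEURISTIC_SEL == 0 and
   HEURISTIC_PARAM == 2UL, a heap_top of 0x10100000 and a heap_limit of
   0x10500000 give

       heap_top + ((heap_limit - heap_top) >> 2)
     = 0x10100000 + (0x400000 >> 2)
     = 0x10200000,

   i.e. the next collection triggers once a quarter of the remaining room
   has been used up. */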

//#define PSEUDO_GENERATIONAL
//#define COLLECT_LIFESPAN
//#define NEW_COLLECT_LIFESPAN
//#define COLLECT_FRAGMENTATION
//#define GC_COLLECT_STATISTICS
//#define FINALIZER_COUNTING

#undef STRUCTURES_ON_HEAP
//#define STRUCTURES_ON_HEAP

#define false 0
#define true 1

#include "allocator.h" /* rev. 1 allocator */
#include "bitmap2.h"   /* rev. 2 bitmap management */

bool collectverbose;

void gc_call (void);

/* --- macros */

#define align_size(size)        ((size) & ~((1 << ALIGN) - 1))
#define MAP_ADDRESS                     (void*) 0x10000000
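
/* Note that align_size() rounds *down* to a block boundary, so callers pass
   size + ((1 << ALIGN) - 1) to round up. E.g., assuming ALIGN == 3 (8-byte
   heapblocks, consistent with the hard-coded `+ 8` in gc_reclaim), a request
   of 13 bytes becomes align_size(13 + 7) == 16. */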

/* --- file-wide variables */

static void*    heap_base = NULL;
static SIZE             heap_size = 0;
static void*    heap_top = NULL;
static void*    heap_limit = NULL;
static void*    heap_next_collection = NULL;

static bitmap_t* start_bitmap = NULL;
static BITBLOCK* start_bits = NULL;
static bitmap_t* reference_bitmap = NULL;
static BITBLOCK* reference_bits = NULL;
static bitmap_t* mark_bitmap = NULL;
static BITBLOCK* mark_bits = NULL;

static void**   stackbottom = NULL;

typedef struct address_list_node {
        void* address;
        struct address_list_node* prev; /* note: never maintained by the code in this file */
        struct address_list_node* next;
} address_list_node;

static address_list_node* references = NULL;
static address_list_node* finalizers = NULL;

#ifdef GC_COLLECT_STATISTICS

static unsigned long gc_collections_count = 0;

static unsigned long gc_alloc_total = 0;
static unsigned long gc_alloc_count = 0;

static unsigned long gc_mark_heapblocks_visited = 0;
static unsigned long gc_mark_not_aligned = 0;
static unsigned long gc_mark_not_inheap = 0;
static unsigned long gc_mark_not_object = 0;
static unsigned long gc_mark_objects_marked = 0;
static unsigned long gc_mark_already_marked = 0;

#endif

#ifdef FINALIZER_COUNTING

static unsigned long gc_finalizers_executed = 0;
static unsigned long gc_finalizers_detected = 0;

#endif

#ifdef USE_THREADS
static iMux  alloc_mutex;
#endif

#ifdef COLLECT_LIFESPAN
static FILE* tracefile;
#endif

#ifdef COLLECT_FRAGMENTATION
static FILE* fragfile;
static FILE* fragsizefile;
#endif

/* --- implementation */

void
heap_init (SIZE size,
                   SIZE startsize, /* when should we collect for the first time? (currently unused) */
                   void **in_stackbottom)
{
        /* 1. Initialise the freelists & reset the allocator's state */
        allocator_init();

        /* 2. Allocate at least (alignment!) size bytes of memory for the heap */
        heap_size = align_size(size + ((1 << ALIGN) - 1));

#ifdef DONT_MMAP
        heap_base = malloc(heap_size);
#else
        heap_base = (void*) mmap (NULL,
                                                          ((size_t)heap_size + PAGESIZE_MINUS_ONE) & ~PAGESIZE_MINUS_ONE,
                                                          PROT_READ | PROT_WRITE,
                                                          MAP_PRIVATE | MAP_ANONYMOUS,
                                                          -1,
                                                          (off_t) 0);
#endif

        if (heap_base == NULL || heap_base == (void*)MAP_FAILED) {
                /* unable to allocate the requested amount of memory */
                fprintf(stderr, "heap2.c: The queen, mylord, is dead! (mmap/malloc failed)\n");
                exit(-1);
        }

        /* 3. Allocate the bitmaps */
        start_bitmap = bitmap_allocate(heap_base, heap_size);
        reference_bitmap = bitmap_allocate(heap_base, heap_size);
        mark_bitmap = bitmap_allocate(heap_base, heap_size);

        start_bits = start_bitmap->bitmap;
        reference_bits = reference_bitmap->bitmap;
        mark_bits = mark_bitmap->bitmap;

        /* 4. Mark the first free-area as an object-start */
        bitmap_setbit(start_bits, heap_base);

        /* 5. Initialise the heap's state (heap_top, etc.) */
        stackbottom = in_stackbottom; /* copy the stackbottom */

        heap_top = heap_base; /* the current end of the heap (just behind the last allocated object) */
        heap_limit = (void*)((long)heap_base + heap_size); /* points just behind the last accessible block of the heap */

        /* 6. calculate a useful first collection limit */
        /* This is extremely primitive at this point...
           we should replace it with something more useful -- phil. */
        heap_next_collection = next_collection_heuristic_init();

        /* 7. Init the global reference lists & finalizer addresses */
        references = NULL;
        finalizers = NULL;

#ifdef STRUCTURES_ON_HEAP
        heap_addreference(&references);
        heap_addreference(&finalizers);
#endif

#ifdef USE_THREADS
        /* 8. Init the mutexes for synchronization */
        alloc_mutex.holder = 0;
#endif

        /* 9. Set up collection of lifespan data */
#ifdef COLLECT_LIFESPAN
#if 0
        tracefile = fopen("heap.trace", "w");
#else
        tracefile = popen("gzip -9 >heap.trace.gz", "w");
#endif
        if (!tracefile) {
                fprintf(stderr, "heap2.c: Radio Ga Ga! (fopen failed)\n");
                exit(-2);
        }

        fprintf(tracefile, "heap_base\t0x%lx\n", (unsigned long)heap_base);
        fprintf(tracefile, "heap_limit\t0x%lx\n", (unsigned long)heap_limit);
        fprintf(tracefile, "heap_top\t0x%lx\n", (unsigned long)heap_top);
#endif

#ifdef NEW_COLLECT_LIFESPAN
        lifespan_init(heap_base, heap_size);
#endif

        /* 10. Set up collection of fragmentation data */
#ifdef COLLECT_FRAGMENTATION
        fragfile = popen("gzip -9 >fragmentation.gz", "w");
        fragsizefile = popen("gzip -9 >freeblocks.gz", "w");
#endif
}
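
/* A minimal usage sketch (illustrative only, not compiled in): how an
   embedding runtime might drive this module. The 8MB heap size, the 512KB
   first-collection hint and the local `dummy` variable approximating the
   C stack bottom are assumptions for the example. */
#if 0
static void example_heap_lifecycle (void)
{
        void *dummy;                      /* its address approximates the stack bottom */
        void **bottom = (void**)&dummy;

        heap_init(8 * 1024 * 1024,        /* heap size in bytes */
                          512 * 1024,             /* first-collection hint (currently unused) */
                          bottom);

        /* ... allocate objects via heap_allocate(), force collections
           via gc_call() ... */

        heap_close();
}
#endif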

__inline__
static
void
heap_call_finalizer_for_object_at(java_objectheader* object_addr)
{
        asm_calljavamethod(object_addr->vftbl->class->finalizer, object_addr, NULL, NULL, NULL);
#ifdef FINALIZER_COUNTING
        ++gc_finalizers_executed;
#endif
}

void
heap_close (void)
{
        address_list_node* curr = finalizers;

        /* 0. clean up lifespan module */
#ifdef COLLECT_LIFESPAN
#if 0
        fclose(tracefile);
#else
        pclose(tracefile);
#endif
#endif

#ifdef NEW_COLLECT_LIFESPAN
        lifespan_close();
#endif

#ifdef COLLECT_FRAGMENTATION
        pclose(fragfile);
        pclose(fragsizefile);
#endif

        /* 1. Clean up on the heap... finalize all remaining objects */
#if 1
        while (curr) {
                address_list_node* prev = curr;
                java_objectheader* addr = (java_objectheader*)(curr->address);

                if (addr && bitmap_testbit(start_bits, addr))
                        heap_call_finalizer_for_object_at(addr);

                curr = curr->next;
                free(prev);
        }
#endif

        /* 2. Release the bitmaps */
        bitmap_release(start_bitmap);
        bitmap_release(reference_bitmap);
        bitmap_release(mark_bitmap);

        /* 3. Release the memory allocated to the heap */
#ifdef DONT_MMAP
        free(heap_base);
#else
        if (heap_base)
                munmap(heap_base, heap_size);
#endif

        /* 4. emit statistical data */
#ifdef GC_COLLECT_STATISTICS
        sprintf(logtext, "%lu bytes for %lu objects allocated.",
                        gc_alloc_total, gc_alloc_count);
        dolog();
        sprintf(logtext, "%lu garbage collections performed.", gc_collections_count);
        dolog();
        sprintf(logtext, "%lu heapblocks visited, %lu objects marked",
                        gc_mark_heapblocks_visited, gc_mark_objects_marked);
        dolog();
        sprintf(logtext, "    %lu out of heap.", gc_mark_not_inheap);
        dolog();
        sprintf(logtext, "    %lu visits to objects already marked.", gc_mark_already_marked);
        dolog();
        sprintf(logtext, "    %lu not an object.", gc_mark_not_object);
        dolog();
        sprintf(logtext, "    %lu potential references not aligned.", gc_mark_not_aligned);
        dolog();
#endif

#ifdef FINALIZER_COUNTING
        sprintf(logtext, "%lu objects with a finalizer", gc_finalizers_detected);
        dolog();

        if (gc_finalizers_detected == gc_finalizers_executed)
                sprintf(logtext, "    all finalizers executed.");
        else
                sprintf(logtext, "    only %lu finalizers executed.", gc_finalizers_executed);
        dolog();
#endif

#ifdef NEW_COLLECT_LIFESPAN
        lifespan_emit();
#endif
}

__inline__
static
void
heap_add_address_to_address_list(address_list_node** list, void* address)
{
        /* Note: address lists are kept sorted (ascending) to simplify finalization */

        address_list_node* new_node = malloc(sizeof(address_list_node));

        assert(new_node);

        new_node->address = address;
        new_node->prev = NULL; /* the back links are not maintained */

        /* advance to the first node whose address is not smaller than the new one */
        while (*list && (*list)->address < address)
                list = &(*list)->next;

        /* splice the new node in front of it (or at the end of the list) */
        new_node->next = *list;
        *list = new_node;
}
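
/* Sanity sketch (illustrative only, not compiled in): with the sorted insert
   above, addresses added in any order must come out ascending. */
#if 0
static void example_sorted_insert (void)
{
        address_list_node* list = NULL;

        heap_add_address_to_address_list(&list, (void*)0x30);
        heap_add_address_to_address_list(&list, (void*)0x10);
        heap_add_address_to_address_list(&list, (void*)0x20);

        assert(list->address == (void*)0x10);
        assert(list->next->address == (void*)0x20);
        assert(list->next->next->address == (void*)0x30);
}
#endif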


__inline__
static
void
heap_add_finalizer_for_object_at(void* addr)
{
        /* Finalizers seem to be very rare; for this reason, I keep a linked
           list of object addresses which have a finalizer attached, in
           ascending address order (matching the order in which garbage is
           freed). This list is currently kept separate from the heap; it
           should eventually move onto it, but some JIT-marker code to handle
           these special objects will need to be added first. -- phil. */

        heap_add_address_to_address_list(&finalizers, addr);

#ifdef COLLECT_LIFESPAN
        fprintf(tracefile, "finalizer\t0x%lx\n", (unsigned long)addr);
#endif
}

void*
heap_allocate (SIZE               in_length,
                           bool           references,
                           methodinfo *finalizer)
{
        SIZE    length = align_size(in_length + ((1 << ALIGN) - 1));
        void*   free_chunk = NULL;

#if 0
        /* check for misaligned in_length parameter */
        if (length != in_length)
                fprintf(stderr,
                                "heap2.c: heap_allocate was passed unaligned in_length parameter: %ld, \n         aligned to %ld. (mistrust)\n",
                                in_length, length);
#endif

#ifdef FINALIZER_COUNTING
        if (finalizer)
                ++gc_finalizers_detected;
#endif

#if defined(COLLECT_LIFESPAN) || defined(NEW_COLLECT_LIFESPAN)
        /* perform garbage collection to collect data for lifespan analysis */
        if (heap_top > heap_base)
                gc_call();
#endif

#ifdef USE_THREADS
        lock_mutex(&alloc_mutex);
#endif

        /* 1. attempt to get a free block with size >= length from the freelists */
        free_chunk = allocator_alloc(length);

        /* 2. if unsuccessful, try alternative allocation strategies */
        if (!free_chunk) {
                /* 2.a if the collection threshold would be exceeded, collect the heap */
                if ((long)heap_top + length > (long)heap_next_collection) {
                        /* 2.a.1. collect if the next_collection threshold would be exceeded */
                        gc_call();

                        /* 2.a.2. we just ran a collection, recheck the freelists */
                        free_chunk = allocator_alloc(length);
                        if (free_chunk)
                                goto success;

                        /* 2.a.3. we can't satisfy the request from the freelists, check
                                  against the heap_limit whether growing the heap is possible */
                        if ((long)heap_top + length > (long)heap_limit)
                                goto failure;
                }

                /* 2.b. grow the heap */
                free_chunk = heap_top;
                heap_top = (void*)((long)heap_top + length);
        }

 success:
        /* 3.a. mark all necessary bits, store the finalizer & return the newly allocated block */

        /* Note: an earlier revision did not mark the object-start here, relying on
           the free-block's start bit being set already (the first free-block gets
           marked in heap_init); the bit is now set explicitly. -- phil. */
        bitmap_setbit(start_bits, free_chunk); /* mark the new object */

#ifndef SIZE_FROM_CLASSINFO
        bitmap_setbit(start_bits, (void*)((long)free_chunk + (long)length)); /* mark the freespace behind the new object */
#endif

        if (references)
                bitmap_setbit(reference_bits, free_chunk);
        else
                bitmap_clearbit(reference_bits, free_chunk);

        /* store a hint that there's a finalizer for this address */
        if (finalizer)
                heap_add_finalizer_for_object_at(free_chunk);

#ifdef GC_COLLECT_STATISTICS
        gc_alloc_total += length;
        ++gc_alloc_count;
#endif

#ifdef COLLECT_LIFESPAN
        fprintf(tracefile, "alloc\t0x%lx\t0x%lx\n",
                        (unsigned long)free_chunk, (unsigned long)free_chunk + length);
#endif

#ifdef NEW_COLLECT_LIFESPAN
        lifespan_alloc(free_chunk, length);
#endif

 failure:
        /* the success path falls through to here as well: unlock & return
           (free_chunk is still NULL if the allocation failed) */
#ifdef USE_THREADS
        unlock_mutex(&alloc_mutex);
#endif
        return free_chunk;
}
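
/* Illustrative call (a sketch, not compiled in): allocating an instance
   described by a resolved classinfo `c`. The `instancesize` field and the
   surrounding lookup are assumptions about the caller, not part of this
   module. */
#if 0
static java_objectheader*
example_allocate_instance (classinfo *c)
{
        return (java_objectheader*) heap_allocate((SIZE)c->instancesize,
                                                                                          true,   /* may contain references */
                                                                                          NULL);  /* no finalizer */
}
#endif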

void
heap_addreference (void **reflocation)
{
        /* I currently use a separate linked list (as in the original code) to hold
           the global reference locations, but I'll change this to allocate these
           in blocks on the heap; we'll have to add JIT-Marker code for those Java
           objects then. -- phil. */

        heap_add_address_to_address_list(&references, reflocation);
}

static
__inline__
void gc_finalize (void)
{
        /* This will have to be slightly rewritten as soon as the JIT-marked heap-based lists are used. -- phil. */

        address_list_node*  curr = finalizers;

#if 0
        /* FIXME: new code, please! */
#else
        while (curr) {
                if (curr->address) {
                        if (!bitmap_testbit(mark_bits, curr->address)) {

#ifdef FINALIZER_COUNTING
                                ++gc_finalizers_executed;
#endif
                                asm_calljavamethod(((java_objectheader*)curr->address)->vftbl->class->finalizer,
                                                                   curr->address, NULL, NULL, NULL);
                                curr->address = 0;
                        }
                }

                curr = curr->next;
        }
#endif
}


__inline__
static
void gc_reclaim (void)
{
#ifdef PSEUDO_GENERATIONAL
        static void*  generation_start = 0;
        static int    generation_num = 0;
        void* addr = heap_base;
#endif
        void* free_start = heap_top; /* initialised in case the sweep loop below never runs */
        void* free_end = heap_base;
        BITBLOCK* temp_bits;
        bitmap_t* temp_bitmap;

#ifdef COLLECT_FRAGMENTATION
        unsigned long  free_size = 0;
        unsigned long  free_fragments = 0;
#endif

#ifdef PSEUDO_GENERATIONAL
        if (!generation_start || !(generation_num % 5))
                generation_start = heap_base;

        ++generation_num;
#endif

        /* 1. reset the freelists */

#if 0
        allocator_mark_free_kludge(start_bits); /* this line will be kicked out, when
                                                                                           the SIZE_FROM_CLASSINFO reclaim
                                                                                           is implemented (very soon!!) */
#endif

#ifdef PSEUDO_GENERATIONAL
        for (addr = heap_base; addr <= generation_start; ++addr) {
                if (bitmap_testbit(start_bits, addr))
                        bitmap_setbit(mark_bits, addr);
        }

        allocator_mark_free_kludge(start_bits); /* this line will be kicked out, when
                                                                                           the SIZE_FROM_CLASSINFO reclaim
                                                                                           is implemented (very soon!!) */
#endif

        allocator_reset();

        /* 2. reclaim unmarked objects */
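        /* Sweep illustration (assuming 8-byte heapblocks): a free interval
           starts at a block whose start bit is set but whose mark bit is not,
           and extends up to the next marked block:

                   heapblocks:  [ A ][ B ][ B ][ C ]
                   start bits:    S    S         S
                   mark  bits:    M              M

           Here B is unmarked garbage, so [B, C) is handed back to the
           allocator as one free chunk. */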
#if 0
        if (!testbit(start_bits, heap_base))
                free_start = heap_base;
        else
                free_start = bitmap_find_next_combination_set_unset(start_bitmap,
                                                                                                                        mark_bitmap,
                                                                                                                        free_start);

#else
        while (free_end < heap_top) {
                free_start = bitmap_find_next_combination_set_unset(start_bitmap,
                                                                                                                        mark_bitmap,
                                                                                                                        free_end);

                if (free_start < heap_top) {
                        free_end = bitmap_find_next_setbit(mark_bitmap, (void*)((long)free_start + 8)); /* FIXME: constant used */

                        if (free_end < heap_top) {
                                allocator_free(free_start, (long)free_end - (long)free_start);

#ifdef COLLECT_FRAGMENTATION
                                free_size += (long)free_end - (long)free_start;
                                free_fragments++;
#endif

#ifdef COLLECT_LIFESPAN
                                fprintf(tracefile,
                                                "free\t0x%lx\t0x%lx\n",
                                                (unsigned long)free_start,
                                                (unsigned long)free_end);
#endif

#ifdef NEW_COLLECT_LIFESPAN
                                lifespan_free(free_start, free_end);
#endif

#ifndef SIZE_FROM_CLASSINFO
                                /* would make trouble with JIT-Marker support. The Marker for unused blocks
                                   might be called, leading to a bad dereference. -- phil. */
                                bitmap_setbit(mark_bits, free_start); /* necessary to calculate obj-size bitmap based. */
#endif
                        }
                } else {
                        free_end = heap_top;
                }
        }
#endif

        /* 3.1. swap mark & start bitmaps */
        temp_bits = mark_bits;
        mark_bits = start_bits;
        start_bits = temp_bits;

        temp_bitmap = mark_bitmap;
        mark_bitmap = start_bitmap;
        start_bitmap = temp_bitmap;

#if 0 /* operation already handled in allocate */
        /* 3.2. mask reference bitmap */
        bitmap_mask_with_bitmap(reference_bitmap, start_bitmap);
#endif

        /* 3.3. update heap_top */
        if (free_start < heap_top) {
                heap_top = free_start;
#ifdef NEW_COLLECT_LIFESPAN
                lifespan_free(free_start, free_end);
#endif
        }

#if 0
        if (heap_top < heap_limit)
                bitmap_setbit(start_bits, heap_top);
#endif

        /* 3.4. emit fragmentation info */
#ifdef COLLECT_FRAGMENTATION
        {
                unsigned long heap_full = (unsigned long)heap_top - (unsigned long)heap_base;
                unsigned long heap_life = (unsigned long)heap_top - (unsigned long)heap_base - free_size;

                fprintf(fragfile,
                                "%lu\t%lu\t%lu\t%lu\t%f\t%f\t%f\n",
                                heap_full,
                                heap_life,
                                free_size,
                                free_fragments,
                                100*(float)free_size/free_fragments,
                                100*(float)heap_life/heap_full,
                                100*(float)free_size/heap_full
                                );
        }
        fflush(fragfile);

        allocator_dump_to_file(fragsizefile);
#endif

        /* 4. adjust the collection threshold */
        heap_next_collection = next_collection_heuristic();
        if (heap_next_collection > heap_limit)
                heap_next_collection = heap_limit;

#ifdef COLLECT_LIFESPAN
        fprintf(tracefile, "heap_top\t0x%lx\n", (unsigned long)heap_top);
#endif

#ifdef PSEUDO_GENERATIONAL
        generation_start = heap_top;
#endif
}

__inline__
static
void
gc_mark_object_at (void** addr)
{
        /*
         * A note concerning the order of the tests:
         *
         * Statistics collected during a test run, where alignment
         * was tested before checking whether the addr points into
         * the heap:
         * >> LOG: 9301464 bytes for 196724 objects allocated.
         * >> LOG: 15 garbage collections performed.
         * >> LOG: 6568440 heapblocks visited, 469249 objects marked
         * >> LOG:     1064447 visits to objects already marked.
         * >> LOG:     988270 potential references not aligned.
         * >> LOG:     4049446 out of heap.
         * >> LOG:     5236 not an object.
         *
         * These results show that only about 1/4 of all heapblocks
         * point to objects; the single most important reason why a
         * heapblock cannot point at an object is that its value
         * doesn't fall within the heap area (this test was performed
         * with a 3MB heap).
         *
         * From the results, the various tests have to be conducted
         * in the following order for maximum efficiency:
         *    1. addr in heap?
         *    2. already marked?
         *    3. aligned?
         *    4. object?
         *
         * The results after reordering:
         * >> LOG: 9301464 bytes for 196724 objects allocated.
         * >> LOG: 15 garbage collections performed.
         * >> LOG: 6568440 heapblocks visited, 469249 objects marked
         * >> LOG:     1064447 visits to objects already marked.
         * >> LOG:     350 potential references not aligned.
         * >> LOG:     5037366 out of heap.
         * >> LOG:     5236 not an object.
         *
         * And using:
         *    1. addr in heap?
         *    2. already marked?
         *    3. object?
         *    4. aligned?
         *
         * >> LOG: 9301464 bytes for 196724 objects allocated.
         * >> LOG: 15 garbage collections performed.
         * >> LOG: 6568440 heapblocks visited, 469249 objects marked
         * >> LOG:     5037366 out of heap.
         * >> LOG:     1064456 visits to objects already marked.
         * >> LOG:     5539 not an object.
         * >> LOG:     38 potential references not aligned.
         *
         * Apparently, most unaligned values are already eliminated
         * when checking against the bounds of the heap. Checking this
         * property first should thus improve collection times.
         */

        /* 1.a. if addr doesn't point into the heap, return. */
        if ((unsigned long)addr - (unsigned long)heap_base >=
                ((long)heap_top - (long)heap_base)) {
#ifdef GC_COLLECT_STATISTICS
                ++gc_mark_not_inheap;
#endif
                return;
        }

        /* 1.b. if align(addr) has already been marked during this collection, return. */
        if (bitmap_testbit(mark_bits, (void*)addr)) {
#ifdef GC_COLLECT_STATISTICS
                ++gc_mark_already_marked;
#endif
                return;
        }

        /* 1.c. if align(addr) doesn't point to the start of an object, return. */
        if (!bitmap_testbit(start_bits, (void*)addr)) {
#ifdef GC_COLLECT_STATISTICS
                ++gc_mark_not_object;
#endif
                return;
        }

        /* 1.d. if addr is not properly aligned, return. */
        if ((long)addr & ((1 << ALIGN) - 1)) {
#ifdef GC_COLLECT_STATISTICS
                ++gc_mark_not_aligned;
#endif
                return;
        }

        /* 2. Mark the object at addr */
        bitmap_setbit(mark_bits, (void*)addr);
#ifdef GC_COLLECT_STATISTICS
        ++gc_mark_objects_marked;
#endif

#ifdef JIT_MARKER_SUPPORT
        /* a cast is needed here: addr is an untyped heap pointer */
        asm_calljavamethod(((java_objectheader*)addr)->vftbl->class->marker, addr, NULL, NULL, NULL);
#else

        /* 3. mark the references contained within the extents of the object at addr */
        if (bitmap_testbit(reference_bits, addr)) {
                /* 3.1. find the end of the object */
                void** end;

#ifdef SIZE_FROM_CLASSINFO
                if (((java_objectheader*)addr)->vftbl == class_array->vftbl)
                        end = (void**)((long)addr + (long)((java_arrayheader*)addr)->alignedsize);
                else
                        end = (void**)((long)addr + (long)((java_objectheader*)addr)->vftbl->class->alignedsize);
#else
                end = (void**)bitmap_find_next_setbit(start_bitmap, addr + 1); /* points just behind the object */
#endif

                /* 3.2. mark the references within the object at addr */
#ifdef GC_COLLECT_STATISTICS
                gc_mark_heapblocks_visited += ((long)end - (long)addr) >> ALIGN;
#endif
                while (addr < end)
                        gc_mark_object_at(*(addr++));
        }
#endif /* JIT_MARKER_SUPPORT */

        return;
}


__inline__
static
void gc_mark_references (void)
{
        address_list_node* curr = references;

        while (curr) {
                gc_mark_object_at(*((void**)(curr->address)));
                curr = curr->next;
        }
}

__inline__
static
void
markreferences(void** start, void** end)
{
        while (start < end)
                gc_mark_object_at(*(start++));
}

__inline__
static
void gc_mark_stack (void)
{
        void *dummy;

#ifdef USE_THREADS
        thread *aThread;

        if (currentThread == NULL) {
                void **top_of_stack = &dummy;

                if (top_of_stack > stackbottom)
                        markreferences(stackbottom, top_of_stack);
                else
                        markreferences(top_of_stack, stackbottom);
        }
        else {
                for (aThread = liveThreads; aThread != 0;
                         aThread = CONTEXT(aThread).nextlive) {
                        gc_mark_object_at((void*)aThread);
                        if (CONTEXT(aThread).usedStackTop > CONTEXT(aThread).stackEnd)
                                markreferences((void**)CONTEXT(aThread).stackEnd,
                                                           (void**)CONTEXT(aThread).usedStackTop);
                        else
                                markreferences((void**)CONTEXT(aThread).usedStackTop,
                                                           (void**)CONTEXT(aThread).stackEnd);
                }

                markreferences((void**)&threadQhead[0],
                                           (void**)&threadQhead[MAX_THREAD_PRIO]);
        }
#else
        void **top_of_stack = &dummy;

        if (top_of_stack > stackbottom)
                markreferences(stackbottom, top_of_stack);
        else
                markreferences(top_of_stack, stackbottom);
#endif
}


static
void gc_run (void)
{
        static int armageddon_is_near = 0;

        if (armageddon_is_near) {
                /* armageddon_is_here! */
                fprintf(stderr, "Oops, seems like there's a slight problem here: gc_run() called while still running?!\n");
                return;
        }

        armageddon_is_near = true;
        heap_next_collection = heap_limit;  /* try to avoid finalizer-induced collections */

        bitmap_clear(mark_bitmap);

        asm_dumpregistersandcall(gc_mark_stack);
        gc_mark_references();
        gc_finalize();
        gc_reclaim();

        armageddon_is_near = false;

#ifdef GC_COLLECT_STATISTICS
        ++gc_collections_count;
#endif
}


/************************* Function: gc_init **********************************

  Initializes anything that must be initialized to call the gc on the right
  stack.

******************************************************************************/

void
gc_init (void)
{
        /* nothing to do here yet; the heap itself is set up by heap_init */
}

/************************** Function: gc_call ********************************

  Calls the garbage collector. The garbage collector should always be called
  using this function since it ensures that enough stack space is available.

******************************************************************************/

void
gc_call (void)
{
#ifdef USE_THREADS
        u1 dummy;

        assert(blockInts == 0);

        intsDisable();
        if (currentThread == NULL || currentThread == mainThread) {
                CONTEXT(mainThread).usedStackTop = &dummy;
                gc_run();
        }
        else
                asm_switchstackandcall(CONTEXT(mainThread).usedStackTop, gc_run,
                                                           (void**)&(CONTEXT(currentThread).usedStackTop));
        intsRestore();
#else
        gc_run();
#endif
}



/*
 * These are local overrides for various environment variables in Emacs.
 * Please do not remove this and leave it at the end of the file, where
 * Emacs will automagically detect them.
 * ---------------------------------------------------------------------
 * Local variables:
 * mode: c
 * indent-tabs-mode: t
 * c-basic-offset: 4
 * tab-width: 4
 * End:
 */