2 #include <unistd.h> /* getpagesize, mmap, ... */
11 #include "threads/thread.h"
12 #include "threads/locks.h"
/* Some platforms' <sys/mman.h> does not define MAP_FAILED; supply the
   conventional (void*)-1 sentinel ourselves. */
17 #if !defined(HAVE_MAP_FAILED)
18 #define MAP_FAILED ((void*) -1)
/* Page-size mask helper; note this calls getpagesize() on every use. */
22 #define PAGESIZE_MINUS_ONE (getpagesize() - 1)
/* Compile-time selection of the "when to collect next" heuristic below. */
27 #define HEURISTIC_SEL 0
28 #define HEURISTIC_PARAM 2UL
/* First collection threshold: heap_top + 1/16 of the remaining heap. */
31 #define next_collection_heuristic_init() \
32 (void*)((long)heap_top + (((long)heap_limit - (long)heap_top) >> 4))
/* Heuristic 0: advance by a fraction (1 >> HEURISTIC_PARAM) of the free space. */
34 #if HEURISTIC_SEL == 0
35 #define next_collection_heuristic() \
36 (void*)((long)heap_top + (((long)heap_limit - (long)heap_top) >> HEURISTIC_PARAM))
/* Heuristic 1: advance proportionally to the space already in use. */
37 #elif HEURISTIC_SEL == 1
38 #define next_collection_heuristic() \
39 (void*)((long)heap_top + (((long)heap_top - (long)heap_base) << HEURISTIC_PARAM))
/* Heuristic 2: advance by a fixed byte count. */
40 #elif HEURISTIC_SEL == 2
41 #define next_collection_heuristic() \
42 (void*)((long)heap_top + HEURISTIC_PARAM)
/* Optional instrumentation / experimental features, all off by default. */
46 //#define PSEUDO_GENERATIONAL
47 //#define COLLECT_LIFESPAN
48 //#define NEW_COLLECT_LIFESPAN
49 //#define COLLECT_FRAGMENTATION
50 //#define COLLECT_SIZES
52 //#define GC_COLLECT_STATISTICS
53 //#define FINALIZER_COUNTING
56 #undef STRUCTURES_ON_HEAP
58 //#define STRUCTURES_ON_HEAP
64 #include "allocator.h" /* rev. 1 allocator */
65 #include "bitmap2.h" /* rev. 2 bitmap management */
/* Round size DOWN to the 1<<ALIGN alignment; callers add (1<<ALIGN)-1 first
   to get round-UP behavior.  ALIGN is defined outside this listing. */
73 #define align_size(size) ((size) & ~((1 << ALIGN) - 1))
74 #define MAP_ADDRESS (void*) 0x10000000
76 /* --- file-wide variables */
/* NOTE(review): this listing has interior gaps (embedded original line
   numbers jump); declarations may be missing between the visible lines. */
/* Heap area bounds and allocation frontier:
   heap_base .. heap_top is allocated, heap_top .. heap_limit is growable;
   heap_next_collection is the threshold that triggers the next GC. */
78 static void* heap_base = NULL;
79 static SIZE heap_size = 0;
80 static void* heap_top = NULL;
81 static void* heap_limit = NULL;
82 static void* heap_next_collection = NULL;
/* One bit per heap block: object starts, objects containing references,
   and the current collection's mark bits.  The *_bits pointers cache the
   raw bit arrays of the corresponding bitmap_t. */
84 static bitmap_t* start_bitmap = NULL;
85 static BITBLOCK* start_bits = NULL;
86 static bitmap_t* reference_bitmap = NULL;
87 static BITBLOCK* reference_bits = NULL;
88 static bitmap_t* mark_bitmap = NULL;
89 static BITBLOCK* mark_bits = NULL;
/* Bottom of the scanned stack, as handed to heap_init(). */
91 static void** stackbottom = NULL;
/* Linked-list node for address lists.  The payload field (original line 94)
   is not visible in this listing -- presumably `void* address;`, since
   members ->address are dereferenced below.  TODO confirm. */
93 typedef struct address_list_node {
95 struct address_list_node* prev;
96 struct address_list_node* next;
/* Global reference locations and addresses of objects with finalizers. */
99 static address_list_node* references = NULL;
100 static address_list_node* finalizers = NULL;
/* Collection/marking statistics, compiled in only on request. */
102 #ifdef GC_COLLECT_STATISTICS
104 static unsigned long gc_collections_count = 0;
106 static unsigned long gc_alloc_total = 0;
107 static unsigned long gc_alloc_count = 0;
109 static unsigned long gc_mark_heapblocks_visited = 0;
110 static unsigned long gc_mark_not_aligned = 0;
111 static unsigned long gc_mark_not_inheap = 0;
112 static unsigned long gc_mark_not_object = 0;
113 static unsigned long gc_mark_objects_marked = 0;
114 static unsigned long gc_mark_already_marked = 0;
116 static unsigned long gc_mark_null_pointer = 0;
120 #ifdef FINALIZER_COUNTING
122 static unsigned long gc_finalizers_executed = 0;
123 static unsigned long gc_finalizers_detected = 0;
/* Mutex serializing heap_allocate() (iMux is a project thread type). */
128 static iMux alloc_mutex;
/* Optional trace/statistics output streams. */
131 #ifdef COLLECT_LIFESPAN
132 static FILE* tracefile;
135 #ifdef COLLECT_FRAGMENTATION
136 static FILE* fragfile;
137 static FILE* fragsizefile;
140 /* --- implementation */
/* heap_init: set up the garbage-collected heap.
 * Allocates the heap area (anonymous mmap where available, malloc
 * otherwise), creates the three per-block bitmaps, records the caller's
 * stack bottom, and computes the first collection threshold.
 * NOTE(review): the listing is incomplete here -- the return type,
 * braces, and several statements (e.g. the #else between lines 154 and
 * 156, the error-exit after the MAP_FAILED check) are not visible. */
143 heap_init (SIZE size,
144 SIZE startsize, /* when should we collect for the first time ? */
145 void **in_stackbottom)
147 /* 1. Initialise the freelists & reset the allocator's state */
150 /* 2. Allocate at least (alignment!) size bytes of memory for the heap */
151 heap_size = align_size(size + ((1 << ALIGN) - 1));
153 #if !(defined(HAVE_MAP_ANONYMOUS))
/* Fallback path when anonymous mappings are unavailable. */
154 heap_base = malloc(heap_size);
156 heap_base = (void*) mmap (NULL,
/* round the request up to a whole number of pages */
157 ((size_t)heap_size + PAGESIZE_MINUS_ONE) & ~PAGESIZE_MINUS_ONE,
158 PROT_READ | PROT_WRITE,
159 MAP_PRIVATE | MAP_ANONYMOUS,
164 if (heap_base == (void*)MAP_FAILED) {
165 /* unable to allocate the requested amount of memory */
166 fprintf(stderr, "heap2.c: The queen, mylord, is dead! (mmap failed)\n");
170 /* 3. Allocate the bitmaps */
171 start_bitmap = bitmap_allocate(heap_base, heap_size);
172 reference_bitmap = bitmap_allocate(heap_base, heap_size);
173 mark_bitmap = bitmap_allocate(heap_base, heap_size);
/* cache the raw bit arrays for the hot paths */
175 start_bits = start_bitmap->bitmap;
176 reference_bits = reference_bitmap->bitmap;
177 mark_bits = mark_bitmap->bitmap;
179 /* 4. Mark the first free-area as an object-start */
180 bitmap_setbit(start_bits, heap_base);
182 /* 5. Initialise the heap's state (heap_top, etc.) */
183 stackbottom = in_stackbottom; /* copy the stackbottom */
185 heap_top = heap_base; /* the current end of the heap (just behind the last allocated object) */
186 heap_limit = (void*)((long)heap_base + heap_size); /* points just behind the last accessible block of the heap */
188 /* 6. calculate a useful first collection limit */
189 /* This is extremely primitive at this point...
190 we should replace it with something more useful -- phil. */
191 heap_next_collection = next_collection_heuristic_init();
193 /* 7. Init the global reference lists & finalizer addresses */
/* When the list structures themselves live on the heap, register their
   heads as global references so collection keeps them alive. */
197 #ifdef STRUCTURES_ON_HEAP
198 heap_addreference(&references);
199 heap_addreference(&finalizers);
203 /* 8. Init the mutexes for synchronization */
204 alloc_mutex.holder = 0;
207 /* 9. Set up collection of lifespan data */
208 #ifdef COLLECT_LIFESPAN
/* Plain-text trace vs. gzip-compressed trace -- the selecting #if/#else
   around lines 210/212 is not visible in this listing. */
210 tracefile = fopen("heap.trace", "w");
212 tracefile = popen("gzip -9 >heap.trace.gz", "w");
215 fprintf(stderr, "heap2.c: Radio Ga Ga! (fopen failed)\n");
219 fprintf(tracefile, "heap_base\t0x%lx\n", heap_base);
220 fprintf(tracefile, "heap_limit\t0x%lx\n", heap_limit);
221 fprintf(tracefile, "heap_top\t0x%lx\n", heap_top);
224 #if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
225 lifespan_init(heap_base, heap_size);
228 /* 10. Set up collection of fragmentation data */
229 #ifdef COLLECT_FRAGMENTATION
230 fragfile = popen("gzip -9 >fragmentation.gz", "w");
231 fragsizefile = popen("gzip -9 >freeblocks.gz", "w");
/* heap_call_finalizer_for_object_at: invoke the Java finalizer of the
 * object at object_addr via the JIT call stub, and count the call when
 * finalizer statistics are enabled.  (Return type and braces are not
 * visible in this listing.) */
238 heap_call_finalizer_for_object_at(java_objectheader* object_addr)
240 asm_calljavamethod(object_addr->vftbl->class->finalizer, object_addr, NULL, NULL, NULL);
241 #ifdef FINALIZER_COUNTING
242 ++gc_finalizers_executed;
/* NOTE(review): the enclosing function's header is not visible in this
 * listing -- from its actions (finalize survivors, release bitmaps,
 * munmap the heap, dump statistics) this is presumably heap_close()
 * or a similar shutdown routine.  TODO confirm against the full file. */
249 address_list_node* curr = finalizers;
251 /* 0. clean up lifespan module */
252 #ifdef COLLECT_LIFESPAN
260 #if defined(NEW_COLLECT_LIFESPAN)
264 #ifdef COLLECT_FRAGMENTATION
266 pclose(fragsizefile);
269 /* 1. Clean up on the heap... finalize all remaining objects */
/* Walk the finalizer list; the loop construct and list-advance code are
   not visible here (lines 270-282 are gapped). */
272 address_list_node* prev = curr;
273 java_objectheader* addr = (java_objectheader*)(curr->address);
/* only finalize addresses that still hold a live object start */
275 if (addr && bitmap_testbit(start_bits, addr))
276 heap_call_finalizer_for_object_at(addr);
283 /* 2. Release the bitmaps */
284 bitmap_release(start_bitmap);
285 bitmap_release(reference_bitmap);
286 bitmap_release(mark_bitmap);
288 /* 3. Release the memory allocated to the heap */
/* NOTE(review): munmap is unconditional here, but heap_init may have used
   malloc when HAVE_MAP_ANONYMOUS is absent -- the matching #if may be in a
   gapped line; verify against the full file. */
290 munmap(heap_base, heap_size);
292 /* 4. emit statistical data */
293 #ifdef GC_COLLECT_STATISTICS
/* logtext is a project-global log buffer; the dolog()/log calls that
   flush it are in gapped lines. */
294 sprintf(logtext, "%ld bytes for %ld objects allocated.",
295 gc_alloc_total, gc_alloc_count);
297 sprintf(logtext, "%ld garbage collections performed.", gc_collections_count);
299 sprintf(logtext, "%ld heapblocks visited, %ld objects marked",
300 gc_mark_heapblocks_visited, gc_mark_objects_marked);
302 sprintf(logtext, " %ld null pointers.", gc_mark_null_pointer);
304 sprintf(logtext, " %ld out of heap.", gc_mark_not_inheap);
306 sprintf(logtext, " %ld visits to objects already marked.", gc_mark_already_marked);
308 sprintf(logtext, " %ld not an object.", gc_mark_not_object);
310 sprintf(logtext, " %ld potential references not aligned.", gc_mark_not_aligned);
314 #ifdef FINALIZER_COUNTING
315 sprintf(logtext, "%ld objects with a finalizer", gc_finalizers_detected);
318 if (gc_finalizers_detected == gc_finalizers_executed)
319 sprintf(logtext, " all finalizers executed.");
321 sprintf(logtext, " only %ld finalizers executed.", gc_finalizers_executed);
325 #if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
/* heap_add_address_to_address_list: insert `address` into the ascending-
 * sorted list at *list.  Sorted order lets finalization walk addresses in
 * the order garbage is freed.  The loop's closing braces, the else branch,
 * and the final link-in statements fall in gapped lines of this listing.
 * NOTE(review): malloc result is used unchecked -- OOM here would crash. */
333 heap_add_address_to_address_list(address_list_node** list, void* address)
335 /* Note: address lists are kept sorted to simplify finalization */
337 address_list_node* new_node = malloc(sizeof(address_list_node));
338 new_node->address = address;
339 new_node->next = NULL;
/* advance to the insertion point: first node whose successor's address
   is >= the new address */
341 while (*list && (*list)->next) {
342 if ((*list)->next->address < address)
343 list = &(*list)->next;
345 new_node->next = *list;
351 new_node->next = *list;
/* heap_add_address_to_address_list_unsorted: O(1) push of `address` onto
 * the front of *list, used where ordering does not matter (the global
 * reference list).  Second parameter and the final *list = new_node
 * assignment are in gapped lines.
 * NOTE(review): malloc result is used unchecked, as in the sorted variant. */
359 heap_add_address_to_address_list_unsorted(address_list_node** list,
362 address_list_node* new_node = malloc(sizeof(address_list_node));
363 new_node->address = address;
364 new_node->next = *list;
/* heap_add_finalizer_for_object_at: remember that the object at `addr`
 * has a finalizer, by recording the address in the sorted finalizer
 * list; optionally logs the fact to the lifespan trace. */
372 heap_add_finalizer_for_object_at(void* addr)
374 /* Finalizers seem to be very rare... for this reason, I keep a linked
375 list of object addresses, which have a finalizer attached. This list
376 is kept in ascending order according to the order garbage is freed.
377 This list is currently kept separate from the heap, but should be
378 moved onto it, but some JIT-marker code to handle these special
379 objects will need to be added first. -- phil. */
381 heap_add_address_to_address_list(&finalizers, addr);
383 #ifdef COLLECT_LIFESPAN
384 fprintf(tracefile, "finalizer\t0x%lx\n", addr);
/* heap_allocate: allocate an aligned block of at least in_length bytes.
 * Strategy: (1) try the allocator's freelists; (2) if the collection
 * threshold would be crossed, run a GC and retry; (3) otherwise grow the
 * heap by bumping heap_top.  Marks the object-start (and reference) bits,
 * records an attached finalizer, and returns the new block.
 * NOTE(review): listing gaps hide the return type, a middle parameter
 * (between in_length and finalizer -- presumably the references flag
 * consulted at lines 457/459), braces, the gc_call()/failure paths, and
 * the final return.  Serialized by alloc_mutex. */
389 heap_allocate (SIZE in_length,
391 methodinfo *finalizer)
/* round the request up to the heap's block alignment */
393 SIZE length = align_size(in_length + ((1 << ALIGN) - 1));
394 void* free_chunk = NULL;
397 /* check for misaligned in_length parameter */
398 if (length != in_length)
400 "heap2.c: heap_allocate was passed unaligned in_length parameter: %ld, \n aligned to %ld. (mistrust)\n",
404 #ifdef FINALIZER_COUNTING
406 ++gc_finalizers_detected;
409 #if defined(COLLECT_LIFESPAN) || defined(NEW_COLLECT_LIFESPAN)
410 /* perform garbage collection to collect data for lifespan analysis */
411 if (heap_top > heap_base)
416 lock_mutex(&alloc_mutex);
419 /* 1. attempt to get a free block with size >= length from the freelists */
420 free_chunk = allocator_alloc(length);
422 /* 2. if unsuccessful, try alternative allocation strategies */
424 /* 2.a if the collection threshold would be exceeded, collect the heap */
425 if ((long)heap_top + length > (long)heap_next_collection) {
426 /* 2.a.1. collect if the next_collection threshold would be exceeded */
429 /* 2.a.2. we just ran a collection, recheck the freelists */
430 free_chunk = allocator_alloc(length);
434 /* 2.a.3. we can't satisfy the request from the freelists, check
435 against the heap_limit whether growing the heap is possible */
436 if ((long)heap_top + length > (long)heap_limit)
440 /* 2.b. grow the heap */
441 free_chunk = heap_top;
442 heap_top = (void*)((long)heap_top + length);
446 /* 3.a. mark all necessary bits, store the finalizer & return the newly allocated block */
448 /* I don't mark the object-start anymore, as it always is at the beginning of a free-block,
449 which already is marked (Note: The first free-block gets marked in heap_init). -- phil. */
450 bitmap_setbit(start_bits, free_chunk); /* mark the new object */
452 #ifndef SIZE_FROM_CLASSINFO
453 bitmap_setbit(start_bits, (void*)((long)free_chunk + (long)length)); /* mark the freespace behind the new object */
/* record whether the object contains references (the selecting condition
   is in a gapped line -- presumably the hidden middle parameter) */
457 bitmap_setbit(reference_bits, free_chunk);
459 bitmap_clearbit(reference_bits, free_chunk);
461 /* store a hint, that there's a finalizer for this address */
463 heap_add_finalizer_for_object_at(free_chunk);
465 #ifdef GC_COLLECT_STATISTICS
466 gc_alloc_total += length;
470 #ifdef COLLECT_LIFESPAN
471 fprintf(tracefile, "alloc\t0x%lx\t0x%lx\n",
472 free_chunk, (long)free_chunk + length);
475 #if defined(NEW_COLLECT_LIFESPAN) || defined(COLLECT_SIZES)
476 lifespan_alloc(free_chunk, length);
481 unlock_mutex(&alloc_mutex);
/* heap_addreference: register a global GC root -- a memory location that
 * may hold a reference into the heap and must be scanned at mark time.
 * Order is irrelevant, so the unsorted (O(1)) list insert is used. */
487 heap_addreference (void **reflocation)
489 /* I currently use a separate linked list (as in the original code) to hold
490 the global reference locations, but I'll change this to allocate these
491 in blocks on the heap; we'll have to add JIT-Marker code for those Java
492 objects then. -- phil. */
494 heap_add_address_to_address_list_unsorted(&references, reflocation);
/* gc_finalize: after a mark phase, walk the finalizer list and run the
 * Java finalizer of every registered object that was NOT marked (i.e. is
 * about to be reclaimed).  The loop construct, list unlinking, and the
 * use of `prev` are in gapped lines of this listing. */
499 void gc_finalize (void)
501 /* This will have to be slightly rewritten as soon the JIT-marked heap-based lists are used. -- phil. */
503 address_list_node* curr = finalizers;
504 address_list_node* prev;
507 /* FIXME: new code, please! */
/* unmarked == dead: run its finalizer now */
511 if (!bitmap_testbit(mark_bits, curr->address)) {
513 #ifdef FINALIZER_COUNTING
514 ++gc_finalizers_executed;
516 asm_calljavamethod(((java_objectheader*)curr->address)->vftbl->class->finalizer,
517 curr->address, NULL, NULL, NULL);
/* gc_reclaim: the sweep phase.  Scans the bitmaps for runs that are
 * object-starts but unmarked, returns them to the allocator's freelists,
 * swaps the mark/start bitmaps for the next cycle, lowers heap_top past a
 * trailing free run, and recomputes the collection threshold.
 * NOTE(review): listing gaps hide several statements (free_start and
 * temp_bits declarations, fprintf targets at lines 601-607 and 656-665,
 * loop/if closing braces); comments below describe only the visible code. */
530 void gc_reclaim (void)
532 #ifdef PSEUDO_GENERATIONAL
533 static void* generation_start = 0;
534 static int generation_num = 0;
535 void* addr = heap_base;
538 void* free_end = heap_base;
540 bitmap_t* temp_bitmap;
542 #ifdef COLLECT_FRAGMENTATION
543 unsigned long free_size = 0;
544 unsigned long free_fragments = 0;
/* every 5th collection is a full one: restart the generation window */
547 #ifdef PSEUDO_GENERATIONAL
548 if (!generation_start || !(generation_num % 5))
549 generation_start = heap_base;
554 /* 1. reset the freelists */
557 allocator_mark_free_kludge(start_bits); /* this line will be kicked out, when
558 the SIZE_FROM_CLASSINFO reclaim
559 is implemented (very soon!!) */
/* pseudo-generational mode: pre-mark everything below the generation
   window so it survives this (partial) collection */
562 #ifdef PSEUDO_GENERATIONAL
563 for (addr = heap_base; addr <= generation_start; ++addr) {
564 if (bitmap_testbit(start_bits, addr))
565 bitmap_setbit(mark_bits, addr);
568 allocator_mark_free_kludge(start_bits); /* this line will be kicked out, when
569 the SIZE_FROM_CLASSINFO reclaim
570 is implemented (very soon!!) */
575 /* 2. reclaim unmarked objects */
577 if (!testbit(start_bits, heap_base))
578 free_start = heap_base;
580 free_start = bitmap_find_next_combination_set_unset(start_bitmap,
/* sweep: each iteration finds the next object-start that is unmarked
   (start of a free run) and the next mark bit after it (end of the run) */
585 while (free_end < heap_top) {
586 free_start = bitmap_find_next_combination_set_unset(start_bitmap,
590 if (free_start < heap_top) {
591 free_end = bitmap_find_next_setbit(mark_bitmap, (void*)((long)free_start + 8)); /* FIXME: constant used */
593 if (free_end < heap_top) {
594 allocator_free(free_start, (long)free_end - (long)free_start);
596 #ifdef COLLECT_FRAGMENTATION
597 free_size += (long)free_end - (long)free_start;
601 #ifdef COLLECT_LIFESPAN
603 "free\t0x%lx\t0x%lx\n",
608 #ifdef NEW_COLLECT_LIFESPAN
609 lifespan_free(free_start, free_end);
612 #ifndef SIZE_FROM_CLASSINFO
613 /* would make trouble with JIT-Marker support. The Marker for unused blocks
614 might be called, leading to a bad dereference. -- phil. */
615 bitmap_setbit(mark_bits, free_start); /* necessary to calculate obj-size bitmap based. */
624 /* 3.1. swap mark & start bitmaps */
/* after the sweep, the mark bitmap describes live object starts, so it
   becomes next cycle's start bitmap (and vice versa) */
625 temp_bits = mark_bits;
626 mark_bits = start_bits;
627 start_bits = temp_bits;
629 temp_bitmap = mark_bitmap;
630 mark_bitmap = start_bitmap;
631 start_bitmap = temp_bitmap;
633 #if 0 /* operation already handled in allocate */
634 /* 3.2. mask reference bitmap */
635 bitmap_mask_with_bitmap(reference_bitmap, start_bitmap);
638 /* 3.3. update heap_top */
/* a trailing free run lets us shrink the allocated frontier */
639 if (free_start < heap_top) {
640 heap_top = free_start;
641 #ifdef NEW_COLLECT_LIFESPAN
642 lifespan_free(free_start, free_end);
/* keep the invariant that the block at heap_top is an object-start */
647 if (heap_top < heap_limit)
648 bitmap_setbit(start_bits, heap_top);
651 /* 3.4. emit fragmentation info */
652 #ifdef COLLECT_FRAGMENTATION
654 unsigned long heap_full = (unsigned long)heap_top - (unsigned long)heap_base;
655 unsigned long heap_life = (unsigned long)heap_top - (unsigned long)heap_base - free_size;
658 "%ld\t%ld\t%ld\t%ld\t%f\t%f\t%f\n",
663 100*(float)free_size/(free_fragments ? free_fragments : 1),
664 100*(float)heap_life/(heap_full ? heap_full : 1),
665 100*(float)free_size/(heap_full ? heap_full : 1)
670 allocator_dump_to_file(fragsizefile);
673 /* 4. adjust the collection threshold */
674 heap_next_collection = next_collection_heuristic();
675 if (heap_next_collection > heap_limit)
676 heap_next_collection = heap_limit;
678 #ifdef COLLECT_LIFESPAN
679 fprintf(tracefile, "heap_top\t0x%lx\n", heap_top);
682 #ifdef PSEUDO_GENERATIONAL
683 generation_start = heap_top;
/* gc_mark_object_at: conservative mark routine.  Treats addr as a
 * potential reference; rejects it via a series of cheap tests (documented
 * in the measurement note below), then marks the object and, if its
 * reference bit is set, recursively marks every word within its extent.
 * NOTE(review): listing gaps hide the return type, several early
 * `return`s after the statistics increments, the `end` declaration, and
 * the scan loop surrounding line 812. */
690 gc_mark_object_at (void** addr)
693 * A note concerning the order of the tests:
695 * Statistics collected during a test run, where alignment
696 * was tested before checking whether the addr points into
698 * >> LOG: 9301464 bytes for 196724 objects allocated.
699 * >> LOG: 15 garbage collections performed.
700 * >> LOG: 6568440 heapblocks visited, 469249 objects marked
701 * >> LOG: 1064447 visits to objects already marked.
702 * >> LOG: 988270 potential references not aligned.
703 * >> LOG: 4049446 out of heap.
704 * >> LOG: 5236 not an object.
706 * These results show, that only about 1/4 of all heapblocks
707 * point to objects; The single most important reason why a
708 * heapblock can not point at an object is, that it's value
709 * doesn't fall within the heap area (this test was performed
712 * From the results, the various tests have to be conducted
713 * in the following order for maximum efficiency:
715 * 2. already marked ?
719 * The results after reordering:
720 * >> LOG: 9301464 bytes for 196724 objects allocated.
721 * >> LOG: 15 garbage collections performed.
722 * >> LOG: 6568440 heapblocks visited, 469249 objects marked
723 * >> LOG: 1064447 visits to objects already marked.
724 * >> LOG: 350 potential references not aligned.
725 * >> LOG: 5037366 out of heap.
726 * >> LOG: 5236 not an object.
730 * 2. already marked ?
734 * >> LOG: 9301464 bytes for 196724 objects allocated.
735 * >> LOG: 15 garbage collections performed.
736 * >> LOG: 6568440 heapblocks visited, 469249 objects marked
737 * >> LOG: 5037366 out of heap.
738 * >> LOG: 1064456 visits to objects already marked.
739 * >> LOG: 5539 not an object.
740 * >> LOG: 38 potential references not aligned.
742 * Apparently, most unaligned values will already be eliminated
743 * when checking against the bounds of the heap. Checking this
744 * property first, should thus improve collection times.
747 /* 1.a. if addr doesn't point into the heap, return. */
/* single unsigned comparison covers both addr < heap_base and
   addr >= heap_top (the classic range-check trick) */
748 if ((unsigned long)addr - (unsigned long)heap_base >=
749 ((long)heap_top - (long)heap_base)) {
750 #ifdef GC_COLLECT_STATISTICS
752 ++gc_mark_null_pointer;
754 ++gc_mark_not_inheap;
759 /* 1.b. if align(addr) has already been marked during this collection, return. */
760 if (bitmap_testbit(mark_bits, (void*)addr)) {
761 #ifdef GC_COLLECT_STATISTICS
762 ++gc_mark_already_marked;
767 /* 1.c. if align(addr) doesn't point to the start of an object, return. */
768 if (!bitmap_testbit(start_bits, (void*)addr)) {
769 #ifdef GC_COLLECT_STATISTICS
770 ++gc_mark_not_object;
775 /* 1.d. if addr is not properly aligned, return. */
776 if ((long)addr & ((1 << ALIGN) - 1)) {
777 #ifdef GC_COLLECT_STATISTICS
778 ++gc_mark_not_aligned;
783 /* 2. Mark the object at addr */
784 bitmap_setbit(mark_bits, (void*)addr);
785 #ifdef GC_COLLECT_STATISTICS
786 ++gc_mark_objects_marked;
789 #ifdef JIT_MARKER_SUPPORT
790 asm_calljavamethod(addr->vftbl->class->marker, addr, NULL, NULL, NULL);
793 /* 3. mark the references contained within the extents of the object at addr */
794 if (bitmap_testbit(reference_bits, addr)) {
795 /* 3.1. find the end of the object */
/* object size either comes from class info (exact) or from the next
   object-start bit (bitmap-based, conservative) */
798 #ifdef SIZE_FROM_CLASSINFO
799 if (((java_objectheader*)addr)->vftbl == class_array->vftbl)
800 end = (void**)((long)addr + (long)((java_arrayheader*)addr)->alignedsize);
802 end = (void**)((long)addr + (long)((java_objectheader*)addr)->vftbl->class->alignedsize);
804 end = (void**)bitmap_find_next_setbit(start_bitmap, addr + 1); /* points just behind the object */
807 /* 3.2. mark the references within the object at addr */
808 #ifdef GC_COLLECT_STATISTICS
809 gc_mark_heapblocks_visited += ((long)end - (long)addr) >> ALIGN;
/* recurse over every word of the object (loop header in a gapped line) */
812 gc_mark_object_at(*(addr++));
/* gc_mark_references: mark from every registered global root location.
 * Each list node's address field points at a location holding a possible
 * heap reference; the list-walk loop is in gapped lines. */
822 void gc_mark_references (void)
824 address_list_node* curr = references;
827 #ifdef GC_COLLECT_STATISTICS
828 ++gc_mark_heapblocks_visited;
830 gc_mark_object_at(*((void**)(curr->address)));
/* markreferences: conservatively treat every word in [start, end) as a
 * potential heap reference and feed it to gc_mark_object_at.  Used for
 * stack and thread-queue scanning.  (Return type line is gapped.) */
838 markreferences(void** start, void** end)
840 while (start < end) {
841 #ifdef GC_COLLECT_STATISTICS
842 ++gc_mark_heapblocks_visited;
844 gc_mark_object_at(*(start++));
/* gc_mark_stack: scan the C stack(s) for references.  `dummy` (declared
 * in a gapped line) supplies the current top-of-stack address.  With
 * threads, every live thread's saved stack span and the thread priority
 * queues are scanned; without (or with no current thread), only the span
 * between &dummy and the recorded stackbottom.  Both comparisons handle
 * either stack-growth direction.  The #ifdef USE_THREADS-style guards
 * separating the branches are in gapped lines. */
850 void gc_mark_stack (void)
857 if (currentThread == NULL) {
858 void **top_of_stack = &dummy;
860 if (top_of_stack > stackbottom)
861 markreferences(stackbottom, top_of_stack);
863 markreferences(top_of_stack, stackbottom);
/* threaded case: walk the live-thread list */
866 for (aThread = liveThreads; aThread != 0;
867 aThread = CONTEXT(aThread).nextlive) {
/* the thread object itself is a heap object -- keep it alive */
868 gc_mark_object_at((void*)aThread);
869 if (CONTEXT(aThread).usedStackTop > CONTEXT(aThread).stackEnd)
870 markreferences((void**)CONTEXT(aThread).stackEnd,
871 (void**)CONTEXT(aThread).usedStackTop);
873 markreferences((void**)CONTEXT(aThread).usedStackTop,
874 (void**)CONTEXT(aThread).stackEnd);
/* scan the scheduler's priority queues as roots too */
877 markreferences((void**)&threadQhead[0],
878 (void**)&threadQhead[MAX_THREAD_PRIO]);
/* non-threaded build: same single-stack scan as the NULL-thread case */
881 void **top_of_stack = &dummy;
883 if (top_of_stack > stackbottom)
884 markreferences(stackbottom, top_of_stack);
886 markreferences(top_of_stack, stackbottom);
/* NOTE(review): the enclosing function's header is not visible --
 * from the re-entry guard and the mark/sweep sequence this is presumably
 * gc_run(), the actual collection driver invoked via gc_call().
 * Sequence: guard against re-entry, clear mark bits, mark from registers
 * and stack, mark global roots, then (in gapped lines) gc_finalize()/
 * gc_reclaim(), and finally drop the guard. */
894 static int armageddon_is_near = 0;
896 if (armageddon_is_near) {
897 /* armageddon_is_here! */
898 fprintf(stderr, "Oops, seems like there's a slight problem here: gc_run() called while still running?!\n");
902 armageddon_is_near = true;
903 heap_next_collection = heap_limit; /* try to avoid finalizer-induced collections */
905 bitmap_clear(mark_bitmap);
/* dump registers so register-held references land on the scanned stack */
907 asm_dumpregistersandcall(gc_mark_stack);
908 gc_mark_references();
912 armageddon_is_near = false;
914 #ifdef GC_COLLECT_STATISTICS
915 ++gc_collections_count;
920 /************************* Function: gc_init **********************************
922 Initializes anything that must be initialized to call the gc on the right
925 ******************************************************************************/
932 /************************** Function: gc_call ********************************
934 Calls the garbage collector. The garbage collector should always be called
935 using this function since it ensures that enough stack space is available.
937 ******************************************************************************/
/* NOTE(review): the bodies of gc_init() and gc_call() are mostly gapped in
 * this listing; the visible fragment below belongs to gc_call().  It
 * forbids collection inside a blocked-interrupts section, records the
 * current stack top for the main thread, then switches to that thread's
 * stack to run gc_run() with sufficient stack space. */
945 assert(blockInts == 0);
948 if (currentThread == NULL || currentThread == mainThread) {
949 CONTEXT(mainThread).usedStackTop = &dummy;
953 asm_switchstackandcall(CONTEXT(mainThread).usedStackTop, gc_run,
954 (void**)&(CONTEXT(currentThread).usedStackTop));
/* Emacs local-variables footer -- leave in place. */
964 * These are local overrides for various environment variables in Emacs.
965 * Please do not remove this and leave it at the end of the file, where
966 * Emacs will automagically detect them.
967 * ---------------------------------------------------------------------
970 * indent-tabs-mode: t