/*
 * sgen-major-copying.c: Simple generational GC.
 *
 * Author:
 *	Paolo Molaro (lupus@ximian.com)
 *
 * Copyright 2005-2010 Novell, Inc (http://www.novell.com)
 *
 * Thread start/stop adapted from Boehm's GC:
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2004 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Copyright 2001-2003 Ximian, Inc
 * Copyright 2003-2010 Novell, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define MAJOR_SECTION_SIZE	PINNED_CHUNK_SIZE
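/* sections and pinned chunks are allocated aligned to MAJOR_SECTION_SIZE,
 * so masking off the low bits of an address yields the Block that
 * contains it */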
#define BLOCK_FOR_OBJECT(o)		((Block*)(((mword)(o)) & ~(MAJOR_SECTION_SIZE - 1)))
#define MAJOR_SECTION_FOR_OBJECT(o)	((GCMemSection*)BLOCK_FOR_OBJECT ((o)))

#define MAJOR_OBJ_IS_IN_TO_SPACE(o)	(MAJOR_SECTION_FOR_OBJECT ((o))->is_to_space)

static int num_major_sections = 0;

static GCMemSection *section_list = NULL;

/* pinned_chunk_list is used for allocations of objects that are never moved */
static PinnedChunk *pinned_chunk_list = NULL;

/*
 * used when moving the objects: to_space_bumper is the next free byte in
 * the current to-space section, to_space_top is the end of that section,
 * and to_space_section is the section itself.
 */
static char *to_space_bumper = NULL;
static char *to_space_top = NULL;
static GCMemSection *to_space_section = NULL;

#ifdef HEAVY_STATISTICS
static long stat_major_copy_object_failed_forwarded = 0;
static long stat_major_copy_object_failed_pinned = 0;
static long stat_major_copy_object_failed_large_pinned = 0;
static long stat_major_copy_object_failed_to_space = 0;
#endif

static gboolean
obj_is_from_pinned_alloc (char *p)
{
	return BLOCK_FOR_OBJECT (p)->role == MEMORY_ROLE_PINNED;
}

static void
free_pinned_object (char *obj, size_t size)
{
	PinnedChunk *chunk = (PinnedChunk*) BLOCK_FOR_OBJECT (obj);
	void **p = (void**)obj;
	int slot = slot_for_size (size);

	g_assert (obj >= (char*)chunk->start_data && obj < ((char*)chunk + chunk->num_pages * FREELIST_PAGESIZE));
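	/* push the slot onto this size class's free list: the first
	   word of the dead object becomes the freelist link */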
	*p = chunk->free_list [slot];
	chunk->free_list [slot] = p;
}

/*
 * Allocate a new section of memory to be used as old generation.
 */
static GCMemSection*
alloc_major_section (void)
{
	GCMemSection *section;
	int scan_starts;

	section = get_os_memory_aligned (MAJOR_SECTION_SIZE, MAJOR_SECTION_SIZE, TRUE);
	section->next_data = section->data = (char*)section + SIZEOF_GC_MEM_SECTION;
	g_assert (!((mword)section->data & 7));
	section->size = MAJOR_SECTION_SIZE - SIZEOF_GC_MEM_SECTION;
	section->end_data = section->data + section->size;
	UPDATE_HEAP_BOUNDARIES (section->data, section->end_data);
	total_alloc += section->size;
	DEBUG (3, fprintf (gc_debug_file, "New major heap section: (%p-%p), total: %zd\n", section->data, section->end_data, total_alloc));
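	/* one scan start is recorded per SCAN_START_SIZE bytes so the
	   collector can quickly find the first object in any given
	   stretch of the section */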
	scan_starts = (section->size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
	section->scan_starts = get_internal_mem (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS);
	section->num_scan_start = scan_starts;
	section->block.role = MEMORY_ROLE_GEN1;
	section->is_to_space = TRUE;

	/* add to the section list */
	section->block.next = section_list;
	section_list = section;

	++num_major_sections;

	return section;
}

static void
free_major_section (GCMemSection *section)
{
	DEBUG (3, fprintf (gc_debug_file, "Freed major section %p (%p-%p)\n", section, section->data, section->end_data));
	free_internal_mem (section->scan_starts, INTERNAL_MEM_SCAN_STARTS);
	free_os_memory (section, MAJOR_SECTION_SIZE);
	total_alloc -= MAJOR_SECTION_SIZE - SIZEOF_GC_MEM_SECTION;

	--num_major_sections;
}

static void
new_to_space_section (void)
{
	/* FIXME: if the current to_space_section is empty, we don't
	   have to allocate a new one */

	to_space_section = alloc_major_section ();
	to_space_bumper = to_space_section->next_data;
	to_space_top = to_space_section->end_data;
}

static void
to_space_set_next_data (void)
{
	g_assert (to_space_bumper >= to_space_section->next_data && to_space_bumper <= to_space_section->end_data);
	to_space_section->next_data = to_space_bumper;
}

static void
to_space_expand (void)
{
	if (to_space_section) {
		g_assert (to_space_top == to_space_section->end_data);
		to_space_set_next_data ();
	}

	new_to_space_section ();
}

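/*
 * Bump-pointer allocation into to-space: carve (size) bytes out of the
 * current to-space section, expanding into a fresh section if the current
 * one is full.  The (refs) parameter is unused by this collector.
 */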
#define MAJOR_GET_COPY_OBJECT_SPACE(dest, size, refs) do {	\
		(dest) = to_space_bumper;			\
		/* Make sure we have enough space available */	\
		if ((dest) + (size) > to_space_top) {		\
			to_space_expand ();			\
			(dest) = to_space_bumper;		\
			DEBUG (8, g_assert ((dest) + (size) <= to_space_top)); \
		}						\
		to_space_bumper += (size);			\
		DEBUG (8, g_assert (to_space_bumper <= to_space_top)); \
		to_space_section->scan_starts [((dest) - (char*)to_space_section->data)/SCAN_START_SIZE] = (dest); \
	} while (0)

static void
unset_to_space (void)
{
	/* between collections the to_space_bumper is invalidated
	   because degraded allocations might occur, so we set it to
	   NULL, just to make it explicit */
	to_space_bumper = NULL;

	/* don't unset to_space_section if we implement the FIXME in
	   new_to_space_section */
	to_space_section = NULL;
}

static gboolean
major_is_object_live (char *obj)
{
	mword objsize;

	if (ptr_in_nursery (obj))
		return FALSE;

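	/* round the size up to the next multiple of ALLOC_ALIGN,
	   which is a power of two */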
	objsize = safe_object_get_size ((MonoObject*)obj);
	objsize += ALLOC_ALIGN - 1;
	objsize &= ~(ALLOC_ALIGN - 1);

	/* LOS */
	if (objsize > MAX_SMALL_OBJ_SIZE)
		return FALSE;

	/* pinned chunk */
	if (obj_is_from_pinned_alloc (obj))
		return FALSE;

	/* now we know it's in a major heap section */
	return MAJOR_SECTION_FOR_OBJECT (obj)->is_to_space;
}

/* size is a multiple of ALLOC_ALIGN */
static void*
major_alloc_small_pinned_obj (size_t size, gboolean has_references)
{
	int slot;
	void *res = NULL;
	PinnedChunk *pchunk;

	slot = slot_for_size (size);
	/*g_print ("using slot %d for size %d (slot size: %d)\n", slot, size, freelist_sizes [slot]);*/
	g_assert (size <= freelist_sizes [slot]);
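	/* first try the per-slot free lists of the existing chunks,
	   then try to get fresh space from an existing chunk, and
	   only then allocate a new pinned chunk */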
	for (pchunk = pinned_chunk_list; pchunk; pchunk = pchunk->block.next) {
		void **p = pchunk->free_list [slot];
		if (p) {
			/*g_print ("found freelist for slot %d in chunk %p, returning %p, next %p\n", slot, pchunk, p, *p);*/
			pchunk->free_list [slot] = *p;
			res = p;
			goto found;
		}
	}
	for (pchunk = pinned_chunk_list; pchunk; pchunk = pchunk->block.next) {
		res = get_chunk_freelist (pchunk, slot);
		if (res)
			goto found;
	}
	pchunk = alloc_pinned_chunk ();
	/* FIXME: handle OOM */
	pchunk->block.next = pinned_chunk_list;
	pinned_chunk_list = pchunk;
	res = get_chunk_freelist (pchunk, slot);
 found:
	memset (res, 0, size);
	return res;
}

/*
 * size is already rounded up and we hold the GC lock.
 */
static void*
alloc_degraded (MonoVTable *vtable, size_t size)
{
	GCMemSection *section;
	void **p = NULL;

	g_assert (size <= MAX_SMALL_OBJ_SIZE);
	HEAVY_STAT (++stat_objects_alloced_degraded);
	HEAVY_STAT (stat_bytes_alloced_degraded += size);
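	/* reuse the first section with enough free space; failing
	   that, allocate a new section that is not part of to-space */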
	for (section = section_list; section; section = section->block.next) {
		if ((section->end_data - section->next_data) >= size) {
			p = (void**)section->next_data;
			break;
		}
	}
	if (!p) {
		section = alloc_major_section ();
		section->is_to_space = FALSE;
		/* FIXME: handle OOM */
		p = (void**)section->next_data;
	}
	section->next_data += size;
	degraded_mode += size;
	DEBUG (3, fprintf (gc_debug_file, "Allocated (degraded) object %p, vtable: %p (%s), size: %zd in section %p\n", p, vtable, vtable->klass->name, size, section));
	*p = vtable;
	return p;
}

static void
major_copy_or_mark_object (void **obj_slot)
{
	char *forwarded;
	char *obj = *obj_slot;
	mword objsize;

	DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));

	HEAVY_STAT (++stat_copy_object_called_major);

	DEBUG (9, fprintf (gc_debug_file, "Precise copy of %p from %p", obj, obj_slot));

	/*
	 * obj must belong to one of:
	 *
	 * 1. the nursery
	 * 2. the LOS
	 * 3. a pinned chunk
	 * 4. a non-to-space section of the major heap
	 * 5. a to-space section of the major heap
	 *
	 * In addition, objects in 1, 2 and 4 might also be pinned.
	 * Objects in 1 and 4 might be forwarded.
	 *
	 * Before we can copy the object we must make sure that we are
	 * allowed to, i.e. that the object is not pinned, not already
	 * forwarded and doesn't belong to the LOS, a pinned chunk, or
	 * a to-space section.
	 *
	 * We are usually called for to-space objects (5) when we have
	 * two remset entries for the same reference.  The first entry
	 * copies the object and updates the reference and the second
	 * calls us with the updated reference that points into
	 * to-space.  There might also be other circumstances where we
	 * get to-space objects.
	 */

	if ((forwarded = object_is_forwarded (obj))) {
		DEBUG (9, g_assert (((MonoVTable*)LOAD_VTABLE(obj))->gc_descr));
		DEBUG (9, fprintf (gc_debug_file, " (already forwarded to %p)\n", forwarded));
		HEAVY_STAT (++stat_major_copy_object_failed_forwarded);
		*obj_slot = forwarded;
		return;
	}
	if (object_is_pinned (obj)) {
		DEBUG (9, g_assert (((MonoVTable*)LOAD_VTABLE(obj))->gc_descr));
		DEBUG (9, fprintf (gc_debug_file, " (pinned, no change)\n"));
		HEAVY_STAT (++stat_major_copy_object_failed_pinned);
		return;
	}

	if (ptr_in_nursery (obj))
		goto copy;

	/*
	 * At this point we know obj is not pinned, not forwarded and
	 * belongs to 2, 3, 4, or 5.
	 *
	 * LOS objects (2) are simple, at least as long as we always
	 * follow the rule: if objsize > MAX_SMALL_OBJ_SIZE, pin the
	 * object and return it.  At the end of major collections, we
	 * walk the LOS list and if the object is pinned, it is marked,
	 * otherwise it can be freed.
	 *
	 * Pinned chunks (3) and major heap sections (4, 5) both
	 * reside in blocks, which are always aligned, so once we've
	 * eliminated LOS objects, we can just access the block and
	 * see whether it's a pinned chunk or a major heap section.
	 */

	objsize = safe_object_get_size ((MonoObject*)obj);
	objsize += ALLOC_ALIGN - 1;
	objsize &= ~(ALLOC_ALIGN - 1);

	if (G_UNLIKELY (objsize > MAX_SMALL_OBJ_SIZE || obj_is_from_pinned_alloc (obj))) {
		if (object_is_pinned (obj))
			return;
		DEBUG (9, fprintf (gc_debug_file, " (marked LOS/Pinned %p (%s), size: %zd)\n", obj, safe_name (obj), objsize));
		binary_protocol_pin (obj, (gpointer)LOAD_VTABLE (obj), safe_object_get_size ((MonoObject*)obj));
		pin_object (obj);
		GRAY_OBJECT_ENQUEUE (obj);
		HEAVY_STAT (++stat_major_copy_object_failed_large_pinned);
		return;
	}

	/*
	 * Now we know the object is in a major heap section.  All we
	 * need to do is check whether it's already in to-space (5) or
	 * not (4).
	 */
	if (MAJOR_OBJ_IS_IN_TO_SPACE (obj)) {
		DEBUG (9, g_assert (objsize <= MAX_SMALL_OBJ_SIZE));
		DEBUG (9, fprintf (gc_debug_file, " (already copied)\n"));
		HEAVY_STAT (++stat_major_copy_object_failed_to_space);
		return;
	}

 copy:
	HEAVY_STAT (++stat_objects_copied_major);

	*obj_slot = copy_object_no_checks (obj);
}

/* FIXME: later reduce code duplication here with build_nursery_fragments().
 * We don't keep track of section fragments for non-nursery sections yet, so
 * just memset to 0.
 */
static void
build_section_fragments (GCMemSection *section)
{
	int i;
	char *frag_start, *frag_end;
	size_t frag_size;

	/* clear scan starts */
	memset (section->scan_starts, 0, section->num_scan_start * sizeof (gpointer));
	frag_start = section->data;
	section->next_data = section->data;
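	/* walk the (sorted) pin queue: each entry is a surviving
	   pinned object, and the gaps between consecutive entries are
	   dead, so they get zeroed below */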
	for (i = section->pin_queue_start; i < section->pin_queue_end; ++i) {
		frag_end = pin_queue [i];
		/* remove the pin bit from pinned objects */
		unpin_object (frag_end);
		if (frag_end >= section->data + section->size) {
			frag_end = section->data + section->size;
		} else {
			section->scan_starts [((char*)frag_end - (char*)section->data)/SCAN_START_SIZE] = frag_end;
		}
		frag_size = frag_end - frag_start;
		if (frag_size) {
			binary_protocol_empty (frag_start, frag_size);
			memset (frag_start, 0, frag_size);
		}
		frag_size = safe_object_get_size ((MonoObject*)pin_queue [i]);
		frag_size += ALLOC_ALIGN - 1;
		frag_size &= ~(ALLOC_ALIGN - 1);
		frag_start = (char*)pin_queue [i] + frag_size;
		section->next_data = MAX (section->next_data, frag_start);
	}
	frag_end = section->end_data;
	frag_size = frag_end - frag_start;
	if (frag_size) {
		binary_protocol_empty (frag_start, frag_size);
		memset (frag_start, 0, frag_size);
	}
}

static void
scan_pinned_objects (IterateObjectCallbackFunc callback, void *callback_data)
{
	PinnedChunk *chunk;
	int i, obj_size;
	char *p, *endp;
	void **ptr;
	void *end_chunk;

	for (chunk = pinned_chunk_list; chunk; chunk = chunk->block.next) {
		end_chunk = (char*)chunk + chunk->num_pages * FREELIST_PAGESIZE;
		DEBUG (6, fprintf (gc_debug_file, "Scanning pinned chunk %p (range: %p-%p)\n", chunk, chunk->start_data, end_chunk));
		for (i = 0; i < chunk->num_pages; ++i) {
			obj_size = chunk->page_sizes [i];
			if (!obj_size)
				continue;
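			/* page 0 also holds the chunk header, so its
			   objects start at start_data rather than at the
			   page boundary */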
			p = i? (char*)chunk + i * FREELIST_PAGESIZE: chunk->start_data;
			endp = i? p + FREELIST_PAGESIZE: (char*)chunk + FREELIST_PAGESIZE;
			DEBUG (6, fprintf (gc_debug_file, "Page %d (size: %d, range: %p-%p)\n", i, obj_size, p, endp));
			while (p + obj_size <= endp) {
				ptr = (void**)p;
				DEBUG (9, fprintf (gc_debug_file, "Considering %p (vtable: %p)\n", ptr, *ptr));
				/* if the first word (the vtable) is outside the chunk we have an object */
				if (*ptr && (*ptr < (void*)chunk || *ptr >= end_chunk))
					callback ((char*)ptr, obj_size, callback_data);
				p += obj_size;
			}
		}
	}
}

/*
 * the array of pointers from @start to @end contains conservative
 * pointers to objects inside @chunk: mark each referenced object
 */
static void
mark_pinned_from_addresses (PinnedChunk *chunk, void **start, void **end)
{
	for (; start < end; start++) {
		char *addr = *start;
		int offset = (char*)addr - (char*)chunk;
		int page = offset / FREELIST_PAGESIZE;
		int obj_offset = page == 0? offset - ((char*)chunk->start_data - (char*)chunk): offset % FREELIST_PAGESIZE;
		int slot_size = chunk->page_sizes [page];
		void **ptr;
		/* the page is not allocated */
		if (!slot_size)
			continue;
		/* it would be faster if we restricted the sizes to powers of
		 * two, but that might waste memory: we'd need to measure. it
		 * could reduce fragmentation since fewer pages are needed: if
		 * for example someone interns strings of each size we end up
		 * with one page per interned string (still this is just
		 * ~40 KB); with more fine-grained sizes this increases the
		 * number of used pages.
		 */
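		/* a conservative pointer can point into the middle of
		   an object, so round the offset down to a slot
		   boundary to get the object's start */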
		if (page == 0) {
			obj_offset /= slot_size;
			obj_offset *= slot_size;
			addr = (char*)chunk->start_data + obj_offset;
		} else {
			obj_offset /= slot_size;
			obj_offset *= slot_size;
			addr = (char*)chunk + page * FREELIST_PAGESIZE + obj_offset;
		}
		ptr = (void**)addr;
		/* if the vtable is inside the chunk it's on the freelist, so skip */
		if (*ptr && (*ptr < (void*)chunk->start_data || *ptr > (void*)((char*)chunk + chunk->num_pages * FREELIST_PAGESIZE))) {
			binary_protocol_pin (addr, (gpointer)LOAD_VTABLE (addr), safe_object_get_size ((MonoObject*)addr));
			if (heap_dump_file && !object_is_pinned (addr))
				pin_stats_register_object ((char*) addr, safe_object_get_size ((MonoObject*) addr));
			pin_object (addr);
			GRAY_OBJECT_ENQUEUE (addr);
			DEBUG (6, fprintf (gc_debug_file, "Marked pinned object %p (%s) from roots\n", addr, safe_name (addr)));
		}
	}
}

static void
sweep_pinned_objects_callback (char *ptr, size_t size, void *data)
{
	if (object_is_pinned (ptr)) {
		unpin_object (ptr);
		DEBUG (6, fprintf (gc_debug_file, "Unmarked pinned object %p (%s)\n", ptr, safe_name (ptr)));
	} else {
		DEBUG (6, fprintf (gc_debug_file, "Freeing unmarked pinned object %p (%s)\n", ptr, safe_name (ptr)));
		free_pinned_object (ptr, size);
	}
}

static void
sweep_pinned_objects (void)
{
	scan_pinned_objects (sweep_pinned_objects_callback, NULL);
}

static void
major_iterate_objects (gboolean non_pinned, gboolean pinned, IterateObjectCallbackFunc callback, void *data)
{
	if (non_pinned) {
		GCMemSection *section;
		for (section = section_list; section; section = section->block.next)
			scan_area_with_callback (section->data, section->end_data, callback, data);
	}
	if (pinned)
		scan_pinned_objects (callback, data);
}

static void
major_free_non_pinned_object (char *obj, size_t size)
{
	memset (obj, 0, size);
}

static void
major_find_pin_queue_start_ends (void)
{
	GCMemSection *section;
	PinnedChunk *chunk;

	for (section = section_list; section; section = section->block.next)
		find_section_pin_queue_start_end (section);

	/* look for pinned addresses for pinned-alloc objects */
	DEBUG (6, fprintf (gc_debug_file, "Pinning from pinned-alloc objects\n"));
	for (chunk = pinned_chunk_list; chunk; chunk = chunk->block.next) {
		int start, end;
		find_optimized_pin_queue_area (chunk->start_data, (char*)chunk + chunk->num_pages * FREELIST_PAGESIZE, &start, &end);
		if (start != end)
			mark_pinned_from_addresses (chunk, pin_queue + start, pin_queue + end);
	}
}

static void
major_pin_objects (void)
{
	GCMemSection *section;

	for (section = section_list; section; section = section->block.next)
		pin_objects_in_section (section);
}

static void
major_init_to_space (void)
{
	new_to_space_section ();
}

static void
major_sweep (void)
{
	GCMemSection *section, *prev_section;

	to_space_set_next_data ();
	unset_to_space ();

	/* unpin objects from the pinned chunks and free the unmarked ones */
	sweep_pinned_objects ();

	/* free the unused sections */
	prev_section = NULL;
	for (section = section_list; section;) {
		/* to_space doesn't need handling here */
		if (section->is_to_space) {
			section->is_to_space = FALSE;
			prev_section = section;
			section = section->block.next;
			continue;
		}
		/* no pinning object, so the section is free */
		if (section->pin_queue_start == section->pin_queue_end) {
			GCMemSection *to_free;
			if (prev_section)
				prev_section->block.next = section->block.next;
			else
				section_list = section->block.next;
			to_free = section;
			section = section->block.next;
			free_major_section (to_free);
			continue;
		} else {
			DEBUG (6, fprintf (gc_debug_file, "Section %p has still pinned objects (%d)\n", section, section->pin_queue_end - section->pin_queue_start));
			build_section_fragments (section);
		}
		prev_section = section;
		section = section->block.next;
	}
}

static void
major_check_scan_starts (void)
{
	GCMemSection *section;
	for (section = section_list; section; section = section->block.next)
		check_section_scan_starts (section);
}

static void
major_dump_heap (void)
{
	GCMemSection *section;
	for (section = section_list; section; section = section->block.next)
		dump_section (section, "old");
	/* FIXME: dump pinned sections, too */
}

static gint64
major_get_used_size (void)
{
	gint64 tot = 0;
	GCMemSection *section;
	for (section = section_list; section; section = section->block.next) {
		/* this is approximate... */
		tot += section->next_data - section->data;
	}
	return tot;
}

static void
major_init (void)
{
#ifdef HEAVY_STATISTICS
	mono_counters_register ("# major copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_copy_object_failed_forwarded);
	mono_counters_register ("# major copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_copy_object_failed_pinned);
	mono_counters_register ("# major copy_object() failed large or pinned chunk", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_copy_object_failed_large_pinned);
	mono_counters_register ("# major copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_copy_object_failed_to_space);
#endif
}

/* only valid during minor collections */
static int old_num_major_sections;

static void
major_start_nursery_collection (void)
{
	old_num_major_sections = num_major_sections;

	if (!to_space_section) {
		new_to_space_section ();
	} else {
		/* we might have done degraded allocation since the
		   last collection */
		g_assert (to_space_bumper <= to_space_section->next_data);
		to_space_bumper = to_space_section->next_data;

		to_space_section->is_to_space = TRUE;
	}
}

static void
major_finish_nursery_collection (void)
{
	GCMemSection *section;
	int sections_alloced;

	to_space_set_next_data ();

	for (section = section_list; section; section = section->block.next)
		section->is_to_space = FALSE;

	sections_alloced = num_major_sections - old_num_major_sections;
	minor_collection_sections_alloced += sections_alloced;
}

static gboolean
major_ptr_is_in_non_pinned_space (char *ptr)
{
	GCMemSection *section;
	for (section = section_list; section;) {
		if (ptr >= section->data && ptr < section->data + section->size)
			return TRUE;
		section = section->block.next;
	}
	return FALSE;
}

static void
major_report_pinned_memory_usage (void)
{
	PinnedChunk *chunk;
	int i = 0;

	for (chunk = pinned_chunk_list; chunk; chunk = chunk->block.next)
		report_pinned_chunk (chunk, i++);
}