/*
 * sgen-major-copying.c: Simple generational GC.
 *
 * Author:
 *    Paolo Molaro (lupus@ximian.com)
 *
 * Copyright 2005-2010 Novell, Inc (http://www.novell.com)
 *
 * Thread start/stop adapted from Boehm's GC:
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2004 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Copyright 2001-2003 Ximian, Inc
 * Copyright 2003-2010 Novell, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifdef SGEN_PARALLEL_MARK
#error Parallel mark not supported in copying major collector
#endif

#define MAJOR_SECTION_SIZE SGEN_PINNED_CHUNK_SIZE
#define BLOCK_FOR_OBJECT(o) SGEN_PINNED_CHUNK_FOR_PTR ((o))
#define MAJOR_SECTION_FOR_OBJECT(o) ((GCMemSection*)BLOCK_FOR_OBJECT ((o)))

#define MAJOR_OBJ_IS_IN_TO_SPACE(o) (MAJOR_SECTION_FOR_OBJECT ((o))->is_to_space)

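/*
 * Sections and pinned chunks are both "blocks": they are allocated
 * aligned to their own size, so the block containing an object can be
 * recovered from the object's address alone (which is what
 * SGEN_PINNED_CHUNK_FOR_PTR does), and the block's role field then
 * tells whether it is a pinned chunk or a major heap section.
 */
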
static int num_major_sections = 0;

static GCMemSection *section_list = NULL;

static SgenInternalAllocator pinned_allocator;

/*
 * used when moving the objects
 */
static char *to_space_bumper = NULL;
static char *to_space_top = NULL;
static GCMemSection *to_space_section = NULL;

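/*
 * to_space_bumper is the bump-pointer allocation cursor used while
 * copying objects, to_space_top is the end of the current to-space
 * section, and to_space_section is the section the bumper points into.
 */
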
#ifdef HEAVY_STATISTICS
static long stat_major_copy_object_failed_forwarded = 0;
static long stat_major_copy_object_failed_pinned = 0;
static long stat_major_copy_object_failed_large_pinned = 0;
static long stat_major_copy_object_failed_to_space = 0;
#endif

static gboolean
obj_is_from_pinned_alloc (char *p)
{
    return BLOCK_FOR_OBJECT (p)->role == MEMORY_ROLE_PINNED;
}

static void
free_pinned_object (char *obj, size_t size)
{
    mono_sgen_free_internal_full (&pinned_allocator, obj, size, INTERNAL_MEM_MANAGED);
}

/*
 * Allocate a new section of memory to be used as old generation.
 */
static GCMemSection*
alloc_major_section (void)
{
    GCMemSection *section;
    int scan_starts;

    section = mono_sgen_alloc_os_memory_aligned (MAJOR_SECTION_SIZE, MAJOR_SECTION_SIZE, TRUE);
    section->next_data = section->data = (char*)section + SIZEOF_GC_MEM_SECTION;
    g_assert (!((mword)section->data & 7));
    section->size = MAJOR_SECTION_SIZE - SIZEOF_GC_MEM_SECTION;
    section->end_data = section->data + section->size;
    mono_sgen_update_heap_boundaries ((mword)section->data, (mword)section->end_data);
    DEBUG (3, fprintf (gc_debug_file, "New major heap section: (%p-%p), total: %zd\n", section->data, section->end_data, total_alloc));
    scan_starts = (section->size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
    section->scan_starts = mono_sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS);
    section->num_scan_start = scan_starts;
    section->block.role = MEMORY_ROLE_GEN1;
    section->is_to_space = TRUE;

    /* add to the section list */
    section->block.next = section_list;
    section_list = section;

    ++num_major_sections;

    return section;
}

static void
free_major_section (GCMemSection *section)
{
    DEBUG (3, fprintf (gc_debug_file, "Freed major section %p (%p-%p)\n", section, section->data, section->end_data));
    mono_sgen_free_internal_dynamic (section->scan_starts,
            (section->size + SCAN_START_SIZE - 1) / SCAN_START_SIZE * sizeof (char*), INTERNAL_MEM_SCAN_STARTS);
    mono_sgen_free_os_memory (section, MAJOR_SECTION_SIZE);

    --num_major_sections;
}

static void
new_to_space_section (void)
{
    /* FIXME: if the current to_space_section is empty, we don't
       have to allocate a new one */

    to_space_section = alloc_major_section ();
    to_space_bumper = to_space_section->next_data;
    to_space_top = to_space_section->end_data;
}

static void
to_space_set_next_data (void)
{
    g_assert (to_space_bumper >= to_space_section->next_data && to_space_bumper <= to_space_section->end_data);
    to_space_section->next_data = to_space_bumper;
}

static void
to_space_expand (void)
{
    if (to_space_section) {
        g_assert (to_space_top == to_space_section->end_data);
        to_space_set_next_data ();
    }

    new_to_space_section ();
}

#define MAJOR_GET_COPY_OBJECT_SPACE(dest, size, refs) do { \
        (dest) = to_space_bumper; \
        /* Make sure we have enough space available */ \
        if ((dest) + (size) > to_space_top) { \
            to_space_expand (); \
            (dest) = to_space_bumper; \
            DEBUG (8, g_assert ((dest) + (size) <= to_space_top)); \
        } \
        to_space_bumper += (size); \
        DEBUG (8, g_assert (to_space_bumper <= to_space_top)); \
        to_space_section->scan_starts [((dest) - (char*)to_space_section->data)/SCAN_START_SIZE] = (dest); \
    } while (0)

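/*
 * A sketch of how the copy fast path is expected to use this macro.
 * The actual caller is the shared copying machinery behind
 * copy_object_no_checks(), which lives outside this file, so the names
 * below are illustrative only:
 *
 *    char *destination;
 *    mword objsize = SGEN_ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
 *    MAJOR_GET_COPY_OBJECT_SPACE (destination, objsize, has_references);
 *    memcpy (destination, obj, objsize);
 *
 * i.e. reserve objsize bytes at the bumper (expanding to-space if the
 * current section is full) and record a scan start, then copy the
 * object into the reserved space.
 */
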
static void
unset_to_space (void)
{
    /* between collections the to_space_bumper is invalidated
       because degraded allocations might occur, so we set it to
       NULL, just to make it explicit */
    to_space_bumper = NULL;

    /* don't unset to_space_section if we implement the FIXME in
       new_to_space_section */
    to_space_section = NULL;
}

static gboolean
major_is_object_live (char *obj)
{
    mword objsize;

    if (ptr_in_nursery (obj))
        return FALSE;

    objsize = SGEN_ALIGN_UP (safe_object_get_size ((MonoObject*)obj));

    /* LOS */
    if (objsize > MAX_SMALL_OBJ_SIZE)
        return FALSE;

    /* pinned chunk */
    if (obj_is_from_pinned_alloc (obj))
        return FALSE;

    /* now we know it's in a major heap section */
    return MAJOR_SECTION_FOR_OBJECT (obj)->is_to_space;
}

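/*
 * Note: objects allocated by major_alloc_small_pinned_obj() below live
 * in pinned chunks and are never moved; unmarked ones are reclaimed by
 * sweep_pinned_objects() during the sweep phase.
 */
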
/* size is a multiple of ALLOC_ALIGN */
static void*
major_alloc_small_pinned_obj (size_t size, gboolean has_references)
{
    return mono_sgen_alloc_internal_full (&pinned_allocator, size, INTERNAL_MEM_MANAGED);
}

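/*
 * Degraded allocation, as used below, means the mutator allocates
 * directly in the old generation, in a section explicitly flagged as
 * not being to-space, instead of in the nursery; degraded_mode
 * accounts for the bytes allocated this way.
 */
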
/*
 * size is already rounded up and we hold the GC lock.
 */
static void*
major_alloc_degraded (MonoVTable *vtable, size_t size)
{
    GCMemSection *section;
    void **p = NULL;
    g_assert (size <= MAX_SMALL_OBJ_SIZE);
    HEAVY_STAT (++stat_objects_alloced_degraded);
    HEAVY_STAT (stat_bytes_alloced_degraded += size);
    for (section = section_list; section; section = section->block.next) {
        if ((section->end_data - section->next_data) >= size) {
            p = (void**)section->next_data;
            break;
        }
    }
    if (!p) {
        section = alloc_major_section ();
        section->is_to_space = FALSE;
        /* FIXME: handle OOM */
        p = (void**)section->next_data;
        ++minor_collection_sections_alloced;
    }
    section->next_data += size;
    degraded_mode += size;
    DEBUG (3, fprintf (gc_debug_file, "Allocated (degraded) object %p, vtable: %p (%s), size: %zd in section %p\n", p, vtable, vtable->klass->name, size, section));
    *p = vtable;
    return p;
}

static void
major_copy_or_mark_object (void **obj_slot, GrayQueue *queue)
{
    char *forwarded;
    char *obj = *obj_slot;
    mword objsize;

    DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));

    HEAVY_STAT (++stat_copy_object_called_major);

    DEBUG (9, fprintf (gc_debug_file, "Precise copy of %p from %p", obj, obj_slot));

    /*
     * obj must belong to one of:
     *
     * 1. the nursery
     * 2. the LOS
     * 3. a pinned chunk
     * 4. a non-to-space section of the major heap
     * 5. a to-space section of the major heap
     *
     * In addition, objects in 1, 2 and 4 might also be pinned.
     * Objects in 1 and 4 might be forwarded.
     *
     * Before we can copy the object we must make sure that we are
     * allowed to, i.e. that the object is not pinned, not already
     * forwarded and doesn't belong to the LOS, a pinned chunk, or
     * a to-space section.
     *
     * We are usually called for to-space objects (5) when we have
     * two remset entries for the same reference.  The first entry
     * copies the object and updates the reference and the second
     * calls us with the updated reference that points into
     * to-space.  There might also be other circumstances where we
     * get to-space objects.
     */

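    /*
     * In summary, the checks below implement the following policy:
     *
     *   forwarded            -> update *obj_slot with the forwarding address
     *   pinned               -> leave in place
     *   in the nursery       -> copy
     *   LOS or pinned chunk  -> pin it and push it on the gray queue
     *   already in to-space  -> nothing to do
     *   anything else        -> copy it into to-space
     */
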
    if ((forwarded = object_is_forwarded (obj))) {
        DEBUG (9, g_assert (((MonoVTable*)LOAD_VTABLE(obj))->gc_descr));
        DEBUG (9, fprintf (gc_debug_file, " (already forwarded to %p)\n", forwarded));
        HEAVY_STAT (++stat_major_copy_object_failed_forwarded);
        *obj_slot = forwarded;
        return;
    }
    if (object_is_pinned (obj)) {
        DEBUG (9, g_assert (((MonoVTable*)LOAD_VTABLE(obj))->gc_descr));
        DEBUG (9, fprintf (gc_debug_file, " (pinned, no change)\n"));
        HEAVY_STAT (++stat_major_copy_object_failed_pinned);
        return;
    }

    if (ptr_in_nursery (obj))
        goto copy;

    /*
     * At this point we know obj is not pinned, not forwarded and
     * belongs to 2, 3, 4, or 5.
     *
     * LOS objects (2) are simple, at least until we always follow
     * the rule: if objsize > MAX_SMALL_OBJ_SIZE, pin the object
     * and return it.  At the end of major collections, we walk
     * the los list and if the object is pinned, it is marked,
     * otherwise it can be freed.
     *
     * Pinned chunks (3) and major heap sections (4, 5) both
     * reside in blocks, which are always aligned, so once we've
     * eliminated LOS objects, we can just access the block and
     * see whether it's a pinned chunk or a major heap section.
     */

    objsize = SGEN_ALIGN_UP (safe_object_get_size ((MonoObject*)obj));

    if (G_UNLIKELY (objsize > MAX_SMALL_OBJ_SIZE || obj_is_from_pinned_alloc (obj))) {
        if (object_is_pinned (obj))
            return;
        DEBUG (9, fprintf (gc_debug_file, " (marked LOS/Pinned %p (%s), size: %zd)\n", obj, safe_name (obj), objsize));
        binary_protocol_pin (obj, (gpointer)LOAD_VTABLE (obj), safe_object_get_size ((MonoObject*)obj));
        pin_object (obj);
        GRAY_OBJECT_ENQUEUE (queue, obj);
        HEAVY_STAT (++stat_major_copy_object_failed_large_pinned);
        return;
    }

    /*
     * Now we know the object is in a major heap section.  All we
     * need to do is check whether it's already in to-space (5) or
     * not (4).
     */
    if (MAJOR_OBJ_IS_IN_TO_SPACE (obj)) {
        DEBUG (9, g_assert (objsize <= MAX_SMALL_OBJ_SIZE));
        DEBUG (9, fprintf (gc_debug_file, " (already copied)\n"));
        HEAVY_STAT (++stat_major_copy_object_failed_to_space);
        return;
    }

 copy:
    HEAVY_STAT (++stat_objects_copied_major);

    *obj_slot = copy_object_no_checks (obj, queue);
}

/* FIXME: later reduce code duplication here with build_nursery_fragments().
 * We don't keep track of section fragments for non-nursery sections yet, so
 * just memset to 0.
 */
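/*
 * The pin queue entries for the section are sorted by address, so the
 * loop below walks the pinned objects in order, unpins each one,
 * zeroes the dead gap between consecutive pinned objects and records
 * scan starts; next_data ends up just past the last pinned object.
 */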
static void
build_section_fragments (GCMemSection *section)
{
    int i;
    char *frag_start, *frag_end;
    size_t frag_size;

    /* clear scan starts */
    memset (section->scan_starts, 0, section->num_scan_start * sizeof (gpointer));
    frag_start = section->data;
    section->next_data = section->data;
    for (i = section->pin_queue_start; i < section->pin_queue_end; ++i) {
        frag_end = pin_queue [i];
        /* remove the pin bit from pinned objects */
        unpin_object (frag_end);
        if (frag_end >= section->data + section->size) {
            frag_end = section->data + section->size;
        } else {
            section->scan_starts [((char*)frag_end - (char*)section->data)/SCAN_START_SIZE] = frag_end;
        }
        frag_size = frag_end - frag_start;
        if (frag_size) {
            binary_protocol_empty (frag_start, frag_size);
            memset (frag_start, 0, frag_size);
        }
        frag_size = SGEN_ALIGN_UP (safe_object_get_size ((MonoObject*)pin_queue [i]));
        frag_start = (char*)pin_queue [i] + frag_size;
        section->next_data = MAX (section->next_data, frag_start);
    }
    frag_end = section->end_data;
    frag_size = frag_end - frag_start;
    if (frag_size) {
        binary_protocol_empty (frag_start, frag_size);
        memset (frag_start, 0, frag_size);
    }
}

static void
sweep_pinned_objects_callback (char *ptr, size_t size, void *data)
{
    if (object_is_pinned (ptr)) {
        unpin_object (ptr);
        DEBUG (6, fprintf (gc_debug_file, "Unmarked pinned object %p (%s)\n", ptr, safe_name (ptr)));
    } else {
        DEBUG (6, fprintf (gc_debug_file, "Freeing unmarked pinned object %p (%s)\n", ptr, safe_name (ptr)));
        free_pinned_object (ptr, size);
    }
}

static void
sweep_pinned_objects (void)
{
    mono_sgen_internal_scan_objects (&pinned_allocator, sweep_pinned_objects_callback, NULL);
}

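/*
 * non_pinned selects the objects in the major heap sections, pinned
 * selects the objects in the pinned chunks; either or both can be
 * iterated in one call.
 */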
static void
major_iterate_objects (gboolean non_pinned, gboolean pinned, IterateObjectCallbackFunc callback, void *data)
{
    if (non_pinned) {
        GCMemSection *section;
        for (section = section_list; section; section = section->block.next)
            scan_area_with_callback (section->data, section->end_data, callback, data);
    }
    if (pinned)
        mono_sgen_internal_scan_objects (&pinned_allocator, callback, data);
}

static void
major_free_non_pinned_object (char *obj, size_t size)
{
    memset (obj, 0, size);
}

static void
pin_pinned_object_callback (void *addr, size_t slot_size, GrayQueue *queue)
{
    binary_protocol_pin (addr, (gpointer)LOAD_VTABLE (addr), safe_object_get_size ((MonoObject*)addr));
    if (heap_dump_file && !object_is_pinned (addr))
        pin_stats_register_object ((char*) addr, safe_object_get_size ((MonoObject*) addr));
    pin_object (addr);
    GRAY_OBJECT_ENQUEUE (queue, addr);
    DEBUG (6, fprintf (gc_debug_file, "Marked pinned object %p (%s) from roots\n", addr, safe_name (addr)));
}

static void
major_find_pin_queue_start_ends (GrayQueue *queue)
{
    GCMemSection *section;

    for (section = section_list; section; section = section->block.next)
        find_section_pin_queue_start_end (section);
    mono_sgen_internal_scan_pinned_objects (&pinned_allocator, (IterateObjectCallbackFunc)pin_pinned_object_callback, queue);
}

static void
major_pin_objects (GrayQueue *queue)
{
    GCMemSection *section;

    for (section = section_list; section; section = section->block.next)
        pin_objects_in_section (section, queue);
}

static void
major_init_to_space (void)
{
    new_to_space_section ();
}

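/*
 * Sweep the old generation: unpin the surviving pinned-chunk objects
 * and free the unmarked ones, free every section without pinned
 * objects, and rebuild fragment information for the sections that
 * still have some.
 */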
static void
major_sweep (void)
{
    GCMemSection *section, *prev_section;

    to_space_set_next_data ();
    unset_to_space ();

    /* unpin objects from the pinned chunks and free the unmarked ones */
    sweep_pinned_objects ();

    /* free the unused sections */
    prev_section = NULL;
    for (section = section_list; section;) {
        /* to_space doesn't need handling here */
        if (section->is_to_space) {
            section->is_to_space = FALSE;
            prev_section = section;
            section = section->block.next;
            continue;
        }
        /* no pinned objects, so the section is free */
        if (section->pin_queue_start == section->pin_queue_end) {
            GCMemSection *to_free;
            if (prev_section)
                prev_section->block.next = section->block.next;
            else
                section_list = section->block.next;
            to_free = section;
            section = section->block.next;
            free_major_section (to_free);
            continue;
        } else {
            DEBUG (6, fprintf (gc_debug_file, "Section %p still has pinned objects (%d)\n", section, section->pin_queue_end - section->pin_queue_start));
            build_section_fragments (section);
        }
        prev_section = section;
        section = section->block.next;
    }
}

static void
major_check_scan_starts (void)
{
    GCMemSection *section;
    for (section = section_list; section; section = section->block.next)
        check_section_scan_starts (section);
}

static void
major_dump_heap (void)
{
    GCMemSection *section;
    for (section = section_list; section; section = section->block.next)
        dump_section (section, "old");
    /* FIXME: dump pinned sections, too */
}

static gint64
major_get_used_size (void)
{
    gint64 tot = 0;
    GCMemSection *section;
    for (section = section_list; section; section = section->block.next) {
        /* this is approximate... */
        tot += section->next_data - section->data;
    }
    return tot;
}

static void
major_init (void)
{
#ifdef HEAVY_STATISTICS
    mono_counters_register ("# major copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_copy_object_failed_forwarded);
    mono_counters_register ("# major copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_copy_object_failed_pinned);
    mono_counters_register ("# major copy_object() failed large or pinned chunk", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_copy_object_failed_large_pinned);
    mono_counters_register ("# major copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_copy_object_failed_to_space);
#endif
}

/* only valid during minor collections */
static int old_num_major_sections;

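/*
 * While a nursery collection runs, promoted objects are copied into
 * the current to-space section, so it is flagged is_to_space here;
 * when the collection finishes, the flag is cleared on all sections
 * again, and the difference in section counts is reported as the
 * number of major sections the minor collection had to allocate.
 */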
static void
major_start_nursery_collection (void)
{
    old_num_major_sections = num_major_sections;

    if (!to_space_section) {
        new_to_space_section ();
    } else {
        /* we might have done degraded allocation since the
           last collection */
        g_assert (to_space_bumper <= to_space_section->next_data);
        to_space_bumper = to_space_section->next_data;
    }
    to_space_section->is_to_space = TRUE;
}

static void
major_finish_nursery_collection (void)
{
    GCMemSection *section;
    int sections_alloced;

    to_space_set_next_data ();

    for (section = section_list; section; section = section->block.next)
        section->is_to_space = FALSE;

    sections_alloced = num_major_sections - old_num_major_sections;
    minor_collection_sections_alloced += sections_alloced;
}

static void
major_finish_major_collection (void)
{
}

static gboolean
major_ptr_is_in_non_pinned_space (char *ptr)
{
    GCMemSection *section;
    for (section = section_list; section;) {
        if (ptr >= section->data && ptr < section->data + section->size)
            return TRUE;
        section = section->block.next;
    }
    return FALSE;
}

static void
major_report_pinned_memory_usage (void)
{
    mono_sgen_report_internal_mem_usage_full (&pinned_allocator);
}