/*
 * sgen-major-copying.c: Simple generational GC.
 *
 * Author:
 *      Paolo Molaro (lupus@ximian.com)
 *
 * Copyright 2005-2010 Novell, Inc (http://www.novell.com)
 *
 * Thread start/stop adapted from Boehm's GC:
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2004 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 *
 * Copyright 2001-2003 Ximian, Inc
 * Copyright 2003-2010 Novell, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifdef SGEN_PARALLEL_MARK
#error Parallel mark not supported in copying major collector
#endif

#define MAJOR_SECTION_SIZE              SGEN_PINNED_CHUNK_SIZE
#define BLOCK_FOR_OBJECT(o)             SGEN_PINNED_CHUNK_FOR_PTR ((o))
#define MAJOR_SECTION_FOR_OBJECT(o)     ((GCMemSection*)BLOCK_FOR_OBJECT ((o)))

#define MAJOR_OBJ_IS_IN_TO_SPACE(o)     (MAJOR_SECTION_FOR_OBJECT ((o))->is_to_space)

static int num_major_sections = 0;

static GCMemSection *section_list = NULL;

static SgenInternalAllocator pinned_allocator;

/*
 * Bump-pointer state used while copying (moving) objects into the
 * current to-space section.
 */
static char *to_space_bumper = NULL;
static char *to_space_top = NULL;
static GCMemSection *to_space_section = NULL;

#ifdef HEAVY_STATISTICS
static long stat_major_copy_object_failed_forwarded = 0;
static long stat_major_copy_object_failed_pinned = 0;
static long stat_major_copy_object_failed_large_pinned = 0;
static long stat_major_copy_object_failed_to_space = 0;
#endif

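/*
 * Objects handed out by the pinned allocator live in blocks whose role
 * is MEMORY_ROLE_PINNED, so the containing block tells us whether an
 * object came from a pinned chunk.
 */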
static gboolean
obj_is_from_pinned_alloc (char *p)
{
        return BLOCK_FOR_OBJECT (p)->role == MEMORY_ROLE_PINNED;
}

static void
free_pinned_object (char *obj, size_t size)
{
        mono_sgen_free_internal_full (&pinned_allocator, obj, size, INTERNAL_MEM_MANAGED);
}

/*
 * Allocate a new section of memory to be used as old generation.
 */
static GCMemSection*
alloc_major_section (void)
{
        GCMemSection *section;
        int scan_starts;

        section = mono_sgen_alloc_os_memory_aligned (MAJOR_SECTION_SIZE, MAJOR_SECTION_SIZE, TRUE);
        section->next_data = section->data = (char*)section + SIZEOF_GC_MEM_SECTION;
        g_assert (!((mword)section->data & 7));
        section->size = MAJOR_SECTION_SIZE - SIZEOF_GC_MEM_SECTION;
        section->end_data = section->data + section->size;
        mono_sgen_update_heap_boundaries ((mword)section->data, (mword)section->end_data);
        DEBUG (3, fprintf (gc_debug_file, "New major heap section: (%p-%p), total: %zd\n", section->data, section->end_data, total_alloc));
        scan_starts = (section->size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
        section->scan_starts = mono_sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS);
        section->num_scan_start = scan_starts;
        section->block.role = MEMORY_ROLE_GEN1;
        section->is_to_space = TRUE;

        /* add to the section list */
        section->block.next = section_list;
        section_list = section;

        ++num_major_sections;

        return section;
}

static void
free_major_section (GCMemSection *section)
{
        DEBUG (3, fprintf (gc_debug_file, "Freed major section %p (%p-%p)\n", section, section->data, section->end_data));
        mono_sgen_free_internal_dynamic (section->scan_starts,
                        (section->size + SCAN_START_SIZE - 1) / SCAN_START_SIZE * sizeof (char*), INTERNAL_MEM_SCAN_STARTS);
        mono_sgen_free_os_memory (section, MAJOR_SECTION_SIZE);

        --num_major_sections;
}

static void
new_to_space_section (void)
{
        /* FIXME: if the current to_space_section is empty, we don't
           have to allocate a new one */

        to_space_section = alloc_major_section ();
        to_space_bumper = to_space_section->next_data;
        to_space_top = to_space_section->end_data;
}

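/*
 * Commit the current bump pointer as the section's next_data, so the
 * section correctly records how much of it is in use.
 */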
static void
to_space_set_next_data (void)
{
        g_assert (to_space_bumper >= to_space_section->next_data && to_space_bumper <= to_space_section->end_data);
        to_space_section->next_data = to_space_bumper;
}

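/*
 * Close out the current to-space section (if any) and continue bumping
 * into a freshly allocated one.
 */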
static void
to_space_expand (void)
{
        if (to_space_section) {
                g_assert (to_space_top == to_space_section->end_data);
                to_space_set_next_data ();
        }

        new_to_space_section ();
}

/* Note: the macro body must use the "size" parameter consistently; it
   previously referred to a variable named "objsize", which only worked
   if the caller happened to pass a variable with that exact name. */
#define MAJOR_GET_COPY_OBJECT_SPACE(dest, size, refs) do {              \
                (dest) = to_space_bumper;                               \
                /* Make sure we have enough space available */          \
                if ((dest) + (size) > to_space_top) {                   \
                        to_space_expand ();                             \
                        (dest) = to_space_bumper;                       \
                        DEBUG (8, g_assert ((dest) + (size) <= to_space_top)); \
                }                                                       \
                to_space_bumper += (size);                              \
                DEBUG (8, g_assert (to_space_bumper <= to_space_top));  \
                to_space_section->scan_starts [((dest) - (char*)to_space_section->data)/SCAN_START_SIZE] = (dest); \
        } while (0)
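
/*
 * A minimal usage sketch for the macro above, for illustration only:
 * the real call site is presumably in the shared copying machinery
 * around copy_object_no_checks (); "obj", "dest" and "has_refs" are
 * example names, not part of this file's API.  Note that the "refs"
 * argument is unused by this collector's version of the macro.
 *
 *      char *dest;
 *      mword objsize = SGEN_ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
 *      MAJOR_GET_COPY_OBJECT_SPACE (dest, objsize, has_refs);
 *      memcpy (dest, obj, objsize);    // move the object into to-space
 *      // ...then install a forwarding pointer in the old copy...
 */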

static void
unset_to_space (void)
{
        /* between collections the to_space_bumper is invalidated
           because degraded allocations might occur, so we set it to
           NULL, just to make it explicit */
        to_space_bumper = NULL;

        /* don't unset to_space_section if we implement the FIXME in
           new_to_space_section */
        to_space_section = NULL;
}

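/*
 * Only objects in major sections are tracked here: such an object is
 * live iff its section is currently flagged as to-space, i.e. iff it
 * has been copied.  Nursery, LOS and pinned-chunk objects are managed
 * elsewhere, so we answer FALSE for them.
 */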
static gboolean
major_is_object_live (char *obj)
{
        mword objsize;

        /* nursery */
        if (ptr_in_nursery (obj))
                return FALSE;

        objsize = SGEN_ALIGN_UP (safe_object_get_size ((MonoObject*)obj));

        /* LOS */
        if (objsize > MAX_SMALL_OBJ_SIZE)
                return FALSE;

        /* pinned chunk */
        if (obj_is_from_pinned_alloc (obj))
                return FALSE;

        /* now we know it's in a major heap section */
        return MAJOR_SECTION_FOR_OBJECT (obj)->is_to_space;
}

/* size is a multiple of ALLOC_ALIGN */
static void*
major_alloc_small_pinned_obj (size_t size, gboolean has_references)
{
        return mono_sgen_alloc_internal_full (&pinned_allocator, size, INTERNAL_MEM_MANAGED);
}

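/*
 * Degraded allocation places new objects directly in the old
 * generation; the first-fit loop below reuses leftover space in
 * existing sections before allocating a fresh one.
 */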
/*
 * size is already rounded up and we hold the GC lock.
 */
static void*
major_alloc_degraded (MonoVTable *vtable, size_t size)
{
        GCMemSection *section;
        void **p = NULL;
        g_assert (size <= MAX_SMALL_OBJ_SIZE);
        HEAVY_STAT (++stat_objects_alloced_degraded);
        HEAVY_STAT (stat_bytes_alloced_degraded += size);
        for (section = section_list; section; section = section->block.next) {
                if ((section->end_data - section->next_data) >= size) {
                        p = (void**)section->next_data;
                        break;
                }
        }
        if (!p) {
                section = alloc_major_section ();
                section->is_to_space = FALSE;
                /* FIXME: handle OOM */
                p = (void**)section->next_data;
                ++minor_collection_sections_alloced;
        }
        section->next_data += size;
        degraded_mode += size;
        DEBUG (3, fprintf (gc_debug_file, "Allocated (degraded) object %p, vtable: %p (%s), size: %zd in section %p\n", p, vtable, vtable->klass->name, size, section));
        *p = vtable;
        return p;
}

static void
major_copy_or_mark_object (void **obj_slot, GrayQueue *queue)
{
        char *forwarded;
        char *obj = *obj_slot;
        mword objsize;

        DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));

        HEAVY_STAT (++stat_copy_object_called_major);

        DEBUG (9, fprintf (gc_debug_file, "Precise copy of %p from %p", obj, obj_slot));

        /*
         * obj must belong to one of:
         *
         * 1. the nursery
         * 2. the LOS
         * 3. a pinned chunk
         * 4. a non-to-space section of the major heap
         * 5. a to-space section of the major heap
         *
         * In addition, objects in 1, 2 and 4 might also be pinned.
         * Objects in 1 and 4 might be forwarded.
         *
         * Before we can copy the object we must make sure that we are
         * allowed to, i.e. that the object is not pinned, not already
         * forwarded and doesn't belong to the LOS, a pinned chunk, or
         * a to-space section.
         *
         * We are usually called for to-space objects (5) when we have
         * two remset entries for the same reference.  The first entry
         * copies the object and updates the reference and the second
         * calls us with the updated reference that points into
         * to-space.  There might also be other circumstances where we
         * get to-space objects.
         */

        if ((forwarded = object_is_forwarded (obj))) {
                DEBUG (9, g_assert (((MonoVTable*)LOAD_VTABLE(obj))->gc_descr));
                DEBUG (9, fprintf (gc_debug_file, " (already forwarded to %p)\n", forwarded));
                HEAVY_STAT (++stat_major_copy_object_failed_forwarded);
                *obj_slot = forwarded;
                return;
        }
        if (object_is_pinned (obj)) {
                DEBUG (9, g_assert (((MonoVTable*)LOAD_VTABLE(obj))->gc_descr));
                DEBUG (9, fprintf (gc_debug_file, " (pinned, no change)\n"));
                HEAVY_STAT (++stat_major_copy_object_failed_pinned);
                return;
        }

        if (ptr_in_nursery (obj))
                goto copy;

        /*
         * At this point we know obj is not pinned, not forwarded and
         * belongs to 2, 3, 4, or 5.
         *
         * LOS objects (2) are simple, as long as we always follow the
         * rule: if objsize > MAX_SMALL_OBJ_SIZE, pin the object and
         * return it.  At the end of major collections, we walk the
         * los list, and if the object is pinned, it is marked,
         * otherwise it can be freed.
         *
         * Pinned chunks (3) and major heap sections (4, 5) both
         * reside in blocks, which are always aligned, so once we've
         * eliminated LOS objects, we can just access the block and
         * see whether it's a pinned chunk or a major heap section.
         */

        objsize = SGEN_ALIGN_UP (safe_object_get_size ((MonoObject*)obj));

        if (G_UNLIKELY (objsize > MAX_SMALL_OBJ_SIZE || obj_is_from_pinned_alloc (obj))) {
                if (object_is_pinned (obj))
                        return;
                DEBUG (9, fprintf (gc_debug_file, " (marked LOS/Pinned %p (%s), size: %zd)\n", obj, safe_name (obj), objsize));
                binary_protocol_pin (obj, (gpointer)LOAD_VTABLE (obj), safe_object_get_size ((MonoObject*)obj));
                pin_object (obj);
                GRAY_OBJECT_ENQUEUE (queue, obj);
                HEAVY_STAT (++stat_major_copy_object_failed_large_pinned);
                return;
        }

        /*
         * Now we know the object is in a major heap section.  All we
         * need to do is check whether it's already in to-space (5) or
         * not (4).
         */
        if (MAJOR_OBJ_IS_IN_TO_SPACE (obj)) {
                DEBUG (9, g_assert (objsize <= MAX_SMALL_OBJ_SIZE));
                DEBUG (9, fprintf (gc_debug_file, " (already copied)\n"));
                HEAVY_STAT (++stat_major_copy_object_failed_to_space);
                return;
        }

 copy:
        HEAVY_STAT (++stat_objects_copied_major);

        *obj_slot = copy_object_no_checks (obj, queue);
}

/* FIXME: later reduce code duplication here with build_nursery_fragments().
 * We don't keep track of section fragments for non-nursery sections yet, so
 * just memset to 0.
 */
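/*
 * After this runs, a section that survives the sweep consists of
 * alternating pinned objects and zeroed gaps:
 *
 *      data: [zeroed gap][pinned obj][zeroed gap][pinned obj]...[zeroed tail]
 *
 * The section's pin queue entries, in address order, delimit the gaps;
 * next_data ends up just past the last pinned object.
 */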
static void
build_section_fragments (GCMemSection *section)
{
        int i;
        char *frag_start, *frag_end;
        size_t frag_size;

        /* clear scan starts */
        memset (section->scan_starts, 0, section->num_scan_start * sizeof (gpointer));
        frag_start = section->data;
        section->next_data = section->data;
        for (i = section->pin_queue_start; i < section->pin_queue_end; ++i) {
                frag_end = pin_queue [i];
                /* remove the pin bit from pinned objects */
                unpin_object (frag_end);
                if (frag_end >= section->data + section->size) {
                        frag_end = section->data + section->size;
                } else {
                        section->scan_starts [((char*)frag_end - (char*)section->data)/SCAN_START_SIZE] = frag_end;
                }
                frag_size = frag_end - frag_start;
                if (frag_size) {
                        binary_protocol_empty (frag_start, frag_size);
                        memset (frag_start, 0, frag_size);
                }
                frag_size = SGEN_ALIGN_UP (safe_object_get_size ((MonoObject*)pin_queue [i]));
                frag_start = (char*)pin_queue [i] + frag_size;
                section->next_data = MAX (section->next_data, frag_start);
        }
        frag_end = section->end_data;
        frag_size = frag_end - frag_start;
        if (frag_size) {
                binary_protocol_empty (frag_start, frag_size);
                memset (frag_start, 0, frag_size);
        }
}

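/*
 * Invoked for every object in the pinned chunks: marked (pinned)
 * objects survive and have their pin bit cleared for the next cycle;
 * unmarked ones are returned to the pinned allocator.
 */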
static void
sweep_pinned_objects_callback (char *ptr, size_t size, void *data)
{
        if (object_is_pinned (ptr)) {
                unpin_object (ptr);
                DEBUG (6, fprintf (gc_debug_file, "Unmarked pinned object %p (%s)\n", ptr, safe_name (ptr)));
        } else {
                DEBUG (6, fprintf (gc_debug_file, "Freeing unmarked pinned object %p (%s)\n", ptr, safe_name (ptr)));
                free_pinned_object (ptr, size);
        }
}

static void
sweep_pinned_objects (void)
{
        mono_sgen_internal_scan_objects (&pinned_allocator, sweep_pinned_objects_callback, NULL);
}

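/*
 * Walk all objects in the major heap: the normal sections, the pinned
 * chunks, or both, depending on the flags.
 */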
static void
major_iterate_objects (gboolean non_pinned, gboolean pinned, IterateObjectCallbackFunc callback, void *data)
{
        if (non_pinned) {
                GCMemSection *section;
                for (section = section_list; section; section = section->block.next)
                        scan_area_with_callback (section->data, section->end_data, callback, data);
        }
        if (pinned)
                mono_sgen_internal_scan_objects (&pinned_allocator, callback, data);
}

static void
major_free_non_pinned_object (char *obj, size_t size)
{
        memset (obj, 0, size);
}

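/*
 * Pin an object that lives in a pinned chunk and push it on the gray
 * queue so its references get scanned.
 */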
static void
pin_pinned_object_callback (void *addr, size_t slot_size, GrayQueue *queue)
{
        binary_protocol_pin (addr, (gpointer)LOAD_VTABLE (addr), safe_object_get_size ((MonoObject*)addr));
        if (heap_dump_file && !object_is_pinned (addr))
                pin_stats_register_object ((char*) addr, safe_object_get_size ((MonoObject*) addr));
        pin_object (addr);
        GRAY_OBJECT_ENQUEUE (queue, addr);
        DEBUG (6, fprintf (gc_debug_file, "Marked pinned object %p (%s) from roots\n", addr, safe_name (addr)));
}

static void
major_find_pin_queue_start_ends (GrayQueue *queue)
{
        GCMemSection *section;

        for (section = section_list; section; section = section->block.next)
                find_section_pin_queue_start_end (section);
        mono_sgen_internal_scan_pinned_objects (&pinned_allocator, (IterateObjectCallbackFunc)pin_pinned_object_callback, queue);
}

static void
major_pin_objects (GrayQueue *queue)
{
        GCMemSection *section;

        for (section = section_list; section; section = section->block.next)
                pin_objects_in_section (section, queue);
}

static void
major_init_to_space (void)
{
        new_to_space_section ();
}

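/*
 * Sweep the old generation: unpin or free pinned-chunk objects, free
 * every section that is neither to-space nor holds pinned objects, and
 * zero out the dead ranges of sections that do hold pinned objects.
 */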
static void
major_sweep (void)
{
        GCMemSection *section, *prev_section;

        to_space_set_next_data ();
        unset_to_space ();

        /* unpin objects from the pinned chunks and free the unmarked ones */
        sweep_pinned_objects ();

        /* free the unused sections */
        prev_section = NULL;
        for (section = section_list; section;) {
                /* to_space doesn't need handling here */
                if (section->is_to_space) {
                        section->is_to_space = FALSE;
                        prev_section = section;
                        section = section->block.next;
                        continue;
                }
                /* no pinned objects, so the section is free */
                if (section->pin_queue_start == section->pin_queue_end) {
                        GCMemSection *to_free;
                        if (prev_section)
                                prev_section->block.next = section->block.next;
                        else
                                section_list = section->block.next;
                        to_free = section;
                        section = section->block.next;
                        free_major_section (to_free);
                        continue;
                } else {
                        DEBUG (6, fprintf (gc_debug_file, "Section %p still has pinned objects (%d)\n", section, section->pin_queue_end - section->pin_queue_start));
                        build_section_fragments (section);
                }
                prev_section = section;
                section = section->block.next;
        }
}

static void
major_check_scan_starts (void)
{
        GCMemSection *section;
        for (section = section_list; section; section = section->block.next)
                check_section_scan_starts (section);
}

static void
major_dump_heap (void)
{
        GCMemSection *section;
        for (section = section_list; section; section = section->block.next)
                dump_section (section, "old");
        /* FIXME: dump pinned sections, too */
}

static gint64
major_get_used_size (void)
{
        gint64 tot = 0;
        GCMemSection *section;
        for (section = section_list; section; section = section->block.next) {
                /* this is approximate... */
                tot += section->next_data - section->data;
        }
        return tot;
}

static void
major_init (void)
{
#ifdef HEAVY_STATISTICS
        mono_counters_register ("# major copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_copy_object_failed_forwarded);
        mono_counters_register ("# major copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_copy_object_failed_pinned);
        mono_counters_register ("# major copy_object() failed large or pinned chunk", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_copy_object_failed_large_pinned);
        mono_counters_register ("# major copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_copy_object_failed_to_space);
#endif
}

/* only valid during minor collections */
static int old_num_major_sections;

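/*
 * Minor collections copy nursery survivors into the major heap's
 * to-space, so make sure a to-space section is set up, reusing the
 * leftover space from the previous collection when possible.
 */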
static void
major_start_nursery_collection (void)
{
        old_num_major_sections = num_major_sections;

        if (!to_space_section) {
                new_to_space_section ();
        } else {
                /* we might have done degraded allocation since the
                   last collection */
                g_assert (to_space_bumper <= to_space_section->next_data);
                to_space_bumper = to_space_section->next_data;

                to_space_section->is_to_space = TRUE;
        }
}

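/*
 * After a minor collection nothing remains to-space: everything copied
 * so far is simply part of the old generation from now on.
 */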
static void
major_finish_nursery_collection (void)
{
        GCMemSection *section;
        int sections_alloced;

        to_space_set_next_data ();

        for (section = section_list; section; section = section->block.next)
                section->is_to_space = FALSE;

        sections_alloced = num_major_sections - old_num_major_sections;
        minor_collection_sections_alloced += sections_alloced;
}

static void
major_finish_major_collection (void)
{
}

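/*
 * TRUE iff ptr points into the data area of some major section; pinned
 * chunks and the LOS are not considered.
 */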
static gboolean
major_ptr_is_in_non_pinned_space (char *ptr)
{
        GCMemSection *section;
        for (section = section_list; section;) {
                if (ptr >= section->data && ptr < section->data + section->size)
                        return TRUE;
                section = section->block.next;
        }
        return FALSE;
}

static void
major_report_pinned_memory_usage (void)
{
        mono_sgen_report_internal_mem_usage_full (&pinned_allocator);
}