1 /*
2  * sgen-gc.c: Simple generational GC.
3  *
4  * Author:
5  *      Paolo Molaro (lupus@ximian.com)
6  *  Rodrigo Kumpera (kumpera@gmail.com)
7  *
8  * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
9  * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
10  *
11  * Thread start/stop adapted from Boehm's GC:
12  * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
13  * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
14  * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
15  * Copyright (c) 2000-2004 by Hewlett-Packard Company.  All rights reserved.
16  * Copyright 2001-2003 Ximian, Inc
17  * Copyright 2003-2010 Novell, Inc.
18  * Copyright 2011 Xamarin, Inc.
19  * Copyright (C) 2012 Xamarin Inc
20  *
21  * This library is free software; you can redistribute it and/or
22  * modify it under the terms of the GNU Library General Public
23  * License 2.0 as published by the Free Software Foundation;
24  *
25  * This library is distributed in the hope that it will be useful,
26  * but WITHOUT ANY WARRANTY; without even the implied warranty of
27  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
28  * Library General Public License for more details.
29  *
30  * You should have received a copy of the GNU Library General Public
31  * License 2.0 along with this library; if not, write to the Free
32  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33  *
34  * Important: allocation always provides zeroed memory; having to do
35  * a memset after allocation is deadly for performance.
36  * Memory usage at startup is currently as follows:
37  * 64 KB pinned space
38  * 64 KB internal space
39  * size of nursery
40  * We should provide a small memory config with half the sizes
41  *
42  * We currently try to make as few mono assumptions as possible:
43  * 1) 2-word header with no GC pointers in it (first vtable, second to store the
44  *    forwarding ptr)
45  * 2) gc descriptor is the second word in the vtable (first word in the class)
46  * 3) 8-byte alignment is the minimum and is enough (not true for special structures (SIMD), FIXME)
47  * 4) there is a function to get an object's size and the number of
48  *    elements in an array.
49  * 5) we know the special way bounds are allocated for complex arrays
50  * 6) we know about proxies and how to treat them when domains are unloaded
51  *
52  * Always try to keep stack usage to a minimum: no recursive behaviour
53  * and no large stack allocs.
54  *
55  * General description.
56  * Objects are initially allocated in a nursery using a fast bump-pointer technique (sketched after this comment block).
57  * When the nursery is full we start a nursery collection: this is performed with a
58  * copying GC.
59  * When the old generation is full we start a copying GC of the old generation as well:
60  * this will be changed to mark&sweep with copying when fragmentation becomes too severe
61  * in the future.  Maybe we'll even do both during the same collection like IMMIX.
62  *
63  * The things that complicate this description are:
64  * *) pinned objects: we can't move them so we need to keep track of them
65  * *) no precise info of the thread stacks and registers: we need to be able to
66  *    quickly find the objects that may be referenced conservatively and pin them
67  *    (this makes the first issue more important)
68  * *) large objects are too expensive to be dealt with using copying GC: we handle them
69  *    with mark/sweep during major collections
70  * *) some objects need to not move even if they are small (interned strings, Type handles):
71  *    we use mark/sweep for them, too: they are not allocated in the nursery, but inside
72  *    PinnedChunks regions
73  */
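
/*
 * For reference, a minimal sketch of the bump-pointer fast path
 * mentioned above (illustrative only; the names tlab_next and
 * tlab_real_end stand in for the per-thread TLAB state the real
 * allocator keeps):
 *
 *      char *p = tlab_next;
 *      char *new_next = p + ALIGN_UP (size);
 *      if (new_next <= tlab_real_end) {
 *              tlab_next = new_next;
 *              return p;       // memory is already zeroed (see above)
 *      }
 *      // otherwise take the slow path: grab a new TLAB or allocate degraded
 */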
74
75 /*
76  * TODO:
77
78  *) we could have a function pointer in MonoClass to implement
79   customized write barriers for value types
80
81  *) investigate the stuff needed to advance a thread to a GC-safe
82   point (single-stepping, read from unmapped memory etc) and implement it.
83   This would enable us to inline allocations and write barriers, for example,
84   or at least parts of them, like the write barrier checks.
85   We may need this also for handling precise info on stacks, even for simple things
86   such as having uninitialized data on the stack and having to wait for the prolog
87   to zero it. Not an issue for the last frame that we scan conservatively.
88   We could always not trust the value in the slots anyway.
89
90  *) modify the jit to save info about references in stack locations:
91   this can be done just for locals as a start, so that at least
92   part of the stack is handled precisely.
93
94  *) test/fix endianness issues
95
96  *) Implement a card table as the write barrier instead of remembered
97     sets?  Card tables are not easy to implement with our current
98     memory layout.  We have several different kinds of major heap
99     objects: Small objects in regular blocks, small objects in pinned
100     chunks and LOS objects.  If we just have a pointer we have no way
101     to tell which kind of object it points into, therefore we cannot
102     know where its card table is.  The least we have to do to make
103     this happen is to get rid of write barriers for indirect stores.
104     (See next item.  An illustrative card-marking sketch follows this TODO list.)
105
106  *) Get rid of write barriers for indirect stores.  We can do this by
107     telling the GC to wbarrier-register an object once we do an ldloca
108     or ldelema on it, and to unregister it once it's not used anymore
109     (it can only travel downwards on the stack).  The problem with
110     unregistering is that it needs to happen eventually no matter
111     what, even if exceptions are thrown, the thread aborts, etc.
112     Rodrigo suggested that we could do only the registering part and
113     let the collector find out (pessimistically) when it's safe to
114     unregister, namely when the stack pointer of the thread that
115     registered the object is higher than it was when the registering
116     happened.  This might make for a good first implementation to get
117     some data on performance.
118
119  *) Some sort of blacklist support?  Blacklists are a concept from the
120     Boehm GC: if during a conservative scan we find pointers to an
121     area which we might use as heap, we mark that area as unusable, so
122     pointer retention by random pinning pointers is reduced.
123
124  *) experiment with max small object size (very small right now - 2kb,
125     because it's tied to the max freelist size)
126
127   *) add an option to mmap the whole heap in one chunk: it makes for many
128      simplifications in the checks (put the nursery at the top and just use a single
129      check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
130      not flexible (too much of the address space may be used by default or we can't
131      increase the heap as needed) and we'd need a race-free mechanism to return memory
132      back to the system (mprotect(PROT_NONE) will still keep the memory allocated if it
133      was written to, munmap is needed, but the following mmap may not find the same segment
134      free...)
135
136  *) memzero the major fragments after restarting the world and optionally a smaller
137     chunk at a time
138
139  *) investigate having fragment zeroing threads
140
141  *) separate locks for finalization and other minor stuff to reduce
142     lock contention
143
144  *) try a different copying order to improve memory locality
145
146  *) a thread abort after a store but before the write barrier will
147     prevent the write barrier from executing
148
149  *) specialized dynamically generated markers/copiers
150
151  *) Dynamically adjust TLAB size to the number of threads.  If we have
152     too many threads that do allocation, we might need smaller TLABs,
153     and we might get better performance with larger TLABs if we only
154     have a handful of threads.  We could sum up the space left in all
155     assigned TLABs and if that's more than some percentage of the
156     nursery size, reduce the TLAB size.
157
158  *) Explore placing unreachable objects on unused nursery memory.
159         Instead of memset'ing a region to zero, place an int[] covering it.
160         A good place to start is add_nursery_frag. The tricky thing here is
161         placing those objects atomically outside of a collection.
162
163  *) Allocation should use asymmetric Dekker synchronization:
164         http://blogs.oracle.com/dave/resource/Asymmetric-Dekker-Synchronization.txt
165         This should help weak consistency archs.
166  */
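
/*
 * For reference, the card table write barrier discussed in the TODO
 * list above amounts to setting one byte in a table, where each byte
 * covers a fixed-size "card" of the heap.  A minimal sketch, assuming
 * 512-byte cards and a table covering the whole address space (the
 * actual implementation lives in sgen-cardtable.c/h):
 *
 *      #define CARD_BITS 9     // 512-byte cards
 *      guint8 *card_table;     // one byte per card
 *
 *      void wbarrier_mark_card (void *field_addr) {
 *              card_table [(mword)field_addr >> CARD_BITS] = 1;
 *      }
 */
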
167 #include "config.h"
168 #ifdef HAVE_SGEN_GC
169
170 #ifdef __MACH__
171 #undef _XOPEN_SOURCE
172 #define _XOPEN_SOURCE
173 #define _DARWIN_C_SOURCE
174 #endif
175
176 #ifdef HAVE_UNISTD_H
177 #include <unistd.h>
178 #endif
179 #ifdef HAVE_PTHREAD_H
180 #include <pthread.h>
181 #endif
182 #ifdef HAVE_PTHREAD_NP_H
183 #include <pthread_np.h>
184 #endif
185 #ifdef HAVE_SEMAPHORE_H
186 #include <semaphore.h>
187 #endif
188 #include <stdio.h>
189 #include <string.h>
190 #include <signal.h>
191 #include <errno.h>
192 #include <assert.h>
193
194 #include "metadata/sgen-gc.h"
195 #include "metadata/metadata-internals.h"
196 #include "metadata/class-internals.h"
197 #include "metadata/gc-internal.h"
198 #include "metadata/object-internals.h"
199 #include "metadata/threads.h"
200 #include "metadata/sgen-cardtable.h"
201 #include "metadata/sgen-protocol.h"
202 #include "metadata/sgen-archdep.h"
203 #include "metadata/sgen-bridge.h"
204 #include "metadata/sgen-memory-governor.h"
205 #include "metadata/sgen-hash-table.h"
206 #include "metadata/mono-gc.h"
207 #include "metadata/method-builder.h"
208 #include "metadata/profiler-private.h"
209 #include "metadata/monitor.h"
210 #include "metadata/threadpool-internals.h"
211 #include "metadata/mempool-internals.h"
212 #include "metadata/marshal.h"
213 #include "metadata/runtime.h"
214 #include "metadata/sgen-cardtable.h"
215 #include "metadata/sgen-pinning.h"
216 #include "metadata/sgen-workers.h"
217 #include "metadata/sgen-layout-stats.h"
218 #include "utils/mono-mmap.h"
219 #include "utils/mono-time.h"
220 #include "utils/mono-semaphore.h"
221 #include "utils/mono-counters.h"
222 #include "utils/mono-proclib.h"
223 #include "utils/mono-memory-model.h"
224 #include "utils/mono-logger-internal.h"
225 #include "utils/dtrace.h"
226
227 #include <mono/utils/mono-logger-internal.h>
228 #include <mono/utils/memcheck.h>
229
230 #if defined(__MACH__)
231 #include "utils/mach-support.h"
232 #endif
233
234 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
235         a = i,
236
237 enum {
238 #include "mono/cil/opcode.def"
239         CEE_LAST
240 };
241
242 #undef OPDEF
243
244 #undef pthread_create
245 #undef pthread_join
246 #undef pthread_detach
247
248 /*
249  * ######################################################################
250  * ########  Types and constants used by the GC.
251  * ######################################################################
252  */
253
254 /* 0 means not initialized, 1 is initialized, -1 means in progress */
255 static int gc_initialized = 0;
256 /* If set, check if we need to do something every X allocations */
257 gboolean has_per_allocation_action;
258 /* If set, do a heap check every X allocations */
259 guint32 verify_before_allocs = 0;
260 /* If set, do a minor collection every X allocations */
261 guint32 collect_before_allocs = 0;
262 /* If set, do a whole heap check before each collection */
263 static gboolean whole_heap_check_before_collection = FALSE;
264 /* If set, do a heap consistency check before each minor collection */
265 static gboolean consistency_check_at_minor_collection = FALSE;
266 /* If set, do a mod union consistency check before each finishing collection pause */
267 static gboolean mod_union_consistency_check = FALSE;
268 /* If set, check whether mark bits are consistent after major collections */
269 static gboolean check_mark_bits_after_major_collection = FALSE;
270 /* If set, check that all nursery objects are pinned/not pinned, depending on context */
271 static gboolean check_nursery_objects_pinned = FALSE;
272 /* If set, do a few checks when the concurrent collector is used */
273 static gboolean do_concurrent_checks = FALSE;
274 /* If set, check that there are no references to the domain left at domain unload */
275 static gboolean xdomain_checks = FALSE;
276 /* If not null, dump the heap after each collection into this file */
277 static FILE *heap_dump_file = NULL;
278 /* If set, mark stacks conservatively, even if precise marking is possible */
279 static gboolean conservative_stack_mark = FALSE;
280 /* If set, do a plausibility check on the scan_starts before and after
281    each collection */
282 static gboolean do_scan_starts_check = FALSE;
283 /*
284  * If the major collector is concurrent and this is FALSE, we will
285  * never initiate a synchronous major collection, unless requested via
286  * GC.Collect().
287  */
288 static gboolean allow_synchronous_major = TRUE;
289 static gboolean nursery_collection_is_parallel = FALSE;
290 static gboolean disable_minor_collections = FALSE;
291 static gboolean disable_major_collections = FALSE;
292 gboolean do_pin_stats = FALSE;
293 static gboolean do_verify_nursery = FALSE;
294 static gboolean do_dump_nursery_content = FALSE;
295
296 #ifdef HEAVY_STATISTICS
297 long long stat_objects_alloced_degraded = 0;
298 long long stat_bytes_alloced_degraded = 0;
299
300 long long stat_copy_object_called_nursery = 0;
301 long long stat_objects_copied_nursery = 0;
302 long long stat_copy_object_called_major = 0;
303 long long stat_objects_copied_major = 0;
304
305 long long stat_scan_object_called_nursery = 0;
306 long long stat_scan_object_called_major = 0;
307
308 long long stat_slots_allocated_in_vain;
309
310 long long stat_nursery_copy_object_failed_from_space = 0;
311 long long stat_nursery_copy_object_failed_forwarded = 0;
312 long long stat_nursery_copy_object_failed_pinned = 0;
313 long long stat_nursery_copy_object_failed_to_space = 0;
314
315 static int stat_wbarrier_add_to_global_remset = 0;
316 static int stat_wbarrier_set_field = 0;
317 static int stat_wbarrier_set_arrayref = 0;
318 static int stat_wbarrier_arrayref_copy = 0;
319 static int stat_wbarrier_generic_store = 0;
320 static int stat_wbarrier_generic_store_atomic = 0;
321 static int stat_wbarrier_set_root = 0;
322 static int stat_wbarrier_value_copy = 0;
323 static int stat_wbarrier_object_copy = 0;
324 #endif
325
326 int stat_minor_gcs = 0;
327 int stat_major_gcs = 0;
328
329 static long long stat_pinned_objects = 0;
330
331 static long long time_minor_pre_collection_fragment_clear = 0;
332 static long long time_minor_pinning = 0;
333 static long long time_minor_scan_remsets = 0;
334 static long long time_minor_scan_pinned = 0;
335 static long long time_minor_scan_registered_roots = 0;
336 static long long time_minor_scan_thread_data = 0;
337 static long long time_minor_finish_gray_stack = 0;
338 static long long time_minor_fragment_creation = 0;
339
340 static long long time_major_pre_collection_fragment_clear = 0;
341 static long long time_major_pinning = 0;
342 static long long time_major_scan_pinned = 0;
343 static long long time_major_scan_registered_roots = 0;
344 static long long time_major_scan_thread_data = 0;
345 static long long time_major_scan_alloc_pinned = 0;
346 static long long time_major_scan_finalized = 0;
347 static long long time_major_scan_big_objects = 0;
348 static long long time_major_finish_gray_stack = 0;
349 static long long time_major_free_bigobjs = 0;
350 static long long time_major_los_sweep = 0;
351 static long long time_major_sweep = 0;
352 static long long time_major_fragment_creation = 0;
353
354 int gc_debug_level = 0;
355 FILE* gc_debug_file;
356
357 /*
358 void
359 mono_gc_flush_info (void)
360 {
361         fflush (gc_debug_file);
362 }
363 */
364
365 #define TV_DECLARE SGEN_TV_DECLARE
366 #define TV_GETTIME SGEN_TV_GETTIME
367 #define TV_ELAPSED SGEN_TV_ELAPSED
368 #define TV_ELAPSED_MS SGEN_TV_ELAPSED_MS
369
370 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
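/* e.g. ALIGN_TO (13, 8) == 16; `align' must be a power of two */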
371
372 NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
373
374 #define object_is_forwarded     SGEN_OBJECT_IS_FORWARDED
375 #define object_is_pinned        SGEN_OBJECT_IS_PINNED
376 #define pin_object              SGEN_PIN_OBJECT
377 #define unpin_object            SGEN_UNPIN_OBJECT
378
379 #define ptr_in_nursery sgen_ptr_in_nursery
380
381 #define LOAD_VTABLE     SGEN_LOAD_VTABLE
382
383 static const char*
384 safe_name (void* obj)
385 {
386         MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
387         return vt->klass->name;
388 }
389
390 #define safe_object_get_size    sgen_safe_object_get_size
391
392 const char*
393 sgen_safe_name (void* obj)
394 {
395         return safe_name (obj);
396 }
397
398 /*
399  * ######################################################################
400  * ########  Global data.
401  * ######################################################################
402  */
403 LOCK_DECLARE (gc_mutex);
404
405 #define SCAN_START_SIZE SGEN_SCAN_START_SIZE
406
407 static mword pagesize = 4096;
408 int degraded_mode = 0;
409
410 static mword bytes_pinned_from_failed_allocation = 0;
411
412 GCMemSection *nursery_section = NULL;
413 static mword lowest_heap_address = ~(mword)0;
414 static mword highest_heap_address = 0;
415
416 LOCK_DECLARE (sgen_interruption_mutex);
417 static LOCK_DECLARE (pin_queue_mutex);
418
419 #define LOCK_PIN_QUEUE mono_mutex_lock (&pin_queue_mutex)
420 #define UNLOCK_PIN_QUEUE mono_mutex_unlock (&pin_queue_mutex)
421
422 typedef struct _FinalizeReadyEntry FinalizeReadyEntry;
423 struct _FinalizeReadyEntry {
424         FinalizeReadyEntry *next;
425         void *object;
426 };
427
428 typedef struct _EphemeronLinkNode EphemeronLinkNode;
429
430 struct _EphemeronLinkNode {
431         EphemeronLinkNode *next;
432         char *array;
433 };
434
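/* An Ephemeron keeps `value' reachable only as long as `key' is reachable; see mark_ephemerons_in_range () and clear_unreachable_ephemerons () below. */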
435 typedef struct {
436        void *key;
437        void *value;
438 } Ephemeron;
439
440 int current_collection_generation = -1;
441 volatile gboolean concurrent_collection_in_progress = FALSE;
442
443 /* objects that are ready to be finalized */
444 static FinalizeReadyEntry *fin_ready_list = NULL;
445 static FinalizeReadyEntry *critical_fin_list = NULL;
446
447 static EphemeronLinkNode *ephemeron_list;
448
449 /* registered roots: the key to the hash is the root start address */
450 /* 
451  * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
452  */
453 SgenHashTable roots_hash [ROOT_TYPE_NUM] = {
454         SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
455         SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
456         SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL)
457 };
458 static mword roots_size = 0; /* amount of memory in the root set */
459
460 #define GC_ROOT_NUM 32
461 typedef struct {
462         int count;              /* must be the first field */
463         void *objects [GC_ROOT_NUM];
464         int root_types [GC_ROOT_NUM];
465         uintptr_t extra_info [GC_ROOT_NUM];
466 } GCRootReport;
467
468 static void
469 notify_gc_roots (GCRootReport *report)
470 {
471         if (!report->count)
472                 return;
473         mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
474         report->count = 0;
475 }
476
477 static void
478 add_profile_gc_root (GCRootReport *report, void *object, int rtype, uintptr_t extra_info)
479 {
480         if (report->count == GC_ROOT_NUM)
481                 notify_gc_roots (report);
482         report->objects [report->count] = object;
483         report->root_types [report->count] = rtype;
484         report->extra_info [report->count++] = (uintptr_t)((MonoVTable*)LOAD_VTABLE (object))->klass;
485 }
486
487 MonoNativeTlsKey thread_info_key;
488
489 #ifdef HAVE_KW_THREAD
490 __thread SgenThreadInfo *sgen_thread_info;
491 __thread char *stack_end;
492 #endif
493
494 /* The size of a TLAB */
495 /* The bigger the value, the less often we have to go to the slow path to allocate a new 
496  * one, but the more space is wasted by threads not allocating much memory.
497  * FIXME: Tune this.
498  * FIXME: Make this self-tuning for each thread.
499  */
500 guint32 tlab_size = (1024 * 4);
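
/*
 * A minimal sketch of the self-tuning heuristic suggested in the TODO
 * list at the top of this file: shrink the TLAB size when too much
 * nursery space sits unused in assigned TLABs.  This helper and its
 * parameters are hypothetical and not called anywhere; it only
 * illustrates the idea.
 */
static inline guint32
illustrative_adjust_tlab_size (guint32 current_size, size_t wasted_tlab_space, size_t nursery_size)
{
        /* if more than 1/32 of the nursery is stranded in TLABs, halve the TLAB size */
        if (wasted_tlab_space > nursery_size / 32)
                return MAX (current_size / 2, 1024);
        return current_size;
}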
501
502 #define MAX_SMALL_OBJ_SIZE      SGEN_MAX_SMALL_OBJ_SIZE
503
504 /* Functions supplied by the runtime to be called by the GC */
505 static MonoGCCallbacks gc_callbacks;
506
507 #define ALLOC_ALIGN             SGEN_ALLOC_ALIGN
508 #define ALLOC_ALIGN_BITS        SGEN_ALLOC_ALIGN_BITS
509
510 #define ALIGN_UP                SGEN_ALIGN_UP
511
512 #define MOVED_OBJECTS_NUM 64
513 static void *moved_objects [MOVED_OBJECTS_NUM];
514 static int moved_objects_idx = 0;
515
516 /* Vtable of the objects used to fill out nursery fragments before a collection */
517 static MonoVTable *array_fill_vtable;
518
519 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
520 MonoNativeThreadId main_gc_thread = NULL;
521 #endif
522
523 /*Object was pinned during the current collection*/
524 static mword objects_pinned;
525
526 /*
527  * ######################################################################
528  * ########  Macros and function declarations.
529  * ######################################################################
530  */
531
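/* e.g. with 8-byte pointers, align_pointer ((void*)0x1005) yields (void*)0x1008 */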
532 inline static void*
533 align_pointer (void *ptr)
534 {
535         mword p = (mword)ptr;
536         p += sizeof (gpointer) - 1;
537         p &= ~ (sizeof (gpointer) - 1);
538         return (void*)p;
539 }
540
541 typedef SgenGrayQueue GrayQueue;
542
543 /* forward declarations */
544 static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue);
545 static void scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx);
546 static void scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx);
547 static void report_finalizer_roots (void);
548 static void report_registered_roots (void);
549
550 static void pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue);
551 static int pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, ScanCopyContext ctx);
552 static void finish_gray_stack (int generation, GrayQueue *queue);
553
554 void mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise);
555
556
557 static void init_stats (void);
558
559 static int mark_ephemerons_in_range (ScanCopyContext ctx);
560 static void clear_unreachable_ephemerons (ScanCopyContext ctx);
561 static void null_ephemerons_for_domain (MonoDomain *domain);
562
563 static gboolean major_update_or_finish_concurrent_collection (gboolean force_finish);
564
565 SgenObjectOperations current_object_ops;
566 SgenMajorCollector major_collector;
567 SgenMinorCollector sgen_minor_collector;
568 static GrayQueue gray_queue;
569
570 static SgenRemeberedSet remset;
571
572 /* The gray queue to use from the main collection thread. */
573 #define WORKERS_DISTRIBUTE_GRAY_QUEUE   (&gray_queue)
574
575 /*
576  * The gray queue a worker job must use.  If we're not parallel or
577  * concurrent, we use the main gray queue.
578  */
579 static SgenGrayQueue*
580 sgen_workers_get_job_gray_queue (WorkerData *worker_data)
581 {
582         return worker_data ? &worker_data->private_gray_queue : WORKERS_DISTRIBUTE_GRAY_QUEUE;
583 }
584
585 static void
586 gray_queue_redirect (SgenGrayQueue *queue)
587 {
588         gboolean wake = FALSE;
589
590
591         for (;;) {
592                 GrayQueueSection *section = sgen_gray_object_dequeue_section (queue);
593                 if (!section)
594                         break;
595                 sgen_section_gray_queue_enqueue (queue->alloc_prepare_data, section);
596                 wake = TRUE;
597         }
598
599         if (wake) {
600                 g_assert (concurrent_collection_in_progress ||
601                                 (current_collection_generation == GENERATION_OLD && major_collector.is_parallel));
602                 if (sgen_workers_have_started ()) {
603                         sgen_workers_wake_up_all ();
604                 } else {
605                         if (concurrent_collection_in_progress)
606                                 g_assert (current_collection_generation == -1);
607                 }
608         }
609 }
610
611 static gboolean
612 is_xdomain_ref_allowed (gpointer *ptr, char *obj, MonoDomain *domain)
613 {
614         MonoObject *o = (MonoObject*)(obj);
615         MonoObject *ref = (MonoObject*)*(ptr);
616         int offset = (char*)(ptr) - (char*)o;
617
618         if (o->vtable->klass == mono_defaults.thread_class && offset == G_STRUCT_OFFSET (MonoThread, internal_thread))
619                 return TRUE;
620         if (o->vtable->klass == mono_defaults.internal_thread_class && offset == G_STRUCT_OFFSET (MonoInternalThread, current_appcontext))
621                 return TRUE;
622
623 #ifndef DISABLE_REMOTING
624         if (mono_class_has_parent_fast (o->vtable->klass, mono_defaults.real_proxy_class) &&
625                         offset == G_STRUCT_OFFSET (MonoRealProxy, unwrapped_server))
626                 return TRUE;
627 #endif
628         /* Thread.cached_culture_info */
629         if (!strcmp (ref->vtable->klass->name_space, "System.Globalization") &&
630                         !strcmp (ref->vtable->klass->name, "CultureInfo") &&
631                         !strcmp(o->vtable->klass->name_space, "System") &&
632                         !strcmp(o->vtable->klass->name, "Object[]"))
633                 return TRUE;
634         /*
635          *  at System.IO.MemoryStream.InternalConstructor (byte[],int,int,bool,bool) [0x0004d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:121
636          * at System.IO.MemoryStream..ctor (byte[]) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:81
637          * at (wrapper remoting-invoke-with-check) System.IO.MemoryStream..ctor (byte[]) <IL 0x00020, 0xffffffff>
638          * at System.Runtime.Remoting.Messaging.CADMethodCallMessage.GetArguments () [0x0000d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/CADMessages.cs:327
639          * at System.Runtime.Remoting.Messaging.MethodCall..ctor (System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/MethodCall.cs:87
640          * at System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) [0x00018] in /home/schani/Work/novell/trunk/mcs/class/corlib/System/AppDomain.cs:1213
641          * at (wrapper remoting-invoke-with-check) System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) <IL 0x0003d, 0xffffffff>
642          * at System.Runtime.Remoting.Channels.CrossAppDomainSink.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00008] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Channels/CrossAppDomainChannel.cs:198
643          * at (wrapper runtime-invoke) object.runtime_invoke_CrossAppDomainSink/ProcessMessageRes_object_object (object,intptr,intptr,intptr) <IL 0x0004c, 0xffffffff>
644          */
645         if (!strcmp (ref->vtable->klass->name_space, "System") &&
646                         !strcmp (ref->vtable->klass->name, "Byte[]") &&
647                         !strcmp (o->vtable->klass->name_space, "System.IO") &&
648                         !strcmp (o->vtable->klass->name, "MemoryStream"))
649                 return TRUE;
650         /* append_job() in threadpool.c */
651         if (!strcmp (ref->vtable->klass->name_space, "System.Runtime.Remoting.Messaging") &&
652                         !strcmp (ref->vtable->klass->name, "AsyncResult") &&
653                         !strcmp (o->vtable->klass->name_space, "System") &&
654                         !strcmp (o->vtable->klass->name, "Object[]") &&
655                         mono_thread_pool_is_queue_array ((MonoArray*) o))
656                 return TRUE;
657         return FALSE;
658 }
659
660 static void
661 check_reference_for_xdomain (gpointer *ptr, char *obj, MonoDomain *domain)
662 {
663         MonoObject *o = (MonoObject*)(obj);
664         MonoObject *ref = (MonoObject*)*(ptr);
665         int offset = (char*)(ptr) - (char*)o;
666         MonoClass *class;
667         MonoClassField *field;
668         char *str;
669
670         if (!ref || ref->vtable->domain == domain)
671                 return;
672         if (is_xdomain_ref_allowed (ptr, obj, domain))
673                 return;
674
675         field = NULL;
676         for (class = o->vtable->klass; class; class = class->parent) {
677                 int i;
678
679                 for (i = 0; i < class->field.count; ++i) {
680                         if (class->fields[i].offset == offset) {
681                                 field = &class->fields[i];
682                                 break;
683                         }
684                 }
685                 if (field)
686                         break;
687         }
688
689         if (ref->vtable->klass == mono_defaults.string_class)
690                 str = mono_string_to_utf8 ((MonoString*)ref);
691         else
692                 str = NULL;
693         g_print ("xdomain reference in %p (%s.%s) at offset %d (%s) to %p (%s.%s) (%s)  -  pointed to by:\n",
694                         o, o->vtable->klass->name_space, o->vtable->klass->name,
695                         offset, field ? field->name : "",
696                         ref, ref->vtable->klass->name_space, ref->vtable->klass->name, str ? str : "");
697         mono_gc_scan_for_specific_ref (o, TRUE);
698         if (str)
699                 g_free (str);
700 }
701
702 #undef HANDLE_PTR
703 #define HANDLE_PTR(ptr,obj)     check_reference_for_xdomain ((ptr), (obj), domain)
704
705 static void
706 scan_object_for_xdomain_refs (char *start, mword size, void *data)
707 {
708         MonoDomain *domain = ((MonoObject*)start)->vtable->domain;
709
710         #include "sgen-scan-object.h"
711 }
712
713 static gboolean scan_object_for_specific_ref_precise = TRUE;
714
715 #undef HANDLE_PTR
716 #define HANDLE_PTR(ptr,obj) do {                \
717         if ((MonoObject*)*(ptr) == key) {       \
718         g_print ("found ref to %p in object %p (%s) at offset %td\n",   \
719                         key, (obj), safe_name ((obj)), ((char*)(ptr) - (char*)(obj))); \
720         }                                                               \
721         } while (0)
722
723 static void
724 scan_object_for_specific_ref (char *start, MonoObject *key)
725 {
726         char *forwarded;
727
728         if ((forwarded = SGEN_OBJECT_IS_FORWARDED (start)))
729                 start = forwarded;
730
731         if (scan_object_for_specific_ref_precise) {
732                 #include "sgen-scan-object.h"
733         } else {
734                 mword *words = (mword*)start;
735                 size_t size = safe_object_get_size ((MonoObject*)start);
736                 int i;
737                 for (i = 0; i < size / sizeof (mword); ++i) {
738                         if (words [i] == (mword)key) {
739                                 g_print ("found possible ref to %p in object %p (%s) at offset %td\n",
740                                                 key, start, safe_name (start), i * sizeof (mword));
741                         }
742                 }
743         }
744 }
745
746 void
747 sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags)
748 {
749         while (start < end) {
750                 size_t size;
751                 char *obj;
752
753                 if (!*(void**)start) {
754                         start += sizeof (void*); /* should be ALLOC_ALIGN, really */
755                         continue;
756                 }
757
758                 if (allow_flags) {
759                         if (!(obj = SGEN_OBJECT_IS_FORWARDED (start)))
760                                 obj = start;
761                 } else {
762                         obj = start;
763                 }
764
765                 size = ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
766
767                 if ((MonoVTable*)SGEN_LOAD_VTABLE (obj) != array_fill_vtable)
768                         callback (obj, size, data);
769
770                 start += size;
771         }
772 }
773
774 static void
775 scan_object_for_specific_ref_callback (char *obj, size_t size, MonoObject *key)
776 {
777         scan_object_for_specific_ref (obj, key);
778 }
779
780 static void
781 check_root_obj_specific_ref (RootRecord *root, MonoObject *key, MonoObject *obj)
782 {
783         if (key != obj)
784                 return;
785         g_print ("found ref to %p in root record %p\n", key, root);
786 }
787
788 static MonoObject *check_key = NULL;
789 static RootRecord *check_root = NULL;
790
791 static void
792 check_root_obj_specific_ref_from_marker (void **obj)
793 {
794         check_root_obj_specific_ref (check_root, check_key, *obj);
795 }
796
797 static void
798 scan_roots_for_specific_ref (MonoObject *key, int root_type)
799 {
800         void **start_root;
801         RootRecord *root;
802         check_key = key;
803
804         SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
805                 mword desc = root->root_desc;
806
807                 check_root = root;
808
809                 switch (desc & ROOT_DESC_TYPE_MASK) {
810                 case ROOT_DESC_BITMAP:
811                         desc >>= ROOT_DESC_TYPE_SHIFT;
812                         while (desc) {
813                                 if (desc & 1)
814                                         check_root_obj_specific_ref (root, key, *start_root);
815                                 desc >>= 1;
816                                 start_root++;
817                         }
818                         return;
819                 case ROOT_DESC_COMPLEX: {
820                         gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
821                         int bwords = (*bitmap_data) - 1;
822                         void **start_run = start_root;
823                         bitmap_data++;
824                         while (bwords-- > 0) {
825                                 gsize bmap = *bitmap_data++;
826                                 void **objptr = start_run;
827                                 while (bmap) {
828                                         if (bmap & 1)
829                                                 check_root_obj_specific_ref (root, key, *objptr);
830                                         bmap >>= 1;
831                                         ++objptr;
832                                 }
833                                 start_run += GC_BITS_PER_WORD;
834                         }
835                         break;
836                 }
837                 case ROOT_DESC_USER: {
838                         MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
839                         marker (start_root, check_root_obj_specific_ref_from_marker);
840                         break;
841                 }
842                 case ROOT_DESC_RUN_LEN:
843                         g_assert_not_reached ();
844                 default:
845                         g_assert_not_reached ();
846                 }
847         } SGEN_HASH_TABLE_FOREACH_END;
848
849         check_key = NULL;
850         check_root = NULL;
851 }
852
853 void
854 mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise)
855 {
856         void **ptr;
857         RootRecord *root;
858
859         scan_object_for_specific_ref_precise = precise;
860
861         sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
862                         (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key, TRUE);
863
864         major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
865
866         sgen_los_iterate_objects ((IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
867
868         scan_roots_for_specific_ref (key, ROOT_TYPE_NORMAL);
869         scan_roots_for_specific_ref (key, ROOT_TYPE_WBARRIER);
870
871         SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], ptr, root) {
872                 while (ptr < (void**)root->end_root) {
873                         check_root_obj_specific_ref (root, *ptr, key);
874                         ++ptr;
875                 }
876         } SGEN_HASH_TABLE_FOREACH_END;
877 }
878
879 static gboolean
880 need_remove_object_for_domain (char *start, MonoDomain *domain)
881 {
882         if (mono_object_domain (start) == domain) {
883                 SGEN_LOG (4, "Need to cleanup object %p", start);
884                 binary_protocol_cleanup (start, (gpointer)LOAD_VTABLE (start), safe_object_get_size ((MonoObject*)start));
885                 return TRUE;
886         }
887         return FALSE;
888 }
889
890 static void
891 process_object_for_domain_clearing (char *start, MonoDomain *domain)
892 {
893         GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
894         if (vt->klass == mono_defaults.internal_thread_class)
895                 g_assert (mono_object_domain (start) == mono_get_root_domain ());
896         /* The object could be a proxy for an object in the domain
897            we're deleting. */
898 #ifndef DISABLE_REMOTING
899         if (mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
900                 MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
901
902                 /* The server could already have been zeroed out, so
903                    we need to check for that, too. */
904                 if (server && (!LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
905                         SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
906                         ((MonoRealProxy*)start)->unwrapped_server = NULL;
907                 }
908         }
909 #endif
910 }
911
912 static MonoDomain *check_domain = NULL;
913
914 static void
915 check_obj_not_in_domain (void **o)
916 {
917         g_assert (((MonoObject*)(*o))->vtable->domain != check_domain);
918 }
919
920 static void
921 scan_for_registered_roots_in_domain (MonoDomain *domain, int root_type)
922 {
923         void **start_root;
924         RootRecord *root;
925         check_domain = domain;
926         SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
927                 mword desc = root->root_desc;
928
929                 /* The MonoDomain struct is allowed to hold
930                    references to objects in its own domain. */
931                 if (start_root == (void**)domain)
932                         continue;
933
934                 switch (desc & ROOT_DESC_TYPE_MASK) {
935                 case ROOT_DESC_BITMAP:
936                         desc >>= ROOT_DESC_TYPE_SHIFT;
937                         while (desc) {
938                                 if ((desc & 1) && *start_root)
939                                         check_obj_not_in_domain (*start_root);
940                                 desc >>= 1;
941                                 start_root++;
942                         }
943                         break;
944                 case ROOT_DESC_COMPLEX: {
945                         gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
946                         int bwords = (*bitmap_data) - 1;
947                         void **start_run = start_root;
948                         bitmap_data++;
949                         while (bwords-- > 0) {
950                                 gsize bmap = *bitmap_data++;
951                                 void **objptr = start_run;
952                                 while (bmap) {
953                                         if ((bmap & 1) && *objptr)
954                                                 check_obj_not_in_domain (*objptr);
955                                         bmap >>= 1;
956                                         ++objptr;
957                                 }
958                                 start_run += GC_BITS_PER_WORD;
959                         }
960                         break;
961                 }
962                 case ROOT_DESC_USER: {
963                         MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
964                         marker (start_root, check_obj_not_in_domain);
965                         break;
966                 }
967                 case ROOT_DESC_RUN_LEN:
968                         g_assert_not_reached ();
969                 default:
970                         g_assert_not_reached ();
971                 }
972         } SGEN_HASH_TABLE_FOREACH_END;
973
974         check_domain = NULL;
975 }
976
977 static void
978 check_for_xdomain_refs (void)
979 {
980         LOSObject *bigobj;
981
982         sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
983                         (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL, FALSE);
984
985         major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);
986
987         for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
988                 scan_object_for_xdomain_refs (bigobj->data, sgen_los_object_size (bigobj), NULL);
989 }
990
991 static gboolean
992 clear_domain_process_object (char *obj, MonoDomain *domain)
993 {
994         gboolean remove;
995
996         process_object_for_domain_clearing (obj, domain);
997         remove = need_remove_object_for_domain (obj, domain);
998
999         if (remove && ((MonoObject*)obj)->synchronisation) {
1000                 void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
1001                 if (dislink)
1002                         sgen_register_disappearing_link (NULL, dislink, FALSE, TRUE);
1003         }
1004
1005         return remove;
1006 }
1007
1008 static void
1009 clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
1010 {
1011         if (clear_domain_process_object (obj, domain))
1012                 memset (obj, 0, size);
1013 }
1014
1015 static void
1016 clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
1017 {
1018         clear_domain_process_object (obj, domain);
1019 }
1020
1021 static void
1022 clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
1023 {
1024         if (need_remove_object_for_domain (obj, domain))
1025                 major_collector.free_non_pinned_object (obj, size);
1026 }
1027
1028 static void
1029 clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
1030 {
1031         if (need_remove_object_for_domain (obj, domain))
1032                 major_collector.free_pinned_object (obj, size);
1033 }
1034
1035 /*
1036  * When appdomains are unloaded we can easily remove objects that have finalizers,
1037  * but all the others could still be present in random places on the heap.
1038  * We need a sweep to get rid of them even though it's going to be costly
1039  * with big heaps.
1040  * The reason we need to remove them is that we access the vtable and class
1041  * structures to know the object size and the reference bitmap: once the domain is
1042  * unloaded they point to random memory.
1043  */
1044 void
1045 mono_gc_clear_domain (MonoDomain * domain)
1046 {
1047         LOSObject *bigobj, *prev;
1048         int i;
1049
1050         LOCK_GC;
1051
1052         binary_protocol_domain_unload_begin (domain);
1053
1054         sgen_stop_world (0);
1055
1056         if (concurrent_collection_in_progress)
1057                 sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE);
1058         g_assert (!concurrent_collection_in_progress);
1059
1060         sgen_process_fin_stage_entries ();
1061         sgen_process_dislink_stage_entries ();
1062
1063         sgen_clear_nursery_fragments ();
1064
1065         if (xdomain_checks && domain != mono_get_root_domain ()) {
1066                 scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
1067                 scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
1068                 check_for_xdomain_refs ();
1069         }
1070
1071         /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
1072         to memory returned to the OS.*/
1073         null_ephemerons_for_domain (domain);
1074
1075         for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
1076                 sgen_null_links_for_domain (domain, i);
1077
1078         for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
1079                 sgen_remove_finalizers_for_domain (domain, i);
1080
1081         sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
1082                         (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE);
1083
1084         /* We need two passes over major and large objects because
1085            freeing such objects might give their memory back to the OS
1086            (in the case of large objects) or obliterate their vtables
1087            (pinned objects with major-copying or pinned and non-pinned
1088            objects with major-mark&sweep), but we might need to
1089            dereference a pointer from an object to another object if
1090            the first object is a proxy. */
1091         major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
1092         for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
1093                 clear_domain_process_object (bigobj->data, domain);
1094
1095         prev = NULL;
1096         for (bigobj = los_object_list; bigobj;) {
1097                 if (need_remove_object_for_domain (bigobj->data, domain)) {
1098                         LOSObject *to_free = bigobj;
1099                         if (prev)
1100                                 prev->next = bigobj->next;
1101                         else
1102                                 los_object_list = bigobj->next;
1103                         bigobj = bigobj->next;
1104                         SGEN_LOG (4, "Freeing large object %p", to_free->data);
1105                         sgen_los_free_object (to_free);
1106                         continue;
1107                 }
1108                 prev = bigobj;
1109                 bigobj = bigobj->next;
1110         }
1111         major_collector.iterate_objects (TRUE, FALSE, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
1112         major_collector.iterate_objects (FALSE, TRUE, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
1113
1114         if (domain == mono_get_root_domain ()) {
1115                 if (G_UNLIKELY (do_pin_stats))
1116                         sgen_pin_stats_print_class_stats ();
1117                 sgen_object_layout_dump (stdout);
1118         }
1119
1120         sgen_restart_world (0, NULL);
1121
1122         binary_protocol_domain_unload_end (domain);
1123
1124         UNLOCK_GC;
1125 }
1126
1127 /*
1128  * sgen_add_to_global_remset:
1129  *
1130  *   The global remset contains locations which point into newspace after
1131  * a minor collection. This can happen if the objects they point to are pinned.
1132  *
1133  * LOCKING: If called from a parallel collector, the global remset
1134  * lock must be held.  For serial collectors that is not necessary.
1135  */
1136 void
1137 sgen_add_to_global_remset (gpointer ptr, gpointer obj)
1138 {
1139         SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Target pointer of global remset must be in the nursery");
1140
1141         HEAVY_STAT (++stat_wbarrier_add_to_global_remset);
1142
1143         if (!major_collector.is_concurrent) {
1144                 SGEN_ASSERT (5, current_collection_generation != -1, "Global remsets can only be added during collections");
1145         } else {
1146                 if (current_collection_generation == -1)
1147                         SGEN_ASSERT (5, sgen_concurrent_collection_in_progress (), "Global remsets outside of collection pauses can only be added by the concurrent collector");
1148         }
1149
1150         if (!object_is_pinned (obj))
1151                 SGEN_ASSERT (5, sgen_minor_collector.is_split || sgen_concurrent_collection_in_progress (), "Non-pinned objects can only remain in nursery if it is a split nursery");
1152         else if (sgen_cement_lookup_or_register (obj))
1153                 return;
1154
1155         remset.record_pointer (ptr);
1156
1157         if (G_UNLIKELY (do_pin_stats))
1158                 sgen_pin_stats_register_global_remset (obj);
1159
1160         SGEN_LOG (8, "Adding global remset for %p", ptr);
1161         binary_protocol_global_remset (ptr, obj, (gpointer)SGEN_LOAD_VTABLE (obj));
1162
1163
1164 #ifdef ENABLE_DTRACE
1165         if (G_UNLIKELY (MONO_GC_GLOBAL_REMSET_ADD_ENABLED ())) {
1166                 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
1167                 MONO_GC_GLOBAL_REMSET_ADD ((mword)ptr, (mword)obj, sgen_safe_object_get_size (obj),
1168                                 vt->klass->name_space, vt->klass->name);
1169         }
1170 #endif
1171 }
1172
1173 /*
1174  * sgen_drain_gray_stack:
1175  *
1176  *   Scan objects in the gray stack until the stack is empty. This should be called
1177  * frequently after each object is copied, to achieve better locality and cache
1178  * usage.
1179  */
1180 gboolean
1181 sgen_drain_gray_stack (int max_objs, ScanCopyContext ctx)
1182 {
1183         char *obj;
1184         ScanObjectFunc scan_func = ctx.scan_func;
1185         GrayQueue *queue = ctx.queue;
1186
1187         if (max_objs == -1) {
1188                 for (;;) {
1189                         GRAY_OBJECT_DEQUEUE (queue, obj);
1190                         if (!obj)
1191                                 return TRUE;
1192                         SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, safe_name (obj));
1193                         scan_func (obj, queue);
1194                 }
1195         } else {
1196                 int i;
1197
1198                 do {
1199                         for (i = 0; i != max_objs; ++i) {
1200                                 GRAY_OBJECT_DEQUEUE (queue, obj);
1201                                 if (!obj)
1202                                         return TRUE;
1203                                 SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, safe_name (obj));
1204                                 scan_func (obj, queue);
1205                         }
1206                 } while (max_objs < 0);
1207                 return FALSE;
1208         }
1209 }
1210
1211 /*
1212  * Addresses from start to end are already sorted. This function finds
1213  * the object header for each address and pins the object. The
1214  * addresses must be inside the passed section.  The (start of the)
1215  * address array is overwritten with the addresses of the actually
1216  * pinned objects.  Return the number of pinned objects.
1217  */
1218 static int
1219 pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, ScanCopyContext ctx)
1220 {
1221         void *last = NULL;
1222         int count = 0;
1223         void *search_start;
1224         void *last_obj = NULL;
1225         size_t last_obj_size = 0;
1226         void *addr;
1227         int idx;
1228         void **definitely_pinned = start;
1229         ScanObjectFunc scan_func = ctx.scan_func;
1230         SgenGrayQueue *queue = ctx.queue;
1231
1232         sgen_nursery_allocator_prepare_for_pinning ();
1233
1234         while (start < end) {
1235                 addr = *start;
1236                 /* the range check should be redundant */
1237                 if (addr != last && addr >= start_nursery && addr < end_nursery) {
1238                         SGEN_LOG (5, "Considering pinning addr %p", addr);
1239                         /* multiple pointers to the same object */
1240                         if (addr >= last_obj && (char*)addr < (char*)last_obj + last_obj_size) {
1241                                 start++;
1242                                 continue;
1243                         }
1244                         idx = ((char*)addr - (char*)section->data) / SCAN_START_SIZE;
1245                         g_assert (idx < section->num_scan_start);
1246                         search_start = (void*)section->scan_starts [idx];
1247                         if (!search_start || search_start > addr) {
1248                                 while (idx) {
1249                                         --idx;
1250                                         search_start = section->scan_starts [idx];
1251                                         if (search_start && search_start <= addr)
1252                                                 break;
1253                                 }
1254                                 if (!search_start || search_start > addr)
1255                                         search_start = start_nursery;
1256                         }
1257                         if (search_start < last_obj)
1258                                 search_start = (char*)last_obj + last_obj_size;
1259                         /* now addr should be in an object a short distance from search_start
1260                          * Note that search_start must point to zeroed mem or point to an object.
1261                          */
1262
1263                         do {
1264                                 if (!*(void**)search_start) {
1265                                         /* Consistency check */
1266                                         /*
1267                                         for (frag = nursery_fragments; frag; frag = frag->next) {
1268                                                 if (search_start >= frag->fragment_start && search_start < frag->fragment_end)
1269                                                         g_assert_not_reached ();
1270                                         }
1271                                         */
1272
1273                                         search_start = (void*)ALIGN_UP ((mword)search_start + sizeof (gpointer));
1274                                         continue;
1275                                 }
1276                                 last_obj = search_start;
1277                                 last_obj_size = ALIGN_UP (safe_object_get_size ((MonoObject*)search_start));
1278
1279                                 if (((MonoObject*)last_obj)->synchronisation == GINT_TO_POINTER (-1)) {
1280                                         /* Marks the beginning of a nursery fragment, skip */
1281                                 } else {
1282                                         SGEN_LOG (8, "Pinned try match %p (%s), size %zd", last_obj, safe_name (last_obj), last_obj_size);
1283                                         if (addr >= search_start && (char*)addr < (char*)last_obj + last_obj_size) {
1284                                                 if (scan_func) {
1285                                                         scan_func (search_start, queue);
1286                                                 } else {
1287                                                         SGEN_LOG (4, "Pinned object %p, vtable %p (%s), count %d\n",
1288                                                                         search_start, *(void**)search_start, safe_name (search_start), count);
1289                                                         binary_protocol_pin (search_start,
1290                                                                         (gpointer)LOAD_VTABLE (search_start),
1291                                                                         safe_object_get_size (search_start));
1292
1293 #ifdef ENABLE_DTRACE
1294                                                         if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
1295                                                                 int gen = sgen_ptr_in_nursery (search_start) ? GENERATION_NURSERY : GENERATION_OLD;
1296                                                                 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (search_start);
1297                                                                 MONO_GC_OBJ_PINNED ((mword)search_start,
1298                                                                                 sgen_safe_object_get_size (search_start),
1299                                                                                 vt->klass->name_space, vt->klass->name, gen);
1300                                                         }
1301 #endif
1302
1303                                                         pin_object (search_start);
1304                                                         GRAY_OBJECT_ENQUEUE (queue, search_start);
1305                                                         if (G_UNLIKELY (do_pin_stats))
1306                                                                 sgen_pin_stats_register_object (search_start, last_obj_size);
1307                                                         definitely_pinned [count] = search_start;
1308                                                         count++;
1309                                                 }
1310                                                 break;
1311                                         }
1312                                 }
1313                                 /* skip to the next object */
1314                                 search_start = (void*)((char*)search_start + last_obj_size);
1315                         } while (search_start <= addr);
1316                         /* we either pinned the correct object or we ignored the addr because
1317                          * it points to unused zeroed memory.
1318                          */
1319                         last = addr;
1320                 }
1321                 start++;
1322         }
1323         //printf ("effective pinned: %d (at the end: %d)\n", count, (char*)end_nursery - (char*)last);
1324         if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
1325                 GCRootReport report;
1326                 report.count = 0;
1327                 for (idx = 0; idx < count; ++idx)
1328                         add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
1329                 notify_gc_roots (&report);
1330         }
1331         stat_pinned_objects += count;
1332         return count;
1333 }
1334
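     /*
      * Reduce the section's staged pin queue to the entries that actually hit live
      * objects: pin_objects_from_addresses () pins those objects and returns the
      * compacted count, which becomes the new pin_queue_num_entries.
      */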
1335 void
1336 sgen_pin_objects_in_section (GCMemSection *section, ScanCopyContext ctx)
1337 {
1338         int num_entries = section->pin_queue_num_entries;
1339         if (num_entries) {
1340                 void **start = section->pin_queue_start;
1341                 int reduced_to;
1342                 reduced_to = pin_objects_from_addresses (section, start, start + num_entries,
1343                                 section->data, section->next_data, ctx);
1344                 section->pin_queue_num_entries = reduced_to;
1345                 if (!reduced_to)
1346                         section->pin_queue_start = NULL;
1347         }
1348 }
1349
1350
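     /*
      * Pin a single object: mark it pinned (SGEN_PIN_OBJECT), stage the pointer on
      * the pin queue and push the object on the gray queue so it still gets
      * scanned.  In the parallel case the object already arrives pinned, so only
      * the staging happens here, under the pin-queue lock.
      */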
1351 void
1352 sgen_pin_object (void *object, GrayQueue *queue)
1353 {
1354         g_assert (!concurrent_collection_in_progress);
1355
1356         if (sgen_collection_is_parallel ()) {
1357                 LOCK_PIN_QUEUE;
1358                 /*object arrives pinned*/
1359                 sgen_pin_stage_ptr (object);
1360                 ++objects_pinned;
1361                 UNLOCK_PIN_QUEUE;
1362         } else {
1363                 SGEN_PIN_OBJECT (object);
1364                 sgen_pin_stage_ptr (object);
1365                 ++objects_pinned;
1366                 if (G_UNLIKELY (do_pin_stats))
1367                         sgen_pin_stats_register_object (object, safe_object_get_size (object));
1368         }
1369         GRAY_OBJECT_ENQUEUE (queue, object);
1370         binary_protocol_pin (object, (gpointer)LOAD_VTABLE (object), safe_object_get_size (object));
1371
1372 #ifdef ENABLE_DTRACE
1373         if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
1374                 int gen = sgen_ptr_in_nursery (object) ? GENERATION_NURSERY : GENERATION_OLD;
1375                 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (object);
1376                 MONO_GC_OBJ_PINNED ((mword)object, sgen_safe_object_get_size (object), vt->klass->name_space, vt->klass->name, gen);
1377         }
1378 #endif
1379 }
1380
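     /*
      * Parallel-safe pin-or-update: try to CAS the pinned bit into the nursery
      * object's vtable word; if we lose the race because another worker forwarded
      * the object in the meantime, update *ptr to the new location instead.
      * Major-heap objects are handed to major_collector.pin_major_object ().
      */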
1381 void
1382 sgen_parallel_pin_or_update (void **ptr, void *obj, MonoVTable *vt, SgenGrayQueue *queue)
1383 {
1384         for (;;) {
1385                 mword vtable_word;
1386                 gboolean major_pinned = FALSE;
1387
1388                 if (sgen_ptr_in_nursery (obj)) {
1389                         if (SGEN_CAS_PTR (obj, (void*)((mword)vt | SGEN_PINNED_BIT), vt) == vt) {
1390                                 sgen_pin_object (obj, queue);
1391                                 break;
1392                         }
1393                 } else {
1394                         major_collector.pin_major_object (obj, queue);
1395                         major_pinned = TRUE;
1396                 }
1397
1398                 vtable_word = *(mword*)obj;
1399                 /*someone else forwarded it, update the pointer and bail out*/
1400                 if (vtable_word & SGEN_FORWARDED_BIT) {
1401                         *ptr = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1402                         break;
1403                 }
1404
1405                 /*someone pinned it, nothing to do.*/
1406                 if (vtable_word & SGEN_PINNED_BIT || major_pinned)
1407                         break;
1408         }
1409 }
1410
1411 /* Sort the addresses in array in increasing order.
1412  * Done using a by-the-book heap sort, which has decent, stable performance and is fairly cache efficient.
1413  */
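     /*
      * Phase one builds a max-heap by sifting each new element up; phase two
      * repeatedly swaps the maximum to the end of the unsorted prefix and sifts
      * the new root back down.
      */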
1414 void
1415 sgen_sort_addresses (void **array, int size)
1416 {
1417         int i;
1418         void *tmp;
1419
1420         for (i = 1; i < size; ++i) {
1421                 int child = i;
1422                 while (child > 0) {
1423                         int parent = (child - 1) / 2;
1424
1425                         if (array [parent] >= array [child])
1426                                 break;
1427
1428                         tmp = array [parent];
1429                         array [parent] = array [child];
1430                         array [child] = tmp;
1431
1432                         child = parent;
1433                 }
1434         }
1435
1436         for (i = size - 1; i > 0; --i) {
1437                 int end, root;
1438                 tmp = array [i];
1439                 array [i] = array [0];
1440                 array [0] = tmp;
1441
1442                 end = i - 1;
1443                 root = 0;
1444
1445                 while (root * 2 + 1 <= end) {
1446                         int child = root * 2 + 1;
1447
1448                         if (child < end && array [child] < array [child + 1])
1449                                 ++child;
1450                         if (array [root] >= array [child])
1451                                 break;
1452
1453                         tmp = array [root];
1454                         array [root] = array [child];
1455                         array [child] = tmp;
1456
1457                         root = child;
1458                 }
1459         }
1460 }
1461
1462 /* 
1463  * Scan the memory between start and end and queue values which could be pointers
1464  * to the area between start_nursery and end_nursery for later consideration.
1465  * Typically used for thread stacks.
1466  */
1467 static void
1468 conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type)
1469 {
1470         int count = 0;
1471
1472 #ifdef VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE
1473         VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE (start, (char*)end - (char*)start);
1474 #endif
1475
1476         while (start < end) {
1477                 if (*start >= start_nursery && *start < end_nursery) {
1478                         /*
1479                          * *start can point to the middle of an object
1480                          * note: should we handle pointing at the end of an object?
1481                          * pinning in C# code disallows pointing at the end of an object
1482                          * but there is some small chance that an optimizing C compiler
1483                          * may keep the only reference to an object by pointing
1484                          * at the end of it. We ignore this small chance for now.
1485                          * Pointers to the end of an object are indistinguishable
1486                          * from pointers to the start of the next object in memory
1487                          * so if we allow that we'd need to pin two objects...
1488                          * We queue the pointer in an array, the
1489                          * array will then be sorted and uniqued. This way
1490                          * we can coalesce several pinning pointers and it should
1491                          * be faster since we'd do a memory scan with increasing
1492                          * addresses. Note: we can align the address to the allocation
1493                          * alignment, so the uniquing process is more effective.
1494                          */
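                              /*
                               * Illustrative example: with ALLOC_ALIGN == 8, an interior pointer such
                               * as 0x100c is rounded down to 0x1008 before being staged, so several
                               * pointers into the same aligned granule collapse into one entry after
                               * the sort/unique pass.
                               */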
1495                         mword addr = (mword)*start;
1496                         addr &= ~(ALLOC_ALIGN - 1);
1497                         if (addr >= (mword)start_nursery && addr < (mword)end_nursery) {
1498                                 SGEN_LOG (6, "Pinning address %p from %p", (void*)addr, start);
1499                                 sgen_pin_stage_ptr ((void*)addr);
1500                                 count++;
1501                         }
1502                         if (G_UNLIKELY (do_pin_stats)) { 
1503                                 if (ptr_in_nursery ((void*)addr))
1504                                         sgen_pin_stats_register_address ((char*)addr, pin_type);
1505                         }
1506                 }
1507                 start++;
1508         }
1509         if (count)
1510                 SGEN_LOG (7, "found %d potential pinned heap pointers", count);
1511 }
1512
1513 /*
1514  * The first thing we do in a collection is to identify pinned objects.
1515  * This function considers all the areas of memory that need to be
1516  * conservatively scanned.
1517  */
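     /*
      * Concretely this means the roots registered as ROOT_TYPE_PINNED plus the
      * thread stacks scanned by scan_thread_data (); any word in those ranges that
      * looks like a nursery pointer is staged for pinning.
      */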
1518 static void
1519 pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue)
1520 {
1521         void **start_root;
1522         RootRecord *root;
1523         SGEN_LOG (2, "Scanning pinned roots (%d bytes, %d/%d entries)", (int)roots_size, roots_hash [ROOT_TYPE_NORMAL].num_entries, roots_hash [ROOT_TYPE_PINNED].num_entries);
1524         /* objects pinned from the API are inside these roots */
1525         SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], start_root, root) {
1526                 SGEN_LOG (6, "Pinned roots %p-%p", start_root, root->end_root);
1527                 conservatively_pin_objects_from (start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
1528         } SGEN_HASH_TABLE_FOREACH_END;
1529         /* now deal with the thread stacks
1530          * in the future we should be able to conservatively scan only:
1531          * *) the cpu registers
1532          * *) the unmanaged stack frames
1533          * *) the _last_ managed stack frame
1534          * *) pointers slots in managed frames
1535          */
1536         scan_thread_data (start_nursery, end_nursery, FALSE, queue);
1537 }
1538
1539 static void
1540 unpin_objects_from_queue (SgenGrayQueue *queue)
1541 {
1542         for (;;) {
1543                 char *addr;
1544                 GRAY_OBJECT_DEQUEUE (queue, addr);
1545                 if (!addr)
1546                         break;
1547                 g_assert (SGEN_OBJECT_IS_PINNED (addr));
1548                 SGEN_UNPIN_OBJECT (addr);
1549         }
1550 }
1551
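     /*
      * The user-defined root marking callback only receives the address of each
      * object slot, so the copy function and gray queue it should use are passed
      * out of band through a native TLS slot (see single_arg_user_copy_or_mark).
      */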
1552 typedef struct {
1553         CopyOrMarkObjectFunc func;
1554         GrayQueue *queue;
1555 } UserCopyOrMarkData;
1556
1557 static MonoNativeTlsKey user_copy_or_mark_key;
1558
1559 static void
1560 init_user_copy_or_mark_key (void)
1561 {
1562         mono_native_tls_alloc (&user_copy_or_mark_key, NULL);
1563 }
1564
1565 static void
1566 set_user_copy_or_mark_data (UserCopyOrMarkData *data)
1567 {
1568         mono_native_tls_set_value (user_copy_or_mark_key, data);
1569 }
1570
1571 static void
1572 single_arg_user_copy_or_mark (void **obj)
1573 {
1574         UserCopyOrMarkData *data = mono_native_tls_get_value (user_copy_or_mark_key);
1575
1576         data->func (obj, data->queue);
1577 }
1578
1579 /*
1580  * The memory area from start_root to end_root contains pointers to objects.
1581  * Their position is precisely described by @desc (this means that the pointer
1582  * can be either NULL or the pointer to the start of an object).
1583  * This function copies them to to_space and updates them.
1584  *
1585  * This function is not thread-safe!
1586  */
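     /*
      * ROOT_DESC_BITMAP descriptors encode the pointer slots directly in the
      * descriptor word: after shifting out the type bits, bit N set means slot N
      * holds a reference (e.g. an illustrative bitmap of 0b101 marks slots 0 and 2).
      * ROOT_DESC_COMPLEX stores a separately allocated, multi-word bitmap, and
      * ROOT_DESC_USER defers to a user-supplied marking callback.
      */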
1587 static void
1588 precisely_scan_objects_from (void** start_root, void** end_root, char* n_start, char *n_end, mword desc, ScanCopyContext ctx)
1589 {
1590         CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1591         SgenGrayQueue *queue = ctx.queue;
1592
1593         switch (desc & ROOT_DESC_TYPE_MASK) {
1594         case ROOT_DESC_BITMAP:
1595                 desc >>= ROOT_DESC_TYPE_SHIFT;
1596                 while (desc) {
1597                         if ((desc & 1) && *start_root) {
1598                                 copy_func (start_root, queue);
1599                                 SGEN_LOG (9, "Overwrote root at %p with %p", start_root, *start_root);
1600                                 sgen_drain_gray_stack (-1, ctx);
1601                         }
1602                         desc >>= 1;
1603                         start_root++;
1604                 }
1605                 return;
1606         case ROOT_DESC_COMPLEX: {
1607                 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1608                 int bwords = (*bitmap_data) - 1;
1609                 void **start_run = start_root;
1610                 bitmap_data++;
1611                 while (bwords-- > 0) {
1612                         gsize bmap = *bitmap_data++;
1613                         void **objptr = start_run;
1614                         while (bmap) {
1615                                 if ((bmap & 1) && *objptr) {
1616                                         copy_func (objptr, queue);
1617                                         SGEN_LOG (9, "Overwrote root at %p with %p", objptr, *objptr);
1618                                         sgen_drain_gray_stack (-1, ctx);
1619                                 }
1620                                 bmap >>= 1;
1621                                 ++objptr;
1622                         }
1623                         start_run += GC_BITS_PER_WORD;
1624                 }
1625                 break;
1626         }
1627         case ROOT_DESC_USER: {
1628                 UserCopyOrMarkData data = { copy_func, queue };
1629                 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1630                 set_user_copy_or_mark_data (&data);
1631                 marker (start_root, single_arg_user_copy_or_mark);
1632                 set_user_copy_or_mark_data (NULL);
1633                 break;
1634         }
1635         case ROOT_DESC_RUN_LEN:
1636                 g_assert_not_reached ();
1637         default:
1638                 g_assert_not_reached ();
1639         }
1640 }
1641
1642 static void
1643 reset_heap_boundaries (void)
1644 {
1645         lowest_heap_address = ~(mword)0;
1646         highest_heap_address = 0;
1647 }
1648
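     /*
      * Widen the recorded [lowest, highest) heap address range.  Both bounds are
      * updated with CAS retry loops, so concurrent callers can only ever grow the
      * range, never shrink it.
      */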
1649 void
1650 sgen_update_heap_boundaries (mword low, mword high)
1651 {
1652         mword old;
1653
1654         do {
1655                 old = lowest_heap_address;
1656                 if (low >= old)
1657                         break;
1658         } while (SGEN_CAS_PTR ((gpointer*)&lowest_heap_address, (gpointer)low, (gpointer)old) != (gpointer)old);
1659
1660         do {
1661                 old = highest_heap_address;
1662                 if (high <= old)
1663                         break;
1664         } while (SGEN_CAS_PTR ((gpointer*)&highest_heap_address, (gpointer)high, (gpointer)old) != (gpointer)old);
1665 }
1666
1667 /*
1668  * Allocate and setup the data structures needed to be able to allocate objects
1669  * in the nursery. The nursery is stored in nursery_section.
1670  */
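     /*
      * Besides the nursery memory itself we allocate the scan_starts array: the
      * nursery is divided into SCAN_START_SIZE chunks and scan_starts [i] records
      * a known object start within chunk i (or NULL), which
      * pin_objects_from_addresses () uses as a starting point when walking objects
      * near a conservative pointer.
      */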
1671 static void
1672 alloc_nursery (void)
1673 {
1674         GCMemSection *section;
1675         char *data;
1676         int scan_starts;
1677         int alloc_size;
1678
1679         if (nursery_section)
1680                 return;
1681         SGEN_LOG (2, "Allocating nursery size: %lu", (unsigned long)sgen_nursery_size);
1682         /* later we will alloc a larger area for the nursery but only activate
1683          * what we need. The rest will be used as expansion if we have too many pinned
1684          * objects in the existing nursery.
1685          */
1686         /* FIXME: handle OOM */
1687         section = sgen_alloc_internal (INTERNAL_MEM_SECTION);
1688
1689         alloc_size = sgen_nursery_size;
1690
1691         /* If there isn't enough space even for the nursery we should simply abort. */
1692         g_assert (sgen_memgov_try_alloc_space (alloc_size, SPACE_NURSERY));
1693
1694 #ifdef SGEN_ALIGN_NURSERY
1695         data = major_collector.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
1696 #else
1697         data = major_collector.alloc_heap (alloc_size, 0, DEFAULT_NURSERY_BITS);
1698 #endif
1699         sgen_update_heap_boundaries ((mword)data, (mword)(data + sgen_nursery_size));
1700         SGEN_LOG (4, "Expanding nursery size (%p-%p): %lu, total: %lu", data, data + alloc_size, (unsigned long)sgen_nursery_size, (unsigned long)mono_gc_get_heap_size ());
1701         section->data = section->next_data = data;
1702         section->size = alloc_size;
1703         section->end_data = data + sgen_nursery_size;
1704         scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
1705         section->scan_starts = sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
1706         section->num_scan_start = scan_starts;
1707
1708         nursery_section = section;
1709
1710         sgen_nursery_allocator_set_nursery_bounds (data, data + sgen_nursery_size);
1711 }
1712
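     /*
      * When the nursery is aligned to its own size, callers can test nursery
      * membership with a single shift: ((mword)p >> *shift_bits) ==
      * ((mword)sgen_get_nursery_start () >> *shift_bits) holds exactly for nursery
      * pointers.  A shift of -1 signals that no such alignment is guaranteed and a
      * range check must be used instead.
      */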
1713 void*
1714 mono_gc_get_nursery (int *shift_bits, size_t *size)
1715 {
1716         *size = sgen_nursery_size;
1717 #ifdef SGEN_ALIGN_NURSERY
1718         *shift_bits = DEFAULT_NURSERY_BITS;
1719 #else
1720         *shift_bits = -1;
1721 #endif
1722         return sgen_get_nursery_start ();
1723 }
1724
1725 void
1726 mono_gc_set_current_thread_appdomain (MonoDomain *domain)
1727 {
1728         SgenThreadInfo *info = mono_thread_info_current ();
1729
1730         /* Could be called from sgen_thread_unregister () with a NULL info */
1731         if (domain) {
1732                 g_assert (info);
1733                 info->stopped_domain = domain;
1734         }
1735 }
1736
1737 gboolean
1738 mono_gc_precise_stack_mark_enabled (void)
1739 {
1740         return !conservative_stack_mark;
1741 }
1742
1743 FILE *
1744 mono_gc_get_logfile (void)
1745 {
1746         return gc_debug_file;
1747 }
1748
1749 static void
1750 report_finalizer_roots_list (FinalizeReadyEntry *list)
1751 {
1752         GCRootReport report;
1753         FinalizeReadyEntry *fin;
1754
1755         report.count = 0;
1756         for (fin = list; fin; fin = fin->next) {
1757                 if (!fin->object)
1758                         continue;
1759                 add_profile_gc_root (&report, fin->object, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
1760         }
1761         notify_gc_roots (&report);
1762 }
1763
1764 static void
1765 report_finalizer_roots (void)
1766 {
1767         report_finalizer_roots_list (fin_ready_list);
1768         report_finalizer_roots_list (critical_fin_list);
1769 }
1770
1771 static GCRootReport *root_report;
1772
1773 static void
1774 single_arg_report_root (void **obj)
1775 {
1776         if (*obj)
1777                 add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
1778 }
1779
1780 static void
1781 precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
1782 {
1783         switch (desc & ROOT_DESC_TYPE_MASK) {
1784         case ROOT_DESC_BITMAP:
1785                 desc >>= ROOT_DESC_TYPE_SHIFT;
1786                 while (desc) {
1787                         if ((desc & 1) && *start_root) {
1788                                 add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
1789                         }
1790                         desc >>= 1;
1791                         start_root++;
1792                 }
1793                 return;
1794         case ROOT_DESC_COMPLEX: {
1795                 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1796                 int bwords = (*bitmap_data) - 1;
1797                 void **start_run = start_root;
1798                 bitmap_data++;
1799                 while (bwords-- > 0) {
1800                         gsize bmap = *bitmap_data++;
1801                         void **objptr = start_run;
1802                         while (bmap) {
1803                                 if ((bmap & 1) && *objptr) {
1804                                         add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
1805                                 }
1806                                 bmap >>= 1;
1807                                 ++objptr;
1808                         }
1809                         start_run += GC_BITS_PER_WORD;
1810                 }
1811                 break;
1812         }
1813         case ROOT_DESC_USER: {
1814                 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1815                 root_report = report;
1816                 marker (start_root, single_arg_report_root);
1817                 break;
1818         }
1819         case ROOT_DESC_RUN_LEN:
1820                 g_assert_not_reached ();
1821         default:
1822                 g_assert_not_reached ();
1823         }
1824 }
1825
1826 static void
1827 report_registered_roots_by_type (int root_type)
1828 {
1829         GCRootReport report;
1830         void **start_root;
1831         RootRecord *root;
1832         report.count = 0;
1833         SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1834                 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1835                 precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
1836         } SGEN_HASH_TABLE_FOREACH_END;
1837         notify_gc_roots (&report);
1838 }
1839
1840 static void
1841 report_registered_roots (void)
1842 {
1843         report_registered_roots_by_type (ROOT_TYPE_NORMAL);
1844         report_registered_roots_by_type (ROOT_TYPE_WBARRIER);
1845 }
1846
1847 static void
1848 scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx)
1849 {
1850         CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1851         SgenGrayQueue *queue = ctx.queue;
1852         FinalizeReadyEntry *fin;
1853
1854         for (fin = list; fin; fin = fin->next) {
1855                 if (!fin->object)
1856                         continue;
1857                 SGEN_LOG (5, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object));
1858                 copy_func (&fin->object, queue);
1859         }
1860 }
1861
1862 static const char*
1863 generation_name (int generation)
1864 {
1865         switch (generation) {
1866         case GENERATION_NURSERY: return "nursery";
1867         case GENERATION_OLD: return "old";
1868         default: g_assert_not_reached ();
1869         }
1870 }
1871
1872 const char*
1873 sgen_generation_name (int generation)
1874 {
1875         return generation_name (generation);
1876 }
1877
1878 SgenObjectOperations *
1879 sgen_get_current_object_ops (void)
{
1880         return &current_object_ops;
1881 }
1882
1883
1884 static void
1885 finish_gray_stack (int generation, GrayQueue *queue)
1886 {
1887         TV_DECLARE (atv);
1888         TV_DECLARE (btv);
1889         int done_with_ephemerons, ephemeron_rounds = 0;
1890         CopyOrMarkObjectFunc copy_func = current_object_ops.copy_or_mark_object;
1891         ScanObjectFunc scan_func = current_object_ops.scan_object;
1892         ScanCopyContext ctx = { scan_func, copy_func, queue };
1893         char *start_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_start () : NULL;
1894         char *end_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_end () : (char*)-1;
1895
1896         /*
1897          * We copied all the reachable objects. Now it's time to copy
1898          * the objects that were not referenced by the roots, but by the copied objects.
1899          * we built a stack of objects pointed to by gray_start: they are
1900          * additional roots and we may add more items as we go.
1901          * We loop until gray_start == gray_objects which means no more objects have
1902          * been added. Note this is iterative: no recursion is involved.
1903          * We need to walk the LO list as well in search of marked big objects
1904          * (use a flag since this is needed only on major collections). We need to loop
1905          * here as well, so keep a counter of marked LO (increasing it in copy_object).
1906          *   To achieve better cache locality and cache usage, we drain the gray stack 
1907          * frequently, after each object is copied, and just finish the work here.
1908          */
1909         sgen_drain_gray_stack (-1, ctx);
1910         TV_GETTIME (atv);
1911         SGEN_LOG (2, "%s generation done", generation_name (generation));
1912
1913         /*
1914         Reset bridge data: we might have lingering data from a previous collection if this is a major
1915         collection triggered by minor overflow.
1916
1917         We must reset the gathered bridges since their original block might be evacuated due to major
1918         fragmentation in the meantime and the bridge code should not have to deal with that.
1919         */
1920         if (sgen_need_bridge_processing ())
1921                 sgen_bridge_reset_data ();
1922
1923         /*
1924          * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
1925          * before processing finalizable objects and non-tracking weak links to avoid finalizing/clearing
1926          * objects that are in fact reachable.
1927          */
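             /*
              * Marking an ephemeron value can in turn make another ephemeron's key
              * reachable, so the loop below iterates to a fixed point: each round
              * marks values whose keys are reachable and drains the gray stack,
              * until a round reports that nothing new was found.
              */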
1928         done_with_ephemerons = 0;
1929         do {
1930                 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1931                 sgen_drain_gray_stack (-1, ctx);
1932                 ++ephemeron_rounds;
1933         } while (!done_with_ephemerons);
1934
1935         sgen_scan_togglerefs (start_addr, end_addr, ctx);
1936
1937         if (sgen_need_bridge_processing ()) {
1938                 /*Make sure the gray stack is empty before we process bridge objects so we get liveness right*/
1939                 sgen_drain_gray_stack (-1, ctx);
1940                 sgen_collect_bridge_objects (generation, ctx);
1941                 if (generation == GENERATION_OLD)
1942                         sgen_collect_bridge_objects (GENERATION_NURSERY, ctx);
1943
1944                 /*
1945                 Do the first bridge step here, as the collector liveness state will become useless after that.
1946
1947                 An important optimization is to only process the possibly dead part of the object graph and skip
1948                 over all live objects, as we transitively know everything they point to must be alive too.
1949
1950                 The above invariant is completely wrong if we let the gray queue be drained and mark/copy everything.
1951
1952                 This has the unfortunate side effect of making overflow collections perform the first step twice, but
1953                 given we now have heuristics that perform major GC in anticipation of minor overflows this should not
1954                 be a big deal.
1955                 */
1956                 sgen_bridge_processing_stw_step ();
1957         }
1958
1959         /*
1960         Make sure we drain the gray stack before processing disappearing links and finalizers.
1961         If we don't make sure it is empty we might wrongly see a live object as dead.
1962         */
1963         sgen_drain_gray_stack (-1, ctx);
1964
1965         /*
1966         We must clear weak links that don't track resurrection before processing the objects that are ready
1967         for finalization, so that those links are nulled out before the finalizers can observe them.
1968         */
1969         sgen_null_link_in_range (generation, TRUE, ctx);
1970         if (generation == GENERATION_OLD)
1971                 sgen_null_link_in_range (GENERATION_NURSERY, TRUE, ctx);
1972
1973
1974         /* walk the finalization queue and move also the objects that need to be
1975          * finalized: use the finalized objects as new roots so the objects they depend
1976          * on are also not reclaimed. As with the roots above, only objects in the nursery
1977          * are marked/copied.
1978          */
1979         sgen_finalize_in_range (generation, ctx);
1980         if (generation == GENERATION_OLD)
1981                 sgen_finalize_in_range (GENERATION_NURSERY, ctx);
1982         /* drain the new stack that might have been created */
1983         SGEN_LOG (6, "Precise scan of gray area post fin");
1984         sgen_drain_gray_stack (-1, ctx);
1985
1986         /*
1987          * This must be done again after processing finalizable objects since CWL slots are cleared only after the key is finalized.
1988          */
1989         done_with_ephemerons = 0;
1990         do {
1991                 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1992                 sgen_drain_gray_stack (-1, ctx);
1993                 ++ephemeron_rounds;
1994         } while (!done_with_ephemerons);
1995
1996         /*
1997          * Clear ephemeron pairs with unreachable keys.
1998          * We pass the copy func so we can figure out if an array was promoted or not.
1999          */
2000         clear_unreachable_ephemerons (ctx);
2001
2002         TV_GETTIME (btv);
2003         SGEN_LOG (2, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron rounds", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds);
2004
2005         /*
2006          * handle disappearing links
2007          * Note we do this after checking the finalization queue because if an object
2008          * survives (at least long enough to be finalized) we don't clear the link.
2009          * This also deals with a possible issue with the monitor reclamation: with the Boehm
2010          * GC a finalized object may lose the monitor because it is cleared before the finalizer is
2011          * called.
2012          */
2013         g_assert (sgen_gray_object_queue_is_empty (queue));
2014         for (;;) {
2015                 sgen_null_link_in_range (generation, FALSE, ctx);
2016                 if (generation == GENERATION_OLD)
2017                         sgen_null_link_in_range (GENERATION_NURSERY, FALSE, ctx);
2018                 if (sgen_gray_object_queue_is_empty (queue))
2019                         break;
2020                 sgen_drain_gray_stack (-1, ctx);
2021         }
2022
2023         g_assert (sgen_gray_object_queue_is_empty (queue));
2024 }
2025
2026 void
2027 sgen_check_section_scan_starts (GCMemSection *section)
2028 {
2029         int i;
2030         for (i = 0; i < section->num_scan_start; ++i) {
2031                 if (section->scan_starts [i]) {
2032                         guint size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
2033                         g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
2034                 }
2035         }
2036 }
2037
2038 static void
2039 check_scan_starts (void)
2040 {
2041         if (!do_scan_starts_check)
2042                 return;
2043         sgen_check_section_scan_starts (nursery_section);
2044         major_collector.check_scan_starts ();
2045 }
2046
2047 static void
2048 scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx)
2049 {
2050         void **start_root;
2051         RootRecord *root;
2052         SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
2053                 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
2054                 precisely_scan_objects_from (start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, ctx);
2055         } SGEN_HASH_TABLE_FOREACH_END;
2056 }
2057
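     /*
      * Emits one <occupied/> element per contiguous run of live objects in the
      * heap dump, e.g. <occupied offset="128" size="4096"/> (illustrative values).
      */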
2058 void
2059 sgen_dump_occupied (char *start, char *end, char *section_start)
2060 {
2061         fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
2062 }
2063
2064 void
2065 sgen_dump_section (GCMemSection *section, const char *type)
2066 {
2067         char *start = section->data;
2068         char *end = section->data + section->size;
2069         char *occ_start = NULL;
2070         GCVTable *vt;
2071         char *old_start = NULL; /* just for debugging */
2072
2073         fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
2074
2075         while (start < end) {
2076                 guint size;
2077                 MonoClass *class;
2078
2079                 if (!*(void**)start) {
2080                         if (occ_start) {
2081                                 sgen_dump_occupied (occ_start, start, section->data);
2082                                 occ_start = NULL;
2083                         }
2084                         start += sizeof (void*); /* should be ALLOC_ALIGN, really */
2085                         continue;
2086                 }
2087                 g_assert (start < section->next_data);
2088
2089                 if (!occ_start)
2090                         occ_start = start;
2091
2092                 vt = (GCVTable*)LOAD_VTABLE (start);
2093                 class = vt->klass;
2094
2095                 size = ALIGN_UP (safe_object_get_size ((MonoObject*) start));
2096
2097                 /*
2098                 fprintf (heap_dump_file, "<object offset=\"%d\" class=\"%s.%s\" size=\"%d\"/>\n",
2099                                 start - section->data,
2100                                 vt->klass->name_space, vt->klass->name,
2101                                 size);
2102                 */
2103
2104                 old_start = start;
2105                 start += size;
2106         }
2107         if (occ_start)
2108                 sgen_dump_occupied (occ_start, start, section->data);
2109
2110         fprintf (heap_dump_file, "</section>\n");
2111 }
2112
2113 static void
2114 dump_object (MonoObject *obj, gboolean dump_location)
2115 {
2116         static char class_name [1024];
2117
2118         MonoClass *class = mono_object_class (obj);
2119         int i, j;
2120
2121         /*
2122          * Python's XML parser is too stupid to parse angle brackets
2123          * in strings, so we just ignore them;
2124          */
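             /* So a hypothetical class name such as "Foo<Bar>" is emitted as "FooBar". */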
2125         i = j = 0;
2126         while (class->name [i] && j < sizeof (class_name) - 1) {
2127                 if (!strchr ("<>\"", class->name [i]))
2128                         class_name [j++] = class->name [i];
2129                 ++i;
2130         }
2131         g_assert (j < sizeof (class_name));
2132         class_name [j] = 0;
2133
2134         fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%d\"",
2135                         class->name_space, class_name,
2136                         safe_object_get_size (obj));
2137         if (dump_location) {
2138                 const char *location;
2139                 if (ptr_in_nursery (obj))
2140                         location = "nursery";
2141                 else if (safe_object_get_size (obj) <= MAX_SMALL_OBJ_SIZE)
2142                         location = "major";
2143                 else
2144                         location = "LOS";
2145                 fprintf (heap_dump_file, " location=\"%s\"", location);
2146         }
2147         fprintf (heap_dump_file, "/>\n");
2148 }
2149
2150 static void
2151 dump_heap (const char *type, int num, const char *reason)
2152 {
2153         ObjectList *list;
2154         LOSObject *bigobj;
2155
2156         fprintf (heap_dump_file, "<collection type=\"%s\" num=\"%d\"", type, num);
2157         if (reason)
2158                 fprintf (heap_dump_file, " reason=\"%s\"", reason);
2159         fprintf (heap_dump_file, ">\n");
2160         fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
2161         sgen_dump_internal_mem_usage (heap_dump_file);
2162         fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_STACK));
2163         /* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
2164         fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_OTHER));
2165
2166         fprintf (heap_dump_file, "<pinned-objects>\n");
2167         for (list = sgen_pin_stats_get_object_list (); list; list = list->next)
2168                 dump_object (list->obj, TRUE);
2169         fprintf (heap_dump_file, "</pinned-objects>\n");
2170
2171         sgen_dump_section (nursery_section, "nursery");
2172
2173         major_collector.dump_heap (heap_dump_file);
2174
2175         fprintf (heap_dump_file, "<los>\n");
2176         for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
2177                 dump_object ((MonoObject*)bigobj->data, FALSE);
2178         fprintf (heap_dump_file, "</los>\n");
2179
2180         fprintf (heap_dump_file, "</collection>\n");
2181 }
2182
2183 void
2184 sgen_register_moved_object (void *obj, void *destination)
2185 {
2186         g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
2187
2188         /* FIXME: handle this for parallel collector */
2189         g_assert (!sgen_collection_is_parallel ());
2190
2191         if (moved_objects_idx == MOVED_OBJECTS_NUM) {
2192                 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
2193                 moved_objects_idx = 0;
2194         }
2195         moved_objects [moved_objects_idx++] = obj;
2196         moved_objects [moved_objects_idx++] = destination;
2197 }
2198
2199 static void
2200 init_stats (void)
2201 {
2202         static gboolean inited = FALSE;
2203
2204         if (inited)
2205                 return;
2206
2207         mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_pre_collection_fragment_clear);
2208         mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_pinning);
2209         mono_counters_register ("Minor scan remembered set", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_remsets);
2210         mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_pinned);
2211         mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_registered_roots);
2212         mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_thread_data);
2213         mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_finish_gray_stack);
2214         mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_fragment_creation);
2215
2216         mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_pre_collection_fragment_clear);
2217         mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_pinning);
2218         mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_pinned);
2219         mono_counters_register ("Major scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_registered_roots);
2220         mono_counters_register ("Major scan thread data", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_thread_data);
2221         mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_alloc_pinned);
2222         mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_finalized);
2223         mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_big_objects);
2224         mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_finish_gray_stack);
2225         mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_free_bigobjs);
2226         mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_los_sweep);
2227         mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_sweep);
2228         mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_fragment_creation);
2229
2230         mono_counters_register ("Number of pinned objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_pinned_objects);
2231
2232 #ifdef HEAVY_STATISTICS
2233         mono_counters_register ("WBarrier remember pointer", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_add_to_global_remset);
2234         mono_counters_register ("WBarrier set field", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_field);
2235         mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_arrayref);
2236         mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_arrayref_copy);
2237         mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store);
2238         mono_counters_register ("WBarrier generic atomic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store_atomic);
2239         mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_root);
2240         mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_value_copy);
2241         mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_object_copy);
2242
2243         mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced_degraded);
2244         mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_degraded);
2245
2246         mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_nursery);
2247         mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_nursery);
2248         mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_major);
2249         mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_major);
2250
2251         mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_nursery);
2252         mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_major);
2253
2254         mono_counters_register ("Slots allocated in vain", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_slots_allocated_in_vain);
2255
2256         mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_from_space);
2257         mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_forwarded);
2258         mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_pinned);
2259         mono_counters_register ("# nursery copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_to_space);
2260
2261         sgen_nursery_allocator_init_heavy_stats ();
2262         sgen_alloc_init_heavy_stats ();
2263 #endif
2264
2265         inited = TRUE;
2266 }
2267
2268
2269 static void
2270 reset_pinned_from_failed_allocation (void)
2271 {
2272         bytes_pinned_from_failed_allocation = 0;
2273 }
2274
2275 void
2276 sgen_set_pinned_from_failed_allocation (mword objsize)
2277 {
2278         bytes_pinned_from_failed_allocation += objsize;
2279 }
2280
2281 gboolean
2282 sgen_collection_is_parallel (void)
2283 {
2284         switch (current_collection_generation) {
2285         case GENERATION_NURSERY:
2286                 return nursery_collection_is_parallel;
2287         case GENERATION_OLD:
2288                 return major_collector.is_parallel;
2289         default:
2290                 g_error ("Invalid current generation %d", current_collection_generation);
2291         }
2292 }
2293
2294 gboolean
2295 sgen_collection_is_concurrent (void)
2296 {
2297         switch (current_collection_generation) {
2298         case GENERATION_NURSERY:
2299                 return FALSE;
2300         case GENERATION_OLD:
2301                 return concurrent_collection_in_progress;
2302         default:
2303                 g_error ("Invalid current generation %d", current_collection_generation);
2304         }
2305 }
2306
2307 gboolean
2308 sgen_concurrent_collection_in_progress (void)
2309 {
2310         return concurrent_collection_in_progress;
2311 }
2312
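     /*
      * Worker jobs follow a common pattern: the job data is heap-allocated with
      * sgen_alloc_internal_dynamic (), enqueued for a worker thread, and freed by
      * the job function itself (sgen_free_internal_dynamic) once it has run.
      */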
2313 typedef struct
2314 {
2315         char *heap_start;
2316         char *heap_end;
2317 } FinishRememberedSetScanJobData;
2318
2319 static void
2320 job_finish_remembered_set_scan (WorkerData *worker_data, void *job_data_untyped)
2321 {
2322         FinishRememberedSetScanJobData *job_data = job_data_untyped;
2323
2324         remset.finish_scan_remsets (job_data->heap_start, job_data->heap_end, sgen_workers_get_job_gray_queue (worker_data));
2325         sgen_free_internal_dynamic (job_data, sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA);
2326 }
2327
2328 typedef struct
2329 {
2330         CopyOrMarkObjectFunc copy_or_mark_func;
2331         ScanObjectFunc scan_func;
2332         char *heap_start;
2333         char *heap_end;
2334         int root_type;
2335 } ScanFromRegisteredRootsJobData;
2336
2337 static void
2338 job_scan_from_registered_roots (WorkerData *worker_data, void *job_data_untyped)
2339 {
2340         ScanFromRegisteredRootsJobData *job_data = job_data_untyped;
2341         ScanCopyContext ctx = { job_data->scan_func, job_data->copy_or_mark_func,
2342                 sgen_workers_get_job_gray_queue (worker_data) };
2343
2344         scan_from_registered_roots (job_data->heap_start, job_data->heap_end, job_data->root_type, ctx);
2345         sgen_free_internal_dynamic (job_data, sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA);
2346 }
2347
2348 typedef struct
2349 {
2350         char *heap_start;
2351         char *heap_end;
2352 } ScanThreadDataJobData;
2353
2354 static void
2355 job_scan_thread_data (WorkerData *worker_data, void *job_data_untyped)
2356 {
2357         ScanThreadDataJobData *job_data = job_data_untyped;
2358
2359         scan_thread_data (job_data->heap_start, job_data->heap_end, TRUE,
2360                         sgen_workers_get_job_gray_queue (worker_data));
2361         sgen_free_internal_dynamic (job_data, sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA);
2362 }
2363
2364 typedef struct
2365 {
2366         FinalizeReadyEntry *list;
2367 } ScanFinalizerEntriesJobData;
2368
2369 static void
2370 job_scan_finalizer_entries (WorkerData *worker_data, void *job_data_untyped)
2371 {
2372         ScanFinalizerEntriesJobData *job_data = job_data_untyped;
2373         ScanCopyContext ctx = { NULL, current_object_ops.copy_or_mark_object, sgen_workers_get_job_gray_queue (worker_data) };
2374
2375         scan_finalizer_entries (job_data->list, ctx);
2376         sgen_free_internal_dynamic (job_data, sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA);
2377 }
2378
2379 static void
2380 job_scan_major_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2381 {
2382         g_assert (concurrent_collection_in_progress);
2383         major_collector.scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
2384 }
2385
2386 static void
2387 job_scan_los_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2388 {
2389         g_assert (concurrent_collection_in_progress);
2390         sgen_los_scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
2391 }
2392
2393 static void
2394 verify_scan_starts (char *start, char *end)
2395 {
2396         int i;
2397
2398         for (i = 0; i < nursery_section->num_scan_start; ++i) {
2399                 char *addr = nursery_section->scan_starts [i];
2400                 if (addr > start && addr < end)
2401                         SGEN_LOG (1, "NFC-BAD SCAN START [%d] %p for obj [%p %p]", i, addr, start, end);
2402         }
2403 }
2404
2405 static void
2406 verify_nursery (void)
2407 {
2408         char *start, *end, *cur, *hole_start;
2409
2410         if (!do_verify_nursery)
2411                 return;
2412
2413         /*This cleans up unused fragments */
2414         sgen_nursery_allocator_prepare_for_pinning ();
2415
2416         hole_start = start = cur = sgen_get_nursery_start ();
2417         end = sgen_get_nursery_end ();
2418
2419         while (cur < end) {
2420                 size_t ss, size;
2421
2422                 if (!*(void**)cur) {
2423                         cur += sizeof (void*);
2424                         continue;
2425                 }
2426
2427                 if (object_is_forwarded (cur))
2428                         SGEN_LOG (1, "FORWARDED OBJ %p", cur);
2429                 else if (object_is_pinned (cur))
2430                         SGEN_LOG (1, "PINNED OBJ %p", cur);
2431
2432                 ss = safe_object_get_size ((MonoObject*)cur);
2433                 size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2434                 verify_scan_starts (cur, cur + size);
2435                 if (do_dump_nursery_content) {
2436                         if (cur > hole_start)
2437                                 SGEN_LOG (1, "HOLE [%p %p %d]", hole_start, cur, (int)(cur - hole_start));
2438                         SGEN_LOG (1, "OBJ  [%p %p %d %d %s %d]", cur, cur + size, (int)size, (int)ss, sgen_safe_name ((MonoObject*)cur), (gpointer)LOAD_VTABLE (cur) == sgen_get_array_fill_vtable ());
2439                 }
2440                 cur += size;
2441                 hole_start = cur;
2442         }
2443 }
2444
2445 /*
2446  * Checks that no objects in the nursery are forwarded or pinned.  This
2447  * is a precondition to restarting the mutator while doing a
2448  * concurrent collection.  Note that we don't clear fragments because
2449  * we depend on that having happened earlier.
2450  */
2451 static void
2452 check_nursery_is_clean (void)
2453 {
2454         char *start, *end, *cur;
2455
2456         start = cur = sgen_get_nursery_start ();
2457         end = sgen_get_nursery_end ();
2458
2459         while (cur < end) {
2460                 size_t ss, size;
2461
2462                 if (!*(void**)cur) {
2463                         cur += sizeof (void*);
2464                         continue;
2465                 }
2466
2467                 g_assert (!object_is_forwarded (cur));
2468                 g_assert (!object_is_pinned (cur));
2469
2470                 ss = safe_object_get_size ((MonoObject*)cur);
2471                 size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2472                 verify_scan_starts (cur, cur + size);
2473
2474                 cur += size;
2475         }
2476 }
2477
2478 static void
2479 init_gray_queue (void)
2480 {
2481         if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ()) {
2482                 sgen_workers_init_distribute_gray_queue ();
2483                 sgen_gray_object_queue_init_with_alloc_prepare (&gray_queue, NULL,
2484                                 gray_queue_redirect, sgen_workers_get_distribute_section_gray_queue ());
2485         } else {
2486                 sgen_gray_object_queue_init (&gray_queue, NULL);
2487         }
2488 }
2489
2490 static void
2491 pin_stage_object_callback (char *obj, size_t size, void *data)
2492 {
2493         sgen_pin_stage_ptr (obj);
2494         /* FIXME: do pin stats if enabled */
2495 }
2496
2497 /*
2498  * Collect objects in the nursery.  Returns whether to trigger a major
2499  * collection.
2500  */
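     /*
      * Roughly, the phases below are: pin whatever the conservative roots and the
      * cement table demand, scan the remembered set and the registered roots into
      * the gray queue, drain the gray queue, then hand the surviving free space
      * back to the mutator as nursery fragments.  This is a sketch of the main
      * steps, not an exhaustive list.
      */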
2501 static gboolean
2502 collect_nursery (SgenGrayQueue *unpin_queue, gboolean finish_up_concurrent_mark)
2503 {
2504         gboolean needs_major;
2505         size_t max_garbage_amount;
2506         char *nursery_next;
2507         FinishRememberedSetScanJobData *frssjd;
2508         ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2509         ScanFinalizerEntriesJobData *sfejd_fin_ready, *sfejd_critical_fin;
2510         ScanThreadDataJobData *stdjd;
2511         mword fragment_total;
2512         ScanCopyContext ctx;
2513         TV_DECLARE (all_atv);
2514         TV_DECLARE (all_btv);
2515         TV_DECLARE (atv);
2516         TV_DECLARE (btv);
2517
2518         if (disable_minor_collections)
2519                 return TRUE;
2520
2521         MONO_GC_BEGIN (GENERATION_NURSERY);
2522         binary_protocol_collection_begin (stat_minor_gcs, GENERATION_NURSERY);
2523
2524         verify_nursery ();
2525
2526 #ifndef DISABLE_PERFCOUNTERS
2527         mono_perfcounters->gc_collections0++;
2528 #endif
2529
2530         current_collection_generation = GENERATION_NURSERY;
2531         if (sgen_collection_is_parallel ())
2532                 current_object_ops = sgen_minor_collector.parallel_ops;
2533         else
2534                 current_object_ops = sgen_minor_collector.serial_ops;
2535         
2536         reset_pinned_from_failed_allocation ();
2537
2538         check_scan_starts ();
2539
2540         sgen_nursery_alloc_prepare_for_minor ();
2541
2542         degraded_mode = 0;
2543         objects_pinned = 0;
2544         nursery_next = sgen_nursery_alloc_get_upper_alloc_bound ();
2545         /* FIXME: optimize later to use the higher address where an object can be present */
2546         nursery_next = MAX (nursery_next, sgen_get_nursery_end ());
2547
2548         SGEN_LOG (1, "Start nursery collection %d %p-%p, size: %d", stat_minor_gcs, sgen_get_nursery_start (), nursery_next, (int)(nursery_next - sgen_get_nursery_start ()));
2549         max_garbage_amount = nursery_next - sgen_get_nursery_start ();
2550         g_assert (nursery_section->size >= max_garbage_amount);
2551
2552         /* world must be stopped already */
2553         TV_GETTIME (all_atv);
2554         atv = all_atv;
2555
2556         TV_GETTIME (btv);
2557         time_minor_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2558
2559         if (xdomain_checks) {
2560                 sgen_clear_nursery_fragments ();
2561                 check_for_xdomain_refs ();
2562         }
2563
2564         nursery_section->next_data = nursery_next;
2565
2566         major_collector.start_nursery_collection ();
2567
2568         sgen_memgov_minor_collection_start ();
2569
2570         init_gray_queue ();
2571
2572         stat_minor_gcs++;
2573         gc_stats.minor_gc_count ++;
2574
2575         MONO_GC_CHECKPOINT_1 (GENERATION_NURSERY);
2576
2577         sgen_process_fin_stage_entries ();
2578         sgen_process_dislink_stage_entries ();
2579
2580         MONO_GC_CHECKPOINT_2 (GENERATION_NURSERY);
2581
2582         /* pin from pinned handles */
2583         sgen_init_pinning ();
2584         mono_profiler_gc_event (MONO_GC_EVENT_MARK_START, 0);
2585         pin_from_roots (sgen_get_nursery_start (), nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2586         /* pin cemented objects */
2587         sgen_cement_iterate (pin_stage_object_callback, NULL);
2588         /* identify pinned objects */
2589         sgen_optimize_pin_queue (0);
2590         sgen_pinning_setup_section (nursery_section);
2591         ctx.scan_func = NULL;
2592         ctx.copy_func = NULL;
2593         ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
2594         sgen_pin_objects_in_section (nursery_section, ctx);
2595         sgen_pinning_trim_queue_to_section (nursery_section);
2596
2597         TV_GETTIME (atv);
2598         time_minor_pinning += TV_ELAPSED (btv, atv);
2599         SGEN_LOG (2, "Finding pinned pointers: %d in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (btv, atv));
2600         SGEN_LOG (4, "Start scan with %d pinned objects", sgen_get_pinned_count ());
2601
2602         MONO_GC_CHECKPOINT_3 (GENERATION_NURSERY);
2603
2604         if (whole_heap_check_before_collection) {
2605                 sgen_clear_nursery_fragments ();
2606                 sgen_check_whole_heap (finish_up_concurrent_mark);
2607         }
2608         if (consistency_check_at_minor_collection)
2609                 sgen_check_consistency ();
2610
2611         sgen_workers_start_all_workers ();
2612         sgen_workers_start_marking ();
2613
2614         frssjd = sgen_alloc_internal_dynamic (sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2615         frssjd->heap_start = sgen_get_nursery_start ();
2616         frssjd->heap_end = nursery_next;
2617         sgen_workers_enqueue_job (job_finish_remembered_set_scan, frssjd);
2618
2619         /* we don't have a complete write barrier yet, so we scan all the old generation sections */
2620         TV_GETTIME (btv);
2621         time_minor_scan_remsets += TV_ELAPSED (atv, btv);
2622         SGEN_LOG (2, "Old generation scan: %d usecs", TV_ELAPSED (atv, btv));
2623
2624         MONO_GC_CHECKPOINT_4 (GENERATION_NURSERY);
2625
2626         if (!sgen_collection_is_parallel ()) {
2627                 ctx.scan_func = current_object_ops.scan_object;
2628                 ctx.copy_func = NULL;
2629                 ctx.queue = &gray_queue;
2630                 sgen_drain_gray_stack (-1, ctx);
2631         }
2632
2633         if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2634                 report_registered_roots ();
2635         if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2636                 report_finalizer_roots ();
2637         TV_GETTIME (atv);
2638         time_minor_scan_pinned += TV_ELAPSED (btv, atv);
2639
2640         MONO_GC_CHECKPOINT_5 (GENERATION_NURSERY);
2641
2642         /* registered roots, this includes static fields */
2643         scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2644         scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2645         scrrjd_normal->scan_func = current_object_ops.scan_object;
2646         scrrjd_normal->heap_start = sgen_get_nursery_start ();
2647         scrrjd_normal->heap_end = nursery_next;
2648         scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
2649         sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
2650
2651         scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2652         scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2653         scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
2654         scrrjd_wbarrier->heap_start = sgen_get_nursery_start ();
2655         scrrjd_wbarrier->heap_end = nursery_next;
2656         scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
2657         sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
2658
2659         TV_GETTIME (btv);
2660         time_minor_scan_registered_roots += TV_ELAPSED (atv, btv);
2661
2662         MONO_GC_CHECKPOINT_6 (GENERATION_NURSERY);
2663
2664         /* thread data */
2665         stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2666         stdjd->heap_start = sgen_get_nursery_start ();
2667         stdjd->heap_end = nursery_next;
2668         sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
2669
2670         TV_GETTIME (atv);
2671         time_minor_scan_thread_data += TV_ELAPSED (btv, atv);
2672         btv = atv;
2673
2674         MONO_GC_CHECKPOINT_7 (GENERATION_NURSERY);
2675
2676         g_assert (!sgen_collection_is_parallel () && !sgen_collection_is_concurrent ());
2677
2678         if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ())
2679                 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2680
2681         /* Scan the list of objects ready for finalization. */
2682         sfejd_fin_ready = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2683         sfejd_fin_ready->list = fin_ready_list;
2684         sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_fin_ready);
2685
2686         sfejd_critical_fin = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2687         sfejd_critical_fin->list = critical_fin_list;
2688         sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_critical_fin);
2689
2690         MONO_GC_CHECKPOINT_8 (GENERATION_NURSERY);
2691
2692         finish_gray_stack (GENERATION_NURSERY, &gray_queue);
2693         TV_GETTIME (atv);
2694         time_minor_finish_gray_stack += TV_ELAPSED (btv, atv);
2695         mono_profiler_gc_event (MONO_GC_EVENT_MARK_END, 0);
2696
2697         MONO_GC_CHECKPOINT_9 (GENERATION_NURSERY);
2698
2699         /*
2700          * The (single-threaded) finalization code might have done
2701          * some copying/marking so we can only reset the GC thread's
2702          * worker data here instead of earlier when we joined the
2703          * workers.
2704          */
2705         sgen_workers_reset_data ();
2706
2707         if (objects_pinned) {
2708                 sgen_optimize_pin_queue (0);
2709                 sgen_pinning_setup_section (nursery_section);
2710         }
2711
2712         /* walk the pin_queue, build up the fragment list of free memory, unmark
2713          * pinned objects as we go, memzero() the empty fragments so they are ready for the
2714          * next allocations.
2715          */
2716         mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_START, 0);
2717         fragment_total = sgen_build_nursery_fragments (nursery_section,
2718                         nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries,
2719                         unpin_queue);
2720         if (!fragment_total)
2721                 degraded_mode = 1;
2722
2723         /* Clear TLABs for all threads */
2724         sgen_clear_tlabs ();
2725
2726         mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_END, 0);
2727         TV_GETTIME (btv);
2728         time_minor_fragment_creation += TV_ELAPSED (atv, btv);
2729         SGEN_LOG (2, "Fragment creation: %d usecs, %lu bytes available", TV_ELAPSED (atv, btv), (unsigned long)fragment_total);
2730
2731         if (consistency_check_at_minor_collection)
2732                 sgen_check_major_refs ();
2733
2734         major_collector.finish_nursery_collection ();
2735
2736         TV_GETTIME (all_btv);
2737         gc_stats.minor_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
2738
2739         if (heap_dump_file)
2740                 dump_heap ("minor", stat_minor_gcs - 1, NULL);
2741
2742         /* prepare the pin queue for the next collection */
2743         sgen_finish_pinning ();
2744         if (fin_ready_list || critical_fin_list) {
2745                 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
2746                 mono_gc_finalize_notify ();
2747         }
2748         sgen_pin_stats_reset ();
2749         /* clear cemented hash */
2750         sgen_cement_clear_below_threshold ();
2751
2752         g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2753
2754         remset.finish_minor_collection ();
2755
2756         check_scan_starts ();
2757
2758         binary_protocol_flush_buffers (FALSE);
2759
2760         sgen_memgov_minor_collection_end ();
2761
2762         /* Objects were pinned late because we ran out of memory, so a major collection is warranted. */
2763         needs_major = objects_pinned > 0;
2764         current_collection_generation = -1;
2765         objects_pinned = 0;
2766
2767         MONO_GC_END (GENERATION_NURSERY);
2768         binary_protocol_collection_end (stat_minor_gcs - 1, GENERATION_NURSERY);
2769
2770         if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2771                 sgen_check_nursery_objects_pinned (unpin_queue != NULL);
2772
2773         return needs_major;
2774 }
2775
2776 static void
2777 scan_nursery_objects_callback (char *obj, size_t size, ScanCopyContext *ctx)
2778 {
2779         ctx->scan_func (obj, ctx->queue);
2780 }
2781
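/*
 * Scan every object in the nursery, front to back, with the given context's
 * scan function.
 */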
2782 static void
2783 scan_nursery_objects (ScanCopyContext ctx)
2784 {
2785         sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
2786                         (IterateObjectCallbackFunc)scan_nursery_objects_callback, (void*)&ctx, FALSE);
2787 }
2788
2789 static void
2790 major_copy_or_mark_from_roots (int *old_next_pin_slot, gboolean finish_up_concurrent_mark, gboolean scan_mod_union)
2791 {
2792         LOSObject *bigobj;
2793         TV_DECLARE (atv);
2794         TV_DECLARE (btv);
2795         /* FIXME: only use these values for the precise scan
2796          * note that to_space pointers should be excluded anyway...
2797          */
2798         char *heap_start = NULL;
2799         char *heap_end = (char*)-1;
2800         gboolean profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
2801         GCRootReport root_report = { 0 };
2802         ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2803         ScanThreadDataJobData *stdjd;
2804         ScanFinalizerEntriesJobData *sfejd_fin_ready, *sfejd_critical_fin;
2805         ScanCopyContext ctx;
2806
2807         if (concurrent_collection_in_progress) {
2808                 /*This cleans up unused fragments */
2809                 sgen_nursery_allocator_prepare_for_pinning ();
2810
2811                 if (do_concurrent_checks)
2812                         check_nursery_is_clean ();
2813         } else {
2814                 /* The concurrent collector doesn't touch the nursery. */
2815                 sgen_nursery_alloc_prepare_for_major ();
2816         }
2817
2818         init_gray_queue ();
2819
2820         TV_GETTIME (atv);
2821
2822         /* Pinning depends on this */
2823         sgen_clear_nursery_fragments ();
2824
2825         if (whole_heap_check_before_collection)
2826                 sgen_check_whole_heap (finish_up_concurrent_mark);
2827
2828         TV_GETTIME (btv);
2829         time_major_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2830
2831         if (!sgen_collection_is_concurrent ())
2832                 nursery_section->next_data = sgen_get_nursery_end ();
2833         /* we should also coalesce scanning from sections close to each other
2834          * and deal with pointers outside of the sections later.
2835          */
2836
2837         objects_pinned = 0;
2838         *major_collector.have_swept = FALSE;
2839
2840         if (xdomain_checks) {
2841                 sgen_clear_nursery_fragments ();
2842                 check_for_xdomain_refs ();
2843         }
2844
2845         if (!concurrent_collection_in_progress) {
2846                 /* Remsets are not useful for a major collection */
2847                 remset.prepare_for_major_collection ();
2848         }
2849
2850         sgen_process_fin_stage_entries ();
2851         sgen_process_dislink_stage_entries ();
2852
2853         TV_GETTIME (atv);
2854         sgen_init_pinning ();
2855         SGEN_LOG (6, "Collecting pinned addresses");
2856         pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2857
2858         if (!concurrent_collection_in_progress || finish_up_concurrent_mark) {
2859                 if (major_collector.is_concurrent) {
2860                         /*
2861                          * The concurrent major collector cannot evict
2862                          * yet, so we need to pin cemented objects to
2863                          * not break some asserts.
2864                          *
2865                          * FIXME: We could evict now!
2866                          */
2867                         sgen_cement_iterate (pin_stage_object_callback, NULL);
2868                 }
2869
2870                 if (!concurrent_collection_in_progress)
2871                         sgen_cement_reset ();
2872         }
2873
2874         sgen_optimize_pin_queue (0);
2875
2876         /*
2877          * The concurrent collector doesn't move objects, neither on
2878          * the major heap nor in the nursery, so we can mark even
2879          * before pinning has finished.  For the non-concurrent
2880          * collector we start the workers after pinning.
2881          */
2882         if (concurrent_collection_in_progress) {
2883                 sgen_workers_start_all_workers ();
2884                 sgen_workers_start_marking ();
2885         }
2886
2887         /*
2888          * pin_queue now contains all candidate pointers, sorted and
2889          * uniqued.  We must do two passes now to figure out which
2890          * objects are pinned.
2891          *
2892          * The first is to find within the pin_queue the area for each
2893          * section.  This requires that the pin_queue be sorted.  We
2894          * also process the LOS objects and pinned chunks here.
2895          *
2896          * The second, destructive, pass is to reduce the section
2897          * areas to pointers to the actually pinned objects.
2898          */
2899         SGEN_LOG (6, "Pinning from sections");
2900         /* first pass for the sections */
2901         sgen_find_section_pin_queue_start_end (nursery_section);
2902         major_collector.find_pin_queue_start_ends (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2903         /* identify possible pointers to the inside of large objects */
2904         SGEN_LOG (6, "Pinning from large objects");
2905         for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
2906                 int dummy;
2907                 if (sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + sgen_los_object_size (bigobj), &dummy)) {
2908                         binary_protocol_pin (bigobj->data, (gpointer)LOAD_VTABLE (bigobj->data), safe_object_get_size (((MonoObject*)(bigobj->data))));
2909
2910 #ifdef ENABLE_DTRACE
2911                         if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
2912                                 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (bigobj->data);
2913                                 MONO_GC_OBJ_PINNED ((mword)bigobj->data, sgen_safe_object_get_size ((MonoObject*)bigobj->data), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
2914                         }
2915 #endif
2916
2917                         if (sgen_los_object_is_pinned (bigobj->data)) {
2918                                 g_assert (finish_up_concurrent_mark);
2919                                 continue;
2920                         }
2921                         sgen_los_pin_object (bigobj->data);
2922                         /* FIXME: only enqueue if object has references */
2923                         GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data);
2924                         if (G_UNLIKELY (do_pin_stats))
2925                                 sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
2926                         SGEN_LOG (6, "Marked large object %p (%s) size: %lu from roots", bigobj->data, safe_name (bigobj->data), (unsigned long)sgen_los_object_size (bigobj));
2927
2928                         if (profile_roots)
2929                                 add_profile_gc_root (&root_report, bigobj->data, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
2930                 }
2931         }
2932         if (profile_roots)
2933                 notify_gc_roots (&root_report);
2934         /* second pass for the sections */
2935         ctx.scan_func = concurrent_collection_in_progress ? current_object_ops.scan_object : NULL;
2936         ctx.copy_func = NULL;
2937         ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
2938
2939         /*
2940          * Concurrent mark never follows references into the nursery.
2941          * In the start and finish pauses we must scan live nursery
2942          * objects, though.  We could simply scan all nursery objects,
2943          * but that would be conservative.  The easiest way is to do a
2944          * nursery collection, which copies all live nursery objects
2945          * (except pinned ones, with the simple nursery) to the major
2946          * heap.  Scanning the mod union table later will then scan
2947          * those promoted objects, provided they're reachable.  Pinned
2948          * objects in the nursery - which we can trivially find in the
2949          * pinning queue - are treated as roots in the mark pauses.
2950          *
2951          * The split nursery complicates the latter part because
2952          * non-pinned objects can survive in the nursery.  That's why
2953          * we need to do a full front-to-back scan of the nursery,
2954          * marking all objects.
2955          *
2956          * Non-concurrent mark evacuates from the nursery, so it's
2957          * sufficient to just scan pinned nursery objects.
2958          */
2959         if (concurrent_collection_in_progress && sgen_minor_collector.is_split) {
2960                 scan_nursery_objects (ctx);
2961         } else {
2962                 sgen_pin_objects_in_section (nursery_section, ctx);
2963                 if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2964                         sgen_check_nursery_objects_pinned (!concurrent_collection_in_progress || finish_up_concurrent_mark);
2965         }
2966
2967         major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2968         if (old_next_pin_slot)
2969                 *old_next_pin_slot = sgen_get_pinned_count ();
2970
2971         TV_GETTIME (btv);
2972         time_major_pinning += TV_ELAPSED (atv, btv);
2973         SGEN_LOG (2, "Finding pinned pointers: %d in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (atv, btv));
2974         SGEN_LOG (4, "Start scan with %d pinned objects", sgen_get_pinned_count ());
2975
2976         major_collector.init_to_space ();
2977
2978 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2979         main_gc_thread = mono_native_thread_self ();
2980 #endif
2981
2982         if (!concurrent_collection_in_progress && major_collector.is_parallel) {
2983                 sgen_workers_start_all_workers ();
2984                 sgen_workers_start_marking ();
2985         }
2986
2987         if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2988                 report_registered_roots ();
2989         TV_GETTIME (atv);
2990         time_major_scan_pinned += TV_ELAPSED (btv, atv);
2991
2992         /* registered roots, this includes static fields */
2993         scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2994         scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2995         scrrjd_normal->scan_func = current_object_ops.scan_object;
2996         scrrjd_normal->heap_start = heap_start;
2997         scrrjd_normal->heap_end = heap_end;
2998         scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
2999         sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
3000
3001         scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
3002         scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
3003         scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
3004         scrrjd_wbarrier->heap_start = heap_start;
3005         scrrjd_wbarrier->heap_end = heap_end;
3006         scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
3007         sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
3008
3009         TV_GETTIME (btv);
3010         time_major_scan_registered_roots += TV_ELAPSED (atv, btv);
3011
3012         /* Threads */
3013         stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
3014         stdjd->heap_start = heap_start;
3015         stdjd->heap_end = heap_end;
3016         sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
3017
3018         TV_GETTIME (atv);
3019         time_major_scan_thread_data += TV_ELAPSED (btv, atv);
3020
3021         TV_GETTIME (btv);
3022         time_major_scan_alloc_pinned += TV_ELAPSED (atv, btv);
3023
3024         if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
3025                 report_finalizer_roots ();
3026
3027         /* scan the list of objects ready for finalization */
3028         sfejd_fin_ready = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
3029         sfejd_fin_ready->list = fin_ready_list;
3030         sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_fin_ready);
3031
3032         sfejd_critical_fin = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
3033         sfejd_critical_fin->list = critical_fin_list;
3034         sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_critical_fin);
3035
3036         if (scan_mod_union) {
3037                 g_assert (finish_up_concurrent_mark);
3038
3039                 /* Mod union card table */
3040                 sgen_workers_enqueue_job (job_scan_major_mod_union_cardtable, NULL);
3041                 sgen_workers_enqueue_job (job_scan_los_mod_union_cardtable, NULL);
3042         }
3043
3044         TV_GETTIME (atv);
3045         time_major_scan_finalized += TV_ELAPSED (btv, atv);
3046         SGEN_LOG (2, "Root scan: %d usecs", TV_ELAPSED (btv, atv));
3047
3048         TV_GETTIME (btv);
3049         time_major_scan_big_objects += TV_ELAPSED (atv, btv);
3050
3051         if (concurrent_collection_in_progress) {
3052                 /* prepare the pin queue for the next collection */
3053                 sgen_finish_pinning ();
3054
3055                 sgen_pin_stats_reset ();
3056
3057                 if (do_concurrent_checks)
3058                         check_nursery_is_clean ();
3059         }
3060 }
3061
3062 static void
3063 major_start_collection (gboolean concurrent, int *old_next_pin_slot)
3064 {
3065         MONO_GC_BEGIN (GENERATION_OLD);
3066         binary_protocol_collection_begin (stat_major_gcs, GENERATION_OLD);
3067
3068         current_collection_generation = GENERATION_OLD;
3069 #ifndef DISABLE_PERFCOUNTERS
3070         mono_perfcounters->gc_collections1++;
3071 #endif
3072
3073         g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
3074
3075         if (concurrent) {
3076                 g_assert (major_collector.is_concurrent);
3077                 concurrent_collection_in_progress = TRUE;
3078
3079                 sgen_cement_concurrent_start ();
3080
3081                 current_object_ops = major_collector.major_concurrent_ops;
3082         } else {
3083                 current_object_ops = major_collector.major_ops;
3084         }
3085
3086         reset_pinned_from_failed_allocation ();
3087
3088         sgen_memgov_major_collection_start ();
3089
3090         //count_ref_nonref_objs ();
3091         //consistency_check ();
3092
3093         check_scan_starts ();
3094
3095         degraded_mode = 0;
3096         SGEN_LOG (1, "Start major collection %d", stat_major_gcs);
3097         stat_major_gcs++;
3098         gc_stats.major_gc_count ++;
3099
3100         if (major_collector.start_major_collection)
3101                 major_collector.start_major_collection ();
3102
3103         major_copy_or_mark_from_roots (old_next_pin_slot, FALSE, FALSE);
3104 }
3105
3106 static void
3107 wait_for_workers_to_finish (void)
3108 {
3109         while (!sgen_workers_all_done ())
3110                 g_usleep (200);
3111 }
3112
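/*
 * If workers are in use, redirect the main thread's gray queue to them and
 * wait for them to finish.
 */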
3113 static void
3114 join_workers (void)
3115 {
3116         if (concurrent_collection_in_progress || major_collector.is_parallel) {
3117                 gray_queue_redirect (&gray_queue);
3118                 sgen_workers_join ();
3119         }
3120
3121         g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3122
3123 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
3124         main_gc_thread = NULL;
3125 #endif
3126 }
3127
3128 static void
3129 major_finish_collection (const char *reason, int old_next_pin_slot, gboolean scan_mod_union)
3130 {
3131         LOSObject *bigobj, *prevbo;
3132         TV_DECLARE (atv);
3133         TV_DECLARE (btv);
3134
3135         TV_GETTIME (btv);
3136
3137         if (concurrent_collection_in_progress || major_collector.is_parallel)
3138                 join_workers ();
3139
3140         if (concurrent_collection_in_progress) {
3141                 current_object_ops = major_collector.major_concurrent_ops;
3142
3143                 major_copy_or_mark_from_roots (NULL, TRUE, scan_mod_union);
3144                 join_workers ();
3145
3146                 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3147
3148                 if (do_concurrent_checks)
3149                         check_nursery_is_clean ();
3150         } else {
3151                 current_object_ops = major_collector.major_ops;
3152         }
3153
3154         /*
3155          * The workers have stopped so we need to finish gray queue
3156          * work that might result from finalization in the main GC
3157          * thread.  Redirection must therefore be turned off.
3158          */
3159         sgen_gray_object_queue_disable_alloc_prepare (&gray_queue);
3160         g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
3161
3162         /* all the objects in the heap */
3163         finish_gray_stack (GENERATION_OLD, &gray_queue);
3164         TV_GETTIME (atv);
3165         time_major_finish_gray_stack += TV_ELAPSED (btv, atv);
3166
3167         /*
3168          * The (single-threaded) finalization code might have done
3169          * some copying/marking so we can only reset the GC thread's
3170          * worker data here instead of earlier when we joined the
3171          * workers.
3172          */
3173         sgen_workers_reset_data ();
3174
3175         if (objects_pinned) {
3176                 g_assert (!concurrent_collection_in_progress);
3177
3178                 /*This is slow, but we just OOM'd*/
3179                 sgen_pin_queue_clear_discarded_entries (nursery_section, old_next_pin_slot);
3180                 sgen_optimize_pin_queue (0);
3181                 sgen_find_section_pin_queue_start_end (nursery_section);
3182                 objects_pinned = 0;
3183         }
3184
3185         reset_heap_boundaries ();
3186         sgen_update_heap_boundaries ((mword)sgen_get_nursery_start (), (mword)sgen_get_nursery_end ());
3187
3188         if (check_mark_bits_after_major_collection)
3189                 sgen_check_major_heap_marked ();
3190
3191         MONO_GC_SWEEP_BEGIN (GENERATION_OLD, !major_collector.sweeps_lazily);
3192
3193         /* sweep the big objects list */
3194         prevbo = NULL;
3195         for (bigobj = los_object_list; bigobj;) {
3196                 g_assert (!object_is_pinned (bigobj->data));
3197                 if (sgen_los_object_is_pinned (bigobj->data)) {
3198                         sgen_los_unpin_object (bigobj->data);
3199                         sgen_update_heap_boundaries ((mword)bigobj->data, (mword)bigobj->data + sgen_los_object_size (bigobj));
3200                 } else {
3201                         LOSObject *to_free;
3202                         /* not referenced anywhere, so we can free it */
3203                         if (prevbo)
3204                                 prevbo->next = bigobj->next;
3205                         else
3206                                 los_object_list = bigobj->next;
3207                         to_free = bigobj;
3208                         bigobj = bigobj->next;
3209                         sgen_los_free_object (to_free);
3210                         continue;
3211                 }
3212                 prevbo = bigobj;
3213                 bigobj = bigobj->next;
3214         }
3215
3216         TV_GETTIME (btv);
3217         time_major_free_bigobjs += TV_ELAPSED (atv, btv);
3218
3219         sgen_los_sweep ();
3220
3221         TV_GETTIME (atv);
3222         time_major_los_sweep += TV_ELAPSED (btv, atv);
3223
3224         major_collector.sweep ();
3225
3226         MONO_GC_SWEEP_END (GENERATION_OLD, !major_collector.sweeps_lazily);
3227
3228         TV_GETTIME (btv);
3229         time_major_sweep += TV_ELAPSED (atv, btv);
3230
3231         if (!concurrent_collection_in_progress) {
3232                 /* walk the pin_queue, build up the fragment list of free memory, unmark
3233                  * pinned objects as we go, memzero() the empty fragments so they are ready for the
3234                  * next allocations.
3235                  */
3236                 if (!sgen_build_nursery_fragments (nursery_section, nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries, NULL))
3237                         degraded_mode = 1;
3238
3239                 /* prepare the pin queue for the next collection */
3240                 sgen_finish_pinning ();
3241
3242                 /* Clear TLABs for all threads */
3243                 sgen_clear_tlabs ();
3244
3245                 sgen_pin_stats_reset ();
3246         }
3247
3248         if (concurrent_collection_in_progress)
3249                 sgen_cement_concurrent_finish ();
3250         sgen_cement_clear_below_threshold ();
3251
3252         TV_GETTIME (atv);
3253         time_major_fragment_creation += TV_ELAPSED (btv, atv);
3254
3255         if (heap_dump_file)
3256                 dump_heap ("major", stat_major_gcs - 1, reason);
3257
3258         if (fin_ready_list || critical_fin_list) {
3259                 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
3260                 mono_gc_finalize_notify ();
3261         }
3262
3263         g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3264
3265         sgen_memgov_major_collection_end ();
3266         current_collection_generation = -1;
3267
3268         major_collector.finish_major_collection ();
3269
3270         g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
3271
3272         if (concurrent_collection_in_progress)
3273                 concurrent_collection_in_progress = FALSE;
3274
3275         check_scan_starts ();
3276
3277         binary_protocol_flush_buffers (FALSE);
3278
3279         //consistency_check ();
3280
3281         MONO_GC_END (GENERATION_OLD);
3282         binary_protocol_collection_end (stat_major_gcs - 1, GENERATION_OLD);
3283 }
3284
3285 static gboolean
3286 major_do_collection (const char *reason)
3287 {
3288         TV_DECLARE (all_atv);
3289         TV_DECLARE (all_btv);
3290         int old_next_pin_slot;
3291
3292         if (disable_major_collections)
3293                 return FALSE;
3294
3295         if (major_collector.get_and_reset_num_major_objects_marked) {
3296                 long long num_marked = major_collector.get_and_reset_num_major_objects_marked ();
3297                 g_assert (!num_marked);
3298         }
3299
3300         /* world must be stopped already */
3301         TV_GETTIME (all_atv);
3302
3303         major_start_collection (FALSE, &old_next_pin_slot);
3304         major_finish_collection (reason, old_next_pin_slot, FALSE);
3305
3306         TV_GETTIME (all_btv);
3307         gc_stats.major_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
3308
3309         /* FIXME: also report this to the user, preferably in gc-end. */
3310         if (major_collector.get_and_reset_num_major_objects_marked)
3311                 major_collector.get_and_reset_num_major_objects_marked ();
3312
3313         return bytes_pinned_from_failed_allocation > 0;
3314 }
3315
3316 static void
3317 major_start_concurrent_collection (const char *reason)
3318 {
3319         long long num_objects_marked;
3320
3321         if (disable_major_collections)
3322                 return;
3323
3324         num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
3325         g_assert (num_objects_marked == 0);
3326
3327         MONO_GC_CONCURRENT_START_BEGIN (GENERATION_OLD);
3328
3329         // FIXME: store reason and pass it when finishing
3330         major_start_collection (TRUE, NULL);
3331
3332         gray_queue_redirect (&gray_queue);
3333         sgen_workers_wait_for_jobs ();
3334
3335         num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
3336         MONO_GC_CONCURRENT_START_END (GENERATION_OLD, num_objects_marked);
3337
3338         current_collection_generation = -1;
3339 }
3340
3341 static gboolean
3342 major_update_or_finish_concurrent_collection (gboolean force_finish)
3343 {
3344         SgenGrayQueue unpin_queue;
3345         memset (&unpin_queue, 0, sizeof (unpin_queue));
3346
3347         MONO_GC_CONCURRENT_UPDATE_FINISH_BEGIN (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3348
3349         g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3350
3351         if (!force_finish && !sgen_workers_all_done ()) {
3352                 major_collector.update_cardtable_mod_union ();
3353                 sgen_los_update_cardtable_mod_union ();
3354
3355                 MONO_GC_CONCURRENT_UPDATE_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3356                 return FALSE;
3357         }
3358
3359         /*
3360          * The major collector can add global remsets which are processed in the finishing
3361          * nursery collection, below.  That implies that the workers must have finished
3362          * marking before the nursery collection is allowed to run, otherwise we might miss
3363          * some remsets.
3364          */
3365         wait_for_workers_to_finish ();
3366
3367         major_collector.update_cardtable_mod_union ();
3368         sgen_los_update_cardtable_mod_union ();
3369
3370         collect_nursery (&unpin_queue, TRUE);
3371
3372         if (mod_union_consistency_check)
3373                 sgen_check_mod_union_consistency ();
3374
3375         current_collection_generation = GENERATION_OLD;
3376         major_finish_collection ("finishing", -1, TRUE);
3377
3378         if (whole_heap_check_before_collection)
3379                 sgen_check_whole_heap (FALSE);
3380
3381         unpin_objects_from_queue (&unpin_queue);
3382         sgen_gray_object_queue_deinit (&unpin_queue);
3383
3384         MONO_GC_CONCURRENT_FINISH_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3385
3386         current_collection_generation = -1;
3387
3388         return TRUE;
3389 }
3390
3391 /*
3392  * Ensure an allocation request for @size will succeed by freeing enough memory.
3393  *
3394  * LOCKING: The GC lock MUST be held.
3395  */
3396 void
3397 sgen_ensure_free_space (size_t size)
3398 {
3399         int generation_to_collect = -1;
3400         const char *reason = NULL;
3401
3402
3403         if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
3404                 if (sgen_need_major_collection (size)) {
3405                         reason = "LOS overflow";
3406                         generation_to_collect = GENERATION_OLD;
3407                 }
3408         } else {
3409                 if (degraded_mode) {
3410                         if (sgen_need_major_collection (size)) {
3411                                 reason = "Degraded mode overflow";
3412                                 generation_to_collect = GENERATION_OLD;
3413                         }
3414                 } else if (sgen_need_major_collection (size)) {
3415                         reason = "Minor allowance";
3416                         generation_to_collect = GENERATION_OLD;
3417                 } else {
3418                         generation_to_collect = GENERATION_NURSERY;
3419                         reason = "Nursery full";                        
3420                 }
3421         }
3422
3423         if (generation_to_collect == -1) {
3424                 if (concurrent_collection_in_progress && sgen_workers_all_done ()) {
3425                         generation_to_collect = GENERATION_OLD;
3426                         reason = "Finish concurrent collection";
3427                 }
3428         }
3429
3430         if (generation_to_collect == -1)
3431                 return;
3432         sgen_perform_collection (size, generation_to_collect, reason, FALSE);
3433 }
3434
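/*
 * Stop the world, collect the requested generation (running an overflow
 * collection of the other generation if needed), then restart the world.
 */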
3435 /*
3436  * LOCKING: Assumes the GC lock is held.
3437  */
3438 void
3439 sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish)
3440 {
3441         TV_DECLARE (gc_end);
3442         GGTimingInfo infos [2];
3443         int overflow_generation_to_collect = -1;
3444         int oldest_generation_collected = generation_to_collect;
3445         const char *overflow_reason = NULL;
3446
3447         MONO_GC_REQUESTED (generation_to_collect, requested_size, wait_to_finish ? 1 : 0);
3448         if (wait_to_finish)
3449                 binary_protocol_collection_force (generation_to_collect);
3450
3451         g_assert (generation_to_collect == GENERATION_NURSERY || generation_to_collect == GENERATION_OLD);
3452
3453         memset (infos, 0, sizeof (infos));
3454         mono_profiler_gc_event (MONO_GC_EVENT_START, generation_to_collect);
3455
3456         infos [0].generation = generation_to_collect;
3457         infos [0].reason = reason;
3458         infos [0].is_overflow = FALSE;
3459         TV_GETTIME (infos [0].total_time);
3460         infos [1].generation = -1;
3461
3462         sgen_stop_world (generation_to_collect);
3463
3464         if (concurrent_collection_in_progress) {
3465                 if (major_update_or_finish_concurrent_collection (wait_to_finish && generation_to_collect == GENERATION_OLD)) {
3466                         oldest_generation_collected = GENERATION_OLD;
3467                         goto done;
3468                 }
3469                 if (generation_to_collect == GENERATION_OLD)
3470                         goto done;
3471         } else {
3472                 if (generation_to_collect == GENERATION_OLD &&
3473                                 allow_synchronous_major &&
3474                                 major_collector.want_synchronous_collection &&
3475                                 *major_collector.want_synchronous_collection) {
3476                         wait_to_finish = TRUE;
3477                 }
3478         }
3479
3480         //FIXME extract overflow reason
3481         if (generation_to_collect == GENERATION_NURSERY) {
3482                 if (collect_nursery (NULL, FALSE)) {
3483                         overflow_generation_to_collect = GENERATION_OLD;
3484                         overflow_reason = "Minor overflow";
3485                 }
3486         } else {
3487                 if (major_collector.is_concurrent) {
3488                         g_assert (!concurrent_collection_in_progress);
3489                         if (!wait_to_finish)
3490                                 collect_nursery (NULL, FALSE);
3491                 }
3492
3493                 if (major_collector.is_concurrent && !wait_to_finish) {
3494                         major_start_concurrent_collection (reason);
3495                         // FIXME: set infos[0] properly
3496                         goto done;
3497                 } else {
3498                         if (major_do_collection (reason)) {
3499                                 overflow_generation_to_collect = GENERATION_NURSERY;
3500                                 overflow_reason = "Excessive pinning";
3501                         }
3502                 }
3503         }
3504
3505         TV_GETTIME (gc_end);
3506         infos [0].total_time = SGEN_TV_ELAPSED (infos [0].total_time, gc_end);
3507
3508
3509         if (!major_collector.is_concurrent && overflow_generation_to_collect != -1) {
3510                 mono_profiler_gc_event (MONO_GC_EVENT_START, overflow_generation_to_collect);
3511                 infos [1].generation = overflow_generation_to_collect;
3512                 infos [1].reason = overflow_reason;
3513                 infos [1].is_overflow = TRUE;
3514                 infos [1].total_time = gc_end;
3515
3516                 if (overflow_generation_to_collect == GENERATION_NURSERY)
3517                         collect_nursery (NULL, FALSE);
3518                 else
3519                         major_do_collection (overflow_reason);
3520
3521                 TV_GETTIME (gc_end);
3522                 infos [1].total_time = SGEN_TV_ELAPSED (infos [1].total_time, gc_end);
3523
3524                 /* keep events symmetric */
3525                 mono_profiler_gc_event (MONO_GC_EVENT_END, overflow_generation_to_collect);
3526
3527                 oldest_generation_collected = MAX (oldest_generation_collected, overflow_generation_to_collect);
3528         }
3529
3530         SGEN_LOG (2, "Heap size: %lu, LOS size: %lu", (unsigned long)mono_gc_get_heap_size (), (unsigned long)los_memory_usage);
3531
3532         /* this also sets the proper pointers for the next allocation */
3533         if (generation_to_collect == GENERATION_NURSERY && !sgen_can_alloc_size (requested_size)) {
3534                 /* TypeBuilder and MonoMethod are killing mcs with fragmentation */
3535                 SGEN_LOG (1, "nursery collection didn't find enough room for %zd alloc (%d pinned)", requested_size, sgen_get_pinned_count ());
3536                 sgen_dump_pin_queue ();
3537                 degraded_mode = 1;
3538         }
3539
3540  done:
3541         g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3542
3543         sgen_restart_world (oldest_generation_collected, infos);
3544
3545         mono_profiler_gc_event (MONO_GC_EVENT_END, generation_to_collect);
3546 }
3547
3548 /*
3549  * ######################################################################
3550  * ########  Memory allocation from the OS
3551  * ######################################################################
3552  * This section of code deals with getting memory from the OS and
3553  * allocating memory for GC-internal data structures.
3554  * Internal memory can be handled with a freelist for small objects.
3555  */
3556
3557 /*
3558  * Debug reporting.
3559  */
3560 G_GNUC_UNUSED static void
3561 report_internal_mem_usage (void)
3562 {
3563         printf ("Internal memory usage:\n");
3564         sgen_report_internal_mem_usage ();
3565         printf ("Pinned memory usage:\n");
3566         major_collector.report_pinned_memory_usage ();
3567 }
3568
3569 /*
3570  * ######################################################################
3571  * ########  Finalization support
3572  * ######################################################################
3573  */
3574
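/*
 * An old generation object is alive if it is pinned or forwarded, or if the
 * major collector reports it live; a LOS object is alive only when pinned.
 */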
3575 static inline gboolean
3576 sgen_major_is_object_alive (void *object)
3577 {
3578         mword objsize;
3579
3580         /* Oldgen objects can be pinned and forwarded too */
3581         if (SGEN_OBJECT_IS_PINNED (object) || SGEN_OBJECT_IS_FORWARDED (object))
3582                 return TRUE;
3583
3584         /*
3585          * FIXME: major_collector.is_object_live() also calculates the
3586          * size.  Avoid the double calculation.
3587          */
3588         objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)object));
3589         if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
3590                 return sgen_los_object_is_pinned (object);
3591
3592         return major_collector.is_object_live (object);
3593 }
3594
3595 /*
3596  * If the object has been forwarded it means it's still reachable, so it's alive.
3597  * If it is pinned it's still alive as well.
3598  * A LOS object is only alive if we have pinned it.
3599  * Return TRUE if @object is alive.
3600  */
3601 static inline gboolean
3602 sgen_is_object_alive (void *object)
3603 {
3604         if (ptr_in_nursery (object))
3605                 return sgen_nursery_is_object_alive (object);
3606
3607         return sgen_major_is_object_alive (object);
3608 }
3609
3610 /*
3611  * This function returns true if @object is either alive or it belongs to the old gen
3612  * and we're currently doing a minor collection.
3613  */
3614 static inline int
3615 sgen_is_object_alive_for_current_gen (char *object)
3616 {
3617         if (ptr_in_nursery (object))
3618                 return sgen_nursery_is_object_alive (object);
3619
3620         if (current_collection_generation == GENERATION_NURSERY)
3621                 return TRUE;
3622
3623         return sgen_major_is_object_alive (object);
3624 }
3625
3626 /*
3627  * This function returns true if @object is alive and belongs to the
3628  * current collection - major collections are full heap, while old gen
3629  * objects never belong to a minor collection.
3630  */
3631 static inline int
3632 sgen_is_object_alive_and_on_current_collection (char *object)
3633 {
3634         if (ptr_in_nursery (object))
3635                 return sgen_nursery_is_object_alive (object);
3636
3637         if (current_collection_generation == GENERATION_NURSERY)
3638                 return FALSE;
3639
3640         return sgen_major_is_object_alive (object);
3641 }
3642
3643
3644 gboolean
3645 sgen_gc_is_object_ready_for_finalization (void *object)
3646 {
3647         return !sgen_is_object_alive (object);
3648 }
3649
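/*
 * Returns whether @obj's class derives from the runtime's critical finalizer
 * class (mono_defaults.critical_finalizer_object); if that class isn't set,
 * no finalizer is considered critical.
 */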
3650 static gboolean
3651 has_critical_finalizer (MonoObject *obj)
3652 {
3653         MonoClass *class;
3654
3655         if (!mono_defaults.critical_finalizer_object)
3656                 return FALSE;
3657
3658         class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;
3659
3660         return mono_class_has_parent_fast (class, mono_defaults.critical_finalizer_object);
3661 }
3662
3663 void
3664 sgen_queue_finalization_entry (MonoObject *obj)
3665 {
3666         FinalizeReadyEntry *entry = sgen_alloc_internal (INTERNAL_MEM_FINALIZE_READY_ENTRY);
3667         gboolean critical = has_critical_finalizer (obj);
3668         entry->object = obj;
3669         if (critical) {
3670                 entry->next = critical_fin_list;
3671                 critical_fin_list = entry;
3672         } else {
3673                 entry->next = fin_ready_list;
3674                 fin_ready_list = entry;
3675         }
3676
3677 #ifdef ENABLE_DTRACE
3678         if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) {
3679                 int gen = sgen_ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD;
3680                 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
3681                 MONO_GC_FINALIZE_ENQUEUE ((mword)obj, sgen_safe_object_get_size (obj),
3682                                 vt->klass->name_space, vt->klass->name, gen, critical);
3683         }
3684 #endif
3685 }
3686
3687 gboolean
3688 sgen_object_is_live (void *obj)
3689 {
3690         return sgen_is_object_alive_and_on_current_collection (obj);
3691 }
3692
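/*
 * Drop ephemeron arrays whose vtable has already been cleared, i.e. arrays
 * belonging to a domain that is being unloaded.
 */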
3693 /* LOCKING: requires that the GC lock is held */
3694 static void
3695 null_ephemerons_for_domain (MonoDomain *domain)
3696 {
3697         EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3698
3699         while (current) {
3700                 MonoObject *object = (MonoObject*)current->array;
3701
3702                 if (object && !object->vtable) {
3703                         EphemeronLinkNode *tmp = current;
3704
3705                         if (prev)
3706                                 prev->next = current->next;
3707                         else
3708                                 ephemeron_list = current->next;
3709
3710                         current = current->next;
3711                         sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
3712                 } else {
3713                         prev = current;
3714                         current = current->next;
3715                 }
3716         }
3717 }
3718
3719 /* LOCKING: requires that the GC lock is held */
3720 static void
3721 clear_unreachable_ephemerons (ScanCopyContext ctx)
3722 {
3723         CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3724         GrayQueue *queue = ctx.queue;
3725         EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3726         MonoArray *array;
3727         Ephemeron *cur, *array_end;
3728         char *tombstone;
3729
3730         while (current) {
3731                 char *object = current->array;
3732
3733                 if (!sgen_is_object_alive_for_current_gen (object)) {
3734                         EphemeronLinkNode *tmp = current;
3735
3736                         SGEN_LOG (5, "Dead Ephemeron array at %p", object);
3737
3738                         if (prev)
3739                                 prev->next = current->next;
3740                         else
3741                                 ephemeron_list = current->next;
3742
3743                         current = current->next;
3744                         sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
3745
3746                         continue;
3747                 }
3748
3749                 copy_func ((void**)&object, queue);
3750                 current->array = object;
3751
3752                 SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", object);
3753
3754                 array = (MonoArray*)object;
3755                 cur = mono_array_addr (array, Ephemeron, 0);
3756                 array_end = cur + mono_array_length_fast (array);
3757                 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3758
3759                 for (; cur < array_end; ++cur) {
3760                         char *key = (char*)cur->key;
3761
3762                         if (!key || key == tombstone)
3763                                 continue;
3764
3765                         SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3766                                 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3767                                 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
3768
3769                         if (!sgen_is_object_alive_for_current_gen (key)) {
3770                                 cur->key = tombstone;
3771                                 cur->value = NULL;
3772                                 continue;
3773                         }
3774                 }
3775                 prev = current;
3776                 current = current->next;
3777         }
3778 }
3779
3780 /*
3781  * LOCKING: requires that the GC lock is held
3782  *
3783  * Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
3784  */
3785 static int
3786 mark_ephemerons_in_range (ScanCopyContext ctx)
3787 {
3788         CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3789         GrayQueue *queue = ctx.queue;
3790         int nothing_marked = 1;
3791         EphemeronLinkNode *current = ephemeron_list;
3792         MonoArray *array;
3793         Ephemeron *cur, *array_end;
3794         char *tombstone;
3795
3796         for (current = ephemeron_list; current; current = current->next) {
3797                 char *object = current->array;
3798                 SGEN_LOG (5, "Ephemeron array at %p", object);
3799
3800                 /*It has to be alive*/
3801                 if (!sgen_is_object_alive_for_current_gen (object)) {
3802                         SGEN_LOG (5, "\tnot reachable");
3803                         continue;
3804                 }
3805
3806                 copy_func ((void**)&object, queue);
3807
3808                 array = (MonoArray*)object;
3809                 cur = mono_array_addr (array, Ephemeron, 0);
3810                 array_end = cur + mono_array_length_fast (array);
3811                 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3812
3813                 for (; cur < array_end; ++cur) {
3814                         char *key = cur->key;
3815
3816                         if (!key || key == tombstone)
3817                                 continue;
3818
3819                         SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3820                                 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3821                                 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
3822
3823                         if (sgen_is_object_alive_for_current_gen (key)) {
3824                                 char *value = cur->value;
3825
3826                                 copy_func ((void**)&cur->key, queue);
3827                                 if (value) {
3828                                         if (!sgen_is_object_alive_for_current_gen (value))
3829                                                 nothing_marked = 0;
3830                                         copy_func ((void**)&cur->value, queue);
3831                                 }
3832                         }
3833                 }
3834         }
3835
3836         SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
3837         return nothing_marked;
3838 }
3839
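/*
 * Run pending finalizers one at a time, re-taking the GC lock between
 * finalizers to unlink the entry that was just processed.  Returns the number
 * of finalizers invoked.
 */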
3840 int
3841 mono_gc_invoke_finalizers (void)
3842 {
3843         FinalizeReadyEntry *entry = NULL;
3844         gboolean entry_is_critical = FALSE;
3845         int count = 0;
3846         void *obj;
3847         /* FIXME: batch to reduce lock contention */
3848         while (fin_ready_list || critical_fin_list) {
3849                 LOCK_GC;
3850
3851                 if (entry) {
3852                         FinalizeReadyEntry **list = entry_is_critical ? &critical_fin_list : &fin_ready_list;
3853
3854                         /* We finalized this entry in the last
3855                            iteration; now we need to remove it from
3856                            the list. */
3857                         if (*list == entry)
3858                                 *list = entry->next;
3859                         else {
3860                                 FinalizeReadyEntry *e = *list;
3861                                 while (e->next != entry)
3862                                         e = e->next;
3863                                 e->next = entry->next;
3864                         }
3865                         sgen_free_internal (entry, INTERNAL_MEM_FINALIZE_READY_ENTRY);
3866                         entry = NULL;
3867                 }
3868
3869                 /* Now look for the first non-null entry. */
3870                 for (entry = fin_ready_list; entry && !entry->object; entry = entry->next)
3871                         ;
3872                 if (entry) {
3873                         entry_is_critical = FALSE;
3874                 } else {
3875                         entry_is_critical = TRUE;
3876                         for (entry = critical_fin_list; entry && !entry->object; entry = entry->next)
3877                                 ;
3878                 }
3879
3880                 if (entry) {
3881                         g_assert (entry->object);
3882                         num_ready_finalizers--;
3883                         obj = entry->object;
3884                         entry->object = NULL;
3885                         SGEN_LOG (7, "Finalizing object %p (%s)", obj, safe_name (obj));
3886                 }
3887
3888                 UNLOCK_GC;
3889
3890                 if (!entry)
3891                         break;
3892
3893                 g_assert (entry->object == NULL);
3894                 count++;
3895                 /* the object is on the stack so it is pinned */
3896                 /*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
3897                 mono_gc_run_finalize (obj, NULL);
3898         }
3899         g_assert (!entry);
3900         return count;
3901 }
3902
3903 gboolean
3904 mono_gc_pending_finalizers (void)
3905 {
3906         return fin_ready_list || critical_fin_list;
3907 }
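
/*
 * Illustrative sketch (this is not the runtime's actual finalizer thread):
 * a driver polls mono_gc_pending_finalizers () and runs the ready queue,
 * using the count returned by mono_gc_invoke_finalizers ():
 *
 *     while (mono_gc_pending_finalizers ()) {
 *             int ran = mono_gc_invoke_finalizers ();
 *             // ran == number of finalizers executed in this batch
 *     }
 */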
3908
3909 /*
3910  * ######################################################################
3911  * ########  registered roots support
3912  * ######################################################################
3913  */
3914
3915 /*
3916  * We do not coalesce roots.
3917  */
3918 static int
3919 mono_gc_register_root_inner (char *start, size_t size, void *descr, int root_type)
3920 {
3921         RootRecord new_root;
3922         int i;
3923         LOCK_GC;
3924         for (i = 0; i < ROOT_TYPE_NUM; ++i) {
3925                 RootRecord *root = sgen_hash_table_lookup (&roots_hash [i], start);
3926                 /* we allow changing the size and the descriptor (for thread statics etc) */
3927                 if (root) {
3928                         size_t old_size = root->end_root - start;
3929                         root->end_root = start + size;
3930                         g_assert (((root->root_desc != 0) && (descr != NULL)) ||
3931                                           ((root->root_desc == 0) && (descr == NULL)));
3932                         root->root_desc = (mword)descr;
3933                         roots_size += size;
3934                         roots_size -= old_size;
3935                         UNLOCK_GC;
3936                         return TRUE;
3937                 }
3938         }
3939
3940         new_root.end_root = start + size;
3941         new_root.root_desc = (mword)descr;
3942
3943         sgen_hash_table_replace (&roots_hash [root_type], start, &new_root, NULL);
3944         roots_size += size;
3945
3946         SGEN_LOG (3, "Added root for range: %p-%p, descr: %p  (%d/%d bytes)", start, new_root.end_root, descr, (int)size, (int)roots_size);
3947
3948         UNLOCK_GC;
3949         return TRUE;
3950 }
3951
3952 int
3953 mono_gc_register_root (char *start, size_t size, void *descr)
3954 {
3955         return mono_gc_register_root_inner (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED);
3956 }
3957
3958 int
3959 mono_gc_register_root_wbarrier (char *start, size_t size, void *descr)
3960 {
3961         return mono_gc_register_root_inner (start, size, descr, ROOT_TYPE_WBARRIER);
3962 }
3963
3964 void
3965 mono_gc_deregister_root (char* addr)
3966 {
3967         int root_type;
3968         RootRecord root;
3969
3970         LOCK_GC;
3971         for (root_type = 0; root_type < ROOT_TYPE_NUM; ++root_type) {
3972                 if (sgen_hash_table_remove (&roots_hash [root_type], addr, &root))
3973                         roots_size -= (root.end_root - addr);
3974         }
3975         UNLOCK_GC;
3976 }
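
/*
 * Example (illustrative): registering a runtime-owned word that holds a
 * managed reference as a GC root.  Passing a NULL descriptor makes it a
 * pinned root (see mono_gc_register_root () above); a GC descriptor would
 * make it a precisely scanned one.  `cached_obj' is a hypothetical slot.
 *
 *     static MonoObject *cached_obj;
 *
 *     mono_gc_register_root ((char*)&cached_obj, sizeof (MonoObject*), NULL);
 *     // ... cached_obj may now point at managed objects ...
 *     mono_gc_deregister_root ((char*)&cached_obj);
 */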
3977
3978 /*
3979  * ######################################################################
3980  * ########  Thread handling (stop/start code)
3981  * ######################################################################
3982  */
3983
3984 unsigned int sgen_global_stop_count = 0;
3985
3986 int
3987 sgen_get_current_collection_generation (void)
3988 {
3989         return current_collection_generation;
3990 }
3991
3992 void
3993 mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
3994 {
3995         gc_callbacks = *callbacks;
3996 }
3997
3998 MonoGCCallbacks *
3999 mono_gc_get_gc_callbacks ()
4000 {
4001         return &gc_callbacks;
4002 }
4003
4004 /* Variables holding start/end nursery so it won't have to be passed at every call */
4005 static void *scan_area_arg_start, *scan_area_arg_end;
4006
4007 void
4008 mono_gc_conservatively_scan_area (void *start, void *end)
4009 {
4010         conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
4011 }
4012
4013 void*
4014 mono_gc_scan_object (void *obj)
4015 {
4016         UserCopyOrMarkData *data = mono_native_tls_get_value (user_copy_or_mark_key);
4017         current_object_ops.copy_or_mark_object (&obj, data->queue);
4018         return obj;
4019 }
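
/*
 * Illustrative sketch: a precise stack-mark callback installed through
 * mono_gc_set_gc_callbacks () is expected to feed every reference slot it
 * finds through mono_gc_scan_object () and store back the (possibly moved)
 * object.  Parameter types follow the call in scan_thread_data () below;
 * the frame-walking helpers are hypothetical:
 *
 *     static void
 *     my_thread_mark (void *runtime_data, void *stack_start, void *stack_end, gboolean precise)
 *     {
 *             void **slot;
 *             for (slot = first_reference_slot (stack_start, stack_end); slot; slot = next_reference_slot (slot))
 *                     *slot = mono_gc_scan_object (*slot);
 *     }
 */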
4020
4021 /*
4022  * Mark from thread stacks and registers.
4023  */
4024 static void
4025 scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue)
4026 {
4027         SgenThreadInfo *info;
4028
4029         scan_area_arg_start = start_nursery;
4030         scan_area_arg_end = end_nursery;
4031
4032         FOREACH_THREAD (info) {
4033                 if (info->skip) {
4034                         SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
4035                         continue;
4036                 }
4037                 if (info->gc_disabled) {
4038                         SGEN_LOG (3, "GC disabled for thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
4039                         continue;
4040                 }
4041                 if (mono_thread_info_run_state (info) != STATE_RUNNING) {
4042                         SGEN_LOG (3, "Skipping non-running thread %p, range: %p-%p, size: %td (state %d)", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, mono_thread_info_run_state (info));
4043                         continue;
4044                 }
4045                 SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %td, pinned=%d", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, sgen_get_pinned_count ());
4046                 if (gc_callbacks.thread_mark_func && !conservative_stack_mark) {
4047                         UserCopyOrMarkData data = { NULL, queue };
4048                         set_user_copy_or_mark_data (&data);
4049                         gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise);
4050                         set_user_copy_or_mark_data (NULL);
4051                 } else if (!precise) {
4052                         if (!conservative_stack_mark) {
4053                                 fprintf (stderr, "Precise stack mark not supported - disabling.\n");
4054                                 conservative_stack_mark = TRUE;
4055                         }
4056                         conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
4057                 }
4058
4059                 if (!precise) {
4060 #ifdef USE_MONO_CTX
4061                         conservatively_pin_objects_from ((void**)&info->ctx, (void**)&info->ctx + ARCH_NUM_REGS,
4062                                 start_nursery, end_nursery, PIN_TYPE_STACK);
4063 #else
4064                         conservatively_pin_objects_from ((void**)&info->regs, (void**)&info->regs + ARCH_NUM_REGS,
4065                                         start_nursery, end_nursery, PIN_TYPE_STACK);
4066 #endif
4067                 }
4068         } END_FOREACH_THREAD
4069 }
4070
4071 static gboolean
4072 ptr_on_stack (void *ptr)
4073 {
4074         gpointer stack_start = &stack_start;
4075         SgenThreadInfo *info = mono_thread_info_current ();
4076
4077         if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
4078                 return TRUE;
4079         return FALSE;
4080 }
4081
4082 static void*
4083 sgen_thread_register (SgenThreadInfo* info, void *addr)
4084 {
4085 #ifndef HAVE_KW_THREAD
4086         info->tlab_start = info->tlab_next = info->tlab_temp_end = info->tlab_real_end = NULL;
4087
4088         g_assert (!mono_native_tls_get_value (thread_info_key));
4089         mono_native_tls_set_value (thread_info_key, info);
4090 #else
4091         sgen_thread_info = info;
4092 #endif
4093
4094 #ifdef SGEN_POSIX_STW
4095         info->stop_count = -1;
4096         info->signal = 0;
4097 #endif
4098         info->skip = 0;
4099         info->stack_start = NULL;
4100         info->stopped_ip = NULL;
4101         info->stopped_domain = NULL;
4102 #ifdef USE_MONO_CTX
4103         memset (&info->ctx, 0, sizeof (MonoContext));
4104 #else
4105         memset (&info->regs, 0, sizeof (info->regs));
4106 #endif
4107
4108         sgen_init_tlab_info (info);
4109
4110         binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));
4111
4112         // FIXME: Unify with mono_thread_get_stack_bounds ()
4113         /* try to get it with attributes first */
4114 #if (defined(HAVE_PTHREAD_GETATTR_NP) || defined(HAVE_PTHREAD_ATTR_GET_NP)) && defined(HAVE_PTHREAD_ATTR_GETSTACK)
4115         {
4116                 size_t size;
4117                 void *sstart;
4118                 pthread_attr_t attr;
4119
4120 #if defined(HAVE_PTHREAD_GETATTR_NP)
4121                 /* Linux */
4122                 pthread_getattr_np (pthread_self (), &attr);
4123 #elif defined(HAVE_PTHREAD_ATTR_GET_NP)
4124                 /* BSD */
4125                 pthread_attr_init (&attr);
4126                 pthread_attr_get_np (pthread_self (), &attr);
4127 #else
4128 #error Cannot determine which API is needed to retrieve pthread attributes.
4129 #endif
4130
4131                 pthread_attr_getstack (&attr, &sstart, &size);
4132                 info->stack_start_limit = sstart;
4133                 info->stack_end = (char*)sstart + size;
4134                 pthread_attr_destroy (&attr);
4135         }
4136 #elif defined(HAVE_PTHREAD_GET_STACKSIZE_NP) && defined(HAVE_PTHREAD_GET_STACKADDR_NP)
4137         {
4138                 size_t stsize = 0;
4139                 guint8 *staddr = NULL;
4140
4141                 mono_thread_get_stack_bounds (&staddr, &stsize);
4142                 info->stack_start_limit = staddr;
4143                 info->stack_end = staddr + stsize;
4144         }
4145 #else
4146         {
4147                 /* FIXME: we assume the stack grows down */
4148                 gsize stack_bottom = (gsize)addr;
4149                 stack_bottom += 4095;
4150                 stack_bottom &= ~4095;
4151                 info->stack_end = (char*)stack_bottom;
4152         }
4153 #endif
4154
4155 #ifdef HAVE_KW_THREAD
4156         stack_end = info->stack_end;
4157 #endif
4158
4159         SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->stack_end);
4160
4161         if (gc_callbacks.thread_attach_func)
4162                 info->runtime_data = gc_callbacks.thread_attach_func ();
4163         return info;
4164 }
4165
4166 static void
4167 sgen_thread_detach (SgenThreadInfo *p)
4168 {
4169         /* If a delegate is passed to native code and invoked on a thread we don't
4170          * know about, the jit will register it with mono_jit_thread_attach, but
4171          * we have no way of knowing when that thread goes away.  SGen has a TSD
4172          * so we assume that if the domain is still registered, we can detach
4173          * the thread
4174          */
4175         if (mono_domain_get ())
4176                 mono_thread_detach (mono_thread_current ());
4177 }
4178
4179 static void
4180 sgen_thread_unregister (SgenThreadInfo *p)
4181 {
4182         binary_protocol_thread_unregister ((gpointer)mono_thread_info_get_tid (p));
4183         SGEN_LOG (3, "unregister thread %p (%p)", p, (gpointer)mono_thread_info_get_tid (p));
4184
4185         if (gc_callbacks.thread_detach_func) {
4186                 gc_callbacks.thread_detach_func (p->runtime_data);
4187                 p->runtime_data = NULL;
4188         }
4189 }
4190
4191
4192 static void
4193 sgen_thread_attach (SgenThreadInfo *info)
4194 {
4195         LOCK_GC;
4196         /* This is odd: can we get attached before the GC is inited? */
4197         init_stats ();
4198         UNLOCK_GC;
4199         
4200         if (gc_callbacks.thread_attach_func && !info->runtime_data)
4201                 info->runtime_data = gc_callbacks.thread_attach_func ();
4202 }
4203 gboolean
4204 mono_gc_register_thread (void *baseptr)
4205 {
4206         return mono_thread_info_attach (baseptr) != NULL;
4207 }
4208
4209 /*
4210  * mono_gc_set_stack_end:
4211  *
4212  *   Set the end of the current thread's stack to STACK_END. The stack space between
4213  * STACK_END and the real end of the thread's stack will not be scanned during collections.
4214  */
4215 void
4216 mono_gc_set_stack_end (void *stack_end)
4217 {
4218         SgenThreadInfo *info;
4219
4220         LOCK_GC;
4221         info = mono_thread_info_current ();
4222         if (info) {
4223                 g_assert (stack_end < info->stack_end);
4224                 info->stack_end = stack_end;
4225         }
4226         UNLOCK_GC;
4227 }
4228
4229 #if USE_PTHREAD_INTERCEPT
4230
4231
4232 int
4233 mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
4234 {
4235         return pthread_create (new_thread, attr, start_routine, arg);
4236 }
4237
4238 int
4239 mono_gc_pthread_join (pthread_t thread, void **retval)
4240 {
4241         return pthread_join (thread, retval);
4242 }
4243
4244 int
4245 mono_gc_pthread_detach (pthread_t thread)
4246 {
4247         return pthread_detach (thread);
4248 }
4249
4250 void
4251 mono_gc_pthread_exit (void *retval) 
4252 {
4253         mono_thread_info_dettach ();
4254         pthread_exit (retval);
4255 }
4256
4257 #endif /* USE_PTHREAD_INTERCEPT */
4258
4259 /*
4260  * ######################################################################
4261  * ########  Write barriers
4262  * ######################################################################
4263  */
4264
4265 /*
4266  * Note: the write barriers first do the needed GC work and then do the actual store:
4267  * this way the value is visible to the conservative GC scan after the write barrier
4268  * itself. If a GC interrupts the barrier in the middle, value will be kept alive by
4269  * the conservative scan, otherwise by the remembered set scan.
4270  */
4271 void
4272 mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
4273 {
4274         HEAVY_STAT (++stat_wbarrier_set_field);
4275         if (ptr_in_nursery (field_ptr)) {
4276                 *(void**)field_ptr = value;
4277                 return;
4278         }
4279         SGEN_LOG (8, "Adding remset at %p", field_ptr);
4280         if (value)
4281                 binary_protocol_wbarrier (field_ptr, value, value->vtable);
4282
4283         remset.wbarrier_set_field (obj, field_ptr, value);
4284 }
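
/*
 * Illustrative example: a managed reference store such as obj->field = value
 * typically reaches the GC as a single barrier call,
 *
 *     mono_gc_wbarrier_set_field (obj, &obj->field, value);
 *
 * The barrier does the remembered-set bookkeeping and then performs the
 * store itself, so if a collection interrupts it the value is still live
 * via the conservative stack scan, as described in the section comment above.
 */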
4285
4286 void
4287 mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
4288 {
4289         HEAVY_STAT (++stat_wbarrier_set_arrayref);
4290         if (ptr_in_nursery (slot_ptr)) {
4291                 *(void**)slot_ptr = value;
4292                 return;
4293         }
4294         SGEN_LOG (8, "Adding remset at %p", slot_ptr);
4295         if (value)
4296                 binary_protocol_wbarrier (slot_ptr, value, value->vtable);
4297
4298         remset.wbarrier_set_arrayref (arr, slot_ptr, value);
4299 }
4300
4301 void
4302 mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
4303 {
4304         HEAVY_STAT (++stat_wbarrier_arrayref_copy);
4305         /*This check can be done without taking a lock since dest_ptr array is pinned*/
4306         if (ptr_in_nursery (dest_ptr) || count <= 0) {
4307                 mono_gc_memmove (dest_ptr, src_ptr, count * sizeof (gpointer));
4308                 return;
4309         }
4310
4311 #ifdef SGEN_BINARY_PROTOCOL
4312         {
4313                 int i;
4314                 for (i = 0; i < count; ++i) {
4315                         gpointer dest = (gpointer*)dest_ptr + i;
4316                         gpointer obj = *((gpointer*)src_ptr + i);
4317                         if (obj)
4318                                 binary_protocol_wbarrier (dest, obj, (gpointer)LOAD_VTABLE (obj));
4319                 }
4320         }
4321 #endif
4322
4323         remset.wbarrier_arrayref_copy (dest_ptr, src_ptr, count);
4324 }
4325
4326 static char *found_obj;
4327
4328 static void
4329 find_object_for_ptr_callback (char *obj, size_t size, void *user_data)
4330 {
4331         char *ptr = user_data;
4332
4333         if (ptr >= obj && ptr < obj + size) {
4334                 g_assert (!found_obj);
4335                 found_obj = obj;
4336         }
4337 }
4338
4339 /* for use in the debugger */
4340 char* find_object_for_ptr (char *ptr);
4341 char*
4342 find_object_for_ptr (char *ptr)
4343 {
4344         if (ptr >= nursery_section->data && ptr < nursery_section->end_data) {
4345                 found_obj = NULL;
4346                 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
4347                                 find_object_for_ptr_callback, ptr, TRUE);
4348                 if (found_obj)
4349                         return found_obj;
4350         }
4351
4352         found_obj = NULL;
4353         sgen_los_iterate_objects (find_object_for_ptr_callback, ptr);
4354         if (found_obj)
4355                 return found_obj;
4356
4357         /*
4358          * Very inefficient, but this is debugging code, supposed to
4359          * be called from gdb, so we don't care.
4360          */
4361         found_obj = NULL;
4362         major_collector.iterate_objects (TRUE, TRUE, find_object_for_ptr_callback, ptr);
4363         return found_obj;
4364 }
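
/*
 * Example (illustrative): from a debugger session one can map an interior
 * pointer back to its containing object, e.g.
 *
 *     (gdb) p find_object_for_ptr ((char*)0x7fb4a1c2d010)
 *
 * which returns the object's start address or NULL if the pointer does not
 * fall inside any live object.  The address above is made up.
 */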
4365
4366 void
4367 mono_gc_wbarrier_generic_nostore (gpointer ptr)
4368 {
4369         gpointer obj;
4370
4371         HEAVY_STAT (++stat_wbarrier_generic_store);
4372
4373 #ifdef XDOMAIN_CHECKS_IN_WBARRIER
4374         /* FIXME: ptr_in_heap must be called with the GC lock held */
4375         if (xdomain_checks && *(MonoObject**)ptr && ptr_in_heap (ptr)) {
4376                 char *start = find_object_for_ptr (ptr);
4377                 MonoObject *value = *(MonoObject**)ptr;
4378                 LOCK_GC;
4379                 g_assert (start);
4380                 if (start) {
4381                         MonoObject *obj = (MonoObject*)start;
4382                         if (obj->vtable->domain != value->vtable->domain)
4383                                 g_assert (is_xdomain_ref_allowed (ptr, start, obj->vtable->domain));
4384                 }
4385                 UNLOCK_GC;
4386         }
4387 #endif
4388
4389         obj = *(gpointer*)ptr;
4390         if (obj)
4391                 binary_protocol_wbarrier (ptr, obj, (gpointer)LOAD_VTABLE (obj));
4392
4393         if (ptr_in_nursery (ptr) || ptr_on_stack (ptr)) {
4394                 SGEN_LOG (8, "Skipping remset at %p", ptr);
4395                 return;
4396         }
4397
4398         /*
4399          * We need to record old->old pointer locations for the
4400          * concurrent collector.
4401          */
4402         if (!ptr_in_nursery (obj) && !concurrent_collection_in_progress) {
4403                 SGEN_LOG (8, "Skipping remset at %p", ptr);
4404                 return;
4405         }
4406
4407         SGEN_LOG (8, "Adding remset at %p", ptr);
4408
4409         remset.wbarrier_generic_nostore (ptr);
4410 }
4411
4412 void
4413 mono_gc_wbarrier_generic_store (gpointer ptr, MonoObject* value)
4414 {
4415         SGEN_LOG (8, "Wbarrier store at %p to %p (%s)", ptr, value, value ? safe_name (value) : "null");
4416         *(void**)ptr = value;
4417         if (ptr_in_nursery (value))
4418                 mono_gc_wbarrier_generic_nostore (ptr);
4419         sgen_dummy_use (value);
4420 }
4421
4422 /* Same as mono_gc_wbarrier_generic_store () but performs the store
4423  * as an atomic operation with release semantics.
4424  */
4425 void
4426 mono_gc_wbarrier_generic_store_atomic (gpointer ptr, MonoObject *value)
4427 {
4428         HEAVY_STAT (++stat_wbarrier_generic_store_atomic);
4429
4430         SGEN_LOG (8, "Wbarrier atomic store at %p to %p (%s)", ptr, value, value ? safe_name (value) : "null");
4431
4432         InterlockedWritePointer (ptr, value);
4433
4434         if (ptr_in_nursery (value))
4435                 mono_gc_wbarrier_generic_nostore (ptr);
4436
4437         sgen_dummy_use (value);
4438 }
4439
4440 void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
4441 {
4442         mword *dest = _dest;
4443         mword *src = _src;
4444
4445         while (size) {
4446                 if (bitmap & 0x1)
4447                         mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);
4448                 else
4449                         *dest = *src;
4450                 ++src;
4451                 ++dest;
4452                 size -= SIZEOF_VOID_P;
4453                 bitmap >>= 1;
4454         }
4455 }
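
/*
 * Worked example (illustrative): for a hypothetical value type laid out as
 *
 *     struct { MonoObject *a; mword b; MonoObject *c; }
 *
 * a bitmap of 0x5 (binary 101) marks word slots 0 and 2 as references, so
 * the loop above routes `a' and `c' through the write barrier and copies
 * `b' as a plain word.
 */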
4456
4457 #ifdef SGEN_BINARY_PROTOCOL
4458 #undef HANDLE_PTR
4459 #define HANDLE_PTR(ptr,obj) do {                                        \
4460                 gpointer o = *(gpointer*)(ptr);                         \
4461                 if ((o)) {                                              \
4462                         gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
4463                         binary_protocol_wbarrier (d, o, (gpointer) LOAD_VTABLE (o)); \
4464                 }                                                       \
4465         } while (0)
4466
4467 static void
4468 scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
4469 {
4470 #define SCAN_OBJECT_NOVTABLE
4471 #include "sgen-scan-object.h"
4472 }
4473 #endif
4474
4475 void
4476 mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
4477 {
4478         HEAVY_STAT (++stat_wbarrier_value_copy);
4479         g_assert (klass->valuetype);
4480
4481         SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, klass->gc_descr, klass->name, klass);
4482
4483         if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !SGEN_CLASS_HAS_REFERENCES (klass)) {
4484                 size_t element_size = mono_class_value_size (klass, NULL);
4485                 size_t size = count * element_size;
4486                 mono_gc_memmove (dest, src, size);              
4487                 return;
4488         }
4489
4490 #ifdef SGEN_BINARY_PROTOCOL
4491         {
4492                 size_t element_size = mono_class_value_size (klass, NULL);
4493                 int i;
4494                 for (i = 0; i < count; ++i) {
4495                         scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
4496                                         (char*)src + i * element_size - sizeof (MonoObject),
4497                                         (mword) klass->gc_descr);
4498                 }
4499         }
4500 #endif
4501
4502         remset.wbarrier_value_copy (dest, src, count, klass);
4503 }
4504
4505 /**
4506  * mono_gc_wbarrier_object_copy:
4507  *
4508  * Write barrier to call when obj is the result of a clone or copy of an object.
4509  */
4510 void
4511 mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
4512 {
4513         int size;
4514
4515         HEAVY_STAT (++stat_wbarrier_object_copy);
4516
4517         if (ptr_in_nursery (obj) || ptr_on_stack (obj)) {
4518                 size = mono_object_class (obj)->instance_size;
4519                 mono_gc_memmove ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
4520                                 size - sizeof (MonoObject));
4521                 return; 
4522         }
4523
4524 #ifdef SGEN_BINARY_PROTOCOL
4525         scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
4526 #endif
4527
4528         remset.wbarrier_object_copy (obj, src);
4529 }
4530
4531
4532 /*
4533  * ######################################################################
4534  * ########  Other mono public interface functions.
4535  * ######################################################################
4536  */
4537
4538 #define REFS_SIZE 128
4539 typedef struct {
4540         void *data;
4541         MonoGCReferences callback;
4542         int flags;
4543         int count;
4544         int called;
4545         MonoObject *refs [REFS_SIZE];
4546         uintptr_t offsets [REFS_SIZE];
4547 } HeapWalkInfo;
4548
4549 #undef HANDLE_PTR
4550 #define HANDLE_PTR(ptr,obj)     do {    \
4551                 if (*(ptr)) {   \
4552                         if (hwi->count == REFS_SIZE) {  \
4553                                 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);    \
4554                                 hwi->count = 0; \
4555                                 hwi->called = 1;        \
4556                         }       \
4557                         hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start;  \
4558                         hwi->refs [hwi->count++] = *(ptr);      \
4559                 }       \
4560         } while (0)
4561
4562 static void
4563 collect_references (HeapWalkInfo *hwi, char *start, size_t size)
4564 {
4565 #include "sgen-scan-object.h"
4566 }
4567
4568 static void
4569 walk_references (char *start, size_t size, void *data)
4570 {
4571         HeapWalkInfo *hwi = data;
4572         hwi->called = 0;
4573         hwi->count = 0;
4574         collect_references (hwi, start, size);
4575         if (hwi->count || !hwi->called)
4576                 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
4577 }
4578
4579 /**
4580  * mono_gc_walk_heap:
4581  * @flags: flags for future use
4582  * @callback: a function pointer called for each object in the heap
4583  * @data: a user data pointer that is passed to callback
4584  *
4585  * This function can be used to iterate over all the live objects in the heap:
4586  * for each object, @callback is invoked, providing info about the object's
4587  * location in memory, its class, its size and the objects it references.
4588  * For each referenced object, its offset from the object address is
4589  * reported in the offsets array.
4590  * The object references may be buffered, so the callback may be invoked
4591  * multiple times for the same object: in all but the first call, the size
4592  * argument will be zero.
4593  * Note that this function can be only called in the #MONO_GC_EVENT_PRE_START_WORLD
4594  * profiler event handler.
4595  *
4596  * Returns: a non-zero value if the GC doesn't support heap walking
4597  */
4598 int
4599 mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
4600 {
4601         HeapWalkInfo hwi;
4602
4603         hwi.flags = flags;
4604         hwi.callback = callback;
4605         hwi.data = data;
4606
4607         sgen_clear_nursery_fragments ();
4608         sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, walk_references, &hwi, FALSE);
4609
4610         major_collector.iterate_objects (TRUE, TRUE, walk_references, &hwi);
4611         sgen_los_iterate_objects (walk_references, &hwi);
4612
4613         return 0;
4614 }
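
/*
 * Example (illustrative): a heap-walk callback that counts objects and
 * references.  Because references are buffered, the callback can fire more
 * than once per object; only the first call carries a non-zero size.
 *
 *     static int
 *     count_refs (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num,
 *                 MonoObject **refs, uintptr_t *offsets, void *data)
 *     {
 *             size_t *counts = data;    // hypothetical: counts[0] = objects, counts[1] = references
 *             if (size != 0)
 *                     counts [0]++;
 *             counts [1] += num;
 *             return 0;
 *     }
 *
 *     // size_t counts [2] = { 0, 0 };
 *     // mono_gc_walk_heap (0, count_refs, counts);
 */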
4615
4616 void
4617 mono_gc_collect (int generation)
4618 {
4619         LOCK_GC;
4620         if (generation > 1)
4621                 generation = 1;
4622         sgen_perform_collection (0, generation, "user request", TRUE);
4623         UNLOCK_GC;
4624 }
4625
4626 int
4627 mono_gc_max_generation (void)
4628 {
4629         return 1;
4630 }
4631
4632 int
4633 mono_gc_collection_count (int generation)
4634 {
4635         if (generation == 0)
4636                 return stat_minor_gcs;
4637         return stat_major_gcs;
4638 }
4639
4640 int64_t
4641 mono_gc_get_used_size (void)
4642 {
4643         gint64 tot = 0;
4644         LOCK_GC;
4645         tot = los_memory_usage;
4646         tot += nursery_section->next_data - nursery_section->data;
4647         tot += major_collector.get_used_size ();
4648         /* FIXME: account for pinned objects */
4649         UNLOCK_GC;
4650         return tot;
4651 }
4652
4653 int
4654 mono_gc_get_los_limit (void)
4655 {
4656         return MAX_SMALL_OBJ_SIZE;
4657 }
4658
4659 gboolean
4660 mono_gc_user_markers_supported (void)
4661 {
4662         return TRUE;
4663 }
4664
4665 gboolean
4666 mono_object_is_alive (MonoObject* o)
4667 {
4668         return TRUE;
4669 }
4670
4671 int
4672 mono_gc_get_generation (MonoObject *obj)
4673 {
4674         if (ptr_in_nursery (obj))
4675                 return 0;
4676         return 1;
4677 }
4678
4679 void
4680 mono_gc_enable_events (void)
4681 {
4682 }
4683
4684 void
4685 mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track)
4686 {
4687         sgen_register_disappearing_link (obj, link_addr, track, FALSE);
4688 }
4689
4690 void
4691 mono_gc_weak_link_remove (void **link_addr, gboolean track)
4692 {
4693         sgen_register_disappearing_link (NULL, link_addr, track, FALSE);
4694 }
4695
4696 MonoObject*
4697 mono_gc_weak_link_get (void **link_addr)
4698 {
4699         void * volatile *link_addr_volatile;
4700         void *ptr;
4701         MonoObject *obj;
4702  retry:
4703         link_addr_volatile = link_addr;
4704         ptr = (void*)*link_addr_volatile;
4705         /*
4706          * At this point we have a hidden pointer.  If the GC runs
4707          * here, it will not recognize the hidden pointer as a
4708          * reference, and if the object behind it is not referenced
4709          * elsewhere, it will be freed.  Once the world is restarted
4710          * we reveal the pointer, giving us a pointer to a freed
4711          * object.  To make sure we don't return it, we load the
4712          * hidden pointer again.  If it's still the same, we can be
4713          * sure the object reference is valid.
4714          */
4715         if (ptr)
4716                 obj = (MonoObject*) REVEAL_POINTER (ptr);
4717         else
4718                 return NULL;
4719
4720         mono_memory_barrier ();
4721
4722         /*
4723          * During the second bridge processing step the world is
4724          * running again.  That step processes all weak links once
4725          * more to null those that refer to dead objects.  Before that
4726          * is completed, those links must not be followed, so we
4727          * conservatively wait for bridge processing when any weak
4728          * link is dereferenced.
4729          */
4730         if (G_UNLIKELY (bridge_processing_in_progress))
4731                 mono_gc_wait_for_bridge_processing ();
4732
4733         if ((void*)*link_addr_volatile != ptr)
4734                 goto retry;
4735
4736         return obj;
4737 }
4738
4739 gboolean
4740 mono_gc_ephemeron_array_add (MonoObject *obj)
4741 {
4742         EphemeronLinkNode *node;
4743
4744         LOCK_GC;
4745
4746         node = sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
4747         if (!node) {
4748                 UNLOCK_GC;
4749                 return FALSE;
4750         }
4751         node->array = (char*)obj;
4752         node->next = ephemeron_list;
4753         ephemeron_list = node;
4754
4755         SGEN_LOG (5, "Registered ephemeron array %p", obj);
4756
4757         UNLOCK_GC;
4758         return TRUE;
4759 }
4760
4761 gboolean
4762 mono_gc_set_allow_synchronous_major (gboolean flag)
4763 {
4764         if (!major_collector.is_concurrent)
4765                 return flag;
4766
4767         allow_synchronous_major = flag;
4768         return TRUE;
4769 }
4770
4771 void*
4772 mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
4773 {
4774         void *result;
4775         LOCK_INTERRUPTION;
4776         result = func (data);
4777         UNLOCK_INTERRUPTION;
4778         return result;
4779 }
4780
4781 gboolean
4782 mono_gc_is_gc_thread (void)
4783 {
4784         gboolean result;
4785         LOCK_GC;
4786         result = mono_thread_info_current () != NULL;
4787         UNLOCK_GC;
4788         return result;
4789 }
4790
4791 static gboolean
4792 is_critical_method (MonoMethod *method)
4793 {
4794         return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
4795 }
4796
4797 void
4798 sgen_env_var_error (const char *env_var, const char *fallback, const char *description_format, ...)
4799 {
4800         va_list ap;
4801
4802         va_start (ap, description_format);
4803
4804         fprintf (stderr, "Warning: In environment variable `%s': ", env_var);
4805         vfprintf (stderr, description_format, ap);
4806         if (fallback)
4807                 fprintf (stderr, " - %s", fallback);
4808         fprintf (stderr, "\n");
4809
4810         va_end (ap);
4811 }
4812
4813 static gboolean
4814 parse_double_in_interval (const char *env_var, const char *opt_name, const char *opt, double min, double max, double *result)
4815 {
4816         char *endptr;
4817         double val = strtod (opt, &endptr);
4818         if (endptr == opt) {
4819                 sgen_env_var_error (env_var, "Using default value.", "`%s` must be a number.", opt_name);
4820                 return FALSE;
4821         }
4822         else if (val < min || val > max) {
4823                 sgen_env_var_error (env_var, "Using default value.", "`%s` must be between %.2f - %.2f.", opt_name, min, max);
4824                 return FALSE;
4825         }
4826         *result = val;
4827         return TRUE;
4828 }
4829
4830 void
4831 mono_gc_base_init (void)
4832 {
4833         MonoThreadInfoCallbacks cb;
4834         const char *env;
4835         char **opts, **ptr;
4836         char *major_collector_opt = NULL;
4837         char *minor_collector_opt = NULL;
4838         glong max_heap = 0;
4839         glong soft_limit = 0;
4840         int num_workers;
4841         int result;
4842         int dummy;
4843         gboolean debug_print_allowance = FALSE;
4844         double allowance_ratio = 0, save_target = 0;
4845         gboolean have_split_nursery = FALSE;
4846         gboolean cement_enabled = TRUE;
4847
4848         do {
4849                 result = InterlockedCompareExchange (&gc_initialized, -1, 0);
4850                 switch (result) {
4851                 case 1:
4852                         /* already inited */
4853                         return;
4854                 case -1:
4855                         /* being inited by another thread */
4856                         g_usleep (1000);
4857                         break;
4858                 case 0:
4859                         /* we will init it */
4860                         break;
4861                 default:
4862                         g_assert_not_reached ();
4863                 }
4864         } while (result != 0);
4865
4866         LOCK_INIT (gc_mutex);
4867
4868         pagesize = mono_pagesize ();
4869         gc_debug_file = stderr;
4870
4871         cb.thread_register = sgen_thread_register;
4872         cb.thread_detach = sgen_thread_detach;
4873         cb.thread_unregister = sgen_thread_unregister;
4874         cb.thread_attach = sgen_thread_attach;
4875         cb.mono_method_is_critical = (gpointer)is_critical_method;
4876 #ifndef HOST_WIN32
4877         cb.mono_gc_pthread_create = (gpointer)mono_gc_pthread_create;
4878 #endif
4879
4880         mono_threads_init (&cb, sizeof (SgenThreadInfo));
4881
4882         LOCK_INIT (sgen_interruption_mutex);
4883         LOCK_INIT (pin_queue_mutex);
4884
4885         init_user_copy_or_mark_key ();
4886
4887         if ((env = g_getenv (MONO_GC_PARAMS_NAME))) {
4888                 opts = g_strsplit (env, ",", -1);
4889                 for (ptr = opts; *ptr; ++ptr) {
4890                         char *opt = *ptr;
4891                         if (g_str_has_prefix (opt, "major=")) {
4892                                 opt = strchr (opt, '=') + 1;
4893                                 major_collector_opt = g_strdup (opt);
4894                         } else if (g_str_has_prefix (opt, "minor=")) {
4895                                 opt = strchr (opt, '=') + 1;
4896                                 minor_collector_opt = g_strdup (opt);
4897                         }
4898                 }
4899         } else {
4900                 opts = NULL;
4901         }
4902
4903         init_stats ();
4904         sgen_init_internal_allocator ();
4905         sgen_init_nursery_allocator ();
4906         sgen_init_fin_weak_hash ();
4907
4908         sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION, SGEN_SIZEOF_GC_MEM_SECTION);
4909         sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FINALIZE_READY_ENTRY, sizeof (FinalizeReadyEntry));
4910         sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE, sizeof (GrayQueueSection));
4911         sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));
4912
4913 #ifndef HAVE_KW_THREAD
4914         mono_native_tls_alloc (&thread_info_key, NULL);
4915 #if defined(__APPLE__) || defined (HOST_WIN32)
4916         /* 
4917          * CEE_MONO_TLS requires the tls offset, not the key, so the code below only works on darwin,
4918          * where the two are the same.
4919          */
4920         mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, thread_info_key);
4921 #endif
4922 #else
4923         {
4924                 int tls_offset = -1;
4925                 MONO_THREAD_VAR_OFFSET (sgen_thread_info, tls_offset);
4926                 mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, tls_offset);
4927         }
4928 #endif
4929
4930         /*
4931          * This needs to happen before any internal allocations because
4932          * it inits the small id which is required for hazard pointer
4933          * operations.
4934          */
4935         sgen_os_init ();
4936
4937         mono_thread_info_attach (&dummy);
4938
4939         if (!minor_collector_opt) {
4940                 sgen_simple_nursery_init (&sgen_minor_collector);
4941         } else {
4942                 if (!strcmp (minor_collector_opt, "simple")) {
4943                 use_simple_nursery:
4944                         sgen_simple_nursery_init (&sgen_minor_collector);
4945                 } else if (!strcmp (minor_collector_opt, "split")) {
4946                         sgen_split_nursery_init (&sgen_minor_collector);
4947                         have_split_nursery = TRUE;
4948                 } else {
4949                         sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `simple` instead.", "Unknown minor collector `%s'.", minor_collector_opt);
4950                         goto use_simple_nursery;
4951                 }
4952         }
4953
4954         if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep")) {
4955         use_marksweep_major:
4956                 sgen_marksweep_init (&major_collector);
4957         } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-fixed")) {
4958                 sgen_marksweep_fixed_init (&major_collector);
4959         } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-par")) {
4960                 sgen_marksweep_par_init (&major_collector);
4961         } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-fixed-par")) {
4962                 sgen_marksweep_fixed_par_init (&major_collector);
4963         } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-conc")) {
4964                 sgen_marksweep_conc_init (&major_collector);
4965         } else {
4966                 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `marksweep` instead.", "Unknown major collector `%s'.", major_collector_opt);
4967                 goto use_marksweep_major;
4968         }
4969
4970         if (have_split_nursery && major_collector.is_parallel) {
4971                 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Disabling split minor collector.", "`minor=split` is not supported with the parallel collector yet.");
4972                 have_split_nursery = FALSE;
4973         }
4974
4975         num_workers = mono_cpu_count ();
4976         g_assert (num_workers > 0);
4977         if (num_workers > 16)
4978                 num_workers = 16;
4979
4980         /* Keep this the default for now. */
4981         /* Precise marking is broken on all supported targets. Disable until fixed. */
4982         conservative_stack_mark = TRUE;
4983
4984         sgen_nursery_size = DEFAULT_NURSERY_SIZE;
4985
4986         if (opts) {
4987                 gboolean usage_printed = FALSE;
4988
4989                 for (ptr = opts; *ptr; ++ptr) {
4990                         char *opt = *ptr;
4991                         if (!strcmp (opt, ""))
4992                                 continue;
4993                         if (g_str_has_prefix (opt, "major="))
4994                                 continue;
4995                         if (g_str_has_prefix (opt, "minor="))
4996                                 continue;
4997                         if (g_str_has_prefix (opt, "max-heap-size=")) {
4998                                 glong max_heap_candidate = 0;
4999                                 opt = strchr (opt, '=') + 1;
5000                                 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &max_heap_candidate)) {
5001                                         max_heap = (max_heap_candidate + mono_pagesize () - 1) & ~(glong)(mono_pagesize () - 1);
5002                                         if (max_heap != max_heap_candidate)
5003                                                 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Rounding up.", "`max-heap-size` size must be a multiple of %d.", mono_pagesize ());
5004                                 } else {
5005                                         sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`max-heap-size` must be an integer.");
5006                                 }
5007                                 continue;
5008                         }
5009                         if (g_str_has_prefix (opt, "soft-heap-limit=")) {
5010                                 opt = strchr (opt, '=') + 1;
5011                                 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &soft_limit)) {
5012                                         if (soft_limit <= 0) {
5013                                                 sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be positive.");
5014                                                 soft_limit = 0;
5015                                         }
5016                                 } else {
5017                                         sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be an integer.");
5018                                 }
5019                                 continue;
5020                         }
5021                         if (g_str_has_prefix (opt, "workers=")) {
5022                                 long val;
5023                                 char *endptr;
5024                                 if (!major_collector.is_parallel) {
5025                                         sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "The `workers` option can only be used for parallel collectors.");
5026                                         continue;
5027                                 }
5028                                 opt = strchr (opt, '=') + 1;
5029                                 val = strtol (opt, &endptr, 10);
5030                                 if (!*opt || *endptr) {
5031                                         sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "Cannot parse the `workers` option value.");
5032                                         continue;
5033                                 }
5034                                 if (val <= 0 || val > 16) {
5035                                         sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "The number of `workers` must be in the range 1 to 16.");
5036                                         continue;
5037                                 }
5038                                 num_workers = (int)val;
5039                                 continue;
5040                         }
5041                         if (g_str_has_prefix (opt, "stack-mark=")) {
5042                                 opt = strchr (opt, '=') + 1;
5043                                 if (!strcmp (opt, "precise")) {
5044                                         conservative_stack_mark = FALSE;
5045                                 } else if (!strcmp (opt, "conservative")) {
5046                                         conservative_stack_mark = TRUE;
5047                                 } else {
5048                                         sgen_env_var_error (MONO_GC_PARAMS_NAME, conservative_stack_mark ? "Using `conservative`." : "Using `precise`.",
5049                                                         "Invalid value `%s` for `stack-mark` option, possible values are: `precise`, `conservative`.", opt);
5050                                 }
5051                                 continue;
5052                         }
5053                         if (g_str_has_prefix (opt, "bridge=")) {
5054                                 opt = strchr (opt, '=') + 1;
5055                                 sgen_register_test_bridge_callbacks (g_strdup (opt));
5056                                 continue;
5057                         }
5058 #ifdef USER_CONFIG
5059                         if (g_str_has_prefix (opt, "nursery-size=")) {
5060                                 long val;
5061                                 opt = strchr (opt, '=') + 1;
5062                                 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &val)) {
5063 #ifdef SGEN_ALIGN_NURSERY
5064                                         if ((val & (val - 1))) {
5065                                                 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be a power of two.");
5066                                                 continue;
5067                                         }
5068
5069                                         if (val < SGEN_MAX_NURSERY_WASTE) {
5070                                                 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.",
5071                                                                 "`nursery-size` must be at least %d bytes.", SGEN_MAX_NURSERY_WASTE);
5072                                                 continue;
5073                                         }
5074
5075                                         sgen_nursery_size = val;
5076                                         sgen_nursery_bits = 0;
5077                                         while (1 << (++ sgen_nursery_bits) != sgen_nursery_size)
5078                                                 ;
5079 #else
5080                                         sgen_nursery_size = val;
5081 #endif
5082                                 } else {
5083                                         sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be an integer.");
5084                                         continue;
5085                                 }
5086                                 continue;
5087                         }
5088 #endif
5089                         if (g_str_has_prefix (opt, "save-target-ratio=")) {
5090                                 double val;
5091                                 opt = strchr (opt, '=') + 1;
5092                                 if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "save-target-ratio", opt,
5093                                                 SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO, &val)) {
5094                                         save_target = val;
5095                                 }
5096                                 continue;
5097                         }
5098                         if (g_str_has_prefix (opt, "default-allowance-ratio=")) {
5099                                 double val;
5100                                 opt = strchr (opt, '=') + 1;
5101                                 if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "default-allowance-ratio", opt,
5102                                                 SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO, &val)) {
5103                                         allowance_ratio = val;
5104                                 }
5105                                 continue;
5106                         }
5107                         if (g_str_has_prefix (opt, "allow-synchronous-major=")) {
5108                                 if (!major_collector.is_concurrent) {
5109                                         sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`allow-synchronous-major` is only valid for the concurrent major collector.");
5110                                         continue;
5111                                 }
5112
5113                                 opt = strchr (opt, '=') + 1;
5114
5115                                 if (!strcmp (opt, "yes")) {
5116                                         allow_synchronous_major = TRUE;
5117                                 } else if (!strcmp (opt, "no")) {
5118                                         allow_synchronous_major = FALSE;
5119                                 } else {
5120                                         sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`allow-synchronous-major` must be either `yes' or `no'.");
5121                                         continue;
5122                                 }
                                continue;
5123                         }
5124
5125                         if (!strcmp (opt, "cementing")) {
5126                                 if (major_collector.is_parallel) {
5127                                         sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`cementing` is not supported for the parallel major collector.");
5128                                         continue;
5129                                 }
5130                                 cement_enabled = TRUE;
5131                                 continue;
5132                         }
5133                         if (!strcmp (opt, "no-cementing")) {
5134                                 cement_enabled = FALSE;
5135                                 continue;
5136                         }
5137
5138                         if (major_collector.handle_gc_param && major_collector.handle_gc_param (opt))
5139                                 continue;
5140
5141                         if (sgen_minor_collector.handle_gc_param && sgen_minor_collector.handle_gc_param (opt))
5142                                 continue;
5143
5144                         sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "Unknown option `%s`.", opt);
5145
5146                         if (usage_printed)
5147                                 continue;
5148
5149                         fprintf (stderr, "\n%s must be a comma-delimited list of one or more of the following:\n", MONO_GC_PARAMS_NAME);
5150                         fprintf (stderr, "  max-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
5151                         fprintf (stderr, "  soft-heap-limit=N (where N is an integer, possibly with a k, m or a g suffix)\n");
5152                         fprintf (stderr, "  nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
5153                         fprintf (stderr, "  major=COLLECTOR (where COLLECTOR is `marksweep', `marksweep-conc', `marksweep-par', 'marksweep-fixed' or 'marksweep-fixed-par')\n");
5154                         fprintf (stderr, "  minor=COLLECTOR (where COLLECTOR is `simple' or `split')\n");
5155                         fprintf (stderr, "  wbarrier=WBARRIER (where WBARRIER is `remset' or `cardtable')\n");
5156                         fprintf (stderr, "  stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
5157                         fprintf (stderr, "  [no-]cementing\n");
5158                         if (major_collector.is_concurrent)
5159                                 fprintf (stderr, "  allow-synchronous-major=FLAG (where FLAG is `yes' or `no')\n");
5160                         if (major_collector.print_gc_param_usage)
5161                                 major_collector.print_gc_param_usage ();
5162                         if (sgen_minor_collector.print_gc_param_usage)
5163                                 sgen_minor_collector.print_gc_param_usage ();
5164                         fprintf (stderr, " Experimental options:\n");
5165                         fprintf (stderr, "  save-target-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO);
5166                         fprintf (stderr, "  default-allowance-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO);
5167                         fprintf (stderr, "\n");
5168
5169                         usage_printed = TRUE;
5170                 }
5171                 g_strfreev (opts);
5172         }
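
        /*
         * Example (illustrative): the options parsed above come from the
         * MONO_GC_PARAMS environment variable, e.g.
         *
         *     MONO_GC_PARAMS=major=marksweep-conc,minor=split,max-heap-size=512m
         *
         * Unknown options only produce a warning and the usage text printed
         * above; they do not abort startup.
         */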
5173
5174         if (major_collector.is_parallel) {
5175                 cement_enabled = FALSE;
5176                 sgen_workers_init (num_workers);
5177         } else if (major_collector.is_concurrent) {
5178                 sgen_workers_init (1);
5179         }
5180
5181         if (major_collector_opt)
5182                 g_free (major_collector_opt);
5183
5184         if (minor_collector_opt)
5185                 g_free (minor_collector_opt);
5186
5187         alloc_nursery ();
5188
5189         sgen_cement_init (cement_enabled);
5190
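        /*
         * Example (illustrative): debugging behaviour is controlled by the
         * MONO_GC_DEBUG environment variable parsed below, e.g.
         *
         *     MONO_GC_DEBUG=2:gc-log,check-at-minor-collections,print-allowance
         *
         * where the leading digit is the log level, the optional ":file" part
         * names a log file (the pid is appended to it), and the remaining
         * entries are the debug flags handled below.
         */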
5191         if ((env = g_getenv (MONO_GC_DEBUG_NAME))) {
5192                 gboolean usage_printed = FALSE;
5193
5194                 opts = g_strsplit (env, ",", -1);
5195                 for (ptr = opts; ptr && *ptr; ptr ++) {
5196                         char *opt = *ptr;
5197                         if (!strcmp (opt, ""))
5198                                 continue;
5199                         if (opt [0] >= '0' && opt [0] <= '9') {
5200                                 gc_debug_level = atoi (opt);
5201                                 opt++;
5202                                 if (opt [0] == ':')
5203                                         opt++;
5204                                 if (opt [0]) {
5205 #ifdef HOST_WIN32
5206                                         char *rf = g_strdup_printf ("%s.%d", opt, GetCurrentProcessId ());
5207 #else
5208                                         char *rf = g_strdup_printf ("%s.%d", opt, getpid ());
5209 #endif
5210                                         gc_debug_file = fopen (rf, "wb");
5211                                         if (!gc_debug_file)
5212                                                 gc_debug_file = stderr;
5213                                         g_free (rf);
5214                                 }
5215                         } else if (!strcmp (opt, "print-allowance")) {
5216                                 debug_print_allowance = TRUE;
5217                         } else if (!strcmp (opt, "print-pinning")) {
5218                                 do_pin_stats = TRUE;
5219                         } else if (!strcmp (opt, "verify-before-allocs")) {
5220                                 verify_before_allocs = 1;
5221                                 has_per_allocation_action = TRUE;
5222                         } else if (g_str_has_prefix (opt, "verify-before-allocs=")) {
5223                                 char *arg = strchr (opt, '=') + 1;
5224                                 verify_before_allocs = atoi (arg);
5225                                 has_per_allocation_action = TRUE;
5226                         } else if (!strcmp (opt, "collect-before-allocs")) {
5227                                 collect_before_allocs = 1;
5228                                 has_per_allocation_action = TRUE;
5229                         } else if (g_str_has_prefix (opt, "collect-before-allocs=")) {
5230                                 char *arg = strchr (opt, '=') + 1;
5231                                 has_per_allocation_action = TRUE;
5232                                 collect_before_allocs = atoi (arg);
5233                         } else if (!strcmp (opt, "verify-before-collections")) {
5234                                 whole_heap_check_before_collection = TRUE;
5235                         } else if (!strcmp (opt, "check-at-minor-collections")) {
5236                                 consistency_check_at_minor_collection = TRUE;
5237                                 nursery_clear_policy = CLEAR_AT_GC;
5238                         } else if (!strcmp (opt, "mod-union-consistency-check")) {
5239                                 if (!major_collector.is_concurrent) {
5240                                         sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`mod-union-consistency-check` only works with concurrent major collector.");
5241                                         continue;
5242                                 }
5243                                 mod_union_consistency_check = TRUE;
5244                         } else if (!strcmp (opt, "check-mark-bits")) {
5245                                 check_mark_bits_after_major_collection = TRUE;
5246                         } else if (!strcmp (opt, "check-nursery-pinned")) {
5247                                 check_nursery_objects_pinned = TRUE;
5248                         } else if (!strcmp (opt, "xdomain-checks")) {
5249                                 xdomain_checks = TRUE;
5250                         } else if (!strcmp (opt, "clear-at-gc")) {
5251                                 nursery_clear_policy = CLEAR_AT_GC;
5252                         } else if (!strcmp (opt, "clear-nursery-at-gc")) {
5253                                 nursery_clear_policy = CLEAR_AT_GC;
5254                         } else if (!strcmp (opt, "check-scan-starts")) {
5255                                 do_scan_starts_check = TRUE;
5256                         } else if (!strcmp (opt, "verify-nursery-at-minor-gc")) {
5257                                 do_verify_nursery = TRUE;
5258                         } else if (!strcmp (opt, "check-concurrent")) {
5259                                 if (!major_collector.is_concurrent) {
5260                                         sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`check-concurrent` only works with concurrent major collectors.");
5261                                         continue;
5262                                 }
5263                                 do_concurrent_checks = TRUE;
5264                         } else if (!strcmp (opt, "dump-nursery-at-minor-gc")) {
5265                                 do_dump_nursery_content = TRUE;
5266                         } else if (!strcmp (opt, "no-managed-allocator")) {
5267                                 sgen_set_use_managed_allocator (FALSE);
5268                         } else if (!strcmp (opt, "disable-minor")) {
5269                                 disable_minor_collections = TRUE;
5270                         } else if (!strcmp (opt, "disable-major")) {
5271                                 disable_major_collections = TRUE;
5272                         } else if (g_str_has_prefix (opt, "heap-dump=")) {
5273                                 char *filename = strchr (opt, '=') + 1;
5274                                 nursery_clear_policy = CLEAR_AT_GC;
5275                                 heap_dump_file = fopen (filename, "w");
5276                                 if (heap_dump_file) {
5277                                         fprintf (heap_dump_file, "<sgen-dump>\n");
5278                                         do_pin_stats = TRUE;
5279                                 }
5280 #ifdef SGEN_BINARY_PROTOCOL
5281                         } else if (g_str_has_prefix (opt, "binary-protocol=")) {
5282                                 char *filename = strchr (opt, '=') + 1;
5283                                 binary_protocol_init (filename);
5284 #endif
5285                         } else {
5286                                 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "Unknown option `%s`.", opt);
5287
5288                                 if (usage_printed)
5289                                         continue;
5290
5291                                 fprintf (stderr, "\n%s must be of the format [<l>[:<filename>]|<option>]+ where <l> is a debug level 0-9.\n", MONO_GC_DEBUG_NAME);
5292                                 fprintf (stderr, "Valid <option>s are:\n");
5293                                 fprintf (stderr, "  collect-before-allocs[=<n>]\n");
5294                                 fprintf (stderr, "  verify-before-allocs[=<n>]\n");
5295                                 fprintf (stderr, "  check-at-minor-collections\n");
5296                                 fprintf (stderr, "  check-mark-bits\n");
5297                                 fprintf (stderr, "  check-nursery-pinned\n");
5298                                 fprintf (stderr, "  verify-before-collections\n");
5299                                 fprintf (stderr, "  verify-nursery-at-minor-gc\n");
5300                                 fprintf (stderr, "  dump-nursery-at-minor-gc\n");
5301                                 fprintf (stderr, "  disable-minor\n");
5302                                 fprintf (stderr, "  disable-major\n");
5303                                 fprintf (stderr, "  xdomain-checks\n");
5304                                 fprintf (stderr, "  check-concurrent\n");
5305                                 fprintf (stderr, "  clear-at-gc\n");
5306                                 fprintf (stderr, "  clear-nursery-at-gc\n");
5307                                 fprintf (stderr, "  check-scan-starts\n");
5308                                 fprintf (stderr, "  no-managed-allocator\n");
5309                                 fprintf (stderr, "  print-allowance\n");
5310                                 fprintf (stderr, "  print-pinning\n");
5311                                 fprintf (stderr, "  heap-dump=<filename>\n");
5312 #ifdef SGEN_BINARY_PROTOCOL
5313                                 fprintf (stderr, "  binary-protocol=<filename>\n");
5314 #endif
5315                                 fprintf (stderr, "\n");
5316
5317                                 usage_printed = TRUE;
5318                         }
5319                 }
5320                 g_strfreev (opts);
5321         }
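        /*
         * Illustrative value for the debug environment variable (named by
         * MONO_GC_DEBUG_NAME, normally MONO_GC_DEBUG); the accepted options are
         * exactly the ones parsed above:
         *
         *   MONO_GC_DEBUG=2:gc-log,collect-before-allocs=100,heap-dump=/tmp/sgen.dump
         *
         * sets gc_debug_level to 2 with the log written to "gc-log.<pid>", sets
         * collect_before_allocs to 100 and opens /tmp/sgen.dump as the heap dump file.
         */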
5322
5323         if (major_collector.is_parallel) {
5324                 if (heap_dump_file) {
5325                         sgen_env_var_error (MONO_GC_DEBUG_NAME, "Disabling.", "Cannot do `heap-dump` with the parallel collector.");
5326                         fclose (heap_dump_file);
5327                         heap_dump_file = NULL;
5328                 }
5329                 if (do_pin_stats) {
5330                         sgen_env_var_error (MONO_GC_DEBUG_NAME, "Disabling.", "`print-pinning` is not supported with the parallel collector.");
5331                         do_pin_stats = FALSE;
5332                 }
5333         }
5334
5335         if (major_collector.post_param_init)
5336                 major_collector.post_param_init (&major_collector);
5337
5338         sgen_memgov_init (max_heap, soft_limit, debug_print_allowance, allowance_ratio, save_target);
5339
5340         memset (&remset, 0, sizeof (remset));
5341
5342         sgen_card_table_init (&remset);
5343
5344         gc_initialized = 1;
5345 }
5346
5347 const char *
5348 mono_gc_get_gc_name (void)
5349 {
5350         return "sgen";
5351 }
5352
5353 static MonoMethod *write_barrier_method;
5354
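/*
 * The generated write barrier wrapper and the managed allocator wrappers are
 * the GC's "critical" methods; these helpers let the rest of the runtime
 * identify them and ask whether any have been created yet.
 */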
5355 gboolean
5356 sgen_is_critical_method (MonoMethod *method)
5357 {
5358         return (method == write_barrier_method || sgen_is_managed_allocator (method));
5359 }
5360
5361 gboolean
5362 sgen_has_critical_method (void)
5363 {
5364         return write_barrier_method || sgen_has_managed_allocator ();
5365 }
5366
5367 #ifndef DISABLE_JIT
5368
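/*
 * Emit the fast-path checks of the managed write barrier: the barrier may
 * return immediately when the destination address (argument 0) lies inside the
 * nursery, or, unless the major collector is concurrent, when the value being
 * stored (*argument 0) does not point into the nursery.  Each early return is a
 * forward branch recorded in nursery_check_return_labels (unused slots stay 0);
 * the caller patches them to the final RET.
 */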
5369 static void
5370 emit_nursery_check (MonoMethodBuilder *mb, int *nursery_check_return_labels)
5371 {
5372         memset (nursery_check_return_labels, 0, sizeof (int) * 3);
5373 #ifdef SGEN_ALIGN_NURSERY
5374         // if (ptr_in_nursery (ptr)) return;
5375         /*
5376          * Masking out the bits might be faster, but we would have to use 64 bit
5377          * immediates, which might be slower.
5378          */
5379         mono_mb_emit_ldarg (mb, 0);
5380         mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5381         mono_mb_emit_byte (mb, CEE_SHR_UN);
5382         mono_mb_emit_icon (mb, (mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
5383         nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BEQ);
5384
5385         if (!major_collector.is_concurrent) {
5386                 // if (!ptr_in_nursery (*ptr)) return;
5387                 mono_mb_emit_ldarg (mb, 0);
5388                 mono_mb_emit_byte (mb, CEE_LDIND_I);
5389                 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5390                 mono_mb_emit_byte (mb, CEE_SHR_UN);
5391                 mono_mb_emit_icon (mb, (mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
5392                 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BNE_UN);
5393         }
5394 #else
5395                         int label_continue_1, label_continue_2;
5396         int dereferenced_var;
5397
5398                         // if (ptr < sgen_get_nursery_start ()) goto continue;
5399         mono_mb_emit_ldarg (mb, 0);
5400         mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
5401         label_continue_1 = mono_mb_emit_branch (mb, CEE_BLT);
5402
5403         // if (ptr >= sgen_get_nursery_end ()) goto continue;
5404         mono_mb_emit_ldarg (mb, 0);
5405         mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
5406         label_continue_2 = mono_mb_emit_branch (mb, CEE_BGE);
5407
5408         // Otherwise return
5409         nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BR);
5410
5411         // continue:
5412         mono_mb_patch_branch (mb, label_continue_1);
5413         mono_mb_patch_branch (mb, label_continue_2);
5414
5415         // Dereference and store in local var
5416         dereferenced_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
5417         mono_mb_emit_ldarg (mb, 0);
5418         mono_mb_emit_byte (mb, CEE_LDIND_I);
5419         mono_mb_emit_stloc (mb, dereferenced_var);
5420
5421         if (!major_collector.is_concurrent) {
5422                 // if (*ptr < sgen_get_nursery_start ()) return;
5423                 mono_mb_emit_ldloc (mb, dereferenced_var);
5424                 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
5425                 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BLT);
5426
5427                 // if (*ptr >= sgen_get_nursery_end ()) return;
5428                 mono_mb_emit_ldloc (mb, dereferenced_var);
5429                 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
5430                 nursery_check_return_labels [2] = mono_mb_emit_branch (mb, CEE_BGE);
5431         }
5432 #endif
5433 }
5434 #endif
5435
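/*
 * Lazily build the managed write barrier wrapper: after the nursery checks it
 * marks the card for the written address in sgen_cardtable (or, without
 * MANAGED_WBARRIER, simply calls mono_gc_wbarrier_generic_nostore).  The method
 * is published under the GC lock with a memory barrier; a thread that loses the
 * creation race frees its copy and returns the installed one.
 */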
5436 MonoMethod*
5437 mono_gc_get_write_barrier (void)
5438 {
5439         MonoMethod *res;
5440         MonoMethodBuilder *mb;
5441         MonoMethodSignature *sig;
5442 #ifdef MANAGED_WBARRIER
5443         int i, nursery_check_labels [3];
5444
5445 #ifdef HAVE_KW_THREAD
5446         int stack_end_offset = -1;
5447
5448         MONO_THREAD_VAR_OFFSET (stack_end, stack_end_offset);
5449         g_assert (stack_end_offset != -1);
5450 #endif
5451 #endif
5452
5453         // FIXME: Maybe create a separate version for ctors (the branch would be
5454         // correctly predicted more times)
5455         if (write_barrier_method)
5456                 return write_barrier_method;
5457
5458         /* Create the IL version of mono_gc_barrier_generic_store () */
5459         sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
5460         sig->ret = &mono_defaults.void_class->byval_arg;
5461         sig->params [0] = &mono_defaults.int_class->byval_arg;
5462
5463         mb = mono_mb_new (mono_defaults.object_class, "wbarrier", MONO_WRAPPER_WRITE_BARRIER);
5464
5465 #ifndef DISABLE_JIT
5466 #ifdef MANAGED_WBARRIER
5467         emit_nursery_check (mb, nursery_check_labels);
5468         /*
5469         addr = sgen_cardtable + ((address >> CARD_BITS) & CARD_MASK)
5470         *addr = 1;
5471
5472         sgen_cardtable:
5473                 LDC_PTR sgen_cardtable
5474
5475         address >> CARD_BITS
5476                 LDARG_0
5477                 LDC_I4 CARD_BITS
5478                 SHR_UN
5479         if (SGEN_HAVE_OVERLAPPING_CARDS) {
5480                 LDC_PTR card_table_mask
5481                 AND
5482         }
5483         ADD
5484         ldc_i4_1
5485         stind_i1
5486         */
5487         mono_mb_emit_ptr (mb, sgen_cardtable);
5488         mono_mb_emit_ldarg (mb, 0);
5489         mono_mb_emit_icon (mb, CARD_BITS);
5490         mono_mb_emit_byte (mb, CEE_SHR_UN);
5491 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
5492         mono_mb_emit_ptr (mb, (gpointer)CARD_MASK);
5493         mono_mb_emit_byte (mb, CEE_AND);
5494 #endif
5495         mono_mb_emit_byte (mb, CEE_ADD);
5496         mono_mb_emit_icon (mb, 1);
5497         mono_mb_emit_byte (mb, CEE_STIND_I1);
5498
5499         // return;
5500         for (i = 0; i < 3; ++i) {
5501                 if (nursery_check_labels [i])
5502                         mono_mb_patch_branch (mb, nursery_check_labels [i]);
5503         }
5504         mono_mb_emit_byte (mb, CEE_RET);
5505 #else
5506         mono_mb_emit_ldarg (mb, 0);
5507         mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
5508         mono_mb_emit_byte (mb, CEE_RET);
5509 #endif
5510 #endif
5511         res = mono_mb_create_method (mb, sig, 16);
5512         mono_mb_free (mb);
5513
5514         LOCK_GC;
5515         if (write_barrier_method) {
5516                 /* Already created */
5517                 mono_free_method (res);
5518         } else {
5519                 /* double-checked locking */
5520                 mono_memory_barrier ();
5521                 write_barrier_method = res;
5522         }
5523         UNLOCK_GC;
5524
5525         return write_barrier_method;
5526 }
5527
5528 char*
5529 mono_gc_get_description (void)
5530 {
5531         return g_strdup ("sgen");
5532 }
5533
5534 void
5535 mono_gc_set_desktop_mode (void)
5536 {
5537 }
5538
5539 gboolean
5540 mono_gc_is_moving (void)
5541 {
5542         return TRUE;
5543 }
5544
5545 gboolean
5546 mono_gc_is_disabled (void)
5547 {
5548         return FALSE;
5549 }
5550
5551 #ifdef HOST_WIN32
5552 BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
5553 {
5554         return TRUE;
5555 }
5556 #endif
5557
5558 NurseryClearPolicy
5559 sgen_get_nursery_clear_policy (void)
5560 {
5561         return nursery_clear_policy;
5562 }
5563
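/*
 * Lazily build a fake vtable describing a rank-1 byte array
 * ("array_filler_type").  Sgen uses it to describe unused memory ranges as
 * dummy array objects so the heap remains walkable.
 */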
5564 MonoVTable*
5565 sgen_get_array_fill_vtable (void)
5566 {
5567         if (!array_fill_vtable) {
5568                 static MonoClass klass;
5569                 static MonoVTable vtable;
5570                 gsize bmap;
5571
5572                 MonoDomain *domain = mono_get_root_domain ();
5573                 g_assert (domain);
5574
5575                 klass.element_class = mono_defaults.byte_class;
5576                 klass.rank = 1;
5577                 klass.instance_size = sizeof (MonoArray);
5578                 klass.sizes.element_size = 1;
5579                 klass.name = "array_filler_type";
5580
5581                 vtable.klass = &klass;
5582                 bmap = 0;
5583                 vtable.gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 1);
5584                 vtable.rank = 1;
5585
5586                 array_fill_vtable = &vtable;
5587         }
5588         return array_fill_vtable;
5589 }
5590
5591 void
5592 sgen_gc_lock (void)
5593 {
5594         LOCK_GC;
5595 }
5596
5597 void
5598 sgen_gc_unlock (void)
5599 {
5600         UNLOCK_GC;
5601 }
5602
5603 void
5604 sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
5605 {
5606         major_collector.iterate_live_block_ranges (callback);
5607 }
5608
5609 void
5610 sgen_major_collector_scan_card_table (SgenGrayQueue *queue)
5611 {
5612         major_collector.scan_card_table (FALSE, queue);
5613 }
5614
5615 SgenMajorCollector*
5616 sgen_get_major_collector (void)
5617 {
5618         return &major_collector;
5619 }
5620
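/*
 * Toggle the calling thread's gc_disabled flag under the GC lock, so the
 * collector skips (or again considers) this thread.
 */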
5621 void
mono_gc_set_skip_thread (gboolean skip)
5622 {
5623         SgenThreadInfo *info = mono_thread_info_current ();
5624
5625         LOCK_GC;
5626         info->gc_disabled = skip;
5627         UNLOCK_GC;
5628 }
5629
5630 SgenRemeberedSet*
5631 sgen_get_remset (void)
5632 {
5633         return &remset;
5634 }
5635
5636 guint
5637 mono_gc_get_vtable_bits (MonoClass *class)
5638 {
5639         if (sgen_need_bridge_processing () && sgen_is_bridge_class (class))
5640                 return SGEN_GC_BIT_BRIDGE_OBJECT;
5641         return 0;
5642 }
5643
5644 void
5645 mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)
5646 {
5647         // FIXME:
5648 }
5649
5650
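/*
 * Debug helper: stop the world, clear the nursery fragments, run the
 * whole-heap consistency check, then restart the world.
 */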
5651 void
5652 sgen_check_whole_heap_stw (void)
5653 {
5654         sgen_stop_world (0);
5655         sgen_clear_nursery_fragments ();
5656         sgen_check_whole_heap (FALSE);
5657         sgen_restart_world (0, NULL);
5658 }
5659
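/*
 * Flush the buffered object moves to the profiler (mono_profiler_gc_moves)
 * and reset the buffer index.
 */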
5660 void
5661 sgen_gc_event_moves (void)
5662 {
5663         if (moved_objects_idx) {
5664                 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
5665                 moved_objects_idx = 0;
5666         }
5667 }
5668
5669 #endif /* HAVE_SGEN_GC */