[sgen] Remove SSB write barrier.
[mono.git] / mono / metadata / sgen-gc.c
1 /*
2  * sgen-gc.c: Simple generational GC.
3  *
4  * Author:
5  *      Paolo Molaro (lupus@ximian.com)
6  *  Rodrigo Kumpera (kumpera@gmail.com)
7  *
8  * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
9  * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
10  *
11  * Thread start/stop adapted from Boehm's GC:
12  * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
13  * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
14  * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
15  * Copyright (c) 2000-2004 by Hewlett-Packard Company.  All rights reserved.
16  * Copyright 2001-2003 Ximian, Inc
17  * Copyright 2003-2010 Novell, Inc.
18  * Copyright 2011 Xamarin, Inc.
19  * Copyright (C) 2012 Xamarin Inc
20  *
21  * This library is free software; you can redistribute it and/or
22  * modify it under the terms of the GNU Library General Public
23  * License 2.0 as published by the Free Software Foundation;
24  *
25  * This library is distributed in the hope that it will be useful,
26  * but WITHOUT ANY WARRANTY; without even the implied warranty of
27  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
28  * Library General Public License for more details.
29  *
30  * You should have received a copy of the GNU Library General Public
31  * License 2.0 along with this library; if not, write to the Free
32  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33  *
34  * Important: allocation always provides zeroed memory; having to do
35  * a memset after allocation is deadly for performance.
36  * Memory usage at startup is currently as follows:
37  * 64 KB pinned space
38  * 64 KB internal space
39  * size of nursery
40  * We should provide a small memory config with half the sizes
41  *
42  * We currently try to make as few mono assumptions as possible:
43  * 1) 2-word header with no GC pointers in it (first vtable, second to store the
44  *    forwarding ptr)
45  * 2) gc descriptor is the second word in the vtable (first word in the class)
46  * 3) 8 byte alignment is the minimum and enough (not true for special structures (SIMD), FIXME)
47  * 4) there is a function to get an object's size and the number of
48  *    elements in an array.
49  * 5) we know the special way bounds are allocated for complex arrays
50  * 6) we know about proxies and how to treat them when domains are unloaded
51  *
52  * Always try to keep stack usage to a minimum: no recursive behaviour
53  * and no large stack allocs.
54  *
55  * General description.
56  * Objects are initially allocated in a nursery using a fast bump-pointer technique.
57  * When the nursery is full we start a nursery collection: this is performed with a
58  * copying GC.
59  * When the old generation is full we start a copying GC of the old generation as well:
60  * this will be changed to mark&sweep with copying when fragmentation becomes too severe
61  * in the future.  Maybe we'll even do both during the same collection like IMMIX.
62  *
63  * The things that complicate this description are:
64  * *) pinned objects: we can't move them so we need to keep track of them
65  * *) no precise info of the thread stacks and registers: we need to be able to
66  *    quickly find the objects that may be referenced conservatively and pin them
67  *    (this makes the first issues more important)
68  * *) large objects are too expensive to be dealt with using copying GC: we handle them
69  *    with mark/sweep during major collections
70  * *) some objects need to not move even if they are small (interned strings, Type handles):
71  *    we use mark/sweep for them, too: they are not allocated in the nursery, but inside
72  *    PinnedChunks regions
73  */
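/*
 * Illustrative sketch only (hypothetical names, not runtime declarations): the
 * object layout assumed in points 1) and 2) above.
 *
 *      typedef struct {
 *              void *vtable;           // first header word: the vtable pointer
 *              void *second_word;      // second header word: free to hold the forwarding ptr
 *      } ObjectHeaderSketch;
 *
 *      // The GC descriptor is fetched as the second word of the vtable:
 *      mword desc = ((mword*)SGEN_LOAD_VTABLE (obj)) [1];
 */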
74
75 /*
76  * TODO:
77
78  *) we could have a function pointer in MonoClass to implement
79   customized write barriers for value types
80
81  *) investigate the stuff needed to advance a thread to a GC-safe
82   point (single-stepping, read from unmapped memory etc) and implement it.
83   This would enable us to inline allocations and write barriers, for example,
84   or at least parts of them, like the write barrier checks.
85   We may need this also for handling precise info on stacks, even simple things
86   as having uninitialized data on the stack and having to wait for the prolog
87   to zero it. Not an issue for the last frame that we scan conservatively.
88   We could always not trust the value in the slots anyway.
89
90  *) modify the jit to save info about references in stack locations:
91   this can be done just for locals as a start, so that at least
92   part of the stack is handled precisely.
93
94  *) test/fix endianness issues
95
96  *) Implement a card table as the write barrier instead of remembered
97     sets?  Card tables are not easy to implement with our current
98     memory layout.  We have several different kinds of major heap
99     objects: Small objects in regular blocks, small objects in pinned
100     chunks and LOS objects.  If we just have a pointer we have no way
101     to tell which kind of object it points into, therefore we cannot
102     know where its card table is.  The least we have to do to make
103     this happen is to get rid of write barriers for indirect stores.
104     (See next item, and the illustrative card-marking sketch after this comment block.)
105
106  *) Get rid of write barriers for indirect stores.  We can do this by
107     telling the GC to wbarrier-register an object once we do an ldloca
108     or ldelema on it, and to unregister it once it's not used anymore
109     (it can only travel downwards on the stack).  The problem with
110     unregistering is that it needs to happen eventually no matter
111     what, even if exceptions are thrown, the thread aborts, etc.
112     Rodrigo suggested that we could do only the registering part and
113     let the collector find out (pessimistically) when it's safe to
114     unregister, namely when the stack pointer of the thread that
115     registered the object is higher than it was when the registering
116     happened.  This might make for a good first implementation to get
117     some data on performance.
118
119  *) Some sort of blacklist support?  Blacklists are a concept from the
120     Boehm GC: if during a conservative scan we find pointers to an
121     area which we might use as heap, we mark that area as unusable, so
122     pointer retention by random pinning pointers is reduced.
123
124  *) experiment with max small object size (very small right now - 2kb,
125     because it's tied to the max freelist size)
126
127   *) add an option to mmap the whole heap in one chunk: it makes for many
128      simplifications in the checks (put the nursery at the top and just use a single
129      check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
130      not flexible (too much of the address space may be used by default or we can't
131      increase the heap as needed) and we'd need a race-free mechanism to return memory
132      back to the system (mprotect(PROT_NONE) will still keep the memory allocated if it
133      was written to, munmap is needed, but the following mmap may not find the same segment
134      free...)
135
136  *) memzero the major fragments after restarting the world and optionally a smaller
137     chunk at a time
138
139  *) investigate having fragment zeroing threads
140
141  *) separate locks for finalization and other minor stuff to reduce
142     lock contention
143
144  *) try a different copying order to improve memory locality
145
146  *) a thread abort after a store but before the write barrier will
147     prevent the write barrier from executing
148
149  *) specialized dynamically generated markers/copiers
150
151  *) Dynamically adjust TLAB size to the number of threads.  If we have
152     too many threads that do allocation, we might need smaller TLABs,
153     and we might get better performance with larger TLABs if we only
154     have a handful of threads.  We could sum up the space left in all
155     assigned TLABs and if that's more than some percentage of the
156     nursery size, reduce the TLAB size.
157
158  *) Explore placing unreachable objects on unused nursery memory.
159         Instead of memset'ing a region to zero, place an int[] covering it.
160         A good place to start is add_nursery_frag. The tricky thing here is
161         placing those objects atomically outside of a collection.
162
163  *) Allocation should use asymmetric Dekker synchronization:
164         http://blogs.oracle.com/dave/resource/Asymmetric-Dekker-Synchronization.txt
165         This should help archs with weak memory consistency.
166  */
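/*
 * Illustrative sketch for the card table item in the TODO list above.  All
 * names here are hypothetical; this is not a barrier the runtime emits.
 * A card-marking write barrier dirties one byte per fixed-size "card" of
 * heap, so a collection only has to scan slots on dirty cards:
 *
 *      #define CARD_BITS 9                     // e.g. 512-byte cards
 *      static unsigned char *card_table;       // one byte per card
 *
 *      static inline void
 *      wbarrier_card_mark (void **slot, void *value)
 *      {
 *              *slot = value;
 *              // Indexing simplified; a real table would be offset by the heap base.
 *              card_table [((mword)slot) >> CARD_BITS] = 1;
 *      }
 */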
167 #include "config.h"
168 #ifdef HAVE_SGEN_GC
169
170 #ifdef __MACH__
171 #undef _XOPEN_SOURCE
172 #define _XOPEN_SOURCE
173 #define _DARWIN_C_SOURCE
174 #endif
175
176 #ifdef HAVE_UNISTD_H
177 #include <unistd.h>
178 #endif
179 #ifdef HAVE_PTHREAD_H
180 #include <pthread.h>
181 #endif
182 #ifdef HAVE_PTHREAD_NP_H
183 #include <pthread_np.h>
184 #endif
185 #ifdef HAVE_SEMAPHORE_H
186 #include <semaphore.h>
187 #endif
188 #include <stdio.h>
189 #include <string.h>
190 #include <signal.h>
191 #include <errno.h>
192 #include <assert.h>
193
194 #include "metadata/sgen-gc.h"
195 #include "metadata/metadata-internals.h"
196 #include "metadata/class-internals.h"
197 #include "metadata/gc-internal.h"
198 #include "metadata/object-internals.h"
199 #include "metadata/threads.h"
200 #include "metadata/sgen-cardtable.h"
201 #include "metadata/sgen-protocol.h"
202 #include "metadata/sgen-archdep.h"
203 #include "metadata/sgen-bridge.h"
204 #include "metadata/sgen-memory-governor.h"
205 #include "metadata/sgen-hash-table.h"
206 #include "metadata/mono-gc.h"
207 #include "metadata/method-builder.h"
208 #include "metadata/profiler-private.h"
209 #include "metadata/monitor.h"
210 #include "metadata/threadpool-internals.h"
211 #include "metadata/mempool-internals.h"
212 #include "metadata/marshal.h"
213 #include "metadata/runtime.h"
214 #include "metadata/sgen-cardtable.h"
215 #include "metadata/sgen-pinning.h"
216 #include "metadata/sgen-workers.h"
217 #include "utils/mono-mmap.h"
218 #include "utils/mono-time.h"
219 #include "utils/mono-semaphore.h"
220 #include "utils/mono-counters.h"
221 #include "utils/mono-proclib.h"
222 #include "utils/mono-memory-model.h"
223 #include "utils/mono-logger-internal.h"
224 #include "utils/dtrace.h"
225
226 #include <mono/utils/mono-logger-internal.h>
227 #include <mono/utils/memcheck.h>
228
229 #if defined(__MACH__)
230 #include "utils/mach-support.h"
231 #endif
232
233 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
234         a = i,
235
236 enum {
237 #include "mono/cil/opcode.def"
238         CEE_LAST
239 };
240
241 #undef OPDEF
242
243 #undef pthread_create
244 #undef pthread_join
245 #undef pthread_detach
246
247 /*
248  * ######################################################################
249  * ########  Types and constants used by the GC.
250  * ######################################################################
251  */
252
253 /* 0 means not initialized, 1 is initialized, -1 means in progress */
254 static int gc_initialized = 0;
255 /* If set, check if we need to do something every X allocations */
256 gboolean has_per_allocation_action;
257 /* If set, do a heap check once every X allocations */
258 guint32 verify_before_allocs = 0;
259 /* If set, do a minor collection once every X allocations */
260 guint32 collect_before_allocs = 0;
261 /* If set, do a whole heap check before each collection */
262 static gboolean whole_heap_check_before_collection = FALSE;
263 /* If set, do a heap consistency check before each minor collection */
264 static gboolean consistency_check_at_minor_collection = FALSE;
265 /* If set, check whether mark bits are consistent after major collections */
266 static gboolean check_mark_bits_after_major_collection = FALSE;
267 /* If set, check that all nursery objects are pinned/not pinned, depending on context */
268 static gboolean check_nursery_objects_pinned = FALSE;
269 /* If set, do a few checks when the concurrent collector is used */
270 static gboolean do_concurrent_checks = FALSE;
271 /* If set, check that there are no references to the domain left at domain unload */
272 static gboolean xdomain_checks = FALSE;
273 /* If not null, dump the heap after each collection into this file */
274 static FILE *heap_dump_file = NULL;
275 /* If set, mark stacks conservatively, even if precise marking is possible */
276 static gboolean conservative_stack_mark = FALSE;
277 /* If set, do a plausibility check on the scan_starts before and after
278    each collection */
279 static gboolean do_scan_starts_check = FALSE;
280 /*
281  * If the major collector is concurrent and this is FALSE, we will
282  * never initiate a synchronous major collection, unless requested via
283  * GC.Collect().
284  */
285 static gboolean allow_synchronous_major = TRUE;
286 static gboolean nursery_collection_is_parallel = FALSE;
287 static gboolean disable_minor_collections = FALSE;
288 static gboolean disable_major_collections = FALSE;
289 gboolean do_pin_stats = FALSE;
290 static gboolean do_verify_nursery = FALSE;
291 static gboolean do_dump_nursery_content = FALSE;
292
293 #ifdef HEAVY_STATISTICS
294 long long stat_objects_alloced_degraded = 0;
295 long long stat_bytes_alloced_degraded = 0;
296
297 long long stat_copy_object_called_nursery = 0;
298 long long stat_objects_copied_nursery = 0;
299 long long stat_copy_object_called_major = 0;
300 long long stat_objects_copied_major = 0;
301
302 long long stat_scan_object_called_nursery = 0;
303 long long stat_scan_object_called_major = 0;
304
305 long long stat_slots_allocated_in_vain;
306
307 long long stat_nursery_copy_object_failed_from_space = 0;
308 long long stat_nursery_copy_object_failed_forwarded = 0;
309 long long stat_nursery_copy_object_failed_pinned = 0;
310 long long stat_nursery_copy_object_failed_to_space = 0;
311
312 static int stat_wbarrier_add_to_global_remset = 0;
313 static int stat_wbarrier_set_field = 0;
314 static int stat_wbarrier_set_arrayref = 0;
315 static int stat_wbarrier_arrayref_copy = 0;
316 static int stat_wbarrier_generic_store = 0;
317 static int stat_wbarrier_set_root = 0;
318 static int stat_wbarrier_value_copy = 0;
319 static int stat_wbarrier_object_copy = 0;
320 #endif
321
322 int stat_minor_gcs = 0;
323 int stat_major_gcs = 0;
324
325 static long long stat_pinned_objects = 0;
326
327 static long long time_minor_pre_collection_fragment_clear = 0;
328 static long long time_minor_pinning = 0;
329 static long long time_minor_scan_remsets = 0;
330 static long long time_minor_scan_pinned = 0;
331 static long long time_minor_scan_registered_roots = 0;
332 static long long time_minor_scan_thread_data = 0;
333 static long long time_minor_finish_gray_stack = 0;
334 static long long time_minor_fragment_creation = 0;
335
336 static long long time_major_pre_collection_fragment_clear = 0;
337 static long long time_major_pinning = 0;
338 static long long time_major_scan_pinned = 0;
339 static long long time_major_scan_registered_roots = 0;
340 static long long time_major_scan_thread_data = 0;
341 static long long time_major_scan_alloc_pinned = 0;
342 static long long time_major_scan_finalized = 0;
343 static long long time_major_scan_big_objects = 0;
344 static long long time_major_finish_gray_stack = 0;
345 static long long time_major_free_bigobjs = 0;
346 static long long time_major_los_sweep = 0;
347 static long long time_major_sweep = 0;
348 static long long time_major_fragment_creation = 0;
349
350 int gc_debug_level = 0;
351 FILE* gc_debug_file;
352
353 /*
354 void
355 mono_gc_flush_info (void)
356 {
357         fflush (gc_debug_file);
358 }
359 */
360
361 #define TV_DECLARE SGEN_TV_DECLARE
362 #define TV_GETTIME SGEN_TV_GETTIME
363 #define TV_ELAPSED SGEN_TV_ELAPSED
364 #define TV_ELAPSED_MS SGEN_TV_ELAPSED_MS
365
366 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
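/* For example, ALIGN_TO (13, 8) == 16; align must be a power of two for the mask trick to work. */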
367
368 NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
369
370 #define object_is_forwarded     SGEN_OBJECT_IS_FORWARDED
371 #define object_is_pinned        SGEN_OBJECT_IS_PINNED
372 #define pin_object              SGEN_PIN_OBJECT
373 #define unpin_object            SGEN_UNPIN_OBJECT
374
375 #define ptr_in_nursery sgen_ptr_in_nursery
376
377 #define LOAD_VTABLE     SGEN_LOAD_VTABLE
378
379 static const char*
380 safe_name (void* obj)
381 {
382         MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
383         return vt->klass->name;
384 }
385
386 #define safe_object_get_size    sgen_safe_object_get_size
387
388 const char*
389 sgen_safe_name (void* obj)
390 {
391         return safe_name (obj);
392 }
393
394 /*
395  * ######################################################################
396  * ########  Global data.
397  * ######################################################################
398  */
399 LOCK_DECLARE (gc_mutex);
400
401 #define SCAN_START_SIZE SGEN_SCAN_START_SIZE
402
403 static mword pagesize = 4096;
404 int degraded_mode = 0;
405
406 static mword bytes_pinned_from_failed_allocation = 0;
407
408 GCMemSection *nursery_section = NULL;
409 static mword lowest_heap_address = ~(mword)0;
410 static mword highest_heap_address = 0;
411
412 LOCK_DECLARE (sgen_interruption_mutex);
413 static LOCK_DECLARE (pin_queue_mutex);
414
415 #define LOCK_PIN_QUEUE mono_mutex_lock (&pin_queue_mutex)
416 #define UNLOCK_PIN_QUEUE mono_mutex_unlock (&pin_queue_mutex)
417
418 typedef struct _FinalizeReadyEntry FinalizeReadyEntry;
419 struct _FinalizeReadyEntry {
420         FinalizeReadyEntry *next;
421         void *object;
422 };
423
424 typedef struct _EphemeronLinkNode EphemeronLinkNode;
425
426 struct _EphemeronLinkNode {
427         EphemeronLinkNode *next;
428         char *array;
429 };
430
431 typedef struct {
432        void *key;
433        void *value;
434 } Ephemeron;
435
436 int current_collection_generation = -1;
437 volatile gboolean concurrent_collection_in_progress = FALSE;
438
439 /* objects that are ready to be finalized */
440 static FinalizeReadyEntry *fin_ready_list = NULL;
441 static FinalizeReadyEntry *critical_fin_list = NULL;
442
443 static EphemeronLinkNode *ephemeron_list;
444
445 /* registered roots: the key to the hash is the root start address */
446 /* 
447  * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
448  */
449 SgenHashTable roots_hash [ROOT_TYPE_NUM] = {
450         SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
451         SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
452         SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL)
453 };
454 static mword roots_size = 0; /* amount of memory in the root set */
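/*
 * Sketch of how a root region is typically registered from the rest of the
 * runtime, assuming the usual mono_gc_register_root () entry point (the
 * variable name below is hypothetical):
 *
 *      static MonoObject *my_static_ref;
 *      mono_gc_register_root ((char*)&my_static_ref, sizeof (my_static_ref), NULL);
 *
 * A NULL descriptor lands the region in the pinned/conservative table; precise
 * descriptors go into the normal or write-barrier tables above.
 */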
455
456 #define GC_ROOT_NUM 32
457 typedef struct {
458         int count;              /* must be the first field */
459         void *objects [GC_ROOT_NUM];
460         int root_types [GC_ROOT_NUM];
461         uintptr_t extra_info [GC_ROOT_NUM];
462 } GCRootReport;
463
464 static void
465 notify_gc_roots (GCRootReport *report)
466 {
467         if (!report->count)
468                 return;
469         mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
470         report->count = 0;
471 }
472
473 static void
474 add_profile_gc_root (GCRootReport *report, void *object, int rtype, uintptr_t extra_info)
475 {
476         if (report->count == GC_ROOT_NUM)
477                 notify_gc_roots (report);
478         report->objects [report->count] = object;
479         report->root_types [report->count] = rtype;
480         report->extra_info [report->count++] = (uintptr_t)((MonoVTable*)LOAD_VTABLE (object))->klass;
481 }
482
483 MonoNativeTlsKey thread_info_key;
484
485 #ifdef HAVE_KW_THREAD
486 __thread SgenThreadInfo *sgen_thread_info;
487 __thread char *stack_end;
488 #endif
489
490 /* The size of a TLAB */
491 /* The bigger the value, the less often we have to go to the slow path to allocate a new 
492  * one, but the more space is wasted by threads not allocating much memory.
493  * FIXME: Tune this.
494  * FIXME: Make this self-tuning for each thread.
495  */
496 guint32 tlab_size = (1024 * 4);
497
498 #define MAX_SMALL_OBJ_SIZE      SGEN_MAX_SMALL_OBJ_SIZE
499
500 /* Functions supplied by the runtime to be called by the GC */
501 static MonoGCCallbacks gc_callbacks;
502
503 #define ALLOC_ALIGN             SGEN_ALLOC_ALIGN
504 #define ALLOC_ALIGN_BITS        SGEN_ALLOC_ALIGN_BITS
505
506 #define ALIGN_UP                SGEN_ALIGN_UP
507
508 #define MOVED_OBJECTS_NUM 64
509 static void *moved_objects [MOVED_OBJECTS_NUM];
510 static int moved_objects_idx = 0;
511
512 /* Vtable of the objects used to fill out nursery fragments before a collection */
513 static MonoVTable *array_fill_vtable;
514
515 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
516 MonoNativeThreadId main_gc_thread = NULL;
517 #endif
518
519 /* Number of objects pinned during the current collection */
520 static mword objects_pinned;
521
522 /*
523  * ######################################################################
524  * ########  Macros and function declarations.
525  * ######################################################################
526  */
527
528 inline static void*
529 align_pointer (void *ptr)
530 {
531         mword p = (mword)ptr;
532         p += sizeof (gpointer) - 1;
533         p &= ~ (sizeof (gpointer) - 1);
534         return (void*)p;
535 }
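/* E.g. with 8-byte pointers, align_pointer ((void*)0x1005) yields (void*)0x1008. */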
536
537 typedef SgenGrayQueue GrayQueue;
538
539 /* forward declarations */
540 static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue);
541 static void scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx);
542 static void scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx);
543 static void report_finalizer_roots (void);
544 static void report_registered_roots (void);
545
546 static void pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue);
547 static int pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, ScanCopyContext ctx);
548 static void finish_gray_stack (char *start_addr, char *end_addr, int generation, GrayQueue *queue);
549
550 void mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise);
551
552
553 static void init_stats (void);
554
555 static int mark_ephemerons_in_range (ScanCopyContext ctx);
556 static void clear_unreachable_ephemerons (ScanCopyContext ctx);
557 static void null_ephemerons_for_domain (MonoDomain *domain);
558
559 static gboolean major_update_or_finish_concurrent_collection (gboolean force_finish);
560
561 SgenObjectOperations current_object_ops;
562 SgenMajorCollector major_collector;
563 SgenMinorCollector sgen_minor_collector;
564 static GrayQueue gray_queue;
565
566 static SgenRemeberedSet remset;
567
568 /* The gray queue to use from the main collection thread. */
569 #define WORKERS_DISTRIBUTE_GRAY_QUEUE   (&gray_queue)
570
571 /*
572  * The gray queue a worker job must use.  If we're not parallel or
573  * concurrent, we use the main gray queue.
574  */
575 static SgenGrayQueue*
576 sgen_workers_get_job_gray_queue (WorkerData *worker_data)
577 {
578         return worker_data ? &worker_data->private_gray_queue : WORKERS_DISTRIBUTE_GRAY_QUEUE;
579 }
580
581 static void
582 gray_queue_redirect (SgenGrayQueue *queue)
583 {
584         gboolean wake = FALSE;
585
586
587         for (;;) {
588                 GrayQueueSection *section = sgen_gray_object_dequeue_section (queue);
589                 if (!section)
590                         break;
591                 sgen_section_gray_queue_enqueue (queue->alloc_prepare_data, section);
592                 wake = TRUE;
593         }
594
595         if (wake) {
596                 g_assert (concurrent_collection_in_progress ||
597                                 (current_collection_generation == GENERATION_OLD && major_collector.is_parallel));
598                 if (sgen_workers_have_started ()) {
599                         sgen_workers_wake_up_all ();
600                 } else {
601                         if (concurrent_collection_in_progress)
602                                 g_assert (current_collection_generation == -1);
603                 }
604         }
605 }
606
607 static gboolean
608 is_xdomain_ref_allowed (gpointer *ptr, char *obj, MonoDomain *domain)
609 {
610         MonoObject *o = (MonoObject*)(obj);
611         MonoObject *ref = (MonoObject*)*(ptr);
612         int offset = (char*)(ptr) - (char*)o;
613
614         if (o->vtable->klass == mono_defaults.thread_class && offset == G_STRUCT_OFFSET (MonoThread, internal_thread))
615                 return TRUE;
616         if (o->vtable->klass == mono_defaults.internal_thread_class && offset == G_STRUCT_OFFSET (MonoInternalThread, current_appcontext))
617                 return TRUE;
618         if (mono_class_has_parent_fast (o->vtable->klass, mono_defaults.real_proxy_class) &&
619                         offset == G_STRUCT_OFFSET (MonoRealProxy, unwrapped_server))
620                 return TRUE;
621         /* Thread.cached_culture_info */
622         if (!strcmp (ref->vtable->klass->name_space, "System.Globalization") &&
623                         !strcmp (ref->vtable->klass->name, "CultureInfo") &&
624                         !strcmp(o->vtable->klass->name_space, "System") &&
625                         !strcmp(o->vtable->klass->name, "Object[]"))
626                 return TRUE;
627         /*
628          *  at System.IO.MemoryStream.InternalConstructor (byte[],int,int,bool,bool) [0x0004d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:121
629          * at System.IO.MemoryStream..ctor (byte[]) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:81
630          * at (wrapper remoting-invoke-with-check) System.IO.MemoryStream..ctor (byte[]) <IL 0x00020, 0xffffffff>
631          * at System.Runtime.Remoting.Messaging.CADMethodCallMessage.GetArguments () [0x0000d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/CADMessages.cs:327
632          * at System.Runtime.Remoting.Messaging.MethodCall..ctor (System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/MethodCall.cs:87
633          * at System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) [0x00018] in /home/schani/Work/novell/trunk/mcs/class/corlib/System/AppDomain.cs:1213
634          * at (wrapper remoting-invoke-with-check) System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) <IL 0x0003d, 0xffffffff>
635          * at System.Runtime.Remoting.Channels.CrossAppDomainSink.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00008] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Channels/CrossAppDomainChannel.cs:198
636          * at (wrapper runtime-invoke) object.runtime_invoke_CrossAppDomainSink/ProcessMessageRes_object_object (object,intptr,intptr,intptr) <IL 0x0004c, 0xffffffff>
637          */
638         if (!strcmp (ref->vtable->klass->name_space, "System") &&
639                         !strcmp (ref->vtable->klass->name, "Byte[]") &&
640                         !strcmp (o->vtable->klass->name_space, "System.IO") &&
641                         !strcmp (o->vtable->klass->name, "MemoryStream"))
642                 return TRUE;
643         /* append_job() in threadpool.c */
644         if (!strcmp (ref->vtable->klass->name_space, "System.Runtime.Remoting.Messaging") &&
645                         !strcmp (ref->vtable->klass->name, "AsyncResult") &&
646                         !strcmp (o->vtable->klass->name_space, "System") &&
647                         !strcmp (o->vtable->klass->name, "Object[]") &&
648                         mono_thread_pool_is_queue_array ((MonoArray*) o))
649                 return TRUE;
650         return FALSE;
651 }
652
653 static void
654 check_reference_for_xdomain (gpointer *ptr, char *obj, MonoDomain *domain)
655 {
656         MonoObject *o = (MonoObject*)(obj);
657         MonoObject *ref = (MonoObject*)*(ptr);
658         int offset = (char*)(ptr) - (char*)o;
659         MonoClass *class;
660         MonoClassField *field;
661         char *str;
662
663         if (!ref || ref->vtable->domain == domain)
664                 return;
665         if (is_xdomain_ref_allowed (ptr, obj, domain))
666                 return;
667
668         field = NULL;
669         for (class = o->vtable->klass; class; class = class->parent) {
670                 int i;
671
672                 for (i = 0; i < class->field.count; ++i) {
673                         if (class->fields[i].offset == offset) {
674                                 field = &class->fields[i];
675                                 break;
676                         }
677                 }
678                 if (field)
679                         break;
680         }
681
682         if (ref->vtable->klass == mono_defaults.string_class)
683                 str = mono_string_to_utf8 ((MonoString*)ref);
684         else
685                 str = NULL;
686         g_print ("xdomain reference in %p (%s.%s) at offset %d (%s) to %p (%s.%s) (%s)  -  pointed to by:\n",
687                         o, o->vtable->klass->name_space, o->vtable->klass->name,
688                         offset, field ? field->name : "",
689                         ref, ref->vtable->klass->name_space, ref->vtable->klass->name, str ? str : "");
690         mono_gc_scan_for_specific_ref (o, TRUE);
691         if (str)
692                 g_free (str);
693 }
694
695 #undef HANDLE_PTR
696 #define HANDLE_PTR(ptr,obj)     check_reference_for_xdomain ((ptr), (obj), domain)
697
698 static void
699 scan_object_for_xdomain_refs (char *start, mword size, void *data)
700 {
701         MonoDomain *domain = ((MonoObject*)start)->vtable->domain;
702
703         #include "sgen-scan-object.h"
704 }
705
706 static gboolean scan_object_for_specific_ref_precise = TRUE;
707
708 #undef HANDLE_PTR
709 #define HANDLE_PTR(ptr,obj) do {                \
710         if ((MonoObject*)*(ptr) == key) {       \
711         g_print ("found ref to %p in object %p (%s) at offset %td\n",   \
712                         key, (obj), safe_name ((obj)), ((char*)(ptr) - (char*)(obj))); \
713         }                                                               \
714         } while (0)
715
716 static void
717 scan_object_for_specific_ref (char *start, MonoObject *key)
718 {
719         char *forwarded;
720
721         if ((forwarded = SGEN_OBJECT_IS_FORWARDED (start)))
722                 start = forwarded;
723
724         if (scan_object_for_specific_ref_precise) {
725                 #include "sgen-scan-object.h"
726         } else {
727                 mword *words = (mword*)start;
728                 size_t size = safe_object_get_size ((MonoObject*)start);
729                 int i;
730                 for (i = 0; i < size / sizeof (mword); ++i) {
731                         if (words [i] == (mword)key) {
732                                 g_print ("found possible ref to %p in object %p (%s) at offset %td\n",
733                                                 key, start, safe_name (start), i * sizeof (mword));
734                         }
735                 }
736         }
737 }
738
739 void
740 sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags)
741 {
742         while (start < end) {
743                 size_t size;
744                 char *obj;
745
746                 if (!*(void**)start) {
747                         start += sizeof (void*); /* should be ALLOC_ALIGN, really */
748                         continue;
749                 }
750
751                 if (allow_flags) {
752                         if (!(obj = SGEN_OBJECT_IS_FORWARDED (start)))
753                                 obj = start;
754                 } else {
755                         obj = start;
756                 }
757
758                 size = ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
759
760                 if ((MonoVTable*)SGEN_LOAD_VTABLE (obj) != array_fill_vtable)
761                         callback (obj, size, data);
762
763                 start += size;
764         }
765 }
766
767 static void
768 scan_object_for_specific_ref_callback (char *obj, size_t size, MonoObject *key)
769 {
770         scan_object_for_specific_ref (obj, key);
771 }
772
773 static void
774 check_root_obj_specific_ref (RootRecord *root, MonoObject *key, MonoObject *obj)
775 {
776         if (key != obj)
777                 return;
778         g_print ("found ref to %p in root record %p\n", key, root);
779 }
780
781 static MonoObject *check_key = NULL;
782 static RootRecord *check_root = NULL;
783
784 static void
785 check_root_obj_specific_ref_from_marker (void **obj)
786 {
787         check_root_obj_specific_ref (check_root, check_key, *obj);
788 }
789
790 static void
791 scan_roots_for_specific_ref (MonoObject *key, int root_type)
792 {
793         void **start_root;
794         RootRecord *root;
795         check_key = key;
796
797         SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
798                 mword desc = root->root_desc;
799
800                 check_root = root;
801
802                 switch (desc & ROOT_DESC_TYPE_MASK) {
803                 case ROOT_DESC_BITMAP:
804                         desc >>= ROOT_DESC_TYPE_SHIFT;
805                         while (desc) {
806                                 if (desc & 1)
807                                         check_root_obj_specific_ref (root, key, *start_root);
808                                 desc >>= 1;
809                                 start_root++;
810                         }
811                         return;
812                 case ROOT_DESC_COMPLEX: {
813                         gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
814                         int bwords = (*bitmap_data) - 1;
815                         void **start_run = start_root;
816                         bitmap_data++;
817                         while (bwords-- > 0) {
818                                 gsize bmap = *bitmap_data++;
819                                 void **objptr = start_run;
820                                 while (bmap) {
821                                         if (bmap & 1)
822                                                 check_root_obj_specific_ref (root, key, *objptr);
823                                         bmap >>= 1;
824                                         ++objptr;
825                                 }
826                                 start_run += GC_BITS_PER_WORD;
827                         }
828                         break;
829                 }
830                 case ROOT_DESC_USER: {
831                         MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
832                         marker (start_root, check_root_obj_specific_ref_from_marker);
833                         break;
834                 }
835                 case ROOT_DESC_RUN_LEN:
836                         g_assert_not_reached ();
837                 default:
838                         g_assert_not_reached ();
839                 }
840         } SGEN_HASH_TABLE_FOREACH_END;
841
842         check_key = NULL;
843         check_root = NULL;
844 }
845
846 void
847 mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise)
848 {
849         void **ptr;
850         RootRecord *root;
851
852         scan_object_for_specific_ref_precise = precise;
853
854         sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
855                         (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key, TRUE);
856
857         major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
858
859         sgen_los_iterate_objects ((IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
860
861         scan_roots_for_specific_ref (key, ROOT_TYPE_NORMAL);
862         scan_roots_for_specific_ref (key, ROOT_TYPE_WBARRIER);
863
864         SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], ptr, root) {
865                 while (ptr < (void**)root->end_root) {
866                         check_root_obj_specific_ref (root, *ptr, key);
867                         ++ptr;
868                 }
869         } SGEN_HASH_TABLE_FOREACH_END;
870 }
871
872 static gboolean
873 need_remove_object_for_domain (char *start, MonoDomain *domain)
874 {
875         if (mono_object_domain (start) == domain) {
876                 SGEN_LOG (4, "Need to cleanup object %p", start);
877                 binary_protocol_cleanup (start, (gpointer)LOAD_VTABLE (start), safe_object_get_size ((MonoObject*)start));
878                 return TRUE;
879         }
880         return FALSE;
881 }
882
883 static void
884 process_object_for_domain_clearing (char *start, MonoDomain *domain)
885 {
886         GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
887         if (vt->klass == mono_defaults.internal_thread_class)
888                 g_assert (mono_object_domain (start) == mono_get_root_domain ());
889         /* The object could be a proxy for an object in the domain
890            we're deleting. */
891         if (mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
892                 MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
893
894                 /* The server could already have been zeroed out, so
895                    we need to check for that, too. */
896                 if (server && (!LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
897                         SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
898                         ((MonoRealProxy*)start)->unwrapped_server = NULL;
899                 }
900         }
901 }
902
903 static MonoDomain *check_domain = NULL;
904
905 static void
906 check_obj_not_in_domain (void **o)
907 {
908         g_assert (((MonoObject*)(*o))->vtable->domain != check_domain);
909 }
910
911 static void
912 scan_for_registered_roots_in_domain (MonoDomain *domain, int root_type)
913 {
914         void **start_root;
915         RootRecord *root;
916         check_domain = domain;
917         SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
918                 mword desc = root->root_desc;
919
920                 /* The MonoDomain struct is allowed to hold
921                    references to objects in its own domain. */
922                 if (start_root == (void**)domain)
923                         continue;
924
925                 switch (desc & ROOT_DESC_TYPE_MASK) {
926                 case ROOT_DESC_BITMAP:
927                         desc >>= ROOT_DESC_TYPE_SHIFT;
928                         while (desc) {
929                                 if ((desc & 1) && *start_root)
930                                         check_obj_not_in_domain (*start_root);
931                                 desc >>= 1;
932                                 start_root++;
933                         }
934                         break;
935                 case ROOT_DESC_COMPLEX: {
936                         gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
937                         int bwords = (*bitmap_data) - 1;
938                         void **start_run = start_root;
939                         bitmap_data++;
940                         while (bwords-- > 0) {
941                                 gsize bmap = *bitmap_data++;
942                                 void **objptr = start_run;
943                                 while (bmap) {
944                                         if ((bmap & 1) && *objptr)
945                                                 check_obj_not_in_domain (*objptr);
946                                         bmap >>= 1;
947                                         ++objptr;
948                                 }
949                                 start_run += GC_BITS_PER_WORD;
950                         }
951                         break;
952                 }
953                 case ROOT_DESC_USER: {
954                         MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
955                         marker (start_root, check_obj_not_in_domain);
956                         break;
957                 }
958                 case ROOT_DESC_RUN_LEN:
959                         g_assert_not_reached ();
960                 default:
961                         g_assert_not_reached ();
962                 }
963         } SGEN_HASH_TABLE_FOREACH_END;
964
965         check_domain = NULL;
966 }
967
968 static void
969 check_for_xdomain_refs (void)
970 {
971         LOSObject *bigobj;
972
973         sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
974                         (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL, FALSE);
975
976         major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);
977
978         for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
979                 scan_object_for_xdomain_refs (bigobj->data, sgen_los_object_size (bigobj), NULL);
980 }
981
982 static gboolean
983 clear_domain_process_object (char *obj, MonoDomain *domain)
984 {
985         gboolean remove;
986
987         process_object_for_domain_clearing (obj, domain);
988         remove = need_remove_object_for_domain (obj, domain);
989
990         if (remove && ((MonoObject*)obj)->synchronisation) {
991                 void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
992                 if (dislink)
993                         sgen_register_disappearing_link (NULL, dislink, FALSE, TRUE);
994         }
995
996         return remove;
997 }
998
999 static void
1000 clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
1001 {
1002         if (clear_domain_process_object (obj, domain))
1003                 memset (obj, 0, size);
1004 }
1005
1006 static void
1007 clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
1008 {
1009         clear_domain_process_object (obj, domain);
1010 }
1011
1012 static void
1013 clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
1014 {
1015         if (need_remove_object_for_domain (obj, domain))
1016                 major_collector.free_non_pinned_object (obj, size);
1017 }
1018
1019 static void
1020 clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
1021 {
1022         if (need_remove_object_for_domain (obj, domain))
1023                 major_collector.free_pinned_object (obj, size);
1024 }
1025
1026 /*
1027  * When appdomains are unloaded we can easily remove objects that have finalizers,
1028  * but all the others could still be present in random places on the heap.
1029  * We need a sweep to get rid of them even though it's going to be costly
1030  * with big heaps.
1031  * The reason we need to remove them is that we access the vtable and class
1032  * structures to know the object size and the reference bitmap: once the domain is
1033  * unloaded they point to random memory.
1034  */
1035 void
1036 mono_gc_clear_domain (MonoDomain * domain)
1037 {
1038         LOSObject *bigobj, *prev;
1039         int i;
1040
1041         LOCK_GC;
1042
1043         if (concurrent_collection_in_progress)
1044                 sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE);
1045         g_assert (!concurrent_collection_in_progress);
1046
1047         sgen_process_fin_stage_entries ();
1048         sgen_process_dislink_stage_entries ();
1049
1050         sgen_clear_nursery_fragments ();
1051
1052         if (xdomain_checks && domain != mono_get_root_domain ()) {
1053                 scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
1054                 scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
1055                 check_for_xdomain_refs ();
1056         }
1057
1058         /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
1059         to memory returned to the OS.*/
1060         null_ephemerons_for_domain (domain);
1061
1062         for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
1063                 sgen_null_links_for_domain (domain, i);
1064
1065         for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
1066                 sgen_remove_finalizers_for_domain (domain, i);
1067
1068         sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
1069                         (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE);
1070
1071         /* We need two passes over major and large objects because
1072            freeing such objects might give their memory back to the OS
1073            (in the case of large objects) or obliterate its vtable
1074            (pinned objects with major-copying or pinned and non-pinned
1075            objects with major-mark&sweep), but we might need to
1076            dereference a pointer from an object to another object if
1077            the first object is a proxy. */
1078         major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
1079         for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
1080                 clear_domain_process_object (bigobj->data, domain);
1081
1082         prev = NULL;
1083         for (bigobj = los_object_list; bigobj;) {
1084                 if (need_remove_object_for_domain (bigobj->data, domain)) {
1085                         LOSObject *to_free = bigobj;
1086                         if (prev)
1087                                 prev->next = bigobj->next;
1088                         else
1089                                 los_object_list = bigobj->next;
1090                         bigobj = bigobj->next;
1091                         SGEN_LOG (4, "Freeing large object %p", bigobj->data);
1092                         sgen_los_free_object (to_free);
1093                         continue;
1094                 }
1095                 prev = bigobj;
1096                 bigobj = bigobj->next;
1097         }
1098         major_collector.iterate_objects (TRUE, FALSE, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
1099         major_collector.iterate_objects (FALSE, TRUE, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
1100
1101         if (G_UNLIKELY (do_pin_stats)) {
1102                 if (domain == mono_get_root_domain ())
1103                         sgen_pin_stats_print_class_stats ();
1104         }
1105
1106         UNLOCK_GC;
1107 }
1108
1109 /*
1110  * sgen_add_to_global_remset:
1111  *
1112  *   The global remset contains locations which point into newspace after
1113  * a minor collection. This can happen if the objects they point to are pinned.
1114  *
1115  * LOCKING: If called from a parallel collector, the global remset
1116  * lock must be held.  For serial collectors that is not necessary.
1117  */
1118 void
1119 sgen_add_to_global_remset (gpointer ptr, gpointer obj)
1120 {
1121         SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Target pointer of global remset must be in the nursery");
1122
1123         HEAVY_STAT (++stat_wbarrier_add_to_global_remset);
1124
1125         if (!major_collector.is_concurrent) {
1126                 SGEN_ASSERT (5, current_collection_generation != -1, "Global remsets can only be added during collections");
1127         } else {
1128                 if (current_collection_generation == -1)
1129                         SGEN_ASSERT (5, sgen_concurrent_collection_in_progress (), "Global remsets outside of collection pauses can only be added by the concurrent collector");
1130         }
1131
1132         if (!object_is_pinned (obj))
1133                 SGEN_ASSERT (5, sgen_minor_collector.is_split || sgen_concurrent_collection_in_progress (), "Non-pinned objects can only remain in nursery if it is a split nursery");
1134         else if (sgen_cement_lookup_or_register (obj))
1135                 return;
1136
1137         remset.record_pointer (ptr);
1138
1139         if (G_UNLIKELY (do_pin_stats))
1140                 sgen_pin_stats_register_global_remset (obj);
1141
1142         SGEN_LOG (8, "Adding global remset for %p", ptr);
1143         binary_protocol_global_remset (ptr, obj, (gpointer)SGEN_LOAD_VTABLE (obj));
1144
1145
1146 #ifdef ENABLE_DTRACE
1147         if (G_UNLIKELY (MONO_GC_GLOBAL_REMSET_ADD_ENABLED ())) {
1148                 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
1149                 MONO_GC_GLOBAL_REMSET_ADD ((mword)ptr, (mword)obj, sgen_safe_object_get_size (obj),
1150                                 vt->klass->name_space, vt->klass->name);
1151         }
1152 #endif
1153 }
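/*
 * Illustrative (hypothetical) call site: when a minor collection leaves an
 * object in the nursery, e.g. because it is pinned, the non-nursery slot that
 * still references it is recorded so the next minor collection can find it:
 *
 *      if (sgen_ptr_in_nursery (*slot) && !sgen_ptr_in_nursery (slot))
 *              sgen_add_to_global_remset (slot, *slot);
 */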
1154
1155 /*
1156  * sgen_drain_gray_stack:
1157  *
1158  *   Scan objects in the gray stack until the stack is empty. This should be called
1159  * frequently after each object is copied, to achieve better locality and cache
1160  * usage.
1161  */
1162 gboolean
1163 sgen_drain_gray_stack (int max_objs, ScanCopyContext ctx)
1164 {
1165         char *obj;
1166         ScanObjectFunc scan_func = ctx.scan_func;
1167         GrayQueue *queue = ctx.queue;
1168
1169         if (max_objs == -1) {
1170                 for (;;) {
1171                         GRAY_OBJECT_DEQUEUE (queue, obj);
1172                         if (!obj)
1173                                 return TRUE;
1174                         SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, safe_name (obj));
1175                         scan_func (obj, queue);
1176                 }
1177         } else {
1178                 int i;
1179
1180                 do {
1181                         for (i = 0; i != max_objs; ++i) {
1182                                 GRAY_OBJECT_DEQUEUE (queue, obj);
1183                                 if (!obj)
1184                                         return TRUE;
1185                                 SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, safe_name (obj));
1186                                 scan_func (obj, queue);
1187                         }
1188                 } while (max_objs < 0);
1189                 return FALSE;
1190         }
1191 }
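/*
 * Usage sketch: the ctx field names are the ones used above; the scan_object
 * member of current_object_ops is an assumption of this example.
 *
 *      ScanCopyContext ctx = { 0, };
 *      ctx.scan_func = current_object_ops.scan_object;
 *      ctx.queue = &gray_queue;
 *      sgen_drain_gray_stack (-1, ctx);        // drain completely
 *      sgen_drain_gray_stack (32, ctx);        // or scan in batches of up to 32 objects
 */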
1192
1193 /*
1194  * Addresses from start to end are already sorted. This function finds
1195  * the object header for each address and pins the object. The
1196  * addresses must be inside the passed section.  The (start of the)
1197  * address array is overwritten with the addresses of the actually
1198  * pinned objects.  Return the number of pinned objects.
1199  */
1200 static int
1201 pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, ScanCopyContext ctx)
1202 {
1203         void *last = NULL;
1204         int count = 0;
1205         void *search_start;
1206         void *last_obj = NULL;
1207         size_t last_obj_size = 0;
1208         void *addr;
1209         int idx;
1210         void **definitely_pinned = start;
1211         ScanObjectFunc scan_func = ctx.scan_func;
1212         SgenGrayQueue *queue = ctx.queue;
1213
1214         sgen_nursery_allocator_prepare_for_pinning ();
1215
1216         while (start < end) {
1217                 addr = *start;
1218                 /* the range check should be redundant */
1219                 if (addr != last && addr >= start_nursery && addr < end_nursery) {
1220                         SGEN_LOG (5, "Considering pinning addr %p", addr);
1221                         /* multiple pointers to the same object */
1222                         if (addr >= last_obj && (char*)addr < (char*)last_obj + last_obj_size) {
1223                                 start++;
1224                                 continue;
1225                         }
1226                         idx = ((char*)addr - (char*)section->data) / SCAN_START_SIZE;
1227                         g_assert (idx < section->num_scan_start);
1228                         search_start = (void*)section->scan_starts [idx];
1229                         if (!search_start || search_start > addr) {
1230                                 while (idx) {
1231                                         --idx;
1232                                         search_start = section->scan_starts [idx];
1233                                         if (search_start && search_start <= addr)
1234                                                 break;
1235                                 }
1236                                 if (!search_start || search_start > addr)
1237                                         search_start = start_nursery;
1238                         }
1239                         if (search_start < last_obj)
1240                                 search_start = (char*)last_obj + last_obj_size;
1241                         /* now addr should be in an object a short distance from search_start
1242                          * Note that search_start must point to zeroed mem or point to an object.
1243                          */
1244
1245                         do {
1246                                 if (!*(void**)search_start) {
1247                                         /* Consistency check */
1248                                         /*
1249                                         for (frag = nursery_fragments; frag; frag = frag->next) {
1250                                                 if (search_start >= frag->fragment_start && search_start < frag->fragment_end)
1251                                                         g_assert_not_reached ();
1252                                         }
1253                                         */
1254
1255                                         search_start = (void*)ALIGN_UP ((mword)search_start + sizeof (gpointer));
1256                                         continue;
1257                                 }
1258                                 last_obj = search_start;
1259                                 last_obj_size = ALIGN_UP (safe_object_get_size ((MonoObject*)search_start));
1260
1261                                 if (((MonoObject*)last_obj)->synchronisation == GINT_TO_POINTER (-1)) {
1262                                         /* Marks the beginning of a nursery fragment, skip */
1263                                 } else {
1264                                         SGEN_LOG (8, "Pinned try match %p (%s), size %zd", last_obj, safe_name (last_obj), last_obj_size);
1265                                         if (addr >= search_start && (char*)addr < (char*)last_obj + last_obj_size) {
1266                                                 if (scan_func) {
1267                                                         scan_func (search_start, queue);
1268                                                 } else {
1269                                                         SGEN_LOG (4, "Pinned object %p, vtable %p (%s), count %d\n",
1270                                                                         search_start, *(void**)search_start, safe_name (search_start), count);
1271                                                         binary_protocol_pin (search_start,
1272                                                                         (gpointer)LOAD_VTABLE (search_start),
1273                                                                         safe_object_get_size (search_start));
1274
1275 #ifdef ENABLE_DTRACE
1276                                                         if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
1277                                                                 int gen = sgen_ptr_in_nursery (search_start) ? GENERATION_NURSERY : GENERATION_OLD;
1278                                                                 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (search_start);
1279                                                                 MONO_GC_OBJ_PINNED ((mword)search_start,
1280                                                                                 sgen_safe_object_get_size (search_start),
1281                                                                                 vt->klass->name_space, vt->klass->name, gen);
1282                                                         }
1283 #endif
1284
1285                                                         pin_object (search_start);
1286                                                         GRAY_OBJECT_ENQUEUE (queue, search_start);
1287                                                         if (G_UNLIKELY (do_pin_stats))
1288                                                                 sgen_pin_stats_register_object (search_start, last_obj_size);
1289                                                         definitely_pinned [count] = search_start;
1290                                                         count++;
1291                                                 }
1292                                                 break;
1293                                         }
1294                                 }
1295                                 /* skip to the next object */
1296                                 search_start = (void*)((char*)search_start + last_obj_size);
1297                         } while (search_start <= addr);
1298                         /* we either pinned the correct object or we ignored the addr because
1299                          * it points to unused zeroed memory.
1300                          */
1301                         last = addr;
1302                 }
1303                 start++;
1304         }
1305         //printf ("effective pinned: %d (at the end: %d)\n", count, (char*)end_nursery - (char*)last);
1306         if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
1307                 GCRootReport report;
1308                 report.count = 0;
1309                 for (idx = 0; idx < count; ++idx)
1310                         add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
1311                 notify_gc_roots (&report);
1312         }
1313         stat_pinned_objects += count;
1314         return count;
1315 }
1316
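/*
 * Pin the objects addressed by the section's pin queue.  The queue entries are
 * candidate addresses; pin_objects_from_addresses () resolves each of them to an
 * actual object (or discards it), so afterwards the queue is shrunk to the number
 * of entries that really matched objects.
 */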
1317 void
1318 sgen_pin_objects_in_section (GCMemSection *section, ScanCopyContext ctx)
1319 {
1320         int num_entries = section->pin_queue_num_entries;
1321         if (num_entries) {
1322                 void **start = section->pin_queue_start;
1323                 int reduced_to;
1324                 reduced_to = pin_objects_from_addresses (section, start, start + num_entries,
1325                                 section->data, section->next_data, ctx);
1326                 section->pin_queue_num_entries = reduced_to;
1327                 if (!reduced_to)
1328                         section->pin_queue_start = NULL;
1329         }
1330 }
1331
1332
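/*
 * Pin a single object directly during a collection.  In the parallel case the
 * object is expected to arrive already pinned, so we only stage its address under
 * the pin-queue lock; otherwise we pin it here, stage it, and record pin
 * statistics if enabled.  Either way the object is pushed on the gray queue and
 * the pin is logged to the binary protocol and, if enabled, to DTrace.
 */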
1333 void
1334 sgen_pin_object (void *object, GrayQueue *queue)
1335 {
1336         g_assert (!concurrent_collection_in_progress);
1337
1338         if (sgen_collection_is_parallel ()) {
1339                 LOCK_PIN_QUEUE;
1340                 /* object arrives pinned */
1341                 sgen_pin_stage_ptr (object);
1342                 ++objects_pinned;
1343                 UNLOCK_PIN_QUEUE;
1344         } else {
1345                 SGEN_PIN_OBJECT (object);
1346                 sgen_pin_stage_ptr (object);
1347                 ++objects_pinned;
1348                 if (G_UNLIKELY (do_pin_stats))
1349                         sgen_pin_stats_register_object (object, safe_object_get_size (object));
1350         }
1351         GRAY_OBJECT_ENQUEUE (queue, object);
1352         binary_protocol_pin (object, (gpointer)LOAD_VTABLE (object), safe_object_get_size (object));
1353
1354 #ifdef ENABLE_DTRACE
1355         if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
1356                 int gen = sgen_ptr_in_nursery (object) ? GENERATION_NURSERY : GENERATION_OLD;
1357                 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (object);
1358                 MONO_GC_OBJ_PINNED ((mword)object, sgen_safe_object_get_size (object), vt->klass->name_space, vt->klass->name, gen);
1359         }
1360 #endif
1361 }
1362
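/*
 * Used by the parallel collector when an object could not be copied: either pin
 * *obj in place or, if another worker already forwarded it, update *ptr to the
 * new location.  For nursery objects we CAS the pinned bit into the vtable word
 * and retry on failure; the loop exits once the object is seen as forwarded
 * (pointer updated) or pinned.
 */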
1363 void
1364 sgen_parallel_pin_or_update (void **ptr, void *obj, MonoVTable *vt, SgenGrayQueue *queue)
1365 {
1366         for (;;) {
1367                 mword vtable_word;
1368                 gboolean major_pinned = FALSE;
1369
1370                 if (sgen_ptr_in_nursery (obj)) {
1371                         if (SGEN_CAS_PTR (obj, (void*)((mword)vt | SGEN_PINNED_BIT), vt) == vt) {
1372                                 sgen_pin_object (obj, queue);
1373                                 break;
1374                         }
1375                 } else {
1376                         major_collector.pin_major_object (obj, queue);
1377                         major_pinned = TRUE;
1378                 }
1379
1380                 vtable_word = *(mword*)obj;
1381                 /*someone else forwarded it, update the pointer and bail out*/
1382                 if (vtable_word & SGEN_FORWARDED_BIT) {
1383                         *ptr = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1384                         break;
1385                 }
1386
1387                 /*someone pinned it, nothing to do.*/
1388                 if (vtable_word & SGEN_PINNED_BIT || major_pinned)
1389                         break;
1390         }
1391 }
1392
1393 /* Sort the addresses in array in increasing order.
1394  * Done using a by-the-book heap sort, which has decent and stable performance and is pretty cache efficient.
1395  */
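/*
 * Phase 1 below builds a max-heap by sifting each element up; phase 2 repeatedly
 * swaps the maximum to the end of the shrinking range and sifts the new root down.
 * A hypothetical usage example, sorting a few staged pin addresses before
 * uniquing them:
 *
 *     void *addrs [3] = { p3, p1, p2 };
 *     sgen_sort_addresses (addrs, 3);
 *     // addrs now holds p1, p2, p3 in increasing address order
 */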
1396 void
1397 sgen_sort_addresses (void **array, int size)
1398 {
1399         int i;
1400         void *tmp;
1401
1402         for (i = 1; i < size; ++i) {
1403                 int child = i;
1404                 while (child > 0) {
1405                         int parent = (child - 1) / 2;
1406
1407                         if (array [parent] >= array [child])
1408                                 break;
1409
1410                         tmp = array [parent];
1411                         array [parent] = array [child];
1412                         array [child] = tmp;
1413
1414                         child = parent;
1415                 }
1416         }
1417
1418         for (i = size - 1; i > 0; --i) {
1419                 int end, root;
1420                 tmp = array [i];
1421                 array [i] = array [0];
1422                 array [0] = tmp;
1423
1424                 end = i - 1;
1425                 root = 0;
1426
1427                 while (root * 2 + 1 <= end) {
1428                         int child = root * 2 + 1;
1429
1430                         if (child < end && array [child] < array [child + 1])
1431                                 ++child;
1432                         if (array [root] >= array [child])
1433                                 break;
1434
1435                         tmp = array [root];
1436                         array [root] = array [child];
1437                         array [child] = tmp;
1438
1439                         root = child;
1440                 }
1441         }
1442 }
1443
1444 /* 
1445  * Scan the memory between start and end and queue values which could be pointers
1446  * to the area between start_nursery and end_nursery for later consideration.
1447  * Typically used for thread stacks.
1448  */
1449 static void
1450 conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type)
1451 {
1452         int count = 0;
1453
1454 #ifdef VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE
1455         VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE (start, (char*)end - (char*)start);
1456 #endif
1457
1458         while (start < end) {
1459                 if (*start >= start_nursery && *start < end_nursery) {
1460                         /*
1461                          * *start can point to the middle of an object
1462                          * note: should we handle pointing at the end of an object?
1463                          * pinning in C# code disallows pointing at the end of an object
1464                          * but there is some small chance that an optimizing C compiler
1465                          * may keep the only reference to an object by pointing
1466                          * at the end of it. We ignore this small chance for now.
1467                          * Pointers to the end of an object are indistinguishable
1468                          * from pointers to the start of the next object in memory
1469                          * so if we allow that we'd need to pin two objects...
1470                          * We queue the pointer in an array; the
1471                          * array will then be sorted and uniqued. This way
1472                          * we can coalesce several pinning pointers and it should
1473                          * be faster since we'd do a memory scan with increasing
1474                          * addresses. Note: we can align the address to the allocation
1475                          * alignment, so the uniquing process is more effective.
1476                          */
1477                         mword addr = (mword)*start;
1478                         addr &= ~(ALLOC_ALIGN - 1);
1479                         if (addr >= (mword)start_nursery && addr < (mword)end_nursery) {
1480                                 SGEN_LOG (6, "Pinning address %p from %p", (void*)addr, start);
1481                                 sgen_pin_stage_ptr ((void*)addr);
1482                                 count++;
1483                         }
1484                         if (G_UNLIKELY (do_pin_stats)) { 
1485                                 if (ptr_in_nursery ((void*)addr))
1486                                         sgen_pin_stats_register_address ((char*)addr, pin_type);
1487                         }
1488                 }
1489                 start++;
1490         }
1491         if (count)
1492                 SGEN_LOG (7, "found %d potential pinned heap pointers", count);
1493 }
1494
1495 /*
1496  * The first thing we do in a collection is to identify pinned objects.
1497  * This function considers all the areas of memory that need to be
1498  * conservatively scanned.
1499  */
1500 static void
1501 pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue)
1502 {
1503         void **start_root;
1504         RootRecord *root;
1505         SGEN_LOG (2, "Scanning pinned roots (%d bytes, %d/%d entries)", (int)roots_size, roots_hash [ROOT_TYPE_NORMAL].num_entries, roots_hash [ROOT_TYPE_PINNED].num_entries);
1506         /* objects pinned from the API are inside these roots */
1507         SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], start_root, root) {
1508                 SGEN_LOG (6, "Pinned roots %p-%p", start_root, root->end_root);
1509                 conservatively_pin_objects_from (start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
1510         } SGEN_HASH_TABLE_FOREACH_END;
1511         /* now deal with the thread stacks
1512          * in the future we should be able to conservatively scan only:
1513          * *) the cpu registers
1514          * *) the unmanaged stack frames
1515          * *) the _last_ managed stack frame
1516          * *) pointer slots in managed frames
1517          */
1518         scan_thread_data (start_nursery, end_nursery, FALSE, queue);
1519 }
1520
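/*
 * Drain the given gray queue, clearing the pinned bit on every entry.  Each entry
 * is asserted to be pinned, since this is only meant to undo pins that were
 * recorded on that queue.
 */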
1521 static void
1522 unpin_objects_from_queue (SgenGrayQueue *queue)
1523 {
1524         for (;;) {
1525                 char *addr;
1526                 GRAY_OBJECT_DEQUEUE (queue, addr);
1527                 if (!addr)
1528                         break;
1529                 g_assert (SGEN_OBJECT_IS_PINNED (addr));
1530                 SGEN_UNPIN_OBJECT (addr);
1531         }
1532 }
1533
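/*
 * User root descriptors invoke a marker callback that only receives the address
 * of each slot, so the copy function and gray queue it should use are passed out
 * of band through thread-local storage (see single_arg_user_copy_or_mark ()).
 */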
1534 typedef struct {
1535         CopyOrMarkObjectFunc func;
1536         GrayQueue *queue;
1537 } UserCopyOrMarkData;
1538
1539 static MonoNativeTlsKey user_copy_or_mark_key;
1540
1541 static void
1542 init_user_copy_or_mark_key (void)
1543 {
1544         mono_native_tls_alloc (&user_copy_or_mark_key, NULL);
1545 }
1546
1547 static void
1548 set_user_copy_or_mark_data (UserCopyOrMarkData *data)
1549 {
1550         mono_native_tls_set_value (user_copy_or_mark_key, data);
1551 }
1552
1553 static void
1554 single_arg_user_copy_or_mark (void **obj)
1555 {
1556         UserCopyOrMarkData *data = mono_native_tls_get_value (user_copy_or_mark_key);
1557
1558         data->func (obj, data->queue);
1559 }
1560
1561 /*
1562  * The memory area from start_root to end_root contains pointers to objects.
1563  * Their position is precisely described by @desc (this means that the pointer
1564  * can be either NULL or the pointer to the start of an object).
1565  * This function copies them to to_space and updates them.
1566  *
1567  * This function is not thread-safe!
1568  */
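/*
 * The descriptor types handled here are: ROOT_DESC_BITMAP (an inline bitmap of
 * pointer slots stored in the descriptor itself), ROOT_DESC_COMPLEX (an
 * out-of-line bitmap, one gsize worth of bits per GC_BITS_PER_WORD slots) and
 * ROOT_DESC_USER (a user-supplied marking callback).  ROOT_DESC_RUN_LEN is not
 * expected for the roots scanned here.
 */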
1569 static void
1570 precisely_scan_objects_from (void** start_root, void** end_root, char* n_start, char *n_end, mword desc, ScanCopyContext ctx)
1571 {
1572         CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1573         SgenGrayQueue *queue = ctx.queue;
1574
1575         switch (desc & ROOT_DESC_TYPE_MASK) {
1576         case ROOT_DESC_BITMAP:
1577                 desc >>= ROOT_DESC_TYPE_SHIFT;
1578                 while (desc) {
1579                         if ((desc & 1) && *start_root) {
1580                                 copy_func (start_root, queue);
1581                                 SGEN_LOG (9, "Overwrote root at %p with %p", start_root, *start_root);
1582                                 sgen_drain_gray_stack (-1, ctx);
1583                         }
1584                         desc >>= 1;
1585                         start_root++;
1586                 }
1587                 return;
1588         case ROOT_DESC_COMPLEX: {
1589                 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1590                 int bwords = (*bitmap_data) - 1;
1591                 void **start_run = start_root;
1592                 bitmap_data++;
1593                 while (bwords-- > 0) {
1594                         gsize bmap = *bitmap_data++;
1595                         void **objptr = start_run;
1596                         while (bmap) {
1597                                 if ((bmap & 1) && *objptr) {
1598                                         copy_func (objptr, queue);
1599                                         SGEN_LOG (9, "Overwrote root at %p with %p", objptr, *objptr);
1600                                         sgen_drain_gray_stack (-1, ctx);
1601                                 }
1602                                 bmap >>= 1;
1603                                 ++objptr;
1604                         }
1605                         start_run += GC_BITS_PER_WORD;
1606                 }
1607                 break;
1608         }
1609         case ROOT_DESC_USER: {
1610                 UserCopyOrMarkData data = { copy_func, queue };
1611                 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1612                 set_user_copy_or_mark_data (&data);
1613                 marker (start_root, single_arg_user_copy_or_mark);
1614                 set_user_copy_or_mark_data (NULL);
1615                 break;
1616         }
1617         case ROOT_DESC_RUN_LEN:
1618                 g_assert_not_reached ();
1619         default:
1620                 g_assert_not_reached ();
1621         }
1622 }
1623
1624 static void
1625 reset_heap_boundaries (void)
1626 {
1627         lowest_heap_address = ~(mword)0;
1628         highest_heap_address = 0;
1629 }
1630
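/*
 * Lock-free update of the global heap bounds: each bound is lowered/raised with a
 * CAS retry loop, so concurrent callers can race safely, and each loop exits early
 * when the current value is already at least as good.
 */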
1631 void
1632 sgen_update_heap_boundaries (mword low, mword high)
1633 {
1634         mword old;
1635
1636         do {
1637                 old = lowest_heap_address;
1638                 if (low >= old)
1639                         break;
1640         } while (SGEN_CAS_PTR ((gpointer*)&lowest_heap_address, (gpointer)low, (gpointer)old) != (gpointer)old);
1641
1642         do {
1643                 old = highest_heap_address;
1644                 if (high <= old)
1645                         break;
1646         } while (SGEN_CAS_PTR ((gpointer*)&highest_heap_address, (gpointer)high, (gpointer)old) != (gpointer)old);
1647 }
1648
1649 /*
1650  * Allocate and set up the data structures needed to allocate objects
1651  * in the nursery. The nursery is stored in nursery_section.
1652  */
1653 static void
1654 alloc_nursery (void)
1655 {
1656         GCMemSection *section;
1657         char *data;
1658         int scan_starts;
1659         int alloc_size;
1660
1661         if (nursery_section)
1662                 return;
1663         SGEN_LOG (2, "Allocating nursery size: %lu", (unsigned long)sgen_nursery_size);
1664         /* later we will alloc a larger area for the nursery but only activate
1665          * what we need. The rest will be used as expansion if we have too many pinned
1666          * objects in the existing nursery.
1667          */
1668         /* FIXME: handle OOM */
1669         section = sgen_alloc_internal (INTERNAL_MEM_SECTION);
1670
1671         alloc_size = sgen_nursery_size;
1672
1673         /* If there isn't enough space even for the nursery we should simply abort. */
1674         g_assert (sgen_memgov_try_alloc_space (alloc_size, SPACE_NURSERY));
1675
1676 #ifdef SGEN_ALIGN_NURSERY
1677         data = major_collector.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
1678 #else
1679         data = major_collector.alloc_heap (alloc_size, 0, DEFAULT_NURSERY_BITS);
1680 #endif
1681         sgen_update_heap_boundaries ((mword)data, (mword)(data + sgen_nursery_size));
1682         SGEN_LOG (4, "Expanding nursery size (%p-%p): %lu, total: %lu", data, data + alloc_size, (unsigned long)sgen_nursery_size, (unsigned long)mono_gc_get_heap_size ());
1683         section->data = section->next_data = data;
1684         section->size = alloc_size;
1685         section->end_data = data + sgen_nursery_size;
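        /*
         * One scan start slot per SCAN_START_SIZE chunk of the nursery; the pinning
         * code uses these to quickly locate an object start near an arbitrary address.
         */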
1686         scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
1687         section->scan_starts = sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
1688         section->num_scan_start = scan_starts;
1689
1690         nursery_section = section;
1691
1692         sgen_nursery_allocator_set_nursery_bounds (data, data + sgen_nursery_size);
1693 }
1694
1695 void*
1696 mono_gc_get_nursery (int *shift_bits, size_t *size)
1697 {
1698         *size = sgen_nursery_size;
1699 #ifdef SGEN_ALIGN_NURSERY
1700         *shift_bits = DEFAULT_NURSERY_BITS;
1701 #else
1702         *shift_bits = -1;
1703 #endif
1704         return sgen_get_nursery_start ();
1705 }
1706
1707 void
1708 mono_gc_set_current_thread_appdomain (MonoDomain *domain)
1709 {
1710         SgenThreadInfo *info = mono_thread_info_current ();
1711
1712         /* Could be called from sgen_thread_unregister () with a NULL info */
1713         if (domain) {
1714                 g_assert (info);
1715                 info->stopped_domain = domain;
1716         }
1717 }
1718
1719 gboolean
1720 mono_gc_precise_stack_mark_enabled (void)
1721 {
1722         return !conservative_stack_mark;
1723 }
1724
1725 FILE *
1726 mono_gc_get_logfile (void)
1727 {
1728         return gc_debug_file;
1729 }
1730
1731 static void
1732 report_finalizer_roots_list (FinalizeReadyEntry *list)
1733 {
1734         GCRootReport report;
1735         FinalizeReadyEntry *fin;
1736
1737         report.count = 0;
1738         for (fin = list; fin; fin = fin->next) {
1739                 if (!fin->object)
1740                         continue;
1741                 add_profile_gc_root (&report, fin->object, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
1742         }
1743         notify_gc_roots (&report);
1744 }
1745
1746 static void
1747 report_finalizer_roots (void)
1748 {
1749         report_finalizer_roots_list (fin_ready_list);
1750         report_finalizer_roots_list (critical_fin_list);
1751 }
1752
1753 static GCRootReport *root_report;
1754
1755 static void
1756 single_arg_report_root (void **obj)
1757 {
1758         if (*obj)
1759                 add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
1760 }
1761
1762 static void
1763 precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
1764 {
1765         switch (desc & ROOT_DESC_TYPE_MASK) {
1766         case ROOT_DESC_BITMAP:
1767                 desc >>= ROOT_DESC_TYPE_SHIFT;
1768                 while (desc) {
1769                         if ((desc & 1) && *start_root) {
1770                                 add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
1771                         }
1772                         desc >>= 1;
1773                         start_root++;
1774                 }
1775                 return;
1776         case ROOT_DESC_COMPLEX: {
1777                 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1778                 int bwords = (*bitmap_data) - 1;
1779                 void **start_run = start_root;
1780                 bitmap_data++;
1781                 while (bwords-- > 0) {
1782                         gsize bmap = *bitmap_data++;
1783                         void **objptr = start_run;
1784                         while (bmap) {
1785                                 if ((bmap & 1) && *objptr) {
1786                                         add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
1787                                 }
1788                                 bmap >>= 1;
1789                                 ++objptr;
1790                         }
1791                         start_run += GC_BITS_PER_WORD;
1792                 }
1793                 break;
1794         }
1795         case ROOT_DESC_USER: {
1796                 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1797                 root_report = report;
1798                 marker (start_root, single_arg_report_root);
1799                 break;
1800         }
1801         case ROOT_DESC_RUN_LEN:
1802                 g_assert_not_reached ();
1803         default:
1804                 g_assert_not_reached ();
1805         }
1806 }
1807
1808 static void
1809 report_registered_roots_by_type (int root_type)
1810 {
1811         GCRootReport report;
1812         void **start_root;
1813         RootRecord *root;
1814         report.count = 0;
1815         SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1816                 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1817                 precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
1818         } SGEN_HASH_TABLE_FOREACH_END;
1819         notify_gc_roots (&report);
1820 }
1821
1822 static void
1823 report_registered_roots (void)
1824 {
1825         report_registered_roots_by_type (ROOT_TYPE_NORMAL);
1826         report_registered_roots_by_type (ROOT_TYPE_WBARRIER);
1827 }
1828
1829 static void
1830 scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx)
1831 {
1832         CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1833         SgenGrayQueue *queue = ctx.queue;
1834         FinalizeReadyEntry *fin;
1835
1836         for (fin = list; fin; fin = fin->next) {
1837                 if (!fin->object)
1838                         continue;
1839                 SGEN_LOG (5, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object));
1840                 copy_func (&fin->object, queue);
1841         }
1842 }
1843
1844 static const char*
1845 generation_name (int generation)
1846 {
1847         switch (generation) {
1848         case GENERATION_NURSERY: return "nursery";
1849         case GENERATION_OLD: return "old";
1850         default: g_assert_not_reached ();
1851         }
1852 }
1853
1854 const char*
1855 sgen_generation_name (int generation)
1856 {
1857         return generation_name (generation);
1858 }
1859
1860 SgenObjectOperations *
1861 sgen_get_current_object_ops (void)
{
1862         return &current_object_ops;
1863 }
1864
1865
1866 static void
1867 finish_gray_stack (char *start_addr, char *end_addr, int generation, GrayQueue *queue)
1868 {
1869         TV_DECLARE (atv);
1870         TV_DECLARE (btv);
1871         int done_with_ephemerons, ephemeron_rounds = 0;
1872         CopyOrMarkObjectFunc copy_func = current_object_ops.copy_or_mark_object;
1873         ScanObjectFunc scan_func = current_object_ops.scan_object;
1874         ScanCopyContext ctx = { scan_func, copy_func, queue };
1875
1876         /*
1877          * We copied all the reachable objects. Now it's time to copy
1878          * the objects that were not referenced by the roots, but by the copied objects.
1879          * We built a stack of objects pointed to by gray_start: they are
1880          * additional roots and we may add more items as we go.
1881          * We loop until gray_start == gray_objects, which means no more objects have
1882          * been added. Note this is iterative: no recursion is involved.
1883          * We need to walk the LO list as well in search of marked big objects
1884          * (use a flag since this is needed only on major collections). We need to loop
1885          * here as well, so keep a counter of marked LO (increasing it in copy_object).
1886          *   To achieve better cache locality and cache usage, we drain the gray stack 
1887          * frequently, after each object is copied, and just finish the work here.
1888          */
1889         sgen_drain_gray_stack (-1, ctx);
1890         TV_GETTIME (atv);
1891         SGEN_LOG (2, "%s generation done", generation_name (generation));
1892
1893         /*
1894         Reset bridge data: we might have lingering data from a previous collection if this is a major
1895         collection triggered by minor overflow.
1896
1897         We must reset the gathered bridges since their original block might be evacuated due to major
1898         fragmentation in the meantime, and the bridge code should not have to deal with that.
1899         */
1900         sgen_bridge_reset_data ();
1901
1902         /*
1903          * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
1904          * before processing finalizable objects and non-tracking weak links to avoid finalizing/clearing
1905          * objects that are in fact reachable.
1906          */
1907         done_with_ephemerons = 0;
1908         do {
1909                 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1910                 sgen_drain_gray_stack (-1, ctx);
1911                 ++ephemeron_rounds;
1912         } while (!done_with_ephemerons);
1913
1914         sgen_scan_togglerefs (start_addr, end_addr, ctx);
1915         if (generation == GENERATION_OLD)
1916                 sgen_scan_togglerefs (sgen_get_nursery_start (), sgen_get_nursery_end (), ctx);
1917
1918         if (sgen_need_bridge_processing ()) {
1919                 sgen_collect_bridge_objects (generation, ctx);
1920                 if (generation == GENERATION_OLD)
1921                         sgen_collect_bridge_objects (GENERATION_NURSERY, ctx);
1922         }
1923
1924         /*
1925         Make sure we drain the gray stack before processing disappearing links and finalizers.
1926         If we don't make sure it is empty we might wrongly see a live object as dead.
1927         */
1928         sgen_drain_gray_stack (-1, ctx);
1929
1930         /*
1931         We must clear weak links that don't track resurrection before processing the objects that are
1932         ready for finalization, so that those links are cleared before finalization runs.
1933         */
1934         sgen_null_link_in_range (generation, TRUE, ctx);
1935         if (generation == GENERATION_OLD)
1936                 sgen_null_link_in_range (GENERATION_NURSERY, TRUE, ctx);
1937
1938
1939         /* walk the finalization queue and also move the objects that need to be
1940          * finalized: use the finalized objects as new roots so the objects they depend
1941          * on are also not reclaimed. As with the roots above, only objects in the nursery
1942          * are marked/copied.
1943          */
1944         sgen_finalize_in_range (generation, ctx);
1945         if (generation == GENERATION_OLD)
1946                 sgen_finalize_in_range (GENERATION_NURSERY, ctx);
1947         /* drain the new stack that might have been created */
1948         SGEN_LOG (6, "Precise scan of gray area post fin");
1949         sgen_drain_gray_stack (-1, ctx);
1950
1951         /*
1952          * This must be done again after processing finalizable objects since CWL slots are cleared only after the key is finalized.
1953          */
1954         done_with_ephemerons = 0;
1955         do {
1956                 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1957                 sgen_drain_gray_stack (-1, ctx);
1958                 ++ephemeron_rounds;
1959         } while (!done_with_ephemerons);
1960
1961         /*
1962          * Clear ephemeron pairs with unreachable keys.
1963          * We pass the copy func so we can figure out if an array was promoted or not.
1964          */
1965         clear_unreachable_ephemerons (ctx);
1966
1967         TV_GETTIME (btv);
1968         SGEN_LOG (2, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron rounds", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds);
1969
1970         /*
1971          * handle disappearing links
1972          * Note we do this after checking the finalization queue because if an object
1973          * survives (at least long enough to be finalized) we don't clear the link.
1974          * This also deals with a possible issue with the monitor reclamation: with the Boehm
1975          * GC a finalized object may lose the monitor because it is cleared before the finalizer is
1976          * called.
1977          */
1978         g_assert (sgen_gray_object_queue_is_empty (queue));
1979         for (;;) {
1980                 sgen_null_link_in_range (generation, FALSE, ctx);
1981                 if (generation == GENERATION_OLD)
1982                         sgen_null_link_in_range (GENERATION_NURSERY, FALSE, ctx);
1983                 if (sgen_gray_object_queue_is_empty (queue))
1984                         break;
1985                 sgen_drain_gray_stack (-1, ctx);
1986         }
1987
1988         g_assert (sgen_gray_object_queue_is_empty (queue));
1989 }
1990
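/*
 * Debug check: every recorded scan start must point at a plausible small object,
 * i.e. one whose reported size is at least sizeof (MonoObject) and no larger than
 * MAX_SMALL_OBJ_SIZE.
 */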
1991 void
1992 sgen_check_section_scan_starts (GCMemSection *section)
1993 {
1994         int i;
1995         for (i = 0; i < section->num_scan_start; ++i) {
1996                 if (section->scan_starts [i]) {
1997                         guint size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
1998                         g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
1999                 }
2000         }
2001 }
2002
2003 static void
2004 check_scan_starts (void)
2005 {
2006         if (!do_scan_starts_check)
2007                 return;
2008         sgen_check_section_scan_starts (nursery_section);
2009         major_collector.check_scan_starts ();
2010 }
2011
2012 static void
2013 scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx)
2014 {
2015         void **start_root;
2016         RootRecord *root;
2017         SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
2018                 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
2019                 precisely_scan_objects_from (start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, ctx);
2020         } SGEN_HASH_TABLE_FOREACH_END;
2021 }
2022
2023 void
2024 sgen_dump_occupied (char *start, char *end, char *section_start)
2025 {
2026         fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
2027 }
2028
2029 void
2030 sgen_dump_section (GCMemSection *section, const char *type)
2031 {
2032         char *start = section->data;
2033         char *end = section->data + section->size;
2034         char *occ_start = NULL;
2035         GCVTable *vt;
2036         char *old_start = NULL; /* just for debugging */
2037
2038         fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
2039
2040         while (start < end) {
2041                 guint size;
2042                 MonoClass *class;
2043
2044                 if (!*(void**)start) {
2045                         if (occ_start) {
2046                                 sgen_dump_occupied (occ_start, start, section->data);
2047                                 occ_start = NULL;
2048                         }
2049                         start += sizeof (void*); /* should be ALLOC_ALIGN, really */
2050                         continue;
2051                 }
2052                 g_assert (start < section->next_data);
2053
2054                 if (!occ_start)
2055                         occ_start = start;
2056
2057                 vt = (GCVTable*)LOAD_VTABLE (start);
2058                 class = vt->klass;
2059
2060                 size = ALIGN_UP (safe_object_get_size ((MonoObject*) start));
2061
2062                 /*
2063                 fprintf (heap_dump_file, "<object offset=\"%d\" class=\"%s.%s\" size=\"%d\"/>\n",
2064                                 start - section->data,
2065                                 vt->klass->name_space, vt->klass->name,
2066                                 size);
2067                 */
2068
2069                 old_start = start;
2070                 start += size;
2071         }
2072         if (occ_start)
2073                 sgen_dump_occupied (occ_start, start, section->data);
2074
2075         fprintf (heap_dump_file, "</section>\n");
2076 }
2077
2078 static void
2079 dump_object (MonoObject *obj, gboolean dump_location)
2080 {
2081         static char class_name [1024];
2082
2083         MonoClass *class = mono_object_class (obj);
2084         int i, j;
2085
2086         /*
2087          * Python's XML parser is too stupid to parse angle brackets
2088          * in strings, so we simply strip them out.
2089          */
2090         i = j = 0;
2091         while (class->name [i] && j < sizeof (class_name) - 1) {
2092                 if (!strchr ("<>\"", class->name [i]))
2093                         class_name [j++] = class->name [i];
2094                 ++i;
2095         }
2096         g_assert (j < sizeof (class_name));
2097         class_name [j] = 0;
2098
2099         fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%d\"",
2100                         class->name_space, class_name,
2101                         safe_object_get_size (obj));
2102         if (dump_location) {
2103                 const char *location;
2104                 if (ptr_in_nursery (obj))
2105                         location = "nursery";
2106                 else if (safe_object_get_size (obj) <= MAX_SMALL_OBJ_SIZE)
2107                         location = "major";
2108                 else
2109                         location = "LOS";
2110                 fprintf (heap_dump_file, " location=\"%s\"", location);
2111         }
2112         fprintf (heap_dump_file, "/>\n");
2113 }
2114
2115 static void
2116 dump_heap (const char *type, int num, const char *reason)
2117 {
2118         ObjectList *list;
2119         LOSObject *bigobj;
2120
2121         fprintf (heap_dump_file, "<collection type=\"%s\" num=\"%d\"", type, num);
2122         if (reason)
2123                 fprintf (heap_dump_file, " reason=\"%s\"", reason);
2124         fprintf (heap_dump_file, ">\n");
2125         fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
2126         sgen_dump_internal_mem_usage (heap_dump_file);
2127         fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_STACK));
2128         /* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
2129         fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_OTHER));
2130
2131         fprintf (heap_dump_file, "<pinned-objects>\n");
2132         for (list = sgen_pin_stats_get_object_list (); list; list = list->next)
2133                 dump_object (list->obj, TRUE);
2134         fprintf (heap_dump_file, "</pinned-objects>\n");
2135
2136         sgen_dump_section (nursery_section, "nursery");
2137
2138         major_collector.dump_heap (heap_dump_file);
2139
2140         fprintf (heap_dump_file, "<los>\n");
2141         for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
2142                 dump_object ((MonoObject*)bigobj->data, FALSE);
2143         fprintf (heap_dump_file, "</los>\n");
2144
2145         fprintf (heap_dump_file, "</collection>\n");
2146 }
2147
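/*
 * Record an (old address, new address) pair for the profiler's GC-moves events.
 * Pairs are buffered in moved_objects and flushed to mono_profiler_gc_moves ()
 * whenever the buffer fills up (MOVED_OBJECTS_NUM entries).
 */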
2148 void
2149 sgen_register_moved_object (void *obj, void *destination)
2150 {
2151         g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
2152
2153         /* FIXME: handle this for parallel collector */
2154         g_assert (!sgen_collection_is_parallel ());
2155
2156         if (moved_objects_idx == MOVED_OBJECTS_NUM) {
2157                 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
2158                 moved_objects_idx = 0;
2159         }
2160         moved_objects [moved_objects_idx++] = obj;
2161         moved_objects [moved_objects_idx++] = destination;
2162 }
2163
2164 static void
2165 init_stats (void)
2166 {
2167         static gboolean inited = FALSE;
2168
2169         if (inited)
2170                 return;
2171
2172         mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_pre_collection_fragment_clear);
2173         mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_pinning);
2174         mono_counters_register ("Minor scan remembered set", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_remsets);
2175         mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_pinned);
2176         mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_registered_roots);
2177         mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_thread_data);
2178         mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_finish_gray_stack);
2179         mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_fragment_creation);
2180
2181         mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_pre_collection_fragment_clear);
2182         mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_pinning);
2183         mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_pinned);
2184         mono_counters_register ("Major scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_registered_roots);
2185         mono_counters_register ("Major scan thread data", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_thread_data);
2186         mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_alloc_pinned);
2187         mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_finalized);
2188         mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_big_objects);
2189         mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_finish_gray_stack);
2190         mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_free_bigobjs);
2191         mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_los_sweep);
2192         mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_sweep);
2193         mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_fragment_creation);
2194
2195         mono_counters_register ("Number of pinned objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_pinned_objects);
2196
2197 #ifdef HEAVY_STATISTICS
2198         mono_counters_register ("WBarrier remember pointer", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_add_to_global_remset);
2199         mono_counters_register ("WBarrier set field", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_field);
2200         mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_arrayref);
2201         mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_arrayref_copy);
2202         mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store);
2203         mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_root);
2204         mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_value_copy);
2205         mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_object_copy);
2206
2207         mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced_degraded);
2208         mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_degraded);
2209
2210         mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_nursery);
2211         mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_nursery);
2212         mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_major);
2213         mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_major);
2214
2215         mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_nursery);
2216         mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_major);
2217
2218         mono_counters_register ("Slots allocated in vain", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_slots_allocated_in_vain);
2219
2220         mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_from_space);
2221         mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_forwarded);
2222         mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_pinned);
2223         mono_counters_register ("# nursery copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_to_space);
2224
2225         sgen_nursery_allocator_init_heavy_stats ();
2226         sgen_alloc_init_heavy_stats ();
2227 #endif
2228
2229         inited = TRUE;
2230 }
2231
2232
2233 static void
2234 reset_pinned_from_failed_allocation (void)
2235 {
2236         bytes_pinned_from_failed_allocation = 0;
2237 }
2238
2239 void
2240 sgen_set_pinned_from_failed_allocation (mword objsize)
2241 {
2242         bytes_pinned_from_failed_allocation += objsize;
2243 }
2244
2245 gboolean
2246 sgen_collection_is_parallel (void)
2247 {
2248         switch (current_collection_generation) {
2249         case GENERATION_NURSERY:
2250                 return nursery_collection_is_parallel;
2251         case GENERATION_OLD:
2252                 return major_collector.is_parallel;
2253         default:
2254                 g_error ("Invalid current generation %d", current_collection_generation);
2255         }
2256 }
2257
2258 gboolean
2259 sgen_collection_is_concurrent (void)
2260 {
2261         switch (current_collection_generation) {
2262         case GENERATION_NURSERY:
2263                 return FALSE;
2264         case GENERATION_OLD:
2265                 return concurrent_collection_in_progress;
2266         default:
2267                 g_error ("Invalid current generation %d", current_collection_generation);
2268         }
2269 }
2270
2271 gboolean
2272 sgen_concurrent_collection_in_progress (void)
2273 {
2274         return concurrent_collection_in_progress;
2275 }
2276
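/*
 * The job_* functions below are work items handed to the worker threads: most of
 * them carry their own job data (freed at the end of the job), and all of them
 * obtain the gray queue to use via sgen_workers_get_job_gray_queue ().
 */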
2277 typedef struct
2278 {
2279         char *heap_start;
2280         char *heap_end;
2281 } FinishRememberedSetScanJobData;
2282
2283 static void
2284 job_finish_remembered_set_scan (WorkerData *worker_data, void *job_data_untyped)
2285 {
2286         FinishRememberedSetScanJobData *job_data = job_data_untyped;
2287
2288         remset.finish_scan_remsets (job_data->heap_start, job_data->heap_end, sgen_workers_get_job_gray_queue (worker_data));
2289         sgen_free_internal_dynamic (job_data, sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA);
2290 }
2291
2292 typedef struct
2293 {
2294         CopyOrMarkObjectFunc copy_or_mark_func;
2295         ScanObjectFunc scan_func;
2296         char *heap_start;
2297         char *heap_end;
2298         int root_type;
2299 } ScanFromRegisteredRootsJobData;
2300
2301 static void
2302 job_scan_from_registered_roots (WorkerData *worker_data, void *job_data_untyped)
2303 {
2304         ScanFromRegisteredRootsJobData *job_data = job_data_untyped;
2305         ScanCopyContext ctx = { job_data->scan_func, job_data->copy_or_mark_func,
2306                 sgen_workers_get_job_gray_queue (worker_data) };
2307
2308         scan_from_registered_roots (job_data->heap_start, job_data->heap_end, job_data->root_type, ctx);
2309         sgen_free_internal_dynamic (job_data, sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA);
2310 }
2311
2312 typedef struct
2313 {
2314         char *heap_start;
2315         char *heap_end;
2316 } ScanThreadDataJobData;
2317
2318 static void
2319 job_scan_thread_data (WorkerData *worker_data, void *job_data_untyped)
2320 {
2321         ScanThreadDataJobData *job_data = job_data_untyped;
2322
2323         scan_thread_data (job_data->heap_start, job_data->heap_end, TRUE,
2324                         sgen_workers_get_job_gray_queue (worker_data));
2325         sgen_free_internal_dynamic (job_data, sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA);
2326 }
2327
2328 typedef struct
2329 {
2330         FinalizeReadyEntry *list;
2331 } ScanFinalizerEntriesJobData;
2332
2333 static void
2334 job_scan_finalizer_entries (WorkerData *worker_data, void *job_data_untyped)
2335 {
2336         ScanFinalizerEntriesJobData *job_data = job_data_untyped;
2337         ScanCopyContext ctx = { NULL, current_object_ops.copy_or_mark_object, sgen_workers_get_job_gray_queue (worker_data) };
2338
2339         scan_finalizer_entries (job_data->list, ctx);
2340         sgen_free_internal_dynamic (job_data, sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA);
2341 }
2342
2343 static void
2344 job_scan_major_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2345 {
2346         g_assert (concurrent_collection_in_progress);
2347         major_collector.scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
2348 }
2349
2350 static void
2351 job_scan_los_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2352 {
2353         g_assert (concurrent_collection_in_progress);
2354         sgen_los_scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
2355 }
2356
2357 static void
2358 verify_scan_starts (char *start, char *end)
2359 {
2360         int i;
2361
2362         for (i = 0; i < nursery_section->num_scan_start; ++i) {
2363                 char *addr = nursery_section->scan_starts [i];
2364                 if (addr > start && addr < end)
2365                         SGEN_LOG (1, "NFC-BAD SCAN START [%d] %p for obj [%p %p]", i, addr, start, end);
2366         }
2367 }
2368
2369 static void
2370 verify_nursery (void)
2371 {
2372         char *start, *end, *cur, *hole_start;
2373
2374         if (!do_verify_nursery)
2375                 return;
2376
2377         /* This cleans up unused fragments */
2378         sgen_nursery_allocator_prepare_for_pinning ();
2379
2380         hole_start = start = cur = sgen_get_nursery_start ();
2381         end = sgen_get_nursery_end ();
2382
2383         while (cur < end) {
2384                 size_t ss, size;
2385
2386                 if (!*(void**)cur) {
2387                         cur += sizeof (void*);
2388                         continue;
2389                 }
2390
2391                 if (object_is_forwarded (cur))
2392                         SGEN_LOG (1, "FORWARDED OBJ %p", cur);
2393                 else if (object_is_pinned (cur))
2394                         SGEN_LOG (1, "PINNED OBJ %p", cur);
2395
2396                 ss = safe_object_get_size ((MonoObject*)cur);
2397                 size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2398                 verify_scan_starts (cur, cur + size);
2399                 if (do_dump_nursery_content) {
2400                         if (cur > hole_start)
2401                                 SGEN_LOG (1, "HOLE [%p %p %d]", hole_start, cur, (int)(cur - hole_start));
2402                         SGEN_LOG (1, "OBJ  [%p %p %d %d %s %d]", cur, cur + size, (int)size, (int)ss, sgen_safe_name ((MonoObject*)cur), (gpointer)LOAD_VTABLE (cur) == sgen_get_array_fill_vtable ());
2403                 }
2404                 cur += size;
2405                 hole_start = cur;
2406         }
2407 }
2408
2409 /*
2410  * Checks that no objects in the nursery are forwarded or pinned.  This
2411  * is a precondition to restarting the mutator while doing a
2412  * concurrent collection.  Note that we don't clear fragments because
2413  * we depend on that having happened earlier.
2414  */
2415 static void
2416 check_nursery_is_clean (void)
2417 {
2418         char *start, *end, *cur;
2419
2420         start = cur = sgen_get_nursery_start ();
2421         end = sgen_get_nursery_end ();
2422
2423         while (cur < end) {
2424                 size_t ss, size;
2425
2426                 if (!*(void**)cur) {
2427                         cur += sizeof (void*);
2428                         continue;
2429                 }
2430
2431                 g_assert (!object_is_forwarded (cur));
2432                 g_assert (!object_is_pinned (cur));
2433
2434                 ss = safe_object_get_size ((MonoObject*)cur);
2435                 size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2436                 verify_scan_starts (cur, cur + size);
2437
2438                 cur += size;
2439         }
2440 }
2441
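/*
 * For parallel or concurrent collections the main gray queue is initialized with
 * gray_queue_redirect as its alloc-prepare callback and the workers' distribute
 * section gray queue as the callback data, presumably so that filled-up gray
 * sections can be handed off to the workers; otherwise a plain serial queue is used.
 */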
2442 static void
2443 init_gray_queue (void)
2444 {
2445         if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ()) {
2446                 sgen_workers_init_distribute_gray_queue ();
2447                 sgen_gray_object_queue_init_with_alloc_prepare (&gray_queue, NULL,
2448                                 gray_queue_redirect, sgen_workers_get_distribute_section_gray_queue ());
2449         } else {
2450                 sgen_gray_object_queue_init (&gray_queue, NULL);
2451         }
2452 }
2453
2454 static void
2455 pin_stage_object_callback (char *obj, size_t size, void *data)
2456 {
2457         sgen_pin_stage_ptr (obj);
2458         /* FIXME: do pin stats if enabled */
2459 }
2460
2461 /*
2462  * Collect objects in the nursery.  Returns whether to trigger a major
2463  * collection.
2464  */
2465 static gboolean
2466 collect_nursery (SgenGrayQueue *unpin_queue, gboolean finish_up_concurrent_mark)
2467 {
2468         gboolean needs_major;
2469         size_t max_garbage_amount;
2470         char *nursery_next;
2471         FinishRememberedSetScanJobData *frssjd;
2472         ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2473         ScanFinalizerEntriesJobData *sfejd_fin_ready, *sfejd_critical_fin;
2474         ScanThreadDataJobData *stdjd;
2475         mword fragment_total;
2476         ScanCopyContext ctx;
2477         TV_DECLARE (all_atv);
2478         TV_DECLARE (all_btv);
2479         TV_DECLARE (atv);
2480         TV_DECLARE (btv);
2481
2482         if (disable_minor_collections)
2483                 return TRUE;
2484
2485         MONO_GC_BEGIN (GENERATION_NURSERY);
2486         binary_protocol_collection_begin (stat_minor_gcs, GENERATION_NURSERY);
2487
2488         verify_nursery ();
2489
2490 #ifndef DISABLE_PERFCOUNTERS
2491         mono_perfcounters->gc_collections0++;
2492 #endif
2493
2494         current_collection_generation = GENERATION_NURSERY;
2495         if (sgen_collection_is_parallel ())
2496                 current_object_ops = sgen_minor_collector.parallel_ops;
2497         else
2498                 current_object_ops = sgen_minor_collector.serial_ops;
2499         
2500         reset_pinned_from_failed_allocation ();
2501
2502         check_scan_starts ();
2503
2504         sgen_nursery_alloc_prepare_for_minor ();
2505
2506         degraded_mode = 0;
2507         objects_pinned = 0;
2508         nursery_next = sgen_nursery_alloc_get_upper_alloc_bound ();
2509         /* FIXME: optimize later to use the higher address where an object can be present */
2510         nursery_next = MAX (nursery_next, sgen_get_nursery_end ());
2511
2512         SGEN_LOG (1, "Start nursery collection %d %p-%p, size: %d", stat_minor_gcs, sgen_get_nursery_start (), nursery_next, (int)(nursery_next - sgen_get_nursery_start ()));
2513         max_garbage_amount = nursery_next - sgen_get_nursery_start ();
2514         g_assert (nursery_section->size >= max_garbage_amount);
2515
2516         /* world must be stopped already */
2517         TV_GETTIME (all_atv);
2518         atv = all_atv;
2519
2520         TV_GETTIME (btv);
2521         time_minor_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2522
2523         if (xdomain_checks) {
2524                 sgen_clear_nursery_fragments ();
2525                 check_for_xdomain_refs ();
2526         }
2527
2528         nursery_section->next_data = nursery_next;
2529
2530         major_collector.start_nursery_collection ();
2531
2532         sgen_memgov_minor_collection_start ();
2533
2534         init_gray_queue ();
2535
2536         stat_minor_gcs++;
2537         gc_stats.minor_gc_count ++;
2538
2539         if (remset.prepare_for_minor_collection)
2540                 remset.prepare_for_minor_collection ();
2541
2542         MONO_GC_CHECKPOINT_1 (GENERATION_NURSERY);
2543
2544         sgen_process_fin_stage_entries ();
2545         sgen_process_dislink_stage_entries ();
2546
2547         MONO_GC_CHECKPOINT_2 (GENERATION_NURSERY);
2548
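        /*
         * Pinning pipeline for this collection: stage candidate addresses (API-pinned
         * roots, conservatively scanned thread data, cemented objects), then sort and
         * unique the staged queue, map it onto the nursery section and resolve the
         * addresses to actual pinned objects.
         */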
2549         /* pin from pinned handles */
2550         sgen_init_pinning ();
2551         mono_profiler_gc_event (MONO_GC_EVENT_MARK_START, 0);
2552         pin_from_roots (sgen_get_nursery_start (), nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2553         /* pin cemented objects */
2554         sgen_cement_iterate (pin_stage_object_callback, NULL);
2555         /* identify pinned objects */
2556         sgen_optimize_pin_queue (0);
2557         sgen_pinning_setup_section (nursery_section);
2558         ctx.scan_func = NULL;
2559         ctx.copy_func = NULL;
2560         ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
2561         sgen_pin_objects_in_section (nursery_section, ctx);
2562         sgen_pinning_trim_queue_to_section (nursery_section);
2563
2564         TV_GETTIME (atv);
2565         time_minor_pinning += TV_ELAPSED (btv, atv);
2566         SGEN_LOG (2, "Finding pinned pointers: %d in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (btv, atv));
2567         SGEN_LOG (4, "Start scan with %d pinned objects", sgen_get_pinned_count ());
2568
2569         MONO_GC_CHECKPOINT_3 (GENERATION_NURSERY);
2570
2571         if (whole_heap_check_before_collection) {
2572                 sgen_clear_nursery_fragments ();
2573                 sgen_check_whole_heap (finish_up_concurrent_mark);
2574         }
2575         if (consistency_check_at_minor_collection)
2576                 sgen_check_consistency ();
2577
2578         sgen_workers_start_all_workers ();
2579
2580         /*
2581          * Perform the sequential part of remembered set scanning.
2582          * This usually involves scanning global information that might later be produced by evacuation.
2583          */
2584         if (remset.begin_scan_remsets)
2585                 remset.begin_scan_remsets (sgen_get_nursery_start (), nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2586
2587         sgen_workers_start_marking ();
2588
2589         frssjd = sgen_alloc_internal_dynamic (sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2590         frssjd->heap_start = sgen_get_nursery_start ();
2591         frssjd->heap_end = nursery_next;
2592         sgen_workers_enqueue_job (job_finish_remembered_set_scan, frssjd);
2593
2594         /* we don't have a complete write barrier yet, so we scan all the old generation sections */
2595         TV_GETTIME (btv);
2596         time_minor_scan_remsets += TV_ELAPSED (atv, btv);
2597         SGEN_LOG (2, "Old generation scan: %d usecs", TV_ELAPSED (atv, btv));
2598
2599         MONO_GC_CHECKPOINT_4 (GENERATION_NURSERY);
2600
2601         if (!sgen_collection_is_parallel ()) {
2602                 ctx.scan_func = current_object_ops.scan_object;
2603                 ctx.copy_func = NULL;
2604                 ctx.queue = &gray_queue;
2605                 sgen_drain_gray_stack (-1, ctx);
2606         }
2607
2608         if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2609                 report_registered_roots ();
2610         if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2611                 report_finalizer_roots ();
2612         TV_GETTIME (atv);
2613         time_minor_scan_pinned += TV_ELAPSED (btv, atv);
2614
2615         MONO_GC_CHECKPOINT_5 (GENERATION_NURSERY);
2616
2617         /* registered roots, this includes static fields */
2618         scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2619         scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2620         scrrjd_normal->scan_func = current_object_ops.scan_object;
2621         scrrjd_normal->heap_start = sgen_get_nursery_start ();
2622         scrrjd_normal->heap_end = nursery_next;
2623         scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
2624         sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
2625
2626         scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2627         scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2628         scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
2629         scrrjd_wbarrier->heap_start = sgen_get_nursery_start ();
2630         scrrjd_wbarrier->heap_end = nursery_next;
2631         scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
2632         sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
2633
2634         TV_GETTIME (btv);
2635         time_minor_scan_registered_roots += TV_ELAPSED (atv, btv);
2636
2637         MONO_GC_CHECKPOINT_6 (GENERATION_NURSERY);
2638
2639         /* thread data */
2640         stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2641         stdjd->heap_start = sgen_get_nursery_start ();
2642         stdjd->heap_end = nursery_next;
2643         sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
2644
2645         TV_GETTIME (atv);
2646         time_minor_scan_thread_data += TV_ELAPSED (btv, atv);
2647         btv = atv;
2648
2649         MONO_GC_CHECKPOINT_7 (GENERATION_NURSERY);
2650
2651         g_assert (!sgen_collection_is_parallel () && !sgen_collection_is_concurrent ());
2652
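        /* Note: given the assertion above, the branch below can never be taken. */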
2653         if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ())
2654                 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2655
2656         /* Scan the list of objects ready for finalization. */
2657         sfejd_fin_ready = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2658         sfejd_fin_ready->list = fin_ready_list;
2659         sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_fin_ready);
2660
2661         sfejd_critical_fin = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2662         sfejd_critical_fin->list = critical_fin_list;
2663         sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_critical_fin);
2664
2665         MONO_GC_CHECKPOINT_8 (GENERATION_NURSERY);
2666
2667         finish_gray_stack (sgen_get_nursery_start (), nursery_next, GENERATION_NURSERY, &gray_queue);
2668         TV_GETTIME (atv);
2669         time_minor_finish_gray_stack += TV_ELAPSED (btv, atv);
2670         mono_profiler_gc_event (MONO_GC_EVENT_MARK_END, 0);
2671
2672         MONO_GC_CHECKPOINT_9 (GENERATION_NURSERY);
2673
2674         /*
2675          * The (single-threaded) finalization code might have done
2676          * some copying/marking so we can only reset the GC thread's
2677          * worker data here instead of earlier when we joined the
2678          * workers.
2679          */
2680         sgen_workers_reset_data ();
2681
2682         if (objects_pinned) {
2683                 sgen_optimize_pin_queue (0);
2684                 sgen_pinning_setup_section (nursery_section);
2685         }
2686
2687         /* walk the pin_queue, build up the fragment list of free memory, unmark
2688          * pinned objects as we go, memzero() the empty fragments so they are ready for the
2689          * next allocations.
2690          */
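        /*
         * If no fragments can be built the nursery has no free space left,
         * so we fall back to degraded mode and subsequent allocations go
         * directly to the major heap.
         */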
2691         mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_START, 0);
2692         fragment_total = sgen_build_nursery_fragments (nursery_section,
2693                         nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries,
2694                         unpin_queue);
2695         if (!fragment_total)
2696                 degraded_mode = 1;
2697
2698         /* Clear TLABs for all threads */
2699         sgen_clear_tlabs ();
2700
2701         mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_END, 0);
2702         TV_GETTIME (btv);
2703         time_minor_fragment_creation += TV_ELAPSED (atv, btv);
2704         SGEN_LOG (2, "Fragment creation: %d usecs, %lu bytes available", TV_ELAPSED (atv, btv), (unsigned long)fragment_total);
2705
2706         if (consistency_check_at_minor_collection)
2707                 sgen_check_major_refs ();
2708
2709         major_collector.finish_nursery_collection ();
2710
2711         TV_GETTIME (all_btv);
2712         gc_stats.minor_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
2713
2714         if (heap_dump_file)
2715                 dump_heap ("minor", stat_minor_gcs - 1, NULL);
2716
2717         /* prepare the pin queue for the next collection */
2718         sgen_finish_pinning ();
2719         if (fin_ready_list || critical_fin_list) {
2720                 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
2721                 mono_gc_finalize_notify ();
2722         }
2723         sgen_pin_stats_reset ();
2724         /* clear cemented hash */
2725         sgen_cement_clear_below_threshold ();
2726
2727         g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2728
2729         if (remset.finish_minor_collection)
2730                 remset.finish_minor_collection ();
2731
2732         check_scan_starts ();
2733
2734         binary_protocol_flush_buffers (FALSE);
2735
2736         sgen_memgov_minor_collection_end ();
2737
2738         /* Objects were pinned late because we ran out of memory, so a major collection is a good idea. */
2739         needs_major = objects_pinned > 0;
2740         current_collection_generation = -1;
2741         objects_pinned = 0;
2742
2743         MONO_GC_END (GENERATION_NURSERY);
2744         binary_protocol_collection_end (stat_minor_gcs - 1, GENERATION_NURSERY);
2745
2746         if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2747                 sgen_check_nursery_objects_pinned (unpin_queue != NULL);
2748
2749         return needs_major;
2750 }
2751
2752 static void
2753 scan_nursery_objects_callback (char *obj, size_t size, ScanCopyContext *ctx)
2754 {
2755         ctx->scan_func (obj, ctx->queue);
2756 }
2757
2758 static void
2759 scan_nursery_objects (ScanCopyContext ctx)
2760 {
2761         sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
2762                         (IterateObjectCallbackFunc)scan_nursery_objects_callback, (void*)&ctx, FALSE);
2763 }
2764
2765 static void
2766 major_copy_or_mark_from_roots (int *old_next_pin_slot, gboolean finish_up_concurrent_mark, gboolean scan_mod_union)
2767 {
2768         LOSObject *bigobj;
2769         TV_DECLARE (atv);
2770         TV_DECLARE (btv);
2771         /* FIXME: only use these values for the precise scan
2772          * note that to_space pointers should be excluded anyway...
2773          */
2774         char *heap_start = NULL;
2775         char *heap_end = (char*)-1;
2776         gboolean profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
2777         GCRootReport root_report = { 0 };
2778         ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2779         ScanThreadDataJobData *stdjd;
2780         ScanFinalizerEntriesJobData *sfejd_fin_ready, *sfejd_critical_fin;
2781         ScanCopyContext ctx;
2782
2783         if (concurrent_collection_in_progress) {
2784                 /* This cleans up unused fragments. */
2785                 sgen_nursery_allocator_prepare_for_pinning ();
2786
2787                 if (do_concurrent_checks)
2788                         check_nursery_is_clean ();
2789         } else {
2790                 /* The concurrent collector doesn't touch the nursery. */
2791                 sgen_nursery_alloc_prepare_for_major ();
2792         }
2793
2794         init_gray_queue ();
2795
2796         TV_GETTIME (atv);
2797
2798         /* Pinning depends on this */
2799         sgen_clear_nursery_fragments ();
2800
2801         if (whole_heap_check_before_collection)
2802                 sgen_check_whole_heap (finish_up_concurrent_mark);
2803
2804         TV_GETTIME (btv);
2805         time_major_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2806
2807         if (!sgen_collection_is_concurrent ())
2808                 nursery_section->next_data = sgen_get_nursery_end ();
2809         /* we should also coalesce scanning from sections close to each other
2810          * and deal with pointers outside of the sections later.
2811          */
2812
2813         objects_pinned = 0;
2814         *major_collector.have_swept = FALSE;
2815
2816         if (xdomain_checks) {
2817                 sgen_clear_nursery_fragments ();
2818                 check_for_xdomain_refs ();
2819         }
2820
2821         if (!concurrent_collection_in_progress) {
2822                 /* Remsets are not useful for a major collection */
2823                 remset.prepare_for_major_collection ();
2824         }
2825
2826         sgen_process_fin_stage_entries ();
2827         sgen_process_dislink_stage_entries ();
2828
2829         TV_GETTIME (atv);
2830         sgen_init_pinning ();
2831         SGEN_LOG (6, "Collecting pinned addresses");
2832         pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2833
2834         if (!concurrent_collection_in_progress || finish_up_concurrent_mark) {
2835                 if (major_collector.is_concurrent) {
2836                         /*
2837                          * The concurrent major collector cannot evict
2838                          * yet, so we need to pin cemented objects to
2839                          * not break some asserts.
2840                          *
2841                          * FIXME: We could evict now!
2842                          */
2843                         sgen_cement_iterate (pin_stage_object_callback, NULL);
2844                 }
2845
2846                 if (!concurrent_collection_in_progress)
2847                         sgen_cement_reset ();
2848         }
2849
2850         sgen_optimize_pin_queue (0);
2851
2852         /*
2853          * The concurrent collector doesn't move objects, neither on
2854          * the major heap nor in the nursery, so we can mark even
2855          * before pinning has finished.  For the non-concurrent
2856          * collector we start the workers after pinning.
2857          */
2858         if (concurrent_collection_in_progress) {
2859                 sgen_workers_start_all_workers ();
2860                 sgen_workers_start_marking ();
2861         }
2862
2863         /*
2864          * pin_queue now contains all candidate pointers, sorted and
2865          * uniqued.  We must do two passes now to figure out which
2866          * objects are pinned.
2867          *
2868          * The first is to find within the pin_queue the area for each
2869          * section.  This requires that the pin_queue be sorted.  We
2870          * also process the LOS objects and pinned chunks here.
2871          *
2872          * The second, destructive, pass is to reduce the section
2873          * areas to pointers to the actually pinned objects.
2874          */
2875         SGEN_LOG (6, "Pinning from sections");
2876         /* first pass for the sections */
2877         sgen_find_section_pin_queue_start_end (nursery_section);
2878         major_collector.find_pin_queue_start_ends (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2879         /* identify possible pointers to the inside of large objects */
2880         SGEN_LOG (6, "Pinning from large objects");
2881         for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
2882                 int dummy;
2883                 if (sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + sgen_los_object_size (bigobj), &dummy)) {
2884                         binary_protocol_pin (bigobj->data, (gpointer)LOAD_VTABLE (bigobj->data), safe_object_get_size (((MonoObject*)(bigobj->data))));
2885
2886 #ifdef ENABLE_DTRACE
2887                         if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
2888                                 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (bigobj->data);
2889                                 MONO_GC_OBJ_PINNED ((mword)bigobj->data, sgen_safe_object_get_size ((MonoObject*)bigobj->data), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
2890                         }
2891 #endif
2892
2893                         if (sgen_los_object_is_pinned (bigobj->data)) {
2894                                 g_assert (finish_up_concurrent_mark);
2895                                 continue;
2896                         }
2897                         sgen_los_pin_object (bigobj->data);
2898                         /* FIXME: only enqueue if object has references */
2899                         GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data);
2900                         if (G_UNLIKELY (do_pin_stats))
2901                                 sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
2902                         SGEN_LOG (6, "Marked large object %p (%s) size: %lu from roots", bigobj->data, safe_name (bigobj->data), (unsigned long)sgen_los_object_size (bigobj));
2903
2904                         if (profile_roots)
2905                                 add_profile_gc_root (&root_report, bigobj->data, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
2906                 }
2907         }
2908         if (profile_roots)
2909                 notify_gc_roots (&root_report);
2910         /* second pass for the sections */
2911         ctx.scan_func = concurrent_collection_in_progress ? current_object_ops.scan_object : NULL;
2912         ctx.copy_func = NULL;
2913         ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
2914
2915         /*
2916          * Concurrent mark never follows references into the nursery.
2917          * In the start and finish pauses we must scan live nursery
2918          * objects, though.  We could simply scan all nursery objects,
2919          * but that would be conservative.  The easiest way is to do a
2920          * nursery collection, which copies all live nursery objects
2921          * (except pinned ones, with the simple nursery) to the major
2922          * heap.  Scanning the mod union table later will then scan
2923          * those promoted objects, provided they're reachable.  Pinned
2924          * objects in the nursery - which we can trivially find in the
2925          * pinning queue - are treated as roots in the mark pauses.
2926          *
2927          * The split nursery complicates the latter part because
2928          * non-pinned objects can survive in the nursery.  That's why
2929          * we need to do a full front-to-back scan of the nursery,
2930          * marking all objects.
2931          *
2932          * Non-concurrent mark evacuates from the nursery, so it's
2933          * sufficient to just scan pinned nursery objects.
2934          */
2935         if (concurrent_collection_in_progress && sgen_minor_collector.is_split) {
2936                 scan_nursery_objects (ctx);
2937         } else {
2938                 sgen_pin_objects_in_section (nursery_section, ctx);
2939                 if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2940                         sgen_check_nursery_objects_pinned (!concurrent_collection_in_progress || finish_up_concurrent_mark);
2941         }
2942
2943         major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2944         if (old_next_pin_slot)
2945                 *old_next_pin_slot = sgen_get_pinned_count ();
2946
2947         TV_GETTIME (btv);
2948         time_major_pinning += TV_ELAPSED (atv, btv);
2949         SGEN_LOG (2, "Finding pinned pointers: %d in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (atv, btv));
2950         SGEN_LOG (4, "Start scan with %d pinned objects", sgen_get_pinned_count ());
2951
2952         major_collector.init_to_space ();
2953
2954 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2955         main_gc_thread = mono_native_thread_self ();
2956 #endif
2957
2958         if (!concurrent_collection_in_progress && major_collector.is_parallel) {
2959                 sgen_workers_start_all_workers ();
2960                 sgen_workers_start_marking ();
2961         }
2962
2963         if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2964                 report_registered_roots ();
2965         TV_GETTIME (atv);
2966         time_major_scan_pinned += TV_ELAPSED (btv, atv);
2967
2968         /* registered roots, this includes static fields */
2969         scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2970         scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2971         scrrjd_normal->scan_func = current_object_ops.scan_object;
2972         scrrjd_normal->heap_start = heap_start;
2973         scrrjd_normal->heap_end = heap_end;
2974         scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
2975         sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
2976
2977         scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2978         scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2979         scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
2980         scrrjd_wbarrier->heap_start = heap_start;
2981         scrrjd_wbarrier->heap_end = heap_end;
2982         scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
2983         sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
2984
2985         TV_GETTIME (btv);
2986         time_major_scan_registered_roots += TV_ELAPSED (atv, btv);
2987
2988         /* Threads */
2989         stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2990         stdjd->heap_start = heap_start;
2991         stdjd->heap_end = heap_end;
2992         sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
2993
2994         TV_GETTIME (atv);
2995         time_major_scan_thread_data += TV_ELAPSED (btv, atv);
2996
2997         TV_GETTIME (btv);
2998         time_major_scan_alloc_pinned += TV_ELAPSED (atv, btv);
2999
3000         if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
3001                 report_finalizer_roots ();
3002
3003         /* scan the list of objects ready for finalization */
3004         sfejd_fin_ready = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
3005         sfejd_fin_ready->list = fin_ready_list;
3006         sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_fin_ready);
3007
3008         sfejd_critical_fin = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
3009         sfejd_critical_fin->list = critical_fin_list;
3010         sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_critical_fin);
3011
3012         if (scan_mod_union) {
3013                 g_assert (finish_up_concurrent_mark);
3014
3015                 /* Mod union card table */
3016                 sgen_workers_enqueue_job (job_scan_major_mod_union_cardtable, NULL);
3017                 sgen_workers_enqueue_job (job_scan_los_mod_union_cardtable, NULL);
3018         }
3019
3020         TV_GETTIME (atv);
3021         time_major_scan_finalized += TV_ELAPSED (btv, atv);
3022         SGEN_LOG (2, "Root scan: %d usecs", TV_ELAPSED (btv, atv));
3023
3024         TV_GETTIME (btv);
3025         time_major_scan_big_objects += TV_ELAPSED (atv, btv);
3026
3027         if (concurrent_collection_in_progress) {
3028                 /* prepare the pin queue for the next collection */
3029                 sgen_finish_pinning ();
3030
3031                 sgen_pin_stats_reset ();
3032
3033                 if (do_concurrent_checks)
3034                         check_nursery_is_clean ();
3035         }
3036 }
3037
3038 static void
3039 major_start_collection (gboolean concurrent, int *old_next_pin_slot)
3040 {
3041         MONO_GC_BEGIN (GENERATION_OLD);
3042         binary_protocol_collection_begin (stat_major_gcs, GENERATION_OLD);
3043
3044         current_collection_generation = GENERATION_OLD;
3045 #ifndef DISABLE_PERFCOUNTERS
3046         mono_perfcounters->gc_collections1++;
3047 #endif
3048
3049         g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
3050
3051         if (concurrent) {
3052                 g_assert (major_collector.is_concurrent);
3053                 concurrent_collection_in_progress = TRUE;
3054
3055                 sgen_cement_concurrent_start ();
3056
3057                 current_object_ops = major_collector.major_concurrent_ops;
3058         } else {
3059                 current_object_ops = major_collector.major_ops;
3060         }
3061
3062         reset_pinned_from_failed_allocation ();
3063
3064         sgen_memgov_major_collection_start ();
3065
3066         //count_ref_nonref_objs ();
3067         //consistency_check ();
3068
3069         check_scan_starts ();
3070
3071         degraded_mode = 0;
3072         SGEN_LOG (1, "Start major collection %d", stat_major_gcs);
3073         stat_major_gcs++;
3074         gc_stats.major_gc_count ++;
3075
3076         if (major_collector.start_major_collection)
3077                 major_collector.start_major_collection ();
3078
3079         major_copy_or_mark_from_roots (old_next_pin_slot, FALSE, FALSE);
3080 }
3081
3082 static void
3083 wait_for_workers_to_finish (void)
3084 {
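        /*
         * Hand whatever remains in the main thread's gray queue over to the
         * workers so it is drained before we join them.
         */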
3085         if (concurrent_collection_in_progress || major_collector.is_parallel) {
3086                 gray_queue_redirect (&gray_queue);
3087                 sgen_workers_join ();
3088         }
3089
3090         g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3091
3092 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
3093         main_gc_thread = NULL;
3094 #endif
3095 }
3096
3097 static void
3098 major_finish_collection (const char *reason, int old_next_pin_slot, gboolean scan_mod_union)
3099 {
3100         LOSObject *bigobj, *prevbo;
3101         TV_DECLARE (atv);
3102         TV_DECLARE (btv);
3103         char *heap_start = NULL;
3104         char *heap_end = (char*)-1;
3105
3106         TV_GETTIME (btv);
3107
3108         if (concurrent_collection_in_progress || major_collector.is_parallel)
3109                 wait_for_workers_to_finish ();
3110
3111         if (concurrent_collection_in_progress) {
3112                 current_object_ops = major_collector.major_concurrent_ops;
3113
3114                 major_copy_or_mark_from_roots (NULL, TRUE, scan_mod_union);
3115                 wait_for_workers_to_finish ();
3116
3117                 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3118
3119                 if (do_concurrent_checks)
3120                         check_nursery_is_clean ();
3121         } else {
3122                 current_object_ops = major_collector.major_ops;
3123         }
3124
3125         /*
3126          * The workers have stopped so we need to finish gray queue
3127          * work that might result from finalization in the main GC
3128          * thread.  Redirection must therefore be turned off.
3129          */
3130         sgen_gray_object_queue_disable_alloc_prepare (&gray_queue);
3131         g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
3132
3133         /* all the objects in the heap */
3134         finish_gray_stack (heap_start, heap_end, GENERATION_OLD, &gray_queue);
3135         TV_GETTIME (atv);
3136         time_major_finish_gray_stack += TV_ELAPSED (btv, atv);
3137
3138         /*
3139          * The (single-threaded) finalization code might have done
3140          * some copying/marking so we can only reset the GC thread's
3141          * worker data here instead of earlier when we joined the
3142          * workers.
3143          */
3144         sgen_workers_reset_data ();
3145
3146         if (objects_pinned) {
3147                 g_assert (!concurrent_collection_in_progress);
3148
3149                 /* This is slow, but we just OOM'd. */
3150                 sgen_pin_queue_clear_discarded_entries (nursery_section, old_next_pin_slot);
3151                 sgen_optimize_pin_queue (0);
3152                 sgen_find_section_pin_queue_start_end (nursery_section);
3153                 objects_pinned = 0;
3154         }
3155
3156         reset_heap_boundaries ();
3157         sgen_update_heap_boundaries ((mword)sgen_get_nursery_start (), (mword)sgen_get_nursery_end ());
3158
3159         if (check_mark_bits_after_major_collection)
3160                 sgen_check_major_heap_marked ();
3161
3162         MONO_GC_SWEEP_BEGIN (GENERATION_OLD, !major_collector.sweeps_lazily);
3163
3164         /* sweep the big objects list */
3165         prevbo = NULL;
3166         for (bigobj = los_object_list; bigobj;) {
3167                 g_assert (!object_is_pinned (bigobj->data));
3168                 if (sgen_los_object_is_pinned (bigobj->data)) {
3169                         sgen_los_unpin_object (bigobj->data);
3170                         sgen_update_heap_boundaries ((mword)bigobj->data, (mword)bigobj->data + sgen_los_object_size (bigobj));
3171                 } else {
3172                         LOSObject *to_free;
3173                         /* not referenced anywhere, so we can free it */
3174                         if (prevbo)
3175                                 prevbo->next = bigobj->next;
3176                         else
3177                                 los_object_list = bigobj->next;
3178                         to_free = bigobj;
3179                         bigobj = bigobj->next;
3180                         sgen_los_free_object (to_free);
3181                         continue;
3182                 }
3183                 prevbo = bigobj;
3184                 bigobj = bigobj->next;
3185         }
3186
3187         TV_GETTIME (btv);
3188         time_major_free_bigobjs += TV_ELAPSED (atv, btv);
3189
3190         sgen_los_sweep ();
3191
3192         TV_GETTIME (atv);
3193         time_major_los_sweep += TV_ELAPSED (btv, atv);
3194
3195         major_collector.sweep ();
3196
3197         MONO_GC_SWEEP_END (GENERATION_OLD, !major_collector.sweeps_lazily);
3198
3199         TV_GETTIME (btv);
3200         time_major_sweep += TV_ELAPSED (atv, btv);
3201
3202         if (!concurrent_collection_in_progress) {
3203                 /* walk the pin_queue, build up the fragment list of free memory, unmark
3204                  * pinned objects as we go, memzero() the empty fragments so they are ready for the
3205                  * next allocations.
3206                  */
3207                 if (!sgen_build_nursery_fragments (nursery_section, nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries, NULL))
3208                         degraded_mode = 1;
3209
3210                 /* prepare the pin queue for the next collection */
3211                 sgen_finish_pinning ();
3212
3213                 /* Clear TLABs for all threads */
3214                 sgen_clear_tlabs ();
3215
3216                 sgen_pin_stats_reset ();
3217         }
3218
3219         if (concurrent_collection_in_progress)
3220                 sgen_cement_concurrent_finish ();
3221         sgen_cement_clear_below_threshold ();
3222
3223         TV_GETTIME (atv);
3224         time_major_fragment_creation += TV_ELAPSED (btv, atv);
3225
3226         if (heap_dump_file)
3227                 dump_heap ("major", stat_major_gcs - 1, reason);
3228
3229         if (fin_ready_list || critical_fin_list) {
3230                 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
3231                 mono_gc_finalize_notify ();
3232         }
3233
3234         g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3235
3236         sgen_memgov_major_collection_end ();
3237         current_collection_generation = -1;
3238
3239         major_collector.finish_major_collection ();
3240
3241         g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
3242
3243         if (concurrent_collection_in_progress)
3244                 concurrent_collection_in_progress = FALSE;
3245
3246         check_scan_starts ();
3247
3248         binary_protocol_flush_buffers (FALSE);
3249
3250         //consistency_check ();
3251
3252         MONO_GC_END (GENERATION_OLD);
3253         binary_protocol_collection_end (stat_major_gcs - 1, GENERATION_OLD);
3254 }
3255
3256 static gboolean
3257 major_do_collection (const char *reason)
3258 {
3259         TV_DECLARE (all_atv);
3260         TV_DECLARE (all_btv);
3261         int old_next_pin_slot;
3262
3263         if (major_collector.get_and_reset_num_major_objects_marked) {
3264                 long long num_marked = major_collector.get_and_reset_num_major_objects_marked ();
3265                 g_assert (!num_marked);
3266         }
3267
3268         /* world must be stopped already */
3269         TV_GETTIME (all_atv);
3270
3271         major_start_collection (FALSE, &old_next_pin_slot);
3272         major_finish_collection (reason, old_next_pin_slot, FALSE);
3273
3274         TV_GETTIME (all_btv);
3275         gc_stats.major_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
3276
3277         /* FIXME: also report this to the user, preferably in gc-end. */
3278         if (major_collector.get_and_reset_num_major_objects_marked)
3279                 major_collector.get_and_reset_num_major_objects_marked ();
3280
3281         return bytes_pinned_from_failed_allocation > 0;
3282 }
3283
3284 static gboolean major_do_collection (const char *reason);
3285
3286 static void
3287 major_start_concurrent_collection (const char *reason)
3288 {
3289         long long num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
3290
3291         g_assert (num_objects_marked == 0);
3292
3293         MONO_GC_CONCURRENT_START_BEGIN (GENERATION_OLD);
3294
3295         // FIXME: store reason and pass it when finishing
3296         major_start_collection (TRUE, NULL);
3297
3298         gray_queue_redirect (&gray_queue);
3299         sgen_workers_wait_for_jobs ();
3300
3301         num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
3302         MONO_GC_CONCURRENT_START_END (GENERATION_OLD, num_objects_marked);
3303
3304         current_collection_generation = -1;
3305 }
3306
3307 static gboolean
3308 major_update_or_finish_concurrent_collection (gboolean force_finish)
3309 {
3310         SgenGrayQueue unpin_queue;
3311         memset (&unpin_queue, 0, sizeof (unpin_queue));
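        /*
         * The nursery collection run below when finishing records every
         * object it pins into unpin_queue so that unpin_objects_from_queue ()
         * can undo that pinning once the major collection is done.
         */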
3312
3313         MONO_GC_CONCURRENT_UPDATE_FINISH_BEGIN (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3314
3315         g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3316
3317         major_collector.update_cardtable_mod_union ();
3318         sgen_los_update_cardtable_mod_union ();
3319
3320         if (!force_finish && !sgen_workers_all_done ()) {
3321                 MONO_GC_CONCURRENT_UPDATE_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3322                 return FALSE;
3323         }
3324
3325         collect_nursery (&unpin_queue, TRUE);
3326
3327         current_collection_generation = GENERATION_OLD;
3328         major_finish_collection ("finishing", -1, TRUE);
3329
3330         if (whole_heap_check_before_collection)
3331                 sgen_check_whole_heap (FALSE);
3332
3333         unpin_objects_from_queue (&unpin_queue);
3334         sgen_gray_object_queue_deinit (&unpin_queue);
3335
3336         MONO_GC_CONCURRENT_FINISH_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3337
3338         current_collection_generation = -1;
3339
3340         return TRUE;
3341 }
3342
3343 /*
3344  * Ensure an allocation request for @size will succeed by freeing enough memory.
3345  *
3346  * LOCKING: The GC lock MUST be held.
3347  */
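/*
 * Decision summary: requests larger than SGEN_MAX_SMALL_OBJ_SIZE go to the
 * LOS and can only be helped by a major collection; small requests trigger a
 * major collection when sgen_need_major_collection () says so and, outside of
 * degraded mode, a nursery collection otherwise.  If no collection is needed
 * but a concurrent collection has finished its marking work, we take the
 * opportunity to finish it.
 */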
3348 void
3349 sgen_ensure_free_space (size_t size)
3350 {
3351         int generation_to_collect = -1;
3352         const char *reason = NULL;
3353
3354
3355         if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
3356                 if (sgen_need_major_collection (size)) {
3357                         reason = "LOS overflow";
3358                         generation_to_collect = GENERATION_OLD;
3359                 }
3360         } else {
3361                 if (degraded_mode) {
3362                         if (sgen_need_major_collection (size)) {
3363                                 reason = "Degraded mode overflow";
3364                                 generation_to_collect = GENERATION_OLD;
3365                         }
3366                 } else if (sgen_need_major_collection (size)) {
3367                         reason = "Minor allowance";
3368                         generation_to_collect = GENERATION_OLD;
3369                 } else {
3370                         generation_to_collect = GENERATION_NURSERY;
3371                         reason = "Nursery full";                        
3372                 }
3373         }
3374
3375         if (generation_to_collect == -1) {
3376                 if (concurrent_collection_in_progress && sgen_workers_all_done ()) {
3377                         generation_to_collect = GENERATION_OLD;
3378                         reason = "Finish concurrent collection";
3379                 }
3380         }
3381
3382         if (generation_to_collect == -1)
3383                 return;
3384         sgen_perform_collection (size, generation_to_collect, reason, FALSE);
3385 }
3386
3387 void
3388 sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish)
3389 {
3390         TV_DECLARE (gc_end);
3391         GGTimingInfo infos [2];
3392         int overflow_generation_to_collect = -1;
3393         int oldest_generation_collected = generation_to_collect;
3394         const char *overflow_reason = NULL;
3395
3396         MONO_GC_REQUESTED (generation_to_collect, requested_size, wait_to_finish ? 1 : 0);
3397
3398         g_assert (generation_to_collect == GENERATION_NURSERY || generation_to_collect == GENERATION_OLD);
3399
3400         memset (infos, 0, sizeof (infos));
3401         mono_profiler_gc_event (MONO_GC_EVENT_START, generation_to_collect);
3402
3403         infos [0].generation = generation_to_collect;
3404         infos [0].reason = reason;
3405         infos [0].is_overflow = FALSE;
3406         TV_GETTIME (infos [0].total_time);
3407         infos [1].generation = -1;
3408
3409         sgen_stop_world (generation_to_collect);
3410
3411         if (concurrent_collection_in_progress) {
3412                 if (major_update_or_finish_concurrent_collection (wait_to_finish && generation_to_collect == GENERATION_OLD)) {
3413                         oldest_generation_collected = GENERATION_OLD;
3414                         goto done;
3415                 }
3416                 if (generation_to_collect == GENERATION_OLD)
3417                         goto done;
3418         } else {
3419                 if (generation_to_collect == GENERATION_OLD &&
3420                                 allow_synchronous_major &&
3421                                 major_collector.want_synchronous_collection &&
3422                                 *major_collector.want_synchronous_collection) {
3423                         wait_to_finish = TRUE;
3424                 }
3425         }
3426
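        /*
         * A collection can "overflow": if a nursery collection leaves objects
         * pinned for lack of space we follow up with a major collection, and
         * if a major collection had to pin because an allocation failed we
         * follow up with a nursery collection.  The overflow collection is
         * timed separately in infos [1].
         */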
3427         //FIXME extract overflow reason
3428         if (generation_to_collect == GENERATION_NURSERY) {
3429                 if (collect_nursery (NULL, FALSE)) {
3430                         overflow_generation_to_collect = GENERATION_OLD;
3431                         overflow_reason = "Minor overflow";
3432                 }
3433         } else {
3434                 if (major_collector.is_concurrent) {
3435                         g_assert (!concurrent_collection_in_progress);
3436                         if (!wait_to_finish)
3437                                 collect_nursery (NULL, FALSE);
3438                 }
3439
3440                 if (major_collector.is_concurrent && !wait_to_finish) {
3441                         major_start_concurrent_collection (reason);
3442                         // FIXME: set infos[0] properly
3443                         goto done;
3444                 } else {
3445                         if (major_do_collection (reason)) {
3446                                 overflow_generation_to_collect = GENERATION_NURSERY;
3447                                 overflow_reason = "Excessive pinning";
3448                         }
3449                 }
3450         }
3451
3452         TV_GETTIME (gc_end);
3453         infos [0].total_time = SGEN_TV_ELAPSED (infos [0].total_time, gc_end);
3454
3455
3456         if (!major_collector.is_concurrent && overflow_generation_to_collect != -1) {
3457                 mono_profiler_gc_event (MONO_GC_EVENT_START, overflow_generation_to_collect);
3458                 infos [1].generation = overflow_generation_to_collect;
3459                 infos [1].reason = overflow_reason;
3460                 infos [1].is_overflow = TRUE;
3461                 infos [1].total_time = gc_end;
3462
3463                 if (overflow_generation_to_collect == GENERATION_NURSERY)
3464                         collect_nursery (NULL, FALSE);
3465                 else
3466                         major_do_collection (overflow_reason);
3467
3468                 TV_GETTIME (gc_end);
3469                 infos [1].total_time = SGEN_TV_ELAPSED (infos [1].total_time, gc_end);
3470
3471                 /* keep events symmetric */
3472                 mono_profiler_gc_event (MONO_GC_EVENT_END, overflow_generation_to_collect);
3473
3474                 oldest_generation_collected = MAX (oldest_generation_collected, overflow_generation_to_collect);
3475         }
3476
3477         SGEN_LOG (2, "Heap size: %lu, LOS size: %lu", (unsigned long)mono_gc_get_heap_size (), (unsigned long)los_memory_usage);
3478
3479         /* this also sets the proper pointers for the next allocation */
3480         if (generation_to_collect == GENERATION_NURSERY && !sgen_can_alloc_size (requested_size)) {
3481                 /* TypeBuilder and MonoMethod are killing mcs with fragmentation */
3482                 SGEN_LOG (1, "nursery collection didn't find enough room for %zd alloc (%d pinned)", requested_size, sgen_get_pinned_count ());
3483                 sgen_dump_pin_queue ();
3484                 degraded_mode = 1;
3485         }
3486
3487  done:
3488         g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3489
3490         sgen_restart_world (oldest_generation_collected, infos);
3491
3492         mono_profiler_gc_event (MONO_GC_EVENT_END, generation_to_collect);
3493 }
3494
3495 /*
3496  * ######################################################################
3497  * ########  Memory allocation from the OS
3498  * ######################################################################
3499  * This section of code deals with getting memory from the OS and
3500  * allocating memory for GC-internal data structures.
3501  * Internal memory can be handled with a freelist for small objects.
3502  */
3503
3504 /*
3505  * Debug reporting.
3506  */
3507 G_GNUC_UNUSED static void
3508 report_internal_mem_usage (void)
3509 {
3510         printf ("Internal memory usage:\n");
3511         sgen_report_internal_mem_usage ();
3512         printf ("Pinned memory usage:\n");
3513         major_collector.report_pinned_memory_usage ();
3514 }
3515
3516 /*
3517  * ######################################################################
3518  * ########  Finalization support
3519  * ######################################################################
3520  */
3521
3522 static inline gboolean
3523 sgen_major_is_object_alive (void *object)
3524 {
3525         mword objsize;
3526
3527         /* Oldgen objects can be pinned and forwarded too */
3528         if (SGEN_OBJECT_IS_PINNED (object) || SGEN_OBJECT_IS_FORWARDED (object))
3529                 return TRUE;
3530
3531         /*
3532          * FIXME: major_collector.is_object_live() also calculates the
3533          * size.  Avoid the double calculation.
3534          */
3535         objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)object));
3536         if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
3537                 return sgen_los_object_is_pinned (object);
3538
3539         return major_collector.is_object_live (object);
3540 }
3541
3542 /*
3543  * If the object has been forwarded it means it's still referenced from a root. 
3544  * If it is pinned it's still alive as well.
3545  * A LOS object is only alive if we have pinned it.
3546  * Return TRUE if @object is still alive.
3547  */
3548 static inline gboolean
3549 sgen_is_object_alive (void *object)
3550 {
3551         if (ptr_in_nursery (object))
3552                 return sgen_nursery_is_object_alive (object);
3553
3554         return sgen_major_is_object_alive (object);
3555 }
3556
3557 /*
3558  * This function returns true if @object is either alive or it belongs to the old gen
3559  * and we're currently doing a minor collection.
3560  */
3561 static inline int
3562 sgen_is_object_alive_for_current_gen (char *object)
3563 {
3564         if (ptr_in_nursery (object))
3565                 return sgen_nursery_is_object_alive (object);
3566
3567         if (current_collection_generation == GENERATION_NURSERY)
3568                 return TRUE;
3569
3570         return sgen_major_is_object_alive (object);
3571 }
3572
3573 /*
3574  * This function returns true if @object is alive and belongs to the current
3575  * collection: major collections are full heap, so old gen objects are never
3576  * alive during a minor collection.
3577  */
3578 static inline int
3579 sgen_is_object_alive_and_on_current_collection (char *object)
3580 {
3581         if (ptr_in_nursery (object))
3582                 return sgen_nursery_is_object_alive (object);
3583
3584         if (current_collection_generation == GENERATION_NURSERY)
3585                 return FALSE;
3586
3587         return sgen_major_is_object_alive (object);
3588 }
3589
3590
3591 gboolean
3592 sgen_gc_is_object_ready_for_finalization (void *object)
3593 {
3594         return !sgen_is_object_alive (object);
3595 }
3596
3597 static gboolean
3598 has_critical_finalizer (MonoObject *obj)
3599 {
3600         MonoClass *class;
3601
3602         if (!mono_defaults.critical_finalizer_object)
3603                 return FALSE;
3604
3605         class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;
3606
3607         return mono_class_has_parent_fast (class, mono_defaults.critical_finalizer_object);
3608 }
3609
3610 void
3611 sgen_queue_finalization_entry (MonoObject *obj)
3612 {
3613         FinalizeReadyEntry *entry = sgen_alloc_internal (INTERNAL_MEM_FINALIZE_READY_ENTRY);
3614         gboolean critical = has_critical_finalizer (obj);
3615         entry->object = obj;
3616         if (critical) {
3617                 entry->next = critical_fin_list;
3618                 critical_fin_list = entry;
3619         } else {
3620                 entry->next = fin_ready_list;
3621                 fin_ready_list = entry;
3622         }
3623
3624 #ifdef ENABLE_DTRACE
3625         if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) {
3626                 int gen = sgen_ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD;
3627                 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
3628                 MONO_GC_FINALIZE_ENQUEUE ((mword)obj, sgen_safe_object_get_size (obj),
3629                                 vt->klass->name_space, vt->klass->name, gen, critical);
3630         }
3631 #endif
3632 }
3633
3634 gboolean
3635 sgen_object_is_live (void *obj)
3636 {
3637         return sgen_is_object_alive_and_on_current_collection (obj);
3638 }
3639
3640 /* LOCKING: requires that the GC lock is held */
3641 static void
3642 null_ephemerons_for_domain (MonoDomain *domain)
3643 {
3644         EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3645
3646         while (current) {
3647                 MonoObject *object = (MonoObject*)current->array;
3648
3649                 if (object && !object->vtable) {
3650                         EphemeronLinkNode *tmp = current;
3651
3652                         if (prev)
3653                                 prev->next = current->next;
3654                         else
3655                                 ephemeron_list = current->next;
3656
3657                         current = current->next;
3658                         sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
3659                 } else {
3660                         prev = current;
3661                         current = current->next;
3662                 }
3663         }
3664 }
3665
3666 /* LOCKING: requires that the GC lock is held */
3667 static void
3668 clear_unreachable_ephemerons (ScanCopyContext ctx)
3669 {
3670         CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3671         GrayQueue *queue = ctx.queue;
3672         EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3673         MonoArray *array;
3674         Ephemeron *cur, *array_end;
3675         char *tombstone;
3676
3677         while (current) {
3678                 char *object = current->array;
3679
3680                 if (!sgen_is_object_alive_for_current_gen (object)) {
3681                         EphemeronLinkNode *tmp = current;
3682
3683                         SGEN_LOG (5, "Dead Ephemeron array at %p", object);
3684
3685                         if (prev)
3686                                 prev->next = current->next;
3687                         else
3688                                 ephemeron_list = current->next;
3689
3690                         current = current->next;
3691                         sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
3692
3693                         continue;
3694                 }
3695
3696                 copy_func ((void**)&object, queue);
3697                 current->array = object;
3698
3699                 SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", object);
3700
3701                 array = (MonoArray*)object;
3702                 cur = mono_array_addr (array, Ephemeron, 0);
3703                 array_end = cur + mono_array_length_fast (array);
3704                 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3705
3706                 for (; cur < array_end; ++cur) {
3707                         char *key = (char*)cur->key;
3708
3709                         if (!key || key == tombstone)
3710                                 continue;
3711
3712                         SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3713                                 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3714                                 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
3715
3716                         if (!sgen_is_object_alive_for_current_gen (key)) {
3717                                 cur->key = tombstone;
3718                                 cur->value = NULL;
3719                                 continue;
3720                         }
3721                 }
3722                 prev = current;
3723                 current = current->next;
3724         }
3725 }
3726
3727 /*
3728 LOCKING: requires that the GC lock is held
3729
3730 Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
3731 */
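/*
 * The return value is 1 ("nothing marked") when this pass did not mark any
 * previously-unreachable value, which lets the caller iterate until the
 * ephemeron key/value closure reaches a fixpoint.
 */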
3732 static int
3733 mark_ephemerons_in_range (ScanCopyContext ctx)
3734 {
3735         CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3736         GrayQueue *queue = ctx.queue;
3737         int nothing_marked = 1;
3738         EphemeronLinkNode *current = ephemeron_list;
3739         MonoArray *array;
3740         Ephemeron *cur, *array_end;
3741         char *tombstone;
3742
3743         for (current = ephemeron_list; current; current = current->next) {
3744                 char *object = current->array;
3745                 SGEN_LOG (5, "Ephemeron array at %p", object);
3746
3747                 /*It has to be alive*/
3748                 if (!sgen_is_object_alive_for_current_gen (object)) {
3749                         SGEN_LOG (5, "\tnot reachable");
3750                         continue;
3751                 }
3752
3753                 copy_func ((void**)&object, queue);
3754
3755                 array = (MonoArray*)object;
3756                 cur = mono_array_addr (array, Ephemeron, 0);
3757                 array_end = cur + mono_array_length_fast (array);
3758                 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3759
3760                 for (; cur < array_end; ++cur) {
3761                         char *key = cur->key;
3762
3763                         if (!key || key == tombstone)
3764                                 continue;
3765
3766                         SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3767                                 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3768                                 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
3769
3770                         if (sgen_is_object_alive_for_current_gen (key)) {
3771                                 char *value = cur->value;
3772
3773                                 copy_func ((void**)&cur->key, queue);
3774                                 if (value) {
3775                                         if (!sgen_is_object_alive_for_current_gen (value))
3776                                                 nothing_marked = 0;
3777                                         copy_func ((void**)&cur->value, queue);
3778                                 }
3779                         }
3780                 }
3781         }
3782
3783         SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
3784         return nothing_marked;
3785 }
3786
3787 int
3788 mono_gc_invoke_finalizers (void)
3789 {
3790         FinalizeReadyEntry *entry = NULL;
3791         gboolean entry_is_critical = FALSE;
3792         int count = 0;
3793         void *obj;
3794         /* FIXME: batch to reduce lock contention */
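        /*
         * One object is finalized per loop iteration: the GC lock is held
         * only while the ready lists are manipulated and is released before
         * mono_gc_run_finalize () is called, so finalizers run without the
         * GC lock held.
         */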
3795         while (fin_ready_list || critical_fin_list) {
3796                 LOCK_GC;
3797
3798                 if (entry) {
3799                         FinalizeReadyEntry **list = entry_is_critical ? &critical_fin_list : &fin_ready_list;
3800
3801                         /* We finalized this entry in the last
3802                            iteration, so now we need to remove it
3803                            from the list. */
3804                         if (*list == entry)
3805                                 *list = entry->next;
3806                         else {
3807                                 FinalizeReadyEntry *e = *list;
3808                                 while (e->next != entry)
3809                                         e = e->next;
3810                                 e->next = entry->next;
3811                         }
3812                         sgen_free_internal (entry, INTERNAL_MEM_FINALIZE_READY_ENTRY);
3813                         entry = NULL;
3814                 }
3815
3816                 /* Now look for the first non-null entry. */
3817                 for (entry = fin_ready_list; entry && !entry->object; entry = entry->next)
3818                         ;
3819                 if (entry) {
3820                         entry_is_critical = FALSE;
3821                 } else {
3822                         entry_is_critical = TRUE;
3823                         for (entry = critical_fin_list; entry && !entry->object; entry = entry->next)
3824                                 ;
3825                 }
3826
3827                 if (entry) {
3828                         g_assert (entry->object);
3829                         num_ready_finalizers--;
3830                         obj = entry->object;
3831                         entry->object = NULL;
3832                         SGEN_LOG (7, "Finalizing object %p (%s)", obj, safe_name (obj));
3833                 }
3834
3835                 UNLOCK_GC;
3836
3837                 if (!entry)
3838                         break;
3839
3840                 g_assert (entry->object == NULL);
3841                 count++;
3842                 /* the object is on the stack so it is pinned */
3843                 /*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
3844                 mono_gc_run_finalize (obj, NULL);
3845         }
3846         g_assert (!entry);
3847         return count;
3848 }
3849
3850 gboolean
3851 mono_gc_pending_finalizers (void)
3852 {
3853         return fin_ready_list || critical_fin_list;
3854 }
3855
3856 /*
3857  * ######################################################################
3858  * ########  registered roots support
3859  * ######################################################################
3860  */
3861
3862 /*
3863  * We do not coalesce roots.
3864  */
3865 static int
3866 mono_gc_register_root_inner (char *start, size_t size, void *descr, int root_type)
3867 {
3868         RootRecord new_root;
3869         int i;
3870         LOCK_GC;
3871         for (i = 0; i < ROOT_TYPE_NUM; ++i) {
3872                 RootRecord *root = sgen_hash_table_lookup (&roots_hash [i], start);
3873                 /* we allow changing the size and the descriptor (for thread statics etc) */
3874                 if (root) {
3875                         size_t old_size = root->end_root - start;
3876                         root->end_root = start + size;
3877                         g_assert (((root->root_desc != 0) && (descr != NULL)) ||
3878                                           ((root->root_desc == 0) && (descr == NULL)));
3879                         root->root_desc = (mword)descr;
3880                         roots_size += size;
3881                         roots_size -= old_size;
3882                         UNLOCK_GC;
3883                         return TRUE;
3884                 }
3885         }
3886
3887         new_root.end_root = start + size;
3888         new_root.root_desc = (mword)descr;
3889
3890         sgen_hash_table_replace (&roots_hash [root_type], start, &new_root, NULL);
3891         roots_size += size;
3892
3893         SGEN_LOG (3, "Added root for range: %p-%p, descr: %p  (%d/%d bytes)", start, new_root.end_root, descr, (int)size, (int)roots_size);
3894
3895         UNLOCK_GC;
3896         return TRUE;
3897 }
3898
3899 int
3900 mono_gc_register_root (char *start, size_t size, void *descr)
3901 {
3902         return mono_gc_register_root_inner (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED);
3903 }
3904
3905 int
3906 mono_gc_register_root_wbarrier (char *start, size_t size, void *descr)
3907 {
3908         return mono_gc_register_root_inner (start, size, descr, ROOT_TYPE_WBARRIER);
3909 }
3910
3911 void
3912 mono_gc_deregister_root (char* addr)
3913 {
3914         int root_type;
3915         RootRecord root;
3916
3917         LOCK_GC;
3918         for (root_type = 0; root_type < ROOT_TYPE_NUM; ++root_type) {
3919                 if (sgen_hash_table_remove (&roots_hash [root_type], addr, &root))
3920                         roots_size -= (root.end_root - addr);
3921         }
3922         UNLOCK_GC;
3923 }
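/*
 * Illustrative sketch, not part of the runtime: registering a block of
 * runtime-internal object pointers as a root and dropping it again.  The
 * array and function names are hypothetical; passing a NULL descriptor makes
 * mono_gc_register_root () register the range as a pinned root.
 */
static MonoObject *example_static_refs [16];

static void
example_register_static_roots (void)
{
        mono_gc_register_root ((char*)example_static_refs, sizeof (example_static_refs), NULL);
}

static void
example_unregister_static_roots (void)
{
        mono_gc_deregister_root ((char*)example_static_refs);
}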
3924
3925 /*
3926  * ######################################################################
3927  * ########  Thread handling (stop/start code)
3928  * ######################################################################
3929  */
3930
3931 unsigned int sgen_global_stop_count = 0;
3932
3933 void
3934 sgen_fill_thread_info_for_suspend (SgenThreadInfo *info)
3935 {
3936         if (remset.fill_thread_info_for_suspend)
3937                 remset.fill_thread_info_for_suspend (info);
3938 }
3939
3940 int
3941 sgen_get_current_collection_generation (void)
3942 {
3943         return current_collection_generation;
3944 }
3945
3946 void
3947 mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
3948 {
3949         gc_callbacks = *callbacks;
3950 }
3951
3952 MonoGCCallbacks *
3953 mono_gc_get_gc_callbacks ()
3954 {
3955         return &gc_callbacks;
3956 }
3957
3958 /* Variables holding start/end nursery so it won't have to be passed at every call */
3959 static void *scan_area_arg_start, *scan_area_arg_end;
3960
3961 void
3962 mono_gc_conservatively_scan_area (void *start, void *end)
3963 {
3964         conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
3965 }
3966
3967 void*
3968 mono_gc_scan_object (void *obj)
3969 {
3970         UserCopyOrMarkData *data = mono_native_tls_get_value (user_copy_or_mark_key);
3971         current_object_ops.copy_or_mark_object (&obj, data->queue);
3972         return obj;
3973 }
3974
3975 /*
3976  * Mark from thread stacks and registers.
3977  */
3978 static void
3979 scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue)
3980 {
3981         SgenThreadInfo *info;
3982
3983         scan_area_arg_start = start_nursery;
3984         scan_area_arg_end = end_nursery;
3985
3986         FOREACH_THREAD (info) {
3987                 if (info->skip) {
3988                         SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3989                         continue;
3990                 }
3991                 if (info->gc_disabled) {
3992                         SGEN_LOG (3, "GC disabled for thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3993                         continue;
3994                 }
3995
3996                 if (!info->joined_stw) {
3997                         SGEN_LOG (3, "Skipping thread not seen in STW %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3998                         continue;
3999                 }
4000                 
4001                 SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %td, pinned=%d", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, sgen_get_pinned_count ());
4002                 if (!info->thread_is_dying) {
4003                         if (gc_callbacks.thread_mark_func && !conservative_stack_mark) {
4004                                 UserCopyOrMarkData data = { NULL, queue };
4005                                 set_user_copy_or_mark_data (&data);
4006                                 gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise);
4007                                 set_user_copy_or_mark_data (NULL);
4008                         } else if (!precise) {
4009                                 if (!conservative_stack_mark) {
4010                                         fprintf (stderr, "Precise stack mark not supported - disabling.\n");
4011                                         conservative_stack_mark = TRUE;
4012                                 }
4013                                 conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
4014                         }
4015                 }
4016
4017                 if (!info->thread_is_dying && !precise) {
4018 #ifdef USE_MONO_CTX
4019                         conservatively_pin_objects_from ((void**)&info->ctx, (void**)&info->ctx + ARCH_NUM_REGS,
4020                                 start_nursery, end_nursery, PIN_TYPE_STACK);
4021 #else
4022                         conservatively_pin_objects_from (&info->regs, &info->regs + ARCH_NUM_REGS,
4023                                         start_nursery, end_nursery, PIN_TYPE_STACK);
4024 #endif
4025                 }
4026         } END_FOREACH_THREAD
4027 }
4028
4029 static gboolean
4030 ptr_on_stack (void *ptr)
4031 {
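        /* The address of the local `stack_start' below approximates the
         * current stack pointer: anything between it and info->stack_end is
         * taken to be on the current thread's stack. */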
4032         gpointer stack_start = &stack_start;
4033         SgenThreadInfo *info = mono_thread_info_current ();
4034
4035         if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
4036                 return TRUE;
4037         return FALSE;
4038 }
4039
4040 static void*
4041 sgen_thread_register (SgenThreadInfo* info, void *addr)
4042 {
4043         LOCK_GC;
4044 #ifndef HAVE_KW_THREAD
4045         info->tlab_start = info->tlab_next = info->tlab_temp_end = info->tlab_real_end = NULL;
4046
4047         g_assert (!mono_native_tls_get_value (thread_info_key));
4048         mono_native_tls_set_value (thread_info_key, info);
4049 #else
4050         sgen_thread_info = info;
4051 #endif
4052
4053 #if !defined(__MACH__)
4054         info->stop_count = -1;
4055         info->signal = 0;
4056 #endif
4057         info->skip = 0;
4058         info->joined_stw = FALSE;
4059         info->doing_handshake = FALSE;
4060         info->thread_is_dying = FALSE;
4061         info->stack_start = NULL;
4062         info->stopped_ip = NULL;
4063         info->stopped_domain = NULL;
4064 #ifdef USE_MONO_CTX
4065         memset (&info->ctx, 0, sizeof (MonoContext));
4066 #else
4067         memset (&info->regs, 0, sizeof (info->regs));
4068 #endif
4069
4070         sgen_init_tlab_info (info);
4071
4072         binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));
4073
4074         /* try to get it with attributes first */
4075 #if (defined(HAVE_PTHREAD_GETATTR_NP) || defined(HAVE_PTHREAD_ATTR_GET_NP)) && defined(HAVE_PTHREAD_ATTR_GETSTACK)
4076   {
4077      size_t size;
4078      void *sstart;
4079      pthread_attr_t attr;
4080
4081 #if defined(HAVE_PTHREAD_GETATTR_NP)
4082     /* Linux */
4083     pthread_getattr_np (pthread_self (), &attr);
4084 #elif defined(HAVE_PTHREAD_ATTR_GET_NP)
4085     /* BSD */
4086     pthread_attr_init (&attr);
4087     pthread_attr_get_np (pthread_self (), &attr);
4088 #else
4089 #error Cannot determine which API is needed to retrieve pthread attributes.
4090 #endif
4091
4092      pthread_attr_getstack (&attr, &sstart, &size);
4093      info->stack_start_limit = sstart;
4094      info->stack_end = (char*)sstart + size;
4095      pthread_attr_destroy (&attr);
4096   }
4097 #elif defined(HAVE_PTHREAD_GET_STACKSIZE_NP) && defined(HAVE_PTHREAD_GET_STACKADDR_NP)
4098                  info->stack_end = (char*)pthread_get_stackaddr_np (pthread_self ());
4099                  info->stack_start_limit = (char*)info->stack_end - pthread_get_stacksize_np (pthread_self ());
4100 #else
4101         {
4102                 /* FIXME: we assume the stack grows down */
4103                 gsize stack_bottom = (gsize)addr;
4104                 stack_bottom += 4095;
4105                 stack_bottom &= ~4095;
4106                 info->stack_end = (char*)stack_bottom;
4107         }
4108 #endif
4109
4110 #ifdef HAVE_KW_THREAD
4111         stack_end = info->stack_end;
4112 #endif
4113
4114         if (remset.register_thread)
4115                 remset.register_thread (info);
4116
4117         SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->stack_end);
4118
4119         if (gc_callbacks.thread_attach_func)
4120                 info->runtime_data = gc_callbacks.thread_attach_func ();
4121
4122         UNLOCK_GC;
4123         return info;
4124 }
4125
4126 static void
4127 sgen_wbarrier_cleanup_thread (SgenThreadInfo *p)
4128 {
4129         if (remset.cleanup_thread)
4130                 remset.cleanup_thread (p);
4131 }
4132
4133 static void
4134 sgen_thread_unregister (SgenThreadInfo *p)
4135 {
4136         /* If a delegate is passed to native code and invoked on a thread we don't
4137          * know about, the jit will register it with mono_jit_thread_attach, but
4138          * we have no way of knowing when that thread goes away.  SGen has a TSD
4139          * so we assume that if the domain is still registered, we can detach
4140          * the thread.
4141          */
4142         if (mono_domain_get ())
4143                 mono_thread_detach (mono_thread_current ());
4144
4145         p->thread_is_dying = TRUE;
4146
4147         /*
4148         There is a race condition between a thread finishing executing and being removed
4149         from the GC thread set.
4150         This happens on posix systems when TLS data is being cleaned up: libpthread will
4151         set the thread_info slot to NULL before calling the cleanup function. This
4152         opens a window in which the thread is registered but has a NULL TLS.
4153
4154         The suspend signal handler needs TLS data to know where to store thread state
4155         data or otherwise it will simply ignore the thread.
4156
4157         This solution works because the thread doing STW will wait until all threads being
4158         suspended have handshaked back, so there is no race between the doing_handshake test
4159         and the suspend_thread call.
4160
4161         This is not required on systems that do synchronous STW as those can deal with
4162         the above race at suspend time.
4163
4164         FIXME: I believe we could avoid this by using mono_thread_info_lookup when
4165         mono_thread_info_current returns NULL. Or fix mono_thread_info_lookup to do so.
4166         */
4167 #if (defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED) || !defined(HAVE_PTHREAD_KILL)
4168         LOCK_GC;
4169 #else
4170         while (!TRYLOCK_GC) {
4171                 if (!sgen_park_current_thread_if_doing_handshake (p))
4172                         g_usleep (50);
4173         }
4174         MONO_GC_LOCKED ();
4175 #endif
4176
4177         binary_protocol_thread_unregister ((gpointer)mono_thread_info_get_tid (p));
4178         SGEN_LOG (3, "unregister thread %p (%p)", p, (gpointer)mono_thread_info_get_tid (p));
4179
4180         if (gc_callbacks.thread_detach_func) {
4181                 gc_callbacks.thread_detach_func (p->runtime_data);
4182                 p->runtime_data = NULL;
4183         }
4184         sgen_wbarrier_cleanup_thread (p);
4185
4186         mono_threads_unregister_current_thread (p);
4187         UNLOCK_GC;
4188 }
4189
4190
4191 static void
4192 sgen_thread_attach (SgenThreadInfo *info)
4193 {
4194         LOCK_GC;
4195         /*this is odd, can we get attached before the gc is inited?*/
4196         init_stats ();
4197         UNLOCK_GC;
4198         
4199         if (gc_callbacks.thread_attach_func && !info->runtime_data)
4200                 info->runtime_data = gc_callbacks.thread_attach_func ();
4201 }
4202 gboolean
4203 mono_gc_register_thread (void *baseptr)
4204 {
4205         return mono_thread_info_attach (baseptr) != NULL;
4206 }
4207
4208 /*
4209  * mono_gc_set_stack_end:
4210  *
4211  *   Set the end of the current thread's stack to STACK_END. The stack space between
4212  * STACK_END and the real end of the thread's stack will not be scanned during collections.
4213  */
4214 void
4215 mono_gc_set_stack_end (void *stack_end)
4216 {
4217         SgenThreadInfo *info;
4218
4219         LOCK_GC;
4220         info = mono_thread_info_current ();
4221         if (info) {
4222                 g_assert (stack_end < info->stack_end);
4223                 info->stack_end = stack_end;
4224         }
4225         UNLOCK_GC;
4226 }
4227
4228 #if USE_PTHREAD_INTERCEPT
4229
4230
4231 int
4232 mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
4233 {
4234         return pthread_create (new_thread, attr, start_routine, arg);
4235 }
4236
4237 int
4238 mono_gc_pthread_join (pthread_t thread, void **retval)
4239 {
4240         return pthread_join (thread, retval);
4241 }
4242
4243 int
4244 mono_gc_pthread_detach (pthread_t thread)
4245 {
4246         return pthread_detach (thread);
4247 }
4248
4249 void
4250 mono_gc_pthread_exit (void *retval) 
4251 {
4252         pthread_exit (retval);
4253 }
4254
4255 #endif /* USE_PTHREAD_INTERCEPT */
4256
4257 /*
4258  * ######################################################################
4259  * ########  Write barriers
4260  * ######################################################################
4261  */
4262
4263 /*
4264  * Note: the write barriers first do the needed GC work and then do the actual store:
4265  * this way the value is visible to the conservative GC scan after the write barrier
4266  * itself. If a GC interrupts the barrier in the middle, the value will be kept alive by
4267  * the conservative scan, otherwise by the remembered set scan.
4268  */
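/*
 * Illustrative sketch, not part of the runtime: callers hand the location and
 * the new value to the barrier and let it perform the store, rather than
 * writing the slot first and invoking a barrier afterwards.  The function and
 * parameter names below are hypothetical.
 */
static void
example_store_object_field (MonoObject *obj, MonoObject **field, MonoObject *value)
{
        /* not "*field = value;" followed by a barrier */
        mono_gc_wbarrier_set_field (obj, field, value);
}

static void
example_store_array_slot (MonoArray *arr, MonoObject **slot, MonoObject *value)
{
        mono_gc_wbarrier_set_arrayref (arr, slot, value);
}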
4269 void
4270 mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
4271 {
4272         HEAVY_STAT (++stat_wbarrier_set_field);
4273         if (ptr_in_nursery (field_ptr)) {
4274                 *(void**)field_ptr = value;
4275                 return;
4276         }
4277         SGEN_LOG (8, "Adding remset at %p", field_ptr);
4278         if (value)
4279                 binary_protocol_wbarrier (field_ptr, value, value->vtable);
4280
4281         remset.wbarrier_set_field (obj, field_ptr, value);
4282 }
4283
4284 void
4285 mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
4286 {
4287         HEAVY_STAT (++stat_wbarrier_set_arrayref);
4288         if (ptr_in_nursery (slot_ptr)) {
4289                 *(void**)slot_ptr = value;
4290                 return;
4291         }
4292         SGEN_LOG (8, "Adding remset at %p", slot_ptr);
4293         if (value)
4294                 binary_protocol_wbarrier (slot_ptr, value, value->vtable);
4295
4296         remset.wbarrier_set_arrayref (arr, slot_ptr, value);
4297 }
4298
4299 void
4300 mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
4301 {
4302         HEAVY_STAT (++stat_wbarrier_arrayref_copy);
4303         /*This check can be done without taking a lock since dest_ptr array is pinned*/
4304         if (ptr_in_nursery (dest_ptr) || count <= 0) {
4305                 mono_gc_memmove (dest_ptr, src_ptr, count * sizeof (gpointer));
4306                 return;
4307         }
4308
4309 #ifdef SGEN_BINARY_PROTOCOL
4310         {
4311                 int i;
4312                 for (i = 0; i < count; ++i) {
4313                         gpointer dest = (gpointer*)dest_ptr + i;
4314                         gpointer obj = *((gpointer*)src_ptr + i);
4315                         if (obj)
4316                                 binary_protocol_wbarrier (dest, obj, (gpointer)LOAD_VTABLE (obj));
4317                 }
4318         }
4319 #endif
4320
4321         remset.wbarrier_arrayref_copy (dest_ptr, src_ptr, count);
4322 }
4323
4324 static char *found_obj;
4325
4326 static void
4327 find_object_for_ptr_callback (char *obj, size_t size, void *user_data)
4328 {
4329         char *ptr = user_data;
4330
4331         if (ptr >= obj && ptr < obj + size) {
4332                 g_assert (!found_obj);
4333                 found_obj = obj;
4334         }
4335 }
4336
4337 /* for use in the debugger */
4338 char* find_object_for_ptr (char *ptr);
4339 char*
4340 find_object_for_ptr (char *ptr)
4341 {
4342         if (ptr >= nursery_section->data && ptr < nursery_section->end_data) {
4343                 found_obj = NULL;
4344                 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
4345                                 find_object_for_ptr_callback, ptr, TRUE);
4346                 if (found_obj)
4347                         return found_obj;
4348         }
4349
4350         found_obj = NULL;
4351         sgen_los_iterate_objects (find_object_for_ptr_callback, ptr);
4352         if (found_obj)
4353                 return found_obj;
4354
4355         /*
4356          * Very inefficient, but this is debugging code, supposed to
4357          * be called from gdb, so we don't care.
4358          */
4359         found_obj = NULL;
4360         major_collector.iterate_objects (TRUE, TRUE, find_object_for_ptr_callback, ptr);
4361         return found_obj;
4362 }
4363
4364 void
4365 mono_gc_wbarrier_generic_nostore (gpointer ptr)
4366 {
4367         gpointer obj;
4368
4369         HEAVY_STAT (++stat_wbarrier_generic_store);
4370
4371 #ifdef XDOMAIN_CHECKS_IN_WBARRIER
4372         /* FIXME: ptr_in_heap must be called with the GC lock held */
4373         if (xdomain_checks && *(MonoObject**)ptr && ptr_in_heap (ptr)) {
4374                 char *start = find_object_for_ptr (ptr);
4375                 MonoObject *value = *(MonoObject**)ptr;
4376                 LOCK_GC;
4377                 g_assert (start);
4378                 if (start) {
4379                         MonoObject *obj = (MonoObject*)start;
4380                         if (obj->vtable->domain != value->vtable->domain)
4381                                 g_assert (is_xdomain_ref_allowed (ptr, start, obj->vtable->domain));
4382                 }
4383                 UNLOCK_GC;
4384         }
4385 #endif
4386
4387         obj = *(gpointer*)ptr;
4388         if (obj)
4389                 binary_protocol_wbarrier (ptr, obj, (gpointer)LOAD_VTABLE (obj));
4390
4391         if (ptr_in_nursery (ptr) || ptr_on_stack (ptr)) {
4392                 SGEN_LOG (8, "Skipping remset at %p", ptr);
4393                 return;
4394         }
4395
4396         /*
4397          * We need to record old->old pointer locations for the
4398          * concurrent collector.
4399          */
4400         if (!ptr_in_nursery (obj) && !concurrent_collection_in_progress) {
4401                 SGEN_LOG (8, "Skipping remset at %p", ptr);
4402                 return;
4403         }
4404
4405         SGEN_LOG (8, "Adding remset at %p", ptr);
4406
4407         remset.wbarrier_generic_nostore (ptr);
4408 }
4409
4410 void
4411 mono_gc_wbarrier_generic_store (gpointer ptr, MonoObject* value)
4412 {
4413         SGEN_LOG (8, "Wbarrier store at %p to %p (%s)", ptr, value, value ? safe_name (value) : "null");
4414         *(void**)ptr = value;
4415         if (ptr_in_nursery (value))
4416                 mono_gc_wbarrier_generic_nostore (ptr);
4417         sgen_dummy_use (value);
4418 }
4419
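/*
 * Copy SIZE bytes (a multiple of the word size) from _src to _dest: bit N of
 * BITMAP set means word N holds an object reference and goes through
 * mono_gc_wbarrier_generic_store (); all other words are copied verbatim.
 */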
4420 void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
4421 {
4422         mword *dest = _dest;
4423         mword *src = _src;
4424
4425         while (size) {
4426                 if (bitmap & 0x1)
4427                         mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);
4428                 else
4429                         *dest = *src;
4430                 ++src;
4431                 ++dest;
4432                 size -= SIZEOF_VOID_P;
4433                 bitmap >>= 1;
4434         }
4435 }
4436
4437 #ifdef SGEN_BINARY_PROTOCOL
4438 #undef HANDLE_PTR
4439 #define HANDLE_PTR(ptr,obj) do {                                        \
4440                 gpointer o = *(gpointer*)(ptr);                         \
4441                 if ((o)) {                                              \
4442                         gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
4443                         binary_protocol_wbarrier (d, o, (gpointer) LOAD_VTABLE (o)); \
4444                 }                                                       \
4445         } while (0)
4446
4447 static void
4448 scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
4449 {
4450 #define SCAN_OBJECT_NOVTABLE
4451 #include "sgen-scan-object.h"
4452 }
4453 #endif
4454
4455 void
4456 mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
4457 {
4458         HEAVY_STAT (++stat_wbarrier_value_copy);
4459         g_assert (klass->valuetype);
4460
4461         SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, klass->gc_descr, klass->name, klass);
4462
4463         if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !SGEN_CLASS_HAS_REFERENCES (klass)) {
4464                 size_t element_size = mono_class_value_size (klass, NULL);
4465                 size_t size = count * element_size;
4466                 mono_gc_memmove (dest, src, size);              
4467                 return;
4468         }
4469
4470 #ifdef SGEN_BINARY_PROTOCOL
4471         {
4472                 size_t element_size = mono_class_value_size (klass, NULL);
4473                 int i;
4474                 for (i = 0; i < count; ++i) {
4475                         scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
4476                                         (char*)src + i * element_size - sizeof (MonoObject),
4477                                         (mword) klass->gc_descr);
4478                 }
4479         }
4480 #endif
4481
4482         remset.wbarrier_value_copy (dest, src, count, klass);
4483 }
4484
4485 /**
4486  * mono_gc_wbarrier_object_copy:
4487  *
4488  * Write barrier to call when obj is the result of a clone or copy of an object.
4489  */
4490 void
4491 mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
4492 {
4493         int size;
4494
4495         HEAVY_STAT (++stat_wbarrier_object_copy);
4496
4497         if (ptr_in_nursery (obj) || ptr_on_stack (obj)) {
4498                 size = mono_object_class (obj)->instance_size;
4499                 mono_gc_memmove ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
4500                                 size - sizeof (MonoObject));
4501                 return; 
4502         }
4503
4504 #ifdef SGEN_BINARY_PROTOCOL
4505         scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
4506 #endif
4507
4508         remset.wbarrier_object_copy (obj, src);
4509 }
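/*
 * Illustrative sketch, not part of the runtime: how a clone operation is
 * expected to use this barrier, assuming mono_object_new_specific () for the
 * allocation.  The barrier copies everything past the MonoObject header
 * itself, so the caller only allocates the destination.
 */
static MonoObject*
example_clone_object (MonoObject *src)
{
        MonoObject *copy = mono_object_new_specific (src->vtable);
        mono_gc_wbarrier_object_copy (copy, src);
        return copy;
}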
4510
4511
4512 /*
4513  * ######################################################################
4514  * ########  Other mono public interface functions.
4515  * ######################################################################
4516  */
4517
4518 #define REFS_SIZE 128
4519 typedef struct {
4520         void *data;
4521         MonoGCReferences callback;
4522         int flags;
4523         int count;
4524         int called;
4525         MonoObject *refs [REFS_SIZE];
4526         uintptr_t offsets [REFS_SIZE];
4527 } HeapWalkInfo;
4528
4529 #undef HANDLE_PTR
4530 #define HANDLE_PTR(ptr,obj)     do {    \
4531                 if (*(ptr)) {   \
4532                         if (hwi->count == REFS_SIZE) {  \
4533                                 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);    \
4534                                 hwi->count = 0; \
4535                                 hwi->called = 1;        \
4536                         }       \
4537                         hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start;  \
4538                         hwi->refs [hwi->count++] = *(ptr);      \
4539                 }       \
4540         } while (0)
4541
4542 static void
4543 collect_references (HeapWalkInfo *hwi, char *start, size_t size)
4544 {
4545 #include "sgen-scan-object.h"
4546 }
4547
4548 static void
4549 walk_references (char *start, size_t size, void *data)
4550 {
4551         HeapWalkInfo *hwi = data;
4552         hwi->called = 0;
4553         hwi->count = 0;
4554         collect_references (hwi, start, size);
4555         if (hwi->count || !hwi->called)
4556                 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
4557 }
4558
4559 /**
4560  * mono_gc_walk_heap:
4561  * @flags: flags for future use
4562  * @callback: a function pointer called for each object in the heap
4563  * @data: a user data pointer that is passed to callback
4564  *
4565  * This function can be used to iterate over all the live objects in the heap:
4566  * for each object, @callback is invoked, providing info about the object's
4567  * location in memory, its class, its size and the objects it references.
4568  * For each referenced object, its offset from the object address is
4569  * reported in the offsets array.
4570  * The object references may be buffered, so the callback may be invoked
4571  * multiple times for the same object: in all but the first call, the size
4572  * argument will be zero.
4573  * Note that this function can only be called from the #MONO_GC_EVENT_PRE_START_WORLD
4574  * profiler event handler.
4575  *
4576  * Returns: a non-zero value if the GC doesn't support heap walking
4577  */
4578 int
4579 mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
4580 {
4581         HeapWalkInfo hwi;
4582
4583         hwi.flags = flags;
4584         hwi.callback = callback;
4585         hwi.data = data;
4586
4587         sgen_clear_nursery_fragments ();
4588         sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, walk_references, &hwi, FALSE);
4589
4590         major_collector.iterate_objects (TRUE, TRUE, walk_references, &hwi);
4591         sgen_los_iterate_objects (walk_references, &hwi);
4592
4593         return 0;
4594 }
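/*
 * Illustrative sketch, not part of the runtime: a heap-walk callback that
 * counts objects and references, assuming the MonoGCReferences signature
 * declared in mono-gc.h.  The struct and function names are hypothetical.
 */
typedef struct {
        int num_objects;
        int num_refs;
} ExampleHeapStats;

static int
example_count_references (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
{
        ExampleHeapStats *stats = data;
        /* SIZE is non-zero only for the first invocation per object */
        if (size)
                stats->num_objects++;
        stats->num_refs += (int)num;
        return 0;
}

/*
 * Usage, from a MONO_GC_EVENT_PRE_START_WORLD profiler handler:
 *
 *   ExampleHeapStats stats = { 0, 0 };
 *   mono_gc_walk_heap (0, example_count_references, &stats);
 */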
4595
4596 void
4597 mono_gc_collect (int generation)
4598 {
4599         LOCK_GC;
4600         if (generation > 1)
4601                 generation = 1;
4602         sgen_perform_collection (0, generation, "user request", TRUE);
4603         UNLOCK_GC;
4604 }
4605
4606 int
4607 mono_gc_max_generation (void)
4608 {
4609         return 1;
4610 }
4611
4612 int
4613 mono_gc_collection_count (int generation)
4614 {
4615         if (generation == 0)
4616                 return stat_minor_gcs;
4617         return stat_major_gcs;
4618 }
4619
4620 int64_t
4621 mono_gc_get_used_size (void)
4622 {
4623         gint64 tot = 0;
4624         LOCK_GC;
4625         tot = los_memory_usage;
4626         tot += nursery_section->next_data - nursery_section->data;
4627         tot += major_collector.get_used_size ();
4628         /* FIXME: account for pinned objects */
4629         UNLOCK_GC;
4630         return tot;
4631 }
4632
4633 int
4634 mono_gc_get_los_limit (void)
4635 {
4636         return MAX_SMALL_OBJ_SIZE;
4637 }
4638
4639 gboolean
4640 mono_gc_user_markers_supported (void)
4641 {
4642         return TRUE;
4643 }
4644
4645 gboolean
4646 mono_object_is_alive (MonoObject* o)
4647 {
4648         return TRUE;
4649 }
4650
4651 int
4652 mono_gc_get_generation (MonoObject *obj)
4653 {
4654         if (ptr_in_nursery (obj))
4655                 return 0;
4656         return 1;
4657 }
4658
4659 void
4660 mono_gc_enable_events (void)
4661 {
4662 }
4663
4664 void
4665 mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track)
4666 {
4667         sgen_register_disappearing_link (obj, link_addr, track, FALSE);
4668 }
4669
4670 void
4671 mono_gc_weak_link_remove (void **link_addr, gboolean track)
4672 {
4673         sgen_register_disappearing_link (NULL, link_addr, track, FALSE);
4674 }
4675
4676 MonoObject*
4677 mono_gc_weak_link_get (void **link_addr)
4678 {
4679         void * volatile *link_addr_volatile;
4680         void *ptr;
4681         MonoObject *obj;
4682  retry:
4683         link_addr_volatile = link_addr;
4684         ptr = (void*)*link_addr_volatile;
4685         /*
4686          * At this point we have a hidden pointer.  If the GC runs
4687          * here, it will not recognize the hidden pointer as a
4688          * reference, and if the object behind it is not referenced
4689          * elsewhere, it will be freed.  Once the world is restarted
4690          * we reveal the pointer, giving us a pointer to a freed
4691          * object.  To make sure we don't return it, we load the
4692          * hidden pointer again.  If it's still the same, we can be
4693          * sure the object reference is valid.
4694          */
4695         if (ptr)
4696                 obj = (MonoObject*) REVEAL_POINTER (ptr);
4697         else
4698                 return NULL;
4699
4700         mono_memory_barrier ();
4701
4702         /*
4703          * During the second bridge processing step the world is
4704          * running again.  That step processes all weak links once
4705          * more to null those that refer to dead objects.  Before that
4706          * is completed, those links must not be followed, so we
4707          * conservatively wait for bridge processing when any weak
4708          * link is dereferenced.
4709          */
4710         if (G_UNLIKELY (bridge_processing_in_progress))
4711                 mono_gc_wait_for_bridge_processing ();
4712
4713         if ((void*)*link_addr_volatile != ptr)
4714                 goto retry;
4715
4716         return obj;
4717 }
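/*
 * Illustrative sketch, not part of the runtime: the life cycle of a weak
 * link as used by the GC handle code.  `example_link' is hypothetical; real
 * link slots live in the GC handle tables, and TRACK selects
 * resurrection-tracking ("long") semantics.
 */
static void *example_link;

static void
example_weak_ref_create (MonoObject *obj)
{
        mono_gc_weak_link_add (&example_link, obj, FALSE);
}

static MonoObject*
example_weak_ref_target (void)
{
        /* returns NULL once the target has been collected */
        return mono_gc_weak_link_get (&example_link);
}

static void
example_weak_ref_destroy (void)
{
        mono_gc_weak_link_remove (&example_link, FALSE);
}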
4718
4719 gboolean
4720 mono_gc_ephemeron_array_add (MonoObject *obj)
4721 {
4722         EphemeronLinkNode *node;
4723
4724         LOCK_GC;
4725
4726         node = sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
4727         if (!node) {
4728                 UNLOCK_GC;
4729                 return FALSE;
4730         }
4731         node->array = (char*)obj;
4732         node->next = ephemeron_list;
4733         ephemeron_list = node;
4734
4735         SGEN_LOG (5, "Registered ephemeron array %p", obj);
4736
4737         UNLOCK_GC;
4738         return TRUE;
4739 }
4740
4741 gboolean
4742 mono_gc_set_allow_synchronous_major (gboolean flag)
4743 {
4744         if (!major_collector.is_concurrent)
4745                 return flag;
4746
4747         allow_synchronous_major = flag;
4748         return TRUE;
4749 }
4750
4751 void*
4752 mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
4753 {
4754         void *result;
4755         LOCK_INTERRUPTION;
4756         result = func (data);
4757         UNLOCK_INTERRUPTION;
4758         return result;
4759 }
4760
4761 gboolean
4762 mono_gc_is_gc_thread (void)
4763 {
4764         gboolean result;
4765         LOCK_GC;
4766         result = mono_thread_info_current () != NULL;
4767         UNLOCK_GC;
4768         return result;
4769 }
4770
4771 static gboolean
4772 is_critical_method (MonoMethod *method)
4773 {
4774         return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
4775 }
4776         
4777 void
4778 mono_gc_base_init (void)
4779 {
4780         MonoThreadInfoCallbacks cb;
4781         char *env;
4782         char **opts, **ptr;
4783         char *major_collector_opt = NULL;
4784         char *minor_collector_opt = NULL;
4785         glong max_heap = 0;
4786         glong soft_limit = 0;
4787         int num_workers;
4788         int result;
4789         int dummy;
4790         gboolean debug_print_allowance = FALSE;
4791         double allowance_ratio = 0, save_target = 0;
4792         gboolean have_split_nursery = FALSE;
4793         gboolean cement_enabled = TRUE;
4794
4795         do {
4796                 result = InterlockedCompareExchange (&gc_initialized, -1, 0);
4797                 switch (result) {
4798                 case 1:
4799                         /* already inited */
4800                         return;
4801                 case -1:
4802                         /* being inited by another thread */
4803                         g_usleep (1000);
4804                         break;
4805                 case 0:
4806                         /* we will init it */
4807                         break;
4808                 default:
4809                         g_assert_not_reached ();
4810                 }
4811         } while (result != 0);
4812
4813         LOCK_INIT (gc_mutex);
4814
4815         pagesize = mono_pagesize ();
4816         gc_debug_file = stderr;
4817
4818         cb.thread_register = sgen_thread_register;
4819         cb.thread_unregister = sgen_thread_unregister;
4820         cb.thread_attach = sgen_thread_attach;
4821         cb.mono_method_is_critical = (gpointer)is_critical_method;
4822 #ifndef HOST_WIN32
4823         cb.mono_gc_pthread_create = (gpointer)mono_gc_pthread_create;
4824 #endif
4825
4826         mono_threads_init (&cb, sizeof (SgenThreadInfo));
4827
4828         LOCK_INIT (sgen_interruption_mutex);
4829         LOCK_INIT (pin_queue_mutex);
4830
4831         init_user_copy_or_mark_key ();
4832
4833         if ((env = getenv ("MONO_GC_PARAMS"))) {
4834                 opts = g_strsplit (env, ",", -1);
4835                 for (ptr = opts; *ptr; ++ptr) {
4836                         char *opt = *ptr;
4837                         if (g_str_has_prefix (opt, "major=")) {
4838                                 opt = strchr (opt, '=') + 1;
4839                                 major_collector_opt = g_strdup (opt);
4840                         } else if (g_str_has_prefix (opt, "minor=")) {
4841                                 opt = strchr (opt, '=') + 1;
4842                                 minor_collector_opt = g_strdup (opt);
4843                         }
4844                 }
4845         } else {
4846                 opts = NULL;
4847         }
4848
4849         init_stats ();
4850         sgen_init_internal_allocator ();
4851         sgen_init_nursery_allocator ();
4852
4853         sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION, SGEN_SIZEOF_GC_MEM_SECTION);
4854         sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FINALIZE_READY_ENTRY, sizeof (FinalizeReadyEntry));
4855         sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE, sizeof (GrayQueueSection));
4856         sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));
4857
4858 #ifndef HAVE_KW_THREAD
4859         mono_native_tls_alloc (&thread_info_key, NULL);
4860 #endif
4861
4862         /*
4863          * This needs to happen before any internal allocations because
4864          * it inits the small id which is required for hazard pointer
4865          * operations.
4866          */
4867         sgen_os_init ();
4868
4869         mono_thread_info_attach (&dummy);
4870
4871         if (!minor_collector_opt) {
4872                 sgen_simple_nursery_init (&sgen_minor_collector);
4873         } else {
4874                 if (!strcmp (minor_collector_opt, "simple")) {
4875                         sgen_simple_nursery_init (&sgen_minor_collector);
4876                 } else if (!strcmp (minor_collector_opt, "split")) {
4877                         sgen_split_nursery_init (&sgen_minor_collector);
4878                         have_split_nursery = TRUE;
4879                 } else {
4880                         fprintf (stderr, "Unknown minor collector `%s'.\n", minor_collector_opt);
4881                         exit (1);
4882                 }
4883         }
4884
4885         if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep")) {
4886                 sgen_marksweep_init (&major_collector);
4887         } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-fixed")) {
4888                 sgen_marksweep_fixed_init (&major_collector);
4889         } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-par")) {
4890                 sgen_marksweep_par_init (&major_collector);
4891         } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-fixed-par")) {
4892                 sgen_marksweep_fixed_par_init (&major_collector);
4893         } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-conc")) {
4894                 sgen_marksweep_conc_init (&major_collector);
4895         } else {
4896                 fprintf (stderr, "Unknown major collector `%s'.\n", major_collector_opt);
4897                 exit (1);
4898         }
4899
4900         num_workers = mono_cpu_count ();
4901         g_assert (num_workers > 0);
4902         if (num_workers > 16)
4903                 num_workers = 16;
4904
4905         ///* Keep this the default for now */
4906         /* Precise marking is broken on all supported targets. Disable until fixed. */
4907         conservative_stack_mark = TRUE;
4908
4909         sgen_nursery_size = DEFAULT_NURSERY_SIZE;
4910
4911         if (opts) {
4912                 for (ptr = opts; *ptr; ++ptr) {
4913                         char *opt = *ptr;
4914                         if (g_str_has_prefix (opt, "major="))
4915                                 continue;
4916                         if (g_str_has_prefix (opt, "minor="))
4917                                 continue;
4918                         if (g_str_has_prefix (opt, "max-heap-size=")) {
4919                                 opt = strchr (opt, '=') + 1;
4920                                 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &max_heap)) {
4921                                         if ((max_heap & (mono_pagesize () - 1))) {
4922                                                 fprintf (stderr, "max-heap-size must be a multiple of %d.\n", mono_pagesize ());
4923                                                 exit (1);
4924                                         }
4925                                 } else {
4926                                         fprintf (stderr, "max-heap-size must be an integer.\n");
4927                                         exit (1);
4928                                 }
4929                                 continue;
4930                         }
4931                         if (g_str_has_prefix (opt, "soft-heap-limit=")) {
4932                                 opt = strchr (opt, '=') + 1;
4933                                 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &soft_limit)) {
4934                                         if (soft_limit <= 0) {
4935                                                 fprintf (stderr, "soft-heap-limit must be positive.\n");
4936                                                 exit (1);
4937                                         }
4938                                 } else {
4939                                         fprintf (stderr, "soft-heap-limit must be an integer.\n");
4940                                         exit (1);
4941                                 }
4942                                 continue;
4943                         }
4944                         if (g_str_has_prefix (opt, "workers=")) {
4945                                 long val;
4946                                 char *endptr;
4947                                 if (!major_collector.is_parallel) {
4948                                         fprintf (stderr, "The workers= option can only be used for parallel collectors.");
4949                                         exit (1);
4950                                 }
4951                                 opt = strchr (opt, '=') + 1;
4952                                 val = strtol (opt, &endptr, 10);
4953                                 if (!*opt || *endptr) {
4954                                         fprintf (stderr, "Cannot parse the workers= option value.");
4955                                         exit (1);
4956                                 }
4957                                 if (val <= 0 || val > 16) {
4958                                         fprintf (stderr, "The number of workers must be in the range 1 to 16.");
4959                                         exit (1);
4960                                 }
4961                                 num_workers = (int)val;
4962                                 continue;
4963                         }
4964                         if (g_str_has_prefix (opt, "stack-mark=")) {
4965                                 opt = strchr (opt, '=') + 1;
4966                                 if (!strcmp (opt, "precise")) {
4967                                         conservative_stack_mark = FALSE;
4968                                 } else if (!strcmp (opt, "conservative")) {
4969                                         conservative_stack_mark = TRUE;
4970                                 } else {
4971                                         fprintf (stderr, "Invalid value '%s' for stack-mark= option, possible values are: 'precise', 'conservative'.\n", opt);
4972                                         exit (1);
4973                                 }
4974                                 continue;
4975                         }
4976                         if (g_str_has_prefix (opt, "bridge=")) {
4977                                 opt = strchr (opt, '=') + 1;
4978                                 sgen_register_test_bridge_callbacks (g_strdup (opt));
4979                                 continue;
4980                         }
4981 #ifdef USER_CONFIG
4982                         if (g_str_has_prefix (opt, "nursery-size=")) {
4983                                 long val;
4984                                 opt = strchr (opt, '=') + 1;
4985                                 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &val)) {
4986                                         sgen_nursery_size = val;
4987 #ifdef SGEN_ALIGN_NURSERY
4988                                         if ((val & (val - 1))) {
4989                                                 fprintf (stderr, "The nursery size must be a power of two.\n");
4990                                                 exit (1);
4991                                         }
4992
4993                                         if (val < SGEN_MAX_NURSERY_WASTE) {
4994                                                 fprintf (stderr, "The nursery size must be at least %d bytes.\n", SGEN_MAX_NURSERY_WASTE);
4995                                                 exit (1);
4996                                         }
4997
4998                                         sgen_nursery_bits = 0;
4999                                         while (1 << (++ sgen_nursery_bits) != sgen_nursery_size)
5000                                                 ;
5001 #endif
5002                                 } else {
5003                                         fprintf (stderr, "nursery-size must be an integer.\n");
5004                                         exit (1);
5005                                 }
5006                                 continue;
5007                         }
5008 #endif
5009                         if (g_str_has_prefix (opt, "save-target-ratio=")) {
5010                                 char *endptr;
5011                                 opt = strchr (opt, '=') + 1;
5012                                 save_target = strtod (opt, &endptr);
5013                                 if (endptr == opt) {
5014                                         fprintf (stderr, "save-target-ratio must be a number.");
5015                                         exit (1);
5016                                 }
5017                                 if (save_target < SGEN_MIN_SAVE_TARGET_RATIO || save_target > SGEN_MAX_SAVE_TARGET_RATIO) {
5018                                         fprintf (stderr, "save-target-ratio must be between %.2f - %.2f.", SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO);
5019                                         exit (1);
5020                                 }
5021                                 continue;
5022                         }
5023                         if (g_str_has_prefix (opt, "default-allowance-ratio=")) {
5024                                 char *endptr;
5025                                 opt = strchr (opt, '=') + 1;
5026
5027                                 allowance_ratio = strtod (opt, &endptr);
5028                                 if (endptr == opt) {
5029                                         fprintf (stderr, "default-allowance-ratio must be a number.");
5030                                         exit (1);
5031                                 }
5032                                 if (allowance_ratio < SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO || allowance_ratio > SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO) {
5033                                         fprintf (stderr, "default-allowance-ratio must be between %.2f - %.2f.", SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO);
5034                                         exit (1);
5035                                 }
5036                                 continue;
5037                         }
5038                         if (g_str_has_prefix (opt, "allow-synchronous-major=")) {
5039                                 if (!major_collector.is_concurrent) {
5040                                         fprintf (stderr, "Warning: allow-synchronous-major has no effect because the major collector is not concurrent.\n");
5041                                         continue;
5042                                 }
5043
5044                                 opt = strchr (opt, '=') + 1;
5045
5046                                 if (!strcmp (opt, "yes")) {
5047                                         allow_synchronous_major = TRUE;
5048                                 } else if (!strcmp (opt, "no")) {
5049                                         allow_synchronous_major = FALSE;
5050                                 } else {
5051                                         fprintf (stderr, "allow-synchronous-major must be either `yes' or `no'.\n");
5052                                         exit (1);
5053                                 }
                                continue;
5054                         }
5055
5056                         if (!strcmp (opt, "cementing")) {
5057                                 cement_enabled = TRUE;
5058                                 continue;
5059                         }
5060                         if (!strcmp (opt, "no-cementing")) {
5061                                 cement_enabled = FALSE;
5062                                 continue;
5063                         }
5064
5065                         if (major_collector.handle_gc_param && major_collector.handle_gc_param (opt))
5066                                 continue;
5067
5068                         if (sgen_minor_collector.handle_gc_param && sgen_minor_collector.handle_gc_param (opt))
5069                                 continue;
5070
5071                         fprintf (stderr, "MONO_GC_PARAMS must be a comma-delimited list of one or more of the following:\n");
5072                         fprintf (stderr, "  max-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
5073                         fprintf (stderr, "  soft-heap-limit=N (where N is an integer, possibly with a k, m or a g suffix)\n");
5074                         fprintf (stderr, "  nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
5075                         fprintf (stderr, "  major=COLLECTOR (where COLLECTOR is `marksweep', `marksweep-conc', `marksweep-par', `marksweep-fixed' or `marksweep-fixed-par')\n");
5076                         fprintf (stderr, "  minor=COLLECTOR (where COLLECTOR is `simple' or `split')\n");
5078                         fprintf (stderr, "  stack-mark=MARK-METHOD (where MARK-METHOD is `precise' or `conservative')\n");
5079                         fprintf (stderr, "  [no-]cementing\n");
5080                         if (major_collector.is_concurrent)
5081                                 fprintf (stderr, "  allow-synchronous-major=FLAG (where FLAG is `yes' or `no')\n");
5082                         if (major_collector.print_gc_param_usage)
5083                                 major_collector.print_gc_param_usage ();
5084                         if (sgen_minor_collector.print_gc_param_usage)
5085                                 sgen_minor_collector.print_gc_param_usage ();
5086                         fprintf (stderr, " Experimental options:\n");
5087                         fprintf (stderr, "  save-target-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO);
5088                         fprintf (stderr, "  default-allowance-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO);
5089                         exit (1);
5090                 }
5091                 g_strfreev (opts);
5092         }
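        /*
         * Example only: combining several of the options parsed above, a
         * typical setting could look like
         *
         *   MONO_GC_PARAMS=major=marksweep-conc,soft-heap-limit=512m,stack-mark=conservative
         *
         * Sizes accept k/m/g suffixes; nursery-size is additionally available
         * when USER_CONFIG is defined and must then be a power of two.
         */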
5093
5094         if (major_collector.is_parallel)
5095                 sgen_workers_init (num_workers);
5096         else if (major_collector.is_concurrent)
5097                 sgen_workers_init (1);
5098
5099         if (major_collector_opt)
5100                 g_free (major_collector_opt);
5101
5102         if (minor_collector_opt)
5103                 g_free (minor_collector_opt);
5104
5105         alloc_nursery ();
5106
5107         sgen_cement_init (cement_enabled);
5108
5109         if ((env = getenv ("MONO_GC_DEBUG"))) {
5110                 opts = g_strsplit (env, ",", -1);
5111                 for (ptr = opts; ptr && *ptr; ptr ++) {
5112                         char *opt = *ptr;
5113                         if (opt [0] >= '0' && opt [0] <= '9') {
5114                                 gc_debug_level = atoi (opt);
5115                                 opt++;
5116                                 if (opt [0] == ':')
5117                                         opt++;
5118                                 if (opt [0]) {
5119 #ifdef HOST_WIN32
5120                                         char *rf = g_strdup_printf ("%s.%d", opt, GetCurrentProcessId ());
5121 #else
5122                                         char *rf = g_strdup_printf ("%s.%d", opt, getpid ());
5123 #endif
5124                                         gc_debug_file = fopen (rf, "wb");
5125                                         if (!gc_debug_file)
5126                                                 gc_debug_file = stderr;
5127                                         g_free (rf);
5128                                 }
5129                         } else if (!strcmp (opt, "print-allowance")) {
5130                                 debug_print_allowance = TRUE;
5131                         } else if (!strcmp (opt, "print-pinning")) {
5132                                 do_pin_stats = TRUE;
5133                         } else if (!strcmp (opt, "verify-before-allocs")) {
5134                                 verify_before_allocs = 1;
5135                                 has_per_allocation_action = TRUE;
5136                         } else if (g_str_has_prefix (opt, "verify-before-allocs=")) {
5137                                 char *arg = strchr (opt, '=') + 1;
5138                                 verify_before_allocs = atoi (arg);
5139                                 has_per_allocation_action = TRUE;
5140                         } else if (!strcmp (opt, "collect-before-allocs")) {
5141                                 collect_before_allocs = 1;
5142                                 has_per_allocation_action = TRUE;
5143                         } else if (g_str_has_prefix (opt, "collect-before-allocs=")) {
5144                                 char *arg = strchr (opt, '=') + 1;
5145                                 has_per_allocation_action = TRUE;
5146                                 collect_before_allocs = atoi (arg);
5147                         } else if (!strcmp (opt, "verify-before-collections")) {
5148                                 whole_heap_check_before_collection = TRUE;
5149                         } else if (!strcmp (opt, "check-at-minor-collections")) {
5150                                 consistency_check_at_minor_collection = TRUE;
5151                                 nursery_clear_policy = CLEAR_AT_GC;
5152                         } else if (!strcmp (opt, "check-mark-bits")) {
5153                                 check_mark_bits_after_major_collection = TRUE;
5154                         } else if (!strcmp (opt, "check-nursery-pinned")) {
5155                                 check_nursery_objects_pinned = TRUE;
5156                         } else if (!strcmp (opt, "xdomain-checks")) {
5157                                 xdomain_checks = TRUE;
5158                         } else if (!strcmp (opt, "clear-at-gc")) {
5159                                 nursery_clear_policy = CLEAR_AT_GC;
5160                         } else if (!strcmp (opt, "clear-nursery-at-gc")) {
5161                                 nursery_clear_policy = CLEAR_AT_GC;
5162                         } else if (!strcmp (opt, "check-scan-starts")) {
5163                                 do_scan_starts_check = TRUE;
5164                         } else if (!strcmp (opt, "verify-nursery-at-minor-gc")) {
5165                                 do_verify_nursery = TRUE;
5166                         } else if (!strcmp (opt, "check-concurrent")) {
5167                                 if (!major_collector.is_concurrent) {
5168                                         fprintf (stderr, "Error: check-concurrent only works with concurrent major collectors.\n");
5169                                         exit (1);
5170                                 }
5171                                 do_concurrent_checks = TRUE;
5172                         } else if (!strcmp (opt, "dump-nursery-at-minor-gc")) {
5173                                 do_dump_nursery_content = TRUE;
5174                         } else if (!strcmp (opt, "no-managed-allocator")) {
5175                                 sgen_set_use_managed_allocator (FALSE);
5176                         } else if (!strcmp (opt, "disable-minor")) {
5177                                 disable_minor_collections = TRUE;
5178                         } else if (!strcmp (opt, "disable-major")) {
5179                                 disable_major_collections = TRUE;
5180                         } else if (g_str_has_prefix (opt, "heap-dump=")) {
5181                                 char *filename = strchr (opt, '=') + 1;
5182                                 nursery_clear_policy = CLEAR_AT_GC;
5183                                 heap_dump_file = fopen (filename, "w");
5184                                 if (heap_dump_file) {
5185                                         fprintf (heap_dump_file, "<sgen-dump>\n");
5186                                         do_pin_stats = TRUE;
5187                                 }
5188 #ifdef SGEN_BINARY_PROTOCOL
5189                         } else if (g_str_has_prefix (opt, "binary-protocol=")) {
5190                                 char *filename = strchr (opt, '=') + 1;
5191                                 binary_protocol_init (filename);
5192 #endif
5193                         } else {
5194                                 fprintf (stderr, "Invalid format for the MONO_GC_DEBUG env variable: '%s'\n", env);
5195                                 fprintf (stderr, "The format is: MONO_GC_DEBUG=[l[:filename]|<option>]+ where l is a debug level 0-9.\n");
5196                                 fprintf (stderr, "Valid options are:\n");
5197                                 fprintf (stderr, "  collect-before-allocs[=<n>]\n");
5198                                 fprintf (stderr, "  verify-before-allocs[=<n>]\n");
5199                                 fprintf (stderr, "  check-at-minor-collections\n");
5200                                 fprintf (stderr, "  check-mark-bits\n");
5201                                 fprintf (stderr, "  check-nursery-pinned\n");
5202                                 fprintf (stderr, "  verify-before-collections\n");
5203                                 fprintf (stderr, "  verify-nursery-at-minor-gc\n");
5204                                 fprintf (stderr, "  dump-nursery-at-minor-gc\n");
5205                                 fprintf (stderr, "  disable-minor\n");
5206                                 fprintf (stderr, "  disable-major\n");
5207                                 fprintf (stderr, "  xdomain-checks\n");
5208                                 fprintf (stderr, "  check-concurrent\n");
5209                                 fprintf (stderr, "  clear-at-gc\n");
5210                                 fprintf (stderr, "  clear-nursery-at-gc\n");
5211                                 fprintf (stderr, "  check-scan-starts\n");
5212                                 fprintf (stderr, "  no-managed-allocator\n");
5213                                 fprintf (stderr, "  print-allowance\n");
5214                                 fprintf (stderr, "  print-pinning\n");
5215                                 fprintf (stderr, "  heap-dump=<filename>\n");
5216 #ifdef SGEN_BINARY_PROTOCOL
5217                                 fprintf (stderr, "  binary-protocol=<filename>\n");
5218 #endif
5219                                 exit (1);
5220                         }
5221                 }
5222                 g_strfreev (opts);
5223         }
5224
5225         if (major_collector.is_parallel) {
5226                 if (heap_dump_file) {
5227                         fprintf (stderr, "Error: Cannot do heap dump with the parallel collector.\n");
5228                         exit (1);
5229                 }
5230                 if (do_pin_stats) {
5231                         fprintf (stderr, "Error: Cannot gather pinning statistics with the parallel collector.\n");
5232                         exit (1);
5233                 }
5234         }
5235
5236         if (major_collector.post_param_init)
5237                 major_collector.post_param_init (&major_collector);
5238
5239         sgen_memgov_init (max_heap, soft_limit, debug_print_allowance, allowance_ratio, save_target);
5240
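        /*
         * The card table is installed unconditionally as the remembered set:
         * clear the descriptor and let sgen_card_table_init () fill in its
         * entry points.
         */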
5241         memset (&remset, 0, sizeof (remset));
5242
5243         sgen_card_table_init (&remset);
5244
5245         if (remset.register_thread)
5246                 remset.register_thread (mono_thread_info_current ());
5247
5248         gc_initialized = 1;
5249 }
5250
5251 const char *
5252 mono_gc_get_gc_name (void)
5253 {
5254         return "sgen";
5255 }
5256
5257 static MonoMethod *write_barrier_method;
5258
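/*
 * A method is "critical" if it is the managed write barrier wrapper or one
 * of the managed allocators, i.e. generated code the GC itself relies on.
 */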
5259 gboolean
5260 sgen_is_critical_method (MonoMethod *method)
5261 {
5262         return (method == write_barrier_method || sgen_is_managed_allocator (method));
5263 }
5264
5265 gboolean
5266 sgen_has_critical_method (void)
5267 {
5268         return write_barrier_method || sgen_has_managed_allocator ();
5269 }
5270
5271 #ifndef DISABLE_JIT
5272
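/*
 * Emits the fast-path checks of the managed write barrier.  Using the names
 * from the comments below, the emitted IL corresponds roughly to:
 *
 *   if (ptr_in_nursery (ptr))
 *           return;
 *   if (!major_collector.is_concurrent && !ptr_in_nursery (*ptr))
 *           return;
 *
 * The branch indexes of the early returns are stored in
 * nursery_check_return_labels so the caller can patch them to point at the
 * final CEE_RET; unused slots are left as 0.
 */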
5273 static void
5274 emit_nursery_check (MonoMethodBuilder *mb, int *nursery_check_return_labels)
5275 {
5276         memset (nursery_check_return_labels, 0, sizeof (int) * 3);
5277 #ifdef SGEN_ALIGN_NURSERY
5278         // if (ptr_in_nursery (ptr)) return;
5279         /*
5280          * Masking out the bits might be faster, but we would have to use 64 bit
5281          * immediates, which might be slower.
5282          */
5283         mono_mb_emit_ldarg (mb, 0);
5284         mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5285         mono_mb_emit_byte (mb, CEE_SHR_UN);
5286         mono_mb_emit_icon (mb, (mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
5287         nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BEQ);
5288
5289         if (!major_collector.is_concurrent) {
5290                 // if (!ptr_in_nursery (*ptr)) return;
5291                 mono_mb_emit_ldarg (mb, 0);
5292                 mono_mb_emit_byte (mb, CEE_LDIND_I);
5293                 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5294                 mono_mb_emit_byte (mb, CEE_SHR_UN);
5295                 mono_mb_emit_icon (mb, (mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
5296                 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BNE_UN);
5297         }
5298 #else
5299         int label_continue_1, label_continue_2;
5300         int dereferenced_var;
5301
5302         // if (ptr < (sgen_get_nursery_start ())) goto continue;
5303         mono_mb_emit_ldarg (mb, 0);
5304         mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
5305         label_continue_1 = mono_mb_emit_branch (mb, CEE_BLT);
5306
5307         // if (ptr >= sgen_get_nursery_end ()) goto continue;
5308         mono_mb_emit_ldarg (mb, 0);
5309         mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
5310         label_continue_2 = mono_mb_emit_branch (mb, CEE_BGE);
5311
5312         // Otherwise return
5313         nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BR);
5314
5315         // continue:
5316         mono_mb_patch_branch (mb, label_continue_1);
5317         mono_mb_patch_branch (mb, label_continue_2);
5318
5319         // Dereference and store in local var
5320         dereferenced_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
5321         mono_mb_emit_ldarg (mb, 0);
5322         mono_mb_emit_byte (mb, CEE_LDIND_I);
5323         mono_mb_emit_stloc (mb, dereferenced_var);
5324
5325         if (!major_collector.is_concurrent) {
5326                 // if (*ptr < sgen_get_nursery_start ()) return;
5327                 mono_mb_emit_ldloc (mb, dereferenced_var);
5328                 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
5329                 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BLT);
5330
5331                 // if (*ptr >= sgen_get_nursery_end ()) return;
5332                 mono_mb_emit_ldloc (mb, dereferenced_var);
5333                 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
5334                 nursery_check_return_labels [2] = mono_mb_emit_branch (mb, CEE_BGE);
5335         }
5336 #endif  
5337 }
5338 #endif
5339
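/*
 * Returns the managed write barrier wrapper, creating and caching it on
 * first use.  With MANAGED_WBARRIER the wrapper performs the nursery checks
 * from emit_nursery_check () and then dirties the card for the stored-to
 * address; a rough C sketch of the generated code (not the emitted IL):
 *
 *   void wbarrier (gpointer *ptr)
 *   {
 *           mword index;
 *           if (ptr_in_nursery (ptr))
 *                   return;
 *           if (!major_collector.is_concurrent && !ptr_in_nursery (*ptr))
 *                   return;
 *           index = (mword)ptr >> CARD_BITS;
 *   #ifdef SGEN_HAVE_OVERLAPPING_CARDS
 *           index &= CARD_MASK;
 *   #endif
 *           sgen_cardtable [index] = 1;
 *   }
 *
 * Without MANAGED_WBARRIER the wrapper just calls
 * mono_gc_wbarrier_generic_nostore ().  The result is cached in
 * write_barrier_method under the loader lock.
 */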
5340 MonoMethod*
5341 mono_gc_get_write_barrier (void)
5342 {
5343         MonoMethod *res;
5344         MonoMethodBuilder *mb;
5345         MonoMethodSignature *sig;
5346 #ifdef MANAGED_WBARRIER
5347         int i, nursery_check_labels [3];
5348
5349 #ifdef HAVE_KW_THREAD
5350         int stack_end_offset = -1;
5351
5352         MONO_THREAD_VAR_OFFSET (stack_end, stack_end_offset);
5353         g_assert (stack_end_offset != -1);
5354 #endif
5355 #endif
5356
5357         // FIXME: Maybe create a separate version for ctors (the branch would be
5358         // correctly predicted more times)
5359         if (write_barrier_method)
5360                 return write_barrier_method;
5361
5362         /* Create the IL version of mono_gc_wbarrier_generic_store () */
5363         sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
5364         sig->ret = &mono_defaults.void_class->byval_arg;
5365         sig->params [0] = &mono_defaults.int_class->byval_arg;
5366
5367         mb = mono_mb_new (mono_defaults.object_class, "wbarrier", MONO_WRAPPER_WRITE_BARRIER);
5368
5369 #ifndef DISABLE_JIT
5370 #ifdef MANAGED_WBARRIER
5371         emit_nursery_check (mb, nursery_check_labels);
5372         /*
5373         addr = sgen_cardtable + ((address >> CARD_BITS) & CARD_MASK)
5374         *addr = 1;
5375
5376         sgen_cardtable:
5377                 LDC_PTR sgen_cardtable
5378
5379         address >> CARD_BITS
5380                 LDARG_0
5381                 LDC_I4 CARD_BITS
5382                 SHR_UN
5383         if (SGEN_HAVE_OVERLAPPING_CARDS) {
5384                 LDC_PTR card_table_mask
5385                 AND
5386         }
5387         AND
5388         ldc_i4_1
5389         stind_i1
5390         */
5391         mono_mb_emit_ptr (mb, sgen_cardtable);
5392         mono_mb_emit_ldarg (mb, 0);
5393         mono_mb_emit_icon (mb, CARD_BITS);
5394         mono_mb_emit_byte (mb, CEE_SHR_UN);
5395 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
5396         mono_mb_emit_ptr (mb, (gpointer)CARD_MASK);
5397         mono_mb_emit_byte (mb, CEE_AND);
5398 #endif
5399         mono_mb_emit_byte (mb, CEE_ADD);
5400         mono_mb_emit_icon (mb, 1);
5401         mono_mb_emit_byte (mb, CEE_STIND_I1);
5402
5403         // return;
5404         for (i = 0; i < 3; ++i) {
5405                 if (nursery_check_labels [i])
5406                         mono_mb_patch_branch (mb, nursery_check_labels [i]);
5407         }
5408         mono_mb_emit_byte (mb, CEE_RET);
5409 #else
5410         mono_mb_emit_ldarg (mb, 0);
5411         mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
5412         mono_mb_emit_byte (mb, CEE_RET);
5413 #endif
5414 #endif
5415         res = mono_mb_create_method (mb, sig, 16);
5416         mono_mb_free (mb);
5417
5418         mono_loader_lock ();
5419         if (write_barrier_method) {
5420                 /* Already created */
5421                 mono_free_method (res);
5422         } else {
5423                 /* double-checked locking */
5424                 mono_memory_barrier ();
5425                 write_barrier_method = res;
5426         }
5427         mono_loader_unlock ();
5428
5429         return write_barrier_method;
5430 }
5431
5432 char*
5433 mono_gc_get_description (void)
5434 {
5435         return g_strdup ("sgen");
5436 }
5437
5438 void
5439 mono_gc_set_desktop_mode (void)
5440 {
5441 }
5442
5443 gboolean
5444 mono_gc_is_moving (void)
5445 {
5446         return TRUE;
5447 }
5448
5449 gboolean
5450 mono_gc_is_disabled (void)
5451 {
5452         return FALSE;
5453 }
5454
5455 #ifdef HOST_WIN32
5456 BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
5457 {
5458         return TRUE;
5459 }
5460 #endif
5461
5462 NurseryClearPolicy
5463 sgen_get_nursery_clear_policy (void)
5464 {
5465         return nursery_clear_policy;
5466 }
5467
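/*
 * Returns the vtable of the dummy byte-array class ("array_filler_type")
 * used to describe filler objects, presumably so unused heap regions remain
 * walkable.  Built lazily from statically allocated class/vtable storage.
 */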
5468 MonoVTable*
5469 sgen_get_array_fill_vtable (void)
5470 {
5471         if (!array_fill_vtable) {
5472                 static MonoClass klass;
5473                 static MonoVTable vtable;
5474                 gsize bmap;
5475
5476                 MonoDomain *domain = mono_get_root_domain ();
5477                 g_assert (domain);
5478
5479                 klass.element_class = mono_defaults.byte_class;
5480                 klass.rank = 1;
5481                 klass.instance_size = sizeof (MonoArray);
5482                 klass.sizes.element_size = 1;
5483                 klass.name = "array_filler_type";
5484
5485                 vtable.klass = &klass;
5486                 bmap = 0;
5487                 vtable.gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 1);
5488                 vtable.rank = 1;
5489
5490                 array_fill_vtable = &vtable;
5491         }
5492         return array_fill_vtable;
5493 }
5494
5495 void
5496 sgen_gc_lock (void)
5497 {
5498         LOCK_GC;
5499 }
5500
5501 void
5502 sgen_gc_unlock (void)
5503 {
5504         UNLOCK_GC;
5505 }
5506
5507 void
5508 sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
5509 {
5510         major_collector.iterate_live_block_ranges (callback);
5511 }
5512
5513 void
5514 sgen_major_collector_scan_card_table (SgenGrayQueue *queue)
5515 {
5516         major_collector.scan_card_table (FALSE, queue);
5517 }
5518
5519 SgenMajorCollector*
5520 sgen_get_major_collector (void)
5521 {
5522         return &major_collector;
5523 }
5524
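/*
 * Flags the calling thread to be skipped by the collector: sets the thread
 * info's gc_disabled field under the GC lock.
 */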
5525 void mono_gc_set_skip_thread (gboolean skip)
5526 {
5527         SgenThreadInfo *info = mono_thread_info_current ();
5528
5529         LOCK_GC;
5530         info->gc_disabled = skip;
5531         UNLOCK_GC;
5532 }
5533
5534 SgenRemeberedSet*
5535 sgen_get_remset (void)
5536 {
5537         return &remset;
5538 }
5539
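/*
 * Extra bits the runtime should OR into a class's vtable flags: bridge
 * classes get SGEN_GC_BIT_BRIDGE_OBJECT when bridge processing is enabled.
 */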
5540 guint
5541 mono_gc_get_vtable_bits (MonoClass *class)
5542 {
5543         if (sgen_need_bridge_processing () && sgen_is_bridge_class (class))
5544                 return SGEN_GC_BIT_BRIDGE_OBJECT;
5545         return 0;
5546 }
5547
5548 void
5549 mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)
5550 {
5551         // FIXME:
5552 }
5553
5554
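/*
 * Debugging helper: stops the world, discards the current nursery
 * fragments and checks the consistency of the whole heap before restarting
 * the world.
 */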
5555 void
5556 sgen_check_whole_heap_stw (void)
5557 {
5558         sgen_stop_world (0);
5559         sgen_clear_nursery_fragments ();
5560         sgen_check_whole_heap (FALSE);
5561         sgen_restart_world (0, NULL);
5562 }
5563
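/*
 * Reports any buffered object moves to the profiler and resets the buffer.
 */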
5564 void
5565 sgen_gc_event_moves (void)
5566 {
5567         if (moved_objects_idx) {
5568                 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
5569                 moved_objects_idx = 0;
5570         }
5571 }
5572
5573 #endif /* HAVE_SGEN_GC */