[sgen] Move the independent parts of SGen to a separate library.
author: Alex Rønne Petersen <alexrp@xamarin.com>
Mon, 4 May 2015 00:22:58 +0000 (02:22 +0200)
committer: Alex Rønne Petersen <alexrp@xamarin.com>
Mon, 4 May 2015 19:09:29 +0000 (21:09 +0200)
134 files changed:
configure.ac
mono/Makefile.am
mono/dis/Makefile.am
mono/metadata/Makefile.am
mono/metadata/gc-internal-agnostic.h [deleted file]
mono/metadata/gc-internal.h
mono/metadata/gc-memfuncs.c [deleted file]
mono/metadata/gc-parse.c [deleted file]
mono/metadata/gc-stats.c
mono/metadata/gc.c
mono/metadata/sgen-alloc.c [deleted file]
mono/metadata/sgen-archdep.h [deleted file]
mono/metadata/sgen-bridge-internal.h
mono/metadata/sgen-bridge.c
mono/metadata/sgen-cardtable.c [deleted file]
mono/metadata/sgen-cardtable.h [deleted file]
mono/metadata/sgen-client-mono.h
mono/metadata/sgen-client.h [deleted file]
mono/metadata/sgen-conf.h [deleted file]
mono/metadata/sgen-copy-object.h [deleted file]
mono/metadata/sgen-debug.c [deleted file]
mono/metadata/sgen-descriptor.c [deleted file]
mono/metadata/sgen-descriptor.h [deleted file]
mono/metadata/sgen-fin-weak-hash.c [deleted file]
mono/metadata/sgen-gc.c [deleted file]
mono/metadata/sgen-gc.h [deleted file]
mono/metadata/sgen-gray.c [deleted file]
mono/metadata/sgen-gray.h [deleted file]
mono/metadata/sgen-hash-table.c [deleted file]
mono/metadata/sgen-hash-table.h [deleted file]
mono/metadata/sgen-internal.c [deleted file]
mono/metadata/sgen-layout-stats.c [deleted file]
mono/metadata/sgen-layout-stats.h [deleted file]
mono/metadata/sgen-los.c [deleted file]
mono/metadata/sgen-major-copy-object.h [deleted file]
mono/metadata/sgen-marksweep-drain-gray-stack.h [deleted file]
mono/metadata/sgen-marksweep-scan-object-concurrent.h [deleted file]
mono/metadata/sgen-marksweep.c [deleted file]
mono/metadata/sgen-memory-governor.c [deleted file]
mono/metadata/sgen-memory-governor.h [deleted file]
mono/metadata/sgen-minor-copy-object.h [deleted file]
mono/metadata/sgen-minor-scan-object.h [deleted file]
mono/metadata/sgen-mono.c
mono/metadata/sgen-new-bridge.c
mono/metadata/sgen-nursery-allocator.c [deleted file]
mono/metadata/sgen-old-bridge.c
mono/metadata/sgen-os-mach.c
mono/metadata/sgen-os-posix.c
mono/metadata/sgen-os-win32.c
mono/metadata/sgen-pinning-stats.c [deleted file]
mono/metadata/sgen-pinning.c [deleted file]
mono/metadata/sgen-pinning.h [deleted file]
mono/metadata/sgen-pointer-queue.c [deleted file]
mono/metadata/sgen-pointer-queue.h [deleted file]
mono/metadata/sgen-protocol-def.h [deleted file]
mono/metadata/sgen-protocol.c [deleted file]
mono/metadata/sgen-protocol.h [deleted file]
mono/metadata/sgen-qsort.c [deleted file]
mono/metadata/sgen-qsort.h [deleted file]
mono/metadata/sgen-scan-object.h [deleted file]
mono/metadata/sgen-simple-nursery.c [deleted file]
mono/metadata/sgen-split-nursery.c [deleted file]
mono/metadata/sgen-stw.c
mono/metadata/sgen-tagged-pointer.h [deleted file]
mono/metadata/sgen-tarjan-bridge.c
mono/metadata/sgen-thread-pool.c [deleted file]
mono/metadata/sgen-thread-pool.h [deleted file]
mono/metadata/sgen-toggleref.c
mono/metadata/sgen-workers.c [deleted file]
mono/metadata/sgen-workers.h [deleted file]
mono/mini/Makefile.am.in
mono/sgen/.gitignore [new file with mode: 0644]
mono/sgen/Makefile.am [new file with mode: 0644]
mono/sgen/gc-internal-agnostic.h [new file with mode: 0644]
mono/sgen/sgen-alloc.c [new file with mode: 0644]
mono/sgen/sgen-archdep.h [new file with mode: 0644]
mono/sgen/sgen-cardtable.c [new file with mode: 0644]
mono/sgen/sgen-cardtable.h [new file with mode: 0644]
mono/sgen/sgen-client.h [new file with mode: 0644]
mono/sgen/sgen-conf.h [new file with mode: 0644]
mono/sgen/sgen-copy-object.h [new file with mode: 0644]
mono/sgen/sgen-debug.c [new file with mode: 0644]
mono/sgen/sgen-descriptor.c [new file with mode: 0644]
mono/sgen/sgen-descriptor.h [new file with mode: 0644]
mono/sgen/sgen-fin-weak-hash.c [new file with mode: 0644]
mono/sgen/sgen-gc.c [new file with mode: 0644]
mono/sgen/sgen-gc.h [new file with mode: 0644]
mono/sgen/sgen-gray.c [new file with mode: 0644]
mono/sgen/sgen-gray.h [new file with mode: 0644]
mono/sgen/sgen-hash-table.c [new file with mode: 0644]
mono/sgen/sgen-hash-table.h [new file with mode: 0644]
mono/sgen/sgen-internal.c [new file with mode: 0644]
mono/sgen/sgen-layout-stats.c [new file with mode: 0644]
mono/sgen/sgen-layout-stats.h [new file with mode: 0644]
mono/sgen/sgen-los.c [new file with mode: 0644]
mono/sgen/sgen-major-copy-object.h [new file with mode: 0644]
mono/sgen/sgen-marksweep-drain-gray-stack.h [new file with mode: 0644]
mono/sgen/sgen-marksweep-scan-object-concurrent.h [new file with mode: 0644]
mono/sgen/sgen-marksweep.c [new file with mode: 0644]
mono/sgen/sgen-memory-governor.c [new file with mode: 0644]
mono/sgen/sgen-memory-governor.h [new file with mode: 0644]
mono/sgen/sgen-minor-copy-object.h [new file with mode: 0644]
mono/sgen/sgen-minor-scan-object.h [new file with mode: 0644]
mono/sgen/sgen-nursery-allocator.c [new file with mode: 0644]
mono/sgen/sgen-pinning-stats.c [new file with mode: 0644]
mono/sgen/sgen-pinning.c [new file with mode: 0644]
mono/sgen/sgen-pinning.h [new file with mode: 0644]
mono/sgen/sgen-pointer-queue.c [new file with mode: 0644]
mono/sgen/sgen-pointer-queue.h [new file with mode: 0644]
mono/sgen/sgen-protocol-def.h [new file with mode: 0644]
mono/sgen/sgen-protocol.c [new file with mode: 0644]
mono/sgen/sgen-protocol.h [new file with mode: 0644]
mono/sgen/sgen-qsort.c [new file with mode: 0644]
mono/sgen/sgen-qsort.h [new file with mode: 0644]
mono/sgen/sgen-scan-object.h [new file with mode: 0644]
mono/sgen/sgen-simple-nursery.c [new file with mode: 0644]
mono/sgen/sgen-split-nursery.c [new file with mode: 0644]
mono/sgen/sgen-tagged-pointer.h [new file with mode: 0644]
mono/sgen/sgen-thread-pool.c [new file with mode: 0644]
mono/sgen/sgen-thread-pool.h [new file with mode: 0644]
mono/sgen/sgen-workers.c [new file with mode: 0644]
mono/sgen/sgen-workers.h [new file with mode: 0644]
mono/unit-tests/.gitignore
mono/unit-tests/Makefile.am
mono/unit-tests/test-gc-memfuncs.c [deleted file]
mono/unit-tests/test-memfuncs.c [new file with mode: 0644]
mono/unit-tests/test-sgen-qsort.c
mono/utils/Makefile.am
mono/utils/memfuncs.c [new file with mode: 0644]
mono/utils/memfuncs.h [new file with mode: 0644]
mono/utils/parse.c [new file with mode: 0644]
mono/utils/parse.h [new file with mode: 0644]
msvc/libmonoruntime.vcxproj
msvc/libmonoutils.vcxproj

index 23cfdd2c34c1c2ee0cc8f23952013e7bdc587117..3861be1bd9df8505aa75e47d3f1d3dd3fd11e726 100644 (file)
@@ -3881,6 +3881,7 @@ mono/arch/arm/Makefile
 mono/arch/arm64/Makefile
 mono/arch/ia64/Makefile
 mono/arch/mips/Makefile
+mono/sgen/Makefile
 mono/tests/Makefile
 mono/tests/tests-config
 mono/tests/assemblyresolve/Makefile
index e2af5ccabfc561a67d947a84bbe64d40fb7d64ca..ef41dfeef5c4c27a659ea515e327bfa2c09c9ebf 100644 (file)
@@ -1,8 +1,12 @@
+if SUPPORT_SGEN
+sgen_dirs = sgen
+endif
+
 if CROSS_COMPILING
-SUBDIRS = arch utils io-layer cil metadata mini dis profiler
+SUBDIRS = arch utils io-layer cil metadata $(sgen_dirs) mini dis profiler
 else
 if INSTALL_MONOTOUCH
-SUBDIRS = utils io-layer metadata arch mini profiler
+SUBDIRS = utils io-layer metadata arch $(sgen_dirs) mini profiler
 
 monotouch-do-build:
        @list='$(SUBDIRS)'; for subdir in $$list; do \
@@ -26,7 +30,7 @@ monotouch-do-clean:
          (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$target); \
     done;
 else
-SUBDIRS = arch utils io-layer cil metadata mini dis tests unit-tests benchmark profiler
+SUBDIRS = arch utils io-layer cil metadata $(sgen_dirs) mini dis tests unit-tests benchmark profiler
 endif
 endif
-DIST_SUBDIRS = arch utils io-layer cil metadata mini dis tests unit-tests benchmark profiler
+DIST_SUBDIRS = arch utils io-layer cil metadata $(sgen_dirs) mini dis tests unit-tests benchmark profiler
index 6d6875b6742d7db93f39464bb2561ca356117aad..40f0dbf95524a9372405cbce3eef665f45f8da8f 100644 (file)
@@ -6,6 +6,7 @@ endif
 
 if SUPPORT_SGEN
 metadata_lib=$(top_builddir)/mono/metadata/libmonoruntimesgen-static.la
+gc_lib=$(top_builddir)/mono/sgen/libmonosgen-static.la
 else
 metadata_lib=$(top_builddir)/mono/metadata/libmonoruntime-static.a
 gc_lib=$(LIBGC_STATIC_LIBS)
@@ -13,10 +14,10 @@ endif
 
 runtime_lib=   \
        $(metadata_lib) \
+       $(gc_lib)       \
        $(top_builddir)/mono/io-layer/libwapi.la        \
        $(top_builddir)/mono/utils/libmonoutils.la \
-       $(GLIB_LIBS) $(LIBICONV) \
-       $(gc_lib)
+       $(GLIB_LIBS) $(LIBICONV)
 
 if DISABLE_EXECUTABLES
 bin_PROGRAMS =
index 77950fcc2c7e54379fba204fd0510db89e894335..771542a3fe93921b0bd77b6f89297a4389b6ea89 100644 (file)
@@ -129,9 +129,6 @@ common_sources = \
        filewatcher.c           \
        filewatcher.h           \
        gc-internal.h           \
-       gc-internal-agnostic.h  \
-       gc-memfuncs.c           \
-       gc-parse.c              \
        icall.c                 \
        icall-def.h             \
        image.c                 \
@@ -242,11 +239,6 @@ sgen_sources = \
        sgen-os-posix.c         \
        sgen-os-mach.c          \
        sgen-os-win32.c         \
-       sgen-gc.c               \
-       sgen-internal.c         \
-       sgen-marksweep.c        \
-       sgen-los.c              \
-       sgen-protocol.c \
        sgen-bridge.c           \
        sgen-bridge.h           \
        sgen-bridge-internal.h  \
@@ -255,51 +247,8 @@ sgen_sources = \
        sgen-tarjan-bridge.c            \
        sgen-toggleref.c                \
        sgen-toggleref.h                \
-       sgen-gc.h               \
-       sgen-conf.h             \
-       sgen-archdep.h          \
-       sgen-cardtable.c        \
-       sgen-cardtable.h        \
-       sgen-pointer-queue.c    \
-       sgen-pointer-queue.h    \
-       sgen-pinning.c  \
-       sgen-pinning.h  \
-       sgen-pinning-stats.c    \
-       sgen-workers.c  \
-       sgen-workers.h  \
-       sgen-gray.c     \
-       sgen-gray.h     \
-       sgen-major-copy-object.h \
-       sgen-minor-copy-object.h \
-       sgen-copy-object.h \
-       sgen-marksweep-scan-object-concurrent.h \
-       sgen-minor-scan-object.h \
-       sgen-marksweep-drain-gray-stack.h       \
-       sgen-protocol.h         \
-       sgen-protocol-def.h             \
-       sgen-scan-object.h      \
-       sgen-nursery-allocator.c        \
-       sgen-hash-table.c       \
-       sgen-hash-table.h       \
-       sgen-descriptor.c               \
-       sgen-descriptor.h               \
-       sgen-alloc.c            \
-       sgen-debug.c            \
-       sgen-simple-nursery.c   \
-       sgen-split-nursery.c    \
-       sgen-memory-governor.c  \
-       sgen-memory-governor.h  \
        sgen-stw.c                              \
-       sgen-fin-weak-hash.c    \
-       sgen-layout-stats.c     \
-       sgen-layout-stats.h     \
-       sgen-qsort.c    \
-       sgen-qsort.h    \
-       sgen-thread-pool.c      \
-       sgen-thread-pool.h      \
-       sgen-tagged-pointer.h   \
        sgen-mono.c             \
-       sgen-client.h           \
        sgen-client-mono.h
 
 libmonoruntime_la_SOURCES = $(common_sources) $(gc_dependent_sources) $(null_gc_sources) $(boehm_sources)
diff --git a/mono/metadata/gc-internal-agnostic.h b/mono/metadata/gc-internal-agnostic.h
deleted file mode 100644 (file)
index e39c423..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * gc-internal-agnostic.h: Mono-agnostic GC interface.
- *
- * Copyright (C) 2015 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __MONO_METADATA_GCINTERNALAGNOSTIC_H__
-#define __MONO_METADATA_GCINTERNALAGNOSTIC_H__
-
-#include <config.h>
-#include <glib.h>
-#include <stdio.h>
-
-#include "mono/utils/mono-compiler.h"
-
-typedef struct {
-       guint minor_gc_count;
-       guint major_gc_count;
-       guint64 minor_gc_time;
-       guint64 major_gc_time;
-       guint64 major_gc_time_concurrent;
-} GCStats;
-
-extern GCStats gc_stats;
-
-/*
- * Try to register a foreign thread with the GC, if we fail or the backend
- * can't cope with this concept - we return FALSE.
- */
-extern gboolean mono_gc_register_thread (void *baseptr);
-
-gboolean mono_gc_parse_environment_string_extract_number (const char *str, size_t *out);
-
-void* mono_gc_make_descr_for_object (gsize *bitmap, int numbits, size_t obj_size);
-void* mono_gc_make_descr_for_array (int vector, gsize *elem_bitmap, int numbits, size_t elem_size);
-
-/* simple interface for data structures needed in the runtime */
-void* mono_gc_make_descr_from_bitmap (gsize *bitmap, int numbits);
-
-/* Return a root descriptor for a root with all refs */
-void* mono_gc_make_root_descr_all_refs (int numbits);
-
-/* Return the bitmap encoded by a descriptor */
-gsize* mono_gc_get_bitmap_for_descr (void *descr, int *numbits);
-
-/*
-These functions must be used when it's possible that either destination is not
-word aligned or size is not a multiple of word size.
-*/
-void mono_gc_bzero_atomic (void *dest, size_t size);
-void mono_gc_bzero_aligned (void *dest, size_t size);
-void mono_gc_memmove_atomic (void *dest, const void *src, size_t size);
-void mono_gc_memmove_aligned (void *dest, const void *src, size_t size);
-
-FILE *mono_gc_get_logfile (void);
-
-#endif
index 5952ac1bf43f6af769966b5e0bdf4b35b913a3c6..39fdc4cb70f722ef948e4d7ee0243211a8d5dd42 100644 (file)
@@ -13,7 +13,7 @@
 #include <glib.h>
 #include <mono/metadata/object-internals.h>
 #include <mono/metadata/threads-types.h>
-#include <mono/metadata/gc-internal-agnostic.h>
+#include <mono/sgen/gc-internal-agnostic.h>
 #include <mono/utils/gc_wrapper.h>
 
 #define mono_domain_finalizers_lock(domain) mono_mutex_lock (&(domain)->finalizable_objects_hash_lock);
diff --git a/mono/metadata/gc-memfuncs.c b/mono/metadata/gc-memfuncs.c
deleted file mode 100644 (file)
index c8acd5e..0000000
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * gc-memfuncs.c: Our own bzero/memmove.
- *
- * Copyright (C) 2013-2015 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/*
- * SGen cannot deal with invalid pointers on the heap or in registered roots.  Sometimes we
- * need to copy or zero out memory in code that might be interrupted by collections.  To
- * guarantee that those operations will not result in invalid pointers, we must do it
- * word-atomically.
- *
- * libc's bzero() and memcpy()/memmove() functions do not guarantee word-atomicity, even in
- * cases where one would assume so.  For instance, some implementations (like Darwin's on
- * x86) have variants of memcpy() using vector instructions.  Those may copy bytewise for
- * the region preceding the first vector-aligned address.  That region could be
- * word-aligned, but it would still be copied byte-wise.
- *
- * All our memory writes here are to "volatile" locations.  This is so that C compilers
- * don't "optimize" our code back to calls to bzero()/memmove().  LLVM, specifically, will
- * do that.
- */
-
-#include <config.h>
-#include <glib.h>
-#include <string.h>
-
-#include "mono/metadata/gc-internal-agnostic.h"
-
-#define ptr_mask ((sizeof (void*) - 1))
-#define _toi(ptr) ((size_t)ptr)
-#define unaligned_bytes(ptr) (_toi(ptr) & ptr_mask)
-#define align_down(ptr) ((void*)(_toi(ptr) & ~ptr_mask))
-#define align_up(ptr) ((void*) ((_toi(ptr) + ptr_mask) & ~ptr_mask))
-#if SIZEOF_VOID_P == 4
-#define bytes_to_words(n)      ((size_t)(n) >> 2)
-#elif SIZEOF_VOID_P == 8
-#define bytes_to_words(n)      ((size_t)(n) >> 3)
-#else
-#error We only support 32 and 64 bit architectures.
-#endif
-
-#define BZERO_WORDS(dest,words) do {                   \
-               void * volatile *__d = (void* volatile*)(dest);         \
-               int __n = (words);                      \
-               int __i;                                \
-               for (__i = 0; __i < __n; ++__i)         \
-                       __d [__i] = NULL;               \
-       } while (0)
-
-
-/**
- * mono_gc_bzero_aligned:
- * @dest: address to start to clear
- * @size: size of the region to clear
- *
- * Zero @size bytes starting at @dest.
- * The address of @dest MUST be aligned to word boundaries
- *
- * FIXME borrow faster code from some BSD libc or bionic
- */
-void
-mono_gc_bzero_aligned (void *dest, size_t size)
-{
-       volatile char *d = (char*)dest;
-       size_t tail_bytes, word_bytes;
-
-       g_assert (unaligned_bytes (dest) == 0);
-
-       /* copy all words with memmove */
-       word_bytes = (size_t)align_down (size);
-       switch (word_bytes) {
-       case sizeof (void*) * 1:
-               BZERO_WORDS (d, 1);
-               break;
-       case sizeof (void*) * 2:
-               BZERO_WORDS (d, 2);
-               break;
-       case sizeof (void*) * 3:
-               BZERO_WORDS (d, 3);
-               break;
-       case sizeof (void*) * 4:
-               BZERO_WORDS (d, 4);
-               break;
-       default:
-               BZERO_WORDS (d, bytes_to_words (word_bytes));
-       }
-
-       tail_bytes = unaligned_bytes (size);
-       if (tail_bytes) {
-               d += word_bytes;
-               do {
-                       *d++ = 0;
-               } while (--tail_bytes);
-       }
-}
-
-/**
- * mono_gc_bzero_atomic:
- * @dest: address to start to clear
- * @size: size of the region to clear
- *
- * Zero @size bytes starting at @dest.
- *
- * Use this to zero memory without word tearing when dest is aligned.
- */
-void
-mono_gc_bzero_atomic (void *dest, size_t size)
-{
-       if (unaligned_bytes (dest))
-               memset (dest, 0, size);
-       else
-               mono_gc_bzero_aligned (dest, size);
-}
-
-#define MEMMOVE_WORDS_UPWARD(dest,src,words) do {      \
-               void * volatile *__d = (void* volatile*)(dest);         \
-               void **__s = (void**)(src);             \
-               int __n = (int)(words);                 \
-               int __i;                                \
-               for (__i = 0; __i < __n; ++__i)         \
-                       __d [__i] = __s [__i];          \
-       } while (0)
-
-#define MEMMOVE_WORDS_DOWNWARD(dest,src,words) do {    \
-               void * volatile *__d = (void* volatile*)(dest);         \
-               void **__s = (void**)(src);             \
-               int __n = (int)(words);                 \
-               int __i;                                \
-               for (__i = __n - 1; __i >= 0; --__i)    \
-                       __d [__i] = __s [__i];          \
-       } while (0)
-
-
-/**
- * mono_gc_memmove_aligned:
- * @dest: destination of the move
- * @src: source
- * @size: size of the block to move
- *
- * Move @size bytes from @src to @dest.
- *
- * Use this to copy memory without word tearing when both pointers are aligned
- */void
-mono_gc_memmove_aligned (void *dest, const void *src, size_t size)
-{
-       g_assert (unaligned_bytes (dest) == 0);
-       g_assert (unaligned_bytes (src) == 0);
-
-       /*
-       If we're copying less than a word we don't need to worry about word tearing
-       so we bailout to memmove early.
-       */
-       if (size < sizeof(void*)) {
-               memmove (dest, src, size);
-               return;
-       }
-
-       /*
-        * A bit of explanation on why we align only dest before doing word copies.
-        * Pointers to managed objects must always be stored in word aligned addresses, so
-        * even if dest is misaligned, src will be by the same amount - this ensure proper atomicity of reads.
-        *
-        * We don't need to case when source and destination have different alignments since we only do word stores
-        * using memmove, which must handle it.
-        */
-       if (dest > src && ((size_t)((char*)dest - (char*)src) < size)) { /*backward copy*/
-                       volatile char *p = (char*)dest + size;
-                       char *s = (char*)src + size;
-                       char *start = (char*)dest;
-                       char *align_end = MAX((char*)dest, (char*)align_down (p));
-                       char *word_start;
-                       size_t bytes_to_memmove;
-
-                       while (p > align_end)
-                               *--p = *--s;
-
-                       word_start = align_up (start);
-                       bytes_to_memmove = p - word_start;
-                       p -= bytes_to_memmove;
-                       s -= bytes_to_memmove;
-                       MEMMOVE_WORDS_DOWNWARD (p, s, bytes_to_words (bytes_to_memmove));
-       } else {
-               volatile char *d = (char*)dest;
-               const char *s = (const char*)src;
-               size_t tail_bytes;
-
-               /* copy all words with memmove */
-               MEMMOVE_WORDS_UPWARD (d, s, bytes_to_words (align_down (size)));
-
-               tail_bytes = unaligned_bytes (size);
-               if (tail_bytes) {
-                       d += (size_t)align_down (size);
-                       s += (size_t)align_down (size);
-                       do {
-                               *d++ = *s++;
-                       } while (--tail_bytes);
-               }
-       }
-}
-
-/**
- * mono_gc_memmove_atomic:
- * @dest: destination of the move
- * @src: source
- * @size: size of the block to move
- *
- * Move @size bytes from @src to @dest.
- *
- * Use this to copy memory without word tearing when both pointers are aligned
- */
-void
-mono_gc_memmove_atomic (void *dest, const void *src, size_t size)
-{
-       if (unaligned_bytes (_toi (dest) | _toi (src)))
-               memmove (dest, src, size);
-       else
-               mono_gc_memmove_aligned (dest, src, size);
-}
diff --git a/mono/metadata/gc-parse.c b/mono/metadata/gc-parse.c
deleted file mode 100644 (file)
index 1585bae..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * gc-parse.c: Parsing for GC options.
- *
- * Copyright (C) 2015 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <config.h>
-#include <glib.h>
-#include <string.h>
-#include <errno.h>
-#include <ctype.h>
-#include <stdlib.h>
-
-#include "mono/metadata/gc-internal-agnostic.h"
-
-/**
- * mono_gc_parse_environment_string_extract_number:
- *
- * @str: points to the first digit of the number
- * @out: pointer to the variable that will receive the value
- *
- * Tries to extract a number from the passed string, taking in to account m, k
- * and g suffixes
- *
- * Returns true if passing was successful
- */
-gboolean
-mono_gc_parse_environment_string_extract_number (const char *str, size_t *out)
-{
-       char *endptr;
-       int len = strlen (str), shift = 0;
-       size_t val;
-       gboolean is_suffix = FALSE;
-       char suffix;
-
-       if (!len)
-               return FALSE;
-
-       suffix = str [len - 1];
-
-       switch (suffix) {
-               case 'g':
-               case 'G':
-                       shift += 10;
-               case 'm':
-               case 'M':
-                       shift += 10;
-               case 'k':
-               case 'K':
-                       shift += 10;
-                       is_suffix = TRUE;
-                       break;
-               default:
-                       if (!isdigit (suffix))
-                               return FALSE;
-                       break;
-       }
-
-       errno = 0;
-       val = strtol (str, &endptr, 10);
-
-       if ((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN))
-                       || (errno != 0 && val == 0) || (endptr == str))
-               return FALSE;
-
-       if (is_suffix) {
-               size_t unshifted;
-
-               if (*(endptr + 1)) /* Invalid string. */
-                       return FALSE;
-
-               unshifted = (size_t)val;
-               val <<= shift;
-               if (((size_t)val >> shift) != unshifted) /* value too large */
-                       return FALSE;
-       }
-
-       *out = val;
-       return TRUE;
-}
index f21ac02d0bf84d424ec5d6a03e438a2c386f3a14..20e7cae5831d5dc404c7516f55be8cc992fd07fb 100644 (file)
@@ -17,7 +17,7 @@
  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#include "mono/metadata/gc-internal-agnostic.h"
+#include "mono/sgen/gc-internal-agnostic.h"
 
 /*
  * Due to a bug in the linker on Darwin we need to initialize this struct, or there will be
index 7ff92ddc527aa29798c4803c300841d204256956..f6a4eb920822c0d2570d55a23c93893dfcc81ab0 100644 (file)
@@ -25,7 +25,7 @@
 #include <mono/metadata/threadpool.h>
 #include <mono/metadata/threadpool-internals.h>
 #include <mono/metadata/threads-types.h>
-#include <mono/metadata/sgen-conf.h>
+#include <mono/sgen/sgen-conf.h>
 #include <mono/utils/mono-logger-internal.h>
 #include <mono/metadata/gc-internal.h>
 #include <mono/metadata/marshal.h> /* for mono_delegate_free_ftnptr () */
diff --git a/mono/metadata/sgen-alloc.c b/mono/metadata/sgen-alloc.c
deleted file mode 100644 (file)
index 898f509..0000000
+++ /dev/null
@@ -1,569 +0,0 @@
-/*
- * sgen-alloc.c: Object allocation routines + managed allocators
- *
- * Author:
- *     Paolo Molaro (lupus@ximian.com)
- *  Rodrigo Kumpera (kumpera@gmail.com)
- *
- * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
- * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
- * Copyright 2011 Xamarin, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/*
- * ######################################################################
- * ########  Object allocation
- * ######################################################################
- * This section of code deals with allocating memory for objects.
- * There are several ways:
- * *) allocate large objects
- * *) allocate normal objects
- * *) fast lock-free allocation
- * *) allocation of pinned objects
- */
-
-#include "config.h"
-#ifdef HAVE_SGEN_GC
-
-#include <string.h>
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/sgen-protocol.h"
-#include "mono/metadata/sgen-memory-governor.h"
-#include "mono/metadata/sgen-client.h"
-#include "mono/utils/mono-memory-model.h"
-
-#define ALIGN_UP               SGEN_ALIGN_UP
-#define ALLOC_ALIGN            SGEN_ALLOC_ALIGN
-#define MAX_SMALL_OBJ_SIZE     SGEN_MAX_SMALL_OBJ_SIZE
-
-#ifdef HEAVY_STATISTICS
-static guint64 stat_objects_alloced = 0;
-static guint64 stat_bytes_alloced = 0;
-static guint64 stat_bytes_alloced_los = 0;
-
-#endif
-
-/*
- * Allocation is done from a Thread Local Allocation Buffer (TLAB). TLABs are allocated
- * from nursery fragments.
- * tlab_next is the pointer to the space inside the TLAB where the next object will 
- * be allocated.
- * tlab_temp_end is the pointer to the end of the temporary space reserved for
- * the allocation: it allows us to set the scan starts at reasonable intervals.
- * tlab_real_end points to the end of the TLAB.
- */
-
-/*
- * FIXME: What is faster, a TLS variable pointing to a structure, or separate TLS 
- * variables for next+temp_end ?
- */
-#ifdef HAVE_KW_THREAD
-static __thread char *tlab_start;
-static __thread char *tlab_next;
-static __thread char *tlab_temp_end;
-static __thread char *tlab_real_end;
-/* Used by the managed allocator/wbarrier */
-static __thread char **tlab_next_addr MONO_ATTR_USED;
-#endif
-
-#ifdef HAVE_KW_THREAD
-#define TLAB_START     tlab_start
-#define TLAB_NEXT      tlab_next
-#define TLAB_TEMP_END  tlab_temp_end
-#define TLAB_REAL_END  tlab_real_end
-#else
-#define TLAB_START     (__thread_info__->tlab_start)
-#define TLAB_NEXT      (__thread_info__->tlab_next)
-#define TLAB_TEMP_END  (__thread_info__->tlab_temp_end)
-#define TLAB_REAL_END  (__thread_info__->tlab_real_end)
-#endif
-
-static void*
-alloc_degraded (GCVTable *vtable, size_t size, gboolean for_mature)
-{
-       void *p;
-
-       if (!for_mature) {
-               sgen_client_degraded_allocation (size);
-               SGEN_ATOMIC_ADD_P (degraded_mode, size);
-               sgen_ensure_free_space (size);
-       } else {
-               if (sgen_need_major_collection (size))
-                       sgen_perform_collection (size, GENERATION_OLD, "mature allocation failure", !for_mature);
-       }
-
-
-       p = major_collector.alloc_degraded (vtable, size);
-
-       if (!for_mature)
-               binary_protocol_alloc_degraded (p, vtable, size, sgen_client_get_provenance ());
-
-       return p;
-}
-
-static void
-zero_tlab_if_necessary (void *p, size_t size)
-{
-       if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION || nursery_clear_policy == CLEAR_AT_TLAB_CREATION_DEBUG) {
-               memset (p, 0, size);
-       } else {
-               /*
-                * This function is called for all allocations in
-                * TLABs.  TLABs originate from fragments, which are
-                * initialized to be faux arrays.  The remainder of
-                * the fragments are zeroed out at initialization for
-                * CLEAR_AT_GC, so here we just need to make sure that
-                * the array header is zeroed.  Since we don't know
-                * whether we're called for the start of a fragment or
-                * for somewhere in between, we zero in any case, just
-                * to make sure.
-                */
-               sgen_client_zero_array_fill_header (p, size);
-       }
-}
-
-/*
- * Provide a variant that takes just the vtable for small fixed-size objects.
- * The aligned size is already computed and stored in vt->gc_descr.
- * Note: every SGEN_SCAN_START_SIZE or so we are given the chance to do some special
- * processing. We can keep track of where objects start, for example,
- * so when we scan the thread stacks for pinned objects, we can start
- * a search for the pinned object in SGEN_SCAN_START_SIZE chunks.
- */
-void*
-sgen_alloc_obj_nolock (GCVTable *vtable, size_t size)
-{
-       /* FIXME: handle OOM */
-       void **p;
-       char *new_next;
-       size_t real_size = size;
-       TLAB_ACCESS_INIT;
-       
-       CANARIFY_SIZE(size);
-
-       HEAVY_STAT (++stat_objects_alloced);
-       if (real_size <= SGEN_MAX_SMALL_OBJ_SIZE)
-               HEAVY_STAT (stat_bytes_alloced += size);
-       else
-               HEAVY_STAT (stat_bytes_alloced_los += size);
-
-       size = ALIGN_UP (size);
-
-       SGEN_ASSERT (6, sgen_vtable_get_descriptor (vtable), "VTable without descriptor");
-
-       if (G_UNLIKELY (has_per_allocation_action)) {
-               static int alloc_count;
-               int current_alloc = InterlockedIncrement (&alloc_count);
-
-               if (collect_before_allocs) {
-                       if (((current_alloc % collect_before_allocs) == 0) && nursery_section) {
-                               sgen_perform_collection (0, GENERATION_NURSERY, "collect-before-alloc-triggered", TRUE);
-                               if (!degraded_mode && sgen_can_alloc_size (size) && real_size <= SGEN_MAX_SMALL_OBJ_SIZE) {
-                                       // FIXME:
-                                       g_assert_not_reached ();
-                               }
-                       }
-               } else if (verify_before_allocs) {
-                       if ((current_alloc % verify_before_allocs) == 0)
-                               sgen_check_whole_heap_stw ();
-               }
-       }
-
-       /*
-        * We must already have the lock here instead of after the
-        * fast path because we might be interrupted in the fast path
-        * (after confirming that new_next < TLAB_TEMP_END) by the GC,
-        * and we'll end up allocating an object in a fragment which
-        * no longer belongs to us.
-        *
-        * The managed allocator does not do this, but it's treated
-        * specially by the world-stopping code.
-        */
-
-       if (real_size > SGEN_MAX_SMALL_OBJ_SIZE) {
-               p = sgen_los_alloc_large_inner (vtable, ALIGN_UP (real_size));
-       } else {
-               /* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */
-
-               p = (void**)TLAB_NEXT;
-               /* FIXME: handle overflow */
-               new_next = (char*)p + size;
-               TLAB_NEXT = new_next;
-
-               if (G_LIKELY (new_next < TLAB_TEMP_END)) {
-                       /* Fast path */
-
-                       /* 
-                        * FIXME: We might need a memory barrier here so the change to tlab_next is 
-                        * visible before the vtable store.
-                        */
-
-                       CANARIFY_ALLOC(p,real_size);
-                       SGEN_LOG (6, "Allocated object %p, vtable: %p (%s), size: %zd", p, vtable, sgen_client_vtable_get_name (vtable), size);
-                       binary_protocol_alloc (p , vtable, size, sgen_client_get_provenance ());
-                       g_assert (*p == NULL);
-                       mono_atomic_store_seq (p, vtable);
-
-                       return p;
-               }
-
-               /* Slow path */
-
-               /* there are two cases: the object is too big or we run out of space in the TLAB */
-               /* we also reach here when the thread does its first allocation after a minor 
-                * collection, since the tlab_ variables are initialized to NULL.
-                * there can be another case (from ORP), if we cooperate with the runtime a bit:
-                * objects that need finalizers can have the high bit set in their size
-                * so the above check fails and we can readily add the object to the queue.
-                * This avoids taking again the GC lock when registering, but this is moot when
-                * doing thread-local allocation, so it may not be a good idea.
-                */
-               if (TLAB_NEXT >= TLAB_REAL_END) {
-                       int available_in_tlab;
-                       /* 
-                        * Run out of space in the TLAB. When this happens, some amount of space
-                        * remains in the TLAB, but not enough to satisfy the current allocation
-                        * request. Currently, we retire the TLAB in all cases, later we could
-                        * keep it if the remaining space is above a treshold, and satisfy the
-                        * allocation directly from the nursery.
-                        */
-                       TLAB_NEXT -= size;
-                       /* when running in degraded mode, we continue allocing that way
-                        * for a while, to decrease the number of useless nursery collections.
-                        */
-                       if (degraded_mode && degraded_mode < DEFAULT_NURSERY_SIZE)
-                               return alloc_degraded (vtable, size, FALSE);
-
-                       available_in_tlab = (int)(TLAB_REAL_END - TLAB_NEXT);//We'll never have tlabs > 2Gb
-                       if (size > tlab_size || available_in_tlab > SGEN_MAX_NURSERY_WASTE) {
-                               /* Allocate directly from the nursery */
-                               p = sgen_nursery_alloc (size);
-                               if (!p) {
-                                       /*
-                                        * We couldn't allocate from the nursery, so we try
-                                        * collecting.  Even after the collection, we might
-                                        * still not have enough memory to allocate the
-                                        * object.  The reason will most likely be that we've
-                                        * run out of memory, but there is the theoretical
-                                        * possibility that other threads might have consumed
-                                        * the freed up memory ahead of us.
-                                        *
-                                        * What we do in this case is allocate degraded, i.e.,
-                                        * from the major heap.
-                                        *
-                                        * Ideally we'd like to detect the case of other
-                                        * threads allocating ahead of us and loop (if we
-                                        * always loop we will loop endlessly in the case of
-                                        * OOM).
-                                        */
-                                       sgen_ensure_free_space (real_size);
-                                       if (!degraded_mode)
-                                               p = sgen_nursery_alloc (size);
-                               }
-                               if (!p)
-                                       return alloc_degraded (vtable, size, FALSE);
-
-                               zero_tlab_if_necessary (p, size);
-                       } else {
-                               size_t alloc_size = 0;
-                               if (TLAB_START)
-                                       SGEN_LOG (3, "Retire TLAB: %p-%p [%ld]", TLAB_START, TLAB_REAL_END, (long)(TLAB_REAL_END - TLAB_NEXT - size));
-                               sgen_nursery_retire_region (p, available_in_tlab);
-
-                               p = sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
-                               if (!p) {
-                                       /* See comment above in similar case. */
-                                       sgen_ensure_free_space (tlab_size);
-                                       if (!degraded_mode)
-                                               p = sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
-                               }
-                               if (!p)
-                                       return alloc_degraded (vtable, size, FALSE);
-
-                               /* Allocate a new TLAB from the current nursery fragment */
-                               TLAB_START = (char*)p;
-                               TLAB_NEXT = TLAB_START;
-                               TLAB_REAL_END = TLAB_START + alloc_size;
-                               TLAB_TEMP_END = TLAB_START + MIN (SGEN_SCAN_START_SIZE, alloc_size);
-
-                               zero_tlab_if_necessary (TLAB_START, alloc_size);
-
-                               /* Allocate from the TLAB */
-                               p = (void*)TLAB_NEXT;
-                               TLAB_NEXT += size;
-                               sgen_set_nursery_scan_start ((char*)p);
-                       }
-               } else {
-                       /* Reached tlab_temp_end */
-
-                       /* record the scan start so we can find pinned objects more easily */
-                       sgen_set_nursery_scan_start ((char*)p);
-                       /* we just bump tlab_temp_end as well */
-                       TLAB_TEMP_END = MIN (TLAB_REAL_END, TLAB_NEXT + SGEN_SCAN_START_SIZE);
-                       SGEN_LOG (5, "Expanding local alloc: %p-%p", TLAB_NEXT, TLAB_TEMP_END);
-               }
-               CANARIFY_ALLOC(p,real_size);
-       }
-
-       if (G_LIKELY (p)) {
-               SGEN_LOG (6, "Allocated object %p, vtable: %p (%s), size: %zd", p, vtable, sgen_client_vtable_get_name (vtable), size);
-               binary_protocol_alloc (p, vtable, size, sgen_client_get_provenance ());
-               mono_atomic_store_seq (p, vtable);
-       }
-
-       return p;
-}
-
-void*
-sgen_try_alloc_obj_nolock (GCVTable *vtable, size_t size)
-{
-       void **p;
-       char *new_next;
-       size_t real_size = size;
-       TLAB_ACCESS_INIT;
-
-       CANARIFY_SIZE(size);
-
-       size = ALIGN_UP (size);
-       SGEN_ASSERT (9, real_size >= SGEN_CLIENT_MINIMUM_OBJECT_SIZE, "Object too small");
-
-       SGEN_ASSERT (6, sgen_vtable_get_descriptor (vtable), "VTable without descriptor");
-
-       if (real_size > SGEN_MAX_SMALL_OBJ_SIZE)
-               return NULL;
-
-       if (G_UNLIKELY (size > tlab_size)) {
-               /* Allocate directly from the nursery */
-               p = sgen_nursery_alloc (size);
-               if (!p)
-                       return NULL;
-               sgen_set_nursery_scan_start ((char*)p);
-
-               /*FIXME we should use weak memory ops here. Should help specially on x86. */
-               zero_tlab_if_necessary (p, size);
-       } else {
-               int available_in_tlab;
-               char *real_end;
-               /* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */
-
-               p = (void**)TLAB_NEXT;
-               /* FIXME: handle overflow */
-               new_next = (char*)p + size;
-
-               real_end = TLAB_REAL_END;
-               available_in_tlab = (int)(real_end - (char*)p);//We'll never have tlabs > 2Gb
-
-               if (G_LIKELY (new_next < real_end)) {
-                       TLAB_NEXT = new_next;
-
-                       /* Second case, we overflowed temp end */
-                       if (G_UNLIKELY (new_next >= TLAB_TEMP_END)) {
-                               sgen_set_nursery_scan_start (new_next);
-                               /* we just bump tlab_temp_end as well */
-                               TLAB_TEMP_END = MIN (TLAB_REAL_END, TLAB_NEXT + SGEN_SCAN_START_SIZE);
-                               SGEN_LOG (5, "Expanding local alloc: %p-%p", TLAB_NEXT, TLAB_TEMP_END);
-                       }
-               } else if (available_in_tlab > SGEN_MAX_NURSERY_WASTE) {
-                       /* Allocate directly from the nursery */
-                       p = sgen_nursery_alloc (size);
-                       if (!p)
-                               return NULL;
-
-                       zero_tlab_if_necessary (p, size);
-               } else {
-                       size_t alloc_size = 0;
-
-                       sgen_nursery_retire_region (p, available_in_tlab);
-                       new_next = sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
-                       p = (void**)new_next;
-                       if (!p)
-                               return NULL;
-
-                       TLAB_START = (char*)new_next;
-                       TLAB_NEXT = new_next + size;
-                       TLAB_REAL_END = new_next + alloc_size;
-                       TLAB_TEMP_END = new_next + MIN (SGEN_SCAN_START_SIZE, alloc_size);
-                       sgen_set_nursery_scan_start ((char*)p);
-
-                       zero_tlab_if_necessary (new_next, alloc_size);
-               }
-       }
-
-       HEAVY_STAT (++stat_objects_alloced);
-       HEAVY_STAT (stat_bytes_alloced += size);
-
-       CANARIFY_ALLOC(p,real_size);
-       SGEN_LOG (6, "Allocated object %p, vtable: %p (%s), size: %zd", p, vtable, sgen_client_vtable_get_name (vtable), size);
-       binary_protocol_alloc (p, vtable, size, sgen_client_get_provenance ());
-       g_assert (*p == NULL); /* FIXME disable this in non debug builds */
-
-       mono_atomic_store_seq (p, vtable);
-
-       return p;
-}
-
-void*
-sgen_alloc_obj (GCVTable *vtable, size_t size)
-{
-       void *res;
-       TLAB_ACCESS_INIT;
-
-       if (!SGEN_CAN_ALIGN_UP (size))
-               return NULL;
-
-#ifndef DISABLE_CRITICAL_REGION
-
-       if (G_UNLIKELY (has_per_allocation_action)) {
-               static int alloc_count;
-               int current_alloc = InterlockedIncrement (&alloc_count);
-
-               if (verify_before_allocs) {
-                       if ((current_alloc % verify_before_allocs) == 0)
-                               sgen_check_whole_heap_stw ();
-               }
-               if (collect_before_allocs) {
-                       if (((current_alloc % collect_before_allocs) == 0) && nursery_section) {
-                               LOCK_GC;
-                               sgen_perform_collection (0, GENERATION_NURSERY, "collect-before-alloc-triggered", TRUE);
-                               UNLOCK_GC;
-                       }
-               }
-       }
-
-       ENTER_CRITICAL_REGION;
-       res = sgen_try_alloc_obj_nolock ((GCVTable*)vtable, size);
-       if (res) {
-               EXIT_CRITICAL_REGION;
-               return res;
-       }
-       EXIT_CRITICAL_REGION;
-#endif
-       LOCK_GC;
-       res = sgen_alloc_obj_nolock ((GCVTable*)vtable, size);
-       UNLOCK_GC;
-       if (G_UNLIKELY (!res))
-               sgen_client_out_of_memory (size);
-       return res;
-}
-
-/*
- * To be used for interned strings and possibly MonoThread, reflection handles.
- * We may want to explicitly free these objects.
- */
-void*
-sgen_alloc_obj_pinned (GCVTable *vtable, size_t size)
-{
-       void **p;
-
-       if (!SGEN_CAN_ALIGN_UP (size))
-               return NULL;
-       size = ALIGN_UP (size);
-
-       LOCK_GC;
-
-       if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
-               /* large objects are always pinned anyway */
-               p = sgen_los_alloc_large_inner ((GCVTable*)vtable, size);
-       } else {
-               SGEN_ASSERT (9, sgen_client_vtable_is_inited (vtable), "class %s:%s is not initialized", sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable));
-               p = major_collector.alloc_small_pinned_obj ((GCVTable*)vtable, size, SGEN_VTABLE_HAS_REFERENCES ((GCVTable*)vtable));
-       }
-       if (G_LIKELY (p)) {
-               SGEN_LOG (6, "Allocated pinned object %p, vtable: %p (%s), size: %zd", p, vtable, sgen_client_vtable_get_name (vtable), size);
-               binary_protocol_alloc_pinned (p, vtable, size, sgen_client_get_provenance ());
-       }
-       UNLOCK_GC;
-       return p;
-}
-
-void*
-sgen_alloc_obj_mature (GCVTable *vtable, size_t size)
-{
-       void *res;
-
-       if (!SGEN_CAN_ALIGN_UP (size))
-               return NULL;
-       size = ALIGN_UP (size);
-
-       LOCK_GC;
-       res = alloc_degraded ((GCVTable*)vtable, size, TRUE);
-       UNLOCK_GC;
-
-       return res;
-}
-
-void
-sgen_init_tlab_info (SgenThreadInfo* info)
-{
-#ifndef HAVE_KW_THREAD
-       SgenThreadInfo *__thread_info__ = info;
-#endif
-
-       info->tlab_start_addr = &TLAB_START;
-       info->tlab_next_addr = &TLAB_NEXT;
-       info->tlab_temp_end_addr = &TLAB_TEMP_END;
-       info->tlab_real_end_addr = &TLAB_REAL_END;
-
-#ifdef HAVE_KW_THREAD
-       tlab_next_addr = &tlab_next;
-#endif
-}
-
-/*
- * Clear the thread local TLAB variables for all threads.
- */
-void
-sgen_clear_tlabs (void)
-{
-       SgenThreadInfo *info;
-
-       FOREACH_THREAD (info) {
-               /* A new TLAB will be allocated when the thread does its first allocation */
-               *info->tlab_start_addr = NULL;
-               *info->tlab_next_addr = NULL;
-               *info->tlab_temp_end_addr = NULL;
-               *info->tlab_real_end_addr = NULL;
-       } END_FOREACH_THREAD
-}
-
-void
-sgen_init_allocator (void)
-{
-#if defined(HAVE_KW_THREAD) && !defined(SGEN_WITHOUT_MONO)
-       int tlab_next_addr_offset = -1;
-       int tlab_temp_end_offset = -1;
-
-
-       MONO_THREAD_VAR_OFFSET (tlab_next_addr, tlab_next_addr_offset);
-       MONO_THREAD_VAR_OFFSET (tlab_temp_end, tlab_temp_end_offset);
-
-       mono_tls_key_set_offset (TLS_KEY_SGEN_TLAB_NEXT_ADDR, tlab_next_addr_offset);
-       mono_tls_key_set_offset (TLS_KEY_SGEN_TLAB_TEMP_END, tlab_temp_end_offset);
-
-       g_assert (tlab_next_addr_offset != -1);
-       g_assert (tlab_temp_end_offset != -1);
-#endif
-
-#ifdef HEAVY_STATISTICS
-       mono_counters_register ("# objects allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_alloced);
-       mono_counters_register ("bytes allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_bytes_alloced);
-       mono_counters_register ("bytes allocated in LOS", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_bytes_alloced_los);
-#endif
-}
-
-#endif /*HAVE_SGEN_GC*/
diff --git a/mono/metadata/sgen-archdep.h b/mono/metadata/sgen-archdep.h
deleted file mode 100644 (file)
index 410ba6a..0000000
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * sgen-archdep.h: Architecture dependent parts of SGen.
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#ifndef __MONO_SGENARCHDEP_H__
-#define __MONO_SGENARCHDEP_H__
-
-#include <mono/utils/mono-context.h>
-
-/*
- * Define either USE_MONO_CTX, or
- * ARCH_SIGCTX_SP/ARCH_SIGCTX_IP/ARCH_STORE_REGS/ARCH_COPY_SIGCTX_REGS.
- * Define ARCH_NUM_REGS to be the number of general registers in MonoContext, or the
- * number of registers stored by ARCH_STORE_REGS.
- */
-
-#if defined(MONO_CROSS_COMPILE)
-
-#define REDZONE_SIZE   0
-
-#define ARCH_NUM_REGS 0
-#define ARCH_STORE_REGS(ptr)
-#define ARCH_SIGCTX_SP(ctx) NULL
-#define ARCH_SIGCTX_IP(ctx) NULL
-#define ARCH_COPY_SIGCTX_REGS(a,ctx)
-
-#elif defined(TARGET_X86)
-
-#define REDZONE_SIZE   0
-
-#define ARCH_NUM_REGS 8
-
-#ifndef MONO_ARCH_HAS_MONO_CONTEXT
-#error 0
-#endif
-
-#define USE_MONO_CTX
-
-#elif defined(TARGET_AMD64)
-
-#define REDZONE_SIZE   128
-
-#define ARCH_NUM_REGS 16
-#define USE_MONO_CTX
-
-#elif defined(TARGET_POWERPC)
-
-#define REDZONE_SIZE   224
-
-#define ARCH_NUM_REGS 32
-#ifdef __APPLE__
-#define ARCH_STORE_REGS(ptr)   \
-       __asm__ __volatile__(   \
-               "stmw r0, 0(%0)\n"      \
-               :                       \
-               : "b" (ptr)             \
-       )
-#else
-#define ARCH_STORE_REGS(ptr)   \
-       __asm__ __volatile__(   \
-               "stmw 0, 0(%0)\n"       \
-               :                       \
-               : "b" (ptr)             \
-       )
-#endif
-#define ARCH_SIGCTX_SP(ctx)    (UCONTEXT_REG_Rn((ctx), 1))
-#define ARCH_SIGCTX_IP(ctx)    (UCONTEXT_REG_NIP((ctx)))
-#define ARCH_COPY_SIGCTX_REGS(a,ctx) do {      \
-       int __i;        \
-       for (__i = 0; __i < 32; ++__i)  \
-               ((a)[__i]) = (gpointer) UCONTEXT_REG_Rn((ctx), __i);    \
-       } while (0)
-
-/* MS_BLOCK_SIZE must be a multiple of the system pagesize, which for some
-   archs is 64k.  */
-#if defined(TARGET_POWERPC64) && _CALL_ELF == 2
-#define ARCH_MIN_MS_BLOCK_SIZE (64*1024)
-#define ARCH_MIN_MS_BLOCK_SIZE_SHIFT   16
-#endif
-
-#elif defined(TARGET_ARM)
-
-#define REDZONE_SIZE   0
-#define USE_MONO_CTX
-
-/* We dont store ip, sp */
-#define ARCH_NUM_REGS 14
-
-#elif defined(TARGET_ARM64)
-
-#ifdef __linux__
-#define REDZONE_SIZE    0
-#elif defined(__APPLE__)
-#define REDZONE_SIZE   128
-#else
-#error "Not implemented."
-#endif
-#define USE_MONO_CTX
-#define ARCH_NUM_REGS 31
-
-#elif defined(__mips__)
-
-#define REDZONE_SIZE   0
-
-#define USE_MONO_CTX
-#define ARCH_NUM_REGS 32
-
-#elif defined(__s390x__)
-
-#define REDZONE_SIZE   0
-
-#define USE_MONO_CTX
-#define ARCH_NUM_REGS 16       
-
-#elif defined(__sparc__)
-
-#define REDZONE_SIZE   0
-
-/* Don't bother with %g0 (%r0), it's always hard-coded to zero */
-#define ARCH_NUM_REGS 15       
-#ifdef __sparcv9
-#define ARCH_STORE_REGS(ptr)   \
-       __asm__ __volatile__(   \
-               "st %%g1,[%0]\n\t"      \
-               "st %%g2,[%0+0x08]\n\t" \
-               "st %%g3,[%0+0x10]\n\t" \
-               "st %%g4,[%0+0x18]\n\t" \
-               "st %%g5,[%0+0x20]\n\t" \
-               "st %%g6,[%0+0x28]\n\t" \
-               "st %%g7,[%0+0x30]\n\t" \
-               "st %%o0,[%0+0x38]\n\t" \
-               "st %%o1,[%0+0x40]\n\t" \
-               "st %%o2,[%0+0x48]\n\t" \
-               "st %%o3,[%0+0x50]\n\t" \
-               "st %%o4,[%0+0x58]\n\t" \
-               "st %%o5,[%0+0x60]\n\t" \
-               "st %%o6,[%0+0x68]\n\t" \
-               "st %%o7,[%0+0x70]\n\t" \
-               :                       \
-               : "r" (ptr)             \
-               : "memory"                      \
-       )
-#else
-#define ARCH_STORE_REGS(ptr)   \
-       __asm__ __volatile__(   \
-               "st %%g1,[%0]\n\t"      \
-               "st %%g2,[%0+0x04]\n\t" \
-               "st %%g3,[%0+0x08]\n\t" \
-               "st %%g4,[%0+0x0c]\n\t" \
-               "st %%g5,[%0+0x10]\n\t" \
-               "st %%g6,[%0+0x14]\n\t" \
-               "st %%g7,[%0+0x18]\n\t" \
-               "st %%o0,[%0+0x1c]\n\t" \
-               "st %%o1,[%0+0x20]\n\t" \
-               "st %%o2,[%0+0x24]\n\t" \
-               "st %%o3,[%0+0x28]\n\t" \
-               "st %%o4,[%0+0x2c]\n\t" \
-               "st %%o5,[%0+0x30]\n\t" \
-               "st %%o6,[%0+0x34]\n\t" \
-               "st %%o7,[%0+0x38]\n\t" \
-               :                       \
-               : "r" (ptr)             \
-               : "memory"                      \
-       )
-#endif
-
-#ifndef REG_SP
-#define REG_SP REG_O6
-#endif
-
-#define ARCH_SIGCTX_SP(ctx)    (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_SP])
-#define ARCH_SIGCTX_IP(ctx)    (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_PC])
-#define ARCH_COPY_SIGCTX_REGS(a,ctx) do {      \
-       (a)[0] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_G1]);        \
-       (a)[1] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_G2]);        \
-       (a)[2] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_G3]);        \
-       (a)[3] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_G4]);        \
-       (a)[4] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_G5]);        \
-       (a)[5] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_G6]);        \
-       (a)[6] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_G7]);        \
-       (a)[7] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_O0]);        \
-       (a)[8] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_O1]);        \
-       (a)[9] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_O2]);        \
-       (a)[10] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_O3]);       \
-       (a)[11] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_O4]);       \
-       (a)[12] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_O5]);       \
-       (a)[13] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_O6]);       \
-       (a)[14] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_O7]);       \
-       } while (0)
-
-#endif
-
-#endif /* __MONO_SGENARCHDEP_H__ */
index 5facf711886c5a3f7a95df789e7c200fe40f6e05..1e5c3a1777de5694090dba44d6ebc7ddfb70432d 100644 (file)
@@ -26,7 +26,7 @@
 
 #include "mono/utils/mono-compiler.h"
 
-#include "mono/metadata/sgen-gc.h"
+#include "mono/sgen/sgen-gc.h"
 #include "mono/metadata/sgen-bridge.h"
 
 extern gboolean bridge_processing_in_progress;
index e015d88cdf32a5794102d0654e2f837abe1de405..16af4ca8a273b1a59f74ed971d441ada958260d9 100644 (file)
 
 #include <stdlib.h>
 
-#include "sgen-gc.h"
+#include "sgen/sgen-gc.h"
 #include "sgen-bridge-internal.h"
-#include "sgen-hash-table.h"
-#include "sgen-qsort.h"
+#include "sgen/sgen-hash-table.h"
+#include "sgen/sgen-qsort.h"
 #include "utils/mono-logger-internal.h"
 
 MonoGCBridgeCallbacks bridge_callbacks;
diff --git a/mono/metadata/sgen-cardtable.c b/mono/metadata/sgen-cardtable.c
deleted file mode 100644 (file)
index 35c7a79..0000000
+++ /dev/null
@@ -1,618 +0,0 @@
-/*
- * sgen-cardtable.c: Card table implementation for sgen
- *
- * Author:
- *     Rodrigo Kumpera (rkumpera@novell.com)
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "config.h"
-#ifdef HAVE_SGEN_GC
-
-#include <string.h>
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/sgen-cardtable.h"
-#include "mono/metadata/sgen-memory-governor.h"
-#include "mono/metadata/sgen-protocol.h"
-#include "mono/metadata/sgen-layout-stats.h"
-#include "mono/metadata/sgen-client.h"
-#include "mono/metadata/gc-internal-agnostic.h"
-#include "mono/utils/mono-memory-model.h"
-
-//#define CARDTABLE_STATS
-
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#ifdef HAVE_SYS_MMAN_H
-#include <sys/mman.h>
-#endif
-#include <sys/types.h>
-
-guint8 *sgen_cardtable;
-
-static gboolean need_mod_union;
-
-#ifdef HEAVY_STATISTICS
-guint64 marked_cards;
-guint64 scanned_cards;
-guint64 scanned_objects;
-guint64 remarked_cards;
-
-static guint64 los_marked_cards;
-static guint64 large_objects;
-static guint64 bloby_objects;
-static guint64 los_array_cards;
-static guint64 los_array_remsets;
-
-#endif
-static guint64 major_card_scan_time;
-static guint64 los_card_scan_time;
-
-static guint64 last_major_scan_time;
-static guint64 last_los_scan_time;
-
-static void sgen_card_tables_collect_stats (gboolean begin);
-
-mword
-sgen_card_table_number_of_cards_in_range (mword address, mword size)
-{
-       mword end = address + MAX (1, size) - 1;
-       return (end >> CARD_BITS) - (address >> CARD_BITS) + 1;
-}
-
-static void
-sgen_card_table_wbarrier_set_field (GCObject *obj, gpointer field_ptr, GCObject* value)
-{
-       *(void**)field_ptr = value;
-       if (need_mod_union || sgen_ptr_in_nursery (value))
-               sgen_card_table_mark_address ((mword)field_ptr);
-       sgen_dummy_use (value);
-}
-
-static void
-sgen_card_table_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
-{
-       gpointer *dest = dest_ptr;
-       gpointer *src = src_ptr;
-
-       /*overlapping that required backward copying*/
-       if (src < dest && (src + count) > dest) {
-               gpointer *start = dest;
-               dest += count - 1;
-               src += count - 1;
-
-               for (; dest >= start; --src, --dest) {
-                       gpointer value = *src;
-                       SGEN_UPDATE_REFERENCE_ALLOW_NULL (dest, value);
-                       if (need_mod_union || sgen_ptr_in_nursery (value))
-                               sgen_card_table_mark_address ((mword)dest);
-                       sgen_dummy_use (value);
-               }
-       } else {
-               gpointer *end = dest + count;
-               for (; dest < end; ++src, ++dest) {
-                       gpointer value = *src;
-                       SGEN_UPDATE_REFERENCE_ALLOW_NULL (dest, value);
-                       if (need_mod_union || sgen_ptr_in_nursery (value))
-                               sgen_card_table_mark_address ((mword)dest);
-                       sgen_dummy_use (value);
-               }
-       }       
-}
-
-static void
-sgen_card_table_wbarrier_value_copy (gpointer dest, gpointer src, int count, size_t element_size)
-{
-       size_t size = count * element_size;
-
-#ifdef DISABLE_CRITICAL_REGION
-       LOCK_GC;
-#else
-       TLAB_ACCESS_INIT;
-       ENTER_CRITICAL_REGION;
-#endif
-       mono_gc_memmove_atomic (dest, src, size);
-       sgen_card_table_mark_range ((mword)dest, size);
-#ifdef DISABLE_CRITICAL_REGION
-       UNLOCK_GC;
-#else
-       EXIT_CRITICAL_REGION;
-#endif
-}
-
-static void
-sgen_card_table_wbarrier_object_copy (GCObject* obj, GCObject *src)
-{
-       size_t size = sgen_client_par_object_get_size (SGEN_LOAD_VTABLE_UNCHECKED (obj), obj);
-
-#ifdef DISABLE_CRITICAL_REGION
-       LOCK_GC;
-#else
-       TLAB_ACCESS_INIT;
-       ENTER_CRITICAL_REGION;
-#endif
-       mono_gc_memmove_aligned ((char*)obj + SGEN_CLIENT_OBJECT_HEADER_SIZE, (char*)src + SGEN_CLIENT_OBJECT_HEADER_SIZE,
-                       size - SGEN_CLIENT_OBJECT_HEADER_SIZE);
-       sgen_card_table_mark_range ((mword)obj, size);
-#ifdef DISABLE_CRITICAL_REGION
-       UNLOCK_GC;
-#else
-       EXIT_CRITICAL_REGION;
-#endif 
-}
-
-static void
-sgen_card_table_wbarrier_generic_nostore (gpointer ptr)
-{
-       sgen_card_table_mark_address ((mword)ptr);      
-}
-
-#ifdef SGEN_HAVE_OVERLAPPING_CARDS
-
-guint8 *sgen_shadow_cardtable;
-
-#define SGEN_CARDTABLE_END (sgen_cardtable + CARD_COUNT_IN_BYTES)
-
-static gboolean
-sgen_card_table_region_begin_scanning (mword start, mword size)
-{
-       mword end = start + size;
-       /*XXX this can be improved to work on words and have a single loop induction var */
-       while (start < end) {
-               if (sgen_card_table_card_begin_scanning (start))
-                       return TRUE;
-               start += CARD_SIZE_IN_BYTES;
-       }
-       return FALSE;
-}
-
-#else
-
-static gboolean
-sgen_card_table_region_begin_scanning (mword start, mword size)
-{
-       gboolean res = FALSE;
-       guint8 *card = sgen_card_table_get_card_address (start);
-       guint8 *end = card + sgen_card_table_number_of_cards_in_range (start, size);
-
-       /*XXX this can be improved to work on words and have a branchless body */
-       while (card != end) {
-               if (*card++) {
-                       res = TRUE;
-                       break;
-               }
-       }
-
-       memset (sgen_card_table_get_card_address (start), 0, size >> CARD_BITS);
-
-       return res;
-}
-
-#endif
-
-/*FIXME this assumes that major blocks are multiple of 4K which is pretty reasonable */
-gboolean
-sgen_card_table_get_card_data (guint8 *data_dest, mword address, mword cards)
-{
-       mword *start = (mword*)sgen_card_table_get_card_scan_address (address);
-       mword *dest = (mword*)data_dest;
-       mword *end = (mword*)(data_dest + cards);
-       mword mask = 0;
-
-       for (; dest < end; ++dest, ++start) {
-               mword v = *start;
-               *dest = v;
-               mask |= v;
-
-#ifndef SGEN_HAVE_OVERLAPPING_CARDS
-               *start = 0;
-#endif
-       }
-
-       return mask != 0;
-}
-
-void*
-sgen_card_table_align_pointer (void *ptr)
-{
-       return (void*)((mword)ptr & ~(CARD_SIZE_IN_BYTES - 1));
-}
-
-void
-sgen_card_table_mark_range (mword address, mword size)
-{
-       mword num_cards = sgen_card_table_number_of_cards_in_range (address, size);
-       guint8 *start = sgen_card_table_get_card_address (address);
-
-#ifdef SGEN_HAVE_OVERLAPPING_CARDS
-       /*
-        * FIXME: There's a theoretical bug here, namely that the card table is allocated so
-        * far toward the end of the address space that start + num_cards overflows.
-        */
-       guint8 *end = start + num_cards;
-       SGEN_ASSERT (0, num_cards <= CARD_COUNT_IN_BYTES, "How did we get an object larger than the card table?");
-       if (end > SGEN_CARDTABLE_END) {
-               memset (start, 1, SGEN_CARDTABLE_END - start);
-               memset (sgen_cardtable, 1, end - sgen_cardtable);
-               return;
-       }
-#endif
-
-       memset (start, 1, num_cards);
-}
-
-static gboolean
-sgen_card_table_is_range_marked (guint8 *cards, mword address, mword size)
-{
-       guint8 *end = cards + sgen_card_table_number_of_cards_in_range (address, size);
-
-       /*This is safe since this function is only called by code that only passes continuous card blocks*/
-       while (cards != end) {
-               if (*cards++)
-                       return TRUE;
-       }
-       return FALSE;
-
-}
-
-static void
-sgen_card_table_record_pointer (gpointer address)
-{
-       *sgen_card_table_get_card_address ((mword)address) = 1;
-}
-
-static gboolean
-sgen_card_table_find_address (char *addr)
-{
-       return sgen_card_table_address_is_marked ((mword)addr);
-}
-
-static gboolean
-sgen_card_table_find_address_with_cards (char *cards_start, guint8 *cards, char *addr)
-{
-       cards_start = sgen_card_table_align_pointer (cards_start);
-       return cards [(addr - cards_start) >> CARD_BITS];
-}
-
-static void
-update_mod_union (guint8 *dest, guint8 *start_card, size_t num_cards)
-{
-       int i;
-       for (i = 0; i < num_cards; ++i)
-               dest [i] |= start_card [i];
-}
-
-guint8*
-sgen_card_table_alloc_mod_union (char *obj, mword obj_size)
-{
-       size_t num_cards = sgen_card_table_number_of_cards_in_range ((mword) obj, obj_size);
-       guint8 *mod_union = sgen_alloc_internal_dynamic (num_cards, INTERNAL_MEM_CARDTABLE_MOD_UNION, TRUE);
-       memset (mod_union, 0, num_cards);
-       return mod_union;
-}
-
-void
-sgen_card_table_free_mod_union (guint8 *mod_union, char *obj, mword obj_size)
-{
-       size_t num_cards = sgen_card_table_number_of_cards_in_range ((mword) obj, obj_size);
-       sgen_free_internal_dynamic (mod_union, num_cards, INTERNAL_MEM_CARDTABLE_MOD_UNION);
-}
-
-void
-sgen_card_table_update_mod_union_from_cards (guint8 *dest, guint8 *start_card, size_t num_cards)
-{
-       SGEN_ASSERT (0, dest, "Why don't we have a mod union?");
-       update_mod_union (dest, start_card, num_cards);
-}
-
-void
-sgen_card_table_update_mod_union (guint8 *dest, char *obj, mword obj_size, size_t *out_num_cards)
-{
-       guint8 *start_card = sgen_card_table_get_card_address ((mword)obj);
-#ifndef SGEN_HAVE_OVERLAPPING_CARDS
-       guint8 *end_card = sgen_card_table_get_card_address ((mword)obj + obj_size - 1) + 1;
-#endif
-       size_t num_cards;
-
-#ifdef SGEN_HAVE_OVERLAPPING_CARDS
-       size_t rest;
-
-       rest = num_cards = sgen_card_table_number_of_cards_in_range ((mword) obj, obj_size);
-
-       while (start_card + rest > SGEN_CARDTABLE_END) {
-               size_t count = SGEN_CARDTABLE_END - start_card;
-               sgen_card_table_update_mod_union_from_cards (dest, start_card, count);
-               dest += count;
-               rest -= count;
-               start_card = sgen_cardtable;
-       }
-       num_cards = rest;
-#else
-       num_cards = end_card - start_card;
-#endif
-
-       sgen_card_table_update_mod_union_from_cards (dest, start_card, num_cards);
-
-       if (out_num_cards)
-               *out_num_cards = num_cards;
-}
-
-#ifdef SGEN_HAVE_OVERLAPPING_CARDS
-
-static void
-move_cards_to_shadow_table (mword start, mword size)
-{
-       guint8 *from = sgen_card_table_get_card_address (start);
-       guint8 *to = sgen_card_table_get_shadow_card_address (start);
-       size_t bytes = sgen_card_table_number_of_cards_in_range (start, size);
-
-       if (bytes >= CARD_COUNT_IN_BYTES) {
-               memcpy (sgen_shadow_cardtable, sgen_cardtable, CARD_COUNT_IN_BYTES);
-       } else if (to + bytes > SGEN_SHADOW_CARDTABLE_END) {
-               size_t first_chunk = SGEN_SHADOW_CARDTABLE_END - to;
-               size_t second_chunk = MIN (CARD_COUNT_IN_BYTES, bytes) - first_chunk;
-
-               memcpy (to, from, first_chunk);
-               memcpy (sgen_shadow_cardtable, sgen_cardtable, second_chunk);
-       } else {
-               memcpy (to, from, bytes);
-       }
-}
-
-static void
-clear_cards (mword start, mword size)
-{
-       guint8 *addr = sgen_card_table_get_card_address (start);
-       size_t bytes = sgen_card_table_number_of_cards_in_range (start, size);
-
-       if (bytes >= CARD_COUNT_IN_BYTES) {
-               memset (sgen_cardtable, 0, CARD_COUNT_IN_BYTES);
-       } else if (addr + bytes > SGEN_CARDTABLE_END) {
-               size_t first_chunk = SGEN_CARDTABLE_END - addr;
-
-               memset (addr, 0, first_chunk);
-               memset (sgen_cardtable, 0, bytes - first_chunk);
-       } else {
-               memset (addr, 0, bytes);
-       }
-}
-
-
-#else
-
-static void
-clear_cards (mword start, mword size)
-{
-       memset (sgen_card_table_get_card_address (start), 0, sgen_card_table_number_of_cards_in_range (start, size));
-}
-
-
-#endif
-
-static void
-sgen_card_table_clear_cards (void)
-{
-       /*XXX we could do this in 2 ways. using mincore or iterating over all sections/los objects */
-       sgen_major_collector_iterate_live_block_ranges (clear_cards);
-       sgen_los_iterate_live_block_ranges (clear_cards);
-}
-
-static void
-sgen_card_table_finish_minor_collection (void)
-{
-       sgen_card_tables_collect_stats (FALSE);
-}
-
-static void
-sgen_card_table_scan_remsets (ScanCopyContext ctx)
-{
-       SGEN_TV_DECLARE (atv);
-       SGEN_TV_DECLARE (btv);
-
-       sgen_card_tables_collect_stats (TRUE);
-
-#ifdef SGEN_HAVE_OVERLAPPING_CARDS
-       /*FIXME we should have a bit on each block/los object telling if the object have marked cards.*/
-       /*First we copy*/
-       sgen_major_collector_iterate_live_block_ranges (move_cards_to_shadow_table);
-       sgen_los_iterate_live_block_ranges (move_cards_to_shadow_table);
-
-       /*Then we clear*/
-       sgen_card_table_clear_cards ();
-#endif
-       SGEN_TV_GETTIME (atv);
-       sgen_get_major_collector ()->scan_card_table (FALSE, ctx);
-       SGEN_TV_GETTIME (btv);
-       last_major_scan_time = SGEN_TV_ELAPSED (atv, btv); 
-       major_card_scan_time += last_major_scan_time;
-       sgen_los_scan_card_table (FALSE, ctx);
-       SGEN_TV_GETTIME (atv);
-       last_los_scan_time = SGEN_TV_ELAPSED (btv, atv);
-       los_card_scan_time += last_los_scan_time;
-}
-
-guint8*
-sgen_get_card_table_configuration (int *shift_bits, gpointer *mask)
-{
-#ifndef MANAGED_WBARRIER
-       return NULL;
-#else
-       if (!sgen_cardtable)
-               return NULL;
-
-       *shift_bits = CARD_BITS;
-#ifdef SGEN_HAVE_OVERLAPPING_CARDS
-       *mask = (gpointer)CARD_MASK;
-#else
-       *mask = NULL;
-#endif
-
-       return sgen_cardtable;
-#endif
-}
-
-#if 0
-void
-sgen_card_table_dump_obj_card (char *object, size_t size, void *dummy)
-{
-       guint8 *start = sgen_card_table_get_card_scan_address (object);
-       guint8 *end = start + sgen_card_table_number_of_cards_in_range (object, size);
-       int cnt = 0;
-       printf ("--obj %p %d cards [%p %p]--", object, size, start, end);
-       for (; start < end; ++start) {
-               if (cnt == 0)
-                       printf ("\n\t[%p] ", start);
-               printf ("%x ", *start);
-               ++cnt;
-               if (cnt == 8)
-                       cnt = 0;
-       }
-       printf ("\n");
-}
-#endif
-
-void
-sgen_cardtable_scan_object (char *obj, mword block_obj_size, guint8 *cards, gboolean mod_union, ScanCopyContext ctx)
-{
-       HEAVY_STAT (++large_objects);
-
-       if (sgen_client_cardtable_scan_object (obj, block_obj_size, cards, mod_union, ctx))
-               return;
-
-       HEAVY_STAT (++bloby_objects);
-       if (cards) {
-               if (sgen_card_table_is_range_marked (cards, (mword)obj, block_obj_size))
-                       ctx.ops->scan_object (obj, sgen_obj_get_descriptor (obj), ctx.queue);
-       } else if (sgen_card_table_region_begin_scanning ((mword)obj, block_obj_size)) {
-               ctx.ops->scan_object (obj, sgen_obj_get_descriptor (obj), ctx.queue);
-       }
-
-       binary_protocol_card_scan (obj, sgen_safe_object_get_size ((GCObject*)obj));
-}
-
-#ifdef CARDTABLE_STATS
-
-typedef struct {
-       int total, marked, remarked, gc_marked; 
-} card_stats;
-
-static card_stats major_stats, los_stats;
-static card_stats *cur_stats;
-
-static void
-count_marked_cards (mword start, mword size)
-{
-       mword end = start + size;
-       while (start <= end) {
-               guint8 card = *sgen_card_table_get_card_address (start);
-               ++cur_stats->total;
-               if (card)
-                       ++cur_stats->marked;
-               if (card == 2)
-                       ++cur_stats->gc_marked;
-               start += CARD_SIZE_IN_BYTES;
-       }
-}
-
-static void
-count_remarked_cards (mword start, mword size)
-{
-       mword end = start + size;
-       while (start <= end) {
-               if (sgen_card_table_address_is_marked (start)) {
-                       ++cur_stats->remarked;
-                       *sgen_card_table_get_card_address (start) = 2;
-               }
-               start += CARD_SIZE_IN_BYTES;
-       }
-}
-
-#endif
-
-static void
-sgen_card_tables_collect_stats (gboolean begin)
-{
-#ifdef CARDTABLE_STATS
-       if (begin) {
-               memset (&major_stats, 0, sizeof (card_stats));
-               memset (&los_stats, 0, sizeof (card_stats));
-               cur_stats = &major_stats;
-               sgen_major_collector_iterate_live_block_ranges (count_marked_cards);
-               cur_stats = &los_stats;
-               sgen_los_iterate_live_block_ranges (count_marked_cards);
-       } else {
-               cur_stats = &major_stats;
-               sgen_major_collector_iterate_live_block_ranges (count_remarked_cards);
-               cur_stats = &los_stats;
-               sgen_los_iterate_live_block_ranges (count_remarked_cards);
-               printf ("cards major (t %d m %d g %d r %d)  los (t %d m %d g %d r %d) major_scan %.2fms los_scan %.2fms\n", 
-                       major_stats.total, major_stats.marked, major_stats.gc_marked, major_stats.remarked,
-                       los_stats.total, los_stats.marked, los_stats.gc_marked, los_stats.remarked,
-                       last_major_scan_time / 10000.0f, last_los_scan_time / 10000.0f);
-       }
-#endif
-}
-
-void
-sgen_card_table_init (SgenRememberedSet *remset)
-{
-       sgen_cardtable = sgen_alloc_os_memory (CARD_COUNT_IN_BYTES, SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE, "card table");
-
-#ifdef SGEN_HAVE_OVERLAPPING_CARDS
-       sgen_shadow_cardtable = sgen_alloc_os_memory (CARD_COUNT_IN_BYTES, SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE, "shadow card table");
-#endif
-
-#ifdef HEAVY_STATISTICS
-       mono_counters_register ("marked cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &marked_cards);
-       mono_counters_register ("scanned cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &scanned_cards);
-       mono_counters_register ("remarked cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &remarked_cards);
-
-       mono_counters_register ("los marked cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_marked_cards);
-       mono_counters_register ("los array cards scanned ", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_cards);
-       mono_counters_register ("los array remsets", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_remsets);
-       mono_counters_register ("cardtable scanned objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &scanned_objects);
-       mono_counters_register ("cardtable large objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &large_objects);
-       mono_counters_register ("cardtable bloby objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &bloby_objects);
-#endif
-       mono_counters_register ("cardtable major scan time", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &major_card_scan_time);
-       mono_counters_register ("cardtable los scan time", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &los_card_scan_time);
-
-
-       remset->wbarrier_set_field = sgen_card_table_wbarrier_set_field;
-       remset->wbarrier_arrayref_copy = sgen_card_table_wbarrier_arrayref_copy;
-       remset->wbarrier_value_copy = sgen_card_table_wbarrier_value_copy;
-       remset->wbarrier_object_copy = sgen_card_table_wbarrier_object_copy;
-       remset->wbarrier_generic_nostore = sgen_card_table_wbarrier_generic_nostore;
-       remset->record_pointer = sgen_card_table_record_pointer;
-
-       remset->scan_remsets = sgen_card_table_scan_remsets;
-
-       remset->finish_minor_collection = sgen_card_table_finish_minor_collection;
-       remset->clear_cards = sgen_card_table_clear_cards;
-
-       remset->find_address = sgen_card_table_find_address;
-       remset->find_address_with_cards = sgen_card_table_find_address_with_cards;
-
-       need_mod_union = sgen_get_major_collector ()->is_concurrent;
-}
-
-#endif /*HAVE_SGEN_GC*/
diff --git a/mono/metadata/sgen-cardtable.h b/mono/metadata/sgen-cardtable.h
deleted file mode 100644 (file)
index 85a6924..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef __MONO_SGEN_CARD_TABLE_INLINES_H__
-#define __MONO_SGEN_CARD_TABLE_INLINES_H__
-
-/*WARNING: This function returns the number of cards regardless of overflow in case of overlapping cards.*/
-mword sgen_card_table_number_of_cards_in_range (mword address, mword size);
-
-void sgen_card_table_reset_region (mword start, mword end);
-void* sgen_card_table_align_pointer (void *ptr);
-void sgen_card_table_mark_range (mword address, mword size);
-void sgen_cardtable_scan_object (char *obj, mword obj_size, guint8 *cards,
-               gboolean mod_union, ScanCopyContext ctx);
-
-gboolean sgen_card_table_get_card_data (guint8 *dest, mword address, mword cards);
-
-guint8* sgen_card_table_alloc_mod_union (char *obj, mword obj_size);
-void sgen_card_table_free_mod_union (guint8 *mod_union, char *obj, mword obj_size);
-
-void sgen_card_table_update_mod_union_from_cards (guint8 *dest, guint8 *start_card, size_t num_cards);
-void sgen_card_table_update_mod_union (guint8 *dest, char *obj, mword obj_size, size_t *out_num_cards);
-
-guint8* sgen_get_card_table_configuration (int *shift_bits, gpointer *mask);
-
-void sgen_card_table_init (SgenRememberedSet *remset);
-
-/*How many bytes a single card covers*/
-#define CARD_BITS 9
-
-/* How many bits of the address space is covered by the card table.
- * If this value is smaller than the number of address bits, card aliasing is required.
- */
-#define CARD_TABLE_BITS 32
-
-#define CARD_SIZE_IN_BYTES (1 << CARD_BITS)
-#define CARD_COUNT_BITS (CARD_TABLE_BITS - CARD_BITS)
-#define CARD_COUNT_IN_BYTES (1 << CARD_COUNT_BITS)
-#define CARD_MASK ((1 << CARD_COUNT_BITS) - 1)
-
-#if SIZEOF_VOID_P * 8 > CARD_TABLE_BITS
-#define SGEN_HAVE_OVERLAPPING_CARDS    1
-#endif
-
-extern guint8 *sgen_cardtable;
-
-
-#ifdef SGEN_HAVE_OVERLAPPING_CARDS
-
-static inline guint8*
-sgen_card_table_get_card_address (mword address)
-{
-       return sgen_cardtable + ((address >> CARD_BITS) & CARD_MASK);
-}
-
-extern guint8 *sgen_shadow_cardtable;
-
-#define SGEN_SHADOW_CARDTABLE_END (sgen_shadow_cardtable + CARD_COUNT_IN_BYTES)
-
-static inline guint8*
-sgen_card_table_get_shadow_card_address (mword address)
-{
-       return sgen_shadow_cardtable + ((address >> CARD_BITS) & CARD_MASK);
-}
-
-static inline gboolean
-sgen_card_table_card_begin_scanning (mword address)
-{
-       return *sgen_card_table_get_shadow_card_address (address) != 0;
-}
-
-static inline void
-sgen_card_table_prepare_card_for_scanning (guint8 *card)
-{
-}
-
-#define sgen_card_table_get_card_scan_address sgen_card_table_get_shadow_card_address
-
-#else
-
-static inline guint8*
-sgen_card_table_get_card_address (mword address)
-{
-       return sgen_cardtable + (address >> CARD_BITS);
-}
-
-static inline gboolean
-sgen_card_table_card_begin_scanning (mword address)
-{
-       guint8 *card = sgen_card_table_get_card_address (address);
-       gboolean res = *card;
-       *card = 0;
-       return res;
-}
-
-static inline void
-sgen_card_table_prepare_card_for_scanning (guint8 *card)
-{
-       *card = 0;
-}
-
-#define sgen_card_table_get_card_scan_address sgen_card_table_get_card_address
-
-#endif
-
-static inline gboolean
-sgen_card_table_address_is_marked (mword address)
-{
-       return *sgen_card_table_get_card_address (address) != 0;
-}
-
-static inline void
-sgen_card_table_mark_address (mword address)
-{
-       *sgen_card_table_get_card_address (address) = 1;
-}
-
-static inline size_t
-sgen_card_table_get_card_offset (char *ptr, char *base)
-{
-       return (ptr - base) >> CARD_BITS;
-}
-
-#endif
index fdae00a596801217a4716e99ff9ce0c34022d175..eb52ac990f34625a2e2b49552776b62c65363de9 100644 (file)
@@ -19,7 +19,7 @@
 
 #ifdef SGEN_DEFINE_OBJECT_VTABLE
 
-#include "metadata/sgen-archdep.h"
+#include "sgen/sgen-archdep.h"
 #include "utils/mono-threads.h"
 #include "utils/mono-mmap.h"
 #include "metadata/object-internals.h"
diff --git a/mono/metadata/sgen-client.h b/mono/metadata/sgen-client.h
deleted file mode 100644 (file)
index 347ac58..0000000
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * sgen-client.h: SGen client interface.
- *
- * Copyright (C) 2014 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "mono/metadata/sgen-pointer-queue.h"
-
-/*
- * Init whatever needs initing.  This is called relatively early in SGen initialization.
- * Must initialized the small ID for the current thread.
- */
-void sgen_client_init (void);
-
-/*
- * The slow path for getting an object's size.  We're passing in the vtable because we've
- * already fetched it.
- */
-mword sgen_client_slow_object_get_size (GCVTable *vtable, GCObject* o);
-
-/*
- * Fill the given range with a dummy object.  If the range is too short to be filled with an
- * object, null it.  Return `TRUE` if the range was filled with an object, `FALSE` if it was
- * nulled.
- */
-gboolean sgen_client_array_fill_range (char *start, size_t size);
-
-/*
- * This is called if the nursery clearing policy at `clear-at-gc`, which is usually only
- * used for debugging.  If `size` is large enough for the memory to have been filled with a
- * dummy, object, zero its header.  Note that there might not actually be a header there.
- */
-void sgen_client_zero_array_fill_header (void *p, size_t size);
-
-/*
- * Return whether the given object is an array fill dummy object.
- */
-gboolean sgen_client_object_is_array_fill (GCObject *o);
-
-/*
- * Return whether the given finalizable object's finalizer is critical, i.e., needs to run
- * after all non-critical finalizers have run.
- */
-gboolean sgen_client_object_has_critical_finalizer (GCObject *obj);
-
-/*
- * Called after an object is enqueued for finalization.  This is a very low-level callback.
- * It should almost certainly be a NOP.
- *
- * FIXME: Can we merge this with `sgen_client_object_has_critical_finalizer()`?
- */
-void sgen_client_object_queued_for_finalization (GCObject *obj);
-
-/*
- * Run the given object's finalizer.
- */
-void sgen_client_run_finalize (GCObject *obj);
-
-/*
- * Is called after a collection if there are objects to finalize.  The world is still
- * stopped.  This will usually notify the finalizer thread that it needs to run.
- */
-void sgen_client_finalize_notify (void);
-
-/*
- * Returns TRUE if no ephemerons have been marked.  Will be called again if it returned
- * FALSE.  If ephemerons are not supported, just return TRUE.
- */
-gboolean sgen_client_mark_ephemerons (ScanCopyContext ctx);
-
-/*
- * Clear ephemeron pairs with unreachable keys.
- * We pass the copy func so we can figure out if an array was promoted or not.
- */
-void sgen_client_clear_unreachable_ephemerons (ScanCopyContext ctx);
-
-/*
- * This is called for objects that are larger than one card.  If it's possible to scan only
- * parts of the object based on which cards are marked, do so and return TRUE.  Otherwise,
- * return FALSE.
- */
-gboolean sgen_client_cardtable_scan_object (char *obj, mword block_obj_size, guint8 *cards, gboolean mod_union, ScanCopyContext ctx);
-
-/*
- * Called after nursery objects have been pinned.  No action is necessary.
- */
-void sgen_client_nursery_objects_pinned (void **definitely_pinned, int count);
-
-/*
- * Called at a semi-random point during minor collections.  No action is necessary.
- */
-void sgen_client_collecting_minor (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue);
-
-/*
- * Called at semi-random points during major collections.  No action is necessary.
- */
-void sgen_client_collecting_major_1 (void);
-void sgen_client_collecting_major_2 (void);
-void sgen_client_collecting_major_3 (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue);
-
-/*
- * Called after a LOS object has been pinned.  No action is necessary.
- */
-void sgen_client_pinned_los_object (char *obj);
-
-/*
- * Called for every degraded allocation.  No action is necessary.
- */
-void sgen_client_degraded_allocation (size_t size);
-
-/*
- * Called whenever the amount of memory allocated for the managed heap changes.  No action
- * is necessary.
- */
-void sgen_client_total_allocated_heap_changed (size_t allocated_heap_size);
-
-/*
- * Called when an object allocation fails.  The suggested action is to abort the program.
- *
- * FIXME: Don't we want to return a BOOL here that indicates whether to retry the
- * allocation?
- */
-void sgen_client_out_of_memory (size_t size);
-
-/*
- * If the client has registered any internal memory types, this must return a string
- * describing the given type.  Only used for debugging.
- */
-const char* sgen_client_description_for_internal_mem_type (int type);
-
-/*
- * Only used for debugging.  `sgen_client_vtable_get_namespace()` may return NULL.
- */
-gboolean sgen_client_vtable_is_inited (GCVTable *vtable);
-const char* sgen_client_vtable_get_namespace (GCVTable *vtable);
-const char* sgen_client_vtable_get_name (GCVTable *vtable);
-
-/*
- * Called before starting collections.  The world is already stopped.  No action is
- * necessary.
- */
-void sgen_client_pre_collection_checks (void);
-
-/*
- * Must set the thread's thread info to `info`.  If the thread's small ID was not already
- * initialized in `sgen_client_init()` (for the main thread, usually), it must be done here.
- *
- * `stack_bottom_fallback` is the value passed through via `sgen_thread_register()`.
- */
-void sgen_client_thread_register (SgenThreadInfo* info, void *stack_bottom_fallback);
-
-void sgen_client_thread_unregister (SgenThreadInfo *p);
-
-/*
- * Called on each worker thread when it starts up.  Must initialize the thread's small ID.
- */
-void sgen_client_thread_register_worker (void);
-
-/*
- * The least this function needs to do is scan all registers and thread stacks.  To do this
- * conservatively, use `sgen_conservatively_pin_objects_from()`.
- */
-void sgen_client_scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, ScanCopyContext ctx);
-
-/*
- * Stop and restart the world, i.e., all threads that interact with the managed heap.  For
- * single-threaded programs this is a nop.
- */
-void sgen_client_stop_world (int generation);
-void sgen_client_restart_world (int generation, GGTimingInfo *timing);
-
-/*
- * Must return FALSE.  The bridge is not supported outside of Mono.
- */
-gboolean sgen_client_bridge_need_processing (void);
-
-/*
- * None of these should ever be called.
- */
-void sgen_client_bridge_reset_data (void);
-void sgen_client_bridge_processing_stw_step (void);
-void sgen_client_bridge_wait_for_processing (void);
-void sgen_client_bridge_processing_finish (int generation);
-gboolean sgen_client_bridge_is_bridge_object (GCObject *obj);
-void sgen_client_bridge_register_finalized_object (GCObject *object);
-
-/*
- * No action is necessary.
- */
-void sgen_client_mark_togglerefs (char *start, char *end, ScanCopyContext ctx);
-void sgen_client_clear_togglerefs (char *start, char *end, ScanCopyContext ctx);
-
-/*
- * Called after collections, reporting the amount of time they took.  No action is
- * necessary.
- */
-void sgen_client_log_timing (GGTimingInfo *info, mword last_major_num_sections, mword last_los_memory_usage);
-
-/*
- * Called to handle `MONO_GC_PARAMS` and `MONO_GC_DEBUG` options.  The `handle` functions
- * must return TRUE if they have recognized and processed the option, FALSE otherwise.
- */
-gboolean sgen_client_handle_gc_param (const char *opt);
-void sgen_client_print_gc_params_usage (void);
-gboolean sgen_client_handle_gc_debug (const char *opt);
-void sgen_client_print_gc_debug_usage (void);
-
-/*
- * Called to obtain an identifier for the current location, such as a method pointer. This
- * is used for logging the provenances of allocations with the heavy binary protocol.
- */
-gpointer sgen_client_get_provenance (void);
-
-/*
- * These client binary protocol functions are called from the respective binary protocol
- * functions.  No action is necessary.  We suggest implementing them as inline functions in
- * the client header file so that no overhead is incurred if they don't actually do
- * anything.
- */
-
-#define TYPE_INT int
-#define TYPE_LONGLONG long long
-#define TYPE_SIZE size_t
-#define TYPE_POINTER gpointer
-#define TYPE_BOOL gboolean
-
-#define BEGIN_PROTOCOL_ENTRY0(method) \
-       void sgen_client_ ## method (void);
-#define BEGIN_PROTOCOL_ENTRY_HEAVY0(method) \
-       void sgen_client_ ## method (void);
-#define BEGIN_PROTOCOL_ENTRY1(method,t1,f1) \
-       void sgen_client_ ## method (t1 f1);
-#define BEGIN_PROTOCOL_ENTRY_HEAVY1(method,t1,f1) \
-       void sgen_client_ ## method (t1 f1);
-#define BEGIN_PROTOCOL_ENTRY2(method,t1,f1,t2,f2) \
-       void sgen_client_ ## method (t1 f1, t2 f2);
-#define BEGIN_PROTOCOL_ENTRY_HEAVY2(method,t1,f1,t2,f2) \
-       void sgen_client_ ## method (t1 f1, t2 f2);
-#define BEGIN_PROTOCOL_ENTRY3(method,t1,f1,t2,f2,t3,f3) \
-       void sgen_client_ ## method (t1 f1, t2 f2, t3 f3);
-#define BEGIN_PROTOCOL_ENTRY_HEAVY3(method,t1,f1,t2,f2,t3,f3) \
-       void sgen_client_ ## method (t1 f1, t2 f2, t3 f3);
-#define BEGIN_PROTOCOL_ENTRY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
-       void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4);
-#define BEGIN_PROTOCOL_ENTRY_HEAVY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
-       void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4);
-#define BEGIN_PROTOCOL_ENTRY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
-       void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5);
-#define BEGIN_PROTOCOL_ENTRY_HEAVY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
-       void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5);
-#define BEGIN_PROTOCOL_ENTRY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
-       void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5, t6 f6);
-#define BEGIN_PROTOCOL_ENTRY_HEAVY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
-       void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5, t6 f6);
-
-#define FLUSH()
-
-#define DEFAULT_PRINT()
-#define CUSTOM_PRINT(_)
-
-#define IS_ALWAYS_MATCH(_)
-#define MATCH_INDEX(_)
-#define IS_VTABLE_MATCH(_)
-
-#define END_PROTOCOL_ENTRY
-#define END_PROTOCOL_ENTRY_HEAVY
-
-#include "sgen-protocol-def.h"
-
-#undef TYPE_INT
-#undef TYPE_LONGLONG
-#undef TYPE_SIZE
-#undef TYPE_POINTER
-#undef TYPE_BOOL
-
-#ifdef SGEN_WITHOUT_MONO
-/*
- * Get the current thread's thread info.  This will only be called on managed threads.
- */
-SgenThreadInfo* mono_thread_info_current (void);
-
-/*
- * Get the current thread's small ID.  This will be called on managed and worker threads.
- */
-int mono_thread_info_get_small_id (void);
-#endif
diff --git a/mono/metadata/sgen-conf.h b/mono/metadata/sgen-conf.h
deleted file mode 100644 (file)
index f139a98..0000000
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * sgen-conf.h: Tunable parameters and debugging switches.
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#ifndef __MONO_SGENCONF_H__
-#define __MONO_SGENCONF_H__
-
-#include <glib.h>
-
-/*Basic defines and static tunables */
-
-#if SIZEOF_VOID_P == 4
-typedef guint32 mword;
-#else
-typedef guint64 mword;
-#endif
-
-
-/*
- * Turning on heavy statistics will turn off the managed allocator and
- * the managed write barrier.
- */
-// #define HEAVY_STATISTICS
-
-#ifdef HEAVY_STATISTICS
-#define HEAVY_STAT(x)  x
-#else
-#define HEAVY_STAT(x)
-#endif
-
-/*
- * Define this to allow the user to change the nursery size by
- * specifying its value in the MONO_GC_PARAMS environmental
- * variable. See mono_gc_base_init for details.
- */
-#define USER_CONFIG 1
-
-/*
- * The binary protocol enables logging a lot of the GC ativity in a way that is not very
- * intrusive and produces a compact file that can be searched using a custom tool.  This
- * option enables very fine-grained binary protocol events, which will make the GC a tiny
- * bit less efficient even if no binary protocol file is generated.
- */
-//#define SGEN_HEAVY_BINARY_PROTOCOL
-
-/*
- * This extends the heavy binary protocol to record the provenance of an object
- * for every allocation.
- */
-//#define SGEN_OBJECT_PROVENANCE
-
-/*
- * This enables checks whenever objects are enqueued in gray queues.
- * Right now the only check done is that we never enqueue nursery
- * pointers in the concurrent collector.
- */
-//#define SGEN_CHECK_GRAY_OBJECT_ENQUEUE
-
-/*
- * This keeps track of where a gray object queue section is and
- * whether it is where it should be.
- */
-//#define SGEN_CHECK_GRAY_OBJECT_SECTIONS
-
-/*
- * Enable this to check every reference update for null references and whether the update is
- * made in a worker thread.  In only a few cases do we potentially update references by
- * writing nulls, so we assert in all the cases where it's not allowed.  The concurrent
- * collector's worker thread is not allowed to update references at all, so we also assert
- * that we're not in the worker thread.
- */
-//#define SGEN_CHECK_UPDATE_REFERENCE
-
-/*
- * Define this and use the "xdomain-checks" MONO_GC_DEBUG option to
- * have cross-domain checks in the write barrier.
- */
-//#define XDOMAIN_CHECKS_IN_WBARRIER
-
-/*
- * Define this to get number of objects marked information in the
- * concurrent GC DTrace probes.  Has a small performance impact, so
- * it's disabled by default.
- */
-//#define SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
-
-/*
- * Object layout statistics gather a histogram of reference locations
- * over all scanned objects.  We use this information to improve GC
- * descriptors to speed up scanning.  This does not provide any
- * troubleshooting assistance (unless you are troubled in highly
- * unusual ways) and makes scanning slower.
- */
-//#define SGEN_OBJECT_LAYOUT_STATISTICS
-
-#ifndef SGEN_HEAVY_BINARY_PROTOCOL
-#ifndef HEAVY_STATISTICS
-#define MANAGED_ALLOCATION
-#ifndef XDOMAIN_CHECKS_IN_WBARRIER
-#define MANAGED_WBARRIER
-#endif
-#endif
-#endif
-
-/*
- * Maximum level of debug to enable on this build.
- * Making this a constant enables us to put logging in a lot of places and
- * not pay its cost on release builds.
- */
-#define SGEN_MAX_DEBUG_LEVEL 2
-
-/*
- * Maximum level of asserts to enable on this build.
- * FIXME replace all magic numbers with defines.
- */
-#define SGEN_MAX_ASSERT_LEVEL 5
-
-
-#define GC_BITS_PER_WORD (sizeof (mword) * 8)
-
-/*Size of the section used by the copying GC. */
-#define SGEN_SIZEOF_GC_MEM_SECTION     ((sizeof (GCMemSection) + 7) & ~7)
-
-/*
- * to quickly find the head of an object pinned by a conservative
- * address we keep track of the objects allocated for each
- * SGEN_SCAN_START_SIZE memory chunk in the nursery or other memory
- * sections. Larger values have less memory overhead and bigger
- * runtime cost. 4-8 KB are reasonable values.
- */
-#define SGEN_SCAN_START_SIZE (4096*2)
-
-/*
- * Objects bigger then this go into the large object space.  This size has a few
- * constraints.  At least two of them must fit into a major heap block.  It must also play
- * well with the run length GC descriptor, which encodes the object size.
- */
-#define SGEN_MAX_SMALL_OBJ_SIZE 8000
-
-/*
- * This is the maximum ammount of memory we're willing to waste in order to speed up allocation.
- * Wastage comes in thre forms:
- *
- * -when building the nursery fragment list, small regions are discarded;
- * -when allocating memory from a fragment if it ends up below the threshold, we remove it from the fragment list; and
- * -when allocating a new tlab, we discard the remaining space of the old one
- *
- * Increasing this value speeds up allocation but will cause more frequent nursery collections as less space will be used.
- * Descreasing this value will cause allocation to be slower since we'll have to cycle thru more fragments.
- * 512 annedoctally keeps wastage under control and doesn't impact allocation performance too much. 
-*/
-#define SGEN_MAX_NURSERY_WASTE 512
-
-
-/*
- * Minimum allowance for nursery allocations, as a multiple of the size of nursery.
- *
- * We allow at least this much allocation to happen to the major heap from multiple
- * minor collections before triggering a major collection.
- *
- * Bigger values increases throughput by allowing more garbage to sit in the major heap.
- * Smaller values leads to better memory effiency but more frequent major collections.
- */
-#define SGEN_DEFAULT_ALLOWANCE_NURSERY_SIZE_RATIO 4.0
-
-#define SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO 1.0
-#define SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO 10.0
-
-/*
- * Default ratio of memory we want to release in a major collection in relation to the the current heap size.
- *
- * A major collection target is to free a given amount of memory. This amount is a ratio of the major heap size.
- *
- * Values above 0.5 cause the heap to agressively grow when it's small and waste memory when it's big.
- * Lower values will produce more reasonable sized heaps when it's small, but will be suboptimal at large
- * sizes as they will use a small fraction only.
- *
- */
-#define SGEN_DEFAULT_SAVE_TARGET_RATIO 0.5
-
-#define SGEN_MIN_SAVE_TARGET_RATIO 0.1
-#define SGEN_MAX_SAVE_TARGET_RATIO 2.0
-
-/*
- * Configurable cementing parameters.
- *
- * If there are too many pinned nursery objects with many references
- * from the major heap, the hash table size must be increased.
- *
- * The threshold is the number of references from the major heap to a
- * pinned nursery object which triggers cementing: if there are more
- * than that number of references, the pinned object is cemented until
- * the next major collection.
- */
-#define SGEN_CEMENT_HASH_SHIFT 6
-#define SGEN_CEMENT_HASH_SIZE  (1 << SGEN_CEMENT_HASH_SHIFT)
-#define SGEN_CEMENT_HASH(hv)   (((hv) ^ ((hv) >> SGEN_CEMENT_HASH_SHIFT)) & (SGEN_CEMENT_HASH_SIZE - 1))
-#define SGEN_CEMENT_THRESHOLD  1000
-
-#endif
diff --git a/mono/metadata/sgen-copy-object.h b/mono/metadata/sgen-copy-object.h
deleted file mode 100644 (file)
index 99fd0cc..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * sgen-copy-object.h: This is where objects are copied.
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-extern guint64 stat_copy_object_called_nursery;
-extern guint64 stat_objects_copied_nursery;
-
-extern guint64 stat_nursery_copy_object_failed_from_space;
-extern guint64 stat_nursery_copy_object_failed_forwarded;
-extern guint64 stat_nursery_copy_object_failed_pinned;
-
-extern guint64 stat_slots_allocated_in_vain;
-
-/*
- * Copies an object and enqueues it if a queue is given.
- *
- * This function can be used even if the vtable of obj is not valid
- * anymore, which is the case in the parallel collector.
- */
-static MONO_ALWAYS_INLINE void
-par_copy_object_no_checks (char *destination, GCVTable *vt, void *obj, mword objsize, SgenGrayQueue *queue)
-{
-       sgen_client_pre_copy_checks (destination, vt, obj, objsize);
-       binary_protocol_copy (obj, destination, vt, objsize);
-
-       /* FIXME: assumes object layout */
-       memcpy (destination + sizeof (mword), (char*)obj + sizeof (mword), objsize - sizeof (mword));
-
-       /* adjust array->bounds */
-       SGEN_ASSERT (9, sgen_vtable_get_descriptor (vt), "vtable %p has no gc descriptor", vt);
-
-       sgen_client_update_copied_object (destination, vt, obj, objsize);
-       obj = destination;
-       if (queue) {
-               SGEN_LOG (9, "Enqueuing gray object %p (%s)", obj, sgen_client_vtable_get_name (vt));
-               GRAY_OBJECT_ENQUEUE (queue, obj, sgen_vtable_get_descriptor (vt));
-       }
-}
-
-/*
- * This can return OBJ itself on OOM.
- */
-static MONO_NEVER_INLINE void*
-copy_object_no_checks (void *obj, SgenGrayQueue *queue)
-{
-       GCVTable *vt = SGEN_LOAD_VTABLE_UNCHECKED (obj);
-       gboolean has_references = SGEN_VTABLE_HAS_REFERENCES (vt);
-       mword objsize = SGEN_ALIGN_UP (sgen_client_par_object_get_size (vt, obj));
-       /* FIXME: Does this not mark the newly allocated object? */
-       char *destination = COLLECTOR_SERIAL_ALLOC_FOR_PROMOTION (vt, obj, objsize, has_references);
-
-       if (G_UNLIKELY (!destination)) {
-               /* FIXME: Is this path ever tested? */
-               collector_pin_object (obj, queue);
-               sgen_set_pinned_from_failed_allocation (objsize);
-               return obj;
-       }
-
-       if (!has_references)
-               queue = NULL;
-
-       par_copy_object_no_checks (destination, vt, obj, objsize, queue);
-       /* FIXME: mark mod union cards if necessary */
-
-       /* set the forwarding pointer */
-       SGEN_FORWARD_OBJECT (obj, destination);
-
-       return destination;
-}
diff --git a/mono/metadata/sgen-debug.c b/mono/metadata/sgen-debug.c
deleted file mode 100644 (file)
index 2240a1f..0000000
+++ /dev/null
@@ -1,1401 +0,0 @@
-/*
- * sgen-debug.c: Collector debugging
- *
- * Author:
- *     Paolo Molaro (lupus@ximian.com)
- *  Rodrigo Kumpera (kumpera@gmail.com)
- *
- * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
- * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
- * Copyright 2011 Xamarin, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "config.h"
-#ifdef HAVE_SGEN_GC
-
-#include <string.h>
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/sgen-cardtable.h"
-#include "mono/metadata/sgen-protocol.h"
-#include "mono/metadata/sgen-memory-governor.h"
-#include "mono/metadata/sgen-pinning.h"
-#include "mono/metadata/sgen-client.h"
-#ifndef SGEN_WITHOUT_MONO
-#include "mono/metadata/sgen-bridge-internal.h"
-#endif
-
-#define LOAD_VTABLE    SGEN_LOAD_VTABLE
-
-#define object_is_forwarded    SGEN_OBJECT_IS_FORWARDED
-#define object_is_pinned       SGEN_OBJECT_IS_PINNED
-#define safe_object_get_size   sgen_safe_object_get_size
-
-void describe_ptr (char *ptr);
-void check_object (char *start);
-
-/*
- * ######################################################################
- * ########  Collector debugging
- * ######################################################################
- */
-
-static const char*descriptor_types [] = {
-       "INVALID",
-       "run length",
-       "bitmap",
-       "small pointer-free",
-       "complex",
-       "vector",
-       "complex arrray",
-       "complex pointer-free"
-};
-
-static char* describe_nursery_ptr (char *ptr, gboolean need_setup);
-
-static void
-describe_pointer (char *ptr, gboolean need_setup)
-{
-       GCVTable *vtable;
-       mword desc;
-       int type;
-       char *start;
-       char *forwarded;
-       mword size;
-
- restart:
-       if (sgen_ptr_in_nursery (ptr)) {
-               start = describe_nursery_ptr (ptr, need_setup);
-               if (!start)
-                       return;
-               ptr = start;
-               vtable = (GCVTable*)LOAD_VTABLE (ptr);
-       } else {
-               if (sgen_ptr_is_in_los (ptr, &start)) {
-                       if (ptr == start)
-                               printf ("Pointer is the start of object %p in LOS space.\n", start);
-                       else
-                               printf ("Pointer is at offset 0x%x of object %p in LOS space.\n", (int)(ptr - start), start);
-                       ptr = start;
-                       mono_sgen_los_describe_pointer (ptr);
-                       vtable = (GCVTable*)LOAD_VTABLE (ptr);
-               } else if (major_collector.ptr_is_in_non_pinned_space (ptr, &start)) {
-                       if (ptr == start)
-                               printf ("Pointer is the start of object %p in oldspace.\n", start);
-                       else if (start)
-                               printf ("Pointer is at offset 0x%x of object %p in oldspace.\n", (int)(ptr - start), start);
-                       else
-                               printf ("Pointer inside oldspace.\n");
-                       if (start)
-                               ptr = start;
-                       vtable = (GCVTable*)major_collector.describe_pointer (ptr);
-               } else if (major_collector.obj_is_from_pinned_alloc (ptr)) {
-                       // FIXME: Handle pointers to the inside of objects
-                       printf ("Pointer is inside a pinned chunk.\n");
-                       vtable = (GCVTable*)LOAD_VTABLE (ptr);
-               } else {
-                       printf ("Pointer unknown.\n");
-                       return;
-               }
-       }
-
-       if (object_is_pinned (ptr))
-               printf ("Object is pinned.\n");
-
-       if ((forwarded = object_is_forwarded (ptr))) {
-               printf ("Object is forwarded to %p:\n", forwarded);
-               ptr = forwarded;
-               goto restart;
-       }
-
-       printf ("VTable: %p\n", vtable);
-       if (vtable == NULL) {
-               printf ("VTable is invalid (empty).\n");
-               goto bridge;
-       }
-       if (sgen_ptr_in_nursery (vtable)) {
-               printf ("VTable is invalid (points inside nursery).\n");
-               goto bridge;
-       }
-       printf ("Class: %s.%s\n", sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable));
-
-       desc = sgen_vtable_get_descriptor ((GCVTable*)vtable);
-       printf ("Descriptor: %lx\n", (long)desc);
-
-       type = desc & DESC_TYPE_MASK;
-       printf ("Descriptor type: %d (%s)\n", type, descriptor_types [type]);
-
-       size = sgen_safe_object_get_size ((GCObject*)ptr);
-       printf ("Size: %d\n", (int)size);
-
- bridge:
-       ;
-#ifndef SGEN_WITHOUT_MONO
-       sgen_bridge_describe_pointer ((GCObject*)ptr);
-#endif
-}
-
-void
-describe_ptr (char *ptr)
-{
-       describe_pointer (ptr, TRUE);
-}
-
-static gboolean missing_remsets;
-
-/*
- * We let a missing remset slide if the target object is pinned,
- * because the store might have happened but the remset not yet added,
- * but in that case the target must be pinned.  We might theoretically
- * miss some missing remsets this way, but it's very unlikely.
- */
-#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj)    do {    \
-       if (*(ptr) && sgen_ptr_in_nursery ((char*)*(ptr))) { \
-               if (!sgen_get_remset ()->find_address ((char*)(ptr)) && !sgen_cement_lookup (*(ptr))) { \
-                       GCVTable *__vt = SGEN_LOAD_VTABLE ((obj));      \
-                       SGEN_LOG (0, "Oldspace->newspace reference %p at offset %td in object %p (%s.%s) not found in remsets.", *(ptr), (char*)(ptr) - (char*)(obj), (obj), sgen_client_vtable_get_namespace (__vt), sgen_client_vtable_get_name (__vt)); \
-                       binary_protocol_missing_remset ((obj), __vt, (int) ((char*)(ptr) - (char*)(obj)), *(ptr), (gpointer)LOAD_VTABLE(*(ptr)), object_is_pinned (*(ptr))); \
-                       if (!object_is_pinned (*(ptr)))                                                         \
-                               missing_remsets = TRUE;                                                                 \
-               }                                                                                                                               \
-       }                                                                                                                                       \
-       } while (0)
-
-/*
- * Check that each object reference which points into the nursery can
- * be found in the remembered sets.
- */
-static void
-check_consistency_callback (char *start, size_t size, void *dummy)
-{
-       GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
-       mword desc = sgen_vtable_get_descriptor ((GCVTable*)vt);
-       SGEN_LOG (8, "Scanning object %p, vtable: %p (%s)", start, vt, sgen_client_vtable_get_name (vt));
-
-#include "sgen-scan-object.h"
-}
-
-/*
- * Perform consistency check of the heap.
- *
- * Assumes the world is stopped.
- */
-void
-sgen_check_consistency (void)
-{
-       // Need to add more checks
-
-       missing_remsets = FALSE;
-
-       SGEN_LOG (1, "Begin heap consistency check...");
-
-       // Check that oldspace->newspace pointers are registered with the collector
-       major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)check_consistency_callback, NULL);
-
-       sgen_los_iterate_objects ((IterateObjectCallbackFunc)check_consistency_callback, NULL);
-
-       SGEN_LOG (1, "Heap consistency check done.");
-
-       if (!binary_protocol_is_enabled ())
-               g_assert (!missing_remsets);
-}
-
-static gboolean
-is_major_or_los_object_marked (char *obj)
-{
-       if (sgen_safe_object_get_size ((GCObject*)obj) > SGEN_MAX_SMALL_OBJ_SIZE) {
-               return sgen_los_object_is_pinned (obj);
-       } else {
-               return sgen_get_major_collector ()->is_object_live (obj);
-       }
-}
-
-#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj)    do {    \
-       if (*(ptr) && !sgen_ptr_in_nursery ((char*)*(ptr)) && !is_major_or_los_object_marked ((char*)*(ptr))) { \
-               if (!sgen_get_remset ()->find_address_with_cards (start, cards, (char*)(ptr))) { \
-                       GCVTable *__vt = SGEN_LOAD_VTABLE ((obj));      \
-                       SGEN_LOG (0, "major->major reference %p at offset %td in object %p (%s.%s) not found in remsets.", *(ptr), (char*)(ptr) - (char*)(obj), (obj), sgen_client_vtable_get_namespace (__vt), sgen_client_vtable_get_name (__vt)); \
-                       binary_protocol_missing_remset ((obj), __vt, (int) ((char*)(ptr) - (char*)(obj)), *(ptr), (gpointer)LOAD_VTABLE(*(ptr)), object_is_pinned (*(ptr))); \
-                       missing_remsets = TRUE;                         \
-               }                                                                                                                               \
-       }                                                                                                                                       \
-       } while (0)
-
-static void
-check_mod_union_callback (char *start, size_t size, void *dummy)
-{
-       gboolean in_los = (gboolean) (size_t) dummy;
-       GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
-       mword desc = sgen_vtable_get_descriptor ((GCVTable*)vt);
-       guint8 *cards;
-       SGEN_LOG (8, "Scanning object %p, vtable: %p (%s)", start, vt, sgen_client_vtable_get_name (vt));
-
-       if (!is_major_or_los_object_marked (start))
-               return;
-
-       if (in_los)
-               cards = sgen_los_header_for_object (start)->cardtable_mod_union;
-       else
-               cards = sgen_get_major_collector ()->get_cardtable_mod_union_for_object (start);
-
-       SGEN_ASSERT (0, cards, "we must have mod union for marked major objects");
-
-#include "sgen-scan-object.h"
-}
-
-void
-sgen_check_mod_union_consistency (void)
-{
-       missing_remsets = FALSE;
-
-       major_collector.iterate_objects (ITERATE_OBJECTS_ALL, (IterateObjectCallbackFunc)check_mod_union_callback, (void*)FALSE);
-
-       sgen_los_iterate_objects ((IterateObjectCallbackFunc)check_mod_union_callback, (void*)TRUE);
-
-       if (!binary_protocol_is_enabled ())
-               g_assert (!missing_remsets);
-}
-
-#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj)    do {                                    \
-               if (*(ptr) && !LOAD_VTABLE (*(ptr)))                                            \
-                       g_error ("Could not load vtable for obj %p slot %zd (size %zd)", obj, (char*)ptr - (char*)obj, (size_t)safe_object_get_size ((GCObject*)obj)); \
-       } while (0)
-
-static void
-check_major_refs_callback (char *start, size_t size, void *dummy)
-{
-       mword desc = sgen_obj_get_descriptor (start);
-
-#include "sgen-scan-object.h"
-}
-
-void
-sgen_check_major_refs (void)
-{
-       major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)check_major_refs_callback, NULL);
-       sgen_los_iterate_objects ((IterateObjectCallbackFunc)check_major_refs_callback, NULL);
-}
-
-/* Check that the reference is valid */
-#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj)    do {    \
-               if (*(ptr)) {   \
-                       g_assert (sgen_client_vtable_get_namespace (SGEN_LOAD_VTABLE_UNCHECKED (*(ptr))));      \
-               }       \
-       } while (0)
-
-/*
- * check_object:
- *
- *   Perform consistency check on an object. Currently we only check that the
- * reference fields are valid.
- */
-void
-check_object (char *start)
-{
-       mword desc;
-
-       if (!start)
-               return;
-
-       desc = sgen_obj_get_descriptor (start);
-
-#include "sgen-scan-object.h"
-}
-
-
-static char **valid_nursery_objects;
-static int valid_nursery_object_count;
-static gboolean broken_heap;
-
-static void 
-setup_mono_sgen_scan_area_with_callback (char *object, size_t size, void *data)
-{
-       valid_nursery_objects [valid_nursery_object_count++] = object;
-}
-
-static void
-setup_valid_nursery_objects (void)
-{
-       if (!valid_nursery_objects)
-               valid_nursery_objects = sgen_alloc_os_memory (DEFAULT_NURSERY_SIZE, SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE, "debugging data");
-       valid_nursery_object_count = 0;
-       sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, setup_mono_sgen_scan_area_with_callback, NULL, FALSE);
-}
-
-static gboolean
-find_object_in_nursery_dump (char *object)
-{
-       int first = 0, last = valid_nursery_object_count;
-       while (first < last) {
-               int middle = first + ((last - first) >> 1);
-               if (object == valid_nursery_objects [middle])
-                       return TRUE;
-
-               if (object < valid_nursery_objects [middle])
-                       last = middle;
-               else
-                       first = middle + 1;
-       }
-       g_assert (first == last);
-       return FALSE;
-}
-
-static void
-iterate_valid_nursery_objects (IterateObjectCallbackFunc callback, void *data)
-{
-       int i;
-       for (i = 0; i < valid_nursery_object_count; ++i) {
-               char *obj = valid_nursery_objects [i];
-               callback (obj, safe_object_get_size ((GCObject*)obj), data);
-       }
-}
-
-static char*
-describe_nursery_ptr (char *ptr, gboolean need_setup)
-{
-       int i;
-
-       if (need_setup)
-               setup_valid_nursery_objects ();
-
-       for (i = 0; i < valid_nursery_object_count - 1; ++i) {
-               if (valid_nursery_objects [i + 1] > ptr)
-                       break;
-       }
-
-       if (i >= valid_nursery_object_count || valid_nursery_objects [i] + safe_object_get_size ((GCObject *)valid_nursery_objects [i]) < ptr) {
-               SGEN_LOG (0, "nursery-ptr (unalloc'd-memory)");
-               return NULL;
-       } else {
-               char *obj = valid_nursery_objects [i];
-               if (obj == ptr)
-                       SGEN_LOG (0, "nursery-ptr %p", obj);
-               else
-                       SGEN_LOG (0, "nursery-ptr %p (interior-ptr offset %td)", obj, ptr - obj);
-               return obj;
-       }
-}
-
-static gboolean
-is_valid_object_pointer (char *object)
-{
-       if (sgen_ptr_in_nursery (object))
-               return find_object_in_nursery_dump (object);
-       
-       if (sgen_los_is_valid_object (object))
-               return TRUE;
-
-       if (major_collector.is_valid_object (object))
-               return TRUE;
-       return FALSE;
-}
-
-static void
-bad_pointer_spew (char *obj, char **slot)
-{
-       char *ptr = *slot;
-       GCVTable *vtable = (GCVTable*)LOAD_VTABLE (obj);
-
-       SGEN_LOG (0, "Invalid object pointer %p at offset %td in object %p (%s.%s):", ptr,
-                       (char*)slot - obj,
-                       obj, sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable));
-       describe_pointer (ptr, FALSE);
-       broken_heap = TRUE;
-}
-
-static void
-missing_remset_spew (char *obj, char **slot)
-{
-       char *ptr = *slot;
-       GCVTable *vtable = (GCVTable*)LOAD_VTABLE (obj);
-
-       SGEN_LOG (0, "Oldspace->newspace reference %p at offset %td in object %p (%s.%s) not found in remsets.",
-                       ptr, (char*)slot - obj, obj, 
-                       sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable));
-
-       broken_heap = TRUE;
-}
-
-/*
-FIXME Flag missing remsets due to pinning as non fatal
-*/
-#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj)    do {    \
-               if (*(char**)ptr) {     \
-                       if (!is_valid_object_pointer (*(char**)ptr)) {  \
-                               bad_pointer_spew ((char*)obj, (char**)ptr);     \
-                       } else if (!sgen_ptr_in_nursery (obj) && sgen_ptr_in_nursery ((char*)*ptr)) {   \
-                               if (!sgen_get_remset ()->find_address ((char*)(ptr)) && !sgen_cement_lookup ((char*)*(ptr)) && (!allow_missing_pinned || !SGEN_OBJECT_IS_PINNED ((char*)*(ptr)))) \
-                               missing_remset_spew ((char*)obj, (char**)ptr);  \
-                       }       \
-        } \
-       } while (0)
-
-static void
-verify_object_pointers_callback (char *start, size_t size, void *data)
-{
-       gboolean allow_missing_pinned = (gboolean) (size_t) data;
-       mword desc = sgen_obj_get_descriptor (start);
-
-#include "sgen-scan-object.h"
-}
-
-/*
-FIXME:
--This heap checker is racy regarding inlined write barriers and other JIT tricks that
-depend on OP_DUMMY_USE.
-*/
-void
-sgen_check_whole_heap (gboolean allow_missing_pinned)
-{
-       setup_valid_nursery_objects ();
-
-       broken_heap = FALSE;
-       sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, verify_object_pointers_callback, (void*) (size_t) allow_missing_pinned, FALSE);
-       major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, verify_object_pointers_callback, (void*) (size_t) allow_missing_pinned);
-       sgen_los_iterate_objects (verify_object_pointers_callback, (void*) (size_t) allow_missing_pinned);
-
-       g_assert (!broken_heap);
-}
-
-static gboolean
-ptr_in_heap (char *object)
-{
-       if (sgen_ptr_in_nursery (object))
-               return TRUE;
-       
-       if (sgen_los_is_valid_object (object))
-               return TRUE;
-
-       if (major_collector.is_valid_object (object))
-               return TRUE;
-       return FALSE;
-}
-
-/*
- * sgen_check_objref:
- *   Do consistency checks on the object reference OBJ. Assert on failure.
- */
-void
-sgen_check_objref (char *obj)
-{
-       g_assert (ptr_in_heap (obj));
-}
-
-static void
-find_pinning_ref_from_thread (char *obj, size_t size)
-{
-#ifndef SGEN_WITHOUT_MONO
-       int j;
-       SgenThreadInfo *info;
-       char *endobj = obj + size;
-
-       FOREACH_THREAD (info) {
-               char **start = (char**)info->client_info.stack_start;
-               if (info->client_info.skip || info->client_info.gc_disabled)
-                       continue;
-               while (start < (char**)info->client_info.stack_end) {
-                       if (*start >= obj && *start < endobj)
-                               SGEN_LOG (0, "Object %p referenced in thread %p (id %p) at %p, stack: %p-%p", obj, info, (gpointer)mono_thread_info_get_tid (info), start, info->client_info.stack_start, info->client_info.stack_end);
-                       start++;
-               }
-
-               for (j = 0; j < ARCH_NUM_REGS; ++j) {
-#ifdef USE_MONO_CTX
-                       mword w = ((mword*)&info->client_info.ctx) [j];
-#else
-                       mword w = (mword)&info->client_info.regs [j];
-#endif
-
-                       if (w >= (mword)obj && w < (mword)obj + size)
-                               SGEN_LOG (0, "Object %p referenced in saved reg %d of thread %p (id %p)", obj, j, info, (gpointer)mono_thread_info_get_tid (info));
-               } END_FOREACH_THREAD
-       }
-#endif
-}
-
-/*
- * Debugging function: find in the conservative roots where @obj is being pinned.
- */
-static G_GNUC_UNUSED void
-find_pinning_reference (char *obj, size_t size)
-{
-       char **start;
-       RootRecord *root;
-       char *endobj = obj + size;
-
-       SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_NORMAL], start, root) {
-               /* if desc is non-null it has precise info */
-               if (!root->root_desc) {
-                       while (start < (char**)root->end_root) {
-                               if (*start >= obj && *start < endobj) {
-                                       SGEN_LOG (0, "Object %p referenced in pinned roots %p-%p\n", obj, start, root->end_root);
-                               }
-                               start++;
-                       }
-               }
-       } SGEN_HASH_TABLE_FOREACH_END;
-
-       find_pinning_ref_from_thread (obj, size);
-}
-
-#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj)    do {                                    \
-               char* __target = *(char**)ptr;                          \
-               if (__target) {                                         \
-                       if (sgen_ptr_in_nursery (__target)) {           \
-                               g_assert (!SGEN_OBJECT_IS_FORWARDED (__target)); \
-                       } else {                                        \
-                               mword __size = sgen_safe_object_get_size ((GCObject*)__target); \
-                               if (__size <= SGEN_MAX_SMALL_OBJ_SIZE)  \
-                                       g_assert (major_collector.is_object_live (__target)); \
-                               else                                    \
-                                       g_assert (sgen_los_object_is_pinned (__target)); \
-                       }                                               \
-               }                                                       \
-       } while (0)
-
-static void
-check_marked_callback (char *start, size_t size, void *dummy)
-{
-       gboolean flag = (gboolean) (size_t) dummy;
-       mword desc;
-
-       if (sgen_ptr_in_nursery (start)) {
-               if (flag)
-                       SGEN_ASSERT (0, SGEN_OBJECT_IS_PINNED (start), "All objects remaining in the nursery must be pinned");
-       } else if (flag) {
-               if (!sgen_los_object_is_pinned (start))
-                       return;
-       } else {
-               if (!major_collector.is_object_live (start))
-                       return;
-       }
-
-       desc = sgen_obj_get_descriptor_safe (start);
-
-#include "sgen-scan-object.h"
-}
-
-void
-sgen_check_heap_marked (gboolean nursery_must_be_pinned)
-{
-       setup_valid_nursery_objects ();
-
-       iterate_valid_nursery_objects (check_marked_callback, (void*)(size_t)nursery_must_be_pinned);
-       major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, check_marked_callback, (void*)FALSE);
-       sgen_los_iterate_objects (check_marked_callback, (void*)TRUE);
-}
-
-static void
-check_nursery_objects_pinned_callback (char *obj, size_t size, void *data /* ScanCopyContext *ctx */)
-{
-       gboolean pinned = (gboolean) (size_t) data;
-
-       g_assert (!SGEN_OBJECT_IS_FORWARDED (obj));
-       if (pinned)
-               g_assert (SGEN_OBJECT_IS_PINNED (obj));
-       else
-               g_assert (!SGEN_OBJECT_IS_PINNED (obj));
-}
-
-void
-sgen_check_nursery_objects_pinned (gboolean pinned)
-{
-       sgen_clear_nursery_fragments ();
-       sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
-                       (IterateObjectCallbackFunc)check_nursery_objects_pinned_callback, (void*) (size_t) pinned /* (void*)&ctx */, FALSE);
-}
-
-static void
-verify_scan_starts (char *start, char *end)
-{
-       size_t i;
-
-       for (i = 0; i < nursery_section->num_scan_start; ++i) {
-               char *addr = nursery_section->scan_starts [i];
-               if (addr > start && addr < end)
-                       SGEN_LOG (0, "NFC-BAD SCAN START [%zu] %p for obj [%p %p]", i, addr, start, end);
-       }
-}
-
-void
-sgen_debug_verify_nursery (gboolean do_dump_nursery_content)
-{
-       char *start, *end, *cur, *hole_start;
-
-       if (nursery_canaries_enabled ())
-               SGEN_LOG (0, "Checking nursery canaries...");
-
-       /*This cleans up unused fragments */
-       sgen_nursery_allocator_prepare_for_pinning ();
-
-       hole_start = start = cur = sgen_get_nursery_start ();
-       end = sgen_get_nursery_end ();
-
-       while (cur < end) {
-               size_t ss, size;
-               gboolean is_array_fill;
-
-               if (!*(void**)cur) {
-                       cur += sizeof (void*);
-                       continue;
-               }
-
-               if (object_is_forwarded (cur))
-                       SGEN_LOG (0, "FORWARDED OBJ %p", cur);
-               else if (object_is_pinned (cur))
-                       SGEN_LOG (0, "PINNED OBJ %p", cur);
-
-               ss = safe_object_get_size ((GCObject*)cur);
-               size = SGEN_ALIGN_UP (ss);
-               verify_scan_starts (cur, cur + size);
-               is_array_fill = sgen_client_object_is_array_fill ((GCObject*)cur);
-               if (do_dump_nursery_content) {
-                       GCVTable *vtable = SGEN_LOAD_VTABLE (cur);
-                       if (cur > hole_start)
-                               SGEN_LOG (0, "HOLE [%p %p %d]", hole_start, cur, (int)(cur - hole_start));
-                       SGEN_LOG (0, "OBJ  [%p %p %d %d %s.%s %d]", cur, cur + size, (int)size, (int)ss,
-                                       sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable),
-                                       is_array_fill);
-               }
-               if (nursery_canaries_enabled () && !is_array_fill) {
-                       CHECK_CANARY_FOR_OBJECT (cur);
-                       CANARIFY_SIZE (size);
-               }
-               cur += size;
-               hole_start = cur;
-       }
-}
-
-/*
- * Checks that no objects in the nursery are fowarded or pinned.  This
- * is a precondition to restarting the mutator while doing a
- * concurrent collection.  Note that we don't clear fragments because
- * we depend on that having happened earlier.
- */
-void
-sgen_debug_check_nursery_is_clean (void)
-{
-       char *end, *cur;
-
-       cur = sgen_get_nursery_start ();
-       end = sgen_get_nursery_end ();
-
-       while (cur < end) {
-               size_t size;
-
-               if (!*(void**)cur) {
-                       cur += sizeof (void*);
-                       continue;
-               }
-
-               g_assert (!object_is_forwarded (cur));
-               g_assert (!object_is_pinned (cur));
-
-               size = SGEN_ALIGN_UP (safe_object_get_size ((GCObject*)cur));
-               verify_scan_starts (cur, cur + size);
-
-               cur += size;
-       }
-}
-
-static gboolean scan_object_for_specific_ref_precise = TRUE;
-
-#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj) do {                                       \
-               if ((GCObject*)*(ptr) == key) {                         \
-                       GCVTable *vtable = SGEN_LOAD_VTABLE (*(ptr));   \
-                       g_print ("found ref to %p in object %p (%s.%s) at offset %td\n", \
-                                       key, (obj), sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable), ((char*)(ptr) - (char*)(obj))); \
-               }                                                       \
-       } while (0)
-
-static void
-scan_object_for_specific_ref (char *start, GCObject *key)
-{
-       char *forwarded;
-
-       if ((forwarded = SGEN_OBJECT_IS_FORWARDED (start)))
-               start = forwarded;
-
-       if (scan_object_for_specific_ref_precise) {
-               mword desc = sgen_obj_get_descriptor_safe (start);
-               #include "sgen-scan-object.h"
-       } else {
-               mword *words = (mword*)start;
-               size_t size = safe_object_get_size ((GCObject*)start);
-               int i;
-               for (i = 0; i < size / sizeof (mword); ++i) {
-                       if (words [i] == (mword)key) {
-                               GCVTable *vtable = SGEN_LOAD_VTABLE (start);
-                               g_print ("found possible ref to %p in object %p (%s.%s) at offset %td\n",
-                                               key, start, sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable), i * sizeof (mword));
-                       }
-               }
-       }
-}
-
-static void
-scan_object_for_specific_ref_callback (char *obj, size_t size, GCObject *key)
-{
-       scan_object_for_specific_ref (obj, key);
-}
-
-static void
-check_root_obj_specific_ref (RootRecord *root, GCObject *key, GCObject *obj)
-{
-       if (key != obj)
-               return;
-       g_print ("found ref to %p in root record %p\n", key, root);
-}
-
-static GCObject *check_key = NULL;
-static RootRecord *check_root = NULL;
-
-static void
-check_root_obj_specific_ref_from_marker (void **obj, void *gc_data)
-{
-       check_root_obj_specific_ref (check_root, check_key, *obj);
-}
-
-static void
-scan_roots_for_specific_ref (GCObject *key, int root_type)
-{
-       void **start_root;
-       RootRecord *root;
-       check_key = key;
-
-       SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
-               mword desc = root->root_desc;
-
-               check_root = root;
-
-               switch (desc & ROOT_DESC_TYPE_MASK) {
-               case ROOT_DESC_BITMAP:
-                       desc >>= ROOT_DESC_TYPE_SHIFT;
-                       while (desc) {
-                               if (desc & 1)
-                                       check_root_obj_specific_ref (root, key, *start_root);
-                               desc >>= 1;
-                               start_root++;
-                       }
-                       return;
-               case ROOT_DESC_COMPLEX: {
-                       gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
-                       int bwords = (int) ((*bitmap_data) - 1);
-                       void **start_run = start_root;
-                       bitmap_data++;
-                       while (bwords-- > 0) {
-                               gsize bmap = *bitmap_data++;
-                               void **objptr = start_run;
-                               while (bmap) {
-                                       if (bmap & 1)
-                                               check_root_obj_specific_ref (root, key, *objptr);
-                                       bmap >>= 1;
-                                       ++objptr;
-                               }
-                               start_run += GC_BITS_PER_WORD;
-                       }
-                       break;
-               }
-               case ROOT_DESC_USER: {
-                       SgenUserRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
-                       marker (start_root, check_root_obj_specific_ref_from_marker, NULL);
-                       break;
-               }
-               case ROOT_DESC_RUN_LEN:
-                       g_assert_not_reached ();
-               default:
-                       g_assert_not_reached ();
-               }
-       } SGEN_HASH_TABLE_FOREACH_END;
-
-       check_key = NULL;
-       check_root = NULL;
-}
-
-void
-mono_gc_scan_for_specific_ref (GCObject *key, gboolean precise)
-{
-       void **ptr;
-       RootRecord *root;
-
-       scan_object_for_specific_ref_precise = precise;
-
-       sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
-                       (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key, TRUE);
-
-       major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
-
-       sgen_los_iterate_objects ((IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
-
-       scan_roots_for_specific_ref (key, ROOT_TYPE_NORMAL);
-       scan_roots_for_specific_ref (key, ROOT_TYPE_WBARRIER);
-
-       SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], ptr, root) {
-               while (ptr < (void**)root->end_root) {
-                       check_root_obj_specific_ref (root, *ptr, key);
-                       ++ptr;
-               }
-       } SGEN_HASH_TABLE_FOREACH_END;
-
-       if (sgen_is_world_stopped ())
-               find_pinning_ref_from_thread ((char*)key, sizeof (MonoObject));
-}
-
-#ifndef SGEN_WITHOUT_MONO
-
-static MonoDomain *check_domain = NULL;
-
-static void
-check_obj_not_in_domain (MonoObject **o)
-{
-       g_assert (((*o))->vtable->domain != check_domain);
-}
-
-
-static void
-check_obj_not_in_domain_callback (void **o, void *gc_data)
-{
-       g_assert (((MonoObject*)(*o))->vtable->domain != check_domain);
-}
-
-void
-sgen_scan_for_registered_roots_in_domain (MonoDomain *domain, int root_type)
-{
-       void **start_root;
-       RootRecord *root;
-       check_domain = domain;
-       SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
-               mword desc = root->root_desc;
-
-               /* The MonoDomain struct is allowed to hold
-                  references to objects in its own domain. */
-               if (start_root == (void**)domain)
-                       continue;
-
-               switch (desc & ROOT_DESC_TYPE_MASK) {
-               case ROOT_DESC_BITMAP:
-                       desc >>= ROOT_DESC_TYPE_SHIFT;
-                       while (desc) {
-                               if ((desc & 1) && *start_root)
-                                       check_obj_not_in_domain (*start_root);
-                               desc >>= 1;
-                               start_root++;
-                       }
-                       break;
-               case ROOT_DESC_COMPLEX: {
-                       gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
-                       int bwords = (int)((*bitmap_data) - 1);
-                       void **start_run = start_root;
-                       bitmap_data++;
-                       while (bwords-- > 0) {
-                               gsize bmap = *bitmap_data++;
-                               void **objptr = start_run;
-                               while (bmap) {
-                                       if ((bmap & 1) && *objptr)
-                                               check_obj_not_in_domain (*objptr);
-                                       bmap >>= 1;
-                                       ++objptr;
-                               }
-                               start_run += GC_BITS_PER_WORD;
-                       }
-                       break;
-               }
-               case ROOT_DESC_USER: {
-                       SgenUserRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
-                       marker (start_root, check_obj_not_in_domain_callback, NULL);
-                       break;
-               }
-               case ROOT_DESC_RUN_LEN:
-                       g_assert_not_reached ();
-               default:
-                       g_assert_not_reached ();
-               }
-       } SGEN_HASH_TABLE_FOREACH_END;
-
-       check_domain = NULL;
-}
-
-static gboolean
-is_xdomain_ref_allowed (gpointer *ptr, char *obj, MonoDomain *domain)
-{
-       MonoObject *o = (MonoObject*)(obj);
-       MonoObject *ref = (MonoObject*)*(ptr);
-       size_t offset = (char*)(ptr) - (char*)o;
-
-       if (o->vtable->klass == mono_defaults.thread_class && offset == G_STRUCT_OFFSET (MonoThread, internal_thread))
-               return TRUE;
-       if (o->vtable->klass == mono_defaults.internal_thread_class && offset == G_STRUCT_OFFSET (MonoInternalThread, current_appcontext))
-               return TRUE;
-
-#ifndef DISABLE_REMOTING
-       if (mono_defaults.real_proxy_class->supertypes && mono_class_has_parent_fast (o->vtable->klass, mono_defaults.real_proxy_class) &&
-                       offset == G_STRUCT_OFFSET (MonoRealProxy, unwrapped_server))
-               return TRUE;
-#endif
-       /* Thread.cached_culture_info */
-       if (!strcmp (ref->vtable->klass->name_space, "System.Globalization") &&
-                       !strcmp (ref->vtable->klass->name, "CultureInfo") &&
-                       !strcmp(o->vtable->klass->name_space, "System") &&
-                       !strcmp(o->vtable->klass->name, "Object[]"))
-               return TRUE;
-       /*
-        *  at System.IO.MemoryStream.InternalConstructor (byte[],int,int,bool,bool) [0x0004d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:121
-        * at System.IO.MemoryStream..ctor (byte[]) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:81
-        * at (wrapper remoting-invoke-with-check) System.IO.MemoryStream..ctor (byte[]) <IL 0x00020, 0xffffffff>
-        * at System.Runtime.Remoting.Messaging.CADMethodCallMessage.GetArguments () [0x0000d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/CADMessages.cs:327
-        * at System.Runtime.Remoting.Messaging.MethodCall..ctor (System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/MethodCall.cs:87
-        * at System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) [0x00018] in /home/schani/Work/novell/trunk/mcs/class/corlib/System/AppDomain.cs:1213
-        * at (wrapper remoting-invoke-with-check) System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) <IL 0x0003d, 0xffffffff>
-        * at System.Runtime.Remoting.Channels.CrossAppDomainSink.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00008] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Channels/CrossAppDomainChannel.cs:198
-        * at (wrapper runtime-invoke) object.runtime_invoke_CrossAppDomainSink/ProcessMessageRes_object_object (object,intptr,intptr,intptr) <IL 0x0004c, 0xffffffff>
-        */
-       if (!strcmp (ref->vtable->klass->name_space, "System") &&
-                       !strcmp (ref->vtable->klass->name, "Byte[]") &&
-                       !strcmp (o->vtable->klass->name_space, "System.IO") &&
-                       !strcmp (o->vtable->klass->name, "MemoryStream"))
-               return TRUE;
-       return FALSE;
-}
-
-static void
-check_reference_for_xdomain (gpointer *ptr, char *obj, MonoDomain *domain)
-{
-       MonoObject *o = (MonoObject*)(obj);
-       MonoObject *ref = (MonoObject*)*(ptr);
-       size_t offset = (char*)(ptr) - (char*)o;
-       MonoClass *class;
-       MonoClassField *field;
-       char *str;
-
-       if (!ref || ref->vtable->domain == domain)
-               return;
-       if (is_xdomain_ref_allowed (ptr, obj, domain))
-               return;
-
-       field = NULL;
-       for (class = o->vtable->klass; class; class = class->parent) {
-               int i;
-
-               for (i = 0; i < class->field.count; ++i) {
-                       if (class->fields[i].offset == offset) {
-                               field = &class->fields[i];
-                               break;
-                       }
-               }
-               if (field)
-                       break;
-       }
-
-       if (ref->vtable->klass == mono_defaults.string_class)
-               str = mono_string_to_utf8 ((MonoString*)ref);
-       else
-               str = NULL;
-       g_print ("xdomain reference in %p (%s.%s) at offset %d (%s) to %p (%s.%s) (%s)  -  pointed to by:\n",
-                       o, o->vtable->klass->name_space, o->vtable->klass->name,
-                       offset, field ? field->name : "",
-                       ref, ref->vtable->klass->name_space, ref->vtable->klass->name, str ? str : "");
-       mono_gc_scan_for_specific_ref (o, TRUE);
-       if (str)
-               g_free (str);
-}
-
-#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj)    check_reference_for_xdomain ((ptr), (obj), domain)
-
-static void
-scan_object_for_xdomain_refs (char *start, mword size, void *data)
-{
-       MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (start);
-       MonoDomain *domain = vt->domain;
-       mword desc = sgen_vtable_get_descriptor ((GCVTable*)vt);
-
-       #include "sgen-scan-object.h"
-}
-
-void
-sgen_check_for_xdomain_refs (void)
-{
-       LOSObject *bigobj;
-
-       sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
-                       (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL, FALSE);
-
-       major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);
-
-       for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
-               scan_object_for_xdomain_refs (bigobj->data, sgen_los_object_size (bigobj), NULL);
-}
-
-#endif
-
-/* If not null, dump the heap after each collection into this file */
-static FILE *heap_dump_file = NULL;
-
-void
-sgen_dump_occupied (char *start, char *end, char *section_start)
-{
-       fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
-}
-
-void
-sgen_dump_section (GCMemSection *section, const char *type)
-{
-       char *start = section->data;
-       char *end = section->data + section->size;
-       char *occ_start = NULL;
-
-       fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
-
-       while (start < end) {
-               guint size;
-               //GCVTable *vt;
-               //MonoClass *class;
-
-               if (!*(void**)start) {
-                       if (occ_start) {
-                               sgen_dump_occupied (occ_start, start, section->data);
-                               occ_start = NULL;
-                       }
-                       start += sizeof (void*); /* should be ALLOC_ALIGN, really */
-                       continue;
-               }
-               g_assert (start < section->next_data);
-
-               if (!occ_start)
-                       occ_start = start;
-
-               //vt = (GCVTable*)SGEN_LOAD_VTABLE (start);
-               //class = vt->klass;
-
-               size = SGEN_ALIGN_UP (safe_object_get_size ((GCObject*) start));
-
-               /*
-               fprintf (heap_dump_file, "<object offset=\"%d\" class=\"%s.%s\" size=\"%d\"/>\n",
-                               start - section->data,
-                               vt->klass->name_space, vt->klass->name,
-                               size);
-               */
-
-               start += size;
-       }
-       if (occ_start)
-               sgen_dump_occupied (occ_start, start, section->data);
-
-       fprintf (heap_dump_file, "</section>\n");
-}
-
-static void
-dump_object (GCObject *obj, gboolean dump_location)
-{
-#ifndef SGEN_WITHOUT_MONO
-       static char class_name [1024];
-
-       MonoClass *class = mono_object_class (obj);
-       int i, j;
-
-       /*
-        * Python's XML parser is too stupid to parse angle brackets
-        * in strings, so we just ignore them;
-        */
-       i = j = 0;
-       while (class->name [i] && j < sizeof (class_name) - 1) {
-               if (!strchr ("<>\"", class->name [i]))
-                       class_name [j++] = class->name [i];
-               ++i;
-       }
-       g_assert (j < sizeof (class_name));
-       class_name [j] = 0;
-
-       fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%zd\"",
-                       class->name_space, class_name,
-                       safe_object_get_size (obj));
-       if (dump_location) {
-               const char *location;
-               if (sgen_ptr_in_nursery (obj))
-                       location = "nursery";
-               else if (safe_object_get_size (obj) <= SGEN_MAX_SMALL_OBJ_SIZE)
-                       location = "major";
-               else
-                       location = "LOS";
-               fprintf (heap_dump_file, " location=\"%s\"", location);
-       }
-       fprintf (heap_dump_file, "/>\n");
-#endif
-}
-
-void
-sgen_debug_enable_heap_dump (const char *filename)
-{
-       heap_dump_file = fopen (filename, "w");
-       if (heap_dump_file) {
-               fprintf (heap_dump_file, "<sgen-dump>\n");
-               sgen_pin_stats_enable ();
-       }
-}
-
-void
-sgen_debug_dump_heap (const char *type, int num, const char *reason)
-{
-       SgenPointerQueue *pinned_objects;
-       LOSObject *bigobj;
-       int i;
-
-       if (!heap_dump_file)
-               return;
-
-       fprintf (heap_dump_file, "<collection type=\"%s\" num=\"%d\"", type, num);
-       if (reason)
-               fprintf (heap_dump_file, " reason=\"%s\"", reason);
-       fprintf (heap_dump_file, ">\n");
-#ifndef SGEN_WITHOUT_MONO
-       fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
-#endif
-       sgen_dump_internal_mem_usage (heap_dump_file);
-       fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_STACK));
-       /* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
-       fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_OTHER));
-
-       fprintf (heap_dump_file, "<pinned-objects>\n");
-       pinned_objects = sgen_pin_stats_get_object_list ();
-       for (i = 0; i < pinned_objects->next_slot; ++i)
-               dump_object (pinned_objects->data [i], TRUE);
-       fprintf (heap_dump_file, "</pinned-objects>\n");
-
-       sgen_dump_section (nursery_section, "nursery");
-
-       major_collector.dump_heap (heap_dump_file);
-
-       fprintf (heap_dump_file, "<los>\n");
-       for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
-               dump_object ((GCObject*)bigobj->data, FALSE);
-       fprintf (heap_dump_file, "</los>\n");
-
-       fprintf (heap_dump_file, "</collection>\n");
-}
-
-static char *found_obj;
-
-static void
-find_object_for_ptr_callback (char *obj, size_t size, void *user_data)
-{
-       char *ptr = user_data;
-
-       if (ptr >= obj && ptr < obj + size) {
-               g_assert (!found_obj);
-               found_obj = obj;
-       }
-}
-
-/* for use in the debugger */
-char*
-sgen_find_object_for_ptr (char *ptr)
-{
-       if (ptr >= nursery_section->data && ptr < nursery_section->end_data) {
-               found_obj = NULL;
-               sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
-                               find_object_for_ptr_callback, ptr, TRUE);
-               if (found_obj)
-                       return found_obj;
-       }
-
-       found_obj = NULL;
-       sgen_los_iterate_objects (find_object_for_ptr_callback, ptr);
-       if (found_obj)
-               return found_obj;
-
-       /*
-        * Very inefficient, but this is debugging code, supposed to
-        * be called from gdb, so we don't care.
-        */
-       found_obj = NULL;
-       major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, find_object_for_ptr_callback, ptr);
-       return found_obj;
-}
-
-#ifndef SGEN_WITHOUT_MONO
-
-static int
-compare_xrefs (const void *a_ptr, const void *b_ptr)
-{
-       const MonoGCBridgeXRef *a = a_ptr;
-       const MonoGCBridgeXRef *b = b_ptr;
-
-       if (a->src_scc_index < b->src_scc_index)
-               return -1;
-       if (a->src_scc_index > b->src_scc_index)
-               return 1;
-
-       if (a->dst_scc_index < b->dst_scc_index)
-               return -1;
-       if (a->dst_scc_index > b->dst_scc_index)
-               return 1;
-
-       return 0;
-}
-
-/*
-static void
-dump_processor_state (SgenBridgeProcessor *p)
-{
-       int i;
-
-       printf ("------\n");
-       printf ("SCCS %d\n", p->num_sccs);
-       for (i = 0; i < p->num_sccs; ++i) {
-               int j;
-               MonoGCBridgeSCC *scc = p->api_sccs [i];
-               printf ("\tSCC %d:", i);
-               for (j = 0; j < scc->num_objs; ++j) {
-                       MonoObject *obj = scc->objs [j];
-                       printf (" %p", obj);
-               }
-               printf ("\n");
-       }
-
-       printf ("XREFS %d\n", p->num_xrefs);
-       for (i = 0; i < p->num_xrefs; ++i)
-               printf ("\t%d -> %d\n", p->api_xrefs [i].src_scc_index, p->api_xrefs [i].dst_scc_index);
-
-       printf ("-------\n");
-}
-*/
-
-gboolean
-sgen_compare_bridge_processor_results (SgenBridgeProcessor *a, SgenBridgeProcessor *b)
-{
-       int i;
-       SgenHashTable obj_to_a_scc = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_BRIDGE_DEBUG, INTERNAL_MEM_BRIDGE_DEBUG, sizeof (int), mono_aligned_addr_hash, NULL);
-       SgenHashTable b_scc_to_a_scc = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_BRIDGE_DEBUG, INTERNAL_MEM_BRIDGE_DEBUG, sizeof (int), g_direct_hash, NULL);
-       MonoGCBridgeXRef *a_xrefs, *b_xrefs;
-       size_t xrefs_alloc_size;
-
-       // dump_processor_state (a);
-       // dump_processor_state (b);
-
-       if (a->num_sccs != b->num_sccs)
-               g_error ("SCCS count expected %d but got %d", a->num_sccs, b->num_sccs);
-       if (a->num_xrefs != b->num_xrefs)
-               g_error ("SCCS count expected %d but got %d", a->num_xrefs, b->num_xrefs);
-
-       /*
-        * First we build a hash of each object in `a` to its respective SCC index within
-        * `a`.  Along the way we also assert that no object is more than one SCC.
-        */
-       for (i = 0; i < a->num_sccs; ++i) {
-               int j;
-               MonoGCBridgeSCC *scc = a->api_sccs [i];
-
-               g_assert (scc->num_objs > 0);
-
-               for (j = 0; j < scc->num_objs; ++j) {
-                       GCObject *obj = scc->objs [j];
-                       gboolean new_entry = sgen_hash_table_replace (&obj_to_a_scc, obj, &i, NULL);
-                       g_assert (new_entry);
-               }
-       }
-
-       /*
-        * Now we check whether each of the objects in `b` are in `a`, and whether the SCCs
-        * of `b` contain the same sets of objects as those of `a`.
-        *
-        * While we're doing this, build a hash table to map from `b` SCC indexes to `a` SCC
-        * indexes.
-        */
-       for (i = 0; i < b->num_sccs; ++i) {
-               MonoGCBridgeSCC *scc = b->api_sccs [i];
-               MonoGCBridgeSCC *a_scc;
-               int *a_scc_index_ptr;
-               int a_scc_index;
-               int j;
-               gboolean new_entry;
-
-               g_assert (scc->num_objs > 0);
-               a_scc_index_ptr = sgen_hash_table_lookup (&obj_to_a_scc, scc->objs [0]);
-               g_assert (a_scc_index_ptr);
-               a_scc_index = *a_scc_index_ptr;
-
-               //g_print ("A SCC %d -> B SCC %d\n", a_scc_index, i);
-
-               a_scc = a->api_sccs [a_scc_index];
-               g_assert (a_scc->num_objs == scc->num_objs);
-
-               for (j = 1; j < scc->num_objs; ++j) {
-                       a_scc_index_ptr = sgen_hash_table_lookup (&obj_to_a_scc, scc->objs [j]);
-                       g_assert (a_scc_index_ptr);
-                       g_assert (*a_scc_index_ptr == a_scc_index);
-               }
-
-               new_entry = sgen_hash_table_replace (&b_scc_to_a_scc, GINT_TO_POINTER (i), &a_scc_index, NULL);
-               g_assert (new_entry);
-       }
-
-       /*
-        * Finally, check that we have the same xrefs.  We do this by making copies of both
-        * xref arrays, and replacing the SCC indexes in the copy for `b` with the
-        * corresponding indexes in `a`.  Then we sort both arrays and assert that they're
-        * the same.
-        *
-        * At the same time, check that no xref is self-referential and that there are no
-        * duplicate ones.
-        */
-
-       xrefs_alloc_size = a->num_xrefs * sizeof (MonoGCBridgeXRef);
-       a_xrefs = sgen_alloc_internal_dynamic (xrefs_alloc_size, INTERNAL_MEM_BRIDGE_DEBUG, TRUE);
-       b_xrefs = sgen_alloc_internal_dynamic (xrefs_alloc_size, INTERNAL_MEM_BRIDGE_DEBUG, TRUE);
-
-       memcpy (a_xrefs, a->api_xrefs, xrefs_alloc_size);
-       for (i = 0; i < b->num_xrefs; ++i) {
-               MonoGCBridgeXRef *xref = &b->api_xrefs [i];
-               int *scc_index_ptr;
-
-               g_assert (xref->src_scc_index != xref->dst_scc_index);
-
-               scc_index_ptr = sgen_hash_table_lookup (&b_scc_to_a_scc, GINT_TO_POINTER (xref->src_scc_index));
-               g_assert (scc_index_ptr);
-               b_xrefs [i].src_scc_index = *scc_index_ptr;
-
-               scc_index_ptr = sgen_hash_table_lookup (&b_scc_to_a_scc, GINT_TO_POINTER (xref->dst_scc_index));
-               g_assert (scc_index_ptr);
-               b_xrefs [i].dst_scc_index = *scc_index_ptr;
-       }
-
-       qsort (a_xrefs, a->num_xrefs, sizeof (MonoGCBridgeXRef), compare_xrefs);
-       qsort (b_xrefs, a->num_xrefs, sizeof (MonoGCBridgeXRef), compare_xrefs);
-
-       for (i = 0; i < a->num_xrefs; ++i) {
-               g_assert (a_xrefs [i].src_scc_index == b_xrefs [i].src_scc_index);
-               g_assert (a_xrefs [i].dst_scc_index == b_xrefs [i].dst_scc_index);
-       }
-
-       sgen_hash_table_clean (&obj_to_a_scc);
-       sgen_hash_table_clean (&b_scc_to_a_scc);
-       sgen_free_internal_dynamic (a_xrefs, xrefs_alloc_size, INTERNAL_MEM_BRIDGE_DEBUG);
-       sgen_free_internal_dynamic (b_xrefs, xrefs_alloc_size, INTERNAL_MEM_BRIDGE_DEBUG);
-
-       return TRUE;
-}
-
-#endif
-
-#endif /*HAVE_SGEN_GC*/
diff --git a/mono/metadata/sgen-descriptor.c b/mono/metadata/sgen-descriptor.c
deleted file mode 100644 (file)
index ce94a6c..0000000
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * sgen-descriptor.c: GC descriptors describe object layout.
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include "config.h"
-#ifdef HAVE_SGEN_GC
-
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#ifdef HAVE_PTHREAD_H
-#include <pthread.h>
-#endif
-#ifdef HAVE_SEMAPHORE_H
-#include <semaphore.h>
-#endif
-#include <stdio.h>
-#include <string.h>
-#include <errno.h>
-#include <assert.h>
-#ifdef __MACH__
-#undef _XOPEN_SOURCE
-#endif
-#ifdef __MACH__
-#define _XOPEN_SOURCE
-#endif
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/gc-internal-agnostic.h"
-
-#define MAX_USER_DESCRIPTORS 16
-
-#define MAKE_ROOT_DESC(type,val) ((type) | ((val) << ROOT_DESC_TYPE_SHIFT))
-#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
-
-
-static gsize* complex_descriptors = NULL;
-static int complex_descriptors_size = 0;
-static int complex_descriptors_next = 0;
-static SgenUserRootMarkFunc user_descriptors [MAX_USER_DESCRIPTORS];
-static int user_descriptors_next = 0;
-static void *all_ref_root_descrs [32];
-
-#ifdef HEAVY_STATISTICS
-static guint64 stat_scanned_count_per_descriptor [DESC_TYPE_MAX];
-static guint64 stat_copied_count_per_descriptor [DESC_TYPE_MAX];
-#endif
-
-static int
-alloc_complex_descriptor (gsize *bitmap, int numbits)
-{
-       int nwords, res, i;
-
-       numbits = ALIGN_TO (numbits, GC_BITS_PER_WORD);
-       nwords = numbits / GC_BITS_PER_WORD + 1;
-
-       sgen_gc_lock ();
-       res = complex_descriptors_next;
-       /* linear search, so we don't have duplicates with domain load/unload
-        * this should not be performance critical or we'd have bigger issues
-        * (the number and size of complex descriptors should be small).
-        */
-       for (i = 0; i < complex_descriptors_next; ) {
-               if (complex_descriptors [i] == nwords) {
-                       int j, found = TRUE;
-                       for (j = 0; j < nwords - 1; ++j) {
-                               if (complex_descriptors [i + 1 + j] != bitmap [j]) {
-                                       found = FALSE;
-                                       break;
-                               }
-                       }
-                       if (found) {
-                               sgen_gc_unlock ();
-                               return i;
-                       }
-               }
-               i += (int)complex_descriptors [i];
-       }
-       if (complex_descriptors_next + nwords > complex_descriptors_size) {
-               int new_size = complex_descriptors_size * 2 + nwords;
-               complex_descriptors = g_realloc (complex_descriptors, new_size * sizeof (gsize));
-               complex_descriptors_size = new_size;
-       }
-       SGEN_LOG (6, "Complex descriptor %d, size: %d (total desc memory: %d)", res, nwords, complex_descriptors_size);
-       complex_descriptors_next += nwords;
-       complex_descriptors [res] = nwords;
-       for (i = 0; i < nwords - 1; ++i) {
-               complex_descriptors [res + 1 + i] = bitmap [i];
-               SGEN_LOG (6, "\tvalue: %p", (void*)complex_descriptors [res + 1 + i]);
-       }
-       sgen_gc_unlock ();
-       return res;
-}
-
-gsize*
-sgen_get_complex_descriptor (mword desc)
-{
-       return complex_descriptors + (desc >> LOW_TYPE_BITS);
-}
-
-/*
- * Descriptor builders.
- */
-void*
-mono_gc_make_descr_for_object (gsize *bitmap, int numbits, size_t obj_size)
-{
-       int first_set = -1, num_set = 0, last_set = -1, i;
-       mword desc = 0;
-       size_t stored_size = obj_size;
-
-       stored_size += SGEN_ALLOC_ALIGN - 1;
-       stored_size &= ~(SGEN_ALLOC_ALIGN - 1);
-
-       for (i = 0; i < numbits; ++i) {
-               if (bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) {
-                       if (first_set < 0)
-                               first_set = i;
-                       last_set = i;
-                       num_set++;
-               }
-       }
-
-       if (first_set < 0) {
-               SGEN_LOG (6, "Ptrfree descriptor %p, size: %zd", (void*)desc, stored_size);
-               if (stored_size <= MAX_RUNLEN_OBJECT_SIZE && stored_size <= SGEN_MAX_SMALL_OBJ_SIZE)
-                       return (void*)(DESC_TYPE_SMALL_PTRFREE | stored_size);
-               return (void*)DESC_TYPE_COMPLEX_PTRFREE;
-       }
-
-       g_assert (!(stored_size & 0x7));
-
-       SGEN_ASSERT (5, stored_size == SGEN_ALIGN_UP (stored_size), "Size is not aligned");
-
-       /* we know the 2-word header is ptr-free */
-       if (last_set < BITMAP_NUM_BITS + OBJECT_HEADER_WORDS && stored_size <= SGEN_MAX_SMALL_OBJ_SIZE) {
-               desc = DESC_TYPE_BITMAP | ((*bitmap >> OBJECT_HEADER_WORDS) << LOW_TYPE_BITS);
-               SGEN_LOG (6, "Largebitmap descriptor %p, size: %zd, last set: %d", (void*)desc, stored_size, last_set);
-               return (void*) desc;
-       }
-
-       if (stored_size <= MAX_RUNLEN_OBJECT_SIZE && stored_size <= SGEN_MAX_SMALL_OBJ_SIZE) {
-               /* check run-length encoding first: one byte offset, one byte number of pointers
-                * on 64 bit archs, we can have 3 runs, just one on 32.
-                * It may be better to use nibbles.
-                */
-               if (first_set < 256 && num_set < 256 && (first_set + num_set == last_set + 1)) {
-                       desc = DESC_TYPE_RUN_LENGTH | stored_size | (first_set << 16) | (num_set << 24);
-                       SGEN_LOG (6, "Runlen descriptor %p, size: %zd, first set: %d, num set: %d", (void*)desc, stored_size, first_set, num_set);
-                       return (void*) desc;
-               }
-       }
-
-       /* it's a complex object ... */
-       desc = DESC_TYPE_COMPLEX | (alloc_complex_descriptor (bitmap, last_set + 1) << LOW_TYPE_BITS);
-       return (void*) desc;
-}
-
-/* If the array holds references, numbits == 1 and the first bit is set in elem_bitmap */
-void*
-mono_gc_make_descr_for_array (int vector, gsize *elem_bitmap, int numbits, size_t elem_size)
-{
-       int first_set = -1, num_set = 0, last_set = -1, i;
-       mword desc = DESC_TYPE_VECTOR | (vector ? VECTOR_KIND_SZARRAY : VECTOR_KIND_ARRAY);
-       for (i = 0; i < numbits; ++i) {
-               if (elem_bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) {
-                       if (first_set < 0)
-                               first_set = i;
-                       last_set = i;
-                       num_set++;
-               }
-       }
-
-       if (first_set < 0) {
-               if (elem_size <= MAX_ELEMENT_SIZE)
-                       return (void*)(desc | VECTOR_SUBTYPE_PTRFREE | (elem_size << VECTOR_ELSIZE_SHIFT));
-               return (void*)DESC_TYPE_COMPLEX_PTRFREE;
-       }
-
-       if (elem_size <= MAX_ELEMENT_SIZE) {
-               desc |= elem_size << VECTOR_ELSIZE_SHIFT;
-               if (!num_set) {
-                       return (void*)(desc | VECTOR_SUBTYPE_PTRFREE);
-               }
-               /* Note: we also handle structs with just ref fields */
-               if (num_set * sizeof (gpointer) == elem_size) {
-                       return (void*)(desc | VECTOR_SUBTYPE_REFS | ((gssize)(-1) << 16));
-               }
-               /* FIXME: try run-len first */
-               /* Note: we can't skip the object header here, because it's not present */
-               if (last_set < VECTOR_BITMAP_SIZE) {
-                       return (void*)(desc | VECTOR_SUBTYPE_BITMAP | (*elem_bitmap << 16));
-               }
-       }
-       /* it's am array of complex structs ... */
-       desc = DESC_TYPE_COMPLEX_ARR;
-       desc |= alloc_complex_descriptor (elem_bitmap, last_set + 1) << LOW_TYPE_BITS;
-       return (void*) desc;
-}
-
-/* Return the bitmap encoded by a descriptor */
-gsize*
-mono_gc_get_bitmap_for_descr (void *descr, int *numbits)
-{
-       mword d = (mword)descr;
-       gsize *bitmap;
-
-       switch (d & DESC_TYPE_MASK) {
-       case DESC_TYPE_RUN_LENGTH: {            
-               int first_set = (d >> 16) & 0xff;
-               int num_set = (d >> 24) & 0xff;
-               int i;
-
-               bitmap = g_new0 (gsize, (first_set + num_set + 7) / 8);
-
-               for (i = first_set; i < first_set + num_set; ++i)
-                       bitmap [i / GC_BITS_PER_WORD] |= ((gsize)1 << (i % GC_BITS_PER_WORD));
-
-               *numbits = first_set + num_set;
-
-               return bitmap;
-       }
-
-       case DESC_TYPE_BITMAP: {
-               gsize bmap = (d >> LOW_TYPE_BITS) << OBJECT_HEADER_WORDS;
-
-               bitmap = g_new0 (gsize, 1);
-               bitmap [0] = bmap;
-               *numbits = 0;
-               while (bmap) {
-                       (*numbits) ++;
-                       bmap >>= 1;
-               }
-               return bitmap;
-       }
-
-       case DESC_TYPE_COMPLEX: {
-               gsize *bitmap_data = sgen_get_complex_descriptor (d);
-               int bwords = (int)(*bitmap_data) - 1;//Max scalar object size is 1Mb, which means up to 32k descriptor words
-               int i;
-
-               bitmap = g_new0 (gsize, bwords);
-               *numbits = bwords * GC_BITS_PER_WORD;
-
-               for (i = 0; i < bwords; ++i) {
-                       bitmap [i] = bitmap_data [i + 1];
-               }
-
-               return bitmap;
-       }
-
-       default:
-               g_assert_not_reached ();
-       }
-}
-
-void*
-mono_gc_make_descr_from_bitmap (gsize *bitmap, int numbits)
-{
-       if (numbits == 0) {
-               return (void*)MAKE_ROOT_DESC (ROOT_DESC_BITMAP, 0);
-       } else if (numbits < ((sizeof (*bitmap) * 8) - ROOT_DESC_TYPE_SHIFT)) {
-               return (void*)MAKE_ROOT_DESC (ROOT_DESC_BITMAP, bitmap [0]);
-       } else {
-               mword complex = alloc_complex_descriptor (bitmap, numbits);
-               return (void*)MAKE_ROOT_DESC (ROOT_DESC_COMPLEX, complex);
-       }
-}
-
-void*
-mono_gc_make_root_descr_all_refs (int numbits)
-{
-       gsize *gc_bitmap;
-       void *descr;
-       int num_bytes = numbits / 8;
-
-       if (numbits < 32 && all_ref_root_descrs [numbits])
-               return all_ref_root_descrs [numbits];
-
-       gc_bitmap = g_malloc0 (ALIGN_TO (ALIGN_TO (numbits, 8) + 1, sizeof (gsize)));
-       memset (gc_bitmap, 0xff, num_bytes);
-       if (numbits < ((sizeof (*gc_bitmap) * 8) - ROOT_DESC_TYPE_SHIFT)) 
-               gc_bitmap[0] = GUINT64_TO_LE(gc_bitmap[0]);
-       else if (numbits && num_bytes % (sizeof (*gc_bitmap)))
-               gc_bitmap[num_bytes / 8] = GUINT64_TO_LE(gc_bitmap [num_bytes / 8]);
-       if (numbits % 8)
-               gc_bitmap [numbits / 8] = (1 << (numbits % 8)) - 1;
-       descr = mono_gc_make_descr_from_bitmap (gc_bitmap, numbits);
-       g_free (gc_bitmap);
-
-       if (numbits < 32)
-               all_ref_root_descrs [numbits] = descr;
-
-       return descr;
-}
-
-void*
-sgen_make_user_root_descriptor (SgenUserRootMarkFunc marker)
-{
-       void *descr;
-
-       g_assert (user_descriptors_next < MAX_USER_DESCRIPTORS);
-       descr = (void*)MAKE_ROOT_DESC (ROOT_DESC_USER, (mword)user_descriptors_next);
-       user_descriptors [user_descriptors_next ++] = marker;
-
-       return descr;
-}
-
-void*
-sgen_get_complex_descriptor_bitmap (mword desc)
-{
-       return complex_descriptors + (desc >> ROOT_DESC_TYPE_SHIFT);
-}
-
-SgenUserRootMarkFunc
-sgen_get_user_descriptor_func (mword desc)
-{
-       return user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
-}
-
-#ifdef HEAVY_STATISTICS
-void
-sgen_descriptor_count_scanned_object (mword desc)
-{
-       int type = desc & DESC_TYPE_MASK;
-       SGEN_ASSERT (0, type, "Descriptor type can't be zero");
-       ++stat_scanned_count_per_descriptor [type - 1];
-}
-
-void
-sgen_descriptor_count_copied_object (mword desc)
-{
-       int type = desc & DESC_TYPE_MASK;
-       SGEN_ASSERT (0, type, "Descriptor type can't be zero");
-       ++stat_copied_count_per_descriptor [type - 1];
-}
-#endif
-
-void
-sgen_init_descriptors (void)
-{
-#ifdef HEAVY_STATISTICS
-       mono_counters_register ("# scanned RUN_LENGTH", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scanned_count_per_descriptor [DESC_TYPE_RUN_LENGTH - 1]);
-       mono_counters_register ("# scanned SMALL_PTRFREE", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scanned_count_per_descriptor [DESC_TYPE_SMALL_PTRFREE - 1]);
-       mono_counters_register ("# scanned COMPLEX", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scanned_count_per_descriptor [DESC_TYPE_COMPLEX - 1]);
-       mono_counters_register ("# scanned VECTOR", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scanned_count_per_descriptor [DESC_TYPE_VECTOR - 1]);
-       mono_counters_register ("# scanned BITMAP", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scanned_count_per_descriptor [DESC_TYPE_BITMAP - 1]);
-       mono_counters_register ("# scanned COMPLEX_ARR", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scanned_count_per_descriptor [DESC_TYPE_COMPLEX_ARR - 1]);
-       mono_counters_register ("# scanned COMPLEX_PTRFREE", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scanned_count_per_descriptor [DESC_TYPE_COMPLEX_PTRFREE - 1]);
-
-       mono_counters_register ("# copied RUN_LENGTH", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copied_count_per_descriptor [DESC_TYPE_RUN_LENGTH - 1]);
-       mono_counters_register ("# copied SMALL_PTRFREE", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copied_count_per_descriptor [DESC_TYPE_SMALL_PTRFREE - 1]);
-       mono_counters_register ("# copied COMPLEX", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copied_count_per_descriptor [DESC_TYPE_COMPLEX - 1]);
-       mono_counters_register ("# copied VECTOR", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copied_count_per_descriptor [DESC_TYPE_VECTOR - 1]);
-       mono_counters_register ("# copied BITMAP", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copied_count_per_descriptor [DESC_TYPE_BITMAP - 1]);
-       mono_counters_register ("# copied COMPLEX_ARR", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copied_count_per_descriptor [DESC_TYPE_COMPLEX_ARR - 1]);
-       mono_counters_register ("# copied COMPLEX_PTRFREE", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copied_count_per_descriptor [DESC_TYPE_COMPLEX_PTRFREE - 1]);
-#endif
-}
-
-#endif
diff --git a/mono/metadata/sgen-descriptor.h b/mono/metadata/sgen-descriptor.h
deleted file mode 100644 (file)
index 2fd6bb6..0000000
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * sgen-descriptor.h: GC descriptors describe object layout.
-
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
- *
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#ifndef __MONO_SGEN_DESCRIPTOR_H__
-#define __MONO_SGEN_DESCRIPTOR_H__
-
-#include <mono/metadata/sgen-conf.h>
-
-
-/*
- * ######################################################################
- * ########  GC descriptors
- * ######################################################################
- * Used to quickly get the info the GC needs about an object: size and
- * where the references are held.
- */
-#define OBJECT_HEADER_WORDS (SGEN_CLIENT_OBJECT_HEADER_SIZE / sizeof(gpointer))
-#define LOW_TYPE_BITS 3
-#define DESC_TYPE_MASK ((1 << LOW_TYPE_BITS) - 1)
-#define MAX_RUNLEN_OBJECT_SIZE 0xFFFF
-#define VECTOR_INFO_SHIFT 14
-#define VECTOR_KIND_SHIFT 13
-#define VECTOR_ELSIZE_SHIFT 3
-#define VECTOR_BITMAP_SHIFT 16
-#define VECTOR_BITMAP_SIZE (GC_BITS_PER_WORD - VECTOR_BITMAP_SHIFT)
-#define BITMAP_NUM_BITS (GC_BITS_PER_WORD - LOW_TYPE_BITS)
-#define MAX_ELEMENT_SIZE 0x3ff
-#define VECTOR_SUBTYPE_PTRFREE (DESC_TYPE_V_PTRFREE << VECTOR_INFO_SHIFT)
-#define VECTOR_SUBTYPE_REFS    (DESC_TYPE_V_REFS << VECTOR_INFO_SHIFT)
-#define VECTOR_SUBTYPE_BITMAP  (DESC_TYPE_V_BITMAP << VECTOR_INFO_SHIFT)
-
-#define VECTOR_KIND_SZARRAY  (DESC_TYPE_V_SZARRAY << VECTOR_KIND_SHIFT)
-#define VECTOR_KIND_ARRAY  (DESC_TYPE_V_ARRAY << VECTOR_KIND_SHIFT)
-
-/*
- * Objects are aligned to 8 bytes boundaries.
- *
- * A descriptor is a pointer in GCVTable, so 32 or 64 bits of size.
- * The low 3 bits define the type of the descriptor. The other bits
- * depend on the type.
- *
- * It's important to be able to quickly identify two properties of classes from their
- * descriptors: whether they are small enough to live in the regular major heap (size <=
- * SGEN_MAX_SMALL_OBJ_SIZE), and whether they contain references.
- *
- * To that end we have three descriptor types that only apply to small classes: RUN_LENGTH,
- * BITMAP, and SMALL_PTRFREE.  We also have the type COMPLEX_PTRFREE, which applies to
- * classes that are either not small or of unknown size (those being strings and arrays).
- * The lowest two bits of the SMALL_PTRFREE and COMPLEX_PTRFREE tags are the same, so we can
- * quickly check for references.
- *
- * As a general rule the 13 remaining low bits define the size, either
- * of the whole object or of the elements in the arrays. While for objects
- * the size is already in bytes, for arrays we need to shift, because
- * array elements might be smaller than 8 bytes. In case of arrays, we
- * use two bits to describe what the additional high bits represents,
- * so the default behaviour can handle element sizes less than 2048 bytes.
- * The high 16 bits, if 0 it means the object is pointer-free.
- * This design should make it easy and fast to skip over ptr-free data.
- * The first 4 types should cover >95% of the objects.
- * Note that since the size of objects is limited to 64K, larger objects
- * will be allocated in the large object heap.
- * If we want 4-bytes alignment, we need to put vector and small bitmap
- * inside complex.
- *
- * We don't use 0 so that 0 isn't a valid GC descriptor.  No deep reason for this other than
- * to be able to identify a non-inited descriptor for debugging.
- */
-enum {
-       /* Keep in sync with `descriptor_types` in sgen-debug.c! */
-       DESC_TYPE_RUN_LENGTH = 1,   /* 16 bits aligned byte size | 1-3 (offset, numptr) bytes tuples */
-       DESC_TYPE_BITMAP = 2,       /* | 29-61 bitmap bits */
-       DESC_TYPE_SMALL_PTRFREE = 3,
-       DESC_TYPE_MAX_SMALL_OBJ = 3,
-       DESC_TYPE_COMPLEX = 4,      /* index for bitmap into complex_descriptors */
-       DESC_TYPE_VECTOR = 5,       /* 10 bits element size | 1 bit kind | 2 bits desc | element desc */
-       DESC_TYPE_COMPLEX_ARR = 6,  /* index for bitmap into complex_descriptors */
-       DESC_TYPE_COMPLEX_PTRFREE = 7, /* Nothing, used to encode large ptr objects and strings. */
-       DESC_TYPE_MAX = 7,
-
-       DESC_TYPE_PTRFREE_MASK = 3,
-       DESC_TYPE_PTRFREE_BITS = 3
-};
-
-/* values for array kind */
-enum {
-       DESC_TYPE_V_SZARRAY = 0, /*vector with no bounds data */
-       DESC_TYPE_V_ARRAY = 1, /* array with bounds data */
-};
-
-/* subtypes for arrays and vectors */
-enum {
-       DESC_TYPE_V_PTRFREE = 0,/* there are no refs: keep first so it has a zero value  */
-       DESC_TYPE_V_REFS,       /* all the array elements are refs */
-       DESC_TYPE_V_RUN_LEN,    /* elements are run-length encoded as DESC_TYPE_RUN_LENGTH */
-       DESC_TYPE_V_BITMAP      /* elements are as the bitmap in DESC_TYPE_SMALL_BITMAP */
-};
-
-#define SGEN_DESC_STRING       (DESC_TYPE_COMPLEX_PTRFREE | (1 << LOW_TYPE_BITS))
-
-/* Root bitmap descriptors are simpler: the lower three bits describe the type
- * and we either have 30/62 bitmap bits or nibble-based run-length,
- * or a complex descriptor, or a user defined marker function.
- */
-enum {
-       ROOT_DESC_CONSERVATIVE, /* 0, so matches NULL value */
-       ROOT_DESC_BITMAP,
-       ROOT_DESC_RUN_LEN, 
-       ROOT_DESC_COMPLEX,
-       ROOT_DESC_USER,
-       ROOT_DESC_TYPE_MASK = 0x7,
-       ROOT_DESC_TYPE_SHIFT = 3,
-};
-
-typedef void (*SgenUserMarkFunc)     (void **addr, void *gc_data);
-typedef void (*SgenUserRootMarkFunc) (void *addr, SgenUserMarkFunc mark_func, void *gc_data);
-
-void* sgen_make_user_root_descriptor (SgenUserRootMarkFunc marker);
-
-gsize* sgen_get_complex_descriptor (mword desc);
-void* sgen_get_complex_descriptor_bitmap (mword desc);
-SgenUserRootMarkFunc sgen_get_user_descriptor_func (mword desc);
-
-void sgen_init_descriptors (void);
-
-#ifdef HEAVY_STATISTICS
-void sgen_descriptor_count_scanned_object (mword desc);
-void sgen_descriptor_count_copied_object (mword desc);
-#endif
-
-static inline gboolean
-sgen_gc_descr_has_references (mword desc)
-{
-       /* This covers SMALL_PTRFREE and COMPLEX_PTRFREE */
-       if ((desc & DESC_TYPE_PTRFREE_MASK) == DESC_TYPE_PTRFREE_BITS)
-               return FALSE;
-
-       /*The array is ptr-free*/
-       if ((desc & 0xC007) == (DESC_TYPE_VECTOR | VECTOR_SUBTYPE_PTRFREE))
-               return FALSE;
-
-       return TRUE;
-}
-
-#define SGEN_VTABLE_HAS_REFERENCES(vt) (sgen_gc_descr_has_references (sgen_vtable_get_descriptor ((vt))))
-#define SGEN_OBJECT_HAS_REFERENCES(o)  (SGEN_VTABLE_HAS_REFERENCES (SGEN_LOAD_VTABLE ((o))))
-
-/* helper macros to scan and traverse objects, macros because we resue them in many functions */
-#ifdef __GNUC__
-#define PREFETCH_READ(addr)    __builtin_prefetch ((addr), 0, 1)
-#define PREFETCH_WRITE(addr)   __builtin_prefetch ((addr), 1, 1)
-#else
-#define PREFETCH_READ(addr)
-#define PREFETCH_WRITE(addr)
-#endif
-
-#if defined(__GNUC__) && SIZEOF_VOID_P==4
-#define GNUC_BUILTIN_CTZ(bmap) __builtin_ctz(bmap)
-#elif defined(__GNUC__) && SIZEOF_VOID_P==8
-#define GNUC_BUILTIN_CTZ(bmap) __builtin_ctzl(bmap)
-#endif
-
-/* code using these macros must define a HANDLE_PTR(ptr) macro that does the work */
-#define OBJ_RUN_LEN_FOREACH_PTR(desc,obj)      do {    \
-               if ((desc) & 0xffff0000) {      \
-                       /* there are pointers */        \
-                       void **_objptr_end;     \
-                       void **_objptr = (void**)(obj); \
-                       _objptr += ((desc) >> 16) & 0xff;       \
-                       _objptr_end = _objptr + (((desc) >> 24) & 0xff);        \
-                       while (_objptr < _objptr_end) { \
-                               HANDLE_PTR (_objptr, (obj));    \
-                               _objptr++;      \
-                       };      \
-               }       \
-       } while (0)
-
-/* a bitmap desc means that there are pointer references or we'd have
- * choosen run-length, instead: add an assert to check.
- */
-#ifdef __GNUC__
-#define OBJ_BITMAP_FOREACH_PTR(desc,obj)       do {            \
-               /* there are pointers */                        \
-               void **_objptr = (void**)(obj);                 \
-               gsize _bmap = (desc) >> LOW_TYPE_BITS;          \
-               _objptr += OBJECT_HEADER_WORDS;                 \
-               do {                                            \
-                       int _index = GNUC_BUILTIN_CTZ (_bmap);  \
-                       _objptr += _index;                      \
-                       _bmap >>= (_index + 1);                 \
-                       HANDLE_PTR (_objptr, (obj));            \
-                       ++_objptr;                              \
-               } while (_bmap);                                \
-       } while (0)
-#else
-#define OBJ_BITMAP_FOREACH_PTR(desc,obj)       do {    \
-               /* there are pointers */        \
-               void **_objptr = (void**)(obj); \
-               gsize _bmap = (desc) >> LOW_TYPE_BITS;  \
-               _objptr += OBJECT_HEADER_WORDS; \
-               do {    \
-                       if ((_bmap & 1)) {      \
-                               HANDLE_PTR (_objptr, (obj));    \
-                       }       \
-                       _bmap >>= 1;    \
-                       ++_objptr;      \
-               } while (_bmap);        \
-       } while (0)
-#endif
-
-#define OBJ_COMPLEX_FOREACH_PTR(vt,obj)        do {    \
-               /* there are pointers */        \
-               void **_objptr = (void**)(obj); \
-               gsize *bitmap_data = sgen_get_complex_descriptor ((desc)); \
-               gsize bwords = (*bitmap_data) - 1;      \
-               void **start_run = _objptr;     \
-               bitmap_data++;  \
-               while (bwords-- > 0) {  \
-                       gsize _bmap = *bitmap_data++;   \
-                       _objptr = start_run;    \
-                       /*g_print ("bitmap: 0x%x/%d at %p\n", _bmap, bwords, _objptr);*/        \
-                       while (_bmap) { \
-                               if ((_bmap & 1)) {      \
-                                       HANDLE_PTR (_objptr, (obj));    \
-                               }       \
-                               _bmap >>= 1;    \
-                               ++_objptr;      \
-                       }       \
-                       start_run += GC_BITS_PER_WORD;  \
-               }       \
-       } while (0)
-
-/* this one is untested */
-#define OBJ_COMPLEX_ARR_FOREACH_PTR(desc,obj)  do {    \
-               /* there are pointers */        \
-               GCVTable *vt = (GCVTable*)SGEN_LOAD_VTABLE (obj); \
-               gsize *mbitmap_data = sgen_get_complex_descriptor ((desc)); \
-               gsize mbwords = (*mbitmap_data++) - 1;  \
-               gsize el_size = sgen_client_array_element_size (vt);    \
-               char *e_start = sgen_client_array_data_start ((GCObject*)(obj));        \
-               char *e_end = e_start + el_size * sgen_client_array_length ((GCObject*)(obj));  \
-               while (e_start < e_end) {       \
-                       void **_objptr = (void**)e_start;       \
-                       gsize *bitmap_data = mbitmap_data;      \
-                       gsize bwords = mbwords; \
-                       while (bwords-- > 0) {  \
-                               gsize _bmap = *bitmap_data++;   \
-                               void **start_run = _objptr;     \
-                               /*g_print ("bitmap: 0x%x\n", _bmap);*/  \
-                               while (_bmap) { \
-                                       if ((_bmap & 1)) {      \
-                                               HANDLE_PTR (_objptr, (obj));    \
-                                       }       \
-                                       _bmap >>= 1;    \
-                                       ++_objptr;      \
-                               }       \
-                               _objptr = start_run + GC_BITS_PER_WORD; \
-                       }       \
-                       e_start += el_size;     \
-               }       \
-       } while (0)
-
-#define OBJ_VECTOR_FOREACH_PTR(desc,obj)       do {    \
-               /* note: 0xffffc000 excludes DESC_TYPE_V_PTRFREE */     \
-               if ((desc) & 0xffffc000) {                              \
-                       int el_size = ((desc) >> 3) & MAX_ELEMENT_SIZE; \
-                       /* there are pointers */        \
-                       int etype = (desc) & 0xc000;                    \
-                       if (etype == (DESC_TYPE_V_REFS << 14)) {        \
-                               void **p = (void**)sgen_client_array_data_start ((GCObject*)(obj));     \
-                               void **end_refs = (void**)((char*)p + el_size * sgen_client_array_length ((GCObject*)(obj)));   \
-                               /* Note: this code can handle also arrays of struct with only references in them */     \
-                               while (p < end_refs) {  \
-                                       HANDLE_PTR (p, (obj));  \
-                                       ++p;    \
-                               }       \
-                       } else if (etype == DESC_TYPE_V_RUN_LEN << 14) {        \
-                               int offset = ((desc) >> 16) & 0xff;     \
-                               int num_refs = ((desc) >> 24) & 0xff;   \
-                               char *e_start = sgen_client_array_data_start ((GCObject*)(obj));        \
-                               char *e_end = e_start + el_size * sgen_client_array_length ((GCObject*)(obj));  \
-                               while (e_start < e_end) {       \
-                                       void **p = (void**)e_start;     \
-                                       int i;  \
-                                       p += offset;    \
-                                       for (i = 0; i < num_refs; ++i) {        \
-                                               HANDLE_PTR (p + i, (obj));      \
-                                       }       \
-                                       e_start += el_size;     \
-                               }       \
-                       } else if (etype == DESC_TYPE_V_BITMAP << 14) { \
-                               char *e_start = sgen_client_array_data_start ((GCObject*)(obj));        \
-                               char *e_end = e_start + el_size * sgen_client_array_length ((GCObject*)(obj));  \
-                               while (e_start < e_end) {       \
-                                       void **p = (void**)e_start;     \
-                                       gsize _bmap = (desc) >> 16;     \
-                                       /* Note: there is no object header here to skip */      \
-                                       while (_bmap) { \
-                                               if ((_bmap & 1)) {      \
-                                                       HANDLE_PTR (p, (obj));  \
-                                               }       \
-                                               _bmap >>= 1;    \
-                                               ++p;    \
-                                       }       \
-                                       e_start += el_size;     \
-                               }       \
-                       }       \
-               }       \
-       } while (0)
-
-
-#endif
diff --git a/mono/metadata/sgen-fin-weak-hash.c b/mono/metadata/sgen-fin-weak-hash.c
deleted file mode 100644 (file)
index e2885dc..0000000
+++ /dev/null
@@ -1,871 +0,0 @@
-/*
- * sgen-fin-weak-hash.c: Finalizers and weak links.
- *
- * Author:
- *     Paolo Molaro (lupus@ximian.com)
- *  Rodrigo Kumpera (kumpera@gmail.com)
- *
- * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
- * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
- * Copyright 2011 Xamarin, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "config.h"
-#ifdef HAVE_SGEN_GC
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/sgen-gray.h"
-#include "mono/metadata/sgen-protocol.h"
-#include "mono/metadata/sgen-pointer-queue.h"
-#include "mono/metadata/sgen-client.h"
-#include "mono/utils/mono-membar.h"
-
-#define ptr_in_nursery sgen_ptr_in_nursery
-
-typedef SgenGrayQueue GrayQueue;
-
-static int no_finalize = 0;
-
-#define DISLINK_OBJECT(l)      (REVEAL_POINTER (*(void**)(l)))
-#define DISLINK_TRACK(l)       ((~(size_t)(*(void**)(l))) & 1)
-
-/*
- * The finalizable hash has the object as the key, the 
- * disappearing_link hash, has the link address as key.
- *
- * Copyright 2011 Xamarin Inc.
- */
-
-#define TAG_MASK ((mword)0x1)
-
-static inline GCObject*
-tagged_object_get_object (GCObject *object)
-{
-       return (GCObject*)(((mword)object) & ~TAG_MASK);
-}
-
-static inline int
-tagged_object_get_tag (GCObject *object)
-{
-       return ((mword)object) & TAG_MASK;
-}
-
-static inline GCObject*
-tagged_object_apply (void *object, int tag_bits)
-{
-       return (GCObject*)((mword)object | (mword)tag_bits);
-}
-
-static int
-tagged_object_hash (GCObject *o)
-{
-       return sgen_aligned_addr_hash (tagged_object_get_object (o));
-}
-
-static gboolean
-tagged_object_equals (GCObject *a, GCObject *b)
-{
-       return tagged_object_get_object (a) == tagged_object_get_object (b);
-}
-
-static SgenHashTable minor_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);
-static SgenHashTable major_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);
-
-static SgenHashTable*
-get_finalize_entry_hash_table (int generation)
-{
-       switch (generation) {
-       case GENERATION_NURSERY: return &minor_finalizable_hash;
-       case GENERATION_OLD: return &major_finalizable_hash;
-       default: g_assert_not_reached ();
-       }
-}
-
-#define BRIDGE_OBJECT_MARKED 0x1
-
-/* LOCKING: requires that the GC lock is held */
-void
-sgen_mark_bridge_object (GCObject *obj)
-{
-       SgenHashTable *hash_table = get_finalize_entry_hash_table (ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD);
-
-       sgen_hash_table_set_key (hash_table, obj, tagged_object_apply (obj, BRIDGE_OBJECT_MARKED));
-}
-
-/* LOCKING: requires that the GC lock is held */
-void
-sgen_collect_bridge_objects (int generation, ScanCopyContext ctx)
-{
-       CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
-       GrayQueue *queue = ctx.queue;
-       SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
-       GCObject *object;
-       gpointer dummy G_GNUC_UNUSED;
-       char *copy;
-       SgenPointerQueue moved_fin_objects;
-
-       sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);
-
-       if (no_finalize)
-               return;
-
-       SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
-               int tag = tagged_object_get_tag (object);
-               object = tagged_object_get_object (object);
-
-               /* Bridge code told us to ignore this one */
-               if (tag == BRIDGE_OBJECT_MARKED)
-                       continue;
-
-               /* Object is a bridge object and major heap says it's dead  */
-               if (major_collector.is_object_live ((char*)object))
-                       continue;
-
-               /* Nursery says the object is dead. */
-               if (!sgen_gc_is_object_ready_for_finalization (object))
-                       continue;
-
-               if (!sgen_client_bridge_is_bridge_object (object))
-                       continue;
-
-               copy = (char*)object;
-               copy_func ((void**)&copy, queue);
-
-               sgen_client_bridge_register_finalized_object ((GCObject*)copy);
-               
-               if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
-                       /* remove from the list */
-                       SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
-
-                       /* insert it into the major hash */
-                       sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);
-
-                       SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
-
-                       continue;
-               } else if (copy != (char*)object) {
-                       /* update pointer */
-                       SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
-
-                       /* register for reinsertion */
-                       sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));
-
-                       SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
-
-                       continue;
-               }
-       } SGEN_HASH_TABLE_FOREACH_END;
-
-       while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
-               sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
-       }
-
-       sgen_pointer_queue_free (&moved_fin_objects);
-}
-
-
-/* LOCKING: requires that the GC lock is held */
-void
-sgen_finalize_in_range (int generation, ScanCopyContext ctx)
-{
-       CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
-       GrayQueue *queue = ctx.queue;
-       SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
-       GCObject *object;
-       gpointer dummy G_GNUC_UNUSED;
-       SgenPointerQueue moved_fin_objects;
-
-       sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);
-
-       if (no_finalize)
-               return;
-       SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
-               int tag = tagged_object_get_tag (object);
-               object = tagged_object_get_object (object);
-               if (!major_collector.is_object_live ((char*)object)) {
-                       gboolean is_fin_ready = sgen_gc_is_object_ready_for_finalization (object);
-                       GCObject *copy = object;
-                       copy_func ((void**)&copy, queue);
-                       if (is_fin_ready) {
-                               /* remove and put in fin_ready_list */
-                               SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
-                               sgen_queue_finalization_entry (copy);
-                               /* Make it survive */
-                               SGEN_LOG (5, "Queueing object for finalization: %p (%s) (was at %p) (%d)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object, sgen_hash_table_num_entries (hash_table));
-                               continue;
-                       } else {
-                               if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
-                                       /* remove from the list */
-                                       SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
-
-                                       /* insert it into the major hash */
-                                       sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);
-
-                                       SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
-
-                                       continue;
-                               } else if (copy != object) {
-                                       /* update pointer */
-                                       SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
-
-                                       /* register for reinsertion */
-                                       sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));
-
-                                       SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
-
-                                       continue;
-                               }
-                       }
-               }
-       } SGEN_HASH_TABLE_FOREACH_END;
-
-       while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
-               sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
-       }
-
-       sgen_pointer_queue_free (&moved_fin_objects);
-}
-
-/* LOCKING: requires that the GC lock is held */
-static void
-register_for_finalization (GCObject *obj, void *user_data, int generation)
-{
-       SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
-
-       if (no_finalize)
-               return;
-
-       if (user_data) {
-               if (sgen_hash_table_replace (hash_table, obj, NULL, NULL)) {
-                       GCVTable *vt = SGEN_LOAD_VTABLE_UNCHECKED (obj);
-                       SGEN_LOG (5, "Added finalizer for object: %p (%s) (%d) to %s table", obj, sgen_client_vtable_get_name (vt), hash_table->num_entries, sgen_generation_name (generation));
-               }
-       } else {
-               if (sgen_hash_table_remove (hash_table, obj, NULL)) {
-                       GCVTable *vt = SGEN_LOAD_VTABLE_UNCHECKED (obj);
-                       SGEN_LOG (5, "Removed finalizer for object: %p (%s) (%d)", obj, sgen_client_vtable_get_name (vt), hash_table->num_entries);
-               }
-       }
-}
-
-/*
- * We're using (mostly) non-locking staging queues for finalizers and weak links to speed
- * up registering them.  Otherwise we'd have to take the GC lock.
- *
- * The queues are arrays of `StageEntry`, plus a `next_entry` index.  Threads add entries to
- * the queue via `add_stage_entry()` in a linear fashion until it fills up, in which case
- * `process_stage_entries()` is called to drain it.  A garbage collection will also drain
- * the queues via the same function.  That implies that `add_stage_entry()`, since it
- * doesn't take a lock, must be able to run concurrently with `process_stage_entries()`,
- * though it doesn't have to make progress while the queue is drained.  In fact, once it
- * detects that the queue is being drained, it blocks until the draining is done.
- *
- * The protocol must guarantee that entries in the queue are causally ordered, otherwise two
- * entries for the same location might get switched, resulting in the earlier one being
- * committed and the later one ignored.
- *
- * `next_entry` is the index of the next entry to be filled, or `-1` if the queue is
- * currently being drained.  Each entry has a state:
- *
- * `STAGE_ENTRY_FREE`: The entry is free.  Its data fields must be `NULL`.
- *
- * `STAGE_ENTRY_BUSY`: The entry is currently being filled in.
- *
- * `STAGE_ENTRY_USED`: The entry is completely filled in and must be processed in the next
- * draining round.
- *
- * `STAGE_ENTRY_INVALID`: The entry was busy during queue draining and therefore
- * invalidated.  Entries that are `BUSY` can obviously not be processed during a drain, but
- * we can't leave them in place because new entries might be inserted before them, including
- * from the same thread, violating causality.  An alternative would be not to reset
- * `next_entry` to `0` after a drain, but to the index of the last `BUSY` entry plus one,
- * but that can potentially waste the whole queue.
- *
- * State transitions:
- *
- * | from    | to      | filler? | drainer? |
- * +---------+---------+---------+----------+
- * | FREE    | BUSY    | X       |          |
- * | BUSY    | FREE    | X       |          |
- * | BUSY    | USED    | X       |          |
- * | BUSY    | INVALID |         | X        |
- * | USED    | FREE    |         | X        |
- * | INVALID | FREE    | X       |          |
- *
- * `next_entry` can be incremented either by the filler thread that set the corresponding
- * entry to `BUSY`, or by another filler thread that's trying to get a `FREE` slot.  If that
- * other thread wasn't allowed to increment, it would block on the first filler thread.
- *
- * An entry's state, once it's set from `FREE` to `BUSY` by a filler thread, can only be
- * changed by that same thread or by the drained.  The drainer can only set a `BUSY` thread
- * to `INVALID`, so it needs to be set to `FREE` again by the original filler thread.
- */
-
-#define STAGE_ENTRY_FREE       0
-#define STAGE_ENTRY_BUSY       1
-#define STAGE_ENTRY_USED       2
-#define STAGE_ENTRY_INVALID    3
-
-typedef struct {
-       volatile gint32 state;
-       GCObject *obj;
-       void *user_data;
-} StageEntry;
-
-#define NUM_FIN_STAGE_ENTRIES  1024
-
-static volatile gint32 next_fin_stage_entry = 0;
-static StageEntry fin_stage_entries [NUM_FIN_STAGE_ENTRIES];
-
-/*
- * This is used to lock the stage when processing is forced, i.e. when it's triggered by a
- * garbage collection.  In that case, the world is already stopped and there's only one
- * thread operating on the queue.
- */
-static void
-lock_stage_for_processing (volatile gint32 *next_entry)
-{
-       *next_entry = -1;
-}
-
-/*
- * When processing is triggered by an overflow, we don't want to take the GC lock
- * immediately, and then set `next_index` to `-1`, because another thread might have drained
- * the queue in the mean time.  Instead, we make sure the overflow is still there, we
- * atomically set `next_index`, and only once that happened do we take the GC lock.
- */
-static gboolean
-try_lock_stage_for_processing (int num_entries, volatile gint32 *next_entry)
-{
-       gint32 old = *next_entry;
-       if (old < num_entries)
-               return FALSE;
-       return InterlockedCompareExchange (next_entry, -1, old) == old;
-}
-
-/* LOCKING: requires that the GC lock is held */
-static void
-process_stage_entries (int num_entries, volatile gint32 *next_entry, StageEntry *entries, void (*process_func) (GCObject*, void*, int))
-{
-       int i;
-
-       /*
-        * This can happen if after setting `next_index` to `-1` in
-        * `try_lock_stage_for_processing()`, a GC was triggered, which then drained the
-        * queue and reset `next_entry`.
-        *
-        * We have the GC lock now, so if it's still `-1`, we can't be interrupted by a GC.
-        */
-       if (*next_entry != -1)
-               return;
-
-       for (i = 0; i < num_entries; ++i) {
-               gint32 state;
-
-       retry:
-               state = entries [i].state;
-
-               switch (state) {
-               case STAGE_ENTRY_FREE:
-               case STAGE_ENTRY_INVALID:
-                       continue;
-               case STAGE_ENTRY_BUSY:
-                       /* BUSY -> INVALID */
-                       /*
-                        * This must be done atomically, because the filler thread can set
-                        * the entry to `USED`, in which case we must process it, so we must
-                        * detect that eventuality.
-                        */
-                       if (InterlockedCompareExchange (&entries [i].state, STAGE_ENTRY_INVALID, STAGE_ENTRY_BUSY) != STAGE_ENTRY_BUSY)
-                               goto retry;
-                       continue;
-               case STAGE_ENTRY_USED:
-                       break;
-               default:
-                       SGEN_ASSERT (0, FALSE, "Invalid stage entry state");
-                       break;
-               }
-
-               /* state is USED */
-
-               process_func (entries [i].obj, entries [i].user_data, i);
-
-               entries [i].obj = NULL;
-               entries [i].user_data = NULL;
-
-               mono_memory_write_barrier ();
-
-               /* USED -> FREE */
-               /*
-                * This transition only happens here, so we don't have to do it atomically.
-                */
-               entries [i].state = STAGE_ENTRY_FREE;
-       }
-
-       mono_memory_write_barrier ();
-
-       *next_entry = 0;
-}
-
-#ifdef HEAVY_STATISTICS
-static guint64 stat_overflow_abort = 0;
-static guint64 stat_wait_for_processing = 0;
-static guint64 stat_increment_other_thread = 0;
-static guint64 stat_index_decremented = 0;
-static guint64 stat_entry_invalidated = 0;
-static guint64 stat_success = 0;
-#endif
-
-static int
-add_stage_entry (int num_entries, volatile gint32 *next_entry, StageEntry *entries, GCObject *obj, void *user_data)
-{
-       gint32 index, new_next_entry, old_next_entry;
-       gint32 previous_state;
-
- retry:
-       for (;;) {
-               index = *next_entry;
-               if (index >= num_entries) {
-                       HEAVY_STAT (++stat_overflow_abort);
-                       return -1;
-               }
-               if (index < 0) {
-                       /*
-                        * Backed-off waiting is way more efficient than even using a
-                        * dedicated lock for this.
-                        */
-                       while ((index = *next_entry) < 0) {
-                               /*
-                                * This seems like a good value.  Determined by timing
-                                * sgen-weakref-stress.exe.
-                                */
-                               g_usleep (200);
-                               HEAVY_STAT (++stat_wait_for_processing);
-                       }
-                       continue;
-               }
-               /* FREE -> BUSY */
-               if (entries [index].state != STAGE_ENTRY_FREE ||
-                               InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_BUSY, STAGE_ENTRY_FREE) != STAGE_ENTRY_FREE) {
-                       /*
-                        * If we can't get the entry it must be because another thread got
-                        * it first.  We don't want to wait for that thread to increment
-                        * `next_entry`, so we try to do it ourselves.  Whether we succeed
-                        * or not, we start over.
-                        */
-                       if (*next_entry == index) {
-                               InterlockedCompareExchange (next_entry, index + 1, index);
-                               //g_print ("tried increment for other thread\n");
-                               HEAVY_STAT (++stat_increment_other_thread);
-                       }
-                       continue;
-               }
-               /* state is BUSY now */
-               mono_memory_write_barrier ();
-               /*
-                * Incrementing `next_entry` must happen after setting the state to `BUSY`.
-                * If it were the other way around, it would be possible that after a filler
-                * incremented the index, other threads fill up the queue, the queue is
-                * drained, the original filler finally fills in the slot, but `next_entry`
-                * ends up at the start of the queue, and new entries are written in the
-                * queue in front of, not behind, the original filler's entry.
-                *
-                * We don't actually require that the CAS succeeds, but we do require that
-                * the value of `next_entry` is not lower than our index.  Since the drainer
-                * sets it to `-1`, that also takes care of the case that the drainer is
-                * currently running.
-                */
-               old_next_entry = InterlockedCompareExchange (next_entry, index + 1, index);
-               if (old_next_entry < index) {
-                       /* BUSY -> FREE */
-                       /* INVALID -> FREE */
-                       /*
-                        * The state might still be `BUSY`, or the drainer could have set it
-                        * to `INVALID`.  In either case, there's no point in CASing.  Set
-                        * it to `FREE` and start over.
-                        */
-                       entries [index].state = STAGE_ENTRY_FREE;
-                       HEAVY_STAT (++stat_index_decremented);
-                       continue;
-               }
-               break;
-       }
-
-       SGEN_ASSERT (0, index >= 0 && index < num_entries, "Invalid index");
-
-       entries [index].obj = obj;
-       entries [index].user_data = user_data;
-
-       mono_memory_write_barrier ();
-
-       new_next_entry = *next_entry;
-       mono_memory_read_barrier ();
-       /* BUSY -> USED */
-       /*
-        * A `BUSY` entry will either still be `BUSY` or the drainer will have set it to
-        * `INVALID`.  In the former case, we set it to `USED` and we're finished.  In the
-        * latter case, we reset it to `FREE` and start over.
-        */
-       previous_state = InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_USED, STAGE_ENTRY_BUSY);
-       if (previous_state == STAGE_ENTRY_BUSY) {
-               SGEN_ASSERT (0, new_next_entry >= index || new_next_entry < 0, "Invalid next entry index - as long as we're busy, other thread can only increment or invalidate it");
-               HEAVY_STAT (++stat_success);
-               return index;
-       }
-
-       SGEN_ASSERT (0, previous_state == STAGE_ENTRY_INVALID, "Invalid state transition - other thread can only make busy state invalid");
-       entries [index].obj = NULL;
-       entries [index].user_data = NULL;
-       mono_memory_write_barrier ();
-       /* INVALID -> FREE */
-       entries [index].state = STAGE_ENTRY_FREE;
-
-       HEAVY_STAT (++stat_entry_invalidated);
-
-       goto retry;
-}
-
-/* LOCKING: requires that the GC lock is held */
-static void
-process_fin_stage_entry (GCObject *obj, void *user_data, int index)
-{
-       if (ptr_in_nursery (obj))
-               register_for_finalization (obj, user_data, GENERATION_NURSERY);
-       else
-               register_for_finalization (obj, user_data, GENERATION_OLD);
-}
-
-/* LOCKING: requires that the GC lock is held */
-void
-sgen_process_fin_stage_entries (void)
-{
-       lock_stage_for_processing (&next_fin_stage_entry);
-       process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
-}
-
-void
-sgen_object_register_for_finalization (GCObject *obj, void *user_data)
-{
-       while (add_stage_entry (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, obj, user_data) == -1) {
-               if (try_lock_stage_for_processing (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry)) {
-                       LOCK_GC;
-                       process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
-                       UNLOCK_GC;
-               }
-       }
-}
-
-/* LOCKING: requires that the GC lock is held */
-static int
-finalizers_with_predicate (SgenObjectPredicateFunc predicate, void *user_data, GCObject **out_array, int out_size, SgenHashTable *hash_table)
-{
-       GCObject *object;
-       gpointer dummy G_GNUC_UNUSED;
-       int count;
-
-       if (no_finalize || !out_size || !out_array)
-               return 0;
-       count = 0;
-       SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
-               object = tagged_object_get_object (object);
-
-               if (predicate (object, user_data)) {
-                       /* remove and put in out_array */
-                       SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
-                       out_array [count ++] = object;
-                       SGEN_LOG (5, "Collecting object for finalization: %p (%s) (%d)", object, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (object)), sgen_hash_table_num_entries (hash_table));
-                       if (count == out_size)
-                               return count;
-                       continue;
-               }
-       } SGEN_HASH_TABLE_FOREACH_END;
-       return count;
-}
-
-/**
- * sgen_gather_finalizers_if:
- * @predicate: predicate function
- * @user_data: predicate function data argument
- * @out_array: output array
- * @out_size: size of output array
- *
- * Store inside @out_array up to @out_size objects that match @predicate. Returns the number
- * of stored items. Can be called repeteadly until it returns 0.
- *
- * The items are removed from the finalizer data structure, so the caller is supposed
- * to finalize them.
- *
- * @out_array me be on the stack, or registered as a root, to allow the GC to know the
- * objects are still alive.
- */
-int
-sgen_gather_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, GCObject **out_array, int out_size)
-{
-       int result;
-
-       LOCK_GC;
-       sgen_process_fin_stage_entries ();
-       result = finalizers_with_predicate (predicate, user_data, (GCObject**)out_array, out_size, &minor_finalizable_hash);
-       if (result < out_size) {
-               result += finalizers_with_predicate (predicate, user_data, (GCObject**)out_array + result, out_size - result,
-                       &major_finalizable_hash);
-       }
-       UNLOCK_GC;
-
-       return result;
-}
-
-static SgenHashTable minor_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, sgen_aligned_addr_hash, NULL);
-static SgenHashTable major_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, sgen_aligned_addr_hash, NULL);
-
-static SgenHashTable*
-get_dislink_hash_table (int generation)
-{
-       switch (generation) {
-       case GENERATION_NURSERY: return &minor_disappearing_link_hash;
-       case GENERATION_OLD: return &major_disappearing_link_hash;
-       default: g_assert_not_reached ();
-       }
-}
-
-/* LOCKING: assumes the GC lock is held */
-static void
-add_or_remove_disappearing_link (GCObject *obj, void **link, int generation)
-{
-       SgenHashTable *hash_table = get_dislink_hash_table (generation);
-
-       if (!obj) {
-               if (sgen_hash_table_remove (hash_table, link, NULL)) {
-                       SGEN_LOG (5, "Removed dislink %p (%d) from %s table",
-                                       link, hash_table->num_entries, sgen_generation_name (generation));
-               }
-               return;
-       }
-
-       sgen_hash_table_replace (hash_table, link, NULL, NULL);
-       SGEN_LOG (5, "Added dislink for object: %p (%s) at %p to %s table",
-                       obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE_UNCHECKED (obj)), link, sgen_generation_name (generation));
-}
-
-/* LOCKING: requires that the GC lock is held */
-void
-sgen_null_link_in_range (int generation, gboolean before_finalization, ScanCopyContext ctx)
-{
-       CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
-       GrayQueue *queue = ctx.queue;
-       void **link;
-       gpointer dummy G_GNUC_UNUSED;
-       SgenHashTable *hash = get_dislink_hash_table (generation);
-
-       SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
-               char *object;
-               gboolean track;
-
-               /*
-               We null a weak link before unregistering it, so it's possible that a thread is
-               suspended right in between setting the content to null and staging the unregister.
-
-               The rest of this code cannot handle null links as DISLINK_OBJECT (NULL) produces an invalid address.
-
-               We should simply skip the entry as the staged removal will take place during the next GC.
-               */
-               if (!*link) {
-                       SGEN_LOG (5, "Dislink %p was externally nullified", link);
-                       continue;
-               }
-
-               track = DISLINK_TRACK (link);
-               /*
-                * Tracked references are processed after
-                * finalization handling whereas standard weak
-                * references are processed before.  If an
-                * object is still not marked after finalization
-                * handling it means that it either doesn't have
-                * a finalizer or the finalizer has already run,
-                * so we must null a tracking reference.
-                */
-               if (track != before_finalization) {
-                       object = DISLINK_OBJECT (link);
-                       /*
-                       We should guard against a null object been hidden. This can sometimes happen.
-                       */
-                       if (!object) {
-                               SGEN_LOG (5, "Dislink %p with a hidden null object", link);
-                               continue;
-                       }
-
-                       if (!major_collector.is_object_live (object)) {
-                               if (sgen_gc_is_object_ready_for_finalization (object)) {
-                                       *link = NULL;
-                                       binary_protocol_dislink_update (link, NULL, 0, 0);
-                                       SGEN_LOG (5, "Dislink nullified at %p to GCed object %p", link, object);
-                                       SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
-                                       continue;
-                               } else {
-                                       char *copy = object;
-                                       copy_func ((void**)&copy, queue);
-
-                                       /* Update pointer if it's moved.  If the object
-                                        * has been moved out of the nursery, we need to
-                                        * remove the link from the minor hash table to
-                                        * the major one.
-                                        *
-                                        * FIXME: what if an object is moved earlier?
-                                        */
-
-                                       if (hash == &minor_disappearing_link_hash && !ptr_in_nursery (copy)) {
-                                               SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
-
-                                               g_assert (copy);
-                                               *link = HIDE_POINTER (copy, track);
-                                               add_or_remove_disappearing_link ((GCObject*)copy, link, GENERATION_OLD);
-                                               binary_protocol_dislink_update (link, copy, track, 0);
-
-                                               SGEN_LOG (5, "Upgraded dislink at %p to major because object %p moved to %p", link, object, copy);
-
-                                               continue;
-                                       } else {
-                                               *link = HIDE_POINTER (copy, track);
-                                               binary_protocol_dislink_update (link, copy, track, 0);
-                                               SGEN_LOG (5, "Updated dislink at %p to %p", link, DISLINK_OBJECT (link));
-                                       }
-                               }
-                       }
-               }
-       } SGEN_HASH_TABLE_FOREACH_END;
-}
-
-/* LOCKING: requires that the GC lock is held */
-void
-sgen_null_links_if (SgenObjectPredicateFunc predicate, void *data, int generation)
-{
-       void **link;
-       gpointer dummy G_GNUC_UNUSED;
-       SgenHashTable *hash = get_dislink_hash_table (generation);
-       SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
-               char *object = DISLINK_OBJECT (link);
-
-               if (!*link)
-                       continue;
-
-               if (predicate ((GCObject*)object, data)) {
-                       *link = NULL;
-                       binary_protocol_dislink_update (link, NULL, 0, 0);
-                       SGEN_LOG (5, "Dislink nullified by predicate at %p to GCed object %p", link, object);
-                       SGEN_HASH_TABLE_FOREACH_REMOVE (FALSE /* TRUE */);
-                       continue;
-               }
-       } SGEN_HASH_TABLE_FOREACH_END;
-}
-
-void
-sgen_remove_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, int generation)
-{
-       SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
-       GCObject *object;
-       gpointer dummy G_GNUC_UNUSED;
-
-       SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
-               object = tagged_object_get_object (object);
-
-               if (predicate (object, user_data)) {
-                       SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
-                       continue;
-               }
-       } SGEN_HASH_TABLE_FOREACH_END;  
-}
-
-/* LOCKING: requires that the GC lock is held */
-static void
-process_dislink_stage_entry (GCObject *obj, void *_link, int index)
-{
-       void **link = _link;
-
-       if (index >= 0)
-               binary_protocol_dislink_process_staged (link, obj, index);
-
-       add_or_remove_disappearing_link (NULL, link, GENERATION_NURSERY);
-       add_or_remove_disappearing_link (NULL, link, GENERATION_OLD);
-       if (obj) {
-               if (ptr_in_nursery (obj))
-                       add_or_remove_disappearing_link (obj, link, GENERATION_NURSERY);
-               else
-                       add_or_remove_disappearing_link (obj, link, GENERATION_OLD);
-       }
-}
-
-#define NUM_DISLINK_STAGE_ENTRIES      1024
-
-static volatile gint32 next_dislink_stage_entry = 0;
-static StageEntry dislink_stage_entries [NUM_DISLINK_STAGE_ENTRIES];
-
-/* LOCKING: requires that the GC lock is held */
-void
-sgen_process_dislink_stage_entries (void)
-{
-       lock_stage_for_processing (&next_dislink_stage_entry);
-       process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
-}
-
-void
-sgen_register_disappearing_link (GCObject *obj, void **link, gboolean track, gboolean in_gc)
-{
-       if (obj)
-               *link = HIDE_POINTER (obj, track);
-       else
-               *link = NULL;
-
-#if 1
-       if (in_gc) {
-               binary_protocol_dislink_update (link, obj, track, 0);
-               process_dislink_stage_entry (obj, link, -1);
-       } else {
-               int index;
-               binary_protocol_dislink_update (link, obj, track, 1);
-               while ((index = add_stage_entry (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, obj, link)) == -1) {
-                       if (try_lock_stage_for_processing (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry)) {
-                               LOCK_GC;
-                               process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
-                               UNLOCK_GC;
-                       }
-               }
-               binary_protocol_dislink_update_staged (link, obj, track, index);
-       }
-#else
-       if (!in_gc)
-               LOCK_GC;
-       binary_protocol_dislink_update (link, obj, track, 0);
-       process_dislink_stage_entry (obj, link, -1);
-       if (!in_gc)
-               UNLOCK_GC;
-#endif
-}
-
-void
-sgen_init_fin_weak_hash (void)
-{
-#ifdef HEAVY_STATISTICS
-       mono_counters_register ("FinWeak Successes", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_success);
-       mono_counters_register ("FinWeak Overflow aborts", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_overflow_abort);
-       mono_counters_register ("FinWeak Wait for processing", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wait_for_processing);
-       mono_counters_register ("FinWeak Increment other thread", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_increment_other_thread);
-       mono_counters_register ("FinWeak Index decremented", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_index_decremented);
-       mono_counters_register ("FinWeak Entry invalidated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_entry_invalidated);
-#endif
-}
-
-#endif /* HAVE_SGEN_GC */
diff --git a/mono/metadata/sgen-gc.c b/mono/metadata/sgen-gc.c
deleted file mode 100644 (file)
index 70a5184..0000000
+++ /dev/null
@@ -1,3460 +0,0 @@
-/*
- * sgen-gc.c: Simple generational GC.
- *
- * Author:
- *     Paolo Molaro (lupus@ximian.com)
- *  Rodrigo Kumpera (kumpera@gmail.com)
- *
- * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
- * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
- *
- * Thread start/stop adapted from Boehm's GC:
- * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
- * Copyright (c) 2000-2004 by Hewlett-Packard Company.  All rights reserved.
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright 2011 Xamarin, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Important: allocation provides always zeroed memory, having to do
- * a memset after allocation is deadly for performance.
- * Memory usage at startup is currently as follows:
- * 64 KB pinned space
- * 64 KB internal space
- * size of nursery
- * We should provide a small memory config with half the sizes
- *
- * We currently try to make as few mono assumptions as possible:
- * 1) 2-word header with no GC pointers in it (first vtable, second to store the
- *    forwarding ptr)
- * 2) gc descriptor is the second word in the vtable (first word in the class)
- * 3) 8 byte alignment is the minimum and enough (not true for special structures (SIMD), FIXME)
- * 4) there is a function to get an object's size and the number of
- *    elements in an array.
- * 5) we know the special way bounds are allocated for complex arrays
- * 6) we know about proxies and how to treat them when domains are unloaded
- *
- * Always try to keep stack usage to a minimum: no recursive behaviour
- * and no large stack allocs.
- *
- * General description.
- * Objects are initially allocated in a nursery using a fast bump-pointer technique.
- * When the nursery is full we start a nursery collection: this is performed with a
- * copying GC.
- * When the old generation is full we start a copying GC of the old generation as well:
- * this will be changed to mark&sweep with copying when fragmentation becomes to severe
- * in the future.  Maybe we'll even do both during the same collection like IMMIX.
- *
- * The things that complicate this description are:
- * *) pinned objects: we can't move them so we need to keep track of them
- * *) no precise info of the thread stacks and registers: we need to be able to
- *    quickly find the objects that may be referenced conservatively and pin them
- *    (this makes the first issues more important)
- * *) large objects are too expensive to be dealt with using copying GC: we handle them
- *    with mark/sweep during major collections
- * *) some objects need to not move even if they are small (interned strings, Type handles):
- *    we use mark/sweep for them, too: they are not allocated in the nursery, but inside
- *    PinnedChunks regions
- */
-
-/*
- * TODO:
-
- *) we could have a function pointer in MonoClass to implement
-  customized write barriers for value types
-
- *) investigate the stuff needed to advance a thread to a GC-safe
-  point (single-stepping, read from unmapped memory etc) and implement it.
-  This would enable us to inline allocations and write barriers, for example,
-  or at least parts of them, like the write barrier checks.
-  We may need this also for handling precise info on stacks, even simple things
-  as having uninitialized data on the stack and having to wait for the prolog
-  to zero it. Not an issue for the last frame that we scan conservatively.
-  We could always not trust the value in the slots anyway.
-
- *) modify the jit to save info about references in stack locations:
-  this can be done just for locals as a start, so that at least
-  part of the stack is handled precisely.
-
- *) test/fix endianess issues
-
- *) Implement a card table as the write barrier instead of remembered
-    sets?  Card tables are not easy to implement with our current
-    memory layout.  We have several different kinds of major heap
-    objects: Small objects in regular blocks, small objects in pinned
-    chunks and LOS objects.  If we just have a pointer we have no way
-    to tell which kind of object it points into, therefore we cannot
-    know where its card table is.  The least we have to do to make
-    this happen is to get rid of write barriers for indirect stores.
-    (See next item)
-
- *) Get rid of write barriers for indirect stores.  We can do this by
-    telling the GC to wbarrier-register an object once we do an ldloca
-    or ldelema on it, and to unregister it once it's not used anymore
-    (it can only travel downwards on the stack).  The problem with
-    unregistering is that it needs to happen eventually no matter
-    what, even if exceptions are thrown, the thread aborts, etc.
-    Rodrigo suggested that we could do only the registering part and
-    let the collector find out (pessimistically) when it's safe to
-    unregister, namely when the stack pointer of the thread that
-    registered the object is higher than it was when the registering
-    happened.  This might make for a good first implementation to get
-    some data on performance.
-
- *) Some sort of blacklist support?  Blacklists is a concept from the
-    Boehm GC: if during a conservative scan we find pointers to an
-    area which we might use as heap, we mark that area as unusable, so
-    pointer retention by random pinning pointers is reduced.
-
- *) experiment with max small object size (very small right now - 2kb,
-    because it's tied to the max freelist size)
-
-  *) add an option to mmap the whole heap in one chunk: it makes for many
-     simplifications in the checks (put the nursery at the top and just use a single
-     check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
-     not flexible (too much of the address space may be used by default or we can't
-     increase the heap as needed) and we'd need a race-free mechanism to return memory
-     back to the system (mprotect(PROT_NONE) will still keep the memory allocated if it
-     was written to, munmap is needed, but the following mmap may not find the same segment
-     free...)
-
- *) memzero the major fragments after restarting the world and optionally a smaller
-    chunk at a time
-
- *) investigate having fragment zeroing threads
-
- *) separate locks for finalization and other minor stuff to reduce
-    lock contention
-
- *) try a different copying order to improve memory locality
-
- *) a thread abort after a store but before the write barrier will
-    prevent the write barrier from executing
-
- *) specialized dynamically generated markers/copiers
-
- *) Dynamically adjust TLAB size to the number of threads.  If we have
-    too many threads that do allocation, we might need smaller TLABs,
-    and we might get better performance with larger TLABs if we only
-    have a handful of threads.  We could sum up the space left in all
-    assigned TLABs and if that's more than some percentage of the
-    nursery size, reduce the TLAB size.
-
- *) Explore placing unreachable objects on unused nursery memory.
-       Instead of memset'ng a region to zero, place an int[] covering it.
-       A good place to start is add_nursery_frag. The tricky thing here is
-       placing those objects atomically outside of a collection.
-
- *) Allocation should use asymmetric Dekker synchronization:
-       http://blogs.oracle.com/dave/resource/Asymmetric-Dekker-Synchronization.txt
-       This should help weak consistency archs.
- */
-#include "config.h"
-#ifdef HAVE_SGEN_GC
-
-#ifdef __MACH__
-#undef _XOPEN_SOURCE
-#define _XOPEN_SOURCE
-#define _DARWIN_C_SOURCE
-#endif
-
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#ifdef HAVE_PTHREAD_H
-#include <pthread.h>
-#endif
-#ifdef HAVE_PTHREAD_NP_H
-#include <pthread_np.h>
-#endif
-#include <stdio.h>
-#include <string.h>
-#include <errno.h>
-#include <assert.h>
-#include <stdlib.h>
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/sgen-cardtable.h"
-#include "mono/metadata/sgen-protocol.h"
-#include "mono/metadata/sgen-memory-governor.h"
-#include "mono/metadata/sgen-hash-table.h"
-#include "mono/metadata/sgen-cardtable.h"
-#include "mono/metadata/sgen-pinning.h"
-#include "mono/metadata/sgen-workers.h"
-#include "mono/metadata/sgen-client.h"
-#include "mono/metadata/sgen-pointer-queue.h"
-#include "mono/metadata/gc-internal-agnostic.h"
-#include "mono/utils/mono-proclib.h"
-#include "mono/utils/mono-memory-model.h"
-#include "mono/utils/hazard-pointer.h"
-
-#include <mono/utils/memcheck.h>
-
-#undef pthread_create
-#undef pthread_join
-#undef pthread_detach
-
-/*
- * ######################################################################
- * ########  Types and constants used by the GC.
- * ######################################################################
- */
-
-/* 0 means not initialized, 1 is initialized, -1 means in progress */
-static int gc_initialized = 0;
-/* If set, check if we need to do something every X allocations */
-gboolean has_per_allocation_action;
-/* If set, do a heap check every X allocation */
-guint32 verify_before_allocs = 0;
-/* If set, do a minor collection before every X allocation */
-guint32 collect_before_allocs = 0;
-/* If set, do a whole heap check before each collection */
-static gboolean whole_heap_check_before_collection = FALSE;
-/* If set, do a heap consistency check before each minor collection */
-static gboolean consistency_check_at_minor_collection = FALSE;
-/* If set, do a mod union consistency check before each finishing collection pause */
-static gboolean mod_union_consistency_check = FALSE;
-/* If set, check whether mark bits are consistent after major collections */
-static gboolean check_mark_bits_after_major_collection = FALSE;
-/* If set, check that all nursery objects are pinned/not pinned, depending on context */
-static gboolean check_nursery_objects_pinned = FALSE;
-/* If set, do a few checks when the concurrent collector is used */
-static gboolean do_concurrent_checks = FALSE;
-/* If set, do a plausibility check on the scan_starts before and after
-   each collection */
-static gboolean do_scan_starts_check = FALSE;
-
-/*
- * If the major collector is concurrent and this is FALSE, we will
- * never initiate a synchronous major collection, unless requested via
- * GC.Collect().
- */
-static gboolean allow_synchronous_major = TRUE;
-static gboolean disable_minor_collections = FALSE;
-static gboolean disable_major_collections = FALSE;
-static gboolean do_verify_nursery = FALSE;
-static gboolean do_dump_nursery_content = FALSE;
-static gboolean enable_nursery_canaries = FALSE;
-
-#ifdef HEAVY_STATISTICS
-guint64 stat_objects_alloced_degraded = 0;
-guint64 stat_bytes_alloced_degraded = 0;
-
-guint64 stat_copy_object_called_nursery = 0;
-guint64 stat_objects_copied_nursery = 0;
-guint64 stat_copy_object_called_major = 0;
-guint64 stat_objects_copied_major = 0;
-
-guint64 stat_scan_object_called_nursery = 0;
-guint64 stat_scan_object_called_major = 0;
-
-guint64 stat_slots_allocated_in_vain;
-
-guint64 stat_nursery_copy_object_failed_from_space = 0;
-guint64 stat_nursery_copy_object_failed_forwarded = 0;
-guint64 stat_nursery_copy_object_failed_pinned = 0;
-guint64 stat_nursery_copy_object_failed_to_space = 0;
-
-static guint64 stat_wbarrier_add_to_global_remset = 0;
-static guint64 stat_wbarrier_set_arrayref = 0;
-static guint64 stat_wbarrier_arrayref_copy = 0;
-static guint64 stat_wbarrier_generic_store = 0;
-static guint64 stat_wbarrier_generic_store_atomic = 0;
-static guint64 stat_wbarrier_set_root = 0;
-static guint64 stat_wbarrier_value_copy = 0;
-static guint64 stat_wbarrier_object_copy = 0;
-#endif
-
-static guint64 stat_pinned_objects = 0;
-
-static guint64 time_minor_pre_collection_fragment_clear = 0;
-static guint64 time_minor_pinning = 0;
-static guint64 time_minor_scan_remsets = 0;
-static guint64 time_minor_scan_pinned = 0;
-static guint64 time_minor_scan_roots = 0;
-static guint64 time_minor_finish_gray_stack = 0;
-static guint64 time_minor_fragment_creation = 0;
-
-static guint64 time_major_pre_collection_fragment_clear = 0;
-static guint64 time_major_pinning = 0;
-static guint64 time_major_scan_pinned = 0;
-static guint64 time_major_scan_roots = 0;
-static guint64 time_major_scan_mod_union = 0;
-static guint64 time_major_finish_gray_stack = 0;
-static guint64 time_major_free_bigobjs = 0;
-static guint64 time_major_los_sweep = 0;
-static guint64 time_major_sweep = 0;
-static guint64 time_major_fragment_creation = 0;
-
-static guint64 time_max = 0;
-
-static SGEN_TV_DECLARE (time_major_conc_collection_start);
-static SGEN_TV_DECLARE (time_major_conc_collection_end);
-
-static SGEN_TV_DECLARE (last_minor_collection_start_tv);
-static SGEN_TV_DECLARE (last_minor_collection_end_tv);
-
-int gc_debug_level = 0;
-FILE* gc_debug_file;
-
-/*
-void
-mono_gc_flush_info (void)
-{
-       fflush (gc_debug_file);
-}
-*/
-
-#define TV_DECLARE SGEN_TV_DECLARE
-#define TV_GETTIME SGEN_TV_GETTIME
-#define TV_ELAPSED SGEN_TV_ELAPSED
-
-static SGEN_TV_DECLARE (sgen_init_timestamp);
-
-NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
-
-#define object_is_forwarded    SGEN_OBJECT_IS_FORWARDED
-#define object_is_pinned       SGEN_OBJECT_IS_PINNED
-#define pin_object             SGEN_PIN_OBJECT
-
-#define ptr_in_nursery sgen_ptr_in_nursery
-
-#define LOAD_VTABLE    SGEN_LOAD_VTABLE
-
-gboolean
-nursery_canaries_enabled (void)
-{
-       return enable_nursery_canaries;
-}
-
-#define safe_object_get_size   sgen_safe_object_get_size
-
-/*
- * ######################################################################
- * ########  Global data.
- * ######################################################################
- */
-LOCK_DECLARE (gc_mutex);
-gboolean sgen_try_free_some_memory;
-
-#define SCAN_START_SIZE        SGEN_SCAN_START_SIZE
-
-size_t degraded_mode = 0;
-
-static mword bytes_pinned_from_failed_allocation = 0;
-
-GCMemSection *nursery_section = NULL;
-static volatile mword lowest_heap_address = ~(mword)0;
-static volatile mword highest_heap_address = 0;
-
-LOCK_DECLARE (sgen_interruption_mutex);
-
-int current_collection_generation = -1;
-static volatile gboolean concurrent_collection_in_progress = FALSE;
-
-/* objects that are ready to be finalized */
-static SgenPointerQueue fin_ready_queue = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_FINALIZE_READY);
-static SgenPointerQueue critical_fin_queue = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_FINALIZE_READY);
-
-/* registered roots: the key to the hash is the root start address */
-/* 
- * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
- */
-SgenHashTable roots_hash [ROOT_TYPE_NUM] = {
-       SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), sgen_aligned_addr_hash, NULL),
-       SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), sgen_aligned_addr_hash, NULL),
-       SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), sgen_aligned_addr_hash, NULL)
-};
-static mword roots_size = 0; /* amount of memory in the root set */
-
-/* The size of a TLAB */
-/* The bigger the value, the less often we have to go to the slow path to allocate a new 
- * one, but the more space is wasted by threads not allocating much memory.
- * FIXME: Tune this.
- * FIXME: Make this self-tuning for each thread.
- */
-guint32 tlab_size = (1024 * 4);
-
-#define MAX_SMALL_OBJ_SIZE     SGEN_MAX_SMALL_OBJ_SIZE
-
-#define ALLOC_ALIGN            SGEN_ALLOC_ALIGN
-
-#define ALIGN_UP               SGEN_ALIGN_UP
-
-#ifdef SGEN_DEBUG_INTERNAL_ALLOC
-MonoNativeThreadId main_gc_thread = NULL;
-#endif
-
-/*Object was pinned during the current collection*/
-static mword objects_pinned;
-
-/*
- * ######################################################################
- * ########  Macros and function declarations.
- * ######################################################################
- */
-
-typedef SgenGrayQueue GrayQueue;
-
-/* forward declarations */
-static void scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx);
-
-static void pin_from_roots (void *start_nursery, void *end_nursery, ScanCopyContext ctx);
-static void finish_gray_stack (int generation, ScanCopyContext ctx);
-
-
-SgenMajorCollector major_collector;
-SgenMinorCollector sgen_minor_collector;
-/* FIXME: get rid of this */
-static GrayQueue gray_queue;
-
-static SgenRememberedSet remset;
-
-/* The gray queue to use from the main collection thread. */
-#define WORKERS_DISTRIBUTE_GRAY_QUEUE  (&gray_queue)
-
-/*
- * The gray queue a worker job must use.  If we're not parallel or
- * concurrent, we use the main gray queue.
- */
-static SgenGrayQueue*
-sgen_workers_get_job_gray_queue (WorkerData *worker_data)
-{
-       return worker_data ? &worker_data->private_gray_queue : WORKERS_DISTRIBUTE_GRAY_QUEUE;
-}
-
-static void
-gray_queue_redirect (SgenGrayQueue *queue)
-{
-       gboolean wake = FALSE;
-
-       for (;;) {
-               GrayQueueSection *section = sgen_gray_object_dequeue_section (queue);
-               if (!section)
-                       break;
-               sgen_section_gray_queue_enqueue (queue->alloc_prepare_data, section);
-               wake = TRUE;
-       }
-
-       if (wake) {
-               g_assert (concurrent_collection_in_progress);
-               sgen_workers_ensure_awake ();
-       }
-}
-
-static void
-gray_queue_enable_redirect (SgenGrayQueue *queue)
-{
-       if (!concurrent_collection_in_progress)
-               return;
-
-       sgen_gray_queue_set_alloc_prepare (queue, gray_queue_redirect, sgen_workers_get_distribute_section_gray_queue ());
-       gray_queue_redirect (queue);
-}
-
-void
-sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags)
-{
-       while (start < end) {
-               size_t size;
-               char *obj;
-
-               if (!*(void**)start) {
-                       start += sizeof (void*); /* should be ALLOC_ALIGN, really */
-                       continue;
-               }
-
-               if (allow_flags) {
-                       if (!(obj = SGEN_OBJECT_IS_FORWARDED (start)))
-                               obj = start;
-               } else {
-                       obj = start;
-               }
-
-               if (!sgen_client_object_is_array_fill ((GCObject*)obj)) {
-                       CHECK_CANARY_FOR_OBJECT (obj);
-                       size = ALIGN_UP (safe_object_get_size ((GCObject*)obj));
-                       callback (obj, size, data);
-                       CANARIFY_SIZE (size);
-               } else {
-                       size = ALIGN_UP (safe_object_get_size ((GCObject*)obj));
-               }
-
-               start += size;
-       }
-}
-
/*
 * sgen_add_to_global_remset:
 *
 *   The global remset contains locations which point into newspace after
 * a minor collection. This can happen if the objects they point to are pinned.
 *
 * ptr: the location (outside the nursery) being recorded.
 * obj: the nursery object that PTR points to.
 *
 * LOCKING: If called from a parallel collector, the global remset
 * lock must be held.  For serial collectors that is not necessary.
 */
void
sgen_add_to_global_remset (gpointer ptr, gpointer obj)
{
	SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Target pointer of global remset must be in the nursery");

	HEAVY_STAT (++stat_wbarrier_add_to_global_remset);

	/*
	 * Outside of a collection pause, only the concurrent collector may
	 * legitimately add global remsets.
	 */
	if (!major_collector.is_concurrent) {
		SGEN_ASSERT (5, current_collection_generation != -1, "Global remsets can only be added during collections");
	} else {
		if (current_collection_generation == -1)
			SGEN_ASSERT (5, sgen_concurrent_collection_in_progress (), "Global remsets outside of collection pauses can only be added by the concurrent collector");
	}

	if (!object_is_pinned (obj))
		SGEN_ASSERT (5, sgen_minor_collector.is_split || sgen_concurrent_collection_in_progress (), "Non-pinned objects can only remain in nursery if it is a split nursery");
	else if (sgen_cement_lookup_or_register (obj))
		/* Cemented objects don't need a remset entry; the cementing machinery keeps them alive. */
		return;

	remset.record_pointer (ptr);

	sgen_pin_stats_register_global_remset (obj);

	SGEN_LOG (8, "Adding global remset for %p", ptr);
	binary_protocol_global_remset (ptr, obj, (gpointer)SGEN_LOAD_VTABLE (obj));
}
-
/*
 * sgen_drain_gray_stack:
 *
 *   Scan objects in the gray stack until the stack is empty. This should be called
 * frequently after each object is copied, to achieve better locality and cache
 * usage.
 *
 * max_objs is the maximum number of objects to scan, or -1 to scan until the stack is
 * empty.
 *
 * Returns TRUE if the stack was fully drained, FALSE if max_objs was reached
 * with entries still left on the stack.
 */
gboolean
sgen_drain_gray_stack (int max_objs, ScanCopyContext ctx)
{
	ScanObjectFunc scan_func = ctx.ops->scan_object;
	GrayQueue *queue = ctx.queue;

	/* The major collector may provide its own specialized drain routine. */
	if (current_collection_generation == GENERATION_OLD && major_collector.drain_gray_stack)
		return major_collector.drain_gray_stack (ctx);

	do {
		int i;
		/* With max_objs < 0 the counter never hits max_objs; we exit via the NULL dequeue. */
		for (i = 0; i != max_objs; ++i) {
			char *obj;
			mword desc;
			GRAY_OBJECT_DEQUEUE (queue, &obj, &desc);
			if (!obj)
				return TRUE;
			SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj)));
			scan_func (obj, desc, queue);
		}
	} while (max_objs < 0);
	return FALSE;
}
-
/*
 * Addresses in the pin queue are already sorted. This function finds
 * the object header for each address and pins the object. The
 * addresses must be inside the nursery section.  The (start of the)
 * address array is overwritten with the addresses of the actually
 * pinned objects.  Return the number of pinned objects.
 *
 * If do_scan_objects is TRUE, each found object is scanned immediately
 * instead of being pinned and enqueued on the gray stack.
 */
static int
pin_objects_from_nursery_pin_queue (gboolean do_scan_objects, ScanCopyContext ctx)
{
	GCMemSection *section = nursery_section;
	void **start =  sgen_pinning_get_entry (section->pin_queue_first_entry);
	void **end = sgen_pinning_get_entry (section->pin_queue_last_entry);
	void *start_nursery = section->data;
	void *end_nursery = section->next_data;
	void *last = NULL;
	int count = 0;
	void *search_start;
	void *addr;
	/* Everything below pinning_front has already been examined. */
	void *pinning_front = start_nursery;
	size_t idx;
	void **definitely_pinned = start;
	ScanObjectFunc scan_func = ctx.ops->scan_object;
	SgenGrayQueue *queue = ctx.queue;

	sgen_nursery_allocator_prepare_for_pinning ();

	while (start < end) {
		void *obj_to_pin = NULL;
		size_t obj_to_pin_size = 0;
		mword desc;

		addr = *start;

		SGEN_ASSERT (0, addr >= start_nursery && addr < end_nursery, "Potential pinning address out of range");
		SGEN_ASSERT (0, addr >= last, "Pin queue not sorted");

		/* Skip duplicate addresses (the queue is sorted but not uniqued). */
		if (addr == last) {
			++start;
			continue;
		}

		SGEN_LOG (5, "Considering pinning addr %p", addr);
		/* We've already processed everything up to pinning_front. */
		if (addr < pinning_front) {
			start++;
			continue;
		}

		/*
		 * Find the closest scan start <= addr.  We might search backward in the
		 * scan_starts array because entries might be NULL.  In the worst case we
		 * start at start_nursery.
		 */
		idx = ((char*)addr - (char*)section->data) / SCAN_START_SIZE;
		SGEN_ASSERT (0, idx < section->num_scan_start, "Scan start index out of range");
		search_start = (void*)section->scan_starts [idx];
		if (!search_start || search_start > addr) {
			while (idx) {
				--idx;
				search_start = section->scan_starts [idx];
				if (search_start && search_start <= addr)
					break;
			}
			if (!search_start || search_start > addr)
				search_start = start_nursery;
		}

		/*
		 * If the pinning front is closer than the scan start we found, start
		 * searching at the front.
		 */
		if (search_start < pinning_front)
			search_start = pinning_front;

		/*
		 * Now addr should be in an object a short distance from search_start.
		 *
		 * search_start must point to zeroed mem or point to an object.
		 */
		do {
			size_t obj_size, canarified_obj_size;

			/* Skip zeros. */
			if (!*(void**)search_start) {
				search_start = (void*)ALIGN_UP ((mword)search_start + sizeof (gpointer));
				/* The loop condition makes sure we don't overrun addr. */
				continue;
			}

			canarified_obj_size = obj_size = ALIGN_UP (safe_object_get_size ((GCObject*)search_start));

			/*
			 * Filler arrays are marked by an invalid sync word.  We don't
			 * consider them for pinning.  They are not delimited by canaries,
			 * either.
			 */
			if (!sgen_client_object_is_array_fill ((GCObject*)search_start)) {
				CHECK_CANARY_FOR_OBJECT (search_start);
				CANARIFY_SIZE (canarified_obj_size);

				if (addr >= search_start && (char*)addr < (char*)search_start + obj_size) {
					/* This is the object we're looking for. */
					obj_to_pin = search_start;
					obj_to_pin_size = canarified_obj_size;
					break;
				}
			}

			/* Skip to the next object */
			search_start = (void*)((char*)search_start + canarified_obj_size);
		} while (search_start <= addr);

		/* We've searched past the address we were looking for. */
		if (!obj_to_pin) {
			pinning_front = search_start;
			goto next_pin_queue_entry;
		}

		/*
		 * We've found an object to pin.  It might still be a dummy array, but we
		 * can advance the pinning front in any case.
		 */
		pinning_front = (char*)obj_to_pin + obj_to_pin_size;

		/*
		 * If this is a dummy array marking the beginning of a nursery
		 * fragment, we don't pin it.
		 */
		if (sgen_client_object_is_array_fill ((GCObject*)obj_to_pin))
			goto next_pin_queue_entry;

		/*
		 * Finally - pin the object!
		 */
		desc = sgen_obj_get_descriptor_safe (obj_to_pin);
		if (do_scan_objects) {
			scan_func (obj_to_pin, desc, queue);
		} else {
			SGEN_LOG (4, "Pinned object %p, vtable %p (%s), count %d\n",
					obj_to_pin, *(void**)obj_to_pin, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj_to_pin)), count);
			binary_protocol_pin (obj_to_pin,
					(gpointer)LOAD_VTABLE (obj_to_pin),
					safe_object_get_size (obj_to_pin));

			pin_object (obj_to_pin);
			GRAY_OBJECT_ENQUEUE (queue, obj_to_pin, desc);
			sgen_pin_stats_register_object (obj_to_pin, obj_to_pin_size);
			/* Compact the actually-pinned addresses into the front of the queue. */
			definitely_pinned [count] = obj_to_pin;
			count++;
		}

	next_pin_queue_entry:
		last = addr;
		++start;
	}
	sgen_client_nursery_objects_pinned (definitely_pinned, count);
	stat_pinned_objects += count;
	return count;
}
-
-static void
-pin_objects_in_nursery (gboolean do_scan_objects, ScanCopyContext ctx)
-{
-       size_t reduced_to;
-
-       if (nursery_section->pin_queue_first_entry == nursery_section->pin_queue_last_entry)
-               return;
-
-       reduced_to = pin_objects_from_nursery_pin_queue (do_scan_objects, ctx);
-       nursery_section->pin_queue_last_entry = nursery_section->pin_queue_first_entry + reduced_to;
-}
-
/*
 * This function is only ever called (via `collector_pin_object()` in `sgen-copy-object.h`)
 * when we can't promote an object because we're out of memory.
 *
 * Pins OBJECT in place, records it in the binary protocol and pin stats,
 * and enqueues it on QUEUE for scanning.
 */
void
sgen_pin_object (void *object, GrayQueue *queue)
{
	/*
	 * All pinned objects are assumed to have been staged, so we need to stage as well.
	 * Also, the count of staged objects shows that "late pinning" happened.
	 */
	sgen_pin_stage_ptr (object);

	SGEN_PIN_OBJECT (object);
	binary_protocol_pin (object, (gpointer)LOAD_VTABLE (object), safe_object_get_size (object));

	++objects_pinned;
	sgen_pin_stats_register_object (object, safe_object_get_size (object));

	GRAY_OBJECT_ENQUEUE (queue, object, sgen_obj_get_descriptor_safe (object));
}
-
/*
 * sgen_sort_addresses:
 *
 *   Sort the addresses in ARRAY in increasing order.  Done using a by-the-book
 * heap sort, which has decent and stable performance and is pretty cache
 * efficient: O(n log n), in place, no extra memory.
 *
 * array: the pointers to sort (modified in place).
 * size:  number of elements.
 */
void
sgen_sort_addresses (void **array, size_t size)
{
	size_t i;
	void *tmp;

	/*
	 * Guard against the size_t underflow of `size - 1` in the extraction
	 * loop below: for size == 0 the old code would index array [SIZE_MAX].
	 * Arrays of fewer than two elements are already sorted anyway.
	 */
	if (size < 2)
		return;

	/* Build a max-heap by sifting each element up towards the root. */
	for (i = 1; i < size; ++i) {
		size_t child = i;
		while (child > 0) {
			size_t parent = (child - 1) / 2;

			if (array [parent] >= array [child])
				break;

			tmp = array [parent];
			array [parent] = array [child];
			array [child] = tmp;

			child = parent;
		}
	}

	/* Repeatedly move the maximum to the end and restore the heap property. */
	for (i = size - 1; i > 0; --i) {
		size_t end, root;
		tmp = array [i];
		array [i] = array [0];
		array [0] = tmp;

		end = i - 1;
		root = 0;

		while (root * 2 + 1 <= end) {
			size_t child = root * 2 + 1;

			/* Pick the larger of the two children. */
			if (child < end && array [child] < array [child + 1])
				++child;
			if (array [root] >= array [child])
				break;

			tmp = array [root];
			array [root] = array [child];
			array [child] = tmp;

			root = child;
		}
	}
}
-
/*
 * Scan the memory between start and end and queue values which could be pointers
 * to the area between start_nursery and end_nursery for later consideration.
 * Typically used for thread stacks.
 *
 * pin_type: a PIN_TYPE_* tag recorded with each staged address for the
 * pinning statistics.
 */
void
sgen_conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type)
{
	int count = 0;

#ifdef VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE
	VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE (start, (char*)end - (char*)start);
#endif

	while (start < end) {
		if (*start >= start_nursery && *start < end_nursery) {
			/*
			 * *start can point to the middle of an object
			 * note: should we handle pointing at the end of an object?
			 * pinning in C# code disallows pointing at the end of an object
			 * but there is some small chance that an optimizing C compiler
			 * may keep the only reference to an object by pointing
			 * at the end of it. We ignore this small chance for now.
			 * Pointers to the end of an object are indistinguishable
			 * from pointers to the start of the next object in memory
			 * so if we allow that we'd need to pin two objects...
			 * We queue the pointer in an array, the
			 * array will then be sorted and uniqued. This way
			 * we can coalesce several pinning pointers and it should
			 * be faster since we'd do a memory scan with increasing
			 * addresses. Note: we can align the address to the allocation
			 * alignment, so the unique process is more effective.
			 */
			mword addr = (mword)*start;
			addr &= ~(ALLOC_ALIGN - 1);
			/* Re-check the range: aligning down may have moved the address below the nursery. */
			if (addr >= (mword)start_nursery && addr < (mword)end_nursery) {
				SGEN_LOG (6, "Pinning address %p from %p", (void*)addr, start);
				sgen_pin_stage_ptr ((void*)addr);
				binary_protocol_pin_stage (start, (void*)addr);
				count++;
			}

			/*
			 * FIXME: It seems we're registering objects from all over the heap
			 * (at least from the nursery and the LOS), but we're only
			 * registering pinned addresses in the nursery.  What's up with
			 * that?
			 *
			 * Also, why wouldn't we register addresses once the pinning queue
			 * is sorted and uniqued?
			 */
			if (ptr_in_nursery ((void*)addr))
				sgen_pin_stats_register_address ((char*)addr, pin_type);
		}
		start++;
	}
	if (count)
		SGEN_LOG (7, "found %d potential pinned heap pointers", count);
}
-
/*
 * The first thing we do in a collection is to identify pinned objects.
 * This function considers all the areas of memory that need to be
 * conservatively scanned: the registered pinned roots and the thread
 * stacks/registers (via the client callback).
 */
static void
pin_from_roots (void *start_nursery, void *end_nursery, ScanCopyContext ctx)
{
	void **start_root;
	RootRecord *root;
	SGEN_LOG (2, "Scanning pinned roots (%d bytes, %d/%d entries)", (int)roots_size, roots_hash [ROOT_TYPE_NORMAL].num_entries, roots_hash [ROOT_TYPE_PINNED].num_entries);
	/* objects pinned from the API are inside these roots */
	SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], start_root, root) {
		SGEN_LOG (6, "Pinned roots %p-%p", start_root, root->end_root);
		sgen_conservatively_pin_objects_from (start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
	} SGEN_HASH_TABLE_FOREACH_END;
	/* now deal with the thread stacks
	 * in the future we should be able to conservatively scan only:
	 * *) the cpu registers
	 * *) the unmanaged stack frames
	 * *) the _last_ managed stack frame
	 * *) pointers slots in managed frames
	 */
	sgen_client_scan_thread_data (start_nursery, end_nursery, FALSE, ctx);
}
-
-static void
-unpin_objects_from_queue (SgenGrayQueue *queue)
-{
-       for (;;) {
-               char *addr;
-               mword desc;
-               GRAY_OBJECT_DEQUEUE (queue, &addr, &desc);
-               if (!addr)
-                       break;
-               g_assert (SGEN_OBJECT_IS_PINNED (addr));
-               SGEN_UNPIN_OBJECT (addr);
-       }
-}
-
-static void
-single_arg_user_copy_or_mark (void **obj, void *gc_data)
-{
-       ScanCopyContext *ctx = gc_data;
-       ctx->ops->copy_or_mark_object (obj, ctx->queue);
-}
-
/*
 * The memory area from start_root to end_root contains pointers to objects.
 * Their position is precisely described by @desc (this means that the pointer
 * can be either NULL or the pointer to the start of an object).
 * This function copies them to to_space and updates them.
 *
 * This function is not thread-safe!
 */
static void
precisely_scan_objects_from (void** start_root, void** end_root, char* n_start, char *n_end, mword desc, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	SgenGrayQueue *queue = ctx.queue;

	switch (desc & ROOT_DESC_TYPE_MASK) {
	case ROOT_DESC_BITMAP:
		/* Inline bitmap: one bit per pointer-sized slot starting at start_root. */
		desc >>= ROOT_DESC_TYPE_SHIFT;
		while (desc) {
			if ((desc & 1) && *start_root) {
				copy_func (start_root, queue);
				SGEN_LOG (9, "Overwrote root at %p with %p", start_root, *start_root);
			}
			desc >>= 1;
			start_root++;
		}
		return;
	case ROOT_DESC_COMPLEX: {
		/* Out-of-line bitmap: first word is the word count + 1, then the bitmap words. */
		gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
		gsize bwords = (*bitmap_data) - 1;
		void **start_run = start_root;
		bitmap_data++;
		while (bwords-- > 0) {
			gsize bmap = *bitmap_data++;
			void **objptr = start_run;
			while (bmap) {
				if ((bmap & 1) && *objptr) {
					copy_func (objptr, queue);
					SGEN_LOG (9, "Overwrote root at %p with %p", objptr, *objptr);
				}
				bmap >>= 1;
				++objptr;
			}
			/* Each bitmap word covers GC_BITS_PER_WORD slots. */
			start_run += GC_BITS_PER_WORD;
		}
		break;
	}
	case ROOT_DESC_USER: {
		/* User-supplied marker enumerates the root's pointers itself. */
		SgenUserRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
		marker (start_root, single_arg_user_copy_or_mark, &ctx);
		break;
	}
	case ROOT_DESC_RUN_LEN:
		/* Run-length roots are not expected here. */
		g_assert_not_reached ();
	default:
		g_assert_not_reached ();
	}
}
-
-static void
-reset_heap_boundaries (void)
-{
-       lowest_heap_address = ~(mword)0;
-       highest_heap_address = 0;
-}
-
/*
 * sgen_update_heap_boundaries:
 *
 *   Extend the global [lowest_heap_address, highest_heap_address] range to
 * include [LOW, HIGH].  Each bound is updated with a lock-free CAS loop so
 * concurrent callers cannot lose each other's updates.
 */
void
sgen_update_heap_boundaries (mword low, mword high)
{
	mword old;

	do {
		old = lowest_heap_address;
		/* Someone else already recorded a lower bound. */
		if (low >= old)
			break;
	} while (SGEN_CAS_PTR ((gpointer*)&lowest_heap_address, (gpointer)low, (gpointer)old) != (gpointer)old);

	do {
		old = highest_heap_address;
		/* Someone else already recorded a higher bound. */
		if (high <= old)
			break;
	} while (SGEN_CAS_PTR ((gpointer*)&highest_heap_address, (gpointer)high, (gpointer)old) != (gpointer)old);
}
-
/*
 * Allocate and setup the data structures needed to be able to allocate objects
 * in the nursery. The nursery is stored in nursery_section.  Idempotent: a
 * no-op if the nursery section already exists.
 */
static void
alloc_nursery (void)
{
	GCMemSection *section;
	char *data;
	size_t scan_starts;
	size_t alloc_size;

	if (nursery_section)
		return;
	SGEN_LOG (2, "Allocating nursery size: %zu", (size_t)sgen_nursery_size);
	/* later we will alloc a larger area for the nursery but only activate
	 * what we need. The rest will be used as expansion if we have too many pinned
	 * objects in the existing nursery.
	 */
	/* FIXME: handle OOM */
	section = sgen_alloc_internal (INTERNAL_MEM_SECTION);

	alloc_size = sgen_nursery_size;

	/* If there isn't enough space even for the nursery we should simply abort. */
	g_assert (sgen_memgov_try_alloc_space (alloc_size, SPACE_NURSERY));

	data = major_collector.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
	sgen_update_heap_boundaries ((mword)data, (mword)(data + sgen_nursery_size));
	SGEN_LOG (4, "Expanding nursery size (%p-%p): %lu, total: %lu", data, data + alloc_size, (unsigned long)sgen_nursery_size, (unsigned long)sgen_gc_get_total_heap_allocation ());
	section->data = section->next_data = data;
	section->size = alloc_size;
	section->end_data = data + sgen_nursery_size;
	/* One scan-start slot per SCAN_START_SIZE bytes, rounded up. */
	scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
	section->scan_starts = sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
	section->num_scan_start = scan_starts;

	nursery_section = section;

	sgen_nursery_allocator_set_nursery_bounds (data, data + sgen_nursery_size);
}
-
/*
 * mono_gc_get_logfile:
 *
 *   Return the FILE* the GC writes its debug log to.
 */
FILE *
mono_gc_get_logfile (void)
{
	return gc_debug_file;
}
-
-static void
-scan_finalizer_entries (SgenPointerQueue *fin_queue, ScanCopyContext ctx)
-{
-       CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
-       SgenGrayQueue *queue = ctx.queue;
-       size_t i;
-
-       for (i = 0; i < fin_queue->next_slot; ++i) {
-               void *obj = fin_queue->data [i];
-               if (!obj)
-                       continue;
-               SGEN_LOG (5, "Scan of fin ready object: %p (%s)\n", obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj)));
-               copy_func (&fin_queue->data [i], queue);
-       }
-}
-
-static const char*
-generation_name (int generation)
-{
-       switch (generation) {
-       case GENERATION_NURSERY: return "nursery";
-       case GENERATION_OLD: return "old";
-       default: g_assert_not_reached ();
-       }
-}
-
/*
 * sgen_generation_name:
 *
 *   Public wrapper around generation_name (): "nursery" or "old".
 */
const char*
sgen_generation_name (int generation)
{
	return generation_name (generation);
}
-
/*
 * Finish the marking phase for GENERATION: drain the gray stack to a fixed
 * point while interleaving ephemeron marking, bridge processing, weak-link
 * nulling, finalization and toggleref handling, in that carefully ordered
 * sequence.  Must leave the gray queue empty.
 */
static void
finish_gray_stack (int generation, ScanCopyContext ctx)
{
	TV_DECLARE (atv);
	TV_DECLARE (btv);
	int done_with_ephemerons, ephemeron_rounds = 0;
	/* For a minor collection only the nursery range is relevant; for major, everything. */
	char *start_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_start () : NULL;
	char *end_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_end () : (char*)-1;
	SgenGrayQueue *queue = ctx.queue;

	/*
	 * We copied all the reachable objects. Now it's the time to copy
	 * the objects that were not referenced by the roots, but by the copied objects.
	 * we built a stack of objects pointed to by gray_start: they are
	 * additional roots and we may add more items as we go.
	 * We loop until gray_start == gray_objects which means no more objects have
	 * been added. Note this is iterative: no recursion is involved.
	 * We need to walk the LO list as well in search of marked big objects
	 * (use a flag since this is needed only on major collections). We need to loop
	 * here as well, so keep a counter of marked LO (increasing it in copy_object).
	 *   To achieve better cache locality and cache usage, we drain the gray stack 
	 * frequently, after each object is copied, and just finish the work here.
	 */
	sgen_drain_gray_stack (-1, ctx);
	TV_GETTIME (atv);
	SGEN_LOG (2, "%s generation done", generation_name (generation));

	/*
	Reset bridge data, we might have lingering data from a previous collection if this is a major
	collection trigged by minor overflow.

	We must reset the gathered bridges since their original block might be evacuated due to major
	fragmentation in the meanwhile and the bridge code should not have to deal with that.
	*/
	if (sgen_client_bridge_need_processing ())
		sgen_client_bridge_reset_data ();

	/*
	 * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
	 * before processing finalizable objects and non-tracking weak links to avoid finalizing/clearing
	 * objects that are in fact reachable.
	 */
	done_with_ephemerons = 0;
	do {
		done_with_ephemerons = sgen_client_mark_ephemerons (ctx);
		sgen_drain_gray_stack (-1, ctx);
		++ephemeron_rounds;
	} while (!done_with_ephemerons);

	sgen_client_mark_togglerefs (start_addr, end_addr, ctx);

	if (sgen_client_bridge_need_processing ()) {
		/*Make sure the gray stack is empty before we process bridge objects so we get liveness right*/
		sgen_drain_gray_stack (-1, ctx);
		sgen_collect_bridge_objects (generation, ctx);
		/* A major collection must also cover nursery bridge objects. */
		if (generation == GENERATION_OLD)
			sgen_collect_bridge_objects (GENERATION_NURSERY, ctx);

		/*
		Do the first bridge step here, as the collector liveness state will become useless after that.

		An important optimization is to only proccess the possibly dead part of the object graph and skip
		over all live objects as we transitively know everything they point must be alive too.

		The above invariant is completely wrong if we let the gray queue be drained and mark/copy everything.

		This has the unfortunate side effect of making overflow collections perform the first step twice, but
		given we now have heuristics that perform major GC in anticipation of minor overflows this should not
		be a big deal.
		*/
		sgen_client_bridge_processing_stw_step ();
	}

	/*
	Make sure we drain the gray stack before processing disappearing links and finalizers.
	If we don't make sure it is empty we might wrongly see a live object as dead.
	*/
	sgen_drain_gray_stack (-1, ctx);

	/*
	We must clear weak links that don't track resurrection before processing object ready for
	finalization so they can be cleared before that.
	*/
	sgen_null_link_in_range (generation, TRUE, ctx);
	if (generation == GENERATION_OLD)
		sgen_null_link_in_range (GENERATION_NURSERY, TRUE, ctx);


	/* walk the finalization queue and move also the objects that need to be
	 * finalized: use the finalized objects as new roots so the objects they depend
	 * on are also not reclaimed. As with the roots above, only objects in the nursery
	 * are marked/copied.
	 */
	sgen_finalize_in_range (generation, ctx);
	if (generation == GENERATION_OLD)
		sgen_finalize_in_range (GENERATION_NURSERY, ctx);
	/* drain the new stack that might have been created */
	SGEN_LOG (6, "Precise scan of gray area post fin");
	sgen_drain_gray_stack (-1, ctx);

	/*
	 * This must be done again after processing finalizable objects since CWL slots are cleared only after the key is finalized.
	 */
	done_with_ephemerons = 0;
	do {
		done_with_ephemerons = sgen_client_mark_ephemerons (ctx);
		sgen_drain_gray_stack (-1, ctx);
		++ephemeron_rounds;
	} while (!done_with_ephemerons);

	sgen_client_clear_unreachable_ephemerons (ctx);

	/*
	 * We clear togglerefs only after all possible chances of revival are done. 
	 * This is semantically more inline with what users expect and it allows for
	 * user finalizers to correctly interact with TR objects.
	*/
	sgen_client_clear_togglerefs (start_addr, end_addr, ctx);

	TV_GETTIME (btv);
	SGEN_LOG (2, "Finalize queue handling scan for %s generation: %ld usecs %d ephemeron rounds", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds);

	/*
	 * handle disappearing links
	 * Note we do this after checking the finalization queue because if an object
	 * survives (at least long enough to be finalized) we don't clear the link.
	 * This also deals with a possible issue with the monitor reclamation: with the Boehm
	 * GC a finalized object my lose the monitor because it is cleared before the finalizer is
	 * called.
	 */
	g_assert (sgen_gray_object_queue_is_empty (queue));
	for (;;) {
		sgen_null_link_in_range (generation, FALSE, ctx);
		if (generation == GENERATION_OLD)
			sgen_null_link_in_range (GENERATION_NURSERY, FALSE, ctx);
		if (sgen_gray_object_queue_is_empty (queue))
			break;
		sgen_drain_gray_stack (-1, ctx);
	}

	g_assert (sgen_gray_object_queue_is_empty (queue));

	/* Return unused gray queue sections to the free list. */
	sgen_gray_object_queue_trim_free_list (queue);
}
-
-void
-sgen_check_section_scan_starts (GCMemSection *section)
-{
-       size_t i;
-       for (i = 0; i < section->num_scan_start; ++i) {
-               if (section->scan_starts [i]) {
-                       mword size = safe_object_get_size ((GCObject*) section->scan_starts [i]);
-                       SGEN_ASSERT (0, size >= SGEN_CLIENT_MINIMUM_OBJECT_SIZE && size <= MAX_SMALL_OBJ_SIZE, "Weird object size at scan starts.");
-               }
-       }
-}
-
-static void
-check_scan_starts (void)
-{
-       if (!do_scan_starts_check)
-               return;
-       sgen_check_section_scan_starts (nursery_section);
-       major_collector.check_scan_starts ();
-}
-
/*
 * Precisely scan every registered root of ROOT_TYPE, copying/marking the
 * objects it references in [addr_start, addr_end) according to each root's
 * stored descriptor.
 */
static void
scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx)
{
	void **start_root;
	RootRecord *root;
	SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
		SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
		precisely_scan_objects_from (start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, ctx);
	} SGEN_HASH_TABLE_FOREACH_END;
}
-
-static void
-init_stats (void)
-{
-       static gboolean inited = FALSE;
-
-       if (inited)
-               return;
-
-       mono_counters_register ("Collection max time",  MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME | MONO_COUNTER_MONOTONIC, &time_max);
-
-       mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_pre_collection_fragment_clear);
-       mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_pinning);
-       mono_counters_register ("Minor scan remembered set", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_scan_remsets);
-       mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_scan_pinned);
-       mono_counters_register ("Minor scan roots", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_scan_roots);
-       mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_fragment_creation);
-
-       mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_pre_collection_fragment_clear);
-       mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_pinning);
-       mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_pinned);
-       mono_counters_register ("Major scan roots", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_roots);
-       mono_counters_register ("Major scan mod union", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_mod_union);
-       mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_finish_gray_stack);
-       mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_free_bigobjs);
-       mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_los_sweep);
-       mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_sweep);
-       mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_fragment_creation);
-
-       mono_counters_register ("Number of pinned objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_pinned_objects);
-
-#ifdef HEAVY_STATISTICS
-       mono_counters_register ("WBarrier remember pointer", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_add_to_global_remset);
-       mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_set_arrayref);
-       mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_arrayref_copy);
-       mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_generic_store);
-       mono_counters_register ("WBarrier generic atomic store called", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_generic_store_atomic);
-       mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_set_root);
-       mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_value_copy);
-       mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_object_copy);
-
-       mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_alloced_degraded);
-       mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_bytes_alloced_degraded);
-
-       mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copy_object_called_nursery);
-       mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_copied_nursery);
-       mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copy_object_called_major);
-       mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_copied_major);
-
-       mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scan_object_called_nursery);
-       mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scan_object_called_major);
-
-       mono_counters_register ("Slots allocated in vain", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_slots_allocated_in_vain);
-
-       mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_from_space);
-       mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_forwarded);
-       mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_pinned);
-       mono_counters_register ("# nursery copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_to_space);
-
-       sgen_nursery_allocator_init_heavy_stats ();
-#endif
-
-       inited = TRUE;
-}
-
-
/* Reset the running total of bytes pinned due to failed allocations. */
static void
reset_pinned_from_failed_allocation (void)
{
	bytes_pinned_from_failed_allocation = 0;
}
-
/*
 * Add `objsize` to the running total of bytes pinned as a result of
 * failed allocations.
 */
void
sgen_set_pinned_from_failed_allocation (mword objsize)
{
	bytes_pinned_from_failed_allocation += objsize;
}
-
-gboolean
-sgen_collection_is_concurrent (void)
-{
-       switch (current_collection_generation) {
-       case GENERATION_NURSERY:
-               return FALSE;
-       case GENERATION_OLD:
-               return concurrent_collection_in_progress;
-       default:
-               g_error ("Invalid current generation %d", current_collection_generation);
-       }
-       return FALSE;
-}
-
/* Return whether a concurrent collection is currently in progress. */
gboolean
sgen_concurrent_collection_in_progress (void)
{
	return concurrent_collection_in_progress;
}
-
-typedef struct {
-       SgenThreadPoolJob job;
-       SgenObjectOperations *ops;
-} ScanJob;
-
-static void
-job_remembered_set_scan (void *worker_data_untyped, SgenThreadPoolJob *job)
-{
-       WorkerData *worker_data = worker_data_untyped;
-       ScanJob *job_data = (ScanJob*)job;
-       ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
-       remset.scan_remsets (ctx);
-}
-
-typedef struct {
-       SgenThreadPoolJob job;
-       SgenObjectOperations *ops;
-       char *heap_start;
-       char *heap_end;
-       int root_type;
-} ScanFromRegisteredRootsJob;
-
-static void
-job_scan_from_registered_roots (void *worker_data_untyped, SgenThreadPoolJob *job)
-{
-       WorkerData *worker_data = worker_data_untyped;
-       ScanFromRegisteredRootsJob *job_data = (ScanFromRegisteredRootsJob*)job;
-       ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
-
-       scan_from_registered_roots (job_data->heap_start, job_data->heap_end, job_data->root_type, ctx);
-}
-
-typedef struct {
-       SgenThreadPoolJob job;
-       SgenObjectOperations *ops;
-       char *heap_start;
-       char *heap_end;
-} ScanThreadDataJob;
-
-static void
-job_scan_thread_data (void *worker_data_untyped, SgenThreadPoolJob *job)
-{
-       WorkerData *worker_data = worker_data_untyped;
-       ScanThreadDataJob *job_data = (ScanThreadDataJob*)job;
-       ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
-
-       sgen_client_scan_thread_data (job_data->heap_start, job_data->heap_end, TRUE, ctx);
-}
-
-typedef struct {
-       SgenThreadPoolJob job;
-       SgenObjectOperations *ops;
-       SgenPointerQueue *queue;
-} ScanFinalizerEntriesJob;
-
-static void
-job_scan_finalizer_entries (void *worker_data_untyped, SgenThreadPoolJob *job)
-{
-       WorkerData *worker_data = worker_data_untyped;
-       ScanFinalizerEntriesJob *job_data = (ScanFinalizerEntriesJob*)job;
-       ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
-
-       scan_finalizer_entries (job_data->queue, ctx);
-}
-
-static void
-job_scan_major_mod_union_card_table (void *worker_data_untyped, SgenThreadPoolJob *job)
-{
-       WorkerData *worker_data = worker_data_untyped;
-       ScanJob *job_data = (ScanJob*)job;
-       ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
-
-       g_assert (concurrent_collection_in_progress);
-       major_collector.scan_card_table (TRUE, ctx);
-}
-
-static void
-job_scan_los_mod_union_card_table (void *worker_data_untyped, SgenThreadPoolJob *job)
-{
-       WorkerData *worker_data = worker_data_untyped;
-       ScanJob *job_data = (ScanJob*)job;
-       ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
-
-       g_assert (concurrent_collection_in_progress);
-       sgen_los_scan_card_table (TRUE, ctx);
-}
-
/*
 * Initialize the collector's gray queue for a collection.  For a
 * concurrent collection the workers' distribute gray queue is set up
 * first.
 */
static void
init_gray_queue (void)
{
	if (sgen_collection_is_concurrent ())
		sgen_workers_init_distribute_gray_queue ();
	sgen_gray_object_queue_init (&gray_queue, NULL);
}
-
-static void
-enqueue_scan_from_roots_jobs (char *heap_start, char *heap_end, SgenObjectOperations *ops)
-{
-       ScanFromRegisteredRootsJob *scrrj;
-       ScanThreadDataJob *stdj;
-       ScanFinalizerEntriesJob *sfej;
-
-       /* registered roots, this includes static fields */
-
-       scrrj = (ScanFromRegisteredRootsJob*)sgen_thread_pool_job_alloc ("scan from registered roots normal", job_scan_from_registered_roots, sizeof (ScanFromRegisteredRootsJob));
-       scrrj->ops = ops;
-       scrrj->heap_start = heap_start;
-       scrrj->heap_end = heap_end;
-       scrrj->root_type = ROOT_TYPE_NORMAL;
-       sgen_workers_enqueue_job (&scrrj->job);
-
-       scrrj = (ScanFromRegisteredRootsJob*)sgen_thread_pool_job_alloc ("scan from registered roots wbarrier", job_scan_from_registered_roots, sizeof (ScanFromRegisteredRootsJob));
-       scrrj->ops = ops;
-       scrrj->heap_start = heap_start;
-       scrrj->heap_end = heap_end;
-       scrrj->root_type = ROOT_TYPE_WBARRIER;
-       sgen_workers_enqueue_job (&scrrj->job);
-
-       /* Threads */
-
-       stdj = (ScanThreadDataJob*)sgen_thread_pool_job_alloc ("scan thread data", job_scan_thread_data, sizeof (ScanThreadDataJob));
-       stdj->heap_start = heap_start;
-       stdj->heap_end = heap_end;
-       sgen_workers_enqueue_job (&stdj->job);
-
-       /* Scan the list of objects ready for finalization. */
-
-       sfej = (ScanFinalizerEntriesJob*)sgen_thread_pool_job_alloc ("scan finalizer entries", job_scan_finalizer_entries, sizeof (ScanFinalizerEntriesJob));
-       sfej->queue = &fin_ready_queue;
-       sfej->ops = ops;
-       sgen_workers_enqueue_job (&sfej->job);
-
-       sfej = (ScanFinalizerEntriesJob*)sgen_thread_pool_job_alloc ("scan critical finalizer entries", job_scan_finalizer_entries, sizeof (ScanFinalizerEntriesJob));
-       sfej->queue = &critical_fin_queue;
-       sfej->ops = ops;
-       sgen_workers_enqueue_job (&sfej->job);
-}
-
-/*
- * Perform a nursery collection.
- *
- * Return whether any objects were late-pinned due to being out of memory.
- */
-static gboolean
-collect_nursery (SgenGrayQueue *unpin_queue, gboolean finish_up_concurrent_mark)
-{
-       gboolean needs_major;
-       size_t max_garbage_amount;
-       char *nursery_next;
-       mword fragment_total;
-       ScanJob *sj;
-       SgenObjectOperations *object_ops = &sgen_minor_collector.serial_ops;
-       ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (object_ops, &gray_queue);
-       TV_DECLARE (atv);
-       TV_DECLARE (btv);
-
-       if (disable_minor_collections)
-               return TRUE;
-
-       TV_GETTIME (last_minor_collection_start_tv);
-       atv = last_minor_collection_start_tv;
-
-       binary_protocol_collection_begin (gc_stats.minor_gc_count, GENERATION_NURSERY);
-
-       if (do_verify_nursery || do_dump_nursery_content)
-               sgen_debug_verify_nursery (do_dump_nursery_content);
-
-       current_collection_generation = GENERATION_NURSERY;
-
-       SGEN_ASSERT (0, !sgen_collection_is_concurrent (), "Why is the nursery collection concurrent?");
-
-       reset_pinned_from_failed_allocation ();
-
-       check_scan_starts ();
-
-       sgen_nursery_alloc_prepare_for_minor ();
-
-       degraded_mode = 0;
-       objects_pinned = 0;
-       nursery_next = sgen_nursery_alloc_get_upper_alloc_bound ();
-       /* FIXME: optimize later to use the higher address where an object can be present */
-       nursery_next = MAX (nursery_next, sgen_get_nursery_end ());
-
-       SGEN_LOG (1, "Start nursery collection %d %p-%p, size: %d", gc_stats.minor_gc_count, sgen_get_nursery_start (), nursery_next, (int)(nursery_next - sgen_get_nursery_start ()));
-       max_garbage_amount = nursery_next - sgen_get_nursery_start ();
-       g_assert (nursery_section->size >= max_garbage_amount);
-
-       /* world must be stopped already */
-       TV_GETTIME (btv);
-       time_minor_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
-
-       sgen_client_pre_collection_checks ();
-
-       nursery_section->next_data = nursery_next;
-
-       major_collector.start_nursery_collection ();
-
-       sgen_memgov_minor_collection_start ();
-
-       init_gray_queue ();
-
-       gc_stats.minor_gc_count ++;
-
-       if (whole_heap_check_before_collection) {
-               sgen_clear_nursery_fragments ();
-               sgen_check_whole_heap (finish_up_concurrent_mark);
-       }
-       if (consistency_check_at_minor_collection)
-               sgen_check_consistency ();
-
-       sgen_process_fin_stage_entries ();
-       sgen_process_dislink_stage_entries ();
-
-       /* pin from pinned handles */
-       sgen_init_pinning ();
-       sgen_client_binary_protocol_mark_start (GENERATION_NURSERY);
-       pin_from_roots (sgen_get_nursery_start (), nursery_next, ctx);
-       /* pin cemented objects */
-       sgen_pin_cemented_objects ();
-       /* identify pinned objects */
-       sgen_optimize_pin_queue ();
-       sgen_pinning_setup_section (nursery_section);
-
-       pin_objects_in_nursery (FALSE, ctx);
-       sgen_pinning_trim_queue_to_section (nursery_section);
-
-       TV_GETTIME (atv);
-       time_minor_pinning += TV_ELAPSED (btv, atv);
-       SGEN_LOG (2, "Finding pinned pointers: %zd in %ld usecs", sgen_get_pinned_count (), TV_ELAPSED (btv, atv));
-       SGEN_LOG (4, "Start scan with %zd pinned objects", sgen_get_pinned_count ());
-
-       /*
-        * FIXME: When we finish a concurrent collection we do a nursery collection first,
-        * as part of which we scan the card table.  Then, later, we scan the mod union
-        * cardtable.  We should only have to do one.
-        */
-       sj = (ScanJob*)sgen_thread_pool_job_alloc ("scan remset", job_remembered_set_scan, sizeof (ScanJob));
-       sj->ops = object_ops;
-       sgen_workers_enqueue_job (&sj->job);
-
-       /* we don't have complete write barrier yet, so we scan all the old generation sections */
-       TV_GETTIME (btv);
-       time_minor_scan_remsets += TV_ELAPSED (atv, btv);
-       SGEN_LOG (2, "Old generation scan: %ld usecs", TV_ELAPSED (atv, btv));
-
-       sgen_drain_gray_stack (-1, ctx);
-
-       /* FIXME: Why do we do this at this specific, seemingly random, point? */
-       sgen_client_collecting_minor (&fin_ready_queue, &critical_fin_queue);
-
-       TV_GETTIME (atv);
-       time_minor_scan_pinned += TV_ELAPSED (btv, atv);
-
-       enqueue_scan_from_roots_jobs (sgen_get_nursery_start (), nursery_next, object_ops);
-
-       TV_GETTIME (btv);
-       time_minor_scan_roots += TV_ELAPSED (atv, btv);
-
-       finish_gray_stack (GENERATION_NURSERY, ctx);
-
-       TV_GETTIME (atv);
-       time_minor_finish_gray_stack += TV_ELAPSED (btv, atv);
-       sgen_client_binary_protocol_mark_end (GENERATION_NURSERY);
-
-       if (objects_pinned) {
-               sgen_optimize_pin_queue ();
-               sgen_pinning_setup_section (nursery_section);
-       }
-
-       /* walk the pin_queue, build up the fragment list of free memory, unmark
-        * pinned objects as we go, memzero() the empty fragments so they are ready for the
-        * next allocations.
-        */
-       sgen_client_binary_protocol_reclaim_start (GENERATION_NURSERY);
-       fragment_total = sgen_build_nursery_fragments (nursery_section, unpin_queue);
-       if (!fragment_total)
-               degraded_mode = 1;
-
-       /* Clear TLABs for all threads */
-       sgen_clear_tlabs ();
-
-       sgen_client_binary_protocol_reclaim_end (GENERATION_NURSERY);
-       TV_GETTIME (btv);
-       time_minor_fragment_creation += TV_ELAPSED (atv, btv);
-       SGEN_LOG (2, "Fragment creation: %ld usecs, %lu bytes available", TV_ELAPSED (atv, btv), (unsigned long)fragment_total);
-
-       if (consistency_check_at_minor_collection)
-               sgen_check_major_refs ();
-
-       major_collector.finish_nursery_collection ();
-
-       TV_GETTIME (last_minor_collection_end_tv);
-       gc_stats.minor_gc_time += TV_ELAPSED (last_minor_collection_start_tv, last_minor_collection_end_tv);
-
-       sgen_debug_dump_heap ("minor", gc_stats.minor_gc_count - 1, NULL);
-
-       /* prepare the pin queue for the next collection */
-       sgen_finish_pinning ();
-       if (sgen_have_pending_finalizers ()) {
-               SGEN_LOG (4, "Finalizer-thread wakeup");
-               sgen_client_finalize_notify ();
-       }
-       sgen_pin_stats_reset ();
-       /* clear cemented hash */
-       sgen_cement_clear_below_threshold ();
-
-       g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
-
-       remset.finish_minor_collection ();
-
-       check_scan_starts ();
-
-       binary_protocol_flush_buffers (FALSE);
-
-       sgen_memgov_minor_collection_end ();
-
-       /*objects are late pinned because of lack of memory, so a major is a good call*/
-       needs_major = objects_pinned > 0;
-       current_collection_generation = -1;
-       objects_pinned = 0;
-
-       binary_protocol_collection_end (gc_stats.minor_gc_count - 1, GENERATION_NURSERY, 0, 0);
-
-       if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
-               sgen_check_nursery_objects_pinned (unpin_queue != NULL);
-
-       return needs_major;
-}
-
/*
 * Per-object callback used by scan_nursery_objects(): scan a single
 * nursery object with the context's scan_object operation.
 */
static void
scan_nursery_objects_callback (char *obj, size_t size, ScanCopyContext *ctx)
{
	/*
	 * This is called on all objects in the nursery, including pinned ones, so we need
	 * to use sgen_obj_get_descriptor_safe(), which masks out the vtable tag bits.
	 */
	ctx->ops->scan_object (obj, sgen_obj_get_descriptor_safe (obj), ctx->queue);
}
-
/* Scan every object in the nursery section, pinned or not, with `ctx`. */
static void
scan_nursery_objects (ScanCopyContext ctx)
{
	sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
			(IterateObjectCallbackFunc)scan_nursery_objects_callback, (void*)&ctx, FALSE);
}
-
/*
 * The three modes in which major_copy_or_mark_from_roots() can run:
 * a serial (non-concurrent) major collection, or the start/finish
 * pauses of a concurrent one.
 */
typedef enum {
	COPY_OR_MARK_FROM_ROOTS_SERIAL,
	COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT,
	COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT
} CopyOrMarkFromRootsMode;

/*
 * Pin and copy/mark from all roots for a major collection.
 *
 * `mode` must agree with `concurrent_collection_in_progress` (asserted
 * below).  If `old_next_pin_slot` is non-NULL, the pin-queue count
 * after pinning is stored there.  `scan_whole_nursery` forces a full
 * nursery scan and is only valid in the finish-concurrent mode.
 */
static void
major_copy_or_mark_from_roots (size_t *old_next_pin_slot, CopyOrMarkFromRootsMode mode, gboolean scan_whole_nursery, SgenObjectOperations *object_ops)
{
	LOSObject *bigobj;
	TV_DECLARE (atv);
	TV_DECLARE (btv);
	/* FIXME: only use these values for the precise scan
	 * note that to_space pointers should be excluded anyway...
	 */
	char *heap_start = NULL;
	char *heap_end = (char*)-1;
	ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (object_ops, WORKERS_DISTRIBUTE_GRAY_QUEUE);
	gboolean concurrent = mode != COPY_OR_MARK_FROM_ROOTS_SERIAL;

	SGEN_ASSERT (0, !!concurrent == !!concurrent_collection_in_progress, "We've been called with the wrong mode.");

	if (scan_whole_nursery)
		SGEN_ASSERT (0, mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT, "Scanning whole nursery only makes sense when we're finishing a concurrent collection.");

	if (concurrent) {
		/*This cleans up unused fragments */
		sgen_nursery_allocator_prepare_for_pinning ();

		if (do_concurrent_checks)
			sgen_debug_check_nursery_is_clean ();
	} else {
		/* The concurrent collector doesn't touch the nursery. */
		sgen_nursery_alloc_prepare_for_major ();
	}

	init_gray_queue ();

	TV_GETTIME (atv);

	/* Pinning depends on this */
	sgen_clear_nursery_fragments ();

	if (whole_heap_check_before_collection)
		sgen_check_whole_heap (mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT);

	TV_GETTIME (btv);
	time_major_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);

	if (!sgen_collection_is_concurrent ())
		nursery_section->next_data = sgen_get_nursery_end ();
	/* we should also coalesce scanning from sections close to each other
	 * and deal with pointers outside of the sections later.
	 */

	objects_pinned = 0;

	sgen_client_pre_collection_checks ();

	if (!concurrent) {
		/* Remsets are not useful for a major collection */
		remset.clear_cards ();
	}

	sgen_process_fin_stage_entries ();
	sgen_process_dislink_stage_entries ();

	TV_GETTIME (atv);
	sgen_init_pinning ();
	SGEN_LOG (6, "Collecting pinned addresses");
	pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, ctx);

	if (mode != COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT) {
		if (major_collector.is_concurrent) {
			/*
			 * The concurrent major collector cannot evict
			 * yet, so we need to pin cemented objects to
			 * not break some asserts.
			 *
			 * FIXME: We could evict now!
			 */
			sgen_pin_cemented_objects ();
		}
	}

	sgen_optimize_pin_queue ();

	sgen_client_collecting_major_1 ();

	/*
	 * pin_queue now contains all candidate pointers, sorted and
	 * uniqued.  We must do two passes now to figure out which
	 * objects are pinned.
	 *
	 * The first is to find within the pin_queue the area for each
	 * section.  This requires that the pin_queue be sorted.  We
	 * also process the LOS objects and pinned chunks here.
	 *
	 * The second, destructive, pass is to reduce the section
	 * areas to pointers to the actually pinned objects.
	 */
	SGEN_LOG (6, "Pinning from sections");
	/* first pass for the sections */
	sgen_find_section_pin_queue_start_end (nursery_section);
	/* identify possible pointers to the inside of large objects */
	SGEN_LOG (6, "Pinning from large objects");
	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
		size_t dummy;
		if (sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + sgen_los_object_size (bigobj), &dummy, &dummy)) {
			binary_protocol_pin (bigobj->data, (gpointer)LOAD_VTABLE (bigobj->data), safe_object_get_size (((GCObject*)(bigobj->data))));

			if (sgen_los_object_is_pinned (bigobj->data)) {
				SGEN_ASSERT (0, mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT, "LOS objects can only be pinned here after concurrent marking.");
				continue;
			}
			sgen_los_pin_object (bigobj->data);
			if (SGEN_OBJECT_HAS_REFERENCES (bigobj->data))
				GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data, sgen_obj_get_descriptor (bigobj->data));
			sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((GCObject*) bigobj->data));
			SGEN_LOG (6, "Marked large object %p (%s) size: %lu from roots", bigobj->data,
					sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (bigobj->data)),
					(unsigned long)sgen_los_object_size (bigobj));

			sgen_client_pinned_los_object (bigobj->data);
		}
	}
	/* second pass for the sections */

	/*
	 * Concurrent mark never follows references into the nursery.  In the start and
	 * finish pauses we must scan live nursery objects, though.
	 *
	 * In the finish pause we do this conservatively by scanning all nursery objects.
	 * Previously we would only scan pinned objects here.  We assumed that all objects
	 * that were pinned during the nursery collection immediately preceding this finish
	 * mark would be pinned again here.  Due to the way we get the stack end for the GC
	 * thread, however, that's not necessarily the case: we scan part of the stack used
	 * by the GC itself, which changes constantly, so pinning isn't entirely
	 * deterministic.
	 *
	 * The split nursery also complicates things because non-pinned objects can survive
	 * in the nursery.  That's why we need to do a full scan of the nursery for it, too.
	 *
	 * In the future we shouldn't do a preceding nursery collection at all and instead
	 * do the finish pause with promotion from the nursery.
	 *
	 * A further complication arises when we have late-pinned objects from the preceding
	 * nursery collection.  Those are the result of being out of memory when trying to
	 * evacuate objects.  They won't be found from the roots, so we just scan the whole
	 * nursery.
	 *
	 * Non-concurrent mark evacuates from the nursery, so it's
	 * sufficient to just scan pinned nursery objects.
	 */
	if (scan_whole_nursery || mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT || (concurrent && sgen_minor_collector.is_split)) {
		scan_nursery_objects (ctx);
	} else {
		pin_objects_in_nursery (concurrent, ctx);
		if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
			sgen_check_nursery_objects_pinned (mode != COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT);
	}

	major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
	if (old_next_pin_slot)
		*old_next_pin_slot = sgen_get_pinned_count ();

	TV_GETTIME (btv);
	time_major_pinning += TV_ELAPSED (atv, btv);
	SGEN_LOG (2, "Finding pinned pointers: %zd in %ld usecs", sgen_get_pinned_count (), TV_ELAPSED (atv, btv));
	SGEN_LOG (4, "Start scan with %zd pinned objects", sgen_get_pinned_count ());

	major_collector.init_to_space ();

	/*
	 * The concurrent collector doesn't move objects, neither on
	 * the major heap nor in the nursery, so we can mark even
	 * before pinning has finished.  For the non-concurrent
	 * collector we start the workers after pinning.
	 */
	if (mode != COPY_OR_MARK_FROM_ROOTS_SERIAL) {
		SGEN_ASSERT (0, sgen_workers_all_done (), "Why are the workers not done when we start or finish a major collection?");
		sgen_workers_start_all_workers (object_ops);
		gray_queue_enable_redirect (WORKERS_DISTRIBUTE_GRAY_QUEUE);
	}

#ifdef SGEN_DEBUG_INTERNAL_ALLOC
	main_gc_thread = mono_native_thread_self ();
#endif

	sgen_client_collecting_major_2 ();

	TV_GETTIME (atv);
	time_major_scan_pinned += TV_ELAPSED (btv, atv);

	sgen_client_collecting_major_3 (&fin_ready_queue, &critical_fin_queue);

	/*
	 * FIXME: is this the right context?  It doesn't seem to contain a copy function
	 * unless we're concurrent.
	 */
	enqueue_scan_from_roots_jobs (heap_start, heap_end, object_ops);

	TV_GETTIME (btv);
	time_major_scan_roots += TV_ELAPSED (atv, btv);

	if (mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT) {
		ScanJob *sj;

		/* Mod union card table */
		sj = (ScanJob*)sgen_thread_pool_job_alloc ("scan mod union cardtable", job_scan_major_mod_union_card_table, sizeof (ScanJob));
		sj->ops = object_ops;
		sgen_workers_enqueue_job (&sj->job);

		sj = (ScanJob*)sgen_thread_pool_job_alloc ("scan LOS mod union cardtable", job_scan_los_mod_union_card_table, sizeof (ScanJob));
		sj->ops = object_ops;
		sgen_workers_enqueue_job (&sj->job);

		TV_GETTIME (atv);
		time_major_scan_mod_union += TV_ELAPSED (btv, atv);
	}
}
-
/*
 * Finish the copy/mark phase of a major collection.  No-op unless a
 * concurrent collection is in progress.
 */
static void
major_finish_copy_or_mark (void)
{
	if (!concurrent_collection_in_progress)
		return;

	/*
	 * Prepare the pin queue for the next collection.  Since pinning runs on the worker
	 * threads we must wait for the jobs to finish before we can reset it.
	 */
	sgen_workers_wait_for_jobs_finished ();
	sgen_finish_pinning ();

	sgen_pin_stats_reset ();

	if (do_concurrent_checks)
		sgen_debug_check_nursery_is_clean ();
}
-
/*
 * Start a major collection: select serial vs. concurrent-start object
 * operations, reset cementing/pinning state and memory-governor
 * bookkeeping, then do the initial copy/mark from roots.  If
 * `old_next_pin_slot` is non-NULL it receives the pin count (see
 * major_copy_or_mark_from_roots()).
 */
static void
major_start_collection (gboolean concurrent, size_t *old_next_pin_slot)
{
	SgenObjectOperations *object_ops;

	binary_protocol_collection_begin (gc_stats.major_gc_count, GENERATION_OLD);

	current_collection_generation = GENERATION_OLD;

	g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));

	sgen_cement_reset ();

	if (concurrent) {
		/* Concurrent mode requires a collector that supports it. */
		g_assert (major_collector.is_concurrent);
		concurrent_collection_in_progress = TRUE;

		object_ops = &major_collector.major_ops_concurrent_start;
	} else {
		object_ops = &major_collector.major_ops_serial;
	}

	reset_pinned_from_failed_allocation ();

	sgen_memgov_major_collection_start ();

	//count_ref_nonref_objs ();
	//consistency_check ();

	check_scan_starts ();

	degraded_mode = 0;
	SGEN_LOG (1, "Start major collection %d", gc_stats.major_gc_count);
	gc_stats.major_gc_count ++;

	if (major_collector.start_major_collection)
		major_collector.start_major_collection ();

	major_copy_or_mark_from_roots (old_next_pin_slot, concurrent ? COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT : COPY_OR_MARK_FROM_ROOTS_SERIAL, FALSE, object_ops);
	major_finish_copy_or_mark ();
}
-
-static void
-major_finish_collection (const char *reason, size_t old_next_pin_slot, gboolean forced, gboolean scan_whole_nursery)
-{
-       ScannedObjectCounts counts;
-       SgenObjectOperations *object_ops;
-       TV_DECLARE (atv);
-       TV_DECLARE (btv);
-
-       TV_GETTIME (btv);
-
-       if (concurrent_collection_in_progress) {
-               object_ops = &major_collector.major_ops_concurrent_finish;
-
-               major_copy_or_mark_from_roots (NULL, COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT, scan_whole_nursery, object_ops);
-
-               major_finish_copy_or_mark ();
-
-               sgen_workers_join ();
-
-               SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (&gray_queue), "Why is the gray queue not empty after workers have finished working?");
-
-#ifdef SGEN_DEBUG_INTERNAL_ALLOC
-               main_gc_thread = NULL;
-#endif
-
-               if (do_concurrent_checks)
-                       sgen_debug_check_nursery_is_clean ();
-       } else {
-               SGEN_ASSERT (0, !scan_whole_nursery, "scan_whole_nursery only applies to concurrent collections");
-               object_ops = &major_collector.major_ops_serial;
-       }
-
-       /*
-        * The workers have stopped so we need to finish gray queue
-        * work that might result from finalization in the main GC
-        * thread.  Redirection must therefore be turned off.
-        */
-       sgen_gray_object_queue_disable_alloc_prepare (&gray_queue);
-       g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
-
-       /* all the objects in the heap */
-       finish_gray_stack (GENERATION_OLD, CONTEXT_FROM_OBJECT_OPERATIONS (object_ops, &gray_queue));
-       TV_GETTIME (atv);
-       time_major_finish_gray_stack += TV_ELAPSED (btv, atv);
-
-       SGEN_ASSERT (0, sgen_workers_all_done (), "Can't have workers working after joining");
-
-       if (objects_pinned) {
-               g_assert (!concurrent_collection_in_progress);
-
-               /*
-                * This is slow, but we just OOM'd.
-                *
-                * See comment at `sgen_pin_queue_clear_discarded_entries` for how the pin
-                * queue is laid out at this point.
-                */
-               sgen_pin_queue_clear_discarded_entries (nursery_section, old_next_pin_slot);
-               /*
-                * We need to reestablish all pinned nursery objects in the pin queue
-                * because they're needed for fragment creation.  Unpinning happens by
-                * walking the whole queue, so it's not necessary to reestablish where major
-                * heap block pins are - all we care is that they're still in there
-                * somewhere.
-                */
-               sgen_optimize_pin_queue ();
-               sgen_find_section_pin_queue_start_end (nursery_section);
-               objects_pinned = 0;
-       }
-
-       reset_heap_boundaries ();
-       sgen_update_heap_boundaries ((mword)sgen_get_nursery_start (), (mword)sgen_get_nursery_end ());
-
-       if (!concurrent_collection_in_progress) {
-               /* walk the pin_queue, build up the fragment list of free memory, unmark
-                * pinned objects as we go, memzero() the empty fragments so they are ready for the
-                * next allocations.
-                */
-               if (!sgen_build_nursery_fragments (nursery_section, NULL))
-                       degraded_mode = 1;
-
-               /* prepare the pin queue for the next collection */
-               sgen_finish_pinning ();
-
-               /* Clear TLABs for all threads */
-               sgen_clear_tlabs ();
-
-               sgen_pin_stats_reset ();
-       }
-
-       sgen_cement_clear_below_threshold ();
-
-       if (check_mark_bits_after_major_collection)
-               sgen_check_heap_marked (concurrent_collection_in_progress);
-
-       TV_GETTIME (btv);
-       time_major_fragment_creation += TV_ELAPSED (atv, btv);
-
-       binary_protocol_sweep_begin (GENERATION_OLD, !major_collector.sweeps_lazily);
-
-       TV_GETTIME (atv);
-       time_major_free_bigobjs += TV_ELAPSED (btv, atv);
-
-       sgen_los_sweep ();
-
-       TV_GETTIME (btv);
-       time_major_los_sweep += TV_ELAPSED (atv, btv);
-
-       major_collector.sweep ();
-
-       binary_protocol_sweep_end (GENERATION_OLD, !major_collector.sweeps_lazily);
-
-       TV_GETTIME (atv);
-       time_major_sweep += TV_ELAPSED (btv, atv);
-
-       sgen_debug_dump_heap ("major", gc_stats.major_gc_count - 1, reason);
-
-       if (sgen_have_pending_finalizers ()) {
-               SGEN_LOG (4, "Finalizer-thread wakeup");
-               sgen_client_finalize_notify ();
-       }
-
-       g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
-
-       sgen_memgov_major_collection_end (forced);
-       current_collection_generation = -1;
-
-       memset (&counts, 0, sizeof (ScannedObjectCounts));
-       major_collector.finish_major_collection (&counts);
-
-       g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
-
-       SGEN_ASSERT (0, sgen_workers_all_done (), "Can't have workers working after major collection has finished");
-       if (concurrent_collection_in_progress)
-               concurrent_collection_in_progress = FALSE;
-
-       check_scan_starts ();
-
-       binary_protocol_flush_buffers (FALSE);
-
-       //consistency_check ();
-
-       binary_protocol_collection_end (gc_stats.major_gc_count - 1, GENERATION_OLD, counts.num_scanned_objects, counts.num_unique_scanned_objects);
-}
-
-static gboolean
-major_do_collection (const char *reason, gboolean forced)
-{
-       TV_DECLARE (time_start);
-       TV_DECLARE (time_end);
-       size_t old_next_pin_slot;
-
-       if (disable_major_collections)
-               return FALSE;
-
-       if (major_collector.get_and_reset_num_major_objects_marked) {
-               long long num_marked = major_collector.get_and_reset_num_major_objects_marked ();
-               g_assert (!num_marked);
-       }
-
-       /* world must be stopped already */
-       TV_GETTIME (time_start);
-
-       major_start_collection (FALSE, &old_next_pin_slot);
-       major_finish_collection (reason, old_next_pin_slot, forced, FALSE);
-
-       TV_GETTIME (time_end);
-       gc_stats.major_gc_time += TV_ELAPSED (time_start, time_end);
-
-       /* FIXME: also report this to the user, preferably in gc-end. */
-       if (major_collector.get_and_reset_num_major_objects_marked)
-               major_collector.get_and_reset_num_major_objects_marked ();
-
-       return bytes_pinned_from_failed_allocation > 0;
-}
-
-static void
-major_start_concurrent_collection (const char *reason)
-{
-       TV_DECLARE (time_start);
-       TV_DECLARE (time_end);
-       long long num_objects_marked;
-
-       if (disable_major_collections)
-               return;
-
-       TV_GETTIME (time_start);
-       SGEN_TV_GETTIME (time_major_conc_collection_start);
-
-       num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
-       g_assert (num_objects_marked == 0);
-
-       binary_protocol_concurrent_start ();
-
-       // FIXME: store reason and pass it when finishing
-       major_start_collection (TRUE, NULL);
-
-       gray_queue_redirect (&gray_queue);
-
-       num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
-
-       TV_GETTIME (time_end);
-       gc_stats.major_gc_time += TV_ELAPSED (time_start, time_end);
-
-       current_collection_generation = -1;
-}
-
-/*
- * Returns whether the major collection has finished.
- */
-static gboolean
-major_should_finish_concurrent_collection (void)
-{
-       SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (&gray_queue), "Why is the gray queue not empty before we have started doing anything?");
-       return sgen_workers_all_done ();
-}
-
-static void
-major_update_concurrent_collection (void)
-{
-       TV_DECLARE (total_start);
-       TV_DECLARE (total_end);
-
-       TV_GETTIME (total_start);
-
-       binary_protocol_concurrent_update ();
-
-       major_collector.update_cardtable_mod_union ();
-       sgen_los_update_cardtable_mod_union ();
-
-       TV_GETTIME (total_end);
-       gc_stats.major_gc_time += TV_ELAPSED (total_start, total_end);
-}
-
-static void
-major_finish_concurrent_collection (gboolean forced)
-{
-       TV_DECLARE (total_start);
-       TV_DECLARE (total_end);
-       gboolean late_pinned;
-       SgenGrayQueue unpin_queue;
-       memset (&unpin_queue, 0, sizeof (unpin_queue));
-
-       TV_GETTIME (total_start);
-
-       binary_protocol_concurrent_finish ();
-
-       /*
-        * The major collector can add global remsets which are processed in the finishing
-        * nursery collection, below.  That implies that the workers must have finished
-        * marking before the nursery collection is allowed to run, otherwise we might miss
-        * some remsets.
-        */
-       sgen_workers_wait ();
-
-       SGEN_TV_GETTIME (time_major_conc_collection_end);
-       gc_stats.major_gc_time_concurrent += SGEN_TV_ELAPSED (time_major_conc_collection_start, time_major_conc_collection_end);
-
-       major_collector.update_cardtable_mod_union ();
-       sgen_los_update_cardtable_mod_union ();
-
-       late_pinned = collect_nursery (&unpin_queue, TRUE);
-
-       if (mod_union_consistency_check)
-               sgen_check_mod_union_consistency ();
-
-       current_collection_generation = GENERATION_OLD;
-       major_finish_collection ("finishing", -1, forced, late_pinned);
-
-       if (whole_heap_check_before_collection)
-               sgen_check_whole_heap (FALSE);
-
-       unpin_objects_from_queue (&unpin_queue);
-       sgen_gray_object_queue_deinit (&unpin_queue);
-
-       TV_GETTIME (total_end);
-       gc_stats.major_gc_time += TV_ELAPSED (total_start, total_end) - TV_ELAPSED (last_minor_collection_start_tv, last_minor_collection_end_tv);
-
-       current_collection_generation = -1;
-}
-
-/*
- * Ensure an allocation request for @size will succeed by freeing enough memory.
- *
- * LOCKING: The GC lock MUST be held.
- */
-void
-sgen_ensure_free_space (size_t size)
-{
-       int generation_to_collect = -1;
-       const char *reason = NULL;
-
-       if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
-               if (sgen_need_major_collection (size)) {
-                       reason = "LOS overflow";
-                       generation_to_collect = GENERATION_OLD;
-               }
-       } else {
-               if (degraded_mode) {
-                       if (sgen_need_major_collection (size)) {
-                               reason = "Degraded mode overflow";
-                               generation_to_collect = GENERATION_OLD;
-                       }
-               } else if (sgen_need_major_collection (size)) {
-                       reason = "Minor allowance";
-                       generation_to_collect = GENERATION_OLD;
-               } else {
-                       generation_to_collect = GENERATION_NURSERY;
-                       reason = "Nursery full";                        
-               }
-       }
-
-       if (generation_to_collect == -1) {
-               if (concurrent_collection_in_progress && sgen_workers_all_done ()) {
-                       generation_to_collect = GENERATION_OLD;
-                       reason = "Finish concurrent collection";
-               }
-       }
-
-       if (generation_to_collect == -1)
-               return;
-       sgen_perform_collection (size, generation_to_collect, reason, FALSE);
-}
-
-/*
- * LOCKING: Assumes the GC lock is held.
- */
-void
-sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish)
-{
-       TV_DECLARE (gc_start);
-       TV_DECLARE (gc_end);
-       TV_DECLARE (gc_total_start);
-       TV_DECLARE (gc_total_end);
-       GGTimingInfo infos [2];
-       int overflow_generation_to_collect = -1;
-       int oldest_generation_collected = generation_to_collect;
-       const char *overflow_reason = NULL;
-
-       binary_protocol_collection_requested (generation_to_collect, requested_size, wait_to_finish ? 1 : 0);
-
-       SGEN_ASSERT (0, generation_to_collect == GENERATION_NURSERY || generation_to_collect == GENERATION_OLD, "What generation is this?");
-
-       TV_GETTIME (gc_start);
-
-       sgen_stop_world (generation_to_collect);
-
-       TV_GETTIME (gc_total_start);
-
-       if (concurrent_collection_in_progress) {
-               /*
-                * We update the concurrent collection.  If it finished, we're done.  If
-                * not, and we've been asked to do a nursery collection, we do that.
-                */
-               gboolean finish = major_should_finish_concurrent_collection () || (wait_to_finish && generation_to_collect == GENERATION_OLD);
-
-               if (finish) {
-                       major_finish_concurrent_collection (wait_to_finish);
-                       oldest_generation_collected = GENERATION_OLD;
-               } else {
-                       sgen_workers_signal_start_nursery_collection_and_wait ();
-
-                       major_update_concurrent_collection ();
-                       if (generation_to_collect == GENERATION_NURSERY)
-                               collect_nursery (NULL, FALSE);
-
-                       sgen_workers_signal_finish_nursery_collection ();
-               }
-
-               goto done;
-       }
-
-       /*
-        * If we've been asked to do a major collection, and the major collector wants to
-        * run synchronously (to evacuate), we set the flag to do that.
-        */
-       if (generation_to_collect == GENERATION_OLD &&
-                       allow_synchronous_major &&
-                       major_collector.want_synchronous_collection &&
-                       *major_collector.want_synchronous_collection) {
-               wait_to_finish = TRUE;
-       }
-
-       SGEN_ASSERT (0, !concurrent_collection_in_progress, "Why did this not get handled above?");
-
-       /*
-        * There's no concurrent collection in progress.  Collect the generation we're asked
-        * to collect.  If the major collector is concurrent and we're not forced to wait,
-        * start a concurrent collection.
-        */
-       // FIXME: extract overflow reason
-       if (generation_to_collect == GENERATION_NURSERY) {
-               if (collect_nursery (NULL, FALSE)) {
-                       overflow_generation_to_collect = GENERATION_OLD;
-                       overflow_reason = "Minor overflow";
-               }
-       } else {
-               if (major_collector.is_concurrent && !wait_to_finish) {
-                       collect_nursery (NULL, FALSE);
-                       major_start_concurrent_collection (reason);
-                       // FIXME: set infos[0] properly
-                       goto done;
-               }
-
-               if (major_do_collection (reason, wait_to_finish)) {
-                       overflow_generation_to_collect = GENERATION_NURSERY;
-                       overflow_reason = "Excessive pinning";
-               }
-       }
-
-       TV_GETTIME (gc_end);
-
-       memset (infos, 0, sizeof (infos));
-       infos [0].generation = generation_to_collect;
-       infos [0].reason = reason;
-       infos [0].is_overflow = FALSE;
-       infos [1].generation = -1;
-       infos [0].total_time = SGEN_TV_ELAPSED (gc_start, gc_end);
-
-       SGEN_ASSERT (0, !concurrent_collection_in_progress, "Why did this not get handled above?");
-
-       if (overflow_generation_to_collect != -1) {
-               /*
-                * We need to do an overflow collection, either because we ran out of memory
-                * or the nursery is fully pinned.
-                */
-
-               infos [1].generation = overflow_generation_to_collect;
-               infos [1].reason = overflow_reason;
-               infos [1].is_overflow = TRUE;
-               gc_start = gc_end;
-
-               if (overflow_generation_to_collect == GENERATION_NURSERY)
-                       collect_nursery (NULL, FALSE);
-               else
-                       major_do_collection (overflow_reason, wait_to_finish);
-
-               TV_GETTIME (gc_end);
-               infos [1].total_time = SGEN_TV_ELAPSED (gc_start, gc_end);
-
-               oldest_generation_collected = MAX (oldest_generation_collected, overflow_generation_to_collect);
-       }
-
-       SGEN_LOG (2, "Heap size: %lu, LOS size: %lu", (unsigned long)sgen_gc_get_total_heap_allocation (), (unsigned long)los_memory_usage);
-
-       /* this also sets the proper pointers for the next allocation */
-       if (generation_to_collect == GENERATION_NURSERY && !sgen_can_alloc_size (requested_size)) {
-               /* TypeBuilder and MonoMethod are killing mcs with fragmentation */
-               SGEN_LOG (1, "nursery collection didn't find enough room for %zd alloc (%zd pinned)", requested_size, sgen_get_pinned_count ());
-               sgen_dump_pin_queue ();
-               degraded_mode = 1;
-       }
-
- done:
-       g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
-
-       TV_GETTIME (gc_total_end);
-       time_max = MAX (time_max, TV_ELAPSED (gc_total_start, gc_total_end));
-
-       sgen_restart_world (oldest_generation_collected, infos);
-}
-
-/*
- * ######################################################################
- * ########  Memory allocation from the OS
- * ######################################################################
- * This section of code deals with getting memory from the OS and
- * allocating memory for GC-internal data structures.
- * Internal memory can be handled with a freelist for small objects.
- */
-
-/*
- * Debug reporting.
- */
-G_GNUC_UNUSED static void
-report_internal_mem_usage (void)
-{
-       printf ("Internal memory usage:\n");
-       sgen_report_internal_mem_usage ();
-       printf ("Pinned memory usage:\n");
-       major_collector.report_pinned_memory_usage ();
-}
-
-/*
- * ######################################################################
- * ########  Finalization support
- * ######################################################################
- */
-
-/*
- * If the object has been forwarded it means it's still referenced from a root. 
- * If it is pinned it's still alive as well.
- * A LOS object is only alive if we have pinned it.
- * Return TRUE if @obj is ready to be finalized.
- */
-static inline gboolean
-sgen_is_object_alive (void *object)
-{
-       if (ptr_in_nursery (object))
-               return sgen_nursery_is_object_alive (object);
-
-       return sgen_major_is_object_alive (object);
-}
-
-/*
- * This function returns true if @object is either alive and belongs to the
- * current collection - major collections are full heap, so old gen objects
- * are never alive during a minor collection.
- */
-static inline int
-sgen_is_object_alive_and_on_current_collection (char *object)
-{
-       if (ptr_in_nursery (object))
-               return sgen_nursery_is_object_alive (object);
-
-       if (current_collection_generation == GENERATION_NURSERY)
-               return FALSE;
-
-       return sgen_major_is_object_alive (object);
-}
-
-
-gboolean
-sgen_gc_is_object_ready_for_finalization (void *object)
-{
-       return !sgen_is_object_alive (object);
-}
-
-void
-sgen_queue_finalization_entry (GCObject *obj)
-{
-       gboolean critical = sgen_client_object_has_critical_finalizer (obj);
-
-       sgen_pointer_queue_add (critical ? &critical_fin_queue : &fin_ready_queue, obj);
-
-       sgen_client_object_queued_for_finalization (obj);
-}
-
-gboolean
-sgen_object_is_live (void *obj)
-{
-       return sgen_is_object_alive_and_on_current_collection (obj);
-}
-
-/*
- * `System.GC.WaitForPendingFinalizers` first checks `sgen_have_pending_finalizers()` to
- * determine whether it can exit quickly.  The latter must therefore only return FALSE if
- * all finalizers have really finished running.
- *
- * `sgen_gc_invoke_finalizers()` first dequeues a finalizable object, and then finalizes it.
- * This means that just checking whether the queues are empty leaves the possibility that an
- * object might have been dequeued but not yet finalized.  That's why we need the additional
- * flag `pending_unqueued_finalizer`.
- */
-
-static volatile gboolean pending_unqueued_finalizer = FALSE;
-
-int
-sgen_gc_invoke_finalizers (void)
-{
-       int count = 0;
-
-       g_assert (!pending_unqueued_finalizer);
-
-       /* FIXME: batch to reduce lock contention */
-       while (sgen_have_pending_finalizers ()) {
-               void *obj;
-
-               LOCK_GC;
-
-               /*
-                * We need to set `pending_unqueued_finalizer` before dequeing the
-                * finalizable object.
-                */
-               if (!sgen_pointer_queue_is_empty (&fin_ready_queue)) {
-                       pending_unqueued_finalizer = TRUE;
-                       mono_memory_write_barrier ();
-                       obj = sgen_pointer_queue_pop (&fin_ready_queue);
-               } else if (!sgen_pointer_queue_is_empty (&critical_fin_queue)) {
-                       pending_unqueued_finalizer = TRUE;
-                       mono_memory_write_barrier ();
-                       obj = sgen_pointer_queue_pop (&critical_fin_queue);
-               } else {
-                       obj = NULL;
-               }
-
-               if (obj)
-                       SGEN_LOG (7, "Finalizing object %p (%s)", obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj)));
-
-               UNLOCK_GC;
-
-               if (!obj)
-                       break;
-
-               count++;
-               /* the object is on the stack so it is pinned */
-               /*g_print ("Calling finalizer for object: %p (%s)\n", obj, sgen_client_object_safe_name (obj));*/
-               sgen_client_run_finalize (obj);
-       }
-
-       if (pending_unqueued_finalizer) {
-               mono_memory_write_barrier ();
-               pending_unqueued_finalizer = FALSE;
-       }
-
-       return count;
-}
-
-gboolean
-sgen_have_pending_finalizers (void)
-{
-       return pending_unqueued_finalizer || !sgen_pointer_queue_is_empty (&fin_ready_queue) || !sgen_pointer_queue_is_empty (&critical_fin_queue);
-}
-
-/*
- * ######################################################################
- * ########  registered roots support
- * ######################################################################
- */
-
-/*
- * We do not coalesce roots.
- */
-int
-sgen_register_root (char *start, size_t size, void *descr, int root_type)
-{
-       RootRecord new_root;
-       int i;
-       LOCK_GC;
-       for (i = 0; i < ROOT_TYPE_NUM; ++i) {
-               RootRecord *root = sgen_hash_table_lookup (&roots_hash [i], start);
-               /* we allow changing the size and the descriptor (for thread statics etc) */
-               if (root) {
-                       size_t old_size = root->end_root - start;
-                       root->end_root = start + size;
-                       g_assert (((root->root_desc != 0) && (descr != NULL)) ||
-                                         ((root->root_desc == 0) && (descr == NULL)));
-                       root->root_desc = (mword)descr;
-                       roots_size += size;
-                       roots_size -= old_size;
-                       UNLOCK_GC;
-                       return TRUE;
-               }
-       }
-
-       new_root.end_root = start + size;
-       new_root.root_desc = (mword)descr;
-
-       sgen_hash_table_replace (&roots_hash [root_type], start, &new_root, NULL);
-       roots_size += size;
-
-       SGEN_LOG (3, "Added root for range: %p-%p, descr: %p  (%d/%d bytes)", start, new_root.end_root, descr, (int)size, (int)roots_size);
-
-       UNLOCK_GC;
-       return TRUE;
-}
-
-void
-sgen_deregister_root (char* addr)
-{
-       int root_type;
-       RootRecord root;
-
-       LOCK_GC;
-       for (root_type = 0; root_type < ROOT_TYPE_NUM; ++root_type) {
-               if (sgen_hash_table_remove (&roots_hash [root_type], addr, &root))
-                       roots_size -= (root.end_root - addr);
-       }
-       UNLOCK_GC;
-}
-
-/*
- * ######################################################################
- * ########  Thread handling (stop/start code)
- * ######################################################################
- */
-
-int
-sgen_get_current_collection_generation (void)
-{
-       return current_collection_generation;
-}
-
-void*
-sgen_thread_register (SgenThreadInfo* info, void *stack_bottom_fallback)
-{
-#ifndef HAVE_KW_THREAD
-       info->tlab_start = info->tlab_next = info->tlab_temp_end = info->tlab_real_end = NULL;
-#endif
-
-       sgen_init_tlab_info (info);
-
-       sgen_client_thread_register (info, stack_bottom_fallback);
-
-       return info;
-}
-
-void
-sgen_thread_unregister (SgenThreadInfo *p)
-{
-       sgen_client_thread_unregister (p);
-}
-
-/*
- * ######################################################################
- * ########  Write barriers
- * ######################################################################
- */
-
-/*
- * Note: the write barriers first do the needed GC work and then do the actual store:
- * this way the value is visible to the conservative GC scan after the write barrier
- * itself. If a GC interrupts the barrier in the middle, value will be kept alive by
- * the conservative scan, otherwise by the remembered set scan.
- */
-
-void
-mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
-{
-       HEAVY_STAT (++stat_wbarrier_arrayref_copy);
-       /*This check can be done without taking a lock since dest_ptr array is pinned*/
-       if (ptr_in_nursery (dest_ptr) || count <= 0) {
-               mono_gc_memmove_aligned (dest_ptr, src_ptr, count * sizeof (gpointer));
-               return;
-       }
-
-#ifdef SGEN_HEAVY_BINARY_PROTOCOL
-       if (binary_protocol_is_heavy_enabled ()) {
-               int i;
-               for (i = 0; i < count; ++i) {
-                       gpointer dest = (gpointer*)dest_ptr + i;
-                       gpointer obj = *((gpointer*)src_ptr + i);
-                       if (obj)
-                               binary_protocol_wbarrier (dest, obj, (gpointer)LOAD_VTABLE (obj));
-               }
-       }
-#endif
-
-       remset.wbarrier_arrayref_copy (dest_ptr, src_ptr, count);
-}
-
-void
-mono_gc_wbarrier_generic_nostore (gpointer ptr)
-{
-       gpointer obj;
-
-       HEAVY_STAT (++stat_wbarrier_generic_store);
-
-       sgen_client_wbarrier_generic_nostore_check (ptr);
-
-       obj = *(gpointer*)ptr;
-       if (obj)
-               binary_protocol_wbarrier (ptr, obj, (gpointer)LOAD_VTABLE (obj));
-
-       /*
-        * We need to record old->old pointer locations for the
-        * concurrent collector.
-        */
-       if (!ptr_in_nursery (obj) && !concurrent_collection_in_progress) {
-               SGEN_LOG (8, "Skipping remset at %p", ptr);
-               return;
-       }
-
-       SGEN_LOG (8, "Adding remset at %p", ptr);
-
-       remset.wbarrier_generic_nostore (ptr);
-}
-
-void
-mono_gc_wbarrier_generic_store (gpointer ptr, GCObject* value)
-{
-       SGEN_LOG (8, "Wbarrier store at %p to %p (%s)", ptr, value, value ? sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (value)) : "null");
-       SGEN_UPDATE_REFERENCE_ALLOW_NULL (ptr, value);
-       if (ptr_in_nursery (value))
-               mono_gc_wbarrier_generic_nostore (ptr);
-       sgen_dummy_use (value);
-}
-
-/* Same as mono_gc_wbarrier_generic_store () but performs the store
- * as an atomic operation with release semantics.
- */
-void
-mono_gc_wbarrier_generic_store_atomic (gpointer ptr, GCObject *value)
-{
-       HEAVY_STAT (++stat_wbarrier_generic_store_atomic);
-
-       SGEN_LOG (8, "Wbarrier atomic store at %p to %p (%s)", ptr, value, value ? sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (value)) : "null");
-
-       InterlockedWritePointer (ptr, value);
-
-       if (ptr_in_nursery (value))
-               mono_gc_wbarrier_generic_nostore (ptr);
-
-       sgen_dummy_use (value);
-}
-
-void
-sgen_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
-{
-       GCObject **dest = _dest;
-       GCObject **src = _src;
-
-       while (size) {
-               if (bitmap & 0x1)
-                       mono_gc_wbarrier_generic_store (dest, *src);
-               else
-                       *dest = *src;
-               ++src;
-               ++dest;
-               size -= SIZEOF_VOID_P;
-               bitmap >>= 1;
-       }
-}
-
-/*
- * ######################################################################
- * ########  Other mono public interface functions.
- * ######################################################################
- */
-
-void
-sgen_gc_collect (int generation)
-{
-       LOCK_GC;
-       if (generation > 1)
-               generation = 1;
-       sgen_perform_collection (0, generation, "user request", TRUE);
-       UNLOCK_GC;
-}
-
-int
-sgen_gc_collection_count (int generation)
-{
-       if (generation == 0)
-               return gc_stats.minor_gc_count;
-       return gc_stats.major_gc_count;
-}
-
-size_t
-sgen_gc_get_used_size (void)
-{
-       gint64 tot = 0;
-       LOCK_GC;
-       tot = los_memory_usage;
-       tot += nursery_section->next_data - nursery_section->data;
-       tot += major_collector.get_used_size ();
-       /* FIXME: account for pinned objects */
-       UNLOCK_GC;
-       return tot;
-}
-
-GCObject*
-sgen_weak_link_get (void **link_addr)
-{
-       void * volatile *link_addr_volatile;
-       void *ptr;
-       GCObject *obj;
- retry:
-       link_addr_volatile = link_addr;
-       ptr = (void*)*link_addr_volatile;
-       /*
-        * At this point we have a hidden pointer.  If the GC runs
-        * here, it will not recognize the hidden pointer as a
-        * reference, and if the object behind it is not referenced
-        * elsewhere, it will be freed.  Once the world is restarted
-        * we reveal the pointer, giving us a pointer to a freed
-        * object.  To make sure we don't return it, we load the
-        * hidden pointer again.  If it's still the same, we can be
-        * sure the object reference is valid.
-        */
-       if (ptr)
-               obj = (GCObject*) REVEAL_POINTER (ptr);
-       else
-               return NULL;
-
-       mono_memory_barrier ();
-
-       /*
-        * During the second bridge processing step the world is
-        * running again.  That step processes all weak links once
-        * more to null those that refer to dead objects.  Before that
-        * is completed, those links must not be followed, so we
-        * conservatively wait for bridge processing when any weak
-        * link is dereferenced.
-        */
-       sgen_client_bridge_wait_for_processing ();
-
-       if ((void*)*link_addr_volatile != ptr)
-               goto retry;
-
-       return obj;
-}
-
-gboolean
-sgen_set_allow_synchronous_major (gboolean flag)
-{
-       if (!major_collector.is_concurrent)
-               return flag;
-
-       allow_synchronous_major = flag;
-       return TRUE;
-}
-
-void
-sgen_env_var_error (const char *env_var, const char *fallback, const char *description_format, ...)
-{
-       va_list ap;
-
-       va_start (ap, description_format);
-
-       fprintf (stderr, "Warning: In environment variable `%s': ", env_var);
-       vfprintf (stderr, description_format, ap);
-       if (fallback)
-               fprintf (stderr, " - %s", fallback);
-       fprintf (stderr, "\n");
-
-       va_end (ap);
-}
-
-static gboolean
-parse_double_in_interval (const char *env_var, const char *opt_name, const char *opt, double min, double max, double *result)
-{
-       char *endptr;
-       double val = strtod (opt, &endptr);
-       if (endptr == opt) {
-               sgen_env_var_error (env_var, "Using default value.", "`%s` must be a number.", opt_name);
-               return FALSE;
-       }
-       else if (val < min || val > max) {
-               sgen_env_var_error (env_var, "Using default value.", "`%s` must be between %.2f - %.2f.", opt_name, min, max);
-               return FALSE;
-       }
-       *result = val;
-       return TRUE;
-}
-
-void
-sgen_gc_init (void)
-{
-       const char *env;
-       char **opts, **ptr;
-       char *major_collector_opt = NULL;
-       char *minor_collector_opt = NULL;
-       size_t max_heap = 0;
-       size_t soft_limit = 0;
-       int result;
-       gboolean debug_print_allowance = FALSE;
-       double allowance_ratio = 0, save_target = 0;
-       gboolean cement_enabled = TRUE;
-
-       do {
-               result = InterlockedCompareExchange (&gc_initialized, -1, 0);
-               switch (result) {
-               case 1:
-                       /* already inited */
-                       return;
-               case -1:
-                       /* being inited by another thread */
-                       g_usleep (1000);
-                       break;
-               case 0:
-                       /* we will init it */
-                       break;
-               default:
-                       g_assert_not_reached ();
-               }
-       } while (result != 0);
-
-       SGEN_TV_GETTIME (sgen_init_timestamp);
-
-#ifdef SGEN_WITHOUT_MONO
-       mono_thread_smr_init ();
-#endif
-
-       LOCK_INIT (gc_mutex);
-
-       gc_debug_file = stderr;
-
-       LOCK_INIT (sgen_interruption_mutex);
-
-       if ((env = g_getenv (MONO_GC_PARAMS_NAME))) {
-               opts = g_strsplit (env, ",", -1);
-               for (ptr = opts; *ptr; ++ptr) {
-                       char *opt = *ptr;
-                       if (g_str_has_prefix (opt, "major=")) {
-                               opt = strchr (opt, '=') + 1;
-                               major_collector_opt = g_strdup (opt);
-                       } else if (g_str_has_prefix (opt, "minor=")) {
-                               opt = strchr (opt, '=') + 1;
-                               minor_collector_opt = g_strdup (opt);
-                       }
-               }
-       } else {
-               opts = NULL;
-       }
-
-       init_stats ();
-       sgen_init_internal_allocator ();
-       sgen_init_nursery_allocator ();
-       sgen_init_fin_weak_hash ();
-       sgen_init_hash_table ();
-       sgen_init_descriptors ();
-       sgen_init_gray_queues ();
-       sgen_init_allocator ();
-
-       sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION, SGEN_SIZEOF_GC_MEM_SECTION);
-       sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE, sizeof (GrayQueueSection));
-
-       sgen_client_init ();
-
-       if (!minor_collector_opt) {
-               sgen_simple_nursery_init (&sgen_minor_collector);
-       } else {
-               if (!strcmp (minor_collector_opt, "simple")) {
-               use_simple_nursery:
-                       sgen_simple_nursery_init (&sgen_minor_collector);
-               } else if (!strcmp (minor_collector_opt, "split")) {
-                       sgen_split_nursery_init (&sgen_minor_collector);
-               } else {
-                       sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `simple` instead.", "Unknown minor collector `%s'.", minor_collector_opt);
-                       goto use_simple_nursery;
-               }
-       }
-
-       if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep")) {
-       use_marksweep_major:
-               sgen_marksweep_init (&major_collector);
-       } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-conc")) {
-               sgen_marksweep_conc_init (&major_collector);
-       } else {
-               sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `marksweep` instead.", "Unknown major collector `%s'.", major_collector_opt);
-               goto use_marksweep_major;
-       }
-
-       sgen_nursery_size = DEFAULT_NURSERY_SIZE;
-
-       if (major_collector.is_concurrent)
-               cement_enabled = FALSE;
-
-       if (opts) {
-               gboolean usage_printed = FALSE;
-
-               for (ptr = opts; *ptr; ++ptr) {
-                       char *opt = *ptr;
-                       if (!strcmp (opt, ""))
-                               continue;
-                       if (g_str_has_prefix (opt, "major="))
-                               continue;
-                       if (g_str_has_prefix (opt, "minor="))
-                               continue;
-                       if (g_str_has_prefix (opt, "max-heap-size=")) {
-                               size_t page_size = mono_pagesize ();
-                               size_t max_heap_candidate = 0;
-                               opt = strchr (opt, '=') + 1;
-                               if (*opt && mono_gc_parse_environment_string_extract_number (opt, &max_heap_candidate)) {
-                                       max_heap = (max_heap_candidate + page_size - 1) & ~(size_t)(page_size - 1);
-                                       if (max_heap != max_heap_candidate)
-                                               sgen_env_var_error (MONO_GC_PARAMS_NAME, "Rounding up.", "`max-heap-size` size must be a multiple of %d.", page_size);
-                               } else {
-                                       sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`max-heap-size` must be an integer.");
-                               }
-                               continue;
-                       }
-                       if (g_str_has_prefix (opt, "soft-heap-limit=")) {
-                               opt = strchr (opt, '=') + 1;
-                               if (*opt && mono_gc_parse_environment_string_extract_number (opt, &soft_limit)) {
-                                       if (soft_limit <= 0) {
-                                               sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be positive.");
-                                               soft_limit = 0;
-                                       }
-                               } else {
-                                       sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be an integer.");
-                               }
-                               continue;
-                       }
-
-#ifdef USER_CONFIG
-                       if (g_str_has_prefix (opt, "nursery-size=")) {
-                               size_t val;
-                               opt = strchr (opt, '=') + 1;
-                               if (*opt && mono_gc_parse_environment_string_extract_number (opt, &val)) {
-                                       if ((val & (val - 1))) {
-                                               sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be a power of two.");
-                                               continue;
-                                       }
-
-                                       if (val < SGEN_MAX_NURSERY_WASTE) {
-                                               sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.",
-                                                               "`nursery-size` must be at least %d bytes.", SGEN_MAX_NURSERY_WASTE);
-                                               continue;
-                                       }
-
-                                       sgen_nursery_size = val;
-                                       sgen_nursery_bits = 0;
-                                       while (ONE_P << (++ sgen_nursery_bits) != sgen_nursery_size)
-                                               ;
-                               } else {
-                                       sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be an integer.");
-                                       continue;
-                               }
-                               continue;
-                       }
-#endif
-                       if (g_str_has_prefix (opt, "save-target-ratio=")) {
-                               double val;
-                               opt = strchr (opt, '=') + 1;
-                               if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "save-target-ratio", opt,
-                                               SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO, &val)) {
-                                       save_target = val;
-                               }
-                               continue;
-                       }
-                       if (g_str_has_prefix (opt, "default-allowance-ratio=")) {
-                               double val;
-                               opt = strchr (opt, '=') + 1;
-                               if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "default-allowance-ratio", opt,
-                                               SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, &val)) {
-                                       allowance_ratio = val;
-                               }
-                               continue;
-                       }
-                       if (g_str_has_prefix (opt, "allow-synchronous-major=")) {
-                               if (!major_collector.is_concurrent) {
-                                       sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`allow-synchronous-major` is only valid for the concurrent major collector.");
-                                       continue;
-                               }
-
-                               opt = strchr (opt, '=') + 1;
-
-                               if (!strcmp (opt, "yes")) {
-                                       allow_synchronous_major = TRUE;
-                               } else if (!strcmp (opt, "no")) {
-                                       allow_synchronous_major = FALSE;
-                               } else {
-                                       sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`allow-synchronous-major` must be either `yes' or `no'.");
-                                       continue;
-                               }
-                       }
-
-                       if (!strcmp (opt, "cementing")) {
-                               cement_enabled = TRUE;
-                               continue;
-                       }
-                       if (!strcmp (opt, "no-cementing")) {
-                               cement_enabled = FALSE;
-                               continue;
-                       }
-
-                       if (major_collector.handle_gc_param && major_collector.handle_gc_param (opt))
-                               continue;
-
-                       if (sgen_minor_collector.handle_gc_param && sgen_minor_collector.handle_gc_param (opt))
-                               continue;
-
-                       if (sgen_client_handle_gc_param (opt))
-                               continue;
-
-                       sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "Unknown option `%s`.", opt);
-
-                       if (usage_printed)
-                               continue;
-
-                       fprintf (stderr, "\n%s must be a comma-delimited list of one or more of the following:\n", MONO_GC_PARAMS_NAME);
-                       fprintf (stderr, "  max-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
-                       fprintf (stderr, "  soft-heap-limit=n (where N is an integer, possibly with a k, m or a g suffix)\n");
-                       fprintf (stderr, "  nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
-                       fprintf (stderr, "  major=COLLECTOR (where COLLECTOR is `marksweep', `marksweep-conc', `marksweep-par')\n");
-                       fprintf (stderr, "  minor=COLLECTOR (where COLLECTOR is `simple' or `split')\n");
-                       fprintf (stderr, "  wbarrier=WBARRIER (where WBARRIER is `remset' or `cardtable')\n");
-                       fprintf (stderr, "  [no-]cementing\n");
-                       if (major_collector.is_concurrent)
-                               fprintf (stderr, "  allow-synchronous-major=FLAG (where FLAG is `yes' or `no')\n");
-                       if (major_collector.print_gc_param_usage)
-                               major_collector.print_gc_param_usage ();
-                       if (sgen_minor_collector.print_gc_param_usage)
-                               sgen_minor_collector.print_gc_param_usage ();
-                       sgen_client_print_gc_params_usage ();
-                       fprintf (stderr, " Experimental options:\n");
-                       fprintf (stderr, "  save-target-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO);
-                       fprintf (stderr, "  default-allowance-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO);
-                       fprintf (stderr, "\n");
-
-                       usage_printed = TRUE;
-               }
-               g_strfreev (opts);
-       }
-
-       if (major_collector_opt)
-               g_free (major_collector_opt);
-
-       if (minor_collector_opt)
-               g_free (minor_collector_opt);
-
-       alloc_nursery ();
-
-       if (major_collector.is_concurrent && cement_enabled) {
-               sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`cementing` is not supported on concurrent major collectors.");
-               cement_enabled = FALSE;
-       }
-
-       sgen_cement_init (cement_enabled);
-
-       if ((env = g_getenv (MONO_GC_DEBUG_NAME))) {
-               gboolean usage_printed = FALSE;
-
-               opts = g_strsplit (env, ",", -1);
-               for (ptr = opts; ptr && *ptr; ptr ++) {
-                       char *opt = *ptr;
-                       if (!strcmp (opt, ""))
-                               continue;
-                       if (opt [0] >= '0' && opt [0] <= '9') {
-                               gc_debug_level = atoi (opt);
-                               opt++;
-                               if (opt [0] == ':')
-                                       opt++;
-                               if (opt [0]) {
-                                       char *rf = g_strdup_printf ("%s.%d", opt, mono_process_current_pid ());
-                                       gc_debug_file = fopen (rf, "wb");
-                                       if (!gc_debug_file)
-                                               gc_debug_file = stderr;
-                                       g_free (rf);
-                               }
-                       } else if (!strcmp (opt, "print-allowance")) {
-                               debug_print_allowance = TRUE;
-                       } else if (!strcmp (opt, "print-pinning")) {
-                               sgen_pin_stats_enable ();
-                       } else if (!strcmp (opt, "verify-before-allocs")) {
-                               verify_before_allocs = 1;
-                               has_per_allocation_action = TRUE;
-                       } else if (g_str_has_prefix (opt, "verify-before-allocs=")) {
-                               char *arg = strchr (opt, '=') + 1;
-                               verify_before_allocs = atoi (arg);
-                               has_per_allocation_action = TRUE;
-                       } else if (!strcmp (opt, "collect-before-allocs")) {
-                               collect_before_allocs = 1;
-                               has_per_allocation_action = TRUE;
-                       } else if (g_str_has_prefix (opt, "collect-before-allocs=")) {
-                               char *arg = strchr (opt, '=') + 1;
-                               has_per_allocation_action = TRUE;
-                               collect_before_allocs = atoi (arg);
-                       } else if (!strcmp (opt, "verify-before-collections")) {
-                               whole_heap_check_before_collection = TRUE;
-                       } else if (!strcmp (opt, "check-at-minor-collections")) {
-                               consistency_check_at_minor_collection = TRUE;
-                               nursery_clear_policy = CLEAR_AT_GC;
-                       } else if (!strcmp (opt, "mod-union-consistency-check")) {
-                               if (!major_collector.is_concurrent) {
-                                       sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`mod-union-consistency-check` only works with concurrent major collector.");
-                                       continue;
-                               }
-                               mod_union_consistency_check = TRUE;
-                       } else if (!strcmp (opt, "check-mark-bits")) {
-                               check_mark_bits_after_major_collection = TRUE;
-                       } else if (!strcmp (opt, "check-nursery-pinned")) {
-                               check_nursery_objects_pinned = TRUE;
-                       } else if (!strcmp (opt, "clear-at-gc")) {
-                               nursery_clear_policy = CLEAR_AT_GC;
-                       } else if (!strcmp (opt, "clear-nursery-at-gc")) {
-                               nursery_clear_policy = CLEAR_AT_GC;
-                       } else if (!strcmp (opt, "clear-at-tlab-creation")) {
-                               nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
-                       } else if (!strcmp (opt, "debug-clear-at-tlab-creation")) {
-                               nursery_clear_policy = CLEAR_AT_TLAB_CREATION_DEBUG;
-                       } else if (!strcmp (opt, "check-scan-starts")) {
-                               do_scan_starts_check = TRUE;
-                       } else if (!strcmp (opt, "verify-nursery-at-minor-gc")) {
-                               do_verify_nursery = TRUE;
-                       } else if (!strcmp (opt, "check-concurrent")) {
-                               if (!major_collector.is_concurrent) {
-                                       sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`check-concurrent` only works with concurrent major collectors.");
-                                       continue;
-                               }
-                               do_concurrent_checks = TRUE;
-                       } else if (!strcmp (opt, "dump-nursery-at-minor-gc")) {
-                               do_dump_nursery_content = TRUE;
-                       } else if (!strcmp (opt, "disable-minor")) {
-                               disable_minor_collections = TRUE;
-                       } else if (!strcmp (opt, "disable-major")) {
-                               disable_major_collections = TRUE;
-                       } else if (g_str_has_prefix (opt, "heap-dump=")) {
-                               char *filename = strchr (opt, '=') + 1;
-                               nursery_clear_policy = CLEAR_AT_GC;
-                               sgen_debug_enable_heap_dump (filename);
-                       } else if (g_str_has_prefix (opt, "binary-protocol=")) {
-                               char *filename = strchr (opt, '=') + 1;
-                               char *colon = strrchr (filename, ':');
-                               size_t limit = -1;
-                               if (colon) {
-                                       if (!mono_gc_parse_environment_string_extract_number (colon + 1, &limit)) {
-                                               sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring limit.", "Binary protocol file size limit must be an integer.");
-                                               limit = -1;
-                                       }
-                                       *colon = '\0';
-                               }
-                               binary_protocol_init (filename, (long long)limit);
-                       } else if (!strcmp (opt, "nursery-canaries")) {
-                               do_verify_nursery = TRUE;
-                               enable_nursery_canaries = TRUE;
-                       } else if (!sgen_client_handle_gc_debug (opt)) {
-                               sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "Unknown option `%s`.", opt);
-
-                               if (usage_printed)
-                                       continue;
-
-                               fprintf (stderr, "\n%s must be of the format [<l>[:<filename>]|<option>]+ where <l> is a debug level 0-9.\n", MONO_GC_DEBUG_NAME);
-                               fprintf (stderr, "Valid <option>s are:\n");
-                               fprintf (stderr, "  collect-before-allocs[=<n>]\n");
-                               fprintf (stderr, "  verify-before-allocs[=<n>]\n");
-                               fprintf (stderr, "  check-at-minor-collections\n");
-                               fprintf (stderr, "  check-mark-bits\n");
-                               fprintf (stderr, "  check-nursery-pinned\n");
-                               fprintf (stderr, "  verify-before-collections\n");
-                               fprintf (stderr, "  verify-nursery-at-minor-gc\n");
-                               fprintf (stderr, "  dump-nursery-at-minor-gc\n");
-                               fprintf (stderr, "  disable-minor\n");
-                               fprintf (stderr, "  disable-major\n");
-                               fprintf (stderr, "  check-concurrent\n");
-                               fprintf (stderr, "  clear-[nursery-]at-gc\n");
-                               fprintf (stderr, "  clear-at-tlab-creation\n");
-                               fprintf (stderr, "  debug-clear-at-tlab-creation\n");
-                               fprintf (stderr, "  check-scan-starts\n");
-                               fprintf (stderr, "  print-allowance\n");
-                               fprintf (stderr, "  print-pinning\n");
-                               fprintf (stderr, "  heap-dump=<filename>\n");
-                               fprintf (stderr, "  binary-protocol=<filename>[:<file-size-limit>]\n");
-                               fprintf (stderr, "  nursery-canaries\n");
-                               sgen_client_print_gc_debug_usage ();
-                               fprintf (stderr, "\n");
-
-                               usage_printed = TRUE;
-                       }
-               }
-               g_strfreev (opts);
-       }
-
-       if (check_mark_bits_after_major_collection)
-               nursery_clear_policy = CLEAR_AT_GC;
-
-       if (major_collector.post_param_init)
-               major_collector.post_param_init (&major_collector);
-
-       if (major_collector.needs_thread_pool)
-               sgen_workers_init (1);
-
-       sgen_memgov_init (max_heap, soft_limit, debug_print_allowance, allowance_ratio, save_target);
-
-       memset (&remset, 0, sizeof (remset));
-
-       sgen_card_table_init (&remset);
-
-       gc_initialized = 1;
-}
-
-NurseryClearPolicy
-sgen_get_nursery_clear_policy (void)
-{
-       return nursery_clear_policy;
-}
-
-void
-sgen_gc_lock (void)
-{
-       LOCK_GC;
-}
-
-void
-sgen_gc_unlock (void)
-{
-       gboolean try_free = sgen_try_free_some_memory;
-       sgen_try_free_some_memory = FALSE;
-       mono_mutex_unlock (&gc_mutex);
-       if (try_free)
-               mono_thread_hazardous_try_free_some ();
-}
-
-void
-sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
-{
-       major_collector.iterate_live_block_ranges (callback);
-}
-
-SgenMajorCollector*
-sgen_get_major_collector (void)
-{
-       return &major_collector;
-}
-
-SgenRememberedSet*
-sgen_get_remset (void)
-{
-       return &remset;
-}
-
-static void
-count_cards (long long *major_total, long long *major_marked, long long *los_total, long long *los_marked)
-{
-       sgen_get_major_collector ()->count_cards (major_total, major_marked);
-       sgen_los_count_cards (los_total, los_marked);
-}
-
-static gboolean world_is_stopped = FALSE;
-
-/* LOCKING: assumes the GC lock is held */
-void
-sgen_stop_world (int generation)
-{
-       long long major_total = -1, major_marked = -1, los_total = -1, los_marked = -1;
-
-       SGEN_ASSERT (0, !world_is_stopped, "Why are we stopping a stopped world?");
-
-       binary_protocol_world_stopping (generation, sgen_timestamp (), (gpointer)mono_native_thread_id_get ());
-
-       sgen_client_stop_world (generation);
-
-       world_is_stopped = TRUE;
-
-       if (binary_protocol_is_heavy_enabled ())
-               count_cards (&major_total, &major_marked, &los_total, &los_marked);
-       binary_protocol_world_stopped (generation, sgen_timestamp (), major_total, major_marked, los_total, los_marked);
-}
-
-/* LOCKING: assumes the GC lock is held */
-void
-sgen_restart_world (int generation, GGTimingInfo *timing)
-{
-       long long major_total = -1, major_marked = -1, los_total = -1, los_marked = -1;
-
-       SGEN_ASSERT (0, world_is_stopped, "Why are we restarting a running world?");
-
-       if (binary_protocol_is_heavy_enabled ())
-               count_cards (&major_total, &major_marked, &los_total, &los_marked);
-       binary_protocol_world_restarting (generation, sgen_timestamp (), major_total, major_marked, los_total, los_marked);
-
-       sgen_client_restart_world (generation, timing);
-
-       world_is_stopped = FALSE;
-
-       binary_protocol_world_restarted (generation, sgen_timestamp ());
-
-       sgen_try_free_some_memory = TRUE;
-
-       if (sgen_client_bridge_need_processing ())
-               sgen_client_bridge_processing_finish (generation);
-
-       sgen_memgov_collection_end (generation, timing, timing ? 2 : 0);
-}
-
-gboolean
-sgen_is_world_stopped (void)
-{
-       return world_is_stopped;
-}
-
-void
-sgen_check_whole_heap_stw (void)
-{
-       sgen_stop_world (0);
-       sgen_clear_nursery_fragments ();
-       sgen_check_whole_heap (FALSE);
-       sgen_restart_world (0, NULL);
-}
-
-gint64
-sgen_timestamp (void)
-{
-       SGEN_TV_DECLARE (timestamp);
-       SGEN_TV_GETTIME (timestamp);
-       return SGEN_TV_ELAPSED (sgen_init_timestamp, timestamp);
-}
-
-#endif /* HAVE_SGEN_GC */
diff --git a/mono/metadata/sgen-gc.h b/mono/metadata/sgen-gc.h
deleted file mode 100644 (file)
index 88538f2..0000000
+++ /dev/null
@@ -1,1071 +0,0 @@
-/*
- * sgen-gc.c: Simple generational GC.
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#ifndef __MONO_SGENGC_H__
-#define __MONO_SGENGC_H__
-
-/* pthread impl */
-#include "config.h"
-
-#ifdef HAVE_SGEN_GC
-
-typedef struct _SgenThreadInfo SgenThreadInfo;
-#undef THREAD_INFO_TYPE
-#define THREAD_INFO_TYPE SgenThreadInfo
-
-#include <glib.h>
-#include <stdio.h>
-#ifdef HAVE_PTHREAD_H
-#include <pthread.h>
-#endif
-#include <stdint.h>
-#include "mono/utils/mono-compiler.h"
-#include "mono/utils/atomic.h"
-#include "mono/utils/mono-mutex.h"
-#include "mono/metadata/sgen-conf.h"
-#include "mono/metadata/sgen-descriptor.h"
-#include "mono/metadata/sgen-gray.h"
-#include "mono/metadata/sgen-hash-table.h"
-#include "mono/metadata/sgen-protocol.h"
-
-/* The method used to clear the nursery */
-/* Clearing at nursery collections is the safest, but has bad interactions with caches.
- * Clearing at TLAB creation is much faster, but more complex and it might expose hard
- * to find bugs.
- */
-typedef enum {
-       CLEAR_AT_GC,
-       CLEAR_AT_TLAB_CREATION,
-       CLEAR_AT_TLAB_CREATION_DEBUG
-} NurseryClearPolicy;
-
-NurseryClearPolicy sgen_get_nursery_clear_policy (void);
-
-#if !defined(__MACH__) && !MONO_MACH_ARCH_SUPPORTED && defined(HAVE_PTHREAD_KILL)
-#define SGEN_POSIX_STW 1
-#endif
-
-/*
- * The nursery section uses this struct.
- */
-typedef struct _GCMemSection GCMemSection;
-struct _GCMemSection {
-       char *data;
-       mword size;
-       /* pointer where more data could be allocated if it fits */
-       char *next_data;
-       char *end_data;
-       /*
-        * scan starts is an array of pointers to objects equally spaced in the allocation area
-        * They let use quickly find pinned objects from pinning pointers.
-        */
-       char **scan_starts;
-       /* in major collections indexes in the pin_queue for objects that pin this section */
-       size_t pin_queue_first_entry;
-       size_t pin_queue_last_entry;
-       size_t num_scan_start;
-};
-
-/*
- * Recursion is not allowed for the thread lock.
- */
-#define LOCK_DECLARE(name) mono_mutex_t name
-/* if changing LOCK_INIT to something that isn't idempotent, look at
-   its use in mono_gc_base_init in sgen-gc.c */
-#define LOCK_INIT(name)        mono_mutex_init (&(name))
-#define LOCK_GC do {                                           \
-               MONO_TRY_BLOCKING       \
-               mono_mutex_lock (&gc_mutex);                    \
-               MONO_FINISH_TRY_BLOCKING        \
-       } while (0)
-#define UNLOCK_GC do { sgen_gc_unlock (); } while (0)
-
-extern LOCK_DECLARE (sgen_interruption_mutex);
-
-#define LOCK_INTERRUPTION mono_mutex_lock (&sgen_interruption_mutex)
-#define UNLOCK_INTERRUPTION mono_mutex_unlock (&sgen_interruption_mutex)
-
-/* FIXME: Use InterlockedAdd & InterlockedAdd64 to reduce the CAS cost. */
-#define SGEN_CAS       InterlockedCompareExchange
-#define SGEN_CAS_PTR   InterlockedCompareExchangePointer
-#define SGEN_ATOMIC_ADD(x,i)   do {                                    \
-               int __old_x;                                            \
-               do {                                                    \
-                       __old_x = (x);                                  \
-               } while (InterlockedCompareExchange (&(x), __old_x + (i), __old_x) != __old_x); \
-       } while (0)
-#define SGEN_ATOMIC_ADD_P(x,i) do { \
-               size_t __old_x;                                            \
-               do {                                                    \
-                       __old_x = (x);                                  \
-               } while (InterlockedCompareExchangePointer ((void**)&(x), (void*)(__old_x + (i)), (void*)__old_x) != (void*)__old_x); \
-       } while (0)
-
-
-#ifndef HOST_WIN32
-/* we intercept pthread_create calls to know which threads exist */
-#define USE_PTHREAD_INTERCEPT 1
-#endif
-
-#ifdef HEAVY_STATISTICS
-extern guint64 stat_objects_alloced_degraded;
-extern guint64 stat_bytes_alloced_degraded;
-extern guint64 stat_copy_object_called_major;
-extern guint64 stat_objects_copied_major;
-#endif
-
-#define SGEN_ASSERT(level, a, ...) do {        \
-       if (G_UNLIKELY ((level) <= SGEN_MAX_ASSERT_LEVEL && !(a))) {    \
-               g_error (__VA_ARGS__);  \
-} } while (0)
-
-
-#define SGEN_LOG(level, format, ...) do {      \
-       if (G_UNLIKELY ((level) <= SGEN_MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) {        \
-               mono_gc_printf (gc_debug_file, format, ##__VA_ARGS__);  \
-} } while (0)
-
-#define SGEN_COND_LOG(level, cond, format, ...) do {   \
-       if (G_UNLIKELY ((level) <= SGEN_MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) {        \
-               if (cond)       \
-                       mono_gc_printf (gc_debug_file, format, ##__VA_ARGS__);  \
-} } while (0)
-
-extern int gc_debug_level;
-extern FILE* gc_debug_file;
-
-extern int current_collection_generation;
-
-extern unsigned int sgen_global_stop_count;
-
-#define SGEN_ALLOC_ALIGN               8
-#define SGEN_ALLOC_ALIGN_BITS  3
-
-/* s must be non-negative */
-#define SGEN_CAN_ALIGN_UP(s)           ((s) <= SIZE_MAX - (SGEN_ALLOC_ALIGN - 1))
-#define SGEN_ALIGN_UP(s)               (((s)+(SGEN_ALLOC_ALIGN-1)) & ~(SGEN_ALLOC_ALIGN-1))
-
-#if SIZEOF_VOID_P == 4
-#define ONE_P 1
-#else
-#define ONE_P 1ll
-#endif
-
-static inline guint
-sgen_aligned_addr_hash (gconstpointer ptr)
-{
-       return GPOINTER_TO_UINT (ptr) >> 3;
-}
-
-/*
- * The link pointer is hidden by negating each bit.  We use the lowest
- * bit of the link (before negation) to store whether it needs
- * resurrection tracking.
- */
-#define HIDE_POINTER(p,t)      ((gpointer)(~((size_t)(p)|((t)?1:0))))
-#define REVEAL_POINTER(p)      ((gpointer)((~(size_t)(p))&~3L))
-
-#define SGEN_PTR_IN_NURSERY(p,bits,start,end)  (((mword)(p) & ~((1 << (bits)) - 1)) == (mword)(start))
-
-#ifdef USER_CONFIG
-
-/* good sizes are 512KB-1MB: larger ones increase a lot memzeroing time */
-#define DEFAULT_NURSERY_SIZE (sgen_nursery_size)
-extern size_t sgen_nursery_size;
-/* The number of trailing 0 bits in DEFAULT_NURSERY_SIZE */
-#define DEFAULT_NURSERY_BITS (sgen_nursery_bits)
-extern int sgen_nursery_bits;
-
-#else
-
-#define DEFAULT_NURSERY_SIZE (4*1024*1024)
-#define DEFAULT_NURSERY_BITS 22
-
-#endif
-
-extern char *sgen_nursery_start;
-extern char *sgen_nursery_end;
-
-static inline MONO_ALWAYS_INLINE gboolean
-sgen_ptr_in_nursery (void *p)
-{
-       return SGEN_PTR_IN_NURSERY ((p), DEFAULT_NURSERY_BITS, sgen_nursery_start, sgen_nursery_end);
-}
-
-static inline MONO_ALWAYS_INLINE char*
-sgen_get_nursery_start (void)
-{
-       return sgen_nursery_start;
-}
-
-static inline MONO_ALWAYS_INLINE char*
-sgen_get_nursery_end (void)
-{
-       return sgen_nursery_end;
-}
-
-/*
- * We use the lowest three bits in the vtable pointer of objects to tag whether they're
- * forwarded, pinned, and/or cemented.  These are the valid states:
- *
- * | State            | bits |
- * |------------------+------+
- * | default          |  000 |
- * | forwarded        |  001 |
- * | pinned           |  010 |
- * | pinned, cemented |  110 |
- *
- * We store them in the vtable slot because the bits are used in the sync block for other
- * purposes: if we merge them and alloc the sync blocks aligned to 8 bytes, we can change
- * this and use bit 3 in the syncblock (with the lower two bits both set for forwarded, that
- * would be an invalid combination for the monitor and hash code).
- */
-
-#include "sgen-tagged-pointer.h"
-
-#define SGEN_VTABLE_BITS_MASK  SGEN_TAGGED_POINTER_MASK
-
-#define SGEN_POINTER_IS_TAGGED_FORWARDED(p)    SGEN_POINTER_IS_TAGGED_1((p))
-#define SGEN_POINTER_TAG_FORWARDED(p)          SGEN_POINTER_TAG_1((p))
-
-#define SGEN_POINTER_IS_TAGGED_PINNED(p)       SGEN_POINTER_IS_TAGGED_2((p))
-#define SGEN_POINTER_TAG_PINNED(p)             SGEN_POINTER_TAG_2((p))
-
-#define SGEN_POINTER_IS_TAGGED_CEMENTED(p)     SGEN_POINTER_IS_TAGGED_4((p))
-#define SGEN_POINTER_TAG_CEMENTED(p)           SGEN_POINTER_TAG_4((p))
-
-#define SGEN_POINTER_UNTAG_VTABLE(p)           SGEN_POINTER_UNTAG_ALL((p))
-
-/* returns NULL if not forwarded, or the forwarded address */
-#define SGEN_VTABLE_IS_FORWARDED(vtable) (SGEN_POINTER_IS_TAGGED_FORWARDED ((vtable)) ? SGEN_POINTER_UNTAG_VTABLE ((vtable)) : NULL)
-#define SGEN_OBJECT_IS_FORWARDED(obj) (SGEN_VTABLE_IS_FORWARDED (((mword*)(obj))[0]))
-
-#define SGEN_VTABLE_IS_PINNED(vtable) SGEN_POINTER_IS_TAGGED_PINNED ((vtable))
-#define SGEN_OBJECT_IS_PINNED(obj) (SGEN_VTABLE_IS_PINNED (((mword*)(obj))[0]))
-
-#define SGEN_OBJECT_IS_CEMENTED(obj) (SGEN_POINTER_IS_TAGGED_CEMENTED (((mword*)(obj))[0]))
-
-/* set the forwarded address fw_addr for object obj */
-#define SGEN_FORWARD_OBJECT(obj,fw_addr) do {                          \
-               *(void**)(obj) = SGEN_POINTER_TAG_FORWARDED ((fw_addr));        \
-       } while (0)
-#define SGEN_PIN_OBJECT(obj) do {      \
-               *(void**)(obj) = SGEN_POINTER_TAG_PINNED (*(void**)(obj)); \
-       } while (0)
-#define SGEN_CEMENT_OBJECT(obj) do {   \
-               *(void**)(obj) = SGEN_POINTER_TAG_CEMENTED (*(void**)(obj)); \
-       } while (0)
-/* Unpins and uncements */
-#define SGEN_UNPIN_OBJECT(obj) do {    \
-               *(void**)(obj) = SGEN_POINTER_UNTAG_VTABLE (*(void**)(obj)); \
-       } while (0)
-
-/*
- * Since we set bits in the vtable, use the macro to load it from the pointer to
- * an object that is potentially pinned.
- */
-#define SGEN_LOAD_VTABLE(obj)          SGEN_POINTER_UNTAG_ALL (SGEN_LOAD_VTABLE_UNCHECKED ((obj)))
-
-/*
-List of what each bit on of the vtable gc bits means. 
-*/
-enum {
-       SGEN_GC_BIT_BRIDGE_OBJECT = 1,
-       SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT = 2,
-       SGEN_GC_BIT_FINALIZER_AWARE = 4,
-};
-
-/* the runtime can register areas of memory as roots: we keep two lists of roots,
- * a pinned root set for conservatively scanned roots and a normal one for
- * precisely scanned roots (currently implemented as a single list).
- */
-typedef struct _RootRecord RootRecord;
-struct _RootRecord {
-       char *end_root;
-       mword root_desc;
-};
-
-enum {
-       ROOT_TYPE_NORMAL = 0, /* "normal" roots */
-       ROOT_TYPE_PINNED = 1, /* roots without a GC descriptor */
-       ROOT_TYPE_WBARRIER = 2, /* roots with a write barrier */
-       ROOT_TYPE_NUM
-};
-
-extern SgenHashTable roots_hash [ROOT_TYPE_NUM];
-
-int sgen_register_root (char *start, size_t size, void *descr, int root_type);
-void sgen_deregister_root (char* addr);
-
-typedef void (*IterateObjectCallbackFunc) (char*, size_t, void*);
-
-void sgen_gc_init (void);
-
-void sgen_os_init (void);
-
-void sgen_update_heap_boundaries (mword low, mword high);
-
-void sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags);
-void sgen_check_section_scan_starts (GCMemSection *section);
-
-void sgen_conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type);
-
-/* Keep in sync with description_for_type() in sgen-internal.c! */
-enum {
-       INTERNAL_MEM_PIN_QUEUE,
-       INTERNAL_MEM_FRAGMENT,
-       INTERNAL_MEM_SECTION,
-       INTERNAL_MEM_SCAN_STARTS,
-       INTERNAL_MEM_FIN_TABLE,
-       INTERNAL_MEM_FINALIZE_ENTRY,
-       INTERNAL_MEM_FINALIZE_READY,
-       INTERNAL_MEM_DISLINK_TABLE,
-       INTERNAL_MEM_DISLINK,
-       INTERNAL_MEM_ROOTS_TABLE,
-       INTERNAL_MEM_ROOT_RECORD,
-       INTERNAL_MEM_STATISTICS,
-       INTERNAL_MEM_STAT_PINNED_CLASS,
-       INTERNAL_MEM_STAT_REMSET_CLASS,
-       INTERNAL_MEM_GRAY_QUEUE,
-       INTERNAL_MEM_MS_TABLES,
-       INTERNAL_MEM_MS_BLOCK_INFO,
-       INTERNAL_MEM_MS_BLOCK_INFO_SORT,
-       INTERNAL_MEM_WORKER_DATA,
-       INTERNAL_MEM_THREAD_POOL_JOB,
-       INTERNAL_MEM_BRIDGE_DATA,
-       INTERNAL_MEM_OLD_BRIDGE_HASH_TABLE,
-       INTERNAL_MEM_OLD_BRIDGE_HASH_TABLE_ENTRY,
-       INTERNAL_MEM_BRIDGE_HASH_TABLE,
-       INTERNAL_MEM_BRIDGE_HASH_TABLE_ENTRY,
-       INTERNAL_MEM_BRIDGE_ALIVE_HASH_TABLE,
-       INTERNAL_MEM_BRIDGE_ALIVE_HASH_TABLE_ENTRY,
-       INTERNAL_MEM_TARJAN_BRIDGE_HASH_TABLE,
-       INTERNAL_MEM_TARJAN_BRIDGE_HASH_TABLE_ENTRY,
-       INTERNAL_MEM_TARJAN_OBJ_BUCKET,
-       INTERNAL_MEM_BRIDGE_DEBUG,
-       INTERNAL_MEM_TOGGLEREF_DATA,
-       INTERNAL_MEM_CARDTABLE_MOD_UNION,
-       INTERNAL_MEM_BINARY_PROTOCOL,
-       INTERNAL_MEM_TEMPORARY,
-       INTERNAL_MEM_FIRST_CLIENT
-};
-
-enum {
-       GENERATION_NURSERY,
-       GENERATION_OLD,
-       GENERATION_MAX
-};
-
-#ifdef SGEN_HEAVY_BINARY_PROTOCOL
-#define BINARY_PROTOCOL_ARG(x) ,x
-#else
-#define BINARY_PROTOCOL_ARG(x)
-#endif
-
-void sgen_init_internal_allocator (void);
-
-#define SGEN_DEFINE_OBJECT_VTABLE
-#ifdef SGEN_CLIENT_HEADER
-#include SGEN_CLIENT_HEADER
-#else
-#include "sgen-client-mono.h"
-#endif
-#undef SGEN_DEFINE_OBJECT_VTABLE
-
-/* eventually share with MonoThread? */
-/*
- * This structure extends the MonoThreadInfo structure.
- */
-struct _SgenThreadInfo {
-       SgenClientThreadInfo client_info;
-
-       char **tlab_next_addr;
-       char **tlab_start_addr;
-       char **tlab_temp_end_addr;
-       char **tlab_real_end_addr;
-
-#ifndef HAVE_KW_THREAD
-       char *tlab_start;
-       char *tlab_next;
-       char *tlab_temp_end;
-       char *tlab_real_end;
-#endif
-};
-
-gboolean sgen_is_worker_thread (MonoNativeThreadId thread);
-
-typedef void (*CopyOrMarkObjectFunc) (void**, SgenGrayQueue*);
-typedef void (*ScanObjectFunc) (char *obj, mword desc, SgenGrayQueue*);
-typedef void (*ScanVTypeFunc) (char *full_object, char *start, mword desc, SgenGrayQueue* BINARY_PROTOCOL_ARG (size_t size));
-
-typedef struct {
-       CopyOrMarkObjectFunc copy_or_mark_object;
-       ScanObjectFunc scan_object;
-       ScanVTypeFunc scan_vtype;
-       /*FIXME add allocation function? */
-} SgenObjectOperations;
-
-typedef struct
-{
-       SgenObjectOperations *ops;
-       SgenGrayQueue *queue;
-} ScanCopyContext;
-
-#define CONTEXT_FROM_OBJECT_OPERATIONS(ops, queue) ((ScanCopyContext) { (ops), (queue) })
-
-void sgen_report_internal_mem_usage (void);
-void sgen_dump_internal_mem_usage (FILE *heap_dump_file);
-void sgen_dump_section (GCMemSection *section, const char *type);
-void sgen_dump_occupied (char *start, char *end, char *section_start);
-
-void sgen_register_fixed_internal_mem_type (int type, size_t size);
-
-void* sgen_alloc_internal (int type);
-void sgen_free_internal (void *addr, int type);
-
-void* sgen_alloc_internal_dynamic (size_t size, int type, gboolean assert_on_failure);
-void sgen_free_internal_dynamic (void *addr, size_t size, int type);
-
-void sgen_pin_stats_enable (void);
-void sgen_pin_stats_register_object (char *obj, size_t size);
-void sgen_pin_stats_register_global_remset (char *obj);
-void sgen_pin_stats_print_class_stats (void);
-
-void sgen_sort_addresses (void **array, size_t size);
-void sgen_add_to_global_remset (gpointer ptr, gpointer obj);
-
-int sgen_get_current_collection_generation (void);
-gboolean sgen_collection_is_concurrent (void);
-gboolean sgen_concurrent_collection_in_progress (void);
-
-typedef struct _SgenFragment SgenFragment;
-
-struct _SgenFragment {
-       SgenFragment *next;
-       char *fragment_start;
-       char *fragment_next; /* the current soft limit for allocation */
-       char *fragment_end;
-       SgenFragment *next_in_order; /* We use a different entry for all active fragments so we can avoid SMR. */
-};
-
-typedef struct {
-       SgenFragment *alloc_head; /* List head to be used when allocating memory. Walk with fragment_next. */
-       SgenFragment *region_head; /* List head of the region used by this allocator. Walk with next_in_order. */
-} SgenFragmentAllocator;
-
-void sgen_fragment_allocator_add (SgenFragmentAllocator *allocator, char *start, char *end);
-void sgen_fragment_allocator_release (SgenFragmentAllocator *allocator);
-void* sgen_fragment_allocator_serial_alloc (SgenFragmentAllocator *allocator, size_t size);
-void* sgen_fragment_allocator_par_alloc (SgenFragmentAllocator *allocator, size_t size);
-void* sgen_fragment_allocator_serial_range_alloc (SgenFragmentAllocator *allocator, size_t desired_size, size_t minimum_size, size_t *out_alloc_size);
-void* sgen_fragment_allocator_par_range_alloc (SgenFragmentAllocator *allocator, size_t desired_size, size_t minimum_size, size_t *out_alloc_size);
-SgenFragment* sgen_fragment_allocator_alloc (void);
-void sgen_clear_allocator_fragments (SgenFragmentAllocator *allocator);
-void sgen_clear_range (char *start, char *end);
-
-
-/*
-This is a space/speed compromise as we need to make sure the from/to space check is both O(1)
-and only hit cache hot memory. On a 4Mb nursery it requires 1024 bytes, or 3% of your average
-L1 cache. On small configs with a 512kb nursery, this goes to 0.4%.
-
-Experimental results on how much space we waste with a 4Mb nursery:
-
-Note that the wastage applies to the half nursery, or 2Mb:
-
-Test 1 (compiling corlib):
-9: avg: 3.1k
-8: avg: 1.6k
-
-*/
-#define SGEN_TO_SPACE_GRANULE_BITS 9
-#define SGEN_TO_SPACE_GRANULE_IN_BYTES (1 << SGEN_TO_SPACE_GRANULE_BITS)
-
-extern char *sgen_space_bitmap;
-extern size_t sgen_space_bitmap_size;
-
-static inline gboolean
-sgen_nursery_is_to_space (char *object)
-{
-       size_t idx = (object - sgen_nursery_start) >> SGEN_TO_SPACE_GRANULE_BITS;
-       size_t byte = idx >> 3;
-       size_t bit = idx & 0x7;
-
-       SGEN_ASSERT (4, sgen_ptr_in_nursery (object), "object %p is not in nursery [%p - %p]", object, sgen_get_nursery_start (), sgen_get_nursery_end ());
-       SGEN_ASSERT (4, byte < sgen_space_bitmap_size, "byte index %zd out of range (%zd)", byte, sgen_space_bitmap_size);
-
-       return (sgen_space_bitmap [byte] & (1 << bit)) != 0;
-}
-
-static inline gboolean
-sgen_nursery_is_from_space (char *object)
-{
-       return !sgen_nursery_is_to_space (object);
-}
-
-static inline gboolean
-sgen_nursery_is_object_alive (char *obj)
-{
-       /* FIXME put this asserts under a non default level */
-       g_assert (sgen_ptr_in_nursery (obj));
-
-       if (sgen_nursery_is_to_space (obj))
-               return TRUE;
-
-       if (SGEN_OBJECT_IS_PINNED (obj) || SGEN_OBJECT_IS_FORWARDED (obj))
-               return TRUE;
-
-       return FALSE;
-}
-
-typedef struct {
-       gboolean is_split;
-
-       char* (*alloc_for_promotion) (GCVTable *vtable, char *obj, size_t objsize, gboolean has_references);
-
-       SgenObjectOperations serial_ops;
-
-       void (*prepare_to_space) (char *to_space_bitmap, size_t space_bitmap_size);
-       void (*clear_fragments) (void);
-       SgenFragment* (*build_fragments_get_exclude_head) (void);
-       void (*build_fragments_release_exclude_head) (void);
-       void (*build_fragments_finish) (SgenFragmentAllocator *allocator);
-       void (*init_nursery) (SgenFragmentAllocator *allocator, char *start, char *end);
-
-       gboolean (*handle_gc_param) (const char *opt); /* Optional */
-       void (*print_gc_param_usage) (void); /* Optional */
-} SgenMinorCollector;
-
-extern SgenMinorCollector sgen_minor_collector;
-
-void sgen_simple_nursery_init (SgenMinorCollector *collector);
-void sgen_split_nursery_init (SgenMinorCollector *collector);
-
-/* Updating references */
-
-#ifdef SGEN_CHECK_UPDATE_REFERENCE
-gboolean sgen_thread_pool_is_thread_pool_thread (MonoNativeThreadId some_thread) MONO_INTERNAL;
-static inline void
-sgen_update_reference (void **p, void *o, gboolean allow_null)
-{
-       if (!allow_null)
-               SGEN_ASSERT (0, o, "Cannot update a reference with a NULL pointer");
-       SGEN_ASSERT (0, !sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ()), "Can't update a reference in the worker thread");
-       *p = o;
-}
-
-#define SGEN_UPDATE_REFERENCE_ALLOW_NULL(p,o)  sgen_update_reference ((void**)(p), (void*)(o), TRUE)
-#define SGEN_UPDATE_REFERENCE(p,o)             sgen_update_reference ((void**)(p), (void*)(o), FALSE)
-#else
-#define SGEN_UPDATE_REFERENCE_ALLOW_NULL(p,o)  (*(void**)(p) = (void*)(o))
-#define SGEN_UPDATE_REFERENCE(p,o)             SGEN_UPDATE_REFERENCE_ALLOW_NULL ((p), (o))
-#endif
-
-/* Major collector */
-
-typedef void (*sgen_cardtable_block_callback) (mword start, mword size);
-void sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback);
-
-typedef enum {
-       ITERATE_OBJECTS_SWEEP = 1,
-       ITERATE_OBJECTS_NON_PINNED = 2,
-       ITERATE_OBJECTS_PINNED = 4,
-       ITERATE_OBJECTS_ALL = ITERATE_OBJECTS_NON_PINNED | ITERATE_OBJECTS_PINNED,
-       ITERATE_OBJECTS_SWEEP_NON_PINNED = ITERATE_OBJECTS_SWEEP | ITERATE_OBJECTS_NON_PINNED,
-       ITERATE_OBJECTS_SWEEP_PINNED = ITERATE_OBJECTS_SWEEP | ITERATE_OBJECTS_PINNED,
-       ITERATE_OBJECTS_SWEEP_ALL = ITERATE_OBJECTS_SWEEP | ITERATE_OBJECTS_NON_PINNED | ITERATE_OBJECTS_PINNED
-} IterateObjectsFlags;
-
-typedef struct
-{
-       size_t num_scanned_objects;
-       size_t num_unique_scanned_objects;
-} ScannedObjectCounts;
-
-typedef struct _SgenMajorCollector SgenMajorCollector;
-struct _SgenMajorCollector {
-       size_t section_size;
-       gboolean is_concurrent;
-       gboolean needs_thread_pool;
-       gboolean supports_cardtable;
-       gboolean sweeps_lazily;
-
-       /*
-        * This is set to TRUE by the sweep if the next major
-        * collection should be synchronous (for evacuation).  For
-        * non-concurrent collectors, this should be NULL.
-        */
-       gboolean *want_synchronous_collection;
-
-       void* (*alloc_heap) (mword nursery_size, mword nursery_align, int nursery_bits);
-       gboolean (*is_object_live) (char *obj);
-       void* (*alloc_small_pinned_obj) (GCVTable *vtable, size_t size, gboolean has_references);
-       void* (*alloc_degraded) (GCVTable *vtable, size_t size);
-
-       SgenObjectOperations major_ops_serial;
-       SgenObjectOperations major_ops_concurrent_start;
-       SgenObjectOperations major_ops_concurrent;
-       SgenObjectOperations major_ops_concurrent_finish;
-
-       void* (*alloc_object) (GCVTable *vtable, size_t size, gboolean has_references);
-       void (*free_pinned_object) (char *obj, size_t size);
-
-       /*
-        * This is used for domain unloading, heap walking from the logging profiler, and
-        * debugging.  Can assume the world is stopped.
-        */
-       void (*iterate_objects) (IterateObjectsFlags flags, IterateObjectCallbackFunc callback, void *data);
-
-       void (*free_non_pinned_object) (char *obj, size_t size);
-       void (*pin_objects) (SgenGrayQueue *queue);
-       void (*pin_major_object) (char *obj, SgenGrayQueue *queue);
-       void (*scan_card_table) (gboolean mod_union, ScanCopyContext ctx);
-       void (*iterate_live_block_ranges) (sgen_cardtable_block_callback callback);
-       void (*update_cardtable_mod_union) (void);
-       void (*init_to_space) (void);
-       void (*sweep) (void);
-       gboolean (*have_swept) (void);
-       void (*finish_sweeping) (void);
-       void (*free_swept_blocks) (size_t allowance);
-       void (*check_scan_starts) (void);
-       void (*dump_heap) (FILE *heap_dump_file);
-       gint64 (*get_used_size) (void);
-       void (*start_nursery_collection) (void);
-       void (*finish_nursery_collection) (void);
-       void (*start_major_collection) (void);
-       void (*finish_major_collection) (ScannedObjectCounts *counts);
-       gboolean (*drain_gray_stack) (ScanCopyContext ctx);
-       gboolean (*ptr_is_in_non_pinned_space) (char *ptr, char **start);
-       gboolean (*obj_is_from_pinned_alloc) (char *obj);
-       void (*report_pinned_memory_usage) (void);
-       size_t (*get_num_major_sections) (void);
-       size_t (*get_bytes_survived_last_sweep) (void);
-       gboolean (*handle_gc_param) (const char *opt);
-       void (*print_gc_param_usage) (void);
-       void (*post_param_init) (SgenMajorCollector *collector);
-       gboolean (*is_valid_object) (char *object);
-       GCVTable* (*describe_pointer) (char *pointer);
-       guint8* (*get_cardtable_mod_union_for_object) (char *object);
-       long long (*get_and_reset_num_major_objects_marked) (void);
-       void (*count_cards) (long long *num_total_cards, long long *num_marked_cards);
-};
-
-extern SgenMajorCollector major_collector;
-
-void sgen_marksweep_init (SgenMajorCollector *collector);
-void sgen_marksweep_fixed_init (SgenMajorCollector *collector);
-void sgen_marksweep_par_init (SgenMajorCollector *collector);
-void sgen_marksweep_fixed_par_init (SgenMajorCollector *collector);
-void sgen_marksweep_conc_init (SgenMajorCollector *collector);
-SgenMajorCollector* sgen_get_major_collector (void);
-
-
-typedef struct _SgenRememberedSet {
-       void (*wbarrier_set_field) (GCObject *obj, gpointer field_ptr, GCObject* value);
-       void (*wbarrier_arrayref_copy) (gpointer dest_ptr, gpointer src_ptr, int count);
-       void (*wbarrier_value_copy) (gpointer dest, gpointer src, int count, size_t element_size);
-       void (*wbarrier_object_copy) (GCObject* obj, GCObject *src);
-       void (*wbarrier_generic_nostore) (gpointer ptr);
-       void (*record_pointer) (gpointer ptr);
-
-       void (*scan_remsets) (ScanCopyContext ctx);
-
-       void (*clear_cards) (void);
-
-       void (*finish_minor_collection) (void);
-       gboolean (*find_address) (char *addr);
-       gboolean (*find_address_with_cards) (char *cards_start, guint8 *cards, char *addr);
-} SgenRememberedSet;
-
-SgenRememberedSet *sgen_get_remset (void);
-
-/*
- * These must be kept in sync with object.h.  They're here for using SGen independently of
- * Mono.
- */
-void mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count);
-void mono_gc_wbarrier_generic_nostore (gpointer ptr);
-void mono_gc_wbarrier_generic_store (gpointer ptr, GCObject* value);
-void mono_gc_wbarrier_generic_store_atomic (gpointer ptr, GCObject *value);
-
-void sgen_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap);
-
-static inline mword
-sgen_obj_get_descriptor (char *obj)
-{
-       GCVTable *vtable = SGEN_LOAD_VTABLE_UNCHECKED (obj);
-       SGEN_ASSERT (9, !SGEN_POINTER_IS_TAGGED_ANY (vtable), "Object can't be tagged");
-       return sgen_vtable_get_descriptor (vtable);
-}
-
-static inline mword
-sgen_obj_get_descriptor_safe (char *obj)
-{
-       GCVTable *vtable = (GCVTable*)SGEN_LOAD_VTABLE (obj);
-       return sgen_vtable_get_descriptor (vtable);
-}
-
-static inline mword
-sgen_safe_object_get_size (GCObject *obj)
-{
-       char *forwarded;
-
-       if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj)))
-               obj = (GCObject*)forwarded;
-
-       return sgen_client_par_object_get_size ((GCVTable*)SGEN_LOAD_VTABLE (obj), obj);
-}
-
-static inline gboolean
-sgen_safe_object_is_small (GCObject *obj, int type)
-{
-       if (type <= DESC_TYPE_MAX_SMALL_OBJ)
-               return TRUE;
-       return SGEN_ALIGN_UP (sgen_safe_object_get_size ((GCObject*)obj)) <= SGEN_MAX_SMALL_OBJ_SIZE;
-}
-
-/*
- * This variant guarantees to return the exact size of the object
- * before alignment. Needed for canary support.
- */
-static inline guint
-sgen_safe_object_get_size_unaligned (GCObject *obj)
-{
-       char *forwarded;
-
-       if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
-               obj = (GCObject*)forwarded;
-       }
-
-       return sgen_client_slow_object_get_size ((GCVTable*)SGEN_LOAD_VTABLE (obj), obj);
-}
-
-#ifdef SGEN_CLIENT_HEADER
-#include SGEN_CLIENT_HEADER
-#else
-#include "sgen-client-mono.h"
-#endif
-
-gboolean sgen_object_is_live (void *obj);
-
-void  sgen_init_fin_weak_hash (void);
-
-/* FIXME: move the toggleref stuff out of here */
-void sgen_mark_togglerefs (char *start, char *end, ScanCopyContext ctx);
-void sgen_clear_togglerefs (char *start, char *end, ScanCopyContext ctx);
-
-void sgen_process_togglerefs (void);
-void sgen_register_test_toggleref_callback (void);
-
-void sgen_mark_bridge_object (GCObject *obj);
-void sgen_collect_bridge_objects (int generation, ScanCopyContext ctx);
-
-typedef gboolean (*SgenObjectPredicateFunc) (GCObject *obj, void *user_data);
-
-void sgen_null_links_if (SgenObjectPredicateFunc predicate, void *data, int generation);
-
-gboolean sgen_gc_is_object_ready_for_finalization (void *object);
-void sgen_gc_lock (void);
-void sgen_gc_unlock (void);
-
-void sgen_queue_finalization_entry (GCObject *obj);
-const char* sgen_generation_name (int generation);
-
-void sgen_finalize_in_range (int generation, ScanCopyContext ctx);
-void sgen_null_link_in_range (int generation, gboolean before_finalization, ScanCopyContext ctx);
-void sgen_process_fin_stage_entries (void);
-gboolean sgen_have_pending_finalizers (void);
-void sgen_object_register_for_finalization (GCObject *obj, void *user_data);
-
-int sgen_gather_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, GCObject **out_array, int out_size);
-void sgen_remove_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, int generation);
-
-void sgen_process_dislink_stage_entries (void);
-void sgen_register_disappearing_link (GCObject *obj, void **link, gboolean track, gboolean in_gc);
-
-GCObject* sgen_weak_link_get (void **link_addr);
-
-gboolean sgen_drain_gray_stack (int max_objs, ScanCopyContext ctx);
-
-enum {
-       SPACE_NURSERY,
-       SPACE_MAJOR,
-       SPACE_LOS
-};
-
-void sgen_pin_object (void *object, SgenGrayQueue *queue);
-void sgen_set_pinned_from_failed_allocation (mword objsize);
-
-void sgen_ensure_free_space (size_t size);
-void sgen_gc_collect (int generation);
-void sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish);
-
-int sgen_gc_collection_count (int generation);
-/* FIXME: what exactly does this return? */
-size_t sgen_gc_get_used_size (void);
-size_t sgen_gc_get_total_heap_allocation (void);
-
-/* STW */
-
-typedef struct {
-       int generation;
-       const char *reason;
-       gboolean is_overflow;
-       gint64 total_time;
-       gint64 stw_time;
-       gint64 bridge_time;
-} GGTimingInfo;
-
-void sgen_stop_world (int generation);
-void sgen_restart_world (int generation, GGTimingInfo *timing);
-gboolean sgen_is_world_stopped (void);
-
-gboolean sgen_set_allow_synchronous_major (gboolean flag);
-
-/* LOS */
-
-typedef struct _LOSObject LOSObject;
-struct _LOSObject {
-       LOSObject *next;
-       mword size; /* this is the object size, lowest bit used for pin/mark */
-       guint8 * volatile cardtable_mod_union; /* only used by the concurrent collector */
-#if SIZEOF_VOID_P < 8
-       mword dummy;            /* to align object to sizeof (double) */
-#endif
-       char data [MONO_ZERO_LEN_ARRAY];
-};
-
-extern LOSObject *los_object_list;
-extern mword los_memory_usage;
-
-void sgen_los_free_object (LOSObject *obj);
-void* sgen_los_alloc_large_inner (GCVTable *vtable, size_t size);
-void sgen_los_sweep (void);
-gboolean sgen_ptr_is_in_los (char *ptr, char **start);
-void sgen_los_iterate_objects (IterateObjectCallbackFunc cb, void *user_data);
-void sgen_los_iterate_live_block_ranges (sgen_cardtable_block_callback callback);
-void sgen_los_scan_card_table (gboolean mod_union, ScanCopyContext ctx);
-void sgen_los_update_cardtable_mod_union (void);
-void sgen_los_count_cards (long long *num_total_cards, long long *num_marked_cards);
-gboolean sgen_los_is_valid_object (char *object);
-gboolean mono_sgen_los_describe_pointer (char *ptr);
-LOSObject* sgen_los_header_for_object (char *data);
-mword sgen_los_object_size (LOSObject *obj);
-void sgen_los_pin_object (char *obj);
-gboolean sgen_los_object_is_pinned (char *obj);
-void sgen_los_mark_mod_union_card (GCObject *mono_obj, void **ptr);
-
-
-/* nursery allocator */
-
-void sgen_clear_nursery_fragments (void);
-void sgen_nursery_allocator_prepare_for_pinning (void);
-void sgen_nursery_allocator_set_nursery_bounds (char *nursery_start, char *nursery_end);
-mword sgen_build_nursery_fragments (GCMemSection *nursery_section, SgenGrayQueue *unpin_queue);
-void sgen_init_nursery_allocator (void);
-void sgen_nursery_allocator_init_heavy_stats (void);
-void sgen_init_allocator (void);
-char* sgen_nursery_alloc_get_upper_alloc_bound (void);
-void* sgen_nursery_alloc (size_t size);
-void* sgen_nursery_alloc_range (size_t size, size_t min_size, size_t *out_alloc_size);
-gboolean sgen_can_alloc_size (size_t size);
-void sgen_nursery_retire_region (void *address, ptrdiff_t size);
-
-void sgen_nursery_alloc_prepare_for_minor (void);
-void sgen_nursery_alloc_prepare_for_major (void);
-
-char* sgen_alloc_for_promotion (char *obj, size_t objsize, gboolean has_references);
-
-void* sgen_alloc_obj_nolock (GCVTable *vtable, size_t size);
-void* sgen_try_alloc_obj_nolock (GCVTable *vtable, size_t size);
-
-/* Threads */
-
-void* sgen_thread_register (SgenThreadInfo* info, void *addr);
-void sgen_thread_unregister (SgenThreadInfo *p);
-
-/* Finalization/ephemeron support */
-
-static inline gboolean
-sgen_major_is_object_alive (void *object)
-{
-       mword objsize;
-
-       /* Oldgen objects can be pinned and forwarded too */
-       if (SGEN_OBJECT_IS_PINNED (object) || SGEN_OBJECT_IS_FORWARDED (object))
-               return TRUE;
-
-       /*
-        * FIXME: major_collector.is_object_live() also calculates the
-        * size.  Avoid the double calculation.
-        */
-       objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((GCObject*)object));
-       if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
-               return sgen_los_object_is_pinned (object);
-
-       return major_collector.is_object_live (object);
-}
-
-/*
- * This function returns true if @object is either alive or it belongs to the old gen
- * and we're currently doing a minor collection.
- */
-static inline int
-sgen_is_object_alive_for_current_gen (char *object)
-{
-       if (sgen_ptr_in_nursery (object))
-               return sgen_nursery_is_object_alive (object);
-
-       if (current_collection_generation == GENERATION_NURSERY)
-               return TRUE;
-
-       return sgen_major_is_object_alive (object);
-}
-
-int sgen_gc_invoke_finalizers (void);
-
-/* Other globals */
-
-extern GCMemSection *nursery_section;
-extern guint32 collect_before_allocs;
-extern guint32 verify_before_allocs;
-extern gboolean has_per_allocation_action;
-extern size_t degraded_mode;
-extern int default_nursery_size;
-extern guint32 tlab_size;
-extern NurseryClearPolicy nursery_clear_policy;
-extern gboolean sgen_try_free_some_memory;
-
-extern LOCK_DECLARE (gc_mutex);
-
-/* Nursery helpers. */
-
-static inline void
-sgen_set_nursery_scan_start (char *p)
-{
-       size_t idx = (p - (char*)nursery_section->data) / SGEN_SCAN_START_SIZE;
-       char *old = nursery_section->scan_starts [idx];
-       if (!old || old > p)
-               nursery_section->scan_starts [idx] = p;
-}
-
-
-/* Object Allocation */
-
-typedef enum {
-       ATYPE_NORMAL,
-       ATYPE_VECTOR,
-       ATYPE_SMALL,
-       ATYPE_STRING,
-       ATYPE_NUM
-} SgenAllocatorType;
-
-void sgen_init_tlab_info (SgenThreadInfo* info);
-void sgen_clear_tlabs (void);
-
-void* sgen_alloc_obj (GCVTable *vtable, size_t size);
-void* sgen_alloc_obj_pinned (GCVTable *vtable, size_t size);
-void* sgen_alloc_obj_mature (GCVTable *vtable, size_t size);
-
-/* Debug support */
-
-void sgen_check_consistency (void);
-void sgen_check_mod_union_consistency (void);
-void sgen_check_major_refs (void);
-void sgen_check_whole_heap (gboolean allow_missing_pinning);
-void sgen_check_whole_heap_stw (void);
-void sgen_check_objref (char *obj);
-void sgen_check_heap_marked (gboolean nursery_must_be_pinned);
-void sgen_check_nursery_objects_pinned (gboolean pinned);
-void sgen_check_for_xdomain_refs (void);
-char* sgen_find_object_for_ptr (char *ptr);
-
-void mono_gc_scan_for_specific_ref (GCObject *key, gboolean precise);
-
-void sgen_debug_enable_heap_dump (const char *filename);
-void sgen_debug_dump_heap (const char *type, int num, const char *reason);
-
-void sgen_debug_verify_nursery (gboolean do_dump_nursery_content);
-void sgen_debug_check_nursery_is_clean (void);
-
-/* Write barrier support */
-
-/*
- * This causes the compile to extend the liveness of 'v' till the call to dummy_use
- */
-static inline void
-sgen_dummy_use (gpointer v) {
-#if defined(__GNUC__)
-       __asm__ volatile ("" : "=r"(v) : "r"(v));
-#elif defined(_MSC_VER)
-       static volatile gpointer ptr;
-       ptr = v;
-#else
-#error "Implement sgen_dummy_use for your compiler"
-#endif
-}
-
-/* Environment variable parsing */
-
-#define MONO_GC_PARAMS_NAME    "MONO_GC_PARAMS"
-#define MONO_GC_DEBUG_NAME     "MONO_GC_DEBUG"
-
-void sgen_env_var_error (const char *env_var, const char *fallback, const char *description_format, ...);
-
-/* Utilities */
-
-void sgen_qsort (void *base, size_t nel, size_t width, int (*compar) (const void*, const void*));
-gint64 sgen_timestamp (void);
-
-/*
- * Canary (guard word) support
- * Notes:
- * - CANARY_SIZE must be multiple of word size in bytes
- * - Canary space is not included on checks against SGEN_MAX_SMALL_OBJ_SIZE
- */
-gboolean nursery_canaries_enabled (void);
-
-#define CANARY_SIZE 8
-#define CANARY_STRING  "koupepia"
-
-#define CANARIFY_SIZE(size) if (nursery_canaries_enabled ()) { \
-                       size = size + CANARY_SIZE;      \
-               }
-
-#define CANARIFY_ALLOC(addr,size) if (nursery_canaries_enabled ()) {   \
-                               memcpy ((char*) (addr) + (size), CANARY_STRING, CANARY_SIZE);   \
-                       }
-
-#define CANARY_VALID(addr) (strncmp ((char*) (addr), CANARY_STRING, CANARY_SIZE) == 0)
-
-#define CHECK_CANARY_FOR_OBJECT(addr) if (nursery_canaries_enabled ()) {       \
-                               char* canary_ptr = (char*) (addr) + sgen_safe_object_get_size_unaligned ((GCObject *) (addr));  \
-                               if (!CANARY_VALID(canary_ptr)) {        \
-                                       char canary_copy[CANARY_SIZE +1];       \
-                                       strncpy (canary_copy, canary_ptr, CANARY_SIZE); \
-                                       canary_copy[CANARY_SIZE] = 0;   \
-                                       g_error ("CORRUPT CANARY:\naddr->%p\ntype->%s\nexcepted->'%s'\nfound->'%s'\n", (char*) addr, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE ((addr))), CANARY_STRING, canary_copy);      \
-                               } }
-
-#endif /* HAVE_SGEN_GC */
-
-#endif /* __MONO_SGENGC_H__ */
diff --git a/mono/metadata/sgen-gray.c b/mono/metadata/sgen-gray.c
deleted file mode 100644 (file)
index 6fec939..0000000
+++ /dev/null
@@ -1,383 +0,0 @@
-/*
- * sgen-gray.c: Gray queue management.
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include "config.h"
-#ifdef HAVE_SGEN_GC
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/sgen-protocol.h"
-
-#ifdef HEAVY_STATISTICS
-guint64 stat_gray_queue_section_alloc;
-guint64 stat_gray_queue_section_free;
-guint64 stat_gray_queue_enqueue_fast_path;
-guint64 stat_gray_queue_dequeue_fast_path;
-guint64 stat_gray_queue_enqueue_slow_path;
-guint64 stat_gray_queue_dequeue_slow_path;
-#endif
-
-#define GRAY_QUEUE_LENGTH_LIMIT        64
-
-#ifdef SGEN_CHECK_GRAY_OBJECT_SECTIONS
-#define STATE_TRANSITION(s,o,n)        do {                                    \
-               int __old = (o);                                        \
-               if (InterlockedCompareExchange ((volatile int*)&(s)->state, (n), __old) != __old) \
-                       g_assert_not_reached ();                        \
-       } while (0)
-#define STATE_SET(s,v)         (s)->state = (v)
-#define STATE_ASSERT(s,v)      g_assert ((s)->state == (v))
-#else
-#define STATE_TRANSITION(s,o,n)
-#define STATE_SET(s,v)
-#define STATE_ASSERT(s,v)
-#endif
-
-void
-sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue)
-{
-       GrayQueueSection *section;
-
-       HEAVY_STAT (stat_gray_queue_section_alloc ++);
-
-       if (queue->alloc_prepare_func)
-               queue->alloc_prepare_func (queue);
-
-       if (queue->free_list) {
-               /* Use the previously allocated queue sections if possible */
-               section = queue->free_list;
-               queue->free_list = section->next;
-               STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
-       } else {
-               /* Allocate a new section */
-               section = sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
-               STATE_SET (section, GRAY_QUEUE_SECTION_STATE_FLOATING);
-       }
-
-       section->size = SGEN_GRAY_QUEUE_SECTION_SIZE;
-
-       STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
-
-       /* Link it with the others */
-       section->next = queue->first;
-       queue->first = section;
-       queue->cursor = section->entries - 1;
-}
-
-void
-sgen_gray_object_free_queue_section (GrayQueueSection *section)
-{
-       HEAVY_STAT (stat_gray_queue_section_free ++);
-
-       STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_FREED);
-       sgen_free_internal (section, INTERNAL_MEM_GRAY_QUEUE);
-}
-
-/*
- * The following two functions are called in the inner loops of the
- * collector, so they need to be as fast as possible.  We have macros
- * for them in sgen-gc.h.
- */
-
-void
-sgen_gray_object_enqueue (SgenGrayQueue *queue, char *obj, mword desc)
-{
-       GrayQueueEntry entry = SGEN_GRAY_QUEUE_ENTRY (obj, desc);
-
-       HEAVY_STAT (stat_gray_queue_enqueue_slow_path ++);
-
-       SGEN_ASSERT (9, obj, "enqueueing a null object");
-       //sgen_check_objref (obj);
-
-#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
-       if (queue->enqueue_check_func)
-               queue->enqueue_check_func (obj);
-#endif
-
-       if (G_UNLIKELY (!queue->first || queue->cursor == GRAY_LAST_CURSOR_POSITION (queue->first))) {
-               if (queue->first) {
-                       /* Set the current section size back to default, might have been changed by sgen_gray_object_dequeue_section */
-                       queue->first->size = SGEN_GRAY_QUEUE_SECTION_SIZE;
-               }
-
-               sgen_gray_object_alloc_queue_section (queue);
-       }
-       STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
-       SGEN_ASSERT (9, queue->cursor <= GRAY_LAST_CURSOR_POSITION (queue->first), "gray queue %p overflow, first %p, cursor %p", queue, queue->first, queue->cursor);
-       *++queue->cursor = entry;
-
-#ifdef SGEN_HEAVY_BINARY_PROTOCOL
-       binary_protocol_gray_enqueue (queue, queue->cursor, obj);
-#endif
-}
-
-GrayQueueEntry
-sgen_gray_object_dequeue (SgenGrayQueue *queue)
-{
-       GrayQueueEntry entry;
-
-       HEAVY_STAT (stat_gray_queue_dequeue_slow_path ++);
-
-       if (sgen_gray_object_queue_is_empty (queue)) {
-               entry.obj = NULL;
-               return entry;
-       }
-
-       STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
-       SGEN_ASSERT (9, queue->cursor >= GRAY_FIRST_CURSOR_POSITION (queue->first), "gray queue %p underflow", queue);
-
-       entry = *queue->cursor--;
-
-#ifdef SGEN_HEAVY_BINARY_PROTOCOL
-       binary_protocol_gray_dequeue (queue, queue->cursor + 1, entry.obj);
-#endif
-
-       if (G_UNLIKELY (queue->cursor < GRAY_FIRST_CURSOR_POSITION (queue->first))) {
-               GrayQueueSection *section = queue->first;
-               queue->first = section->next;
-               section->next = queue->free_list;
-
-               STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FREE_LIST);
-
-               queue->free_list = section;
-               queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;
-       }
-
-       return entry;
-}
-
-GrayQueueSection*
-sgen_gray_object_dequeue_section (SgenGrayQueue *queue)
-{
-       GrayQueueSection *section;
-
-       if (!queue->first)
-               return NULL;
-
-       section = queue->first;
-       queue->first = section->next;
-
-       section->next = NULL;
-       section->size = queue->cursor - section->entries + 1;
-
-       queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;
-
-       STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);
-
-       return section;
-}
-
-void
-sgen_gray_object_enqueue_section (SgenGrayQueue *queue, GrayQueueSection *section)
-{
-       STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
-
-       if (queue->first)
-               queue->first->size = queue->cursor - queue->first->entries + 1;
-
-       section->next = queue->first;
-       queue->first = section;
-       queue->cursor = queue->first->entries + queue->first->size - 1;
-#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
-       if (queue->enqueue_check_func) {
-               int i;
-               for (i = 0; i < section->size; ++i)
-                       queue->enqueue_check_func (section->entries [i].obj);
-       }
-#endif
-}
-
-void
-sgen_gray_object_queue_trim_free_list (SgenGrayQueue *queue)
-{
-       GrayQueueSection *section, *next;
-       int i = 0;
-       for (section = queue->free_list; section && i < GRAY_QUEUE_LENGTH_LIMIT - 1; section = section->next) {
-               STATE_ASSERT (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST);
-               i ++;
-       }
-       if (!section)
-               return;
-       while (section->next) {
-               next = section->next;
-               section->next = next->next;
-               STATE_TRANSITION (next, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
-               sgen_gray_object_free_queue_section (next);
-       }
-}
-
-void
-sgen_gray_object_queue_init (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func)
-{
-       g_assert (sgen_gray_object_queue_is_empty (queue));
-
-       queue->alloc_prepare_func = NULL;
-       queue->alloc_prepare_data = NULL;
-#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
-       queue->enqueue_check_func = enqueue_check_func;
-#endif
-
-       /* Free the extra sections allocated during the last collection */
-       sgen_gray_object_queue_trim_free_list (queue);
-}
-
-static void
-invalid_prepare_func (SgenGrayQueue *queue)
-{
-       g_assert_not_reached ();
-}
-
-void
-sgen_gray_object_queue_init_invalid (SgenGrayQueue *queue)
-{
-       sgen_gray_object_queue_init (queue, NULL);
-       queue->alloc_prepare_func = invalid_prepare_func;
-       queue->alloc_prepare_data = NULL;
-}
-
-void
-sgen_gray_queue_set_alloc_prepare (SgenGrayQueue *queue, GrayQueueAllocPrepareFunc alloc_prepare_func, void *data)
-{
-       SGEN_ASSERT (0, !queue->alloc_prepare_func && !queue->alloc_prepare_data, "Can't set gray queue alloc-prepare twice");
-       queue->alloc_prepare_func = alloc_prepare_func;
-       queue->alloc_prepare_data = data;
-}
-
-void
-sgen_gray_object_queue_init_with_alloc_prepare (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func,
-               GrayQueueAllocPrepareFunc alloc_prepare_func, void *data)
-{
-       sgen_gray_object_queue_init (queue, enqueue_check_func);
-       sgen_gray_queue_set_alloc_prepare (queue, alloc_prepare_func, data);
-}
-
-void
-sgen_gray_object_queue_deinit (SgenGrayQueue *queue)
-{
-       g_assert (!queue->first);
-       while (queue->free_list) {
-               GrayQueueSection *next = queue->free_list->next;
-               STATE_TRANSITION (queue->free_list, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
-               sgen_gray_object_free_queue_section (queue->free_list);
-               queue->free_list = next;
-       }
-}
-
-void
-sgen_gray_object_queue_disable_alloc_prepare (SgenGrayQueue *queue)
-{
-       queue->alloc_prepare_func = NULL;
-       queue->alloc_prepare_data = NULL;
-}
-
-static void
-lock_section_queue (SgenSectionGrayQueue *queue)
-{
-       if (!queue->locked)
-               return;
-
-       mono_mutex_lock (&queue->lock);
-}
-
-static void
-unlock_section_queue (SgenSectionGrayQueue *queue)
-{
-       if (!queue->locked)
-               return;
-
-       mono_mutex_unlock (&queue->lock);
-}
-
-void
-sgen_section_gray_queue_init (SgenSectionGrayQueue *queue, gboolean locked, GrayQueueEnqueueCheckFunc enqueue_check_func)
-{
-       g_assert (sgen_section_gray_queue_is_empty (queue));
-
-       queue->locked = locked;
-       if (locked) {
-               mono_mutex_init_recursive (&queue->lock);
-       }
-
-#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
-       queue->enqueue_check_func = enqueue_check_func;
-#endif
-}
-
-gboolean
-sgen_section_gray_queue_is_empty (SgenSectionGrayQueue *queue)
-{
-       return !queue->first;
-}
-
-GrayQueueSection*
-sgen_section_gray_queue_dequeue (SgenSectionGrayQueue *queue)
-{
-       GrayQueueSection *section;
-
-       lock_section_queue (queue);
-
-       if (queue->first) {
-               section = queue->first;
-               queue->first = section->next;
-
-               STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);
-
-               section->next = NULL;
-       } else {
-               section = NULL;
-       }
-
-       unlock_section_queue (queue);
-
-       return section;
-}
-
-void
-sgen_section_gray_queue_enqueue (SgenSectionGrayQueue *queue, GrayQueueSection *section)
-{
-       STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
-
-       lock_section_queue (queue);
-
-       section->next = queue->first;
-       queue->first = section;
-#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
-       if (queue->enqueue_check_func) {
-               int i;
-               for (i = 0; i < section->size; ++i)
-                       queue->enqueue_check_func (section->entries [i].obj);
-       }
-#endif
-
-       unlock_section_queue (queue);
-}
-
-void
-sgen_init_gray_queues (void)
-{
-#ifdef HEAVY_STATISTICS
-       mono_counters_register ("Gray Queue alloc section", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_section_alloc);
-       mono_counters_register ("Gray Queue free section", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_section_free);
-       mono_counters_register ("Gray Queue enqueue fast path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_enqueue_fast_path);
-       mono_counters_register ("Gray Queue dequeue fast path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_dequeue_fast_path);
-       mono_counters_register ("Gray Queue enqueue slow path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_enqueue_slow_path);
-       mono_counters_register ("Gray Queue dequeue slow path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_dequeue_slow_path);
-#endif
-}
-#endif
diff --git a/mono/metadata/sgen-gray.h b/mono/metadata/sgen-gray.h
deleted file mode 100644 (file)
index de98fb1..0000000
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * sgen-gray.h: Gray queue management.
- *
- * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#ifndef __MONO_SGEN_GRAY_H__
-#define __MONO_SGEN_GRAY_H__
-
-#include "mono/metadata/sgen-protocol.h"
-
-/*
- * This gray queue has to be as optimized as possible, because it is in the core of
- * the mark/copy phase of the garbage collector. The memory access has then to be as
- * cache friendly as possible. That's why we use a cursor based implementation.
- * 
- * This simply consist in maintaining a pointer to the current element in the
- * queue. In addition to using this cursor, we use a simple linked list of arrays,
- * called sections, so that we have the cache friendliness of arrays without having
- * the cost of memory reallocation of a dynaic array, not the cost of memory
- * indirection of a linked list.
- * 
- * This implementation also allows the dequeuing of a whole section at a time. This is
- * for example used in the parallel GC because it would be too costly to take one element 
- * at a time. This imply the main constraint that, because we don't carry the cursor
- * with the section, we still have to store the index of the last element. This is done 
- * through the 'size' field on the section, which default value is it's maximum value
- * SGEN_GRAY_QUEUE_SECTION_SIZE. This field is updated in multiple cases :
- *  - section allocation : default value
- *  - object push : default value if we fill the current queue first
- *  - section dequeue : position of the cursor in the dequeued section
- *  - section enqueue : position of the cursor in the previously first section in the queue
- * 
- * The previous implementation was an index based access where we would store the index
- * of the last element in the section. This was less efficient because we would have
- * to make 1 memory access for the index value, 1 for the base address of the objects
- * array and another 1 for the actual value in the array.
- */
-
-/* SGEN_GRAY_QUEUE_HEADER_SIZE is number of machine words */
-#ifdef SGEN_CHECK_GRAY_OBJECT_SECTIONS
-#define SGEN_GRAY_QUEUE_HEADER_SIZE    4
-#else
-#define SGEN_GRAY_QUEUE_HEADER_SIZE    2
-#endif
-
-#define SGEN_GRAY_QUEUE_SECTION_SIZE   (128 - SGEN_GRAY_QUEUE_HEADER_SIZE)
-
-#ifdef SGEN_CHECK_GRAY_OBJECT_SECTIONS
-typedef enum {
-       GRAY_QUEUE_SECTION_STATE_FLOATING,
-       GRAY_QUEUE_SECTION_STATE_ENQUEUED,
-       GRAY_QUEUE_SECTION_STATE_FREE_LIST,
-       GRAY_QUEUE_SECTION_STATE_FREED
-} GrayQueueSectionState;
-#endif
-
-typedef struct _GrayQueueEntry GrayQueueEntry;
-struct _GrayQueueEntry {
-       char *obj;
-       mword desc;
-};
-
-#define SGEN_GRAY_QUEUE_ENTRY(obj,desc)        { (obj), (desc) }
-
-/*
- * This is a stack now instead of a queue, so the most recently added items are removed
- * first, improving cache locality, and keeping the stack size manageable.
- */
-typedef struct _GrayQueueSection GrayQueueSection;
-struct _GrayQueueSection {
-#ifdef SGEN_CHECK_GRAY_OBJECT_SECTIONS
-       /*
-        * The dummy is here so that the state doesn't get overwritten
-        * by the internal allocator once the section is freed.
-        */
-       int dummy;
-       GrayQueueSectionState state;
-#endif
-       int size;
-       GrayQueueSection *next;
-       GrayQueueEntry entries [SGEN_GRAY_QUEUE_SECTION_SIZE];
-};
-
-typedef struct _SgenGrayQueue SgenGrayQueue;
-
-typedef void (*GrayQueueAllocPrepareFunc) (SgenGrayQueue*);
-typedef void (*GrayQueueEnqueueCheckFunc) (char*);
-
-struct _SgenGrayQueue {
-       GrayQueueEntry *cursor;
-       GrayQueueSection *first;
-       GrayQueueSection *free_list;
-       GrayQueueAllocPrepareFunc alloc_prepare_func;
-#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
-       GrayQueueEnqueueCheckFunc enqueue_check_func;
-#endif
-       void *alloc_prepare_data;
-};
-
-typedef struct _SgenSectionGrayQueue SgenSectionGrayQueue;
-
-struct _SgenSectionGrayQueue {
-       GrayQueueSection *first;
-       gboolean locked;
-       mono_mutex_t lock;
-#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
-       GrayQueueEnqueueCheckFunc enqueue_check_func;
-#endif
-};
-
-#define GRAY_LAST_CURSOR_POSITION(s) ((s)->entries + SGEN_GRAY_QUEUE_SECTION_SIZE - 1)
-#define GRAY_FIRST_CURSOR_POSITION(s) ((s)->entries)
-
-#ifdef HEAVY_STATISTICS
-extern guint64 stat_gray_queue_section_alloc;
-extern guint64 stat_gray_queue_section_free;
-extern guint64 stat_gray_queue_enqueue_fast_path;
-extern guint64 stat_gray_queue_dequeue_fast_path;
-extern guint64 stat_gray_queue_enqueue_slow_path;
-extern guint64 stat_gray_queue_dequeue_slow_path;
-#endif
-
-void sgen_init_gray_queues (void);
-
-void sgen_gray_object_enqueue (SgenGrayQueue *queue, char *obj, mword desc);
-GrayQueueEntry sgen_gray_object_dequeue (SgenGrayQueue *queue);
-GrayQueueSection* sgen_gray_object_dequeue_section (SgenGrayQueue *queue);
-void sgen_gray_object_enqueue_section (SgenGrayQueue *queue, GrayQueueSection *section);
-void sgen_gray_object_queue_trim_free_list (SgenGrayQueue *queue);
-void sgen_gray_object_queue_init (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func);
-void sgen_gray_object_queue_init_invalid (SgenGrayQueue *queue);
-void sgen_gray_queue_set_alloc_prepare (SgenGrayQueue *queue, GrayQueueAllocPrepareFunc alloc_prepare_func, void *data);
-void sgen_gray_object_queue_init_with_alloc_prepare (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func,
-               GrayQueueAllocPrepareFunc func, void *data);
-void sgen_gray_object_queue_deinit (SgenGrayQueue *queue);
-void sgen_gray_object_queue_disable_alloc_prepare (SgenGrayQueue *queue);
-void sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue);
-void sgen_gray_object_free_queue_section (GrayQueueSection *section);
-
-void sgen_section_gray_queue_init (SgenSectionGrayQueue *queue, gboolean locked,
-               GrayQueueEnqueueCheckFunc enqueue_check_func);
-gboolean sgen_section_gray_queue_is_empty (SgenSectionGrayQueue *queue);
-GrayQueueSection* sgen_section_gray_queue_dequeue (SgenSectionGrayQueue *queue);
-void sgen_section_gray_queue_enqueue (SgenSectionGrayQueue *queue, GrayQueueSection *section);
-
-gboolean sgen_gray_object_fill_prefetch (SgenGrayQueue *queue);
-
-static inline gboolean
-sgen_gray_object_queue_is_empty (SgenGrayQueue *queue)
-{
-       return queue->first == NULL;
-}
-
-static inline MONO_ALWAYS_INLINE void
-GRAY_OBJECT_ENQUEUE (SgenGrayQueue *queue, char* obj, mword desc)
-{
-#if SGEN_MAX_DEBUG_LEVEL >= 9
-       sgen_gray_object_enqueue (queue, obj, desc);
-#else
-       if (G_UNLIKELY (!queue->first || queue->cursor == GRAY_LAST_CURSOR_POSITION (queue->first))) {
-               sgen_gray_object_enqueue (queue, obj, desc);
-       } else {
-               GrayQueueEntry entry = SGEN_GRAY_QUEUE_ENTRY (obj, desc);
-
-               HEAVY_STAT (stat_gray_queue_enqueue_fast_path ++);
-
-               *++queue->cursor = entry;
-#ifdef SGEN_HEAVY_BINARY_PROTOCOL
-               binary_protocol_gray_enqueue (queue, queue->cursor, obj);
-#endif
-       }
-#endif
-}
-
-static inline MONO_ALWAYS_INLINE void
-GRAY_OBJECT_DEQUEUE (SgenGrayQueue *queue, char** obj, mword *desc)
-{
-       GrayQueueEntry entry;
-#if SGEN_MAX_DEBUG_LEVEL >= 9
-       entry = sgen_gray_object_dequeue (queue);
-       *obj = entry.obj;
-       *desc = entry.desc;
-#else
-       if (!queue->first) {
-               HEAVY_STAT (stat_gray_queue_dequeue_fast_path ++);
-
-               *obj = NULL;
-       } else if (G_UNLIKELY (queue->cursor == GRAY_FIRST_CURSOR_POSITION (queue->first))) {
-               entry = sgen_gray_object_dequeue (queue);
-               *obj = entry.obj;
-               *desc = entry.desc;
-       } else {
-               HEAVY_STAT (stat_gray_queue_dequeue_fast_path ++);
-
-               entry = *queue->cursor--;
-               *obj = entry.obj;
-               *desc = entry.desc;
-#ifdef SGEN_HEAVY_BINARY_PROTOCOL
-               binary_protocol_gray_dequeue (queue, queue->cursor + 1, *obj);
-#endif
-       }
-#endif
-}
-
-#endif
diff --git a/mono/metadata/sgen-hash-table.c b/mono/metadata/sgen-hash-table.c
deleted file mode 100644 (file)
index 0225d48..0000000
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * sgen-hash-table.c
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "config.h"
-
-#ifdef HAVE_SGEN_GC
-
-#include <string.h>
-
-#include <mono/metadata/sgen-gc.h>
-#include <mono/metadata/sgen-hash-table.h>
-
-#ifdef HEAVY_STATISTICS
-static guint64 stat_lookups;
-static guint64 stat_lookup_iterations;
-static guint64 stat_lookup_max_iterations;
-#endif
-
-static void
-rehash (SgenHashTable *hash_table)
-{
-       SgenHashTableEntry **old_hash = hash_table->table;
-       guint old_hash_size = hash_table->size;
-       guint i, hash, new_size;
-       SgenHashTableEntry **new_hash;
-       SgenHashTableEntry *entry, *next;
-
-       if (!old_hash) {
-               sgen_register_fixed_internal_mem_type (hash_table->entry_mem_type,
-                               sizeof (SgenHashTableEntry*) + sizeof (gpointer) + hash_table->data_size);
-               new_size = 13;
-       } else {
-               new_size = g_spaced_primes_closest (hash_table->num_entries);
-       }
-
-       new_hash = sgen_alloc_internal_dynamic (new_size * sizeof (SgenHashTableEntry*), hash_table->table_mem_type, TRUE);
-       for (i = 0; i < old_hash_size; ++i) {
-               for (entry = old_hash [i]; entry; entry = next) {
-                       hash = hash_table->hash_func (entry->key) % new_size;
-                       next = entry->next;
-                       entry->next = new_hash [hash];
-                       new_hash [hash] = entry;
-               }
-       }
-       sgen_free_internal_dynamic (old_hash, old_hash_size * sizeof (SgenHashTableEntry*), hash_table->table_mem_type);
-       hash_table->table = new_hash;
-       hash_table->size = new_size;
-}
-
-static void
-rehash_if_necessary (SgenHashTable *hash_table)
-{
-       if (hash_table->num_entries >= hash_table->size * 2)
-               rehash (hash_table);
-
-       SGEN_ASSERT (1, hash_table->size, "rehash guarantees size > 0");
-}
-
-static SgenHashTableEntry*
-lookup (SgenHashTable *hash_table, gpointer key, guint *_hash)
-{
-       SgenHashTableEntry *entry;
-       guint hash;
-       GEqualFunc equal = hash_table->equal_func;
-#ifdef HEAVY_STATISTICS
-       guint64 iterations = 0;
-       ++stat_lookups;
-#endif
-
-       if (!hash_table->size)
-               return NULL;
-
-       hash = hash_table->hash_func (key) % hash_table->size;
-       if (_hash)
-               *_hash = hash;
-
-       for (entry = hash_table->table [hash]; entry; entry = entry->next) {
-#ifdef HEAVY_STATISTICS
-               ++stat_lookup_iterations;
-               ++iterations;
-               if (iterations > stat_lookup_max_iterations)
-                       stat_lookup_max_iterations = iterations;
-#endif
-               if ((equal && equal (entry->key, key)) || (!equal && entry->key == key))
-                       return entry;
-       }
-       return NULL;
-}
-
-gpointer
-sgen_hash_table_lookup (SgenHashTable *hash_table, gpointer key)
-{
-       SgenHashTableEntry *entry = lookup (hash_table, key, NULL);
-       if (!entry)
-               return NULL;
-       return entry->data;
-}
-
-gboolean
-sgen_hash_table_replace (SgenHashTable *hash_table, gpointer key, gpointer new_value, gpointer old_value)
-{
-       guint hash;
-       SgenHashTableEntry *entry;
-
-       rehash_if_necessary (hash_table);
-       entry = lookup (hash_table, key, &hash);
-
-       if (entry) {
-               if (old_value)
-                       memcpy (old_value, entry->data, hash_table->data_size); 
-               memcpy (entry->data, new_value, hash_table->data_size);
-               return FALSE;
-       }
-
-       entry = sgen_alloc_internal (hash_table->entry_mem_type);
-       entry->key = key;
-       memcpy (entry->data, new_value, hash_table->data_size);
-
-       entry->next = hash_table->table [hash];
-       hash_table->table [hash] = entry;
-
-       hash_table->num_entries++;
-
-       return TRUE;
-}
-
-gboolean
-sgen_hash_table_set_value (SgenHashTable *hash_table, gpointer key, gpointer new_value, gpointer old_value)
-{
-       guint hash;
-       SgenHashTableEntry *entry;
-
-       entry = lookup (hash_table, key, &hash);
-
-       if (entry) {
-               if (old_value)
-                       memcpy (old_value, entry->data, hash_table->data_size);
-               memcpy (entry->data, new_value, hash_table->data_size);
-               return TRUE;
-       }
-
-       return FALSE;
-}
-
-gboolean
-sgen_hash_table_set_key (SgenHashTable *hash_table, gpointer old_key, gpointer new_key)
-{
-       guint hash;
-       SgenHashTableEntry *entry;
-
-       entry = lookup (hash_table, old_key, &hash);
-
-       if (entry) {
-               entry->key = new_key;
-               return TRUE;
-       }
-
-       return FALSE;
-}
-
-gboolean
-sgen_hash_table_remove (SgenHashTable *hash_table, gpointer key, gpointer data_return)
-{
-       SgenHashTableEntry *entry, *prev;
-       guint hash;
-       GEqualFunc equal = hash_table->equal_func;
-
-       rehash_if_necessary (hash_table);
-       hash = hash_table->hash_func (key) % hash_table->size;
-
-       prev = NULL;
-       for (entry = hash_table->table [hash]; entry; entry = entry->next) {
-               if ((equal && equal (entry->key, key)) || (!equal && entry->key == key)) {
-                       if (prev)
-                               prev->next = entry->next;
-                       else
-                               hash_table->table [hash] = entry->next;
-
-                       hash_table->num_entries--;
-
-                       if (data_return)
-                               memcpy (data_return, entry->data, hash_table->data_size);
-
-                       sgen_free_internal (entry, hash_table->entry_mem_type);
-
-                       return TRUE;
-               }
-               prev = entry;
-       }
-
-       return FALSE;
-}
-
-void
-sgen_hash_table_clean (SgenHashTable *hash_table)
-{
-       guint i;
-
-       if (!hash_table->size) {
-               SGEN_ASSERT (1, !hash_table->table, "clean should reset hash_table->table");
-               SGEN_ASSERT (1, !hash_table->num_entries, "clean should reset hash_table->num_entries");
-               return;
-       }
-
-       for (i = 0; i < hash_table->size; ++i) {
-               SgenHashTableEntry *entry = hash_table->table [i];
-               while (entry) {
-                       SgenHashTableEntry *next = entry->next;
-                       sgen_free_internal (entry, hash_table->entry_mem_type);
-                       entry = next;
-               }
-       }
-
-       sgen_free_internal_dynamic (hash_table->table, hash_table->size * sizeof (SgenHashTableEntry*), hash_table->table_mem_type);
-
-       hash_table->table = NULL;
-       hash_table->size = 0;
-       hash_table->num_entries = 0;
-}
-
-void
-sgen_init_hash_table (void)
-{
-#ifdef HEAVY_STATISTICS
-       mono_counters_register ("Hash table lookups", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_lookups);
-       mono_counters_register ("Hash table lookup iterations", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_lookup_iterations);
-       mono_counters_register ("Hash table lookup max iterations", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_lookup_max_iterations);
-#endif
-}
-
-#endif
diff --git a/mono/metadata/sgen-hash-table.h b/mono/metadata/sgen-hash-table.h
deleted file mode 100644 (file)
index e6cfe43..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-#ifndef __MONO_SGENHASHTABLE_H__
-#define __MONO_SGENHASHTABLE_H__
-
-#include "config.h"
-
-#ifdef HAVE_SGEN_GC
-
-#include <glib.h>
-
-/* hash tables */
-
-typedef struct _SgenHashTableEntry SgenHashTableEntry;
-struct _SgenHashTableEntry {
-       SgenHashTableEntry *next;
-       gpointer key;
-       char data [MONO_ZERO_LEN_ARRAY]; /* data is pointer-aligned */
-};
-
-typedef struct {
-       int table_mem_type;
-       int entry_mem_type;
-       size_t data_size;
-       GHashFunc hash_func;
-       GEqualFunc equal_func;
-       SgenHashTableEntry **table;
-       guint size;
-       guint num_entries;
-} SgenHashTable;
-
-#define SGEN_HASH_TABLE_INIT(table_type,entry_type,data_size,hash_func,equal_func)     { (table_type), (entry_type), (data_size), (hash_func), (equal_func), NULL, 0, 0 }
-#define SGEN_HASH_TABLE_ENTRY_SIZE(data_size)                  ((data_size) + sizeof (SgenHashTableEntry*) + sizeof (gpointer))
-
-gpointer sgen_hash_table_lookup (SgenHashTable *table, gpointer key);
-gboolean sgen_hash_table_replace (SgenHashTable *table, gpointer key, gpointer new_value, gpointer old_value);
-gboolean sgen_hash_table_set_value (SgenHashTable *table, gpointer key, gpointer new_value, gpointer old_value);
-gboolean sgen_hash_table_set_key (SgenHashTable *hash_table, gpointer old_key, gpointer new_key);
-gboolean sgen_hash_table_remove (SgenHashTable *table, gpointer key, gpointer data_return);
-
-void sgen_hash_table_clean (SgenHashTable *table);
-
-void sgen_init_hash_table (void);
-
-#define sgen_hash_table_num_entries(h) ((h)->num_entries)
-
-#define sgen_hash_table_key_for_value_pointer(v)       (((SgenHashTableEntry*)((char*)(v) - G_STRUCT_OFFSET (SgenHashTableEntry, data)))->key)
-
-#define SGEN_HASH_TABLE_FOREACH(h,k,v) do {                            \
-               SgenHashTable *__hash_table = (h);                      \
-               SgenHashTableEntry **__table = __hash_table->table;     \
-               guint __i;                                              \
-               for (__i = 0; __i < (h)->size; ++__i) {                 \
-                       SgenHashTableEntry **__iter, **__next;                  \
-                       for (__iter = &__table [__i]; *__iter; __iter = __next) {       \
-                               SgenHashTableEntry *__entry = *__iter;  \
-                               __next = &__entry->next;        \
-                               (k) = __entry->key;                     \
-                               (v) = (gpointer)__entry->data;
-
-/* The loop must be continue'd after using this! */
-#define SGEN_HASH_TABLE_FOREACH_REMOVE(free)   do {                    \
-               *__iter = *__next;      \
-               __next = __iter;        \
-               --__hash_table->num_entries;                            \
-               if ((free))                                             \
-                       sgen_free_internal (__entry, __hash_table->entry_mem_type); \
-       } while (0)
-
-#define SGEN_HASH_TABLE_FOREACH_SET_KEY(k)     ((__entry)->key = (k))
-
-#define SGEN_HASH_TABLE_FOREACH_END                                    \
-                       }                                               \
-               }                                                       \
-       } while (0)
-
-#endif
-
-#endif
diff --git a/mono/metadata/sgen-internal.c b/mono/metadata/sgen-internal.c
deleted file mode 100644 (file)
index ffb4d54..0000000
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * sgen-internal.c: Internal lock-free memory allocator.
- *
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "config.h"
-
-#ifdef HAVE_SGEN_GC
-
-#include <string.h>
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/utils/lock-free-alloc.h"
-#include "mono/metadata/sgen-memory-governor.h"
-#include "mono/metadata/sgen-client.h"
-
-/* keep each size a multiple of ALLOC_ALIGN */
-#if SIZEOF_VOID_P == 4
-static const int allocator_sizes [] = {
-          8,   16,   24,   32,   40,   48,   64,   80,
-         96,  128,  160,  192,  224,  248,  296,  320,
-        384,  448,  504,  528,  584,  680,  816, 1088,
-       1360, 2044, 2336, 2728, 3272, 4092, 5456, 8188 };
-#else
-static const int allocator_sizes [] = {
-          8,   16,   24,   32,   40,   48,   64,   80,
-         96,  128,  160,  192,  224,  248,  320,  328,
-        384,  448,  528,  584,  680,  816, 1016, 1088,
-       1360, 2040, 2336, 2728, 3272, 4088, 5456, 8184 };
-#endif
-
-#define NUM_ALLOCATORS (sizeof (allocator_sizes) / sizeof (int))
-
-static int allocator_block_sizes [NUM_ALLOCATORS];
-
-static MonoLockFreeAllocSizeClass size_classes [NUM_ALLOCATORS];
-static MonoLockFreeAllocator allocators [NUM_ALLOCATORS];
-
-#ifdef HEAVY_STATISTICS
-static int allocator_sizes_stats [NUM_ALLOCATORS];
-#endif
-
-static size_t
-block_size (size_t slot_size)
-{
-       static int pagesize = -1;
-
-       int size;
-
-       if (pagesize == -1)
-               pagesize = mono_pagesize ();
-
-       for (size = pagesize; size < LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) {
-               if (slot_size * 2 <= LOCK_FREE_ALLOC_SB_USABLE_SIZE (size))
-                       return size;
-       }
-       return LOCK_FREE_ALLOC_SB_MAX_SIZE;
-}
-
-/*
- * Find the allocator index for memory chunks that can contain @size
- * objects.
- */
-static int
-index_for_size (size_t size)
-{
-       int slot;
-       /* do a binary search or lookup table later. */
-       for (slot = 0; slot < NUM_ALLOCATORS; ++slot) {
-               if (allocator_sizes [slot] >= size)
-                       return slot;
-       }
-       g_assert_not_reached ();
-       return -1;
-}
-
-/*
- * Allocator indexes for the fixed INTERNAL_MEM_XXX types.  -1 if that
- * type is dynamic.
- */
-static int fixed_type_allocator_indexes [INTERNAL_MEM_MAX];
-
-void
-sgen_register_fixed_internal_mem_type (int type, size_t size)
-{
-       int slot;
-
-       g_assert (type >= 0 && type < INTERNAL_MEM_MAX);
-       g_assert (size <= allocator_sizes [NUM_ALLOCATORS - 1]);
-
-       slot = index_for_size (size);
-       g_assert (slot >= 0);
-
-       if (fixed_type_allocator_indexes [type] == -1)
-               fixed_type_allocator_indexes [type] = slot;
-       else
-               g_assert (fixed_type_allocator_indexes [type] == slot);
-}
-
-static const char*
-description_for_type (int type)
-{
-       switch (type) {
-       case INTERNAL_MEM_PIN_QUEUE: return "pin-queue";
-       case INTERNAL_MEM_FRAGMENT: return "fragment";
-       case INTERNAL_MEM_SECTION: return "section";
-       case INTERNAL_MEM_SCAN_STARTS: return "scan-starts";
-       case INTERNAL_MEM_FIN_TABLE: return "fin-table";
-       case INTERNAL_MEM_FINALIZE_ENTRY: return "finalize-entry";
-       case INTERNAL_MEM_FINALIZE_READY: return "finalize-ready";
-       case INTERNAL_MEM_DISLINK_TABLE: return "dislink-table";
-       case INTERNAL_MEM_DISLINK: return "dislink";
-       case INTERNAL_MEM_ROOTS_TABLE: return "roots-table";
-       case INTERNAL_MEM_ROOT_RECORD: return "root-record";
-       case INTERNAL_MEM_STATISTICS: return "statistics";
-       case INTERNAL_MEM_STAT_PINNED_CLASS: return "pinned-class";
-       case INTERNAL_MEM_STAT_REMSET_CLASS: return "remset-class";
-       case INTERNAL_MEM_GRAY_QUEUE: return "gray-queue";
-       case INTERNAL_MEM_MS_TABLES: return "marksweep-tables";
-       case INTERNAL_MEM_MS_BLOCK_INFO: return "marksweep-block-info";
-       case INTERNAL_MEM_MS_BLOCK_INFO_SORT: return "marksweep-block-info-sort";
-       case INTERNAL_MEM_WORKER_DATA: return "worker-data";
-       case INTERNAL_MEM_THREAD_POOL_JOB: return "thread-pool-job";
-       case INTERNAL_MEM_BRIDGE_DATA: return "bridge-data";
-       case INTERNAL_MEM_OLD_BRIDGE_HASH_TABLE: return "old-bridge-hash-table";
-       case INTERNAL_MEM_OLD_BRIDGE_HASH_TABLE_ENTRY: return "old-bridge-hash-table-entry";
-       case INTERNAL_MEM_BRIDGE_HASH_TABLE: return "bridge-hash-table";
-       case INTERNAL_MEM_BRIDGE_HASH_TABLE_ENTRY: return "bridge-hash-table-entry";
-       case INTERNAL_MEM_TARJAN_BRIDGE_HASH_TABLE: return "tarjan-bridge-hash-table";
-       case INTERNAL_MEM_TARJAN_BRIDGE_HASH_TABLE_ENTRY: return "tarjan-bridge-hash-table-entry";
-       case INTERNAL_MEM_TARJAN_OBJ_BUCKET: return "tarjan-bridge-object-buckets";
-       case INTERNAL_MEM_BRIDGE_ALIVE_HASH_TABLE: return "bridge-alive-hash-table";
-       case INTERNAL_MEM_BRIDGE_ALIVE_HASH_TABLE_ENTRY: return "bridge-alive-hash-table-entry";
-       case INTERNAL_MEM_BRIDGE_DEBUG: return "bridge-debug";
-       case INTERNAL_MEM_TOGGLEREF_DATA: return "toggleref-data";
-       case INTERNAL_MEM_CARDTABLE_MOD_UNION: return "cardtable-mod-union";
-       case INTERNAL_MEM_BINARY_PROTOCOL: return "binary-protocol";
-       case INTERNAL_MEM_TEMPORARY: return "temporary";
-       default: {
-               const char *description = sgen_client_description_for_internal_mem_type (type);
-               SGEN_ASSERT (0, description, "Unknown internal mem type");
-               return description;
-       }
-       }
-}
-
-void*
-sgen_alloc_internal_dynamic (size_t size, int type, gboolean assert_on_failure)
-{
-       int index;
-       void *p;
-
-       if (size > allocator_sizes [NUM_ALLOCATORS - 1]) {
-               p = sgen_alloc_os_memory (size, SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE, NULL);
-               if (!p)
-                       sgen_assert_memory_alloc (NULL, size, description_for_type (type));
-       } else {
-               index = index_for_size (size);
-
-#ifdef HEAVY_STATISTICS
-               ++ allocator_sizes_stats [index];
-#endif
-
-               p = mono_lock_free_alloc (&allocators [index]);
-               if (!p)
-                       sgen_assert_memory_alloc (NULL, size, description_for_type (type));
-               memset (p, 0, size);
-       }
-       return p;
-}
-
-void
-sgen_free_internal_dynamic (void *addr, size_t size, int type)
-{
-       if (!addr)
-               return;
-
-       if (size > allocator_sizes [NUM_ALLOCATORS - 1])
-               sgen_free_os_memory (addr, size, SGEN_ALLOC_INTERNAL);
-       else
-               mono_lock_free_free (addr, block_size (size));
-}
-
-void*
-sgen_alloc_internal (int type)
-{
-       int index, size;
-       void *p;
-
-       index = fixed_type_allocator_indexes [type];
-       g_assert (index >= 0 && index < NUM_ALLOCATORS);
-
-#ifdef HEAVY_STATISTICS
-       ++ allocator_sizes_stats [index];
-#endif
-
-       size = allocator_sizes [index];
-
-       p = mono_lock_free_alloc (&allocators [index]);
-       memset (p, 0, size);
-
-       return p;
-}
-
-void
-sgen_free_internal (void *addr, int type)
-{
-       int index;
-
-       if (!addr)
-               return;
-
-       index = fixed_type_allocator_indexes [type];
-       g_assert (index >= 0 && index < NUM_ALLOCATORS);
-
-       mono_lock_free_free (addr, allocator_block_sizes [index]);
-}
-
-void
-sgen_dump_internal_mem_usage (FILE *heap_dump_file)
-{
-       /*
-       int i;
-
-       fprintf (heap_dump_file, "<other-mem-usage type=\"large-internal\" size=\"%lld\"/>\n", large_internal_bytes_alloced);
-       fprintf (heap_dump_file, "<other-mem-usage type=\"pinned-chunks\" size=\"%lld\"/>\n", pinned_chunk_bytes_alloced);
-       for (i = 0; i < INTERNAL_MEM_MAX; ++i) {
-               fprintf (heap_dump_file, "<other-mem-usage type=\"%s\" size=\"%ld\"/>\n",
-                               description_for_type (i), unmanaged_allocator.small_internal_mem_bytes [i]);
-       }
-       */
-}
-
-void
-sgen_report_internal_mem_usage (void)
-{
-       int i G_GNUC_UNUSED;
-#ifdef HEAVY_STATISTICS
-       printf ("size -> # allocations\n");
-       for (i = 0; i < NUM_ALLOCATORS; ++i)
-               printf ("%d -> %d\n", allocator_sizes [i], allocator_sizes_stats [i]);
-#endif
-}
-
-void
-sgen_init_internal_allocator (void)
-{
-       int i, size;
-
-       for (i = 0; i < INTERNAL_MEM_MAX; ++i)
-               fixed_type_allocator_indexes [i] = -1;
-
-       for (i = 0; i < NUM_ALLOCATORS; ++i) {
-               allocator_block_sizes [i] = block_size (allocator_sizes [i]);
-               mono_lock_free_allocator_init_size_class (&size_classes [i], allocator_sizes [i], allocator_block_sizes [i]);
-               mono_lock_free_allocator_init_allocator (&allocators [i], &size_classes [i]);
-       }
-
-       for (size = mono_pagesize (); size <= LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) {
-               int max_size = LOCK_FREE_ALLOC_SB_USABLE_SIZE (size) / 2;
-               /*
-                * we assert that allocator_sizes contains the biggest possible object size
-                * per block (4K => 4080 / 2 = 2040, 8k => 8176 / 2 = 4088, 16k => 16368 / 2 = 8184 on 64bits),
-                * so that we do not get different block sizes for sizes that should go to the same one
-                */
-               g_assert (allocator_sizes [index_for_size (max_size)] == max_size);
-       }
-}
-
-#endif
diff --git a/mono/metadata/sgen-layout-stats.c b/mono/metadata/sgen-layout-stats.c
deleted file mode 100644 (file)
index 8342bff..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright Xamarin Inc (http://www.xamarin.com)
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "config.h"
-#ifdef HAVE_SGEN_GC
-
-#include "metadata/sgen-gc.h"
-#include "metadata/sgen-layout-stats.h"
-
-#ifdef SGEN_OBJECT_LAYOUT_STATISTICS
-
-#define NUM_HISTOGRAM_ENTRIES  (1 << SGEN_OBJECT_LAYOUT_BITMAP_BITS)
-
-static unsigned long histogram [NUM_HISTOGRAM_ENTRIES];
-static unsigned long count_bitmap_overflow;
-static unsigned long count_ref_array;
-static unsigned long count_vtype_array;
-
-void
-sgen_object_layout_scanned_bitmap (unsigned int bitmap)
-{
-       g_assert (!(bitmap >> SGEN_OBJECT_LAYOUT_BITMAP_BITS));
-       ++histogram [bitmap];
-}
-
-void
-sgen_object_layout_scanned_bitmap_overflow (void)
-{
-       ++count_bitmap_overflow;
-}
-
-void
-sgen_object_layout_scanned_ref_array (void)
-{
-       ++count_ref_array;
-}
-
-void
-sgen_object_layout_scanned_vtype_array (void)
-{
-       ++count_vtype_array;
-}
-
-void
-sgen_object_layout_dump (FILE *out)
-{
-       int i;
-
-       for (i = 0; i < NUM_HISTOGRAM_ENTRIES; ++i) {
-               if (!histogram [i])
-                       continue;
-               fprintf (out, "%d %lu\n", i, histogram [i]);
-       }
-       fprintf (out, "bitmap-overflow %lu\n", count_bitmap_overflow);
-       fprintf (out, "ref-array %lu\n", count_ref_array);
-       fprintf (out, "vtype-array %lu\n", count_vtype_array);
-}
-
-#endif
-#endif
diff --git a/mono/metadata/sgen-layout-stats.h b/mono/metadata/sgen-layout-stats.h
deleted file mode 100644 (file)
index 3853d34..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright Xamarin Inc (http://www.xamarin.com)
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef __MONO_SGEN_LAYOUT_STATS_H__
-#define __MONO_SGEN_LAYOUT_STATS_H__
-
-#ifdef SGEN_OBJECT_LAYOUT_STATISTICS
-
-#define SGEN_OBJECT_LAYOUT_BITMAP_BITS 16
-
-void sgen_object_layout_scanned_bitmap (unsigned int bitmap);
-void sgen_object_layout_scanned_bitmap_overflow (void);
-void sgen_object_layout_scanned_ref_array (void);
-void sgen_object_layout_scanned_vtype_array (void);
-
-void sgen_object_layout_dump (FILE *out);
-
-#define SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP   unsigned int __object_layout_bitmap = 0
-#define SGEN_OBJECT_LAYOUT_STATISTICS_MARK_BITMAP(o,p) do {            \
-               int __index = ((void**)(p)) - ((void**)(((char*)(o)) + SGEN_CLIENT_OBJECT_HEADER_SIZE)); \
-               if (__index >= SGEN_OBJECT_LAYOUT_BITMAP_BITS)          \
-                       __object_layout_bitmap = (unsigned int)-1;      \
-               else if (__object_layout_bitmap != (unsigned int)-1)    \
-                       __object_layout_bitmap |= (1 << __index);       \
-       } while (0)
-#define SGEN_OBJECT_LAYOUT_STATISTICS_COMMIT_BITMAP do {               \
-               if (__object_layout_bitmap == (unsigned int)-1)         \
-                       sgen_object_layout_scanned_bitmap_overflow ();  \
-               else                                                    \
-                       sgen_object_layout_scanned_bitmap (__object_layout_bitmap); \
-       } while (0)
-
-#else
-
-#define sgen_object_layout_scanned_bitmap(bitmap)
-#define sgen_object_layout_scanned_bitmap_overflow()
-#define sgen_object_layout_scanned_ref_array()
-#define sgen_object_layout_scanned_vtype_array()
-
-#define sgen_object_layout_dump(out)
-
-#define SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP
-#define SGEN_OBJECT_LAYOUT_STATISTICS_MARK_BITMAP(o,p)
-#define SGEN_OBJECT_LAYOUT_STATISTICS_COMMIT_BITMAP
-
-#endif
-
-#endif
diff --git a/mono/metadata/sgen-los.c b/mono/metadata/sgen-los.c
deleted file mode 100644 (file)
index f1a77ee..0000000
+++ /dev/null
@@ -1,711 +0,0 @@
-/*
- * sgen-los.c: Large objects space.
- *
- * Author:
- *     Paolo Molaro (lupus@ximian.com)
- *
- * Copyright 2005-2010 Novell, Inc (http://www.novell.com)
- *
- * Thread start/stop adapted from Boehm's GC:
- * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
- * Copyright (c) 2000-2004 by Hewlett-Packard Company.  All rights reserved.
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "config.h"
-
-#ifdef HAVE_SGEN_GC
-
-#include <string.h>
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/sgen-protocol.h"
-#include "mono/metadata/sgen-cardtable.h"
-#include "mono/metadata/sgen-memory-governor.h"
-#include "mono/metadata/sgen-client.h"
-
-#define LOS_SECTION_SIZE       (1024 * 1024)
-
-/*
- * This shouldn't be much smaller or larger than MAX_SMALL_OBJ_SIZE.
- * Must be at least sizeof (LOSSection).
- */
-#define LOS_CHUNK_SIZE         4096
-#define LOS_CHUNK_BITS         12
-
-/* Largest object that can be allocated in a section. */
-#define LOS_SECTION_OBJECT_LIMIT       (LOS_SECTION_SIZE - LOS_CHUNK_SIZE - sizeof (LOSObject))
-//#define LOS_SECTION_OBJECT_LIMIT     0
-#define LOS_SECTION_NUM_CHUNKS         ((LOS_SECTION_SIZE >> LOS_CHUNK_BITS) - 1)
-
-#define LOS_SECTION_FOR_OBJ(obj)       ((LOSSection*)((mword)(obj) & ~(mword)(LOS_SECTION_SIZE - 1)))
-#define LOS_CHUNK_INDEX(obj,section)   (((char*)(obj) - (char*)(section)) >> LOS_CHUNK_BITS)
-
-#define LOS_NUM_FAST_SIZES             32
-
-typedef struct _LOSFreeChunks LOSFreeChunks;
-struct _LOSFreeChunks {
-       LOSFreeChunks *next_size;
-       size_t size;
-};
-
-typedef struct _LOSSection LOSSection;
-struct _LOSSection {
-       LOSSection *next;
-       size_t num_free_chunks;
-       unsigned char *free_chunk_map;
-};
-
-LOSObject *los_object_list = NULL;
-mword los_memory_usage = 0;
-
-static LOSSection *los_sections = NULL;
-static LOSFreeChunks *los_fast_free_lists [LOS_NUM_FAST_SIZES]; /* 0 is for larger sizes */
-static mword los_num_objects = 0;
-static int los_num_sections = 0;
-
-//#define USE_MALLOC
-//#define LOS_CONSISTENCY_CHECK
-//#define LOS_DUMMY
-
-#ifdef LOS_DUMMY
-#define LOS_SEGMENT_SIZE       (4096 * 1024)
-
-static char *los_segment = NULL;
-static int los_segment_index = 0;
-#endif
-
-#ifdef LOS_CONSISTENCY_CHECK
-static void
-los_consistency_check (void)
-{
-       LOSSection *section;
-       LOSObject *obj;
-       int i;
-       mword memory_usage = 0;
-
-       for (obj = los_object_list; obj; obj = obj->next) {
-               char *end = obj->data + obj->size;
-               int start_index, num_chunks;
-
-               memory_usage += obj->size;
-
-               if (obj->size > LOS_SECTION_OBJECT_LIMIT)
-                       continue;
-
-               section = LOS_SECTION_FOR_OBJ (obj);
-
-               g_assert (end <= (char*)section + LOS_SECTION_SIZE);
-
-               start_index = LOS_CHUNK_INDEX (obj, section);
-               num_chunks = (obj->size + sizeof (LOSObject) + LOS_CHUNK_SIZE - 1) >> LOS_CHUNK_BITS;
-               for (i = start_index; i < start_index + num_chunks; ++i)
-                       g_assert (!section->free_chunk_map [i]);
-       }
-
-       for (i = 0; i < LOS_NUM_FAST_SIZES; ++i) {
-               LOSFreeChunks *size_chunks;
-               for (size_chunks = los_fast_free_lists [i]; size_chunks; size_chunks = size_chunks->next_size) {
-                       LOSSection *section = LOS_SECTION_FOR_OBJ (size_chunks);
-                       int j, num_chunks, start_index;
-
-                       if (i == 0)
-                               g_assert (size_chunks->size >= LOS_NUM_FAST_SIZES * LOS_CHUNK_SIZE);
-                       else
-                               g_assert (size_chunks->size == i * LOS_CHUNK_SIZE);
-
-                       num_chunks = size_chunks->size >> LOS_CHUNK_BITS;
-                       start_index = LOS_CHUNK_INDEX (size_chunks, section);
-                       for (j = start_index; j < start_index + num_chunks; ++j)
-                               g_assert (section->free_chunk_map [j]);
-               }
-       }
-
-       g_assert (los_memory_usage == memory_usage);
-}
-#endif
-
-static void
-add_free_chunk (LOSFreeChunks *free_chunks, size_t size)
-{
-       size_t num_chunks = size >> LOS_CHUNK_BITS;
-
-       free_chunks->size = size;
-
-       if (num_chunks >= LOS_NUM_FAST_SIZES)
-               num_chunks = 0;
-       free_chunks->next_size = los_fast_free_lists [num_chunks];
-       los_fast_free_lists [num_chunks] = free_chunks;
-}
-
-static LOSFreeChunks*
-get_from_size_list (LOSFreeChunks **list, size_t size)
-{
-       LOSFreeChunks *free_chunks = NULL;
-       LOSSection *section;
-       size_t i, num_chunks, start_index;
-
-
-       g_assert ((size & (LOS_CHUNK_SIZE - 1)) == 0);
-
-       while (*list) {
-               free_chunks = *list;
-               if (free_chunks->size >= size)
-                       break;
-               list = &(*list)->next_size;
-       }
-
-       if (!*list)
-               return NULL;
-
-       *list = free_chunks->next_size;
-
-       if (free_chunks->size > size)
-               add_free_chunk ((LOSFreeChunks*)((char*)free_chunks + size), free_chunks->size - size);
-
-       num_chunks = size >> LOS_CHUNK_BITS;
-
-       section = LOS_SECTION_FOR_OBJ (free_chunks);
-
-       start_index = LOS_CHUNK_INDEX (free_chunks, section);
-       for (i = start_index; i < start_index + num_chunks; ++i) {
-               g_assert (section->free_chunk_map [i]);
-               section->free_chunk_map [i] = 0;
-       }
-
-       section->num_free_chunks -= size >> LOS_CHUNK_BITS;
-       g_assert (section->num_free_chunks >= 0);
-
-       return free_chunks;
-}
-
-static LOSObject*
-get_los_section_memory (size_t size)
-{
-       LOSSection *section;
-       LOSFreeChunks *free_chunks;
-       size_t num_chunks;
-
-       size += LOS_CHUNK_SIZE - 1;
-       size &= ~(LOS_CHUNK_SIZE - 1);
-
-       num_chunks = size >> LOS_CHUNK_BITS;
-
-       g_assert (size > 0 && size - sizeof (LOSObject) <= LOS_SECTION_OBJECT_LIMIT);
-       g_assert (num_chunks > 0);
-
- retry:
-       if (num_chunks >= LOS_NUM_FAST_SIZES) {
-               free_chunks = get_from_size_list (&los_fast_free_lists [0], size);
-       } else {
-               size_t i;
-               for (i = num_chunks; i < LOS_NUM_FAST_SIZES; ++i) {
-                       free_chunks = get_from_size_list (&los_fast_free_lists [i], size);
-                       if (free_chunks)
-                               break;
-               }
-               if (!free_chunks)
-                       free_chunks = get_from_size_list (&los_fast_free_lists [0], size);
-       }
-
-       if (free_chunks)
-               return (LOSObject*)free_chunks;
-
-       if (!sgen_memgov_try_alloc_space (LOS_SECTION_SIZE, SPACE_LOS))
-               return NULL;
-
-       section = sgen_alloc_os_memory_aligned (LOS_SECTION_SIZE, LOS_SECTION_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, NULL);
-
-       if (!section)
-               return NULL;
-
-       free_chunks = (LOSFreeChunks*)((char*)section + LOS_CHUNK_SIZE);
-       free_chunks->size = LOS_SECTION_SIZE - LOS_CHUNK_SIZE;
-       free_chunks->next_size = los_fast_free_lists [0];
-       los_fast_free_lists [0] = free_chunks;
-
-       section->num_free_chunks = LOS_SECTION_NUM_CHUNKS;
-
-       section->free_chunk_map = (unsigned char*)section + sizeof (LOSSection);
-       g_assert (sizeof (LOSSection) + LOS_SECTION_NUM_CHUNKS + 1 <= LOS_CHUNK_SIZE);
-       section->free_chunk_map [0] = 0;
-       memset (section->free_chunk_map + 1, 1, LOS_SECTION_NUM_CHUNKS);
-
-       section->next = los_sections;
-       los_sections = section;
-
-       ++los_num_sections;
-
-       goto retry;
-}
-
-static void
-free_los_section_memory (LOSObject *obj, size_t size)
-{
-       LOSSection *section = LOS_SECTION_FOR_OBJ (obj);
-       size_t num_chunks, i, start_index;
-
-       size += LOS_CHUNK_SIZE - 1;
-       size &= ~(LOS_CHUNK_SIZE - 1);
-
-       num_chunks = size >> LOS_CHUNK_BITS;
-
-       g_assert (size > 0 && size - sizeof (LOSObject) <= LOS_SECTION_OBJECT_LIMIT);
-       g_assert (num_chunks > 0);
-
-       section->num_free_chunks += num_chunks;
-       g_assert (section->num_free_chunks <= LOS_SECTION_NUM_CHUNKS);
-
-       /*
-        * We could free the LOS section here if it's empty, but we
-        * can't unless we also remove its free chunks from the fast
-        * free lists.  Instead, we do it in los_sweep().
-        */
-
-       start_index = LOS_CHUNK_INDEX (obj, section);
-       for (i = start_index; i < start_index + num_chunks; ++i) {
-               g_assert (!section->free_chunk_map [i]);
-               section->free_chunk_map [i] = 1;
-       }
-
-       add_free_chunk ((LOSFreeChunks*)obj, size);
-}
-
-static int pagesize;
-
-void
-sgen_los_free_object (LOSObject *obj)
-{
-       SGEN_ASSERT (0, !obj->cardtable_mod_union, "We should never free a LOS object with a mod-union table.");
-
-#ifndef LOS_DUMMY
-       size_t size = obj->size;
-       SGEN_LOG (4, "Freed large object %p, size %lu", obj->data, (unsigned long)obj->size);
-       binary_protocol_empty (obj->data, obj->size);
-
-       los_memory_usage -= size;
-       los_num_objects--;
-
-#ifdef USE_MALLOC
-       free (obj);
-#else
-       if (size > LOS_SECTION_OBJECT_LIMIT) {
-               if (!pagesize)
-                       pagesize = mono_pagesize ();
-               size += sizeof (LOSObject);
-               size += pagesize - 1;
-               size &= ~(pagesize - 1);
-               sgen_free_os_memory (obj, size, SGEN_ALLOC_HEAP);
-               sgen_memgov_release_space (size, SPACE_LOS);
-       } else {
-               free_los_section_memory (obj, size + sizeof (LOSObject));
-#ifdef LOS_CONSISTENCY_CHECKS
-               los_consistency_check ();
-#endif
-       }
-#endif
-#endif
-}
-
-/*
- * Objects with size >= MAX_SMALL_SIZE are allocated in the large object space.
- * They are currently kept track of with a linked list.
- * They don't move, so there is no need to pin them during collection
- * and we avoid the memcpy overhead.
- */
-void*
-sgen_los_alloc_large_inner (GCVTable *vtable, size_t size)
-{
-       LOSObject *obj = NULL;
-       void **vtslot;
-
-       g_assert (size > SGEN_MAX_SMALL_OBJ_SIZE);
-       g_assert ((size & 1) == 0);
-
-       /*
-        * size + sizeof (LOSObject) <= SSIZE_MAX - (mono_pagesize () - 1)
-        *
-        * therefore:
-        *
-        * size <= SSIZE_MAX - (mono_pagesize () - 1) - sizeof (LOSObject)
-        */
-       if (size > SSIZE_MAX - (mono_pagesize () - 1) - sizeof (LOSObject))
-               return NULL;
-
-#ifdef LOS_DUMMY
-       if (!los_segment)
-               los_segment = sgen_alloc_os_memory (LOS_SEGMENT_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, NULL);
-       los_segment_index = ALIGN_UP (los_segment_index);
-
-       obj = (LOSObject*)(los_segment + los_segment_index);
-       los_segment_index += size + sizeof (LOSObject);
-       g_assert (los_segment_index <= LOS_SEGMENT_SIZE);
-#else
-       sgen_ensure_free_space (size);
-
-#ifdef USE_MALLOC
-       obj = malloc (size + sizeof (LOSObject));
-       memset (obj, 0, size + sizeof (LOSObject));
-#else
-       if (size > LOS_SECTION_OBJECT_LIMIT) {
-               size_t alloc_size = size;
-               if (!pagesize)
-                       pagesize = mono_pagesize ();
-               alloc_size += sizeof (LOSObject);
-               alloc_size += pagesize - 1;
-               alloc_size &= ~(pagesize - 1);
-               if (sgen_memgov_try_alloc_space (alloc_size, SPACE_LOS)) {
-                       obj = sgen_alloc_os_memory (alloc_size, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, NULL);
-               }
-       } else {
-               obj = get_los_section_memory (size + sizeof (LOSObject));
-               if (obj)
-                       memset (obj, 0, size + sizeof (LOSObject));
-       }
-#endif
-#endif
-       if (!obj)
-               return NULL;
-       g_assert (!((mword)obj->data & (SGEN_ALLOC_ALIGN - 1)));
-       obj->size = size;
-       vtslot = (void**)obj->data;
-       *vtslot = vtable;
-       sgen_update_heap_boundaries ((mword)obj->data, (mword)obj->data + size);
-       obj->next = los_object_list;
-       los_object_list = obj;
-       los_memory_usage += size;
-       los_num_objects++;
-       SGEN_LOG (4, "Allocated large object %p, vtable: %p (%s), size: %zd", obj->data, vtable, sgen_client_vtable_get_name (vtable), size);
-       binary_protocol_alloc (obj->data, vtable, size, sgen_client_get_provenance ());
-
-#ifdef LOS_CONSISTENCY_CHECK
-       los_consistency_check ();
-#endif
-
-       return obj->data;
-}
-
-static void sgen_los_unpin_object (char *data);
-
-void
-sgen_los_sweep (void)
-{
-       LOSObject *bigobj, *prevbo;
-       LOSSection *section, *prev;
-       int i;
-       int num_sections = 0;
-
-       /* sweep the big objects list */
-       prevbo = NULL;
-       for (bigobj = los_object_list; bigobj;) {
-               SGEN_ASSERT (0, !SGEN_OBJECT_IS_PINNED (bigobj->data), "Who pinned a LOS object?");
-
-               if (bigobj->cardtable_mod_union) {
-                       sgen_card_table_free_mod_union (bigobj->cardtable_mod_union, bigobj->data, bigobj->size);
-                       bigobj->cardtable_mod_union = NULL;
-               }
-
-               if (sgen_los_object_is_pinned (bigobj->data)) {
-                       sgen_los_unpin_object (bigobj->data);
-                       sgen_update_heap_boundaries ((mword)bigobj->data, (mword)bigobj->data + sgen_los_object_size (bigobj));
-               } else {
-                       LOSObject *to_free;
-                       /* not referenced anywhere, so we can free it */
-                       if (prevbo)
-                               prevbo->next = bigobj->next;
-                       else
-                               los_object_list = bigobj->next;
-                       to_free = bigobj;
-                       bigobj = bigobj->next;
-                       sgen_los_free_object (to_free);
-                       continue;
-               }
-               prevbo = bigobj;
-               bigobj = bigobj->next;
-       }
-
-       /* Try to free memory */
-       for (i = 0; i < LOS_NUM_FAST_SIZES; ++i)
-               los_fast_free_lists [i] = NULL;
-
-       prev = NULL;
-       section = los_sections;
-       while (section) {
-               if (section->num_free_chunks == LOS_SECTION_NUM_CHUNKS) {
-                       LOSSection *next = section->next;
-                       if (prev)
-                               prev->next = next;
-                       else
-                               los_sections = next;
-                       sgen_free_os_memory (section, LOS_SECTION_SIZE, SGEN_ALLOC_HEAP);
-                       sgen_memgov_release_space (LOS_SECTION_SIZE, SPACE_LOS);
-                       section = next;
-                       --los_num_sections;
-                       continue;
-               }
-
-               for (i = 0; i <= LOS_SECTION_NUM_CHUNKS; ++i) {
-                       if (section->free_chunk_map [i]) {
-                               int j;
-                               for (j = i + 1; j <= LOS_SECTION_NUM_CHUNKS && section->free_chunk_map [j]; ++j)
-                                       ;
-                               add_free_chunk ((LOSFreeChunks*)((char*)section + (i << LOS_CHUNK_BITS)), (j - i) << LOS_CHUNK_BITS);
-                               i = j - 1;
-                       }
-               }
-
-               prev = section;
-               section = section->next;
-
-               ++num_sections;
-       }
-
-#ifdef LOS_CONSISTENCY_CHECK
-       los_consistency_check ();
-#endif
-
-       /*
-       g_print ("LOS sections: %d  objects: %d  usage: %d\n", num_sections, los_num_objects, los_memory_usage);
-       for (i = 0; i < LOS_NUM_FAST_SIZES; ++i) {
-               int num_chunks = 0;
-               LOSFreeChunks *free_chunks;
-               for (free_chunks = los_fast_free_lists [i]; free_chunks; free_chunks = free_chunks->next_size)
-                       ++num_chunks;
-               g_print ("  %d: %d\n", i, num_chunks);
-       }
-       */
-
-       g_assert (los_num_sections == num_sections);
-}
-
-gboolean
-sgen_ptr_is_in_los (char *ptr, char **start)
-{
-       LOSObject *obj;
-
-       *start = NULL;
-       for (obj = los_object_list; obj; obj = obj->next) {
-               char *end = obj->data + obj->size;
-
-               if (ptr >= obj->data && ptr < end) {
-                       *start = obj->data;
-                       return TRUE;
-               }
-       }
-       return FALSE;
-}
-
-void
-sgen_los_iterate_objects (IterateObjectCallbackFunc cb, void *user_data)
-{
-       LOSObject *obj;
-
-       for (obj = los_object_list; obj; obj = obj->next)
-               cb (obj->data, obj->size, user_data);
-}
-
-gboolean
-sgen_los_is_valid_object (char *object)
-{
-       LOSObject *obj;
-
-       for (obj = los_object_list; obj; obj = obj->next) {
-               if (obj->data == object)
-                       return TRUE;
-       }
-       return FALSE;
-}
-
-gboolean
-mono_sgen_los_describe_pointer (char *ptr)
-{
-       LOSObject *obj;
-
-       for (obj = los_object_list; obj; obj = obj->next) {
-               const char *los_kind;
-               mword size;
-               gboolean pinned;
-
-               if (obj->data > ptr || obj->data + obj->size <= ptr)
-                       continue;
-
-               size = sgen_los_object_size (obj);
-               pinned = sgen_los_object_is_pinned (obj->data);
-
-               if (size > LOS_SECTION_OBJECT_LIMIT)
-                       los_kind = "huge-los-ptr";
-               else
-                       los_kind = "los-ptr";
-
-               if (obj->data == ptr) {
-                       SGEN_LOG (0, "%s (size %d pin %d)\n", los_kind, (int)size, pinned ? 1 : 0);
-               } else {
-                       SGEN_LOG (0, "%s (interior-ptr offset %td size %d pin %d)",
-                                         los_kind, ptr - obj->data, (int)size, pinned ? 1 : 0);
-               }
-
-               return TRUE;
-       }
-       return FALSE;
-}
-
-void
-sgen_los_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
-{
-       LOSObject *obj;
-       for (obj = los_object_list; obj; obj = obj->next) {
-               GCVTable *vt = (GCVTable*)SGEN_LOAD_VTABLE (obj->data);
-               if (SGEN_VTABLE_HAS_REFERENCES (vt))
-                       callback ((mword)obj->data, (mword)obj->size);
-       }
-}
-
-static guint8*
-get_cardtable_mod_union_for_object (LOSObject *obj)
-{
-       guint8 *mod_union = obj->cardtable_mod_union;
-       guint8 *other;
-       if (mod_union)
-               return mod_union;
-       mod_union = sgen_card_table_alloc_mod_union (obj->data, obj->size);
-       other = SGEN_CAS_PTR ((gpointer*)&obj->cardtable_mod_union, mod_union, NULL);
-       if (!other) {
-               SGEN_ASSERT (0, obj->cardtable_mod_union == mod_union, "Why did CAS not replace?");
-               return mod_union;
-       }
-       sgen_card_table_free_mod_union (mod_union, obj->data, obj->size);
-       return other;
-}
-
-void
-sgen_los_scan_card_table (gboolean mod_union, ScanCopyContext ctx)
-{
-       LOSObject *obj;
-
-       for (obj = los_object_list; obj; obj = obj->next) {
-               guint8 *cards;
-
-               if (!SGEN_OBJECT_HAS_REFERENCES (obj->data))
-                       continue;
-
-               if (mod_union) {
-                       if (!sgen_los_object_is_pinned (obj->data))
-                               continue;
-
-                       cards = get_cardtable_mod_union_for_object (obj);
-                       g_assert (cards);
-               } else {
-                       cards = NULL;
-               }
-
-               sgen_cardtable_scan_object (obj->data, obj->size, cards, mod_union, ctx);
-       }
-}
-
-void
-sgen_los_count_cards (long long *num_total_cards, long long *num_marked_cards)
-{
-       LOSObject *obj;
-       long long total_cards = 0;
-       long long marked_cards = 0;
-
-       for (obj = los_object_list; obj; obj = obj->next) {
-               int i;
-               guint8 *cards = sgen_card_table_get_card_scan_address ((mword) obj->data);
-               guint8 *cards_end = sgen_card_table_get_card_scan_address ((mword) obj->data + obj->size - 1);
-               mword num_cards = (cards_end - cards) + 1;
-
-               if (!SGEN_OBJECT_HAS_REFERENCES (obj->data))
-                       continue;
-
-               total_cards += num_cards;
-               for (i = 0; i < num_cards; ++i) {
-                       if (cards [i])
-                               ++marked_cards;
-               }
-       }
-
-       *num_total_cards = total_cards;
-       *num_marked_cards = marked_cards;
-}
-
-void
-sgen_los_update_cardtable_mod_union (void)
-{
-       LOSObject *obj;
-
-       for (obj = los_object_list; obj; obj = obj->next) {
-               if (!SGEN_OBJECT_HAS_REFERENCES (obj->data))
-                       continue;
-               sgen_card_table_update_mod_union (get_cardtable_mod_union_for_object (obj),
-                               obj->data, obj->size, NULL);
-       }
-}
-
-mword
-sgen_los_object_size (LOSObject *obj)
-{
-       return obj->size & ~1L;
-}
-
-LOSObject*
-sgen_los_header_for_object (char *data)
-{
-#if _MSC_VER
-       return (LOSObject*)(data - (int)(&(((LOSObject*)0)->data)));
-#else
-       return (LOSObject*)(data - sizeof (LOSObject));
-#endif
-}
-
-void
-sgen_los_pin_object (char *data)
-{
-       LOSObject *obj = sgen_los_header_for_object (data);
-       obj->size = obj->size | 1;
-       binary_protocol_pin (data, (gpointer)SGEN_LOAD_VTABLE (data), sgen_safe_object_get_size ((GCObject*)data));
-}
-
-static void
-sgen_los_unpin_object (char *data)
-{
-       LOSObject *obj = sgen_los_header_for_object (data);
-       obj->size = sgen_los_object_size (obj);
-}
-
-gboolean
-sgen_los_object_is_pinned (char *data)
-{
-       LOSObject *obj = sgen_los_header_for_object (data);
-       return obj->size & 1;
-}
-
-void
-sgen_los_mark_mod_union_card (GCObject *mono_obj, void **ptr)
-{
-       LOSObject *obj = sgen_los_header_for_object ((char*)mono_obj);
-       guint8 *mod_union = get_cardtable_mod_union_for_object (obj);
-       size_t offset = sgen_card_table_get_card_offset ((char*)ptr, (char*)sgen_card_table_align_pointer ((char*)obj));
-       SGEN_ASSERT (0, mod_union, "FIXME: optionally allocate the mod union if it's not here and CAS it in.");
-       SGEN_ASSERT (0, (char*)obj == (char*)sgen_card_table_align_pointer ((char*)obj), "Why are LOS objects not card aligned?");
-       mod_union [offset] = 1;
-}
-
-#endif /* HAVE_SGEN_GC */
diff --git a/mono/metadata/sgen-major-copy-object.h b/mono/metadata/sgen-major-copy-object.h
deleted file mode 100644 (file)
index a4b10ff..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * sgen-major-copy-object.h: Object copying in the major collectors.
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#define collector_pin_object(obj, queue) do { \
-       if (sgen_ptr_in_nursery (obj)) {        \
-               sgen_pin_object (obj, queue);   \
-       } else {        \
-               g_assert (objsize <= SGEN_MAX_SMALL_OBJ_SIZE);  \
-               pin_major_object (obj, queue);  \
-       }       \
-} while (0)
-
-#define COLLECTOR_SERIAL_ALLOC_FOR_PROMOTION sgen_minor_collector.alloc_for_promotion
-
-#include "sgen-copy-object.h"
diff --git a/mono/metadata/sgen-marksweep-drain-gray-stack.h b/mono/metadata/sgen-marksweep-drain-gray-stack.h
deleted file mode 100644 (file)
index ebfb250..0000000
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * sgen-marksweep-drain-gray-stack.h: The copy/mark and gray stack
- *     draining functions of the M&S major collector.
- *
- * Copyright (C) 2014 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/*
- * COPY_OR_MARK_FUNCTION_NAME must be defined to be the function name of the copy/mark
- * function.
- *
- * SCAN_OBJECT_FUNCTION_NAME must be defined to be the function name of the object scanning
- * function.
- *
- * DRAIN_GRAY_STACK_FUNCTION_NAME must be defined to be the function name of the gray stack
- * draining function.
- *
- * Define COPY_OR_MARK_WITH_EVACUATION to support evacuation.
- */
-
-/* Returns whether the object is still in the nursery. */
-static inline MONO_ALWAYS_INLINE gboolean
-COPY_OR_MARK_FUNCTION_NAME (void **ptr, void *obj, SgenGrayQueue *queue)
-{
-       MSBlockInfo *block;
-
-#ifdef HEAVY_STATISTICS
-       ++stat_optimized_copy;
-       {
-               char *forwarded;
-               mword desc;
-               if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj)))
-                       desc = sgen_obj_get_descriptor_safe (forwarded);
-               else
-                       desc = sgen_obj_get_descriptor_safe (obj);
-
-               sgen_descriptor_count_copied_object (desc);
-       }
-#endif
-
-       SGEN_ASSERT (9, obj, "null object from pointer %p", ptr);
-       SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);
-
-       if (sgen_ptr_in_nursery (obj)) {
-               int word, bit;
-               char *forwarded, *old_obj;
-               mword vtable_word = *(mword*)obj;
-
-               HEAVY_STAT (++stat_optimized_copy_nursery);
-
-#if SGEN_MAX_DEBUG_LEVEL >= 9
-               if (sgen_nursery_is_to_space (obj))
-                       SGEN_ASSERT (9, !SGEN_VTABLE_IS_PINNED (vtable_word) && !SGEN_VTABLE_IS_FORWARDED (vtable_word), "To-space object can't be pinned or forwarded.");
-#endif
-
-               if (SGEN_VTABLE_IS_PINNED (vtable_word)) {
-                       SGEN_ASSERT (9, !SGEN_VTABLE_IS_FORWARDED (vtable_word), "Cannot be both pinned and forwarded.");
-                       HEAVY_STAT (++stat_optimized_copy_nursery_pinned);
-                       return TRUE;
-               }
-               if ((forwarded = SGEN_VTABLE_IS_FORWARDED (vtable_word))) {
-                       HEAVY_STAT (++stat_optimized_copy_nursery_forwarded);
-                       SGEN_UPDATE_REFERENCE (ptr, forwarded);
-                       return sgen_ptr_in_nursery (forwarded);
-               }
-
-               /* An object in the nursery To Space has already been copied and grayed. Nothing to do. */
-               if (sgen_nursery_is_to_space (obj))
-                       return TRUE;
-
-#ifdef COPY_OR_MARK_WITH_EVACUATION
-       do_copy_object:
-#endif
-               old_obj = obj;
-               obj = copy_object_no_checks (obj, queue);
-               if (G_UNLIKELY (old_obj == obj)) {
-                       /*
-                        * If we fail to evacuate an object we just stop doing it for a
-                        * given block size as all other will surely fail too.
-                        */
-                       /* FIXME: test this case somehow. */
-                       if (!sgen_ptr_in_nursery (obj)) {
-                               int size_index;
-                               block = MS_BLOCK_FOR_OBJ (obj);
-                               size_index = block->obj_size_index;
-                               evacuate_block_obj_sizes [size_index] = FALSE;
-                               MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
-                               return FALSE;
-                       }
-                       return TRUE;
-               }
-               HEAVY_STAT (++stat_objects_copied_major);
-               SGEN_UPDATE_REFERENCE (ptr, obj);
-
-               if (sgen_ptr_in_nursery (obj))
-                       return TRUE;
-
-               /*
-                * FIXME: See comment for copy_object_no_checks().  If
-                * we have that, we can let the allocation function
-                * give us the block info, too, and we won't have to
-                * re-fetch it.
-                *
-                * FIXME (2): We should rework this to avoid all those nursery checks.
-                */
-               /*
-                * For the split nursery allocator the object might
-                * still be in the nursery despite having being
-                * promoted, in which case we can't mark it.
-                */
-               block = MS_BLOCK_FOR_OBJ (obj);
-               MS_CALC_MARK_BIT (word, bit, obj);
-               SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p already marked", obj);
-               MS_SET_MARK_BIT (block, word, bit);
-               binary_protocol_mark (obj, (gpointer)LOAD_VTABLE (obj), sgen_safe_object_get_size ((GCObject*)obj));
-
-               return FALSE;
-       } else {
-               mword vtable_word = *(mword*)obj;
-               mword desc;
-               int type;
-
-               HEAVY_STAT (++stat_optimized_copy_major);
-
-#ifdef COPY_OR_MARK_WITH_EVACUATION
-               {
-                       char *forwarded;
-                       if ((forwarded = SGEN_VTABLE_IS_FORWARDED (vtable_word))) {
-                               HEAVY_STAT (++stat_optimized_copy_major_forwarded);
-                               SGEN_UPDATE_REFERENCE (ptr, forwarded);
-                               SGEN_ASSERT (9, !sgen_ptr_in_nursery (forwarded), "Cannot be forwarded to nursery.");
-                               return FALSE;
-                       }
-               }
-#endif
-
-               SGEN_ASSERT (9, !SGEN_VTABLE_IS_PINNED (vtable_word), "Pinned object in non-pinned block?");
-
-               desc = sgen_vtable_get_descriptor ((GCVTable*)vtable_word);
-               type = desc & DESC_TYPE_MASK;
-
-               if (sgen_safe_object_is_small ((GCObject*)obj, type)) {
-#ifdef HEAVY_STATISTICS
-                       if (type <= DESC_TYPE_MAX_SMALL_OBJ)
-                               ++stat_optimized_copy_major_small_fast;
-                       else
-                               ++stat_optimized_copy_major_small_slow;
-#endif
-
-                       block = MS_BLOCK_FOR_OBJ (obj);
-
-#ifdef COPY_OR_MARK_WITH_EVACUATION
-                       {
-                               int size_index = block->obj_size_index;
-
-                               if (evacuate_block_obj_sizes [size_index] && !block->has_pinned) {
-                                       HEAVY_STAT (++stat_optimized_copy_major_small_evacuate);
-                                       if (block->is_to_space)
-                                               return FALSE;
-                                       goto do_copy_object;
-                               }
-                       }
-#endif
-
-                       MS_MARK_OBJECT_AND_ENQUEUE (obj, desc, block, queue);
-               } else {
-                       HEAVY_STAT (++stat_optimized_copy_major_large);
-
-                       if (sgen_los_object_is_pinned (obj))
-                               return FALSE;
-                       binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size ((GCObject*)obj));
-
-                       sgen_los_pin_object (obj);
-                       if (SGEN_OBJECT_HAS_REFERENCES (obj))
-                               GRAY_OBJECT_ENQUEUE (queue, obj, sgen_obj_get_descriptor (obj));
-               }
-               return FALSE;
-       }
-       SGEN_ASSERT (0, FALSE, "How is this happening?");
-       return FALSE;
-}
-
-static void
-SCAN_OBJECT_FUNCTION_NAME (char *obj, mword desc, SgenGrayQueue *queue)
-{
-       char *start = obj;
-
-#ifdef HEAVY_STATISTICS
-       ++stat_optimized_major_scan;
-       if (!sgen_gc_descr_has_references (desc))
-               ++stat_optimized_major_scan_no_refs;
-       sgen_descriptor_count_scanned_object (desc);
-#endif
-#ifdef SGEN_HEAVY_BINARY_PROTOCOL
-       add_scanned_object (start);
-#endif
-
-       /* Now scan the object. */
-
-#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj)    do {                                    \
-               void *__old = *(ptr);                                   \
-               binary_protocol_scan_process_reference ((obj), (ptr), __old); \
-               if (__old) {                                            \
-                       gboolean __still_in_nursery = COPY_OR_MARK_FUNCTION_NAME ((ptr), __old, queue); \
-                       if (G_UNLIKELY (__still_in_nursery && !sgen_ptr_in_nursery ((ptr)) && !SGEN_OBJECT_IS_CEMENTED (*(ptr)))) { \
-                               void *__copy = *(ptr);                  \
-                               sgen_add_to_global_remset ((ptr), __copy); \
-                       }                                               \
-               }                                                       \
-       } while (0)
-
-#define SCAN_OBJECT_PROTOCOL
-#include "sgen-scan-object.h"
-}
-
-static gboolean
-DRAIN_GRAY_STACK_FUNCTION_NAME (ScanCopyContext ctx)
-{
-       SgenGrayQueue *queue = ctx.queue;
-
-       SGEN_ASSERT (0, ctx.ops->scan_object == major_scan_object_with_evacuation, "Wrong scan function");
-
-       for (;;) {
-               char *obj;
-               mword desc;
-
-               HEAVY_STAT (++stat_drain_loops);
-
-               GRAY_OBJECT_DEQUEUE (queue, &obj, &desc);
-               if (!obj)
-                       return TRUE;
-
-               SCAN_OBJECT_FUNCTION_NAME (obj, desc, ctx.queue);
-       }
-}
-
-#undef COPY_OR_MARK_FUNCTION_NAME
-#undef COPY_OR_MARK_WITH_EVACUATION
-#undef SCAN_OBJECT_FUNCTION_NAME
-#undef DRAIN_GRAY_STACK_FUNCTION_NAME
diff --git a/mono/metadata/sgen-marksweep-scan-object-concurrent.h b/mono/metadata/sgen-marksweep-scan-object-concurrent.h
deleted file mode 100644 (file)
index bdad973..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * sgen-major-scan-object.h: Object scanning in the major collectors.
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-extern guint64 stat_scan_object_called_major;
-
-/*
- * FIXME: We use the same scanning function in the concurrent collector whether we scan
- * during the starting/finishing collection pause (with the world stopped) or from the
- * concurrent worker thread.
- *
- * As long as the world is stopped, we should just follow pointers into the nursery and
- * evict if possible.  In that case we also don't need the ALWAYS_ADD_TO_GLOBAL_REMSET case,
- * which only seems to make sense for when the world is stopped, in which case we only need
- * it because we don't follow into the nursery.
- */
-
-#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj)    do {                                    \
-               void *__old = *(ptr);                                   \
-               SGEN_OBJECT_LAYOUT_STATISTICS_MARK_BITMAP ((obj), (ptr)); \
-               binary_protocol_scan_process_reference ((obj), (ptr), __old); \
-               if (__old && !sgen_ptr_in_nursery (__old)) {            \
-                       PREFETCH_READ (__old);                  \
-                       major_copy_or_mark_object_concurrent ((ptr), __old, queue); \
-               } else {                                                \
-                       if (G_UNLIKELY (sgen_ptr_in_nursery (__old) && !sgen_ptr_in_nursery ((ptr)))) \
-                               ADD_TO_GLOBAL_REMSET ((GCObject*)(full_object), (ptr), __old); \
-               }                                                       \
-       } while (0)
-
-/* FIXME: Unify this with optimized code in sgen-marksweep.c. */
-
-#undef ADD_TO_GLOBAL_REMSET
-#define ADD_TO_GLOBAL_REMSET(object,ptr,target)        mark_mod_union_card ((object), (ptr))
-
-static void
-major_scan_object_no_mark_concurrent_anywhere (char *full_object, mword desc, SgenGrayQueue *queue)
-{
-       char *start = full_object;
-
-       SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP;
-
-#ifdef HEAVY_STATISTICS
-       sgen_descriptor_count_scanned_object (desc);
-#endif
-#ifdef SGEN_HEAVY_BINARY_PROTOCOL
-       add_scanned_object (start);
-#endif
-
-#define SCAN_OBJECT_PROTOCOL
-#include "sgen-scan-object.h"
-
-       SGEN_OBJECT_LAYOUT_STATISTICS_COMMIT_BITMAP;
-       HEAVY_STAT (++stat_scan_object_called_major);
-}
-
-static void
-major_scan_object_no_mark_concurrent_start (char *start, mword desc, SgenGrayQueue *queue)
-{
-       major_scan_object_no_mark_concurrent_anywhere (start, desc, queue);
-}
-
-static void
-major_scan_object_no_mark_concurrent (char *start, mword desc, SgenGrayQueue *queue)
-{
-       SGEN_ASSERT (0, !sgen_ptr_in_nursery (start), "Why are we scanning nursery objects in the concurrent collector?");
-       major_scan_object_no_mark_concurrent_anywhere (start, desc, queue);
-}
-
-#undef ADD_TO_GLOBAL_REMSET
-#define ADD_TO_GLOBAL_REMSET(object,ptr,target)        sgen_add_to_global_remset ((ptr), (target))
-
-static void
-major_scan_vtype_concurrent_finish (char *full_object, char *start, mword desc, SgenGrayQueue *queue BINARY_PROTOCOL_ARG (size_t size))
-{
-       SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP;
-
-#ifdef HEAVY_STATISTICS
-       /* FIXME: We're half scanning this object.  How do we account for that? */
-       //add_scanned_object (start);
-#endif
-
-       /* The descriptors include info about the object header as well */
-       start -= SGEN_CLIENT_OBJECT_HEADER_SIZE;
-
-#define SCAN_OBJECT_NOVTABLE
-#define SCAN_OBJECT_PROTOCOL
-#include "sgen-scan-object.h"
-
-       SGEN_OBJECT_LAYOUT_STATISTICS_COMMIT_BITMAP;
-}
diff --git a/mono/metadata/sgen-marksweep.c b/mono/metadata/sgen-marksweep.c
deleted file mode 100644 (file)
index cc5b27d..0000000
+++ /dev/null
@@ -1,2548 +0,0 @@
-/*
- * sgen-marksweep.c: The Mark & Sweep major collector.
- *
- * Author:
- *     Mark Probst <mark.probst@gmail.com>
- *
- * Copyright 2009-2010 Novell, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "config.h"
-
-#ifdef HAVE_SGEN_GC
-
-#include <math.h>
-#include <errno.h>
-#include <string.h>
-#include <stdlib.h>
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/sgen-protocol.h"
-#include "mono/metadata/sgen-cardtable.h"
-#include "mono/metadata/sgen-memory-governor.h"
-#include "mono/metadata/sgen-layout-stats.h"
-#include "mono/metadata/sgen-pointer-queue.h"
-#include "mono/metadata/sgen-pinning.h"
-#include "mono/metadata/sgen-workers.h"
-#include "mono/metadata/sgen-thread-pool.h"
-#include "mono/metadata/sgen-client.h"
-#include "mono/utils/mono-membar.h"
-
-#if defined(ARCH_MIN_MS_BLOCK_SIZE) && defined(ARCH_MIN_MS_BLOCK_SIZE_SHIFT)
-#define MS_BLOCK_SIZE  ARCH_MIN_MS_BLOCK_SIZE
-#define MS_BLOCK_SIZE_SHIFT    ARCH_MIN_MS_BLOCK_SIZE_SHIFT
-#else
-#define MS_BLOCK_SIZE_SHIFT     14      /* INT FASTENABLE */
-#define MS_BLOCK_SIZE           (1 << MS_BLOCK_SIZE_SHIFT)
-#endif
-#define MAJOR_SECTION_SIZE     MS_BLOCK_SIZE
-#define CARDS_PER_BLOCK (MS_BLOCK_SIZE / CARD_SIZE_IN_BYTES)
-
-/*
- * Don't allocate single blocks, but alloc a contingent of this many
- * blocks in one swoop.  This must be a power of two.
- */
-#define MS_BLOCK_ALLOC_NUM     32
-
-/*
- * Number of bytes before the first object in a block.  At the start
- * of a block is the MSBlockHeader, then opional padding, then come
- * the objects, so this must be >= sizeof (MSBlockHeader).
- */
-#define MS_BLOCK_SKIP  ((sizeof (MSBlockHeader) + 15) & ~15)
-
-#define MS_BLOCK_FREE  (MS_BLOCK_SIZE - MS_BLOCK_SKIP)
-
-#define MS_NUM_MARK_WORDS      ((MS_BLOCK_SIZE / SGEN_ALLOC_ALIGN + sizeof (mword) * 8 - 1) / (sizeof (mword) * 8))
-
-/*
- * Blocks progress from one state to the next:
- *
- * SWEPT           The block is fully swept.  It might or might not be in
- *                 a free list.
- *
- * MARKING         The block might or might not contain live objects.  If
- *                 we're in between an initial collection pause and the
- *                 finishing pause, the block might or might not be in a
- *                 free list.
- *
- * CHECKING        The sweep thread is investigating the block to determine
- *                 whether or not it contains live objects.  The block is
- *                 not in a free list.
- *
- * NEED_SWEEPING   The block contains live objects but has not yet been
- *                 swept.  It also contains free slots.  It is in a block
- *                 free list.
- *
- * SWEEPING        The block is being swept.  It might be in a free list.
- */
-
-enum {
-       BLOCK_STATE_SWEPT,
-       BLOCK_STATE_MARKING,
-       BLOCK_STATE_CHECKING,
-       BLOCK_STATE_NEED_SWEEPING,
-       BLOCK_STATE_SWEEPING
-};
-
-typedef struct _MSBlockInfo MSBlockInfo;
-struct _MSBlockInfo {
-       guint16 obj_size;
-       /*
-        * FIXME: Do we even need this? It's only used during sweep and might be worth
-        * recalculating to save the space.
-        */
-       guint16 obj_size_index;
-       /* FIXME: Reduce this - it only needs a byte. */
-       volatile gint32 state;
-       unsigned int pinned : 1;
-       unsigned int has_references : 1;
-       unsigned int has_pinned : 1;    /* means cannot evacuate */
-       unsigned int is_to_space : 1;
-       void ** volatile free_list;
-       MSBlockInfo * volatile next_free;
-       guint8 * volatile cardtable_mod_union;
-       mword mark_words [MS_NUM_MARK_WORDS];
-};
-
-#define MS_BLOCK_FOR_BLOCK_INFO(b)     ((char*)(b))
-
-#define MS_BLOCK_OBJ(b,i)              (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (b)->obj_size * (i))
-#define MS_BLOCK_OBJ_FOR_SIZE(b,i,obj_size)            (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (obj_size) * (i))
-#define MS_BLOCK_DATA_FOR_OBJ(o)       ((char*)((mword)(o) & ~(mword)(MS_BLOCK_SIZE - 1)))
-
-typedef struct {
-       MSBlockInfo info;
-} MSBlockHeader;
-
-#define MS_BLOCK_FOR_OBJ(o)            (&((MSBlockHeader*)MS_BLOCK_DATA_FOR_OBJ ((o)))->info)
-
-/* object index will always be small */
-#define MS_BLOCK_OBJ_INDEX(o,b)        ((int)(((char*)(o) - (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP)) / (b)->obj_size))
-
-//casting to int is fine since blocks are 32k
-#define MS_CALC_MARK_BIT(w,b,o)        do {                            \
-               int i = ((int)((char*)(o) - MS_BLOCK_DATA_FOR_OBJ ((o)))) >> SGEN_ALLOC_ALIGN_BITS; \
-               if (sizeof (mword) == 4) {                              \
-                       (w) = i >> 5;                                   \
-                       (b) = i & 31;                                   \
-               } else {                                                \
-                       (w) = i >> 6;                                   \
-                       (b) = i & 63;                                   \
-               }                                                       \
-       } while (0)
-
-#define MS_MARK_BIT(bl,w,b)    ((bl)->mark_words [(w)] & (ONE_P << (b)))
-#define MS_SET_MARK_BIT(bl,w,b)        ((bl)->mark_words [(w)] |= (ONE_P << (b)))
-
-#define MS_OBJ_ALLOCED(o,b)    (*(void**)(o) && (*(char**)(o) < MS_BLOCK_FOR_BLOCK_INFO (b) || *(char**)(o) >= MS_BLOCK_FOR_BLOCK_INFO (b) + MS_BLOCK_SIZE))
-
-#define MS_BLOCK_OBJ_SIZE_FACTOR       (pow (2.0, 1.0 / 3))
-
-/*
- * This way we can lookup block object size indexes for sizes up to
- * 256 bytes with a single load.
- */
-#define MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES     32
-
-static int *block_obj_sizes;
-static int num_block_obj_sizes;
-static int fast_block_obj_size_indexes [MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES];
-
-#define MS_BLOCK_FLAG_PINNED   1
-#define MS_BLOCK_FLAG_REFS     2
-
-#define MS_BLOCK_TYPE_MAX      4
-
-static gboolean *evacuate_block_obj_sizes;
-static float evacuation_threshold = 0.666f;
-static float concurrent_evacuation_threshold = 0.666f;
-static gboolean want_evacuation = FALSE;
-
-static gboolean lazy_sweep = FALSE;
-
-enum {
-       SWEEP_STATE_SWEPT,
-       SWEEP_STATE_NEED_SWEEPING,
-       SWEEP_STATE_SWEEPING,
-       SWEEP_STATE_SWEEPING_AND_ITERATING,
-       SWEEP_STATE_COMPACTING
-};
-
-static volatile int sweep_state = SWEEP_STATE_SWEPT;
-
-static gboolean concurrent_mark;
-static gboolean concurrent_sweep = TRUE;
-
-#define BLOCK_IS_TAGGED_HAS_REFERENCES(bl)     SGEN_POINTER_IS_TAGGED_1 ((bl))
-#define BLOCK_TAG_HAS_REFERENCES(bl)           SGEN_POINTER_TAG_1 ((bl))
-
-#define BLOCK_IS_TAGGED_CHECKING(bl)           SGEN_POINTER_IS_TAGGED_2 ((bl))
-#define BLOCK_TAG_CHECKING(bl)                 SGEN_POINTER_TAG_2 ((bl))
-
-#define BLOCK_UNTAG(bl)                                SGEN_POINTER_UNTAG_12 ((bl))
-
-#define BLOCK_TAG(bl)                          ((bl)->has_references ? BLOCK_TAG_HAS_REFERENCES ((bl)) : (bl))
-
-/* all allocated blocks in the system */
-static SgenPointerQueue allocated_blocks;
-
-/* non-allocated block free-list */
-static void *empty_blocks = NULL;
-static size_t num_empty_blocks = 0;
-
-#define FOREACH_BLOCK_NO_LOCK_CONDITION(cond,bl) {                     \
-       size_t __index;                                                 \
-       SGEN_ASSERT (0, (cond) && !sweep_in_progress (), "Can't iterate blocks while the world is running or sweep is in progress."); \
-       for (__index = 0; __index < allocated_blocks.next_slot; ++__index) { \
-               (bl) = BLOCK_UNTAG (allocated_blocks.data [__index]);
-#define FOREACH_BLOCK_NO_LOCK(bl)                                      \
-       FOREACH_BLOCK_NO_LOCK_CONDITION(sgen_is_world_stopped (), bl)
-#define FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK(bl,hr) {                  \
-       size_t __index;                                                 \
-       SGEN_ASSERT (0, sgen_is_world_stopped () && !sweep_in_progress (), "Can't iterate blocks while the world is running or sweep is in progress."); \
-       for (__index = 0; __index < allocated_blocks.next_slot; ++__index) { \
-               (bl) = allocated_blocks.data [__index];                 \
-               (hr) = BLOCK_IS_TAGGED_HAS_REFERENCES ((bl));           \
-               (bl) = BLOCK_UNTAG ((bl));
-#define END_FOREACH_BLOCK_NO_LOCK      } }
-
-static volatile size_t num_major_sections = 0;
-/*
- * One free block list for each block object size.  We add and remove blocks from these
- * lists lock-free via CAS.
- *
- * Blocks accessed/removed from `free_block_lists`:
- *   from the mutator (with GC lock held)
- *   in nursery collections
- *   in non-concurrent major collections
- *   in the finishing pause of concurrent major collections (whole list is cleared)
- *
- * Blocks added to `free_block_lists`:
- *   in the sweeping thread
- *   during nursery collections
- *   from domain clearing (with the world stopped and no sweeping happening)
- *
- * The only item of those that doesn't require the GC lock is the sweep thread.  The sweep
- * thread only ever adds blocks to the free list, so the ABA problem can't occur.
- */
-static MSBlockInfo * volatile *free_block_lists [MS_BLOCK_TYPE_MAX];
-
-static guint64 stat_major_blocks_alloced = 0;
-static guint64 stat_major_blocks_freed = 0;
-static guint64 stat_major_blocks_lazy_swept = 0;
-static guint64 stat_major_objects_evacuated = 0;
-
-#if SIZEOF_VOID_P != 8
-static guint64 stat_major_blocks_freed_ideal = 0;
-static guint64 stat_major_blocks_freed_less_ideal = 0;
-static guint64 stat_major_blocks_freed_individual = 0;
-static guint64 stat_major_blocks_alloced_less_ideal = 0;
-#endif
-
-#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
-static guint64 num_major_objects_marked = 0;
-#define INC_NUM_MAJOR_OBJECTS_MARKED() (++num_major_objects_marked)
-#else
-#define INC_NUM_MAJOR_OBJECTS_MARKED()
-#endif
-
-#ifdef SGEN_HEAVY_BINARY_PROTOCOL
-static mono_mutex_t scanned_objects_list_lock;
-static SgenPointerQueue scanned_objects_list;
-
-static void
-add_scanned_object (void *ptr)
-{
-       if (!binary_protocol_is_enabled ())
-               return;
-
-       mono_mutex_lock (&scanned_objects_list_lock);
-       sgen_pointer_queue_add (&scanned_objects_list, ptr);
-       mono_mutex_unlock (&scanned_objects_list_lock);
-}
-#endif
-
-static gboolean sweep_block (MSBlockInfo *block);
-
-static int
-ms_find_block_obj_size_index (size_t size)
-{
-       int i;
-       SGEN_ASSERT (9, size <= SGEN_MAX_SMALL_OBJ_SIZE, "size %zd is bigger than max small object size %d", size, SGEN_MAX_SMALL_OBJ_SIZE);
-       for (i = 0; i < num_block_obj_sizes; ++i)
-               if (block_obj_sizes [i] >= size)
-                       return i;
-       g_error ("no object of size %zd\n", size);
-       return -1;
-}
-
-#define FREE_BLOCKS_FROM(lists,p,r)    (lists [((p) ? MS_BLOCK_FLAG_PINNED : 0) | ((r) ? MS_BLOCK_FLAG_REFS : 0)])
-#define FREE_BLOCKS(p,r)               (FREE_BLOCKS_FROM (free_block_lists, (p), (r)))
-
-#define MS_BLOCK_OBJ_SIZE_INDEX(s)                             \
-       (((s)+7)>>3 < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES ?      \
-        fast_block_obj_size_indexes [((s)+7)>>3] :             \
-        ms_find_block_obj_size_index ((s)))
-
-static void*
-major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
-{
-       char *start;
-       if (nursery_align)
-               start = sgen_alloc_os_memory_aligned (nursery_size, nursery_align, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");
-       else
-               start = sgen_alloc_os_memory (nursery_size, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");
-
-       return start;
-}
-
-static void
-update_heap_boundaries_for_block (MSBlockInfo *block)
-{
-       sgen_update_heap_boundaries ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), (mword)MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE);
-}
-
-/*
- * Thread safe
- */
-static void*
-ms_get_empty_block (void)
-{
-       char *p;
-       int i;
-       void *block, *empty, *next;
-
- retry:
-       if (!empty_blocks) {
-               /*
-                * We try allocating MS_BLOCK_ALLOC_NUM blocks first.  If that's
-                * unsuccessful, we halve the number of blocks and try again, until we're at
-                * 1.  If that doesn't work, either, we assert.
-                */
-               int alloc_num = MS_BLOCK_ALLOC_NUM;
-               for (;;) {
-                       p = sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * alloc_num, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE,
-                                       alloc_num == 1 ? "major heap section" : NULL);
-                       if (p)
-                               break;
-                       alloc_num >>= 1;
-               }
-
-               for (i = 0; i < alloc_num; ++i) {
-                       block = p;
-                       /*
-                        * We do the free list update one after the
-                        * other so that other threads can use the new
-                        * blocks as quickly as possible.
-                        */
-                       do {
-                               empty = empty_blocks;
-                               *(void**)block = empty;
-                       } while (SGEN_CAS_PTR ((gpointer*)&empty_blocks, block, empty) != empty);
-                       p += MS_BLOCK_SIZE;
-               }
-
-               SGEN_ATOMIC_ADD_P (num_empty_blocks, alloc_num);
-
-               stat_major_blocks_alloced += alloc_num;
-#if SIZEOF_VOID_P != 8
-               if (alloc_num != MS_BLOCK_ALLOC_NUM)
-                       stat_major_blocks_alloced_less_ideal += alloc_num;
-#endif
-       }
-
-       do {
-               empty = empty_blocks;
-               if (!empty)
-                       goto retry;
-               block = empty;
-               next = *(void**)block;
-       } while (SGEN_CAS_PTR (&empty_blocks, next, empty) != empty);
-
-       SGEN_ATOMIC_ADD_P (num_empty_blocks, -1);
-
-       *(void**)block = NULL;
-
-       g_assert (!((mword)block & (MS_BLOCK_SIZE - 1)));
-
-       return block;
-}
-
-/*
- * This doesn't actually free a block immediately, but enqueues it into the `empty_blocks`
- * list, where it will either be freed later on, or reused in nursery collections.
- */
-static void
-ms_free_block (void *block)
-{
-       void *empty;
-
-       sgen_memgov_release_space (MS_BLOCK_SIZE, SPACE_MAJOR);
-       memset (block, 0, MS_BLOCK_SIZE);
-
-       do {
-               empty = empty_blocks;
-               *(void**)block = empty;
-       } while (SGEN_CAS_PTR (&empty_blocks, block, empty) != empty);
-
-       SGEN_ATOMIC_ADD_P (num_empty_blocks, 1);
-
-       binary_protocol_block_free (block, MS_BLOCK_SIZE);
-}
-
-static gboolean
-sweep_in_progress (void)
-{
-       int state = sweep_state;
-       return state == SWEEP_STATE_SWEEPING ||
-               state == SWEEP_STATE_SWEEPING_AND_ITERATING ||
-               state == SWEEP_STATE_COMPACTING;
-}
-
-static inline gboolean
-block_is_swept_or_marking (MSBlockInfo *block)
-{
-       gint32 state = block->state;
-       return state == BLOCK_STATE_SWEPT || state == BLOCK_STATE_MARKING;
-}
-
-//#define MARKSWEEP_CONSISTENCY_CHECK
-
-#ifdef MARKSWEEP_CONSISTENCY_CHECK
-static void
-check_block_free_list (MSBlockInfo *block, int size, gboolean pinned)
-{
-       SGEN_ASSERT (0, !sweep_in_progress (), "Can't examine allocated blocks during sweep");
-       for (; block; block = block->next_free) {
-               SGEN_ASSERT (0, block->state != BLOCK_STATE_CHECKING, "Can't have a block we're checking in a free list.");
-               g_assert (block->obj_size == size);
-               g_assert ((pinned && block->pinned) || (!pinned && !block->pinned));
-
-               /* blocks in the free lists must have at least
-                  one free slot */
-               g_assert (block->free_list);
-
-               /* the block must be in the allocated_blocks array */
-               g_assert (sgen_pointer_queue_find (&allocated_blocks, BLOCK_TAG (block)) != (size_t)-1);
-       }
-}
-
-static void
-check_empty_blocks (void)
-{
-       void *p;
-       size_t i = 0;
-       for (p = empty_blocks; p; p = *(void**)p)
-               ++i;
-       g_assert (i == num_empty_blocks);
-}
-
-static void
-consistency_check (void)
-{
-       MSBlockInfo *block;
-       int i;
-
-       /* check all blocks */
-       FOREACH_BLOCK_NO_LOCK (block) {
-               int count = MS_BLOCK_FREE / block->obj_size;
-               int num_free = 0;
-               void **free;
-
-               /* count number of free slots */
-               for (i = 0; i < count; ++i) {
-                       void **obj = (void**) MS_BLOCK_OBJ (block, i);
-                       if (!MS_OBJ_ALLOCED (obj, block))
-                               ++num_free;
-               }
-
-               /* check free list */
-               for (free = block->free_list; free; free = (void**)*free) {
-                       g_assert (MS_BLOCK_FOR_OBJ (free) == block);
-                       --num_free;
-               }
-               g_assert (num_free == 0);
-
-               /* check all mark words are zero */
-               if (!sgen_concurrent_collection_in_progress () && block_is_swept_or_marking (block)) {
-                       for (i = 0; i < MS_NUM_MARK_WORDS; ++i)
-                               g_assert (block->mark_words [i] == 0);
-               }
-       } END_FOREACH_BLOCK_NO_LOCK;
-
-       /* check free blocks */
-       for (i = 0; i < num_block_obj_sizes; ++i) {
-               int j;
-               for (j = 0; j < MS_BLOCK_TYPE_MAX; ++j)
-                       check_block_free_list (free_block_lists [j][i], block_obj_sizes [i], j & MS_BLOCK_FLAG_PINNED);
-       }
-
-       check_empty_blocks ();
-}
-#endif
-
-static void
-add_free_block (MSBlockInfo * volatile *free_blocks, int size_index, MSBlockInfo *block)
-{
-       MSBlockInfo *old;
-       do {
-               block->next_free = old = free_blocks [size_index];
-       } while (SGEN_CAS_PTR ((gpointer)&free_blocks [size_index], block, old) != old);
-}
-
-static void major_finish_sweep_checking (void);
-
-static gboolean
-ms_alloc_block (int size_index, gboolean pinned, gboolean has_references)
-{
-       int size = block_obj_sizes [size_index];
-       int count = MS_BLOCK_FREE / size;
-       MSBlockInfo *info;
-       MSBlockInfo * volatile * free_blocks = FREE_BLOCKS (pinned, has_references);
-       char *obj_start;
-       int i;
-
-       if (!sgen_memgov_try_alloc_space (MS_BLOCK_SIZE, SPACE_MAJOR))
-               return FALSE;
-
-       info = (MSBlockInfo*)ms_get_empty_block ();
-
-       SGEN_ASSERT (9, count >= 2, "block with %d objects, it must hold at least 2", count);
-
-       info->obj_size = size;
-       info->obj_size_index = size_index;
-       info->pinned = pinned;
-       info->has_references = has_references;
-       info->has_pinned = pinned;
-       /*
-        * Blocks that are to-space are not evacuated from.  During an major collection
-        * blocks are allocated for two reasons: evacuating objects from the nursery and
-        * evacuating them from major blocks marked for evacuation.  In both cases we don't
-        * want further evacuation.
-        */
-       info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD);
-       info->state = (info->is_to_space || sgen_concurrent_collection_in_progress ()) ? BLOCK_STATE_MARKING : BLOCK_STATE_SWEPT;
-       SGEN_ASSERT (6, !sweep_in_progress () || info->state == BLOCK_STATE_SWEPT, "How do we add a new block to be swept while sweeping?");
-       info->cardtable_mod_union = NULL;
-
-       update_heap_boundaries_for_block (info);
-
-       binary_protocol_block_alloc (info, MS_BLOCK_SIZE);
-
-       /* build free list */
-       obj_start = MS_BLOCK_FOR_BLOCK_INFO (info) + MS_BLOCK_SKIP;
-       info->free_list = (void**)obj_start;
-       /* we're skipping the last one - it must be nulled */
-       for (i = 0; i < count - 1; ++i) {
-               char *next_obj_start = obj_start + size;
-               *(void**)obj_start = next_obj_start;
-               obj_start = next_obj_start;
-       }
-       /* the last one */
-       *(void**)obj_start = NULL;
-
-       add_free_block (free_blocks, size_index, info);
-
-       /*
-        * This is the only place where the `allocated_blocks` array can potentially grow.
-        * We need to make sure concurrent sweep isn't running when that happens, so in that
-        * specific case we just wait for sweep to finish.
-        */
-       if (sgen_pointer_queue_will_grow (&allocated_blocks))
-               major_finish_sweep_checking ();
-
-       sgen_pointer_queue_add (&allocated_blocks, BLOCK_TAG (info));
-
-       SGEN_ATOMIC_ADD_P (num_major_sections, 1);
-       return TRUE;
-}
-
-static gboolean
-obj_is_from_pinned_alloc (char *ptr)
-{
-       MSBlockInfo *block;
-
-       FOREACH_BLOCK_NO_LOCK (block) {
-               if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE)
-                       return block->pinned;
-       } END_FOREACH_BLOCK_NO_LOCK;
-       return FALSE;
-}
-
-static void
-ensure_can_access_block_free_list (MSBlockInfo *block)
-{
- retry:
-       for (;;) {
-               switch (block->state) {
-               case BLOCK_STATE_SWEPT:
-               case BLOCK_STATE_MARKING:
-                       return;
-               case BLOCK_STATE_CHECKING:
-                       SGEN_ASSERT (0, FALSE, "How did we get a block that's being checked from a free list?");
-                       break;
-               case BLOCK_STATE_NEED_SWEEPING:
-                       if (sweep_block (block))
-                               ++stat_major_blocks_lazy_swept;
-                       break;
-               case BLOCK_STATE_SWEEPING:
-                       /* FIXME: do this more elegantly */
-                       g_usleep (100);
-                       goto retry;
-               default:
-                       SGEN_ASSERT (0, FALSE, "Illegal block state");
-                       break;
-               }
-       }
-}
-
-static void*
-unlink_slot_from_free_list_uncontested (MSBlockInfo * volatile *free_blocks, int size_index)
-{
-       MSBlockInfo *block, *next_free_block;
-       void *obj, *next_free_slot;
-
- retry:
-       block = free_blocks [size_index];
-       SGEN_ASSERT (9, block, "no free block to unlink from free_blocks %p size_index %d", free_blocks, size_index);
-
-       ensure_can_access_block_free_list (block);
-
-       obj = block->free_list;
-       SGEN_ASSERT (6, obj, "block %p in free list had no available object to alloc from", block);
-
-       next_free_slot = *(void**)obj;
-       if (next_free_slot) {
-               block->free_list = next_free_slot;
-               return obj;
-       }
-
-       next_free_block = block->next_free;
-       if (SGEN_CAS_PTR ((gpointer)&free_blocks [size_index], next_free_block, block) != block)
-               goto retry;
-
-       block->free_list = NULL;
-       block->next_free = NULL;
-
-       return obj;
-}
-
-static void*
-alloc_obj (GCVTable *vtable, size_t size, gboolean pinned, gboolean has_references)
-{
-       int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
-       MSBlockInfo * volatile * free_blocks = FREE_BLOCKS (pinned, has_references);
-       void *obj;
-
-       if (!free_blocks [size_index]) {
-               if (G_UNLIKELY (!ms_alloc_block (size_index, pinned, has_references)))
-                       return NULL;
-       }
-
-       obj = unlink_slot_from_free_list_uncontested (free_blocks, size_index);
-
-       *(GCVTable**)obj = vtable;
-
-       return obj;
-}
-
-static void*
-major_alloc_object (GCVTable *vtable, size_t size, gboolean has_references)
-{
-       return alloc_obj (vtable, size, FALSE, has_references);
-}
-
-/*
- * We're not freeing the block if it's empty.  We leave that work for
- * the next major collection.
- *
- * This is just called from the domain clearing code, which runs in a
- * single thread and has the GC lock, so we don't need an extra lock.
- */
-static void
-free_object (char *obj, size_t size, gboolean pinned)
-{
-       MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
-       int word, bit;
-       gboolean in_free_list;
-
-       SGEN_ASSERT (9, sweep_state == SWEEP_STATE_SWEPT, "Should have waited for sweep to free objects.");
-
-       ensure_can_access_block_free_list (block);
-       SGEN_ASSERT (9, (pinned && block->pinned) || (!pinned && !block->pinned), "free-object pinning mixup object %p pinned %d block %p pinned %d", obj, pinned, block, block->pinned);
-       SGEN_ASSERT (9, MS_OBJ_ALLOCED (obj, block), "object %p is already free", obj);
-       MS_CALC_MARK_BIT (word, bit, obj);
-       SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p has mark bit set", obj);
-
-       memset (obj, 0, size);
-
-       in_free_list = !!block->free_list;
-       *(void**)obj = block->free_list;
-       block->free_list = (void**)obj;
-
-       if (!in_free_list) {
-               MSBlockInfo * volatile *free_blocks = FREE_BLOCKS (pinned, block->has_references);
-               int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
-               SGEN_ASSERT (9, !block->next_free, "block %p doesn't have a free-list of object but belongs to a free-list of blocks", block);
-               add_free_block (free_blocks, size_index, block);
-       }
-}
-
-static void
-major_free_non_pinned_object (char *obj, size_t size)
-{
-       free_object (obj, size, FALSE);
-}
-
-/* size is a multiple of SGEN_ALLOC_ALIGN */
-static void*
-major_alloc_small_pinned_obj (GCVTable *vtable, size_t size, gboolean has_references)
-{
-       void *res;
-
-       res = alloc_obj (vtable, size, TRUE, has_references);
-        /*If we failed to alloc memory, we better try releasing memory
-         *as pinned alloc is requested by the runtime.
-         */
-        if (!res) {
-               sgen_perform_collection (0, GENERATION_OLD, "pinned alloc failure", TRUE);
-               res = alloc_obj (vtable, size, TRUE, has_references);
-        }
-        return res;
-}
-
-static void
-free_pinned_object (char *obj, size_t size)
-{
-       free_object (obj, size, TRUE);
-}
-
-/*
- * size is already rounded up and we hold the GC lock.
- */
-static void*
-major_alloc_degraded (GCVTable *vtable, size_t size)
-{
-       void *obj = alloc_obj (vtable, size, FALSE, SGEN_VTABLE_HAS_REFERENCES (vtable));
-       if (G_LIKELY (obj)) {
-               HEAVY_STAT (++stat_objects_alloced_degraded);
-               HEAVY_STAT (stat_bytes_alloced_degraded += size);
-       }
-       return obj;
-}
-
-/*
- * obj is some object.  If it's not in the major heap (i.e. if it's in
- * the nursery or LOS), return FALSE.  Otherwise return whether it's
- * been marked or copied.
- */
-static gboolean
-major_is_object_live (char *obj)
-{
-       MSBlockInfo *block;
-       int word, bit;
-       mword objsize;
-
-       if (sgen_ptr_in_nursery (obj))
-               return FALSE;
-
-       objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((GCObject*)obj));
-
-       /* LOS */
-       if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
-               return FALSE;
-
-       /* now we know it's in a major block */
-       block = MS_BLOCK_FOR_OBJ (obj);
-       SGEN_ASSERT (9, !block->pinned, "block %p is pinned, BTW why is this bad?", block);
-       MS_CALC_MARK_BIT (word, bit, obj);
-       return MS_MARK_BIT (block, word, bit) ? TRUE : FALSE;
-}
-
-static gboolean
-major_ptr_is_in_non_pinned_space (char *ptr, char **start)
-{
-       MSBlockInfo *block;
-
-       FOREACH_BLOCK_NO_LOCK (block) {
-               if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) {
-                       int count = MS_BLOCK_FREE / block->obj_size;
-                       int i;
-
-                       *start = NULL;
-                       for (i = 0; i <= count; ++i) {
-                               if (ptr >= MS_BLOCK_OBJ (block, i) && ptr < MS_BLOCK_OBJ (block, i + 1)) {
-                                       *start = MS_BLOCK_OBJ (block, i);
-                                       break;
-                               }
-                       }
-                       return !block->pinned;
-               }
-       } END_FOREACH_BLOCK_NO_LOCK;
-       return FALSE;
-}
-
-static gboolean
-try_set_sweep_state (int new, int expected)
-{
-       int old = SGEN_CAS (&sweep_state, new, expected);
-       return old == expected;
-}
-
-static void
-set_sweep_state (int new, int expected)
-{
-       gboolean success = try_set_sweep_state (new, expected);
-       SGEN_ASSERT (0, success, "Could not set sweep state.");
-}
-
-static gboolean ensure_block_is_checked_for_sweeping (int block_index, gboolean wait, gboolean *have_checked);
-
-static SgenThreadPoolJob * volatile sweep_job;
-
-static void
-major_finish_sweep_checking (void)
-{
-       int block_index;
-       SgenThreadPoolJob *job;
-
- retry:
-       switch (sweep_state) {
-       case SWEEP_STATE_SWEPT:
-       case SWEEP_STATE_NEED_SWEEPING:
-               return;
-       case SWEEP_STATE_SWEEPING:
-               if (try_set_sweep_state (SWEEP_STATE_SWEEPING_AND_ITERATING, SWEEP_STATE_SWEEPING))
-                       break;
-               goto retry;
-       case SWEEP_STATE_SWEEPING_AND_ITERATING:
-               SGEN_ASSERT (0, FALSE, "Is there another minor collection running?");
-               goto retry;
-       case SWEEP_STATE_COMPACTING:
-               goto wait;
-       default:
-               SGEN_ASSERT (0, FALSE, "Invalid sweep state.");
-               break;
-       }
-
-       /*
-        * We're running with the world stopped and the only other thread doing work is the
-        * sweep thread, which doesn't add blocks to the array, so we can safely access
-        * `next_slot`.
-        */
-       for (block_index = 0; block_index < allocated_blocks.next_slot; ++block_index)
-               ensure_block_is_checked_for_sweeping (block_index, FALSE, NULL);
-
-       set_sweep_state (SWEEP_STATE_SWEEPING, SWEEP_STATE_SWEEPING_AND_ITERATING);
-
- wait:
-       job = sweep_job;
-       if (job)
-               sgen_thread_pool_job_wait (job);
-       SGEN_ASSERT (0, !sweep_job, "Why did the sweep job not null itself?");
-       SGEN_ASSERT (0, sweep_state == SWEEP_STATE_SWEPT, "How is the sweep job done but we're not swept?");
-}
-
-static void
-major_iterate_objects (IterateObjectsFlags flags, IterateObjectCallbackFunc callback, void *data)
-{
-       gboolean sweep = flags & ITERATE_OBJECTS_SWEEP;
-       gboolean non_pinned = flags & ITERATE_OBJECTS_NON_PINNED;
-       gboolean pinned = flags & ITERATE_OBJECTS_PINNED;
-       MSBlockInfo *block;
-
-       major_finish_sweep_checking ();
-       FOREACH_BLOCK_NO_LOCK (block) {
-               int count = MS_BLOCK_FREE / block->obj_size;
-               int i;
-
-               if (block->pinned && !pinned)
-                       continue;
-               if (!block->pinned && !non_pinned)
-                       continue;
-               if (sweep && lazy_sweep) {
-                       sweep_block (block);
-                       SGEN_ASSERT (6, block->state == BLOCK_STATE_SWEPT, "Block must be swept after sweeping");
-               }
-
-               for (i = 0; i < count; ++i) {
-                       void **obj = (void**) MS_BLOCK_OBJ (block, i);
-                       /*
-                        * We've finished sweep checking, but if we're sweeping lazily and
-                        * the flags don't require us to sweep, the block might still need
-                        * sweeping.  In that case, we need to consult the mark bits to tell
-                        * us whether an object slot is live.
-                        */
-                       if (!block_is_swept_or_marking (block)) {
-                               int word, bit;
-                               SGEN_ASSERT (6, !sweep && block->state == BLOCK_STATE_NEED_SWEEPING, "Has sweeping not finished?");
-                               MS_CALC_MARK_BIT (word, bit, obj);
-                               if (!MS_MARK_BIT (block, word, bit))
-                                       continue;
-                       }
-                       if (MS_OBJ_ALLOCED (obj, block))
-                               callback ((char*)obj, block->obj_size, data);
-               }
-       } END_FOREACH_BLOCK_NO_LOCK;
-}
-
-static gboolean
-major_is_valid_object (char *object)
-{
-       MSBlockInfo *block;
-
-       FOREACH_BLOCK_NO_LOCK (block) {
-               int idx;
-               char *obj;
-
-               if ((MS_BLOCK_FOR_BLOCK_INFO (block) > object) || ((MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) <= object))
-                       continue;
-
-               idx = MS_BLOCK_OBJ_INDEX (object, block);
-               obj = (char*)MS_BLOCK_OBJ (block, idx);
-               if (obj != object)
-                       return FALSE;
-               return MS_OBJ_ALLOCED (obj, block);
-       } END_FOREACH_BLOCK_NO_LOCK;
-
-       return FALSE;
-}
-
-
-static GCVTable*
-major_describe_pointer (char *ptr)
-{
-       MSBlockInfo *block;
-
-       FOREACH_BLOCK_NO_LOCK (block) {
-               int idx;
-               char *obj;
-               gboolean live;
-               GCVTable *vtable;
-               int w, b;
-               gboolean marked;
-
-               if ((MS_BLOCK_FOR_BLOCK_INFO (block) > ptr) || ((MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) <= ptr))
-                       continue;
-
-               SGEN_LOG (0, "major-ptr (block %p sz %d pin %d ref %d)\n",
-                       MS_BLOCK_FOR_BLOCK_INFO (block), block->obj_size, block->pinned, block->has_references);
-
-               idx = MS_BLOCK_OBJ_INDEX (ptr, block);
-               obj = (char*)MS_BLOCK_OBJ (block, idx);
-               live = MS_OBJ_ALLOCED (obj, block);
-               vtable = live ? (GCVTable*)SGEN_LOAD_VTABLE (obj) : NULL;
-
-               MS_CALC_MARK_BIT (w, b, obj);
-               marked = MS_MARK_BIT (block, w, b);
-
-               if (obj == ptr) {
-                       SGEN_LOG (0, "\t(");
-                       if (live)
-                               SGEN_LOG (0, "object");
-                       else
-                               SGEN_LOG (0, "dead-object");
-               } else {
-                       if (live)
-                               SGEN_LOG (0, "interior-ptr offset %td", ptr - obj);
-                       else
-                               SGEN_LOG (0, "dead-interior-ptr offset %td", ptr - obj);
-               }
-
-               SGEN_LOG (0, " marked %d)\n", marked ? 1 : 0);
-
-               return vtable;
-       } END_FOREACH_BLOCK_NO_LOCK;
-
-       return NULL;
-}
-
-static void
-major_check_scan_starts (void)
-{
-}
-
-static void
-major_dump_heap (FILE *heap_dump_file)
-{
-       MSBlockInfo *block;
-       int *slots_available = alloca (sizeof (int) * num_block_obj_sizes);
-       int *slots_used = alloca (sizeof (int) * num_block_obj_sizes);
-       int i;
-
-       for (i = 0; i < num_block_obj_sizes; ++i)
-               slots_available [i] = slots_used [i] = 0;
-
-       FOREACH_BLOCK_NO_LOCK (block) {
-               int index = ms_find_block_obj_size_index (block->obj_size);
-               int count = MS_BLOCK_FREE / block->obj_size;
-
-               slots_available [index] += count;
-               for (i = 0; i < count; ++i) {
-                       if (MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block))
-                               ++slots_used [index];
-               }
-       } END_FOREACH_BLOCK_NO_LOCK;
-
-       fprintf (heap_dump_file, "<occupancies>\n");
-       for (i = 0; i < num_block_obj_sizes; ++i) {
-               fprintf (heap_dump_file, "<occupancy size=\"%d\" available=\"%d\" used=\"%d\" />\n",
-                               block_obj_sizes [i], slots_available [i], slots_used [i]);
-       }
-       fprintf (heap_dump_file, "</occupancies>\n");
-
-       FOREACH_BLOCK_NO_LOCK (block) {
-               int count = MS_BLOCK_FREE / block->obj_size;
-               int i;
-               int start = -1;
-
-               fprintf (heap_dump_file, "<section type=\"%s\" size=\"%zu\">\n", "old", (size_t)MS_BLOCK_FREE);
-
-               for (i = 0; i <= count; ++i) {
-                       if ((i < count) && MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block)) {
-                               if (start < 0)
-                                       start = i;
-                       } else {
-                               if (start >= 0) {
-                                       sgen_dump_occupied (MS_BLOCK_OBJ (block, start), MS_BLOCK_OBJ (block, i), MS_BLOCK_FOR_BLOCK_INFO (block));
-                                       start = -1;
-                               }
-                       }
-               }
-
-               fprintf (heap_dump_file, "</section>\n");
-       } END_FOREACH_BLOCK_NO_LOCK;
-}
-
-static guint8*
-get_cardtable_mod_union_for_block (MSBlockInfo *block, gboolean allocate)
-{
-       guint8 *mod_union = block->cardtable_mod_union;
-       guint8 *other;
-       if (mod_union)
-               return mod_union;
-       else if (!allocate)
-               return NULL;
-       mod_union = sgen_card_table_alloc_mod_union (MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
-       other = SGEN_CAS_PTR ((gpointer*)&block->cardtable_mod_union, mod_union, NULL);
-       if (!other) {
-               SGEN_ASSERT (0, block->cardtable_mod_union == mod_union, "Why did CAS not replace?");
-               return mod_union;
-       }
-       sgen_card_table_free_mod_union (mod_union, MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
-       return other;
-}
-
-static inline guint8*
-major_get_cardtable_mod_union_for_reference (char *ptr)
-{
-       MSBlockInfo *block = MS_BLOCK_FOR_OBJ (ptr);
-       size_t offset = sgen_card_table_get_card_offset (ptr, (char*)sgen_card_table_align_pointer (MS_BLOCK_FOR_BLOCK_INFO (block)));
-       guint8 *mod_union = get_cardtable_mod_union_for_block (block, TRUE);
-       SGEN_ASSERT (0, mod_union, "FIXME: optionally allocate the mod union if it's not here and CAS it in.");
-       return &mod_union [offset];
-}
-
-/*
- * Mark the mod-union card for `ptr`, which must be a reference within the object `obj`.
- */
-static void
-mark_mod_union_card (GCObject *obj, void **ptr)
-{
-       int type = sgen_obj_get_descriptor ((char*)obj) & DESC_TYPE_MASK;
-       if (sgen_safe_object_is_small (obj, type)) {
-               guint8 *card_byte = major_get_cardtable_mod_union_for_reference ((char*)ptr);
-               SGEN_ASSERT (0, MS_BLOCK_FOR_OBJ (obj) == MS_BLOCK_FOR_OBJ (ptr), "How can an object and a reference inside it not be in the same block?");
-               *card_byte = 1;
-       } else {
-               sgen_los_mark_mod_union_card (obj, ptr);
-       }
-}
-
-#define LOAD_VTABLE    SGEN_LOAD_VTABLE
-
-#define MS_MARK_OBJECT_AND_ENQUEUE_CHECKED(obj,desc,block,queue) do {  \
-               int __word, __bit;                                      \
-               MS_CALC_MARK_BIT (__word, __bit, (obj));                \
-               if (!MS_MARK_BIT ((block), __word, __bit) && MS_OBJ_ALLOCED ((obj), (block))) { \
-                       MS_SET_MARK_BIT ((block), __word, __bit);       \
-                       if (sgen_gc_descr_has_references (desc))                        \
-                               GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
-                       binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((GCObject*)(obj))); \
-                       INC_NUM_MAJOR_OBJECTS_MARKED ();                \
-               }                                                       \
-       } while (0)
-#define MS_MARK_OBJECT_AND_ENQUEUE(obj,desc,block,queue) do {          \
-               int __word, __bit;                                      \
-               MS_CALC_MARK_BIT (__word, __bit, (obj));                \
-               SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj); \
-               if (!MS_MARK_BIT ((block), __word, __bit)) {            \
-                       MS_SET_MARK_BIT ((block), __word, __bit);       \
-                       if (sgen_gc_descr_has_references (desc))                        \
-                               GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
-                       binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((GCObject*)(obj))); \
-                       INC_NUM_MAJOR_OBJECTS_MARKED ();                \
-               }                                                       \
-       } while (0)
-
-static void
-pin_major_object (char *obj, SgenGrayQueue *queue)
-{
-       MSBlockInfo *block;
-
-       if (concurrent_mark)
-               g_assert_not_reached ();
-
-       block = MS_BLOCK_FOR_OBJ (obj);
-       block->has_pinned = TRUE;
-       MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
-}
-
-#include "sgen-major-copy-object.h"
-
-static void
-major_copy_or_mark_object_concurrent (void **ptr, void *obj, SgenGrayQueue *queue)
-{
-       SGEN_ASSERT (9, sgen_concurrent_collection_in_progress (), "Why are we scanning concurrently when there's no concurrent collection on?");
-       SGEN_ASSERT (9, !sgen_workers_are_working () || sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ()), "We must not scan from two threads at the same time!");
-
-       g_assert (!SGEN_OBJECT_IS_FORWARDED (obj));
-
-       if (!sgen_ptr_in_nursery (obj)) {
-               mword objsize;
-
-               objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((GCObject*)obj));
-
-               if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE) {
-                       MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
-                       MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
-               } else {
-                       if (sgen_los_object_is_pinned (obj))
-                               return;
-
-                       binary_protocol_mark (obj, SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size (obj));
-
-                       sgen_los_pin_object (obj);
-                       if (SGEN_OBJECT_HAS_REFERENCES (obj))
-                               GRAY_OBJECT_ENQUEUE (queue, obj, sgen_obj_get_descriptor (obj));
-                       INC_NUM_MAJOR_OBJECTS_MARKED ();
-               }
-       }
-}
-
-static long long
-major_get_and_reset_num_major_objects_marked (void)
-{
-#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
-       long long num = num_major_objects_marked;
-       num_major_objects_marked = 0;
-       return num;
-#else
-       return 0;
-#endif
-}
-
-#define PREFETCH_CARDS         1       /* BOOL FASTENABLE */
-#if !PREFETCH_CARDS
-#undef PREFETCH_CARDS
-#endif
-
-/* gcc 4.2.1 from xcode4 crashes on sgen_card_table_get_card_address () when this is enabled */
-#if defined(PLATFORM_MACOSX)
-#define GCC_VERSION (__GNUC__ * 10000 \
-                               + __GNUC_MINOR__ * 100 \
-                               + __GNUC_PATCHLEVEL__)
-#if GCC_VERSION <= 40300
-#undef PREFETCH_CARDS
-#endif
-#endif
-
-#ifdef HEAVY_STATISTICS
-static guint64 stat_optimized_copy;
-static guint64 stat_optimized_copy_nursery;
-static guint64 stat_optimized_copy_nursery_forwarded;
-static guint64 stat_optimized_copy_nursery_pinned;
-static guint64 stat_optimized_copy_major;
-static guint64 stat_optimized_copy_major_small_fast;
-static guint64 stat_optimized_copy_major_small_slow;
-static guint64 stat_optimized_copy_major_large;
-static guint64 stat_optimized_copy_major_forwarded;
-static guint64 stat_optimized_copy_major_small_evacuate;
-static guint64 stat_optimized_major_scan;
-static guint64 stat_optimized_major_scan_no_refs;
-
-static guint64 stat_drain_prefetch_fills;
-static guint64 stat_drain_prefetch_fill_failures;
-static guint64 stat_drain_loops;
-#endif
-
-static void major_scan_object_with_evacuation (char *start, mword desc, SgenGrayQueue *queue);
-
-#define COPY_OR_MARK_FUNCTION_NAME     major_copy_or_mark_object_no_evacuation
-#define SCAN_OBJECT_FUNCTION_NAME      major_scan_object_no_evacuation
-#define DRAIN_GRAY_STACK_FUNCTION_NAME drain_gray_stack_no_evacuation
-#include "sgen-marksweep-drain-gray-stack.h"
-
-#define COPY_OR_MARK_WITH_EVACUATION
-#define COPY_OR_MARK_FUNCTION_NAME     major_copy_or_mark_object_with_evacuation
-#define SCAN_OBJECT_FUNCTION_NAME      major_scan_object_with_evacuation
-#define DRAIN_GRAY_STACK_FUNCTION_NAME drain_gray_stack_with_evacuation
-#include "sgen-marksweep-drain-gray-stack.h"
-
-static gboolean
-drain_gray_stack (ScanCopyContext ctx)
-{
-       gboolean evacuation = FALSE;
-       int i;
-       for (i = 0; i < num_block_obj_sizes; ++i) {
-               if (evacuate_block_obj_sizes [i]) {
-                       evacuation = TRUE;
-                       break;
-               }
-       }
-
-       if (evacuation)
-               return drain_gray_stack_with_evacuation (ctx);
-       else
-               return drain_gray_stack_no_evacuation (ctx);
-}
-
-#include "sgen-marksweep-scan-object-concurrent.h"
-
-static void
-major_copy_or_mark_object_canonical (void **ptr, SgenGrayQueue *queue)
-{
-       major_copy_or_mark_object_with_evacuation (ptr, *ptr, queue);
-}
-
-static void
-major_copy_or_mark_object_concurrent_canonical (void **ptr, SgenGrayQueue *queue)
-{
-       major_copy_or_mark_object_concurrent (ptr, *ptr, queue);
-}
-
-static void
-major_copy_or_mark_object_concurrent_finish_canonical (void **ptr, SgenGrayQueue *queue)
-{
-       major_copy_or_mark_object_no_evacuation (ptr, *ptr, queue);
-}
-
-static void
-mark_pinned_objects_in_block (MSBlockInfo *block, size_t first_entry, size_t last_entry, SgenGrayQueue *queue)
-{
-       void **entry, **end;
-       int last_index = -1;
-
-       if (first_entry == last_entry)
-               return;
-
-       block->has_pinned = TRUE;
-
-       entry = sgen_pinning_get_entry (first_entry);
-       end = sgen_pinning_get_entry (last_entry);
-
-       for (; entry < end; ++entry) {
-               int index = MS_BLOCK_OBJ_INDEX (*entry, block);
-               char *obj;
-               SGEN_ASSERT (9, index >= 0 && index < MS_BLOCK_FREE / block->obj_size, "invalid object %p index %d max-index %d", *entry, index, (int)(MS_BLOCK_FREE / block->obj_size));
-               if (index == last_index)
-                       continue;
-               obj = MS_BLOCK_OBJ (block, index);
-               MS_MARK_OBJECT_AND_ENQUEUE_CHECKED (obj, sgen_obj_get_descriptor (obj), block, queue);
-               last_index = index;
-       }
-}
-
-static inline void
-sweep_block_for_size (MSBlockInfo *block, int count, int obj_size)
-{
-       int obj_index;
-
-       for (obj_index = 0; obj_index < count; ++obj_index) {
-               int word, bit;
-               void *obj = MS_BLOCK_OBJ_FOR_SIZE (block, obj_index, obj_size);
-
-               MS_CALC_MARK_BIT (word, bit, obj);
-               if (MS_MARK_BIT (block, word, bit)) {
-                       SGEN_ASSERT (9, MS_OBJ_ALLOCED (obj, block), "object %p not allocated", obj);
-               } else {
-                       /* an unmarked object */
-                       if (MS_OBJ_ALLOCED (obj, block)) {
-                               /*
-                                * FIXME: Merge consecutive
-                                * slots for lower reporting
-                                * overhead.  Maybe memset
-                                * will also benefit?
-                                */
-                               binary_protocol_empty (obj, obj_size);
-                               memset (obj, 0, obj_size);
-                       }
-                       *(void**)obj = block->free_list;
-                       block->free_list = obj;
-               }
-       }
-}
-
-static inline gboolean
-try_set_block_state (MSBlockInfo *block, gint32 new_state, gint32 expected_state)
-{
-       gint32 old_state = SGEN_CAS (&block->state, new_state, expected_state);
-       gboolean success = old_state == expected_state;
-       if (success)
-               binary_protocol_block_set_state (block, MS_BLOCK_SIZE, old_state, new_state);
-       return success;
-}
-
-static inline void
-set_block_state (MSBlockInfo *block, gint32 new_state, gint32 expected_state)
-{
-       SGEN_ASSERT (6, block->state == expected_state, "Block state incorrect before set");
-       block->state = new_state;
-}
-
-/*
- * If `block` needs sweeping, sweep it and return TRUE.  Otherwise return FALSE.
- *
- * Sweeping means iterating through the block's slots and building the free-list from the
- * unmarked ones.  They will also be zeroed.  The mark bits will be reset.
- */
-static gboolean
-sweep_block (MSBlockInfo *block)
-{
-       int count;
-       void *reversed = NULL;
-
- retry:
-       switch (block->state) {
-       case BLOCK_STATE_SWEPT:
-               return FALSE;
-       case BLOCK_STATE_MARKING:
-       case BLOCK_STATE_CHECKING:
-               SGEN_ASSERT (0, FALSE, "How did we get to sweep a block that's being marked or being checked?");
-               goto retry;
-       case BLOCK_STATE_SWEEPING:
-               /* FIXME: Do this more elegantly */
-               g_usleep (100);
-               goto retry;
-       case BLOCK_STATE_NEED_SWEEPING:
-               if (!try_set_block_state (block, BLOCK_STATE_SWEEPING, BLOCK_STATE_NEED_SWEEPING))
-                       goto retry;
-               break;
-       default:
-               SGEN_ASSERT (0, FALSE, "Illegal block state");
-       }
-
-       SGEN_ASSERT (6, block->state == BLOCK_STATE_SWEEPING, "How did we get here without setting state to sweeping?");
-
-       count = MS_BLOCK_FREE / block->obj_size;
-
-       block->free_list = NULL;
-
-       /* Use inline instances specialized to constant sizes, this allows the compiler to replace the memset calls with inline code */
-       // FIXME: Add more sizes
-       switch (block->obj_size) {
-       case 16:
-               sweep_block_for_size (block, count, 16);
-               break;
-       default:
-               sweep_block_for_size (block, count, block->obj_size);
-               break;
-       }
-
-       /* reset mark bits */
-       memset (block->mark_words, 0, sizeof (mword) * MS_NUM_MARK_WORDS);
-
-       /* Reverse free list so that it's in address order */
-       reversed = NULL;
-       while (block->free_list) {
-               void *next = *(void**)block->free_list;
-               *(void**)block->free_list = reversed;
-               reversed = block->free_list;
-               block->free_list = next;
-       }
-       block->free_list = reversed;
-
-       mono_memory_write_barrier ();
-
-       set_block_state (block, BLOCK_STATE_SWEPT, BLOCK_STATE_SWEEPING);
-
-       return TRUE;
-}
-
-static inline int
-bitcount (mword d)
-{
-       int count = 0;
-
-#ifdef __GNUC__
-       if (sizeof (mword) == sizeof (unsigned long))
-               count += __builtin_popcountl (d);
-       else
-               count += __builtin_popcount (d);
-#else
-       while (d) {
-               count ++;
-               d &= (d - 1);
-       }
-#endif
-       return count;
-}
-
-/* statistics for evacuation */
-static size_t *sweep_slots_available;
-static size_t *sweep_slots_used;
-static size_t *sweep_num_blocks;
-
-static volatile size_t num_major_sections_before_sweep;
-static volatile size_t num_major_sections_freed_in_sweep;
-
-static void
-sweep_start (void)
-{
-       int i;
-
-       for (i = 0; i < num_block_obj_sizes; ++i)
-               sweep_slots_available [i] = sweep_slots_used [i] = sweep_num_blocks [i] = 0;
-
-       /* clear all the free lists */
-       for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
-               MSBlockInfo * volatile *free_blocks = free_block_lists [i];
-               int j;
-               for (j = 0; j < num_block_obj_sizes; ++j)
-                       free_blocks [j] = NULL;
-       }
-}
-
-static void sweep_finish (void);
-
-/*
- * If `wait` is TRUE and the block is currently being checked, this function will wait until
- * the checking has finished.
- *
- * Returns whether the block is still there.  If `wait` is FALSE, the return value will not
- * be correct, i.e. must not be used.
- */
-static gboolean
-ensure_block_is_checked_for_sweeping (int block_index, gboolean wait, gboolean *have_checked)
-{
-       int count;
-       gboolean have_live = FALSE;
-       gboolean have_free = FALSE;
-       int nused = 0;
-       int block_state;
-       int i;
-       void *tagged_block;
-       MSBlockInfo *block;
-
-       SGEN_ASSERT (6, sweep_in_progress (), "Why do we call this function if there's no sweep in progress?");
-
-       if (have_checked)
-               *have_checked = FALSE;
-
- retry:
-       tagged_block = *(void * volatile *)&allocated_blocks.data [block_index];
-       if (!tagged_block)
-               return FALSE;
-
-       if (BLOCK_IS_TAGGED_CHECKING (tagged_block)) {
-               if (!wait)
-                       return FALSE;
-               /* FIXME: do this more elegantly */
-               g_usleep (100);
-               goto retry;
-       }
-
-       if (SGEN_CAS_PTR (&allocated_blocks.data [block_index], BLOCK_TAG_CHECKING (tagged_block), tagged_block) != tagged_block)
-               goto retry;
-
-       block = BLOCK_UNTAG (tagged_block);
-       block_state = block->state;
-
-       if (!sweep_in_progress ()) {
-               SGEN_ASSERT (6, block_state != BLOCK_STATE_SWEEPING && block_state != BLOCK_STATE_CHECKING, "Invalid block state.");
-               if (!lazy_sweep)
-                       SGEN_ASSERT (6, block_state != BLOCK_STATE_NEED_SWEEPING, "Invalid block state.");
-       }
-
-       switch (block_state) {
-       case BLOCK_STATE_SWEPT:
-       case BLOCK_STATE_NEED_SWEEPING:
-       case BLOCK_STATE_SWEEPING:
-               goto done;
-       case BLOCK_STATE_MARKING:
-               break;
-       case BLOCK_STATE_CHECKING:
-               SGEN_ASSERT (0, FALSE, "We set the CHECKING bit - how can the stage be CHECKING?");
-               goto done;
-       default:
-               SGEN_ASSERT (0, FALSE, "Illegal block state");
-               break;
-       }
-
-       SGEN_ASSERT (6, block->state == BLOCK_STATE_MARKING, "When we sweep all blocks must start out marking.");
-       set_block_state (block, BLOCK_STATE_CHECKING, BLOCK_STATE_MARKING);
-
-       if (have_checked)
-               *have_checked = TRUE;
-
-       block->has_pinned = block->pinned;
-
-       block->is_to_space = FALSE;
-
-       count = MS_BLOCK_FREE / block->obj_size;
-
-       if (block->cardtable_mod_union) {
-               sgen_card_table_free_mod_union (block->cardtable_mod_union, MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
-               block->cardtable_mod_union = NULL;
-       }
-
-       /* Count marked objects in the block */
-       for (i = 0; i < MS_NUM_MARK_WORDS; ++i)
-               nused += bitcount (block->mark_words [i]);
-
-       if (nused)
-               have_live = TRUE;
-       if (nused < count)
-               have_free = TRUE;
-
-       if (have_live) {
-               int obj_size_index = block->obj_size_index;
-               gboolean has_pinned = block->has_pinned;
-
-               set_block_state (block, BLOCK_STATE_NEED_SWEEPING, BLOCK_STATE_CHECKING);
-
-               /*
-                * FIXME: Go straight to SWEPT if there are no free slots.  We need
-                * to set the free slot list to NULL, though, and maybe update some
-                * statistics.
-                */
-               if (!lazy_sweep)
-                       sweep_block (block);
-
-               if (!has_pinned) {
-                       ++sweep_num_blocks [obj_size_index];
-                       sweep_slots_used [obj_size_index] += nused;
-                       sweep_slots_available [obj_size_index] += count;
-               }
-
-               /*
-                * If there are free slots in the block, add
-                * the block to the corresponding free list.
-                */
-               if (have_free) {
-                       MSBlockInfo * volatile *free_blocks = FREE_BLOCKS (block->pinned, block->has_references);
-
-                       if (!lazy_sweep)
-                               SGEN_ASSERT (6, block->free_list, "How do we not have a free list when there are free slots?");
-
-                       add_free_block (free_blocks, obj_size_index, block);
-               }
-
-               /* FIXME: Do we need the heap boundaries while we do nursery collections? */
-               update_heap_boundaries_for_block (block);
-       } else {
-               /*
-                * Blocks without live objects are removed from the
-                * block list and freed.
-                */
-               SGEN_ASSERT (6, block_index < allocated_blocks.next_slot, "How did the number of blocks shrink?");
-               SGEN_ASSERT (6, allocated_blocks.data [block_index] == BLOCK_TAG_CHECKING (tagged_block), "How did the block move?");
-
-               binary_protocol_empty (MS_BLOCK_OBJ (block, 0), (char*)MS_BLOCK_OBJ (block, count) - (char*)MS_BLOCK_OBJ (block, 0));
-               ms_free_block (block);
-
-               SGEN_ATOMIC_ADD_P (num_major_sections, -1);
-
-               tagged_block = NULL;
-       }
-
- done:
-       allocated_blocks.data [block_index] = tagged_block;
-       return !!tagged_block;
-}
-
-static void
-sweep_job_func (void *thread_data_untyped, SgenThreadPoolJob *job)
-{
-       int block_index;
-       int num_blocks = num_major_sections_before_sweep;
-
-       SGEN_ASSERT (0, sweep_in_progress (), "Sweep thread called with wrong state");
-       SGEN_ASSERT (0, num_blocks <= allocated_blocks.next_slot, "How did we lose blocks?");
-
-       /*
-        * We traverse the block array from high to low.  Nursery collections will have to
-        * cooperate with the sweep thread to finish sweeping, and they will traverse from
-        * low to high, to avoid constantly colliding on the same blocks.
-        */
-       for (block_index = num_blocks - 1; block_index >= 0; --block_index) {
-               gboolean have_checked;
-
-               /*
-                * The block might have been freed by another thread doing some checking
-                * work.
-                */
-               if (!ensure_block_is_checked_for_sweeping (block_index, TRUE, &have_checked))
-                       ++num_major_sections_freed_in_sweep;
-       }
-
-       while (!try_set_sweep_state (SWEEP_STATE_COMPACTING, SWEEP_STATE_SWEEPING)) {
-               /*
-                * The main GC thread is currently iterating over the block array to help us
-                * finish the sweep.  We have already finished, but we don't want to mess up
-                * that iteration, so we just wait for it.
-                */
-               g_usleep (100);
-       }
-
-       if (SGEN_MAX_ASSERT_LEVEL >= 6) {
-               for (block_index = num_blocks; block_index < allocated_blocks.next_slot; ++block_index) {
-                       MSBlockInfo *block = BLOCK_UNTAG (allocated_blocks.data [block_index]);
-                       SGEN_ASSERT (6, block && block->state == BLOCK_STATE_SWEPT, "How did a new block to be swept get added while swept?");
-               }
-       }
-
-       sgen_pointer_queue_remove_nulls (&allocated_blocks);
-
-       sweep_finish ();
-
-       sweep_job = NULL;
-}
-
-static void
-sweep_finish (void)
-{
-       mword total_evacuate_heap = 0;
-       mword total_evacuate_saved = 0;
-       int i;
-
-       for (i = 0; i < num_block_obj_sizes; ++i) {
-               float usage = (float)sweep_slots_used [i] / (float)sweep_slots_available [i];
-               if (sweep_num_blocks [i] > 5 && usage < evacuation_threshold) {
-                       evacuate_block_obj_sizes [i] = TRUE;
-                       /*
-                       g_print ("slot size %d - %d of %d used\n",
-                                       block_obj_sizes [i], slots_used [i], slots_available [i]);
-                       */
-               } else {
-                       evacuate_block_obj_sizes [i] = FALSE;
-               }
-               {
-                       mword total_bytes = block_obj_sizes [i] * sweep_slots_available [i];
-                       total_evacuate_heap += total_bytes;
-                       if (evacuate_block_obj_sizes [i])
-                               total_evacuate_saved += total_bytes - block_obj_sizes [i] * sweep_slots_used [i];
-               }
-       }
-
-       want_evacuation = (float)total_evacuate_saved / (float)total_evacuate_heap > (1 - concurrent_evacuation_threshold);
-
-       set_sweep_state (SWEEP_STATE_SWEPT, SWEEP_STATE_COMPACTING);
-}
-
-static void
-major_sweep (void)
-{
-       set_sweep_state (SWEEP_STATE_SWEEPING, SWEEP_STATE_NEED_SWEEPING);
-
-       sweep_start ();
-
-       SGEN_ASSERT (0, num_major_sections == allocated_blocks.next_slot, "We don't know how many blocks we have?");
-
-       num_major_sections_before_sweep = num_major_sections;
-       num_major_sections_freed_in_sweep = 0;
-
-       SGEN_ASSERT (0, !sweep_job, "We haven't finished the last sweep?");
-       if (concurrent_sweep) {
-               sweep_job = sgen_thread_pool_job_alloc ("sweep", sweep_job_func, sizeof (SgenThreadPoolJob));
-               sgen_thread_pool_job_enqueue (sweep_job);
-       } else {
-               sweep_job_func (NULL, NULL);
-       }
-}
-
-static gboolean
-major_have_swept (void)
-{
-       return sweep_state == SWEEP_STATE_SWEPT;
-}
-
-static int count_pinned_ref;
-static int count_pinned_nonref;
-static int count_nonpinned_ref;
-static int count_nonpinned_nonref;
-
-static void
-count_nonpinned_callback (char *obj, size_t size, void *data)
-{
-       GCVTable *vtable = (GCVTable*)LOAD_VTABLE (obj);
-
-       if (SGEN_VTABLE_HAS_REFERENCES (vtable))
-               ++count_nonpinned_ref;
-       else
-               ++count_nonpinned_nonref;
-}
-
-static void
-count_pinned_callback (char *obj, size_t size, void *data)
-{
-       GCVTable *vtable = (GCVTable*)LOAD_VTABLE (obj);
-
-       if (SGEN_VTABLE_HAS_REFERENCES (vtable))
-               ++count_pinned_ref;
-       else
-               ++count_pinned_nonref;
-}
-
-static G_GNUC_UNUSED void
-count_ref_nonref_objs (void)
-{
-       int total;
-
-       count_pinned_ref = 0;
-       count_pinned_nonref = 0;
-       count_nonpinned_ref = 0;
-       count_nonpinned_nonref = 0;
-
-       major_iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, count_nonpinned_callback, NULL);
-       major_iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, count_pinned_callback, NULL);
-
-       total = count_pinned_nonref + count_nonpinned_nonref + count_pinned_ref + count_nonpinned_ref;
-
-       g_print ("ref: %d pinned %d non-pinned   non-ref: %d pinned %d non-pinned  --  %.1f\n",
-                       count_pinned_ref, count_nonpinned_ref,
-                       count_pinned_nonref, count_nonpinned_nonref,
-                       (count_pinned_nonref + count_nonpinned_nonref) * 100.0 / total);
-}
-
-static int
-ms_calculate_block_obj_sizes (double factor, int *arr)
-{
-       double target_size;
-       int num_sizes = 0;
-       int last_size = 0;
-
-       /*
-        * Have every possible slot size starting with the minimal
-        * object size up to and including four times that size.  Then
-        * proceed by increasing geometrically with the given factor.
-        */
-
-       for (int size = SGEN_CLIENT_MINIMUM_OBJECT_SIZE; size <= 4 * SGEN_CLIENT_MINIMUM_OBJECT_SIZE; size += SGEN_ALLOC_ALIGN) {
-               if (arr)
-                       arr [num_sizes] = size;
-               ++num_sizes;
-               last_size = size;
-       }
-       target_size = (double)last_size;
-
-       do {
-               int target_count = (int)floor (MS_BLOCK_FREE / target_size);
-               int size = MIN ((MS_BLOCK_FREE / target_count) & ~(SGEN_ALLOC_ALIGN - 1), SGEN_MAX_SMALL_OBJ_SIZE);
-
-               if (size != last_size) {
-                       if (arr)
-                               arr [num_sizes] = size;
-                       ++num_sizes;
-                       last_size = size;
-               }
-
-               target_size *= factor;
-       } while (last_size < SGEN_MAX_SMALL_OBJ_SIZE);
-
-       return num_sizes;
-}
-
-/* only valid during minor collections */
-static mword old_num_major_sections;
-
-static void
-major_start_nursery_collection (void)
-{
-#ifdef MARKSWEEP_CONSISTENCY_CHECK
-       consistency_check ();
-#endif
-
-       old_num_major_sections = num_major_sections;
-}
-
-static void
-major_finish_nursery_collection (void)
-{
-#ifdef MARKSWEEP_CONSISTENCY_CHECK
-       consistency_check ();
-#endif
-}
-
-static void
-major_start_major_collection (void)
-{
-       MSBlockInfo *block;
-       int i;
-
-       major_finish_sweep_checking ();
-
-       /*
-        * Clear the free lists for block sizes where we do evacuation.  For those block
-        * sizes we will have to allocate new blocks.
-        */
-       for (i = 0; i < num_block_obj_sizes; ++i) {
-               if (!evacuate_block_obj_sizes [i])
-                       continue;
-
-               free_block_lists [0][i] = NULL;
-               free_block_lists [MS_BLOCK_FLAG_REFS][i] = NULL;
-       }
-
-       if (lazy_sweep)
-               binary_protocol_sweep_begin (GENERATION_OLD, TRUE);
-
-       /* Sweep all unswept blocks and set them to MARKING */
-       FOREACH_BLOCK_NO_LOCK (block) {
-               if (lazy_sweep)
-                       sweep_block (block);
-               SGEN_ASSERT (0, block->state == BLOCK_STATE_SWEPT, "All blocks must be swept when we're pinning.");
-               set_block_state (block, BLOCK_STATE_MARKING, BLOCK_STATE_SWEPT);
-       } END_FOREACH_BLOCK_NO_LOCK;
-
-       if (lazy_sweep)
-               binary_protocol_sweep_end (GENERATION_OLD, TRUE);
-
-       set_sweep_state (SWEEP_STATE_NEED_SWEEPING, SWEEP_STATE_SWEPT);
-}
-
-static void
-major_finish_major_collection (ScannedObjectCounts *counts)
-{
-#ifdef SGEN_HEAVY_BINARY_PROTOCOL
-       if (binary_protocol_is_enabled ()) {
-               counts->num_scanned_objects = scanned_objects_list.next_slot;
-
-               sgen_pointer_queue_sort_uniq (&scanned_objects_list);
-               counts->num_unique_scanned_objects = scanned_objects_list.next_slot;
-
-               sgen_pointer_queue_clear (&scanned_objects_list);
-       }
-#endif
-}
-
-#if SIZEOF_VOID_P != 8
-static int
-compare_pointers (const void *va, const void *vb) {
-       char *a = *(char**)va, *b = *(char**)vb;
-       if (a < b)
-               return -1;
-       if (a > b)
-               return 1;
-       return 0;
-}
-#endif
-
-/*
- * This is called with sweep completed and the world stopped.
- */
-static void
-major_free_swept_blocks (size_t allowance)
-{
-       /* FIXME: This is probably too much.  It's assuming all objects are small. */
-       size_t section_reserve = allowance / MS_BLOCK_SIZE;
-
-       SGEN_ASSERT (0, sweep_state == SWEEP_STATE_SWEPT, "Sweeping must have finished before freeing blocks");
-
-#if SIZEOF_VOID_P != 8
-       {
-               int i, num_empty_blocks_orig, num_blocks, arr_length;
-               void *block;
-               void **empty_block_arr;
-               void **rebuild_next;
-
-#ifdef TARGET_WIN32
-               /*
-                * sgen_free_os_memory () asserts in mono_vfree () because windows doesn't like freeing the middle of
-                * a VirtualAlloc ()-ed block.
-                */
-               return;
-#endif
-
-               if (num_empty_blocks <= section_reserve)
-                       return;
-               SGEN_ASSERT (0, num_empty_blocks > 0, "section reserve can't be negative");
-
-               num_empty_blocks_orig = num_empty_blocks;
-               empty_block_arr = (void**)sgen_alloc_internal_dynamic (sizeof (void*) * num_empty_blocks_orig,
-                               INTERNAL_MEM_MS_BLOCK_INFO_SORT, FALSE);
-               if (!empty_block_arr)
-                       goto fallback;
-
-               i = 0;
-               for (block = empty_blocks; block; block = *(void**)block)
-                       empty_block_arr [i++] = block;
-               SGEN_ASSERT (0, i == num_empty_blocks, "empty block count wrong");
-
-               sgen_qsort (empty_block_arr, num_empty_blocks, sizeof (void*), compare_pointers);
-
-               /*
-                * We iterate over the free blocks, trying to find MS_BLOCK_ALLOC_NUM
-                * contiguous ones.  If we do, we free them.  If that's not enough to get to
-                * section_reserve, we halve the number of contiguous blocks we're looking
-                * for and have another go, until we're done with looking for pairs of
-                * blocks, at which point we give up and go to the fallback.
-                */
-               arr_length = num_empty_blocks_orig;
-               num_blocks = MS_BLOCK_ALLOC_NUM;
-               while (num_empty_blocks > section_reserve && num_blocks > 1) {
-                       int first = -1;
-                       int dest = 0;
-
-                       dest = 0;
-                       for (i = 0; i < arr_length; ++i) {
-                               int d = dest;
-                               void *block = empty_block_arr [i];
-                               SGEN_ASSERT (6, block, "we're not shifting correctly");
-                               if (i != dest) {
-                                       empty_block_arr [dest] = block;
-                                       /*
-                                        * This is not strictly necessary, but we're
-                                        * cautious.
-                                        */
-                                       empty_block_arr [i] = NULL;
-                               }
-                               ++dest;
-
-                               if (first < 0) {
-                                       first = d;
-                                       continue;
-                               }
-
-                               SGEN_ASSERT (6, first >= 0 && d > first, "algorithm is wrong");
-
-                               if ((char*)block != ((char*)empty_block_arr [d-1]) + MS_BLOCK_SIZE) {
-                                       first = d;
-                                       continue;
-                               }
-
-                               if (d + 1 - first == num_blocks) {
-                                       /*
-                                        * We found num_blocks contiguous blocks.  Free them
-                                        * and null their array entries.  As an optimization
-                                        * we could, instead of nulling the entries, shift
-                                        * the following entries over to the left, while
-                                        * we're iterating.
-                                        */
-                                       int j;
-                                       sgen_free_os_memory (empty_block_arr [first], MS_BLOCK_SIZE * num_blocks, SGEN_ALLOC_HEAP);
-                                       for (j = first; j <= d; ++j)
-                                               empty_block_arr [j] = NULL;
-                                       dest = first;
-                                       first = -1;
-
-                                       num_empty_blocks -= num_blocks;
-
-                                       stat_major_blocks_freed += num_blocks;
-                                       if (num_blocks == MS_BLOCK_ALLOC_NUM)
-                                               stat_major_blocks_freed_ideal += num_blocks;
-                                       else
-                                               stat_major_blocks_freed_less_ideal += num_blocks;
-
-                               }
-                       }
-
-                       SGEN_ASSERT (6, dest <= i && dest <= arr_length, "array length is off");
-                       arr_length = dest;
-                       SGEN_ASSERT (6, arr_length == num_empty_blocks, "array length is off");
-
-                       num_blocks >>= 1;
-               }
-
-               /* rebuild empty_blocks free list */
-               rebuild_next = (void**)&empty_blocks;
-               for (i = 0; i < arr_length; ++i) {
-                       void *block = empty_block_arr [i];
-                       SGEN_ASSERT (6, block, "we're missing blocks");
-                       *rebuild_next = block;
-                       rebuild_next = (void**)block;
-               }
-               *rebuild_next = NULL;
-
-               /* free array */
-               sgen_free_internal_dynamic (empty_block_arr, sizeof (void*) * num_empty_blocks_orig, INTERNAL_MEM_MS_BLOCK_INFO_SORT);
-       }
-
-       SGEN_ASSERT (0, num_empty_blocks >= 0, "we freed more blocks than we had in the first place?");
-
- fallback:
-       /*
-        * This is our threshold.  If there's not more empty than used blocks, we won't
-        * release uncontiguous blocks, in fear of fragmenting the address space.
-        */
-       if (num_empty_blocks <= num_major_sections)
-               return;
-#endif
-
-       while (num_empty_blocks > section_reserve) {
-               void *next = *(void**)empty_blocks;
-               sgen_free_os_memory (empty_blocks, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP);
-               empty_blocks = next;
-               /*
-                * Needs not be atomic because this is running
-                * single-threaded.
-                */
-               --num_empty_blocks;
-
-               ++stat_major_blocks_freed;
-#if SIZEOF_VOID_P != 8
-               ++stat_major_blocks_freed_individual;
-#endif
-       }
-}
-
-static void
-major_pin_objects (SgenGrayQueue *queue)
-{
-       MSBlockInfo *block;
-
-       FOREACH_BLOCK_NO_LOCK (block) {
-               size_t first_entry, last_entry;
-               SGEN_ASSERT (6, block_is_swept_or_marking (block), "All blocks must be swept when we're pinning.");
-               sgen_find_optimized_pin_queue_area (MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SKIP, MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE,
-                               &first_entry, &last_entry);
-               mark_pinned_objects_in_block (block, first_entry, last_entry, queue);
-       } END_FOREACH_BLOCK_NO_LOCK;
-}
-
-static void
-major_init_to_space (void)
-{
-}
-
-static void
-major_report_pinned_memory_usage (void)
-{
-       g_assert_not_reached ();
-}
-
-static gint64
-major_get_used_size (void)
-{
-       gint64 size = 0;
-       MSBlockInfo *block;
-
-       /*
-        * We're holding the GC lock, but the sweep thread might be running.  Make sure it's
-        * finished, then we can iterate over the block array.
-        */
-       major_finish_sweep_checking ();
-
-       FOREACH_BLOCK_NO_LOCK_CONDITION (TRUE, block) {
-               int count = MS_BLOCK_FREE / block->obj_size;
-               void **iter;
-               size += count * block->obj_size;
-               for (iter = block->free_list; iter; iter = (void**)*iter)
-                       size -= block->obj_size;
-       } END_FOREACH_BLOCK_NO_LOCK;
-
-       return size;
-}
-
-/* FIXME: return number of bytes, not of sections */
-static size_t
-get_num_major_sections (void)
-{
-       return num_major_sections;
-}
-
-/*
- * Returns the number of bytes in blocks that were present when the last sweep was
- * initiated, and were not freed during the sweep.  They are the basis for calculating the
- * allowance.
- */
-static size_t
-get_bytes_survived_last_sweep (void)
-{
-       SGEN_ASSERT (0, sweep_state == SWEEP_STATE_SWEPT, "Can only query unswept sections after sweep");
-       return (num_major_sections_before_sweep - num_major_sections_freed_in_sweep) * MS_BLOCK_SIZE;
-}
-
-static gboolean
-major_handle_gc_param (const char *opt)
-{
-       if (g_str_has_prefix (opt, "evacuation-threshold=")) {
-               const char *arg = strchr (opt, '=') + 1;
-               int percentage = atoi (arg);
-               if (percentage < 0 || percentage > 100) {
-                       fprintf (stderr, "evacuation-threshold must be an integer in the range 0-100.\n");
-                       exit (1);
-               }
-               evacuation_threshold = (float)percentage / 100.0f;
-               return TRUE;
-       } else if (!strcmp (opt, "lazy-sweep")) {
-               lazy_sweep = TRUE;
-               return TRUE;
-       } else if (!strcmp (opt, "no-lazy-sweep")) {
-               lazy_sweep = FALSE;
-               return TRUE;
-       } else if (!strcmp (opt, "concurrent-sweep")) {
-               concurrent_sweep = TRUE;
-               return TRUE;
-       } else if (!strcmp (opt, "no-concurrent-sweep")) {
-               concurrent_sweep = FALSE;
-               return TRUE;
-       }
-
-       return FALSE;
-}
-
-static void
-major_print_gc_param_usage (void)
-{
-       fprintf (stderr,
-                       ""
-                       "  evacuation-threshold=P (where P is a percentage, an integer in 0-100)\n"
-                       "  (no-)lazy-sweep\n"
-                       "  (no-)concurrent-sweep\n"
-                       );
-}
-
-/*
- * This callback is used to clear cards, move cards to the shadow table and do counting.
- */
-static void
-major_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
-{
-       MSBlockInfo *block;
-       gboolean has_references;
-
-       major_finish_sweep_checking ();
-       FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK (block, has_references) {
-               if (has_references)
-                       callback ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
-       } END_FOREACH_BLOCK_NO_LOCK;
-}
-
-#ifdef HEAVY_STATISTICS
-extern guint64 marked_cards;
-extern guint64 scanned_cards;
-extern guint64 scanned_objects;
-extern guint64 remarked_cards;
-#endif
-
-#define CARD_WORDS_PER_BLOCK (CARDS_PER_BLOCK / SIZEOF_VOID_P)
-/*
- * MS blocks are 16K aligned.
- * Cardtables are 4K aligned, at least.
- * This means that the cardtable of a given block is 32 bytes aligned.
- */
-static guint8*
-initial_skip_card (guint8 *card_data)
-{
-       mword *cards = (mword*)card_data;
-       mword card;
-       int i;
-       for (i = 0; i < CARD_WORDS_PER_BLOCK; ++i) {
-               card = cards [i];
-               if (card)
-                       break;
-       }
-
-       if (i == CARD_WORDS_PER_BLOCK)
-               return card_data + CARDS_PER_BLOCK;
-
-#if defined(__i386__) && defined(__GNUC__)
-       return card_data + i * 4 +  (__builtin_ffs (card) - 1) / 8;
-#elif defined(__x86_64__) && defined(__GNUC__)
-       return card_data + i * 8 +  (__builtin_ffsll (card) - 1) / 8;
-#elif defined(__s390x__) && defined(__GNUC__)
-       return card_data + i * 8 +  (__builtin_ffsll (GUINT64_TO_LE(card)) - 1) / 8;
-#else
-       for (i = i * SIZEOF_VOID_P; i < CARDS_PER_BLOCK; ++i) {
-               if (card_data [i])
-                       return &card_data [i];
-       }
-       return card_data;
-#endif
-}
-
-#define MS_BLOCK_OBJ_INDEX_FAST(o,b,os)        (((char*)(o) - ((b) + MS_BLOCK_SKIP)) / (os))
-#define MS_BLOCK_OBJ_FAST(b,os,i)                      ((b) + MS_BLOCK_SKIP + (os) * (i))
-#define MS_OBJ_ALLOCED_FAST(o,b)               (*(void**)(o) && (*(char**)(o) < (b) || *(char**)(o) >= (b) + MS_BLOCK_SIZE))
-
-static void
-scan_card_table_for_block (MSBlockInfo *block, gboolean mod_union, ScanCopyContext ctx)
-{
-       SgenGrayQueue *queue = ctx.queue;
-       ScanObjectFunc scan_func = ctx.ops->scan_object;
-#ifndef SGEN_HAVE_OVERLAPPING_CARDS
-       guint8 cards_copy [CARDS_PER_BLOCK];
-#endif
-       gboolean small_objects;
-       int block_obj_size;
-       char *block_start;
-       guint8 *card_data, *card_base;
-       guint8 *card_data_end;
-       char *scan_front = NULL;
-
-       block_obj_size = block->obj_size;
-       small_objects = block_obj_size < CARD_SIZE_IN_BYTES;
-
-       block_start = MS_BLOCK_FOR_BLOCK_INFO (block);
-
-       /*
-        * This is safe in face of card aliasing for the following reason:
-        *
-        * Major blocks are 16k aligned, or 32 cards aligned.
-        * Cards aliasing happens in powers of two, so as long as major blocks are aligned to their
-        * sizes, they won't overflow the cardtable overlap modulus.
-        */
-       if (mod_union) {
-               card_data = card_base = block->cardtable_mod_union;
-               /*
-                * This happens when the nursery collection that precedes finishing
-                * the concurrent collection allocates new major blocks.
-                */
-               if (!card_data)
-                       return;
-       } else {
-#ifdef SGEN_HAVE_OVERLAPPING_CARDS
-               card_data = card_base = sgen_card_table_get_card_scan_address ((mword)block_start);
-#else
-               if (!sgen_card_table_get_card_data (cards_copy, (mword)block_start, CARDS_PER_BLOCK))
-                       return;
-               card_data = card_base = cards_copy;
-#endif
-       }
-       card_data_end = card_data + CARDS_PER_BLOCK;
-
-       card_data += MS_BLOCK_SKIP >> CARD_BITS;
-
-       card_data = initial_skip_card (card_data);
-       while (card_data < card_data_end) {
-               size_t card_index, first_object_index;
-               char *start;
-               char *end;
-               char *first_obj, *obj;
-
-               HEAVY_STAT (++scanned_cards);
-
-               if (!*card_data) {
-                       ++card_data;
-                       continue;
-               }
-
-               card_index = card_data - card_base;
-               start = (char*)(block_start + card_index * CARD_SIZE_IN_BYTES);
-               end = start + CARD_SIZE_IN_BYTES;
-
-               if (!block_is_swept_or_marking (block))
-                       sweep_block (block);
-
-               HEAVY_STAT (++marked_cards);
-
-               if (small_objects)
-                       sgen_card_table_prepare_card_for_scanning (card_data);
-
-               /*
-                * If the card we're looking at starts at or in the block header, we
-                * must start at the first object in the block, without calculating
-                * the index of the object we're hypothetically starting at, because
-                * it would be negative.
-                */
-               if (card_index <= (MS_BLOCK_SKIP >> CARD_BITS))
-                       first_object_index = 0;
-               else
-                       first_object_index = MS_BLOCK_OBJ_INDEX_FAST (start, block_start, block_obj_size);
-
-               obj = first_obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, first_object_index);
-
-               binary_protocol_card_scan (first_obj, end - first_obj);
-
-               while (obj < end) {
-                       if (obj < scan_front || !MS_OBJ_ALLOCED_FAST (obj, block_start))
-                               goto next_object;
-
-                       if (mod_union) {
-                               /* FIXME: do this more efficiently */
-                               int w, b;
-                               MS_CALC_MARK_BIT (w, b, obj);
-                               if (!MS_MARK_BIT (block, w, b))
-                                       goto next_object;
-                       }
-
-                       if (small_objects) {
-                               HEAVY_STAT (++scanned_objects);
-                               scan_func (obj, sgen_obj_get_descriptor (obj), queue);
-                       } else {
-                               size_t offset = sgen_card_table_get_card_offset (obj, block_start);
-                               sgen_cardtable_scan_object (obj, block_obj_size, card_base + offset, mod_union, ctx);
-                       }
-               next_object:
-                       obj += block_obj_size;
-                       g_assert (scan_front <= obj);
-                       scan_front = obj;
-               }
-
-               HEAVY_STAT (if (*card_data) ++remarked_cards);
-
-               if (small_objects)
-                       ++card_data;
-               else
-                       card_data = card_base + sgen_card_table_get_card_offset (obj, block_start);
-       }
-}
-
-static void
-major_scan_card_table (gboolean mod_union, ScanCopyContext ctx)
-{
-       MSBlockInfo *block;
-       gboolean has_references;
-
-       if (!concurrent_mark)
-               g_assert (!mod_union);
-
-       major_finish_sweep_checking ();
-       FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK (block, has_references) {
-#ifdef PREFETCH_CARDS
-               int prefetch_index = __index + 6;
-               if (prefetch_index < allocated_blocks.next_slot) {
-                       MSBlockInfo *prefetch_block = BLOCK_UNTAG (allocated_blocks.data [prefetch_index]);
-                       guint8 *prefetch_cards = sgen_card_table_get_card_scan_address ((mword)MS_BLOCK_FOR_BLOCK_INFO (prefetch_block));
-                       PREFETCH_READ (prefetch_block);
-                       PREFETCH_WRITE (prefetch_cards);
-                       PREFETCH_WRITE (prefetch_cards + 32);
-                }
-#endif
-
-               if (!has_references)
-                       continue;
-
-               scan_card_table_for_block (block, mod_union, ctx);
-       } END_FOREACH_BLOCK_NO_LOCK;
-}
-
-static void
-major_count_cards (long long *num_total_cards, long long *num_marked_cards)
-{
-       MSBlockInfo *block;
-       gboolean has_references;
-       long long total_cards = 0;
-       long long marked_cards = 0;
-
-       if (sweep_in_progress ()) {
-               *num_total_cards = -1;
-               *num_marked_cards = -1;
-               return;
-       }
-
-       FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK (block, has_references) {
-               guint8 *cards = sgen_card_table_get_card_scan_address ((mword) MS_BLOCK_FOR_BLOCK_INFO (block));
-               int i;
-
-               if (!has_references)
-                       continue;
-
-               total_cards += CARDS_PER_BLOCK;
-               for (i = 0; i < CARDS_PER_BLOCK; ++i) {
-                       if (cards [i])
-                               ++marked_cards;
-               }
-       } END_FOREACH_BLOCK_NO_LOCK;
-
-       *num_total_cards = total_cards;
-       *num_marked_cards = marked_cards;
-}
-
-static void
-update_cardtable_mod_union (void)
-{
-       MSBlockInfo *block;
-
-       FOREACH_BLOCK_NO_LOCK (block) {
-               size_t num_cards;
-               guint8 *mod_union = get_cardtable_mod_union_for_block (block, TRUE);
-               sgen_card_table_update_mod_union (mod_union, MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE, &num_cards);
-               SGEN_ASSERT (6, num_cards == CARDS_PER_BLOCK, "Number of cards calculation is wrong");
-       } END_FOREACH_BLOCK_NO_LOCK;
-}
-
-#undef pthread_create
-
-static void
-post_param_init (SgenMajorCollector *collector)
-{
-       collector->sweeps_lazily = lazy_sweep;
-       collector->needs_thread_pool = concurrent_mark || concurrent_sweep;
-}
-
-static void
-sgen_marksweep_init_internal (SgenMajorCollector *collector, gboolean is_concurrent)
-{
-       int i;
-
-       sgen_register_fixed_internal_mem_type (INTERNAL_MEM_MS_BLOCK_INFO, sizeof (MSBlockInfo));
-
-       num_block_obj_sizes = ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, NULL);
-       block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (int) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
-       ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, block_obj_sizes);
-
-       evacuate_block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (gboolean) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
-       for (i = 0; i < num_block_obj_sizes; ++i)
-               evacuate_block_obj_sizes [i] = FALSE;
-
-       sweep_slots_available = sgen_alloc_internal_dynamic (sizeof (size_t) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
-       sweep_slots_used = sgen_alloc_internal_dynamic (sizeof (size_t) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
-       sweep_num_blocks = sgen_alloc_internal_dynamic (sizeof (size_t) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
-
-       /*
-       {
-               int i;
-               g_print ("block object sizes:\n");
-               for (i = 0; i < num_block_obj_sizes; ++i)
-                       g_print ("%d\n", block_obj_sizes [i]);
-       }
-       */
-
-       for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i)
-               free_block_lists [i] = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
-
-       for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES; ++i)
-               fast_block_obj_size_indexes [i] = ms_find_block_obj_size_index (i * 8);
-       for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES * 8; ++i)
-               g_assert (MS_BLOCK_OBJ_SIZE_INDEX (i) == ms_find_block_obj_size_index (i));
-
-       mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced);
-       mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed);
-       mono_counters_register ("# major blocks lazy swept", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_lazy_swept);
-       mono_counters_register ("# major objects evacuated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_objects_evacuated);
-#if SIZEOF_VOID_P != 8
-       mono_counters_register ("# major blocks freed ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_ideal);
-       mono_counters_register ("# major blocks freed less ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_less_ideal);
-       mono_counters_register ("# major blocks freed individually", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_individual);
-       mono_counters_register ("# major blocks allocated less ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced_less_ideal);
-#endif
-
-       collector->section_size = MAJOR_SECTION_SIZE;
-
-       concurrent_mark = is_concurrent;
-       collector->is_concurrent = is_concurrent;
-       collector->needs_thread_pool = is_concurrent || concurrent_sweep;
-       if (is_concurrent)
-               collector->want_synchronous_collection = &want_evacuation;
-       else
-               collector->want_synchronous_collection = NULL;
-       collector->get_and_reset_num_major_objects_marked = major_get_and_reset_num_major_objects_marked;
-       collector->supports_cardtable = TRUE;
-
-       collector->alloc_heap = major_alloc_heap;
-       collector->is_object_live = major_is_object_live;
-       collector->alloc_small_pinned_obj = major_alloc_small_pinned_obj;
-       collector->alloc_degraded = major_alloc_degraded;
-
-       collector->alloc_object = major_alloc_object;
-       collector->free_pinned_object = free_pinned_object;
-       collector->iterate_objects = major_iterate_objects;
-       collector->free_non_pinned_object = major_free_non_pinned_object;
-       collector->pin_objects = major_pin_objects;
-       collector->pin_major_object = pin_major_object;
-       collector->scan_card_table = major_scan_card_table;
-       collector->iterate_live_block_ranges = (void*)(void*) major_iterate_live_block_ranges;
-       if (is_concurrent) {
-               collector->update_cardtable_mod_union = update_cardtable_mod_union;
-               collector->get_cardtable_mod_union_for_object = major_get_cardtable_mod_union_for_reference;
-       }
-       collector->init_to_space = major_init_to_space;
-       collector->sweep = major_sweep;
-       collector->have_swept = major_have_swept;
-       collector->finish_sweeping = major_finish_sweep_checking;
-       collector->free_swept_blocks = major_free_swept_blocks;
-       collector->check_scan_starts = major_check_scan_starts;
-       collector->dump_heap = major_dump_heap;
-       collector->get_used_size = major_get_used_size;
-       collector->start_nursery_collection = major_start_nursery_collection;
-       collector->finish_nursery_collection = major_finish_nursery_collection;
-       collector->start_major_collection = major_start_major_collection;
-       collector->finish_major_collection = major_finish_major_collection;
-       collector->ptr_is_in_non_pinned_space = major_ptr_is_in_non_pinned_space;
-       collector->obj_is_from_pinned_alloc = obj_is_from_pinned_alloc;
-       collector->report_pinned_memory_usage = major_report_pinned_memory_usage;
-       collector->get_num_major_sections = get_num_major_sections;
-       collector->get_bytes_survived_last_sweep = get_bytes_survived_last_sweep;
-       collector->handle_gc_param = major_handle_gc_param;
-       collector->print_gc_param_usage = major_print_gc_param_usage;
-       collector->post_param_init = post_param_init;
-       collector->is_valid_object = major_is_valid_object;
-       collector->describe_pointer = major_describe_pointer;
-       collector->count_cards = major_count_cards;
-
-       collector->major_ops_serial.copy_or_mark_object = major_copy_or_mark_object_canonical;
-       collector->major_ops_serial.scan_object = major_scan_object_with_evacuation;
-       if (is_concurrent) {
-               collector->major_ops_concurrent_start.copy_or_mark_object = major_copy_or_mark_object_concurrent_canonical;
-               collector->major_ops_concurrent_start.scan_object = major_scan_object_no_mark_concurrent_start;
-
-               collector->major_ops_concurrent.copy_or_mark_object = major_copy_or_mark_object_concurrent_canonical;
-               collector->major_ops_concurrent.scan_object = major_scan_object_no_mark_concurrent;
-
-               collector->major_ops_concurrent_finish.copy_or_mark_object = major_copy_or_mark_object_concurrent_finish_canonical;
-               collector->major_ops_concurrent_finish.scan_object = major_scan_object_no_evacuation;
-               collector->major_ops_concurrent_finish.scan_vtype = major_scan_vtype_concurrent_finish;
-       }
-
-#if !defined (FIXED_HEAP) && !defined (SGEN_PARALLEL_MARK)
-       if (!is_concurrent)
-               collector->drain_gray_stack = drain_gray_stack;
-
-#ifdef HEAVY_STATISTICS
-       mono_counters_register ("Optimized copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy);
-       mono_counters_register ("Optimized copy nursery", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery);
-       mono_counters_register ("Optimized copy nursery forwarded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery_forwarded);
-       mono_counters_register ("Optimized copy nursery pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery_pinned);
-       mono_counters_register ("Optimized copy major", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major);
-       mono_counters_register ("Optimized copy major small fast", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_fast);
-       mono_counters_register ("Optimized copy major small slow", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_slow);
-       mono_counters_register ("Optimized copy major large", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_large);
-       mono_counters_register ("Optimized major scan", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_major_scan);
-       mono_counters_register ("Optimized major scan no refs", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_major_scan_no_refs);
-
-       mono_counters_register ("Gray stack drain loops", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_loops);
-       mono_counters_register ("Gray stack prefetch fills", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_prefetch_fills);
-       mono_counters_register ("Gray stack prefetch failures", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_prefetch_fill_failures);
-#endif
-#endif
-
-#ifdef SGEN_HEAVY_BINARY_PROTOCOL
-       mono_mutex_init (&scanned_objects_list_lock);
-#endif
-
-       SGEN_ASSERT (0, SGEN_MAX_SMALL_OBJ_SIZE <= MS_BLOCK_FREE / 2, "MAX_SMALL_OBJ_SIZE must be at most MS_BLOCK_FREE / 2");
-
-       /*cardtable requires major pages to be 8 cards aligned*/
-       g_assert ((MS_BLOCK_SIZE % (8 * CARD_SIZE_IN_BYTES)) == 0);
-}
-
-void
-sgen_marksweep_init (SgenMajorCollector *collector)
-{
-       sgen_marksweep_init_internal (collector, FALSE);
-}
-
-void
-sgen_marksweep_conc_init (SgenMajorCollector *collector)
-{
-       sgen_marksweep_init_internal (collector, TRUE);
-}
-
-#endif
diff --git a/mono/metadata/sgen-memory-governor.c b/mono/metadata/sgen-memory-governor.c
deleted file mode 100644 (file)
index 5c9b837..0000000
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * sgen-memory-governor.c: When to schedule collections based on
- * memory usage.
- *
- * Author:
- *     Rodrigo Kumpera (rkumpera@novell.com)
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "config.h"
-#ifdef HAVE_SGEN_GC
-
-#include <stdlib.h>
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/sgen-memory-governor.h"
-#include "mono/metadata/sgen-thread-pool.h"
-#include "mono/metadata/sgen-client.h"
-
-#define MIN_MINOR_COLLECTION_ALLOWANCE ((mword)(DEFAULT_NURSERY_SIZE * default_allowance_nursery_size_ratio))
-
-/*Heap limits and allocation knobs*/
-static mword max_heap_size = ((mword)0)- ((mword)1);
-static mword soft_heap_limit = ((mword)0) - ((mword)1);
-
-static double default_allowance_nursery_size_ratio = SGEN_DEFAULT_ALLOWANCE_NURSERY_SIZE_RATIO;
-static double save_target_ratio = SGEN_DEFAULT_SAVE_TARGET_RATIO;
-
-/**/
-static mword allocated_heap;
-static mword total_alloc = 0;
-static mword total_alloc_max = 0;
-
-/* GC triggers. */
-
-static gboolean debug_print_allowance = FALSE;
-
-
-/* use this to tune when to do a major/minor collection */
-static mword major_collection_trigger_size;
-
-static mword last_major_num_sections = 0;
-static mword last_los_memory_usage = 0;
-
-static gboolean need_calculate_minor_collection_allowance;
-
-/* The size of the LOS after the last major collection, after sweeping. */
-static mword last_collection_los_memory_usage = 0;
-
-static mword sgen_memgov_available_free_space (void);
-
-
-/* GC trigger heuristics. */
-
-static void
-sgen_memgov_calculate_minor_collection_allowance (void)
-{
-       size_t new_major, new_heap_size, allowance_target, allowance;
-
-       if (!need_calculate_minor_collection_allowance)
-               return;
-
-       SGEN_ASSERT (0, major_collector.have_swept (), "Can only calculate allowance if heap is swept");
-
-       new_major = major_collector.get_bytes_survived_last_sweep ();
-       new_heap_size = new_major + last_collection_los_memory_usage;
-
-       /*
-        * We allow the heap to grow by one third its current size before we start the next
-        * major collection.
-        */
-       allowance_target = new_heap_size / 3;
-
-       allowance = MAX (allowance_target, MIN_MINOR_COLLECTION_ALLOWANCE);
-
-       if (new_heap_size + allowance > soft_heap_limit) {
-               if (new_heap_size > soft_heap_limit)
-                       allowance = MIN_MINOR_COLLECTION_ALLOWANCE;
-               else
-                       allowance = MAX (soft_heap_limit - new_heap_size, MIN_MINOR_COLLECTION_ALLOWANCE);
-       }
-
-       /* FIXME: Why is this here? */
-       if (major_collector.free_swept_blocks)
-               major_collector.free_swept_blocks (allowance);
-
-       major_collection_trigger_size = new_heap_size + allowance;
-
-       need_calculate_minor_collection_allowance = FALSE;
-
-       if (debug_print_allowance) {
-               SGEN_LOG (0, "Surviving sweep: %ld bytes (%ld major, %ld LOS)", (long)new_heap_size, (long)new_major, (long)last_collection_los_memory_usage);
-               SGEN_LOG (0, "Allowance: %ld bytes", (long)allowance);
-               SGEN_LOG (0, "Trigger size: %ld bytes", (long)major_collection_trigger_size);
-       }
-}
-
-gboolean
-sgen_need_major_collection (mword space_needed)
-{
-       size_t heap_size;
-
-       if (sgen_concurrent_collection_in_progress ())
-               return FALSE;
-
-       /* FIXME: This is a cop-out.  We should have some way of figuring this out. */
-       if (!major_collector.have_swept ())
-               return FALSE;
-
-       if (space_needed > sgen_memgov_available_free_space ())
-               return TRUE;
-
-       sgen_memgov_calculate_minor_collection_allowance ();
-
-       heap_size = major_collector.get_num_major_sections () * major_collector.section_size + los_memory_usage;
-
-       return heap_size > major_collection_trigger_size;
-}
-
-void
-sgen_memgov_minor_collection_start (void)
-{
-}
-
-void
-sgen_memgov_minor_collection_end (void)
-{
-}
-
-void
-sgen_memgov_major_collection_start (void)
-{
-       need_calculate_minor_collection_allowance = TRUE;
-
-       if (debug_print_allowance) {
-               SGEN_LOG (0, "Starting collection with heap size %ld bytes", (long)(major_collector.get_num_major_sections () * major_collector.section_size + los_memory_usage));
-       }
-}
-
-void
-sgen_memgov_major_collection_end (gboolean forced)
-{
-       last_collection_los_memory_usage = los_memory_usage;
-
-       if (forced) {
-               sgen_get_major_collector ()->finish_sweeping ();
-               sgen_memgov_calculate_minor_collection_allowance ();
-       }
-}
-
-void
-sgen_memgov_collection_start (int generation)
-{
-}
-
-void
-sgen_memgov_collection_end (int generation, GGTimingInfo* info, int info_count)
-{
-       int i;
-       for (i = 0; i < info_count; ++i) {
-               if (info[i].generation != -1)
-                       sgen_client_log_timing (&info [i], last_major_num_sections, last_los_memory_usage);
-       }
-}
-
-/*
-Global GC memory tracking.
-This tracks the total usage of memory by the GC. This includes
-managed and unmanaged memory.
-*/
-
-static unsigned long
-prot_flags_for_activate (int activate)
-{
-       unsigned long prot_flags = activate? MONO_MMAP_READ|MONO_MMAP_WRITE: MONO_MMAP_NONE;
-       return prot_flags | MONO_MMAP_PRIVATE | MONO_MMAP_ANON;
-}
-
-void
-sgen_assert_memory_alloc (void *ptr, size_t requested_size, const char *assert_description)
-{
-       if (ptr || !assert_description)
-               return;
-       fprintf (stderr, "Error: Garbage collector could not allocate %zu bytes of memory for %s.\n", requested_size, assert_description);
-       exit (1);
-}
-
-/*
- * Allocate a big chunk of memory from the OS (usually 64KB to several megabytes).
- * This must not require any lock.
- */
-void*
-sgen_alloc_os_memory (size_t size, SgenAllocFlags flags, const char *assert_description)
-{
-       void *ptr;
-
-       g_assert (!(flags & ~(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE)));
-
-       ptr = mono_valloc (0, size, prot_flags_for_activate (flags & SGEN_ALLOC_ACTIVATE));
-       sgen_assert_memory_alloc (ptr, size, assert_description);
-       if (ptr) {
-               SGEN_ATOMIC_ADD_P (total_alloc, size);
-               total_alloc_max = MAX (total_alloc_max, total_alloc);
-       }
-       return ptr;
-}
-
-/* size must be a power of 2 */
-void*
-sgen_alloc_os_memory_aligned (size_t size, mword alignment, SgenAllocFlags flags, const char *assert_description)
-{
-       void *ptr;
-
-       g_assert (!(flags & ~(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE)));
-
-       ptr = mono_valloc_aligned (size, alignment, prot_flags_for_activate (flags & SGEN_ALLOC_ACTIVATE));
-       sgen_assert_memory_alloc (ptr, size, assert_description);
-       if (ptr) {
-               SGEN_ATOMIC_ADD_P (total_alloc, size);
-               total_alloc_max = MAX (total_alloc_max, total_alloc);
-       }
-       return ptr;
-}
-
-/*
- * Free the memory returned by sgen_alloc_os_memory (), returning it to the OS.
- */
-void
-sgen_free_os_memory (void *addr, size_t size, SgenAllocFlags flags)
-{
-       g_assert (!(flags & ~SGEN_ALLOC_HEAP));
-
-       mono_vfree (addr, size);
-       SGEN_ATOMIC_ADD_P (total_alloc, -(gssize)size);
-       total_alloc_max = MAX (total_alloc_max, total_alloc);
-}
-
-size_t
-sgen_gc_get_total_heap_allocation (void)
-{
-       return total_alloc;
-}
-
-
-/*
-Heap Sizing limits.
-This limit the max size of the heap. It takes into account
-only memory actively in use to hold heap objects and not
-for other parts of the GC.
- */
-static mword
-sgen_memgov_available_free_space (void)
-{
-       return max_heap_size - MIN (allocated_heap, max_heap_size);
-}
-
-void
-sgen_memgov_release_space (mword size, int space)
-{
-       SGEN_ATOMIC_ADD_P (allocated_heap, -(gssize)size);
-}
-
-gboolean
-sgen_memgov_try_alloc_space (mword size, int space)
-{
-       if (sgen_memgov_available_free_space () < size) {
-               SGEN_ASSERT (4, !sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ()), "Memory shouldn't run out in worker thread");
-               return FALSE;
-       }
-
-       SGEN_ATOMIC_ADD_P (allocated_heap, size);
-       sgen_client_total_allocated_heap_changed (allocated_heap);
-       return TRUE;
-}
-
-void
-sgen_memgov_init (size_t max_heap, size_t soft_limit, gboolean debug_allowance, double allowance_ratio, double save_target)
-{
-       if (soft_limit)
-               soft_heap_limit = soft_limit;
-
-       debug_print_allowance = debug_allowance;
-       major_collection_trigger_size = MIN_MINOR_COLLECTION_ALLOWANCE;
-
-       mono_counters_register ("Memgov alloc", MONO_COUNTER_GC | MONO_COUNTER_WORD | MONO_COUNTER_BYTES | MONO_COUNTER_VARIABLE, &total_alloc);
-       mono_counters_register ("Memgov max alloc", MONO_COUNTER_GC | MONO_COUNTER_WORD | MONO_COUNTER_BYTES | MONO_COUNTER_MONOTONIC, &total_alloc_max);
-
-       if (max_heap == 0)
-               return;
-
-       if (max_heap < soft_limit) {
-               sgen_env_var_error (MONO_GC_PARAMS_NAME, "Setting to minimum.", "`max-heap-size` must be at least as large as `soft-heap-limit`.");
-               max_heap = soft_limit;
-       }
-
-       if (max_heap < sgen_nursery_size * 4) {
-               sgen_env_var_error (MONO_GC_PARAMS_NAME, "Setting to minimum.", "`max-heap-size` must be at least 4 times as large as `nursery size`.");
-               max_heap = sgen_nursery_size * 4;
-       }
-       max_heap_size = max_heap - sgen_nursery_size;
-
-       if (allowance_ratio)
-               default_allowance_nursery_size_ratio = allowance_ratio;
-
-       if (save_target)
-               save_target_ratio = save_target;
-}
-
-#endif
diff --git a/mono/metadata/sgen-memory-governor.h b/mono/metadata/sgen-memory-governor.h
deleted file mode 100644 (file)
index 0115ec6..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef __MONO_SGEN_MEMORY_GOVERNOR_H__
-#define __MONO_SGEN_MEMORY_GOVERNOR_H__
-
-/* Heap limits */
-void sgen_memgov_init (size_t max_heap, size_t soft_limit, gboolean debug_allowance, double min_allowance_ratio, double save_target);
-void sgen_memgov_release_space (mword size, int space);
-gboolean sgen_memgov_try_alloc_space (mword size, int space);
-
-/* GC trigger heuristics */
-void sgen_memgov_minor_collection_start (void);
-void sgen_memgov_minor_collection_end (void);
-
-void sgen_memgov_major_collection_start (void);
-void sgen_memgov_major_collection_end (gboolean forced);
-
-void sgen_memgov_collection_start (int generation);
-void sgen_memgov_collection_end (int generation, GGTimingInfo* info, int info_count);
-
-gboolean sgen_need_major_collection (mword space_needed);
-
-
-typedef enum {
-       SGEN_ALLOC_INTERNAL = 0,
-       SGEN_ALLOC_HEAP = 1,
-       SGEN_ALLOC_ACTIVATE = 2
-} SgenAllocFlags;
-
-/* OS memory allocation */
-void* sgen_alloc_os_memory (size_t size, SgenAllocFlags flags, const char *assert_description);
-void* sgen_alloc_os_memory_aligned (size_t size, mword alignment, SgenAllocFlags flags, const char *assert_description);
-void sgen_free_os_memory (void *addr, size_t size, SgenAllocFlags flags);
-
-/* Error handling */
-void sgen_assert_memory_alloc (void *ptr, size_t requested_size, const char *assert_description);
-
-#endif
-
diff --git a/mono/metadata/sgen-minor-copy-object.h b/mono/metadata/sgen-minor-copy-object.h
deleted file mode 100644 (file)
index e323218..0000000
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * sgen-minor-copy-object.h: Copy functions for nursery collections.
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#define collector_pin_object(obj, queue) sgen_pin_object (obj, queue);
-#define COLLECTOR_SERIAL_ALLOC_FOR_PROMOTION alloc_for_promotion
-
-extern guint64 stat_nursery_copy_object_failed_to_space; /* from sgen-gc.c */
-
-#include "sgen-copy-object.h"
-
-/*
- * This is how the copying happens from the nursery to the old generation.
- * We assume that at this time all the pinned objects have been identified and
- * marked as such.
- * We run scan_object() for each pinned object so that each referenced
- * objects if possible are copied. The new gray objects created can have
- * scan_object() run on them right away, too.
- * Then we run copy_object() for the precisely tracked roots. At this point
- * all the roots are either gray or black. We run scan_object() on the gray
- * objects until no more gray objects are created.
- * At the end of the process we walk again the pinned list and we unmark
- * the pinned flag. As we go we also create the list of free space for use
- * in the next allocation runs.
- *
- * We need to remember objects from the old generation that point to the new one
- * (or just addresses?).
- *
- * copy_object could be made into a macro once debugged (use inline for now).
- */
-
-static MONO_ALWAYS_INLINE void
-SERIAL_COPY_OBJECT (void **obj_slot, SgenGrayQueue *queue) 
-{
-       char *forwarded;
-       char *copy;
-       char *obj = *obj_slot;
-
-       SGEN_ASSERT (9, current_collection_generation == GENERATION_NURSERY, "calling minor-serial-copy from a %d generation collection", current_collection_generation);
-
-       HEAVY_STAT (++stat_copy_object_called_nursery);
-
-       if (!sgen_ptr_in_nursery (obj)) {
-               HEAVY_STAT (++stat_nursery_copy_object_failed_from_space);
-               return;
-       }
-
-       SGEN_LOG (9, "Precise copy of %p from %p", obj, obj_slot);
-
-       /*
-        * Before we can copy the object we must make sure that we are
-        * allowed to, i.e. that the object not pinned, not already
-        * forwarded or belongs to the nursery To Space.
-        */
-
-       if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
-               SGEN_ASSERT (9, sgen_obj_get_descriptor (forwarded),  "forwarded object %p has no gc descriptor", forwarded);
-               SGEN_LOG (9, " (already forwarded to %p)", forwarded);
-               HEAVY_STAT (++stat_nursery_copy_object_failed_forwarded);
-               SGEN_UPDATE_REFERENCE (obj_slot, forwarded);
-               return;
-       }
-       if (G_UNLIKELY (SGEN_OBJECT_IS_PINNED (obj))) {
-               SGEN_ASSERT (9, sgen_vtable_get_descriptor ((GCVTable*)SGEN_LOAD_VTABLE(obj)), "pinned object %p has no gc descriptor", obj);
-               SGEN_LOG (9, " (pinned, no change)");
-               HEAVY_STAT (++stat_nursery_copy_object_failed_pinned);
-               return;
-       }
-
-#ifndef SGEN_SIMPLE_NURSERY
-       if (sgen_nursery_is_to_space (obj)) {
-               SGEN_ASSERT (9, sgen_vtable_get_descriptor ((GCVTable*)SGEN_LOAD_VTABLE(obj)), "to space object %p has no gc descriptor", obj);
-               SGEN_LOG (9, " (tospace, no change)");
-               HEAVY_STAT (++stat_nursery_copy_object_failed_to_space);                
-               return;
-       }
-#endif
-
-       HEAVY_STAT (++stat_objects_copied_nursery);
-
-       copy = copy_object_no_checks (obj, queue);
-       SGEN_UPDATE_REFERENCE (obj_slot, copy);
-}
-
-/*
- * SERIAL_COPY_OBJECT_FROM_OBJ:
- *
- *   Similar to SERIAL_COPY_OBJECT, but assumes that OBJ_SLOT is part of an object, so it handles global remsets as well.
- */
-static MONO_ALWAYS_INLINE void
-SERIAL_COPY_OBJECT_FROM_OBJ (void **obj_slot, SgenGrayQueue *queue) 
-{
-       char *forwarded;
-       char *obj = *obj_slot;
-       void *copy;
-
-       SGEN_ASSERT (9, current_collection_generation == GENERATION_NURSERY, "calling minor-serial-copy-from-obj from a %d generation collection", current_collection_generation);
-
-       HEAVY_STAT (++stat_copy_object_called_nursery);
-
-       if (!sgen_ptr_in_nursery (obj)) {
-               HEAVY_STAT (++stat_nursery_copy_object_failed_from_space);
-               return;
-       }
-
-       SGEN_LOG (9, "Precise copy of %p from %p", obj, obj_slot);
-
-       /*
-        * Before we can copy the object we must make sure that we are
-        * allowed to, i.e. that the object not pinned, not already
-        * forwarded or belongs to the nursery To Space.
-        */
-
-       if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
-               SGEN_ASSERT (9, sgen_obj_get_descriptor (forwarded),  "forwarded object %p has no gc descriptor", forwarded);
-               SGEN_LOG (9, " (already forwarded to %p)", forwarded);
-               HEAVY_STAT (++stat_nursery_copy_object_failed_forwarded);
-               SGEN_UPDATE_REFERENCE (obj_slot, forwarded);
-#ifndef SGEN_SIMPLE_NURSERY
-               if (G_UNLIKELY (sgen_ptr_in_nursery (forwarded) && !sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (forwarded)))
-                       sgen_add_to_global_remset (obj_slot, forwarded);
-#endif
-               return;
-       }
-       if (G_UNLIKELY (SGEN_OBJECT_IS_PINNED (obj))) {
-               SGEN_ASSERT (9, sgen_vtable_get_descriptor ((GCVTable*)SGEN_LOAD_VTABLE(obj)), "pinned object %p has no gc descriptor", obj);
-               SGEN_LOG (9, " (pinned, no change)");
-               HEAVY_STAT (++stat_nursery_copy_object_failed_pinned);
-               if (!sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (obj))
-                       sgen_add_to_global_remset (obj_slot, obj);
-               return;
-       }
-
-#ifndef SGEN_SIMPLE_NURSERY
-       if (sgen_nursery_is_to_space (obj)) {
-               /* FIXME: all of these could just use `sgen_obj_get_descriptor_safe()` */
-               SGEN_ASSERT (9, sgen_vtable_get_descriptor ((GCVTable*)SGEN_LOAD_VTABLE(obj)), "to space object %p has no gc descriptor", obj);
-               SGEN_LOG (9, " (tospace, no change)");
-               HEAVY_STAT (++stat_nursery_copy_object_failed_to_space);                
-
-               /*
-                * FIXME:
-                *
-                * The card table scanning code sometimes clears cards
-                * that have just been set for a global remset.  In
-                * the split nursery the following situation can
-                * occur:
-                *
-                * Let's say object A starts in card C but continues
-                * into C+1.  Within A, at offset O there's a
-                * reference to a new nursery object X.  A+O is in
-                * card C+1.  Now card C is scanned, and as part of
-                * it, object A.  The reference at A+O is processed by
-                * copying X into nursery to-space at Y.  Since it's
-                * still in the nursery, a global remset must be added
-                * for A+O, so card C+1 is marked.  Now, however, card
-                * C+1 is scanned, which means that it's cleared
-                * first.  This wouldn't be terribly bad if reference
-                * A+O were re-scanned and the global remset re-added,
-                * but since the reference points to to-space, that
-                * doesn't happen, and C+1 remains cleared: the remset
-                * is lost.
-                *
-                * There's at least two ways to fix this.  The easy
-                * one is to re-add the remset on the re-scan.  This
-                * is that - the following two lines of code.
-                *
-                * The proper solution appears to be to first make a
-                * copy of the cards before scanning a block, then to
-                * clear all the cards and scan from the copy, so no
-                * remsets will be overwritten.  Scanning objects at
-                * most once would be the icing on the cake.
-                */
-               if (!sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (obj))
-                       sgen_add_to_global_remset (obj_slot, obj);
-
-               return;
-       }
-#endif
-
-       HEAVY_STAT (++stat_objects_copied_nursery);
-
-       copy = copy_object_no_checks (obj, queue);
-       SGEN_UPDATE_REFERENCE (obj_slot, copy);
-#ifndef SGEN_SIMPLE_NURSERY
-       if (G_UNLIKELY (sgen_ptr_in_nursery (copy) && !sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (copy)))
-               sgen_add_to_global_remset (obj_slot, copy);
-#else
-       /* copy_object_no_checks () can return obj on OOM */
-       if (G_UNLIKELY (obj == copy)) {
-               if (G_UNLIKELY (sgen_ptr_in_nursery (copy) && !sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (copy)))
-                       sgen_add_to_global_remset (obj_slot, copy);
-       }
-#endif
-}
-
-#define FILL_MINOR_COLLECTOR_COPY_OBJECT(collector)    do {                    \
-               (collector)->serial_ops.copy_or_mark_object = SERIAL_COPY_OBJECT;                       \
-       } while (0)
diff --git a/mono/metadata/sgen-minor-scan-object.h b/mono/metadata/sgen-minor-scan-object.h
deleted file mode 100644 (file)
index efe782f..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * sgen-minor-scan-object.h: Object scanning in the nursery collectors.
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-extern guint64 stat_scan_object_called_nursery;
-
-#if defined(SGEN_SIMPLE_NURSERY)
-#define SERIAL_SCAN_OBJECT simple_nursery_serial_scan_object
-#define SERIAL_SCAN_VTYPE simple_nursery_serial_scan_vtype
-
-#elif defined (SGEN_SPLIT_NURSERY)
-#define SERIAL_SCAN_OBJECT split_nursery_serial_scan_object
-#define SERIAL_SCAN_VTYPE split_nursery_serial_scan_vtype
-
-#else
-#error "Please define GC_CONF_NAME"
-#endif
-
-#undef HANDLE_PTR
-/* Global remsets are handled in SERIAL_COPY_OBJECT_FROM_OBJ */
-#define HANDLE_PTR(ptr,obj)    do {    \
-               void *__old = *(ptr);   \
-               SGEN_OBJECT_LAYOUT_STATISTICS_MARK_BITMAP ((obj), (ptr)); \
-               binary_protocol_scan_process_reference ((obj), (ptr), __old); \
-               if (__old) {    \
-                       SERIAL_COPY_OBJECT_FROM_OBJ ((ptr), queue);     \
-                       SGEN_COND_LOG (9, __old != *(ptr), "Overwrote field at %p with %p (was: %p)", (ptr), *(ptr), __old); \
-               }       \
-       } while (0)
-
-static void
-SERIAL_SCAN_OBJECT (char *start, mword desc, SgenGrayQueue *queue)
-{
-       SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP;
-
-#ifdef HEAVY_STATISTICS
-       sgen_descriptor_count_scanned_object (desc);
-#endif
-
-       SGEN_ASSERT (9, sgen_get_current_collection_generation () == GENERATION_NURSERY, "Must not use minor scan during major collection.");
-
-#define SCAN_OBJECT_PROTOCOL
-#include "sgen-scan-object.h"
-
-       SGEN_OBJECT_LAYOUT_STATISTICS_COMMIT_BITMAP;
-       HEAVY_STAT (++stat_scan_object_called_nursery);
-}
-
-static void
-SERIAL_SCAN_VTYPE (char *full_object, char *start, mword desc, SgenGrayQueue *queue BINARY_PROTOCOL_ARG (size_t size))
-{
-       SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP;
-
-       SGEN_ASSERT (9, sgen_get_current_collection_generation () == GENERATION_NURSERY, "Must not use minor scan during major collection.");
-
-       /* The descriptors include info about the MonoObject header as well */
-       start -= SGEN_CLIENT_OBJECT_HEADER_SIZE;
-
-#define SCAN_OBJECT_NOVTABLE
-#define SCAN_OBJECT_PROTOCOL
-#include "sgen-scan-object.h"
-}
-
-#define FILL_MINOR_COLLECTOR_SCAN_OBJECT(collector)    do {                    \
-               (collector)->serial_ops.scan_object = SERIAL_SCAN_OBJECT;       \
-               (collector)->serial_ops.scan_vtype = SERIAL_SCAN_VTYPE; \
-       } while (0)
index e06ce0c7cbed57f2e4e69426521c33b646639a22..0a0090ad7821fc4aa4ecfc4253466c57dec589c1 100644 (file)
 #include "config.h"
 #ifdef HAVE_SGEN_GC
 
-#include "metadata/sgen-gc.h"
-#include "metadata/sgen-protocol.h"
+#include "sgen/sgen-gc.h"
+#include "sgen/sgen-protocol.h"
 #include "metadata/monitor.h"
-#include "metadata/sgen-layout-stats.h"
-#include "metadata/sgen-client.h"
-#include "metadata/sgen-cardtable.h"
-#include "metadata/sgen-pinning.h"
+#include "sgen/sgen-layout-stats.h"
+#include "sgen/sgen-client.h"
+#include "sgen/sgen-cardtable.h"
+#include "sgen/sgen-pinning.h"
 #include "metadata/marshal.h"
 #include "metadata/method-builder.h"
 #include "metadata/abi-details.h"
@@ -92,7 +92,7 @@ static void
 scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
 {
 #define SCAN_OBJECT_NOVTABLE
-#include "sgen-scan-object.h"
+#include "sgen/sgen-scan-object.h"
 }
 #endif
 
@@ -2031,7 +2031,7 @@ collect_references (HeapWalkInfo *hwi, char *start, size_t size)
 {
        mword desc = sgen_obj_get_descriptor (start);
 
-#include "sgen-scan-object.h"
+#include "sgen/sgen-scan-object.h"
 }
 
 static void
index 43dddcbe52e41c77dc3a6631b7316af73a003a86..38997535e5e8190619825563aded2e78580cceda 100644 (file)
 #include <stdlib.h>
 #include <errno.h>
 
-#include "sgen-gc.h"
+#include "sgen/sgen-gc.h"
 #include "sgen-bridge-internal.h"
-#include "sgen-hash-table.h"
-#include "sgen-qsort.h"
-#include "sgen-client.h"
+#include "sgen/sgen-hash-table.h"
+#include "sgen/sgen-qsort.h"
+#include "sgen/sgen-client.h"
 #include "tabledefs.h"
 #include "utils/mono-logger-internal.h"
 
@@ -672,7 +672,7 @@ dfs1 (HashEntry *obj_entry)
                                dyn_array_ptr_push (&dfs_stack, obj_entry);
                                dyn_array_ptr_push (&dfs_stack, NULL);
 
-#include "sgen-scan-object.h"
+#include "sgen/sgen-scan-object.h"
 
                                /*
                                 * We can remove non-bridge objects with a single outgoing
diff --git a/mono/metadata/sgen-nursery-allocator.c b/mono/metadata/sgen-nursery-allocator.c
deleted file mode 100644 (file)
index adcce0f..0000000
+++ /dev/null
@@ -1,927 +0,0 @@
-/*
- * sgen-nursery-allocator.c: Nursery allocation code.
- *
- * Copyright 2009-2010 Novell, Inc.
- *           2011 Rodrigo Kumpera
- * 
- * Copyright 2011 Xamarin Inc  (http://www.xamarin.com)
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/*
- * The young generation is divided into fragments. This is because
- * we can hand one fragments to a thread for lock-less fast alloc and
- * because the young generation ends up fragmented anyway by pinned objects.
- * Once a collection is done, a list of fragments is created. When doing
- * thread local alloc we use smallish nurseries so we allow new threads to
- * allocate memory from gen0 without triggering a collection. Threads that
- * are found to allocate lots of memory are given bigger fragments. This
- * should make the finalizer thread use little nursery memory after a while.
- * We should start assigning threads very small fragments: if there are many
- * threads the nursery will be full of reserved space that the threads may not
- * use at all, slowing down allocation speed.
- * Thread local allocation is done from areas of memory Hotspot calls Thread Local 
- * Allocation Buffers (TLABs).
- */
-#include "config.h"
-#ifdef HAVE_SGEN_GC
-
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#ifdef HAVE_PTHREAD_H
-#include <pthread.h>
-#endif
-#ifdef HAVE_SEMAPHORE_H
-#include <semaphore.h>
-#endif
-#include <stdio.h>
-#include <string.h>
-#include <errno.h>
-#include <assert.h>
-#ifdef __MACH__
-#undef _XOPEN_SOURCE
-#endif
-#ifdef __MACH__
-#define _XOPEN_SOURCE
-#endif
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/sgen-cardtable.h"
-#include "mono/metadata/sgen-protocol.h"
-#include "mono/metadata/sgen-memory-governor.h"
-#include "mono/metadata/sgen-pinning.h"
-#include "mono/metadata/sgen-client.h"
-#include "mono/utils/mono-membar.h"
-
-/* Enable it so nursery allocation diagnostic data is collected */
-//#define NALLOC_DEBUG 1
-
-/* The mutator allocs from here. */
-static SgenFragmentAllocator mutator_allocator;
-
-/* freeelist of fragment structures */
-static SgenFragment *fragment_freelist = NULL;
-
-/* Allocator cursors */
-static char *nursery_last_pinned_end = NULL;
-
-char *sgen_nursery_start;
-char *sgen_nursery_end;
-
-#ifdef USER_CONFIG
-size_t sgen_nursery_size = (1 << 22);
-int sgen_nursery_bits = 22;
-#endif
-
-char *sgen_space_bitmap;
-size_t sgen_space_bitmap_size;
-
-#ifdef HEAVY_STATISTICS
-
-static mword stat_wasted_bytes_trailer = 0;
-static mword stat_wasted_bytes_small_areas = 0;
-static mword stat_wasted_bytes_discarded_fragments = 0;
-static guint64 stat_nursery_alloc_requests = 0;
-static guint64 stat_alloc_iterations = 0;
-static guint64 stat_alloc_retries = 0;
-
-static guint64 stat_nursery_alloc_range_requests = 0;
-static guint64 stat_alloc_range_iterations = 0;
-static guint64 stat_alloc_range_retries = 0;
-
-#endif
-
-/************************************Nursery allocation debugging *********************************************/
-
-#ifdef NALLOC_DEBUG
-
-enum {
-       FIXED_ALLOC = 1,
-       RANGE_ALLOC,
-       PINNING,
-       BLOCK_ZEROING,
-       CLEAR_NURSERY_FRAGS
-};
-
-typedef struct {
-       char *address;
-       size_t size;
-       int reason;
-       int seq;
-       MonoNativeThreadId tid;
-} AllocRecord;
-
-#define ALLOC_RECORD_COUNT 128000
-
-
-static AllocRecord *alloc_records;
-static volatile int next_record;
-static volatile int alloc_count;
-
-void dump_alloc_records (void);
-void verify_alloc_records (void);
-
-static const char*
-get_reason_name (AllocRecord *rec)
-{
-       switch (rec->reason) {
-       case FIXED_ALLOC: return "fixed-alloc";
-       case RANGE_ALLOC: return "range-alloc";
-       case PINNING: return "pinning";
-       case BLOCK_ZEROING: return "block-zeroing";
-       case CLEAR_NURSERY_FRAGS: return "clear-nursery-frag";
-       default: return "invalid";
-       }
-}
-
-static void
-reset_alloc_records (void)
-{
-       next_record = 0;
-       alloc_count = 0;
-}
-
-static void
-add_alloc_record (char *addr, size_t size, int reason)
-{
-       int idx = InterlockedIncrement (&next_record) - 1;
-       alloc_records [idx].address = addr;
-       alloc_records [idx].size = size;
-       alloc_records [idx].reason = reason;
-       alloc_records [idx].seq = idx;
-       alloc_records [idx].tid = mono_native_thread_id_get ();
-}
-
-static int
-comp_alloc_record (const void *_a, const void *_b)
-{
-       const AllocRecord *a = _a;
-       const AllocRecord *b = _b;
-       if (a->address == b->address)
-               return a->seq - b->seq;
-       return a->address - b->address;
-}
-
-#define rec_end(REC) ((REC)->address + (REC)->size)
-
-void
-dump_alloc_records (void)
-{
-       int i;
-       sgen_qsort (alloc_records, next_record, sizeof (AllocRecord), comp_alloc_record);
-
-       printf ("------------------------------------DUMP RECORDS----------------------------\n");
-       for (i = 0; i < next_record; ++i) {
-               AllocRecord *rec = alloc_records + i;
-               printf ("obj [%p, %p] size %d reason %s seq %d tid %x\n", rec->address, rec_end (rec), (int)rec->size, get_reason_name (rec), rec->seq, (size_t)rec->tid);
-       }
-}
-
-void
-verify_alloc_records (void)
-{
-       int i;
-       int total = 0;
-       int holes = 0;
-       int max_hole = 0;
-       AllocRecord *prev = NULL;
-
-       sgen_qsort (alloc_records, next_record, sizeof (AllocRecord), comp_alloc_record);
-       printf ("------------------------------------DUMP RECORDS- %d %d---------------------------\n", next_record, alloc_count);
-       for (i = 0; i < next_record; ++i) {
-               AllocRecord *rec = alloc_records + i;
-               int hole_size = 0;
-               total += rec->size;
-               if (prev) {
-                       if (rec_end (prev) > rec->address)
-                               printf ("WE GOT OVERLAPPING objects %p and %p\n", prev->address, rec->address);
-                       if ((rec->address - rec_end (prev)) >= 8)
-                               ++holes;
-                       hole_size = rec->address - rec_end (prev);
-                       max_hole = MAX (max_hole, hole_size);
-               }
-               printf ("obj [%p, %p] size %d hole to prev %d reason %s seq %d tid %zx\n", rec->address, rec_end (rec), (int)rec->size, hole_size, get_reason_name (rec), rec->seq, (size_t)rec->tid);
-               prev = rec;
-       }
-       printf ("SUMMARY total alloc'd %d holes %d max_hole %d\n", total, holes, max_hole);
-}
-
-#endif
-
-/*********************************************************************************/
-
-
-static inline gpointer
-mask (gpointer n, uintptr_t bit)
-{
-       return (gpointer)(((uintptr_t)n) | bit);
-}
-
-static inline gpointer
-unmask (gpointer p)
-{
-       return (gpointer)((uintptr_t)p & ~(uintptr_t)0x3);
-}
-
-static inline uintptr_t
-get_mark (gpointer n)
-{
-       return (uintptr_t)n & 0x1;
-}
-
-/*MUST be called with world stopped*/
-SgenFragment*
-sgen_fragment_allocator_alloc (void)
-{
-       SgenFragment *frag = fragment_freelist;
-       if (frag) {
-               fragment_freelist = frag->next_in_order;
-               frag->next = frag->next_in_order = NULL;
-               return frag;
-       }
-       frag = sgen_alloc_internal (INTERNAL_MEM_FRAGMENT);
-       frag->next = frag->next_in_order = NULL;
-       return frag;
-}
-
-void
-sgen_fragment_allocator_add (SgenFragmentAllocator *allocator, char *start, char *end)
-{
-       SgenFragment *fragment;
-
-       fragment = sgen_fragment_allocator_alloc ();
-       fragment->fragment_start = start;
-       fragment->fragment_next = start;
-       fragment->fragment_end = end;
-       fragment->next_in_order = fragment->next = unmask (allocator->region_head);
-
-       allocator->region_head = allocator->alloc_head = fragment;
-       g_assert (fragment->fragment_end > fragment->fragment_start);
-}
-
-void
-sgen_fragment_allocator_release (SgenFragmentAllocator *allocator)
-{
-       SgenFragment *last = allocator->region_head;
-       if (!last)
-               return;
-
-       /* Find the last fragment in insert order */
-       for (; last->next_in_order; last = last->next_in_order) ;
-
-       last->next_in_order = fragment_freelist;
-       fragment_freelist = allocator->region_head;
-       allocator->alloc_head = allocator->region_head = NULL;
-}
-
-static SgenFragment**
-find_previous_pointer_fragment (SgenFragmentAllocator *allocator, SgenFragment *frag)
-{
-       SgenFragment **prev;
-       SgenFragment *cur, *next;
-#ifdef NALLOC_DEBUG
-       int count = 0;
-#endif
-
-try_again:
-       prev = &allocator->alloc_head;
-#ifdef NALLOC_DEBUG
-       if (count++ > 5)
-               printf ("retry count for fppf is %d\n", count);
-#endif
-
-       cur = unmask (*prev);
-
-       while (1) {
-               if (cur == NULL)
-                       return NULL;
-               next = cur->next;
-
-               /*
-                * We need to make sure that we dereference prev below
-                * after reading cur->next above, so we need a read
-                * barrier.
-                */
-               mono_memory_read_barrier ();
-
-               if (*prev != cur)
-                       goto try_again;
-
-               if (!get_mark (next)) {
-                       if (cur == frag)
-                               return prev;
-                       prev = &cur->next;
-               } else {
-                       next = unmask (next);
-                       if (InterlockedCompareExchangePointer ((volatile gpointer*)prev, next, cur) != cur)
-                               goto try_again;
-                       /*we must make sure that the next from cur->next happens after*/
-                       mono_memory_write_barrier ();
-               }
-
-               cur = unmask (next);
-       }
-       return NULL;
-}
-
-static gboolean
-claim_remaining_size (SgenFragment *frag, char *alloc_end)
-{
-       /* All space used, nothing to claim. */
-       if (frag->fragment_end <= alloc_end)
-               return FALSE;
-
-       /* Try to alloc all the remaining space. */
-       return InterlockedCompareExchangePointer ((volatile gpointer*)&frag->fragment_next, frag->fragment_end, alloc_end) == alloc_end;
-}
-
-static void*
-par_alloc_from_fragment (SgenFragmentAllocator *allocator, SgenFragment *frag, size_t size)
-{
-       char *p = frag->fragment_next;
-       char *end = p + size;
-
-       if (end > frag->fragment_end)
-               return NULL;
-
-       /* p = frag->fragment_next must happen before */
-       mono_memory_barrier ();
-
-       if (InterlockedCompareExchangePointer ((volatile gpointer*)&frag->fragment_next, end, p) != p)
-               return NULL;
-
-       if (frag->fragment_end - end < SGEN_MAX_NURSERY_WASTE) {
-               SgenFragment *next, **prev_ptr;
-               
-               /*
-                * Before we clean the remaining nursery, we must claim the remaining space
-                * as it could end up been used by the range allocator since it can end up
-                * allocating from this dying fragment as it doesn't respect SGEN_MAX_NURSERY_WASTE
-                * when doing second chance allocation.
-                */
-               if ((sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION || sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG) && claim_remaining_size (frag, end)) {
-                       sgen_clear_range (end, frag->fragment_end);
-                       HEAVY_STAT (InterlockedExchangeAdd (&stat_wasted_bytes_trailer, frag->fragment_end - end));
-#ifdef NALLOC_DEBUG
-                       add_alloc_record (end, frag->fragment_end - end, BLOCK_ZEROING);
-#endif
-               }
-
-               prev_ptr = find_previous_pointer_fragment (allocator, frag);
-
-               /*Use Michaels linked list remove*/
-
-               /*prev_ptr will be null if the fragment was removed concurrently */
-               while (prev_ptr) {
-                       next = frag->next;
-
-                       /*already deleted*/
-                       if (!get_mark (next)) {
-                               /*frag->next read must happen before the first CAS*/
-                               mono_memory_write_barrier ();
-
-                               /*Fail if the next node is removed concurrently and its CAS wins */
-                               if (InterlockedCompareExchangePointer ((volatile gpointer*)&frag->next, mask (next, 1), next) != next) {
-                                       continue;
-                               }
-                       }
-
-                       /* The second CAS must happen after the first CAS or frag->next. */
-                       mono_memory_write_barrier ();
-
-                       /* Fail if the previous node was deleted and its CAS wins */
-                       if (InterlockedCompareExchangePointer ((volatile gpointer*)prev_ptr, unmask (next), frag) != frag) {
-                               prev_ptr = find_previous_pointer_fragment (allocator, frag);
-                               continue;
-                       }
-                       break;
-               }
-       }
-
-       return p;
-}
-
-static void*
-serial_alloc_from_fragment (SgenFragment **previous, SgenFragment *frag, size_t size)
-{
-       char *p = frag->fragment_next;
-       char *end = p + size;
-
-       if (end > frag->fragment_end)
-               return NULL;
-
-       frag->fragment_next = end;
-
-       if (frag->fragment_end - end < SGEN_MAX_NURSERY_WASTE) {
-               *previous = frag->next;
-               
-               /* Clear the remaining space, pinning depends on this. FIXME move this to use phony arrays */
-               memset (end, 0, frag->fragment_end - end);
-
-               *previous = frag->next;
-       }
-
-       return p;
-}
-
-void*
-sgen_fragment_allocator_par_alloc (SgenFragmentAllocator *allocator, size_t size)
-{
-       SgenFragment *frag;
-
-#ifdef NALLOC_DEBUG
-       InterlockedIncrement (&alloc_count);
-#endif
-
-restart:
-       for (frag = unmask (allocator->alloc_head); unmask (frag); frag = unmask (frag->next)) {
-               HEAVY_STAT (InterlockedIncrement (&stat_alloc_iterations));
-
-               if (size <= (size_t)(frag->fragment_end - frag->fragment_next)) {
-                       void *p = par_alloc_from_fragment (allocator, frag, size);
-                       if (!p) {
-                               HEAVY_STAT (InterlockedIncrement (&stat_alloc_retries));
-                               goto restart;
-                       }
-#ifdef NALLOC_DEBUG
-                       add_alloc_record (p, size, FIXED_ALLOC);
-#endif
-                       return p;
-               }
-       }
-       return NULL;
-}
-
-void*
-sgen_fragment_allocator_serial_alloc (SgenFragmentAllocator *allocator, size_t size)
-{
-       SgenFragment *frag;
-       SgenFragment **previous;
-#ifdef NALLOC_DEBUG
-       InterlockedIncrement (&alloc_count);
-#endif
-
-       previous = &allocator->alloc_head;
-
-       for (frag = *previous; frag; frag = *previous) {
-               char *p = serial_alloc_from_fragment (previous, frag, size);
-
-               HEAVY_STAT (InterlockedIncrement (&stat_alloc_iterations));
-
-               if (p) {
-#ifdef NALLOC_DEBUG
-                       add_alloc_record (p, size, FIXED_ALLOC);
-#endif
-                       return p;
-               }
-               previous = &frag->next;
-       }
-       return NULL;
-}
-
-void*
-sgen_fragment_allocator_serial_range_alloc (SgenFragmentAllocator *allocator, size_t desired_size, size_t minimum_size, size_t *out_alloc_size)
-{
-       SgenFragment *frag, **previous, *min_frag = NULL, **prev_min_frag = NULL;
-       size_t current_minimum = minimum_size;
-
-#ifdef NALLOC_DEBUG
-       InterlockedIncrement (&alloc_count);
-#endif
-
-       previous = &allocator->alloc_head;
-
-       for (frag = *previous; frag; frag = *previous) {
-               size_t frag_size = frag->fragment_end - frag->fragment_next;
-
-               HEAVY_STAT (InterlockedIncrement (&stat_alloc_range_iterations));
-
-               if (desired_size <= frag_size) {
-                       void *p;
-                       *out_alloc_size = desired_size;
-
-                       p = serial_alloc_from_fragment (previous, frag, desired_size);
-#ifdef NALLOC_DEBUG
-                       add_alloc_record (p, desired_size, RANGE_ALLOC);
-#endif
-                       return p;
-               }
-               if (current_minimum <= frag_size) {
-                       min_frag = frag;
-                       prev_min_frag = previous;
-                       current_minimum = frag_size;
-               }
-               previous = &frag->next;
-       }
-
-       if (min_frag) {
-               void *p;
-               size_t frag_size = min_frag->fragment_end - min_frag->fragment_next;
-               *out_alloc_size = frag_size;
-
-               p = serial_alloc_from_fragment (prev_min_frag, min_frag, frag_size);
-
-#ifdef NALLOC_DEBUG
-               add_alloc_record (p, frag_size, RANGE_ALLOC);
-#endif
-               return p;
-       }
-
-       return NULL;
-}
-
-void*
-sgen_fragment_allocator_par_range_alloc (SgenFragmentAllocator *allocator, size_t desired_size, size_t minimum_size, size_t *out_alloc_size)
-{
-       SgenFragment *frag, *min_frag;
-       size_t current_minimum;
-
-restart:
-       min_frag = NULL;
-       current_minimum = minimum_size;
-
-#ifdef NALLOC_DEBUG
-       InterlockedIncrement (&alloc_count);
-#endif
-
-       for (frag = unmask (allocator->alloc_head); frag; frag = unmask (frag->next)) {
-               size_t frag_size = frag->fragment_end - frag->fragment_next;
-
-               HEAVY_STAT (InterlockedIncrement (&stat_alloc_range_iterations));
-
-               if (desired_size <= frag_size) {
-                       void *p;
-                       *out_alloc_size = desired_size;
-
-                       p = par_alloc_from_fragment (allocator, frag, desired_size);
-                       if (!p) {
-                               HEAVY_STAT (InterlockedIncrement (&stat_alloc_range_retries));
-                               goto restart;
-                       }
-#ifdef NALLOC_DEBUG
-                       add_alloc_record (p, desired_size, RANGE_ALLOC);
-#endif
-                       return p;
-               }
-               if (current_minimum <= frag_size) {
-                       min_frag = frag;
-                       current_minimum = frag_size;
-               }
-       }
-
-       /* The second fragment_next read should be ordered in respect to the first code block */
-       mono_memory_barrier ();
-
-       if (min_frag) {
-               void *p;
-               size_t frag_size;
-
-               frag_size = min_frag->fragment_end - min_frag->fragment_next;
-               if (frag_size < minimum_size)
-                       goto restart;
-
-               *out_alloc_size = frag_size;
-
-               mono_memory_barrier ();
-               p = par_alloc_from_fragment (allocator, min_frag, frag_size);
-
-               /*XXX restarting here is quite dubious given this is already second chance allocation. */
-               if (!p) {
-                       HEAVY_STAT (InterlockedIncrement (&stat_alloc_retries));
-                       goto restart;
-               }
-#ifdef NALLOC_DEBUG
-               add_alloc_record (p, frag_size, RANGE_ALLOC);
-#endif
-               return p;
-       }
-
-       return NULL;
-}
-
-void
-sgen_clear_allocator_fragments (SgenFragmentAllocator *allocator)
-{
-       SgenFragment *frag;
-
-       for (frag = unmask (allocator->alloc_head); frag; frag = unmask (frag->next)) {
-               SGEN_LOG (4, "Clear nursery frag %p-%p", frag->fragment_next, frag->fragment_end);
-               sgen_clear_range (frag->fragment_next, frag->fragment_end);
-#ifdef NALLOC_DEBUG
-               add_alloc_record (frag->fragment_next, frag->fragment_end - frag->fragment_next, CLEAR_NURSERY_FRAGS);
-#endif
-       }       
-}
-
-/* Clear all remaining nursery fragments */
-void
-sgen_clear_nursery_fragments (void)
-{
-       if (sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION || sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG) {
-               sgen_clear_allocator_fragments (&mutator_allocator);
-               sgen_minor_collector.clear_fragments ();
-       }
-}
-
-/*
- * Mark a given range of memory as invalid.
- *
- * This can be done either by zeroing memory or by placing
- * a phony byte[] array. This keeps the heap forward walkable.
- *
- * This function ignores calls with a zero range, even if
- * both start and end are NULL.
- */
-void
-sgen_clear_range (char *start, char *end)
-{
-       size_t size = end - start;
-
-       if ((start && !end) || (start > end))
-               g_error ("Invalid range [%p %p]", start, end);
-
-       if (sgen_client_array_fill_range (start, size)) {
-               sgen_set_nursery_scan_start (start);
-               SGEN_ASSERT (0, start + sgen_safe_object_get_size ((GCObject*)start) == end, "Array fill produced wrong size");
-       }
-}
-
-void
-sgen_nursery_allocator_prepare_for_pinning (void)
-{
-       sgen_clear_allocator_fragments (&mutator_allocator);
-       sgen_minor_collector.clear_fragments ();
-}
-
-static mword fragment_total = 0;
-/*
- * We found a fragment of free memory in the nursery: memzero it and if
- * it is big enough, add it to the list of fragments that can be used for
- * allocation.
- */
-static void
-add_nursery_frag (SgenFragmentAllocator *allocator, size_t frag_size, char* frag_start, char* frag_end)
-{
-       SGEN_LOG (4, "Found empty fragment: %p-%p, size: %zd", frag_start, frag_end, frag_size);
-       binary_protocol_empty (frag_start, frag_size);
-       /* Not worth dealing with smaller fragments: need to tune */
-       if (frag_size >= SGEN_MAX_NURSERY_WASTE) {
-               /* memsetting just the first chunk start is bound to provide better cache locality */
-               if (sgen_get_nursery_clear_policy () == CLEAR_AT_GC)
-                       memset (frag_start, 0, frag_size);
-               else if (sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG)
-                       memset (frag_start, 0xff, frag_size);
-
-#ifdef NALLOC_DEBUG
-               /* XXX convert this into a flight record entry
-               printf ("\tfragment [%p %p] size %zd\n", frag_start, frag_end, frag_size);
-               */
-#endif
-               sgen_fragment_allocator_add (allocator, frag_start, frag_end);
-               fragment_total += frag_size;
-       } else {
-               /* Clear unused fragments, pinning depends on this */
-               sgen_clear_range (frag_start, frag_end);
-               HEAVY_STAT (InterlockedExchangeAdd (&stat_wasted_bytes_small_areas, frag_size));
-       }
-}
-
-static void
-fragment_list_reverse (SgenFragmentAllocator *allocator)
-{
-       SgenFragment *prev = NULL, *list = allocator->region_head;
-       while (list) {
-               SgenFragment *next = list->next;
-               list->next = prev;
-               list->next_in_order = prev;
-               prev = list;
-               list = next;
-       }
-
-       allocator->region_head = allocator->alloc_head = prev;
-}
-
-mword
-sgen_build_nursery_fragments (GCMemSection *nursery_section, SgenGrayQueue *unpin_queue)
-{
-       char *frag_start, *frag_end;
-       size_t frag_size;
-       SgenFragment *frags_ranges;
-       void **pin_start, **pin_entry, **pin_end;
-
-#ifdef NALLOC_DEBUG
-       reset_alloc_records ();
-#endif
-       /*The mutator fragments are done. We no longer need them. */
-       sgen_fragment_allocator_release (&mutator_allocator);
-
-       frag_start = sgen_nursery_start;
-       fragment_total = 0;
-
-       /* The current nursery might give us a fragment list to exclude [start, next[*/
-       frags_ranges = sgen_minor_collector.build_fragments_get_exclude_head ();
-
-       /* clear scan starts */
-       memset (nursery_section->scan_starts, 0, nursery_section->num_scan_start * sizeof (gpointer));
-
-       pin_start = pin_entry = sgen_pinning_get_entry (nursery_section->pin_queue_first_entry);
-       pin_end = sgen_pinning_get_entry (nursery_section->pin_queue_last_entry);
-
-       while (pin_entry < pin_end || frags_ranges) {
-               char *addr0, *addr1;
-               size_t size;
-
-               addr0 = addr1 = sgen_nursery_end;
-               if (pin_entry < pin_end)
-                       addr0 = *pin_entry;
-               if (frags_ranges)
-                       addr1 = frags_ranges->fragment_start;
-
-               if (addr0 < addr1) {
-                       if (unpin_queue)
-                               GRAY_OBJECT_ENQUEUE (unpin_queue, addr0, sgen_obj_get_descriptor_safe (addr0));
-                       else
-                               SGEN_UNPIN_OBJECT (addr0);
-                       size = SGEN_ALIGN_UP (sgen_safe_object_get_size ((GCObject*)addr0));
-                       CANARIFY_SIZE (size);
-                       sgen_set_nursery_scan_start (addr0);
-                       frag_end = addr0;
-                       ++pin_entry;
-               } else {
-                       frag_end = addr1;
-                       size = frags_ranges->fragment_next - addr1;
-                       frags_ranges = frags_ranges->next_in_order;
-               }
-
-               frag_size = frag_end - frag_start;
-
-               if (size == 0)
-                       continue;
-
-               g_assert (frag_size >= 0);
-               g_assert (size > 0);
-               if (frag_size && size)
-                       add_nursery_frag (&mutator_allocator, frag_size, frag_start, frag_end); 
-
-               frag_size = size;
-#ifdef NALLOC_DEBUG
-               add_alloc_record (*pin_entry, frag_size, PINNING);
-#endif
-               frag_start = frag_end + frag_size;
-       }
-
-       nursery_last_pinned_end = frag_start;
-       frag_end = sgen_nursery_end;
-       frag_size = frag_end - frag_start;
-       if (frag_size)
-               add_nursery_frag (&mutator_allocator, frag_size, frag_start, frag_end);
-
-       /* Now it's safe to release the fragments exclude list. */
-       sgen_minor_collector.build_fragments_release_exclude_head ();
-
-       /* First we reorder the fragment list to be in ascending address order. This makes H/W prefetchers happier. */
-       fragment_list_reverse (&mutator_allocator);
-
-       /*The collector might want to do something with the final nursery fragment list.*/
-       sgen_minor_collector.build_fragments_finish (&mutator_allocator);
-
-       if (!unmask (mutator_allocator.alloc_head)) {
-               SGEN_LOG (1, "Nursery fully pinned");
-               for (pin_entry = pin_start; pin_entry < pin_end; ++pin_entry) {
-                       void *p = *pin_entry;
-                       SGEN_LOG (3, "Bastard pinning obj %p (%s), size: %zd", p, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (p)), sgen_safe_object_get_size (p));
-               }
-       }
-       return fragment_total;
-}
-
-char *
-sgen_nursery_alloc_get_upper_alloc_bound (void)
-{
-       /*FIXME we need to calculate the collector upper bound as well, but this must be done in the previous GC. */
-       return sgen_nursery_end;
-}
-
-/*** Nursery memory allocation ***/
-void
-sgen_nursery_retire_region (void *address, ptrdiff_t size)
-{
-       HEAVY_STAT (InterlockedExchangeAdd (&stat_wasted_bytes_discarded_fragments, size));
-}
-
-gboolean
-sgen_can_alloc_size (size_t size)
-{
-       SgenFragment *frag;
-
-       if (!SGEN_CAN_ALIGN_UP (size))
-               return FALSE;
-
-       size = SGEN_ALIGN_UP (size);
-
-       for (frag = unmask (mutator_allocator.alloc_head); frag; frag = unmask (frag->next)) {
-               if ((size_t)(frag->fragment_end - frag->fragment_next) >= size)
-                       return TRUE;
-       }
-       return FALSE;
-}
-
-void*
-sgen_nursery_alloc (size_t size)
-{
-       SGEN_ASSERT (1, size >= (SGEN_CLIENT_MINIMUM_OBJECT_SIZE + CANARY_SIZE) && size <= (SGEN_MAX_SMALL_OBJ_SIZE + CANARY_SIZE), "Invalid nursery object size");
-
-       SGEN_LOG (4, "Searching nursery for size: %zd", size);
-       size = SGEN_ALIGN_UP (size);
-
-       HEAVY_STAT (InterlockedIncrement (&stat_nursery_alloc_requests));
-
-       return sgen_fragment_allocator_par_alloc (&mutator_allocator, size);
-}
-
-void*
-sgen_nursery_alloc_range (size_t desired_size, size_t minimum_size, size_t *out_alloc_size)
-{
-       SGEN_LOG (4, "Searching for byte range desired size: %zd minimum size %zd", desired_size, minimum_size);
-
-       HEAVY_STAT (InterlockedIncrement (&stat_nursery_alloc_range_requests));
-
-       return sgen_fragment_allocator_par_range_alloc (&mutator_allocator, desired_size, minimum_size, out_alloc_size);
-}
-
-/*** Initialization ***/
-
-#ifdef HEAVY_STATISTICS
-
-void
-sgen_nursery_allocator_init_heavy_stats (void)
-{
-       mono_counters_register ("bytes wasted trailer fragments", MONO_COUNTER_GC | MONO_COUNTER_WORD | MONO_COUNTER_BYTES, &stat_wasted_bytes_trailer);
-       mono_counters_register ("bytes wasted small areas", MONO_COUNTER_GC | MONO_COUNTER_WORD | MONO_COUNTER_BYTES, &stat_wasted_bytes_small_areas);
-       mono_counters_register ("bytes wasted discarded fragments", MONO_COUNTER_GC | MONO_COUNTER_WORD | MONO_COUNTER_BYTES, &stat_wasted_bytes_discarded_fragments);
-
-       mono_counters_register ("# nursery alloc requests", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_alloc_requests);
-       mono_counters_register ("# nursery alloc iterations", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_alloc_iterations);
-       mono_counters_register ("# nursery alloc retries", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_alloc_retries);
-
-       mono_counters_register ("# nursery alloc range requests", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_alloc_range_requests);
-       mono_counters_register ("# nursery alloc range iterations", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_alloc_range_iterations);
-       mono_counters_register ("# nursery alloc range restries", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_alloc_range_retries);
-}
-
-#endif
-
-void
-sgen_init_nursery_allocator (void)
-{
-       sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FRAGMENT, sizeof (SgenFragment));
-#ifdef NALLOC_DEBUG
-       alloc_records = sgen_alloc_os_memory (sizeof (AllocRecord) * ALLOC_RECORD_COUNT, SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE, "debugging memory");
-#endif
-}
-
-void
-sgen_nursery_alloc_prepare_for_minor (void)
-{
-       sgen_minor_collector.prepare_to_space (sgen_space_bitmap, sgen_space_bitmap_size);
-}
-
-void
-sgen_nursery_alloc_prepare_for_major (void)
-{
-       sgen_minor_collector.prepare_to_space (sgen_space_bitmap, sgen_space_bitmap_size);
-}
-
-void
-sgen_nursery_allocator_set_nursery_bounds (char *start, char *end)
-{
-       sgen_nursery_start = start;
-       sgen_nursery_end = end;
-
-       /*
-        * This will not divide evenly for tiny nurseries (<4kb), so we make sure to be on
-        * the right side of things and round up.  We could just do a MIN(1,x) instead,
-        * since the nursery size must be a power of 2.
-        */
-       sgen_space_bitmap_size = (end - start + SGEN_TO_SPACE_GRANULE_IN_BYTES * 8 - 1) / (SGEN_TO_SPACE_GRANULE_IN_BYTES * 8);
-       sgen_space_bitmap = g_malloc0 (sgen_space_bitmap_size);
-
-       /* Setup the single first large fragment */
-       sgen_minor_collector.init_nursery (&mutator_allocator, start, end);
-}
-
-#endif
index 7ce0978ac2c317a4bffbd99326c75b460dc5b2bf..2cef968485d4b0c88cc3488fab635ff2c9b73e57 100644 (file)
 
 #include <stdlib.h>
 
-#include "sgen-gc.h"
+#include "sgen/sgen-gc.h"
 #include "sgen-bridge-internal.h"
-#include "sgen-hash-table.h"
-#include "sgen-qsort.h"
-#include "sgen-client.h"
+#include "sgen/sgen-hash-table.h"
+#include "sgen/sgen-qsort.h"
+#include "sgen/sgen-client.h"
 #include "utils/mono-logger-internal.h"
 
 typedef struct {
@@ -542,7 +542,7 @@ dfs1 (HashEntry *obj_entry)
                        /* NULL marks that the next entry is to be finished */
                        dyn_array_ptr_push (&dfs_stack, NULL);
 
-#include "sgen-scan-object.h"
+#include "sgen/sgen-scan-object.h"
                } else {
                        obj_entry = dyn_array_ptr_pop (&dfs_stack);
 
index 09bddb94a63f85565069d24e8d5e6bc6832ab96a..eee4d837d3f728e4441833faec69aaa647b7396c 100644 (file)
 
 
 #include <glib.h>
-#include "metadata/sgen-gc.h"
-#include "metadata/sgen-archdep.h"
-#include "metadata/sgen-protocol.h"
-#include "metadata/sgen-thread-pool.h"
+#include "sgen/sgen-gc.h"
+#include "sgen/sgen-archdep.h"
+#include "sgen/sgen-protocol.h"
+#include "sgen/sgen-thread-pool.h"
 #include "metadata/object-internals.h"
 #include "metadata/gc-internal.h"
 
index 27aa1a9eec892111c24677b8608fc82947e57217..d0aa841296abca3ffc5e6da7ec0a5080bf7ccacb 100644 (file)
@@ -30,9 +30,9 @@
 
 #include <errno.h>
 #include <glib.h>
-#include "metadata/sgen-gc.h"
+#include "sgen/sgen-gc.h"
 #include "metadata/gc-internal.h"
-#include "metadata/sgen-archdep.h"
+#include "sgen/sgen-archdep.h"
 #include "metadata/object-internals.h"
 #include "utils/mono-signal-handler.h"
 
index 429d356adeef389da01451ee7d446ee366f442ca..656d8393045abfc5cf3a4d49365cc6c6f13a686b 100644 (file)
@@ -4,7 +4,7 @@
 
 #include "io-layer/io-layer.h"
 
-#include "metadata/sgen-gc.h"
+#include "sgen/sgen-gc.h"
 #include "metadata/gc-internal.h"
 
 gboolean
diff --git a/mono/metadata/sgen-pinning-stats.c b/mono/metadata/sgen-pinning-stats.c
deleted file mode 100644 (file)
index e6ecd57..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
- * 
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "config.h"
-#ifdef HAVE_SGEN_GC
-
-#include <string.h>
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/sgen-pinning.h"
-#include "mono/metadata/sgen-hash-table.h"
-#include "mono/metadata/sgen-client.h"
-
-typedef struct _PinStatAddress PinStatAddress;
-struct _PinStatAddress {
-       char *addr;
-       int pin_types;
-       PinStatAddress *left;
-       PinStatAddress *right;
-};
-
-typedef struct {
-       size_t num_pins [PIN_TYPE_MAX];
-} PinnedClassEntry;
-
-typedef struct {
-       gulong num_remsets;
-} GlobalRemsetClassEntry;
-
-static gboolean do_pin_stats = FALSE;
-
-static PinStatAddress *pin_stat_addresses = NULL;
-static size_t pinned_byte_counts [PIN_TYPE_MAX];
-
-static SgenPointerQueue pinned_objects = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_STATISTICS);
-
-static SgenHashTable pinned_class_hash_table = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_STATISTICS, INTERNAL_MEM_STAT_PINNED_CLASS, sizeof (PinnedClassEntry), g_str_hash, g_str_equal);
-static SgenHashTable global_remset_class_hash_table = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_STATISTICS, INTERNAL_MEM_STAT_REMSET_CLASS, sizeof (GlobalRemsetClassEntry), g_str_hash, g_str_equal);
-
-void
-sgen_pin_stats_enable (void)
-{
-       do_pin_stats = TRUE;
-}
-
-static void
-pin_stats_tree_free (PinStatAddress *node)
-{
-       if (!node)
-               return;
-       pin_stats_tree_free (node->left);
-       pin_stats_tree_free (node->right);
-       sgen_free_internal_dynamic (node, sizeof (PinStatAddress), INTERNAL_MEM_STATISTICS);
-}
-
-void
-sgen_pin_stats_reset (void)
-{
-       int i;
-       pin_stats_tree_free (pin_stat_addresses);
-       pin_stat_addresses = NULL;
-       for (i = 0; i < PIN_TYPE_MAX; ++i)
-               pinned_byte_counts [i] = 0;
-       sgen_pointer_queue_clear (&pinned_objects);
-}
-
-void
-sgen_pin_stats_register_address (char *addr, int pin_type)
-{
-       PinStatAddress **node_ptr = &pin_stat_addresses;
-       PinStatAddress *node;
-       int pin_type_bit = 1 << pin_type;
-
-       while (*node_ptr) {
-               node = *node_ptr;
-               if (addr == node->addr) {
-                       node->pin_types |= pin_type_bit;
-                       return;
-               }
-               if (addr < node->addr)
-                       node_ptr = &node->left;
-               else
-                       node_ptr = &node->right;
-       }
-
-       node = sgen_alloc_internal_dynamic (sizeof (PinStatAddress), INTERNAL_MEM_STATISTICS, TRUE);
-       node->addr = addr;
-       node->pin_types = pin_type_bit;
-       node->left = node->right = NULL;
-
-       *node_ptr = node;
-}
-
-static void
-pin_stats_count_object_from_tree (char *obj, size_t size, PinStatAddress *node, int *pin_types)
-{
-       if (!node)
-               return;
-       if (node->addr >= obj && node->addr < obj + size) {
-               int i;
-               for (i = 0; i < PIN_TYPE_MAX; ++i) {
-                       int pin_bit = 1 << i;
-                       if (!(*pin_types & pin_bit) && (node->pin_types & pin_bit)) {
-                               pinned_byte_counts [i] += size;
-                               *pin_types |= pin_bit;
-                       }
-               }
-       }
-       if (obj < node->addr)
-               pin_stats_count_object_from_tree (obj, size, node->left, pin_types);
-       if (obj + size - 1 > node->addr)
-               pin_stats_count_object_from_tree (obj, size, node->right, pin_types);
-}
-
-static gpointer
-lookup_vtable_entry (SgenHashTable *hash_table, GCVTable *vtable, gpointer empty_entry)
-{
-       char *name = g_strdup_printf ("%s.%s", sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable));
-       gpointer entry = sgen_hash_table_lookup (hash_table, name);
-
-       if (entry) {
-               g_free (name);
-       } else {
-               sgen_hash_table_replace (hash_table, name, empty_entry, NULL);
-               entry = sgen_hash_table_lookup (hash_table, name);
-       }
-
-       return entry;
-}
-
-static void
-register_vtable (GCVTable *vtable, int pin_types)
-{
-       PinnedClassEntry empty_entry;
-       PinnedClassEntry *entry;
-       int i;
-
-       memset (&empty_entry, 0, sizeof (PinnedClassEntry));
-       entry = lookup_vtable_entry (&pinned_class_hash_table, vtable, &empty_entry);
-
-       for (i = 0; i < PIN_TYPE_MAX; ++i) {
-               if (pin_types & (1 << i))
-                       ++entry->num_pins [i];
-       }
-}
-
-void
-sgen_pin_stats_register_object (char *obj, size_t size)
-{
-       int pin_types = 0;
-
-       if (!do_pin_stats)
-               return;
-
-       pin_stats_count_object_from_tree (obj, size, pin_stat_addresses, &pin_types);
-       sgen_pointer_queue_add (&pinned_objects, obj);
-
-       if (pin_types)
-               register_vtable ((GCVTable*)SGEN_LOAD_VTABLE (obj), pin_types);
-}
-
-void
-sgen_pin_stats_register_global_remset (char *obj)
-{
-       GlobalRemsetClassEntry empty_entry;
-       GlobalRemsetClassEntry *entry;
-
-       if (!do_pin_stats)
-               return;
-
-       memset (&empty_entry, 0, sizeof (GlobalRemsetClassEntry));
-       entry = lookup_vtable_entry (&global_remset_class_hash_table, (GCVTable*)SGEN_LOAD_VTABLE (obj), &empty_entry);
-
-       ++entry->num_remsets;
-}
-
-void
-sgen_pin_stats_print_class_stats (void)
-{
-       char *name;
-       PinnedClassEntry *pinned_entry;
-       GlobalRemsetClassEntry *remset_entry;
-
-       if (!do_pin_stats)
-               return;
-
-       g_print ("\n%-50s  %10s  %10s  %10s\n", "Class", "Stack", "Static", "Other");
-       SGEN_HASH_TABLE_FOREACH (&pinned_class_hash_table, name, pinned_entry) {
-               int i;
-               g_print ("%-50s", name);
-               for (i = 0; i < PIN_TYPE_MAX; ++i)
-                       g_print ("  %10ld", pinned_entry->num_pins [i]);
-               g_print ("\n");
-       } SGEN_HASH_TABLE_FOREACH_END;
-
-       g_print ("\n%-50s  %10s\n", "Class", "#Remsets");
-       SGEN_HASH_TABLE_FOREACH (&global_remset_class_hash_table, name, remset_entry) {
-               g_print ("%-50s  %10ld\n", name, remset_entry->num_remsets);
-       } SGEN_HASH_TABLE_FOREACH_END;
-}
-
-size_t
-sgen_pin_stats_get_pinned_byte_count (int pin_type)
-{
-       return pinned_byte_counts [pin_type];
-}
-
-SgenPointerQueue*
-sgen_pin_stats_get_object_list (void)
-{
-       return &pinned_objects;
-}
-
-#endif /* HAVE_SGEN_GC */
diff --git a/mono/metadata/sgen-pinning.c b/mono/metadata/sgen-pinning.c
deleted file mode 100644 (file)
index ed4824d..0000000
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * sgen-pinning.c: The pin queue.
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "config.h"
-#ifdef HAVE_SGEN_GC
-
-#include <string.h>
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/sgen-pinning.h"
-#include "mono/metadata/sgen-protocol.h"
-#include "mono/metadata/sgen-pointer-queue.h"
-#include "mono/metadata/sgen-client.h"
-
-static SgenPointerQueue pin_queue;
-static size_t last_num_pinned = 0;
-
-#define PIN_HASH_SIZE 1024
-static void *pin_hash_filter [PIN_HASH_SIZE];
-
-void
-sgen_init_pinning (void)
-{
-       memset (pin_hash_filter, 0, sizeof (pin_hash_filter));
-       pin_queue.mem_type = INTERNAL_MEM_PIN_QUEUE;
-}
-
-void
-sgen_finish_pinning (void)
-{
-       last_num_pinned = pin_queue.next_slot;
-       sgen_pointer_queue_clear (&pin_queue);
-}
-
-void
-sgen_pin_stage_ptr (void *ptr)
-{
-       /*very simple multiplicative hash function, tons better than simple and'ng */ 
-       int hash_idx = ((mword)ptr * 1737350767) & (PIN_HASH_SIZE - 1);
-       if (pin_hash_filter [hash_idx] == ptr)
-               return;
-
-       pin_hash_filter [hash_idx] = ptr;
-
-       sgen_pointer_queue_add (&pin_queue, ptr);
-}
-
-gboolean
-sgen_find_optimized_pin_queue_area (void *start, void *end, size_t *first_out, size_t *last_out)
-{
-       size_t first = sgen_pointer_queue_search (&pin_queue, start);
-       size_t last = sgen_pointer_queue_search (&pin_queue, end);
-       SGEN_ASSERT (0, last == pin_queue.next_slot || pin_queue.data [last] >= end, "Pin queue search gone awry");
-       *first_out = first;
-       *last_out = last;
-       return first != last;
-}
-
-void**
-sgen_pinning_get_entry (size_t index)
-{
-       SGEN_ASSERT (0, index <= pin_queue.next_slot, "Pin queue entry out of range");
-       return &pin_queue.data [index];
-}
-
-void
-sgen_find_section_pin_queue_start_end (GCMemSection *section)
-{
-       SGEN_LOG (6, "Pinning from section %p (%p-%p)", section, section->data, section->end_data);
-
-       sgen_find_optimized_pin_queue_area (section->data, section->end_data,
-                       &section->pin_queue_first_entry, &section->pin_queue_last_entry);
-
-       SGEN_LOG (6, "Found %zd pinning addresses in section %p",
-                       section->pin_queue_last_entry - section->pin_queue_first_entry, section);
-}
-
-/*This will setup the given section for the while pin queue. */
-void
-sgen_pinning_setup_section (GCMemSection *section)
-{
-       section->pin_queue_first_entry = 0;
-       section->pin_queue_last_entry = pin_queue.next_slot;
-}
-
-void
-sgen_pinning_trim_queue_to_section (GCMemSection *section)
-{
-       SGEN_ASSERT (0, section->pin_queue_first_entry == 0, "Pin queue trimming assumes the whole pin queue is used by the nursery");
-       pin_queue.next_slot = section->pin_queue_last_entry;
-}
-
-/*
- * This is called when we've run out of memory during a major collection.
- *
- * After collecting potential pin entries and sorting the array, this is what it looks like:
- *
- * +--------------------+---------------------------------------------+--------------------+
- * | major heap entries |               nursery entries               | major heap entries |
- * +--------------------+---------------------------------------------+--------------------+
- *
- * Of course there might not be major heap entries before and/or after the nursery entries,
- * depending on where the major heap sections are in the address space, and whether there
- * were any potential pointers there.
- *
- * When we pin nursery objects, we compact the nursery part of the pin array, which leaves
- * discarded entries after the ones that actually pointed to nursery objects:
- *
- * +--------------------+-----------------+---------------------------+--------------------+
- * | major heap entries | nursery entries | discarded nursery entries | major heap entries |
- * +--------------------+-----------------+---------------------------+--------------------+
- *
- * When, due to being out of memory, we late pin more objects, the pin array looks like
- * this:
- *
- * +--------------------+-----------------+---------------------------+--------------------+--------------+
- * | major heap entries | nursery entries | discarded nursery entries | major heap entries | late entries |
- * +--------------------+-----------------+---------------------------+--------------------+--------------+
- *
- * This function gets rid of the discarded nursery entries by nulling them out.  Note that
- * we can late pin objects not only in the nursery but also in the major heap, which happens
- * when evacuation fails.
- */
-void
-sgen_pin_queue_clear_discarded_entries (GCMemSection *section, size_t max_pin_slot)
-{
-       void **start = sgen_pinning_get_entry (section->pin_queue_last_entry);
-       void **end = sgen_pinning_get_entry (max_pin_slot);
-       void *addr;
-
-       for (; start < end; ++start) {
-               addr = *start;
-               if ((char*)addr < section->data || (char*)addr > section->end_data)
-                       break;
-               *start = NULL;
-       }
-}
-
-/* reduce the info in the pin queue, removing duplicate pointers and sorting them */
-void
-sgen_optimize_pin_queue (void)
-{
-       sgen_pointer_queue_sort_uniq (&pin_queue);
-}
-
-size_t
-sgen_get_pinned_count (void)
-{
-       return pin_queue.next_slot;
-}
-
-void
-sgen_dump_pin_queue (void)
-{
-       int i;
-
-       for (i = 0; i < last_num_pinned; ++i) {
-               void *ptr = pin_queue.data [i];
-               SGEN_LOG (3, "Bastard pinning obj %p (%s), size: %zd", ptr, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (ptr)), sgen_safe_object_get_size (ptr));
-       }
-}
-
-typedef struct _CementHashEntry CementHashEntry;
-struct _CementHashEntry {
-       char *obj;
-       unsigned int count;
-};
-
-static CementHashEntry cement_hash [SGEN_CEMENT_HASH_SIZE];
-
-static gboolean cement_enabled = TRUE;
-
-void
-sgen_cement_init (gboolean enabled)
-{
-       cement_enabled = enabled;
-}
-
-void
-sgen_cement_reset (void)
-{
-       memset (cement_hash, 0, sizeof (cement_hash));
-       binary_protocol_cement_reset ();
-}
-
-gboolean
-sgen_cement_lookup (char *obj)
-{
-       guint hv = sgen_aligned_addr_hash (obj);
-       int i = SGEN_CEMENT_HASH (hv);
-
-       SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Looking up cementing for non-nursery objects makes no sense");
-
-       if (!cement_enabled)
-               return FALSE;
-
-       if (!cement_hash [i].obj)
-               return FALSE;
-       if (cement_hash [i].obj != obj)
-               return FALSE;
-
-       return cement_hash [i].count >= SGEN_CEMENT_THRESHOLD;
-}
-
-gboolean
-sgen_cement_lookup_or_register (char *obj)
-{
-       guint hv;
-       int i;
-       CementHashEntry *hash = cement_hash;
-
-       if (!cement_enabled)
-               return FALSE;
-
-       hv = sgen_aligned_addr_hash (obj);
-       i = SGEN_CEMENT_HASH (hv);
-
-       SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Can only cement pointers to nursery objects");
-
-       if (!hash [i].obj) {
-               SGEN_ASSERT (5, !hash [i].count, "Cementing hash inconsistent");
-               hash [i].obj = obj;
-       } else if (hash [i].obj != obj) {
-               return FALSE;
-       }
-
-       if (hash [i].count >= SGEN_CEMENT_THRESHOLD)
-               return TRUE;
-
-       ++hash [i].count;
-       if (hash [i].count == SGEN_CEMENT_THRESHOLD) {
-               SGEN_ASSERT (9, sgen_get_current_collection_generation () >= 0, "We can only cement objects when we're in a collection pause.");
-               SGEN_ASSERT (9, SGEN_OBJECT_IS_PINNED (obj), "Can only cement pinned objects");
-               SGEN_CEMENT_OBJECT (obj);
-
-               binary_protocol_cement (obj, (gpointer)SGEN_LOAD_VTABLE (obj),
-                               (int)sgen_safe_object_get_size ((GCObject*)obj));
-       }
-
-       return FALSE;
-}
-
-static void
-pin_from_hash (CementHashEntry *hash, gboolean has_been_reset)
-{
-       int i;
-       for (i = 0; i < SGEN_CEMENT_HASH_SIZE; ++i) {
-               if (!hash [i].count)
-                       continue;
-
-               if (has_been_reset)
-                       SGEN_ASSERT (5, hash [i].count >= SGEN_CEMENT_THRESHOLD, "Cementing hash inconsistent");
-
-               sgen_pin_stage_ptr (hash [i].obj);
-               binary_protocol_cement_stage (hash [i].obj);
-               /* FIXME: do pin stats if enabled */
-
-               SGEN_CEMENT_OBJECT (hash [i].obj);
-       }
-}
-
-void
-sgen_pin_cemented_objects (void)
-{
-       pin_from_hash (cement_hash, TRUE);
-}
-
-void
-sgen_cement_clear_below_threshold (void)
-{
-       int i;
-       for (i = 0; i < SGEN_CEMENT_HASH_SIZE; ++i) {
-               if (cement_hash [i].count < SGEN_CEMENT_THRESHOLD) {
-                       cement_hash [i].obj = NULL;
-                       cement_hash [i].count = 0;
-               }
-       }
-}
-
-#endif /* HAVE_SGEN_GC */
diff --git a/mono/metadata/sgen-pinning.h b/mono/metadata/sgen-pinning.h
deleted file mode 100644 (file)
index 2342fe4..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * sgen-pinning.h: All about pinning objects.
- *
- * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#ifndef __MONO_SGEN_PINNING_H__
-#define __MONO_SGEN_PINNING_H__
-
-#include "mono/metadata/sgen-pointer-queue.h"
-
-enum {
-       PIN_TYPE_STACK,
-       PIN_TYPE_STATIC_DATA,
-       PIN_TYPE_OTHER,
-       PIN_TYPE_MAX
-};
-
-void sgen_pin_stage_ptr (void *ptr);
-void sgen_optimize_pin_queue (void);
-void sgen_init_pinning (void);
-void sgen_finish_pinning (void);
-void sgen_pin_queue_clear_discarded_entries (GCMemSection *section, size_t max_pin_slot);
-size_t sgen_get_pinned_count (void);
-void sgen_pinning_setup_section (GCMemSection *section);
-void sgen_pinning_trim_queue_to_section (GCMemSection *section);
-
-void sgen_dump_pin_queue (void);
-
-gboolean sgen_find_optimized_pin_queue_area (void *start, void *end, size_t *first_out, size_t *last_out);
-void sgen_find_section_pin_queue_start_end (GCMemSection *section);
-void** sgen_pinning_get_entry (size_t index);
-void sgen_pin_objects_in_section (GCMemSection *section, ScanCopyContext ctx);
-
-/* Pinning stats */
-
-void sgen_pin_stats_register_address (char *addr, int pin_type);
-size_t sgen_pin_stats_get_pinned_byte_count (int pin_type);
-SgenPointerQueue *sgen_pin_stats_get_object_list (void);
-void sgen_pin_stats_reset (void);
-
-/* Perpetual pinning, aka cementing */
-
-void sgen_cement_init (gboolean enabled);
-void sgen_cement_reset (void);
-gboolean sgen_cement_lookup (char *obj);
-gboolean sgen_cement_lookup_or_register (char *obj);
-void sgen_pin_cemented_objects (void);
-void sgen_cement_clear_below_threshold (void);
-
-#endif
diff --git a/mono/metadata/sgen-pointer-queue.c b/mono/metadata/sgen-pointer-queue.c
deleted file mode 100644 (file)
index f2de588..0000000
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * sgen-pointer-queue.c: A pointer queue that can be sorted.
- *
- * Copyright (C) 2014 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifdef HAVE_SGEN_GC
-
-#include <string.h>
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/sgen-pointer-queue.h"
-
-void
-sgen_pointer_queue_clear (SgenPointerQueue *queue)
-{
-       queue->next_slot = 0;
-}
-
-void
-sgen_pointer_queue_init (SgenPointerQueue *queue, int mem_type)
-{
-       queue->next_slot = 0;
-       queue->size = 0;
-       queue->data = NULL;
-       queue->mem_type = mem_type;
-}
-
-static void
-realloc_queue (SgenPointerQueue *queue)
-{
-       size_t new_size = queue->size ? queue->size + queue->size/2 : 1024;
-       void **new_data = sgen_alloc_internal_dynamic (sizeof (void*) * new_size, queue->mem_type, TRUE);
-
-       memcpy (new_data, queue->data, sizeof (void*) * queue->next_slot);
-       sgen_free_internal_dynamic (queue->data, sizeof (void*) * queue->size, queue->mem_type);
-       queue->data = new_data;
-       queue->size = new_size;
-       SGEN_LOG (4, "Reallocated pointer queue to size: %lu", new_size);
-}
-
-gboolean
-sgen_pointer_queue_will_grow (SgenPointerQueue *queue)
-{
-       return queue->next_slot >= queue->size;
-}
-
-void
-sgen_pointer_queue_add (SgenPointerQueue *queue, void *ptr)
-{
-       if (sgen_pointer_queue_will_grow (queue))
-               realloc_queue (queue);
-
-       queue->data [queue->next_slot++] = ptr;
-}
-
-void*
-sgen_pointer_queue_pop (SgenPointerQueue *queue)
-{
-       g_assert (queue->next_slot);
-
-       return queue->data [--queue->next_slot];
-}
-
-size_t
-sgen_pointer_queue_search (SgenPointerQueue *queue, void *addr)
-{
-       size_t first = 0, last = queue->next_slot;
-       while (first < last) {
-               size_t middle = first + ((last - first) >> 1);
-               if (addr <= queue->data [middle])
-                       last = middle;
-               else
-                       first = middle + 1;
-       }
-       g_assert (first == last);
-       return first;
-}
-
-/*
- * Removes all NULL pointers from the queue.
- */
-void
-sgen_pointer_queue_remove_nulls (SgenPointerQueue *queue)
-{
-       void **start, **cur, **end;
-       start = cur = queue->data;
-       end = queue->data + queue->next_slot;
-       while (cur < end) {
-               if (*cur)
-                       *start++ = *cur++;
-               else
-                       ++cur;
-       }
-       queue->next_slot = start - queue->data;
-}
-
-/*
- * Sorts the pointers in the queue, then removes duplicates.
- */
-void
-sgen_pointer_queue_sort_uniq (SgenPointerQueue *queue)
-{
-       void **start, **cur, **end;
-       /* sort and uniq pin_queue: we just sort and we let the rest discard multiple values */
-       /* it may be better to keep ranges of pinned memory instead of individually pinning objects */
-       SGEN_LOG (5, "Sorting pointer queue, size: %lu", queue->next_slot);
-       if (queue->next_slot > 1)
-               sgen_sort_addresses (queue->data, queue->next_slot);
-       start = cur = queue->data;
-       end = queue->data + queue->next_slot;
-       while (cur < end) {
-               *start = *cur++;
-               while (cur < end && *start == *cur)
-                       cur++;
-               start++;
-       };
-       queue->next_slot = start - queue->data;
-       SGEN_LOG (5, "Pointer queue reduced to size: %lu", queue->next_slot);
-}
-
-/*
- * Does a linear search through the pointer queue to find `ptr`.  Returns the index if
- * found, otherwise (size_t)-1.
- */
-size_t
-sgen_pointer_queue_find (SgenPointerQueue *queue, void *ptr)
-{
-       size_t i;
-       for (i = 0; i < queue->next_slot; ++i)
-               if (queue->data [i] == ptr)
-                       return i;
-       return (size_t)-1;
-}
-
-gboolean
-sgen_pointer_queue_is_empty (SgenPointerQueue *queue)
-{
-       return !queue->next_slot;
-}
-
-void
-sgen_pointer_queue_free (SgenPointerQueue *queue)
-{
-       sgen_free_internal_dynamic (queue->data, sizeof (void*) * queue->size, queue->mem_type);
-}
-
-#endif
diff --git a/mono/metadata/sgen-pointer-queue.h b/mono/metadata/sgen-pointer-queue.h
deleted file mode 100644 (file)
index 3352dab..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * sgen-pointer-queue.h: A pointer queue that can be sorted.
- *
- * Copyright (C) 2014 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __MONO_SGEN_POINTER_QUEUE_H__
-#define __MONO_SGEN_POINTER_QUEUE_H__
-
-#include <glib.h>
-
-typedef struct {
-       void **data;
-       size_t size;
-       size_t next_slot;
-       int mem_type;
-} SgenPointerQueue;
-
-#define SGEN_POINTER_QUEUE_INIT(mem_type)      { NULL, 0, 0, (mem_type) }
-
-void sgen_pointer_queue_add (SgenPointerQueue *queue, void *ptr);
-void sgen_pointer_queue_clear (SgenPointerQueue *queue);
-void sgen_pointer_queue_remove_nulls (SgenPointerQueue *queue);
-void sgen_pointer_queue_sort_uniq (SgenPointerQueue *queue);
-size_t sgen_pointer_queue_search (SgenPointerQueue *queue, void *addr);
-size_t sgen_pointer_queue_find (SgenPointerQueue *queue, void *ptr);
-void sgen_pointer_queue_init (SgenPointerQueue *queue, int mem_type);
-void* sgen_pointer_queue_pop (SgenPointerQueue *queue);
-gboolean sgen_pointer_queue_is_empty (SgenPointerQueue *queue);
-void sgen_pointer_queue_free (SgenPointerQueue *queue);
-gboolean sgen_pointer_queue_will_grow (SgenPointerQueue *queue);
-
-#endif
diff --git a/mono/metadata/sgen-protocol-def.h b/mono/metadata/sgen-protocol-def.h
deleted file mode 100644 (file)
index 0df783a..0000000
+++ /dev/null
@@ -1,387 +0,0 @@
-BEGIN_PROTOCOL_ENTRY3 (binary_protocol_collection_requested, TYPE_INT, generation, TYPE_SIZE, requested_size, TYPE_BOOL, force)
-FLUSH ()
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY2 (binary_protocol_collection_begin, TYPE_INT, index, TYPE_INT, generation)
-FLUSH ()
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY4 (binary_protocol_collection_end, TYPE_INT, index, TYPE_INT, generation, TYPE_LONGLONG, num_scanned_objects, TYPE_LONGLONG, num_unique_scanned_objects)
-FLUSH()
-CUSTOM_PRINT (printf ("%d generation %d scanned %lld unique %lld %0.2f%%", entry->index, entry->generation, entry->num_scanned_objects, entry->num_unique_scanned_objects, entry->num_unique_scanned_objects ? (100.0 * (double) entry->num_scanned_objects / (double) entry->num_unique_scanned_objects) : 0.0))
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY0 (binary_protocol_concurrent_start)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY0 (binary_protocol_concurrent_update)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY0 (binary_protocol_concurrent_finish)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY2 (binary_protocol_sweep_begin, TYPE_INT, generation, TYPE_BOOL, full_sweep)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY2 (binary_protocol_sweep_end, TYPE_INT, generation, TYPE_BOOL, full_sweep)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY3 (binary_protocol_world_stopping, TYPE_INT, generation, TYPE_LONGLONG, timestamp, TYPE_POINTER, thread)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (ptr == entry->thread ? 2 : BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY6 (binary_protocol_world_stopped, TYPE_INT, generation, TYPE_LONGLONG, timestamp, TYPE_LONGLONG, total_major_cards, TYPE_LONGLONG, marked_major_cards, TYPE_LONGLONG, total_los_cards, TYPE_LONGLONG, marked_los_cards)
-CUSTOM_PRINT (printf ("generation %d timestamp %lld total %lld marked %lld %0.2f%%", entry->generation, entry->timestamp, entry->total_major_cards + entry->total_los_cards, entry->marked_major_cards + entry->marked_los_cards, 100.0 * (double) (entry->marked_major_cards + entry->marked_los_cards) / (double) (entry->total_major_cards + entry->total_los_cards)))
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY6 (binary_protocol_world_restarting, TYPE_INT, generation, TYPE_LONGLONG, timestamp, TYPE_LONGLONG, total_major_cards, TYPE_LONGLONG, marked_major_cards, TYPE_LONGLONG, total_los_cards, TYPE_LONGLONG, marked_los_cards)
-CUSTOM_PRINT (printf ("generation %d timestamp %lld total %lld marked %lld %0.2f%%", entry->generation, entry->timestamp, entry->total_major_cards + entry->total_los_cards, entry->marked_major_cards + entry->marked_los_cards, 100.0 * (double) (entry->marked_major_cards + entry->marked_los_cards) / (double) (entry->total_major_cards + entry->total_los_cards)))
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY2 (binary_protocol_world_restarted, TYPE_INT, generation, TYPE_LONGLONG, timestamp)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY2 (binary_protocol_block_alloc, TYPE_POINTER, addr, TYPE_SIZE, size)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (matches_interval (ptr, entry->addr, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY2 (binary_protocol_block_free, TYPE_POINTER, addr, TYPE_SIZE, size)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (matches_interval (ptr, entry->addr, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY4 (binary_protocol_block_set_state, TYPE_POINTER, addr, TYPE_SIZE, size, TYPE_INT, old, TYPE_INT, new)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (matches_interval (ptr, entry->addr, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY1 (binary_protocol_mark_start, TYPE_INT, generation)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY1 (binary_protocol_mark_end, TYPE_INT, generation)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-/* FIXME: unify sweep and reclaim */
-BEGIN_PROTOCOL_ENTRY1 (binary_protocol_reclaim_start, TYPE_INT, generation)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY1 (binary_protocol_reclaim_end, TYPE_INT, generation)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY4 (binary_protocol_alloc, TYPE_POINTER, obj, TYPE_POINTER, vtable, TYPE_SIZE, size, TYPE_POINTER, provenance)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (matches_interval (ptr, entry->obj, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (ptr == entry->vtable)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY4 (binary_protocol_copy, TYPE_POINTER, from, TYPE_POINTER, to, TYPE_POINTER, vtable, TYPE_SIZE, size)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (matches_interval (ptr, entry->from, entry->size) ? 0 : matches_interval (ptr, entry->to, entry->size) ? 1 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (ptr == entry->vtable)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY2 (binary_protocol_pin_stage, TYPE_POINTER, addr_ptr, TYPE_POINTER, addr)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (ptr == entry->addr_ptr ? 0 : ptr == entry->addr ? 1 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY1 (binary_protocol_cement_stage, TYPE_POINTER, addr)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (ptr == entry->addr ? 0 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_pin, TYPE_POINTER, obj, TYPE_POINTER, vtable, TYPE_SIZE, size)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (matches_interval (ptr, entry->obj, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (ptr == entry->vtable)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_mark, TYPE_POINTER, obj, TYPE_POINTER, vtable, TYPE_SIZE, size)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (matches_interval (ptr, entry->obj, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_scan_begin, TYPE_POINTER, obj, TYPE_POINTER, vtable, TYPE_SIZE, size)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (matches_interval (ptr, entry->obj, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (ptr == entry->vtable)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY2 (binary_protocol_scan_vtype_begin, TYPE_POINTER, obj, TYPE_SIZE, size)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (matches_interval (ptr, entry->obj, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_scan_process_reference, TYPE_POINTER, obj, TYPE_POINTER, ptr, TYPE_POINTER, value)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (ptr == entry->obj ? 0 : ptr == entry->ptr ? 1 : ptr == entry->value ? 2 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY4 (binary_protocol_scan_stack, TYPE_POINTER, thread, TYPE_POINTER, stack_start, TYPE_POINTER, stack_end, TYPE_INT, skip_reason)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (ptr == entry->thread ? 0 : (ptr >= entry->stack_start && ptr < entry->stack_end) ? 1 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_wbarrier, TYPE_POINTER, ptr, TYPE_POINTER, value, TYPE_POINTER, value_vtable)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (ptr == entry->ptr ? 0 : ptr == entry->value ? 1 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (ptr == entry->value_vtable)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_global_remset, TYPE_POINTER, ptr, TYPE_POINTER, value, TYPE_POINTER, value_vtable)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (ptr == entry->ptr ? 0 : ptr == entry->value ? 1 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (ptr == entry->value_vtable)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY5 (binary_protocol_ptr_update, TYPE_POINTER, ptr, TYPE_POINTER, old_value, TYPE_POINTER, new_value, TYPE_POINTER, vtable, TYPE_SIZE, size)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (ptr == entry->ptr ? 0 : matches_interval (ptr, entry->old_value, entry->size) ? 1 : matches_interval (ptr, entry->new_value, entry->size) ? 2 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (ptr == entry->vtable)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_cleanup, TYPE_POINTER, ptr, TYPE_POINTER, vtable, TYPE_SIZE, size)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (matches_interval (ptr, entry->ptr, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (ptr == entry->vtable)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY2 (binary_protocol_empty, TYPE_POINTER, start, TYPE_SIZE, size)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (matches_interval (ptr, entry->start, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY2 (binary_protocol_thread_suspend, TYPE_POINTER, thread, TYPE_POINTER, stopped_ip)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY1 (binary_protocol_thread_restart, TYPE_POINTER, thread)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY1 (binary_protocol_thread_register, TYPE_POINTER, thread)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY1 (binary_protocol_thread_unregister, TYPE_POINTER, thread)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY6 (binary_protocol_missing_remset, TYPE_POINTER, obj, TYPE_POINTER, obj_vtable, TYPE_INT, offset, TYPE_POINTER, value, TYPE_POINTER, value_vtable, TYPE_BOOL, value_pinned)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (ptr == entry->obj ? 0 : ptr == entry->value ? 3 : ptr == (char*)entry->obj + entry->offset ? BINARY_PROTOCOL_MATCH : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (ptr == entry->obj_vtable || ptr == entry->value_vtable)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY4 (binary_protocol_alloc_pinned, TYPE_POINTER, obj, TYPE_POINTER, vtable, TYPE_SIZE, size, TYPE_POINTER, provenance)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (matches_interval (ptr, entry->obj, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (ptr == entry->vtable)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY4 (binary_protocol_alloc_degraded, TYPE_POINTER, obj, TYPE_POINTER, vtable, TYPE_SIZE, size, TYPE_POINTER, provenance)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (matches_interval (ptr, entry->obj, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (ptr == entry->vtable)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY2 (binary_protocol_card_scan, TYPE_POINTER, start, TYPE_SIZE, size)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (matches_interval (ptr, entry->start, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY3 (binary_protocol_cement, TYPE_POINTER, obj, TYPE_POINTER, vtable, TYPE_SIZE, size)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (matches_interval (ptr, entry->obj, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (ptr == entry->vtable)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY0 (binary_protocol_cement_reset)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY4 (binary_protocol_dislink_update, TYPE_POINTER, link, TYPE_POINTER, obj, TYPE_BOOL, track, TYPE_BOOL, staged)
-CUSTOM_PRINT(entry->obj ? printf ("link %p obj %p staged %d track %d", entry->link, entry->obj, entry->staged, entry->track) : printf ("link %p obj %p staged %d", entry->link, entry->obj, entry->staged))
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (ptr == entry->link ? 0 : ptr == entry->obj ? 1 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY4 (binary_protocol_dislink_update_staged, TYPE_POINTER, link, TYPE_POINTER, obj, TYPE_BOOL, track, TYPE_INT, index)
-CUSTOM_PRINT(entry->obj ? printf ("link %p obj %p index %d track %d", entry->link, entry->obj, entry->index, entry->track) : printf ("link %p obj %p index %d", entry->link, entry->obj, entry->index))
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (ptr == entry->link ? 0 : ptr == entry->obj ? 1 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_dislink_process_staged, TYPE_POINTER, link, TYPE_POINTER, obj, TYPE_INT, index)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (ptr == entry->link ? 0 : ptr == entry->obj ? 1 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY1 (binary_protocol_domain_unload_begin, TYPE_POINTER, domain)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY1 (binary_protocol_domain_unload_end, TYPE_POINTER, domain)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (TRUE)
-MATCH_INDEX (BINARY_PROTOCOL_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_gray_enqueue, TYPE_POINTER, queue, TYPE_POINTER, cursor, TYPE_POINTER, value)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (ptr == entry->cursor ? 1 : ptr == entry->value ? 2 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY_HEAVY
-
-BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_gray_dequeue, TYPE_POINTER, queue, TYPE_POINTER, cursor, TYPE_POINTER, value)
-DEFAULT_PRINT ()
-IS_ALWAYS_MATCH (FALSE)
-MATCH_INDEX (ptr == entry->cursor ? 1 : ptr == entry->value ? 2 : BINARY_PROTOCOL_NO_MATCH)
-IS_VTABLE_MATCH (FALSE)
-END_PROTOCOL_ENTRY_HEAVY
-
-#undef BEGIN_PROTOCOL_ENTRY0
-#undef BEGIN_PROTOCOL_ENTRY1
-#undef BEGIN_PROTOCOL_ENTRY2
-#undef BEGIN_PROTOCOL_ENTRY3
-#undef BEGIN_PROTOCOL_ENTRY4
-#undef BEGIN_PROTOCOL_ENTRY5
-#undef BEGIN_PROTOCOL_ENTRY6
-#undef BEGIN_PROTOCOL_ENTRY_HEAVY0
-#undef BEGIN_PROTOCOL_ENTRY_HEAVY1
-#undef BEGIN_PROTOCOL_ENTRY_HEAVY2
-#undef BEGIN_PROTOCOL_ENTRY_HEAVY3
-#undef BEGIN_PROTOCOL_ENTRY_HEAVY4
-#undef BEGIN_PROTOCOL_ENTRY_HEAVY5
-#undef BEGIN_PROTOCOL_ENTRY_HEAVY6
-
-#undef FLUSH
-
-#undef DEFAULT_PRINT
-#undef CUSTOM_PRINT
-
-#undef IS_ALWAYS_MATCH
-#undef MATCH_INDEX
-#undef IS_VTABLE_MATCH
-
-#undef END_PROTOCOL_ENTRY
-#undef END_PROTOCOL_ENTRY_HEAVY
diff --git a/mono/metadata/sgen-protocol.c b/mono/metadata/sgen-protocol.c
deleted file mode 100644 (file)
index 1b55b88..0000000
+++ /dev/null
@@ -1,434 +0,0 @@
-/*
- * sgen-protocol.c: Binary protocol of internal activity, to aid
- * debugging.
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifdef HAVE_SGEN_GC
-
-#include "config.h"
-#include "sgen-conf.h"
-#include "sgen-gc.h"
-#include "sgen-protocol.h"
-#include "sgen-memory-governor.h"
-#include "sgen-thread-pool.h"
-#include "sgen-client.h"
-#include "mono/utils/mono-membar.h"
-
-#include <errno.h>
-#include <string.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#include <fcntl.h>
-#endif
-
-/* FIXME Implement binary protocol IO on systems that don't have unistd */
-#ifdef HAVE_UNISTD_H
-/* If valid, dump binary protocol to this file */
-static int binary_protocol_file = -1;
-
-/* We set this to -1 to indicate an exclusive lock */
-static volatile int binary_protocol_use_count = 0;
-
-#define BINARY_PROTOCOL_BUFFER_SIZE    (65536 - 2 * 8)
-
-typedef struct _BinaryProtocolBuffer BinaryProtocolBuffer;
-struct _BinaryProtocolBuffer {
-       BinaryProtocolBuffer * volatile next;
-       volatile int index;
-       unsigned char buffer [BINARY_PROTOCOL_BUFFER_SIZE];
-};
-
-static BinaryProtocolBuffer * volatile binary_protocol_buffers = NULL;
-
-static char* filename_or_prefix = NULL;
-static int current_file_index = 0;
-static long long current_file_size = 0;
-static long long file_size_limit;
-
-static char*
-filename_for_index (int index)
-{
-       char *filename;
-
-       SGEN_ASSERT (0, file_size_limit > 0, "Indexed binary protocol filename must only be used with file size limit");
-
-       filename = sgen_alloc_internal_dynamic (strlen (filename_or_prefix) + 32, INTERNAL_MEM_BINARY_PROTOCOL, TRUE);
-       sprintf (filename, "%s.%d", filename_or_prefix, index);
-
-       return filename;
-}
-
-static void
-free_filename (char *filename)
-{
-       SGEN_ASSERT (0, file_size_limit > 0, "Indexed binary protocol filename must only be used with file size limit");
-
-       sgen_free_internal_dynamic (filename, strlen (filename_or_prefix) + 32, INTERNAL_MEM_BINARY_PROTOCOL);
-}
-
-static void
-binary_protocol_open_file (void)
-{
-       char *filename;
-
-       if (file_size_limit > 0)
-               filename = filename_for_index (current_file_index);
-       else
-               filename = filename_or_prefix;
-
-       do {
-               binary_protocol_file = open (filename, O_CREAT|O_WRONLY|O_TRUNC, 0644);
-               if (binary_protocol_file == -1 && errno != EINTR)
-                       break; /* Failed */
-       } while (binary_protocol_file == -1);
-
-       if (file_size_limit > 0)
-               free_filename (filename);
-}
-#endif
-
-void
-binary_protocol_init (const char *filename, long long limit)
-{
-#ifdef HAVE_UNISTD_H
-       filename_or_prefix = sgen_alloc_internal_dynamic (strlen (filename) + 1, INTERNAL_MEM_BINARY_PROTOCOL, TRUE);
-       strcpy (filename_or_prefix, filename);
-
-       file_size_limit = limit;
-
-       binary_protocol_open_file ();
-#endif
-}
-
-gboolean
-binary_protocol_is_enabled (void)
-{
-#ifdef HAVE_UNISTD_H
-       return binary_protocol_file != -1;
-#else
-       return FALSE;
-#endif
-}
-
-#ifdef HAVE_UNISTD_H
-
-static void
-close_binary_protocol_file (void)
-{
-       while (close (binary_protocol_file) == -1 && errno == EINTR)
-               ;
-       binary_protocol_file = -1;
-}
-
-static gboolean
-try_lock_exclusive (void)
-{
-       do {
-               if (binary_protocol_use_count)
-                       return FALSE;
-       } while (InterlockedCompareExchange (&binary_protocol_use_count, -1, 0) != 0);
-       mono_memory_barrier ();
-       return TRUE;
-}
-
-static void
-unlock_exclusive (void)
-{
-       mono_memory_barrier ();
-       SGEN_ASSERT (0, binary_protocol_use_count == -1, "Exclusively locked count must be -1");
-       if (InterlockedCompareExchange (&binary_protocol_use_count, 0, -1) != -1)
-               SGEN_ASSERT (0, FALSE, "Somebody messed with the exclusive lock");
-}
-
-static void
-lock_recursive (void)
-{
-       int old_count;
-       do {
-       retry:
-               old_count = binary_protocol_use_count;
-               if (old_count < 0) {
-                       /* Exclusively locked - retry */
-                       /* FIXME: short back-off */
-                       goto retry;
-               }
-       } while (InterlockedCompareExchange (&binary_protocol_use_count, old_count + 1, old_count) != old_count);
-       mono_memory_barrier ();
-}
-
-static void
-unlock_recursive (void)
-{
-       int old_count;
-       mono_memory_barrier ();
-       do {
-               old_count = binary_protocol_use_count;
-               SGEN_ASSERT (0, old_count > 0, "Locked use count must be at least 1");
-       } while (InterlockedCompareExchange (&binary_protocol_use_count, old_count - 1, old_count) != old_count);
-}
-
-static void
-binary_protocol_flush_buffer (BinaryProtocolBuffer *buffer)
-{
-       ssize_t ret;
-       size_t to_write = buffer->index;
-       size_t written = 0;
-       g_assert (buffer->index > 0);
-
-       while (written < to_write) {
-               ret = write (binary_protocol_file, buffer->buffer + written, to_write - written);
-               if (ret >= 0)
-                       written += ret;
-               else if (errno == EINTR)
-                       continue;
-               else
-                       close_binary_protocol_file ();
-       }
-
-       current_file_size += buffer->index;
-
-       sgen_free_os_memory (buffer, sizeof (BinaryProtocolBuffer), SGEN_ALLOC_INTERNAL);
-}
-
-static void
-binary_protocol_check_file_overflow (void)
-{
-       if (file_size_limit <= 0 || current_file_size < file_size_limit)
-               return;
-
-       close_binary_protocol_file ();
-
-       if (current_file_index > 0) {
-               char *filename = filename_for_index (current_file_index - 1);
-               unlink (filename);
-               free_filename (filename);
-       }
-
-       ++current_file_index;
-       current_file_size = 0;
-
-       binary_protocol_open_file ();
-}
-#endif
-
-void
-binary_protocol_flush_buffers (gboolean force)
-{
-#ifdef HAVE_UNISTD_H
-       int num_buffers = 0, i;
-       BinaryProtocolBuffer *buf;
-       BinaryProtocolBuffer **bufs;
-
-       if (binary_protocol_file == -1)
-               return;
-
-       if (!force && !try_lock_exclusive ())
-               return;
-
-       for (buf = binary_protocol_buffers; buf != NULL; buf = buf->next)
-               ++num_buffers;
-       bufs = sgen_alloc_internal_dynamic (num_buffers * sizeof (BinaryProtocolBuffer*), INTERNAL_MEM_BINARY_PROTOCOL, TRUE);
-       for (buf = binary_protocol_buffers, i = 0; buf != NULL; buf = buf->next, i++)
-               bufs [i] = buf;
-       SGEN_ASSERT (0, i == num_buffers, "Binary protocol buffer count error");
-
-       binary_protocol_buffers = NULL;
-
-       for (i = num_buffers - 1; i >= 0; --i) {
-               binary_protocol_flush_buffer (bufs [i]);
-               binary_protocol_check_file_overflow ();
-       }
-
-       sgen_free_internal_dynamic (buf, num_buffers * sizeof (BinaryProtocolBuffer*), INTERNAL_MEM_BINARY_PROTOCOL);
-
-       if (!force)
-               unlock_exclusive ();
-#endif
-}
-
-#ifdef HAVE_UNISTD_H
-static BinaryProtocolBuffer*
-binary_protocol_get_buffer (int length)
-{
-       BinaryProtocolBuffer *buffer, *new_buffer;
- retry:
-       buffer = binary_protocol_buffers;
-       if (buffer && buffer->index + length <= BINARY_PROTOCOL_BUFFER_SIZE)
-               return buffer;
-
-       new_buffer = sgen_alloc_os_memory (sizeof (BinaryProtocolBuffer), SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE, "debugging memory");
-       new_buffer->next = buffer;
-       new_buffer->index = 0;
-
-       if (InterlockedCompareExchangePointer ((void**)&binary_protocol_buffers, new_buffer, buffer) != buffer) {
-               sgen_free_os_memory (new_buffer, sizeof (BinaryProtocolBuffer), SGEN_ALLOC_INTERNAL);
-               goto retry;
-       }
-
-       return new_buffer;
-}
-#endif
-
-static void
-protocol_entry (unsigned char type, gpointer data, int size)
-{
-#ifdef HAVE_UNISTD_H
-       int index;
-       BinaryProtocolBuffer *buffer;
-
-       if (binary_protocol_file == -1)
-               return;
-
-       if (sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ()))
-               type |= 0x80;
-
-       lock_recursive ();
-
- retry:
-       buffer = binary_protocol_get_buffer (size + 1);
- retry_same_buffer:
-       index = buffer->index;
-       if (index + 1 + size > BINARY_PROTOCOL_BUFFER_SIZE)
-               goto retry;
-
-       if (InterlockedCompareExchange (&buffer->index, index + 1 + size, index) != index)
-               goto retry_same_buffer;
-
-       /* FIXME: if we're interrupted at this point, we have a buffer
-          entry that contains random data. */
-
-       buffer->buffer [index++] = type;
-       memcpy (buffer->buffer + index, data, size);
-       index += size;
-
-       g_assert (index <= BINARY_PROTOCOL_BUFFER_SIZE);
-
-       unlock_recursive ();
-#endif
-}
-
-#define TYPE_INT int
-#define TYPE_LONGLONG long long
-#define TYPE_SIZE size_t
-#define TYPE_POINTER gpointer
-#define TYPE_BOOL gboolean
-
-#define BEGIN_PROTOCOL_ENTRY0(method) \
-       void method (void) { \
-               int __type = PROTOCOL_ID(method); \
-               gpointer __data = NULL; \
-               int __size = 0; \
-               CLIENT_PROTOCOL_NAME (method) ();
-#define BEGIN_PROTOCOL_ENTRY1(method,t1,f1) \
-       void method (t1 f1) { \
-               PROTOCOL_STRUCT(method) __entry = { f1 }; \
-               int __type = PROTOCOL_ID(method); \
-               gpointer __data = &__entry; \
-               int __size = sizeof (PROTOCOL_STRUCT(method)); \
-               CLIENT_PROTOCOL_NAME (method) (f1);
-#define BEGIN_PROTOCOL_ENTRY2(method,t1,f1,t2,f2) \
-       void method (t1 f1, t2 f2) { \
-               PROTOCOL_STRUCT(method) __entry = { f1, f2 }; \
-               int __type = PROTOCOL_ID(method); \
-               gpointer __data = &__entry; \
-               int __size = sizeof (PROTOCOL_STRUCT(method)); \
-               CLIENT_PROTOCOL_NAME (method) (f1, f2);
-#define BEGIN_PROTOCOL_ENTRY3(method,t1,f1,t2,f2,t3,f3) \
-       void method (t1 f1, t2 f2, t3 f3) { \
-               PROTOCOL_STRUCT(method) __entry = { f1, f2, f3 }; \
-               int __type = PROTOCOL_ID(method); \
-               gpointer __data = &__entry; \
-               int __size = sizeof (PROTOCOL_STRUCT(method)); \
-               CLIENT_PROTOCOL_NAME (method) (f1, f2, f3);
-#define BEGIN_PROTOCOL_ENTRY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
-       void method (t1 f1, t2 f2, t3 f3, t4 f4) { \
-               PROTOCOL_STRUCT(method) __entry = { f1, f2, f3, f4 }; \
-               int __type = PROTOCOL_ID(method); \
-               gpointer __data = &__entry; \
-               int __size = sizeof (PROTOCOL_STRUCT(method)); \
-               CLIENT_PROTOCOL_NAME (method) (f1, f2, f3, f4);
-#define BEGIN_PROTOCOL_ENTRY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
-       void method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5) { \
-               PROTOCOL_STRUCT(method) __entry = { f1, f2, f3, f4, f5 }; \
-               int __type = PROTOCOL_ID(method); \
-               gpointer __data = &__entry; \
-               int __size = sizeof (PROTOCOL_STRUCT(method)); \
-               CLIENT_PROTOCOL_NAME (method) (f1, f2, f3, f4, f5);
-#define BEGIN_PROTOCOL_ENTRY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
-       void method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5, t6 f6) { \
-               PROTOCOL_STRUCT(method) __entry = { f1, f2, f3, f4, f5, f6 }; \
-               int __type = PROTOCOL_ID(method); \
-               gpointer __data = &__entry; \
-               int __size = sizeof (PROTOCOL_STRUCT(method)); \
-               CLIENT_PROTOCOL_NAME (method) (f1, f2, f3, f4, f5, f6);
-
-#define FLUSH() \
-               binary_protocol_flush_buffers (FALSE);
-
-#define DEFAULT_PRINT()
-#define CUSTOM_PRINT(_)
-
-#define IS_ALWAYS_MATCH(_)
-#define MATCH_INDEX(_)
-#define IS_VTABLE_MATCH(_)
-
-#define END_PROTOCOL_ENTRY \
-               protocol_entry (__type, __data, __size); \
-       }
-
-#ifdef SGEN_HEAVY_BINARY_PROTOCOL
-#define BEGIN_PROTOCOL_ENTRY_HEAVY0(method) \
-       BEGIN_PROTOCOL_ENTRY0 (method)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY1(method,t1,f1) \
-       BEGIN_PROTOCOL_ENTRY1 (method,t1,f1)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY2(method,t1,f1,t2,f2) \
-       BEGIN_PROTOCOL_ENTRY2 (method,t1,f1,t2,f2)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY3(method,t1,f1,t2,f2,t3,f3) \
-       BEGIN_PROTOCOL_ENTRY3 (method,t1,f1,t2,f2,t3,f3)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
-       BEGIN_PROTOCOL_ENTRY4 (method,t1,f1,t2,f2,t3,f3,t4,f4)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
-       BEGIN_PROTOCOL_ENTRY5 (method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
-       BEGIN_PROTOCOL_ENTRY6 (method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6)
-
-#define END_PROTOCOL_ENTRY_HEAVY \
-       END_PROTOCOL_ENTRY
-#else
-#define BEGIN_PROTOCOL_ENTRY_HEAVY0(method)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY1(method,t1,f1)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY2(method,t1,f1,t2,f2)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY3(method,t1,f1,t2,f2,t3,f3)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY4(method,t1,f1,t2,f2,t3,f3,t4,f4)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6)
-
-#define END_PROTOCOL_ENTRY_HEAVY
-#endif
-
-#include "sgen-protocol-def.h"
-
-#undef TYPE_INT
-#undef TYPE_LONGLONG
-#undef TYPE_SIZE
-#undef TYPE_POINTER
-#undef TYPE_BOOL
-
-#endif /* HAVE_SGEN_GC */
diff --git a/mono/metadata/sgen-protocol.h b/mono/metadata/sgen-protocol.h
deleted file mode 100644 (file)
index 2b7176e..0000000
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * sgen-protocol.h: Binary protocol of internal activity, to aid
- * debugging.
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __MONO_SGENPROTOCOL_H__
-#define __MONO_SGENPROTOCOL_H__
-
-#include "sgen-gc.h"
-
-/* Special indices returned by MATCH_INDEX. */
-#define BINARY_PROTOCOL_NO_MATCH (-1)
-#define BINARY_PROTOCOL_MATCH (-2)
-
-#define PROTOCOL_ID(method) method ## _id
-#define PROTOCOL_STRUCT(method) method ## _struct
-#define CLIENT_PROTOCOL_NAME(method) sgen_client_ ## method
-
-#define TYPE_INT int
-#define TYPE_LONGLONG long long
-#define TYPE_SIZE size_t
-#define TYPE_POINTER gpointer
-#define TYPE_BOOL gboolean
-
-enum {
-#define BEGIN_PROTOCOL_ENTRY0(method) PROTOCOL_ID(method),
-#define BEGIN_PROTOCOL_ENTRY1(method,t1,f1) PROTOCOL_ID(method),
-#define BEGIN_PROTOCOL_ENTRY2(method,t1,f1,t2,f2) PROTOCOL_ID(method),
-#define BEGIN_PROTOCOL_ENTRY3(method,t1,f1,t2,f2,t3,f3) PROTOCOL_ID(method),
-#define BEGIN_PROTOCOL_ENTRY4(method,t1,f1,t2,f2,t3,f3,t4,f4) PROTOCOL_ID(method),
-#define BEGIN_PROTOCOL_ENTRY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) PROTOCOL_ID(method),
-#define BEGIN_PROTOCOL_ENTRY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) PROTOCOL_ID(method),
-#define BEGIN_PROTOCOL_ENTRY_HEAVY0(method) PROTOCOL_ID(method),
-#define BEGIN_PROTOCOL_ENTRY_HEAVY1(method,t1,f1) PROTOCOL_ID(method),
-#define BEGIN_PROTOCOL_ENTRY_HEAVY2(method,t1,f1,t2,f2) PROTOCOL_ID(method),
-#define BEGIN_PROTOCOL_ENTRY_HEAVY3(method,t1,f1,t2,f2,t3,f3) PROTOCOL_ID(method),
-#define BEGIN_PROTOCOL_ENTRY_HEAVY4(method,t1,f1,t2,f2,t3,f3,t4,f4) PROTOCOL_ID(method),
-#define BEGIN_PROTOCOL_ENTRY_HEAVY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) PROTOCOL_ID(method),
-#define BEGIN_PROTOCOL_ENTRY_HEAVY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) PROTOCOL_ID(method),
-
-#define FLUSH()
-
-#define DEFAULT_PRINT()
-#define CUSTOM_PRINT(_)
-
-#define IS_ALWAYS_MATCH(_)
-#define MATCH_INDEX(_)
-#define IS_VTABLE_MATCH(_)
-
-#define END_PROTOCOL_ENTRY
-#define END_PROTOCOL_ENTRY_HEAVY
-
-#include "sgen-protocol-def.h"
-};
-
-#define BEGIN_PROTOCOL_ENTRY0(method)
-#define BEGIN_PROTOCOL_ENTRY1(method,t1,f1) \
-       typedef struct { \
-               t1 f1; \
-       } PROTOCOL_STRUCT(method);
-#define BEGIN_PROTOCOL_ENTRY2(method,t1,f1,t2,f2) \
-       typedef struct { \
-               t1 f1; \
-               t2 f2; \
-       } PROTOCOL_STRUCT(method);
-#define BEGIN_PROTOCOL_ENTRY3(method,t1,f1,t2,f2,t3,f3) \
-       typedef struct { \
-               t1 f1; \
-               t2 f2; \
-               t3 f3; \
-       } PROTOCOL_STRUCT(method);
-#define BEGIN_PROTOCOL_ENTRY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
-       typedef struct { \
-               t1 f1; \
-               t2 f2; \
-               t3 f3; \
-               t4 f4; \
-       } PROTOCOL_STRUCT(method);
-#define BEGIN_PROTOCOL_ENTRY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
-       typedef struct { \
-               t1 f1; \
-               t2 f2; \
-               t3 f3; \
-               t4 f4; \
-               t5 f5; \
-       } PROTOCOL_STRUCT(method);
-#define BEGIN_PROTOCOL_ENTRY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
-       typedef struct { \
-               t1 f1; \
-               t2 f2; \
-               t3 f3; \
-               t4 f4; \
-               t5 f5; \
-               t6 f6; \
-       } PROTOCOL_STRUCT(method);
-
-#define BEGIN_PROTOCOL_ENTRY_HEAVY0(method) \
-       BEGIN_PROTOCOL_ENTRY0 (method)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY1(method,t1,f1) \
-       BEGIN_PROTOCOL_ENTRY1 (method,t1,f1)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY2(method,t1,f1,t2,f2) \
-       BEGIN_PROTOCOL_ENTRY2 (method,t1,f1,t2,f2)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY3(method,t1,f1,t2,f2,t3,f3) \
-       BEGIN_PROTOCOL_ENTRY3 (method,t1,f1,t2,f2,t3,f3)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
-       BEGIN_PROTOCOL_ENTRY4 (method,t1,f1,t2,f2,t3,f3,t4,f4)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
-       BEGIN_PROTOCOL_ENTRY5 (method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
-       BEGIN_PROTOCOL_ENTRY6 (method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6)
-
-#define FLUSH()
-
-#define DEFAULT_PRINT()
-#define CUSTOM_PRINT(_)
-
-#define IS_ALWAYS_MATCH(_)
-#define MATCH_INDEX(_)
-#define IS_VTABLE_MATCH(_)
-
-#define END_PROTOCOL_ENTRY
-#define END_PROTOCOL_ENTRY_HEAVY
-
-#include "sgen-protocol-def.h"
-
-/* missing: finalizers, roots, non-store wbarriers */
-
-void binary_protocol_init (const char *filename, long long limit);
-gboolean binary_protocol_is_enabled (void);
-
-void binary_protocol_flush_buffers (gboolean force);
-
-#define BEGIN_PROTOCOL_ENTRY0(method) \
-       void method (void);
-#define BEGIN_PROTOCOL_ENTRY1(method,t1,f1) \
-       void method (t1 f1);
-#define BEGIN_PROTOCOL_ENTRY2(method,t1,f1,t2,f2) \
-       void method (t1 f1, t2 f2);
-#define BEGIN_PROTOCOL_ENTRY3(method,t1,f1,t2,f2,t3,f3) \
-       void method (t1 f1, t2 f2, t3 f3);
-#define BEGIN_PROTOCOL_ENTRY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
-       void method (t1 f1, t2 f2, t3 f3, t4 f4);
-#define BEGIN_PROTOCOL_ENTRY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
-       void method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5);
-#define BEGIN_PROTOCOL_ENTRY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
-       void method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5, t6 f6);
-
-#ifdef SGEN_HEAVY_BINARY_PROTOCOL
-#define binary_protocol_is_heavy_enabled()     binary_protocol_is_enabled ()
-
-#define BEGIN_PROTOCOL_ENTRY_HEAVY0(method) \
-       BEGIN_PROTOCOL_ENTRY0 (method)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY1(method,t1,f1) \
-       BEGIN_PROTOCOL_ENTRY1 (method,t1,f1)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY2(method,t1,f1,t2,f2) \
-       BEGIN_PROTOCOL_ENTRY2 (method,t1,f1,t2,f2)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY3(method,t1,f1,t2,f2,t3,f3) \
-       BEGIN_PROTOCOL_ENTRY3 (method,t1,f1,t2,f2,t3,f3)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
-       BEGIN_PROTOCOL_ENTRY4 (method,t1,f1,t2,f2,t3,f3,t4,f4)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
-       BEGIN_PROTOCOL_ENTRY5 (method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5)
-#define BEGIN_PROTOCOL_ENTRY_HEAVY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
-       BEGIN_PROTOCOL_ENTRY6 (method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6)
-#else
-#define binary_protocol_is_heavy_enabled()     FALSE
-
-#define BEGIN_PROTOCOL_ENTRY_HEAVY0(method) \
-       static inline void method (void) {}
-#define BEGIN_PROTOCOL_ENTRY_HEAVY1(method,t1,f1) \
-       static inline void method (t1 f1) {}
-#define BEGIN_PROTOCOL_ENTRY_HEAVY2(method,t1,f1,t2,f2) \
-       static inline void method (t1 f1, t2 f2) {}
-#define BEGIN_PROTOCOL_ENTRY_HEAVY3(method,t1,f1,t2,f2,t3,f3) \
-       static inline void method (t1 f1, t2 f2, t3 f3) {}
-#define BEGIN_PROTOCOL_ENTRY_HEAVY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
-       static inline void method (t1 f1, t2 f2, t3 f3, t4 f4) {}
-#define BEGIN_PROTOCOL_ENTRY_HEAVY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
-       static inline void method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5) {}
-#define BEGIN_PROTOCOL_ENTRY_HEAVY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
-       static inline void method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5, t6 f6) {}
-#endif
-
-#define FLUSH()
-
-#define DEFAULT_PRINT()
-#define CUSTOM_PRINT(_)
-
-#define IS_ALWAYS_MATCH(_)
-#define MATCH_INDEX(_)
-#define IS_VTABLE_MATCH(_)
-
-#define END_PROTOCOL_ENTRY
-#define END_PROTOCOL_ENTRY_HEAVY
-
-#include "sgen-protocol-def.h"
-
-#undef TYPE_INT
-#undef TYPE_LONGLONG
-#undef TYPE_SIZE
-#undef TYPE_POINTER
-#undef TYPE_BOOL
-
-#endif
diff --git a/mono/metadata/sgen-qsort.c b/mono/metadata/sgen-qsort.c
deleted file mode 100644 (file)
index 1290e04..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * sgen-qsort.c: Quicksort.
- *
- * Copyright (C) 2013 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "config.h"
-
-#ifdef HAVE_SGEN_GC
-
-#include "metadata/sgen-gc.h"
-
-#define ELEM(i)                (((unsigned char*)base) + ((i) * width))
-#define SWAP(i,j)      do {                                    \
-               size_t __i = (i), __j = (j);                    \
-               if (__i != __j) {                               \
-                       memcpy (swap_tmp, ELEM (__i), width);   \
-                       memcpy (ELEM (__i), ELEM (__j), width); \
-                       memcpy (ELEM (__j), swap_tmp, width);   \
-               }                                               \
-       } while (0)
-
-static size_t
-partition (void *base, size_t nel, size_t width, int (*compar) (const void*, const void*), unsigned char *pivot_tmp, unsigned char *swap_tmp)
-{
-       size_t pivot_idx = nel >> 1;
-       size_t s, i;
-
-       memcpy (pivot_tmp, ELEM (pivot_idx), width);
-       SWAP (pivot_idx, nel - 1);
-       s = 0;
-       for (i = 0; i < nel - 1; ++i) {
-               if (compar (ELEM (i), pivot_tmp) <= 0) {
-                       SWAP (i, s);
-                       ++s;
-               }
-       }
-       SWAP (s, nel - 1);
-       return s;
-}
-
-static void
-qsort_rec (void *base, size_t nel, size_t width, int (*compar) (const void*, const void*), unsigned char *pivot_tmp, unsigned char *swap_tmp)
-{
-       size_t pivot_idx;
-
-       if (nel <= 1)
-               return;
-
-       pivot_idx = partition (base, nel, width, compar, pivot_tmp, swap_tmp);
-       qsort_rec (base, pivot_idx, width, compar, pivot_tmp, swap_tmp);
-       if (pivot_idx < nel)
-               qsort_rec (ELEM (pivot_idx + 1), nel - pivot_idx - 1, width, compar, pivot_tmp, swap_tmp);
-}
-
-void
-sgen_qsort (void *base, size_t nel, size_t width, int (*compar) (const void*, const void*))
-{
-#ifndef _MSC_VER
-       unsigned char pivot_tmp [width];
-       unsigned char swap_tmp [width];
-#else
-       unsigned char* pivot_tmp = (unsigned char*) alloca(width);
-       unsigned char* swap_tmp = (unsigned char*) alloca(width);
-#endif
-
-       qsort_rec (base, nel, width, compar, pivot_tmp, swap_tmp);
-}
-
-#endif
diff --git a/mono/metadata/sgen-qsort.h b/mono/metadata/sgen-qsort.h
deleted file mode 100644 (file)
index 75577e5..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * sgen-qsort.h: Fast inline sorting
- *
- * Copyright (C) 2014 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#ifndef __MONO_SGENQSORT_H__
-#define __MONO_SGENQSORT_H__
-
-#define DEF_QSORT_INLINE(NAME,ARRAY_TYPE,COMPARE_FUN)  \
-static size_t partition_##NAME (ARRAY_TYPE base[], size_t nel) {       \
-       size_t pivot_idx = nel >> 1;    \
-       size_t s, i;    \
-       ARRAY_TYPE pivot = base [pivot_idx];    \
-       { ARRAY_TYPE tmp = base [pivot_idx]; base [pivot_idx] = base [nel - 1]; base [nel - 1] = tmp; } \
-       s = 0;  \
-       for (i = 0; i < nel - 1; ++i) { \
-               if (COMPARE_FUN (base [i], pivot) <= 0) {       \
-                       { ARRAY_TYPE tmp = base [i]; base [i] = base [s]; base [s] = tmp; }     \
-                       ++s;    \
-               }       \
-       }       \
-       { ARRAY_TYPE tmp = base [s]; base [s] = base [nel - 1]; base [nel - 1] = tmp; } \
-       return s;       \
-}      \
-static void rec_##NAME (ARRAY_TYPE base[], size_t nel) {       \
-       size_t pivot_idx;       \
-       if (nel <= 1)   \
-               return; \
-       pivot_idx = partition_##NAME (base, nel); \
-       rec_##NAME (base, pivot_idx);   \
-       if (pivot_idx < nel)    \
-               rec_##NAME (&base[pivot_idx + 1], nel - pivot_idx - 1); \
-}      \
-static void qsort_##NAME (ARRAY_TYPE base[], size_t nel) {     \
-       rec_##NAME (base, nel); \
-}      \
-
-
-#endif
diff --git a/mono/metadata/sgen-scan-object.h b/mono/metadata/sgen-scan-object.h
deleted file mode 100644 (file)
index 9d1611c..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * sgen-scan-object.h: Generic object scan.
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright (C) 2013 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * Scans one object, using the OBJ_XXX macros.  The start of the
- * object must be given in the variable "char* start".  Afterwards,
- * "start" will point to the start of the next object, if the scanned
- * object contained references.  If not, the value of "start" should
- * be considered undefined after executing this code.  The object's
- * GC descriptor must be in the variable "mword desc".
- *
- * The macro `HANDLE_PTR` will be invoked for every reference encountered while scanning the
- * object.  It is called with two parameters: The pointer to the reference (not the
- * reference itself!) as well as the pointer to the scanned object.
- *
- * Modifiers (automatically undefined):
- *
- * SCAN_OBJECT_NOSCAN - if defined, don't actually scan the object,
- * i.e. don't invoke the OBJ_XXX macros.
- *
- * SCAN_OBJECT_NOVTABLE - desc is provided by the includer, instead of
- * vt.  Complex arrays cannot not be scanned.
- *
- * SCAN_OBJECT_PROTOCOL - if defined, binary protocol the scan.
- * Should only be used for scanning that's done for the actual
- * collection, not for debugging scans.
- */
-
-{
-#ifndef SCAN_OBJECT_NOVTABLE
-#if defined(SGEN_HEAVY_BINARY_PROTOCOL) && defined(SCAN_OBJECT_PROTOCOL)
-       binary_protocol_scan_begin (start, SGEN_LOAD_VTABLE (start), sgen_safe_object_get_size ((GCObject*)start));
-#endif
-#else
-#if defined(SGEN_HEAVY_BINARY_PROTOCOL) && defined(SCAN_OBJECT_PROTOCOL)
-       binary_protocol_scan_vtype_begin (start + SGEN_CLIENT_OBJECT_HEADER_SIZE, size);
-#endif
-#endif
-       switch (desc & DESC_TYPE_MASK) {
-       case DESC_TYPE_RUN_LENGTH:
-#define SCAN OBJ_RUN_LEN_FOREACH_PTR (desc, start)
-#ifndef SCAN_OBJECT_NOSCAN
-               SCAN;
-#endif
-#undef SCAN
-               break;
-       case DESC_TYPE_VECTOR:
-#define SCAN OBJ_VECTOR_FOREACH_PTR (desc, start)
-#ifndef SCAN_OBJECT_NOSCAN
-               SCAN;
-#endif
-#undef SCAN
-               break;
-       case DESC_TYPE_BITMAP:
-#define SCAN OBJ_BITMAP_FOREACH_PTR (desc, start)
-#ifndef SCAN_OBJECT_NOSCAN
-               SCAN;
-#endif
-#undef SCAN
-               break;
-       case DESC_TYPE_COMPLEX:
-               /* this is a complex object */
-#define SCAN OBJ_COMPLEX_FOREACH_PTR (desc, start)
-#ifndef SCAN_OBJECT_NOSCAN
-               SCAN;
-#endif
-#undef SCAN
-               break;
-#ifndef SCAN_OBJECT_NOVTABLE
-       case DESC_TYPE_COMPLEX_ARR:
-               /* this is an array of complex structs */
-#define SCAN OBJ_COMPLEX_ARR_FOREACH_PTR (desc, start)
-#ifndef SCAN_OBJECT_NOSCAN
-               SCAN;
-#endif
-#undef SCAN
-               break;
-#endif
-       case DESC_TYPE_SMALL_PTRFREE:
-       case DESC_TYPE_COMPLEX_PTRFREE:
-               /*Nothing to do*/
-               break;
-       default:
-               g_assert_not_reached ();
-       }
-}
-
-#undef SCAN_OBJECT_NOSCAN
-#undef SCAN_OBJECT_NOVTABLE
-#undef SCAN_OBJECT_PROTOCOL
diff --git a/mono/metadata/sgen-simple-nursery.c b/mono/metadata/sgen-simple-nursery.c
deleted file mode 100644 (file)
index 86ab9c3..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * sgen-simple-nursery.c: Simple always promote nursery.
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "config.h"
-#ifdef HAVE_SGEN_GC
-
-#include <string.h>
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/sgen-protocol.h"
-#include "mono/metadata/sgen-layout-stats.h"
-#include "mono/metadata/sgen-client.h"
-
-static inline char*
-alloc_for_promotion (GCVTable *vtable, char *obj, size_t objsize, gboolean has_references)
-{
-       return major_collector.alloc_object (vtable, objsize, has_references);
-}
-
-static SgenFragment*
-build_fragments_get_exclude_head (void)
-{
-       return NULL;
-}
-
-static void
-build_fragments_release_exclude_head (void)
-{
-}
-
-static void
-build_fragments_finish (SgenFragmentAllocator *allocator)
-{
-}
-
-static void
-prepare_to_space (char *to_space_bitmap, size_t space_bitmap_size)
-{
-}
-
-static void
-clear_fragments (void)
-{      
-}
-
-static void
-init_nursery (SgenFragmentAllocator *allocator, char *start, char *end)
-{
-       sgen_fragment_allocator_add (allocator, start, end);
-}
-
-
-/******************************************Copy/Scan functins ************************************************/
-
-#define SGEN_SIMPLE_NURSERY
-
-#define SERIAL_COPY_OBJECT simple_nursery_serial_copy_object
-#define SERIAL_COPY_OBJECT_FROM_OBJ simple_nursery_serial_copy_object_from_obj
-
-#include "sgen-minor-copy-object.h"
-#include "sgen-minor-scan-object.h"
-
-void
-sgen_simple_nursery_init (SgenMinorCollector *collector)
-{
-       collector->is_split = FALSE;
-
-       collector->alloc_for_promotion = alloc_for_promotion;
-
-       collector->prepare_to_space = prepare_to_space;
-       collector->clear_fragments = clear_fragments;
-       collector->build_fragments_get_exclude_head = build_fragments_get_exclude_head;
-       collector->build_fragments_release_exclude_head = build_fragments_release_exclude_head;
-       collector->build_fragments_finish = build_fragments_finish;
-       collector->init_nursery = init_nursery;
-
-       FILL_MINOR_COLLECTOR_COPY_OBJECT (collector);
-       FILL_MINOR_COLLECTOR_SCAN_OBJECT (collector);
-}
-
-
-#endif
diff --git a/mono/metadata/sgen-split-nursery.c b/mono/metadata/sgen-split-nursery.c
deleted file mode 100644 (file)
index 72f9356..0000000
+++ /dev/null
@@ -1,457 +0,0 @@
-/*
- * sgen-splliy-nursery.c: 3-space based nursery collector.
- *
- * Author:
- *     Rodrigo Kumpera Kumpera <kumpera@gmail.com>
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright 2011-2012 Xamarin Inc (http://www.xamarin.com)
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "config.h"
-#ifdef HAVE_SGEN_GC
-
-#include <string.h>
-#include <stdlib.h>
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/sgen-protocol.h"
-#include "mono/metadata/sgen-layout-stats.h"
-#include "mono/metadata/sgen-client.h"
-#include "mono/utils/mono-memory-model.h"
-
-/*
-The nursery is logically divided into 3 spaces: Allocator space and two Survivor spaces.
-
-Objects are born (allocated by the mutator) in the Allocator Space.
-
-The Survivor spaces are divided in a copying collector style From and To spaces.
-The hole of each space switch on each collection.
-
-On each collection we process objects from the nursery this way:
-Objects from the Allocator Space are evacuated into the To Space.
-Objects from the Survivor From Space are evacuated into the old generation.
-
-
-The nursery is physically divided in two parts, set by the promotion barrier.
-
-The Allocator Space takes the botton part of the nursery.
-
-The Survivor spaces are intermingled in the top part of the nursery. It's done
-this way since the required size for the To Space depends on the survivor rate
-of objects from the Allocator Space. 
-
-During a collection when the object scan function see a nursery object it must
-determine if the object needs to be evacuated or left in place. Originally, this
-check was done by checking if a forwarding pointer is installed, but now an object
-can be in the To Space, it won't have a forwarding pointer and it must be left in place.
-
-In order to solve that we classify nursery memory been either in the From Space or in
-the To Space. Since the Allocator Space has the same behavior as the Survivor From Space
-they are unified for this purpoise - a bit confusing at first.
-
-This from/to classification is done on a larger granule than object to make the check efficient
-and, due to that, we must make sure that all fragemnts used to allocate memory from the To Space
-are naturally aligned in both ends to that granule to avoid wronly classifying a From Space object.
-
-TODO:
--The promotion barrier is statically defined to 50% of the nursery, it should be dinamically adjusted based
-on survival rates;
--We apply the same promotion policy to all objects, finalizable ones should age longer in the nursery;
--We apply the same promotion policy to all stages of a collection, maybe we should promote more aggressively
-objects from non-stack roots, specially those found in the remembered set;
--Fix our major collection trigger to happen before we do a minor GC and collect the nursery only once.
--Make the serial fragment allocator fast path inlineable
--Make aging threshold be based on survival rates and survivor occupancy;
--Change promotion barrier to be size and not address based;
--Pre allocate memory for young ages to make sure that on overflow only the older suffer;
--Get rid of par_alloc_buffer_refill_mutex so to the parallel collection of the nursery doesn't suck;
-*/
-
-/*FIXME Move this to a separate header. */
-#define _toi(ptr) ((size_t)ptr)
-#define make_ptr_mask(bits) ((1 << bits) - 1)
-#define align_down(ptr, bits) ((void*)(_toi(ptr) & ~make_ptr_mask (bits)))
-#define align_up(ptr, bits) ((void*) ((_toi(ptr) + make_ptr_mask (bits)) & ~make_ptr_mask (bits)))
-
-/*
-Even though the effective max age is 255, aging that much doesn't make sense.
-It might even make sense to use nimbles for age recording.
-*/
-#define MAX_AGE 15
-
-/*
- * Each age has its allocation buffer.  Whenever an object is to be
- * aged we try to fit it into its new age's allocation buffer.  If
- * that is not possible we get new space from the fragment allocator
- * and set the allocation buffer to that space (minus the space
- * required for the object).
- */
-
-typedef struct {
-       char *next;
-       char *end;
-} AgeAllocationBuffer;
-
-/* Limits the ammount of memory the mutator can have. */
-static char *promotion_barrier;
-
-/*
-Promotion age and alloc ratio are the two nursery knobs to control
-how much effort we want to spend on young objects.
-
-Allocation ratio should be the inverse of the expected survivor rate.
-The more objects surviver, the smaller the alloc ratio much be so we can
-age all objects.
-
-Promote age depends on how much effort we want to spend aging objects before
-we promote them to the old generation. If addional ages don't somewhat improve
-mortality, it's better avoid as they increase the cost of minor collections.
-
-*/
-
-
-/*
-If we're evacuating an object with this age or more, promote it.
-Age is the number of surviving collections of an object.
-*/
-static int promote_age = 2;
-
-/*
-Initial ratio of allocation and survivor spaces.
-This should be read as the fraction of the whole nursery dedicated
-for the allocator space.
-*/
-static float alloc_ratio = 60.f/100.f;
-
-
-static char *region_age;
-static size_t region_age_size;
-static AgeAllocationBuffer age_alloc_buffers [MAX_AGE];
-
-/* The collector allocs from here. */
-static SgenFragmentAllocator collector_allocator;
-
-static inline int
-get_object_age (char *object)
-{
-       size_t idx = (object - sgen_nursery_start) >> SGEN_TO_SPACE_GRANULE_BITS;
-       return region_age [idx];
-}
-
-static void
-set_age_in_range (char *start, char *end, int age)
-{
-       char *region_start;
-       size_t region_idx, length;
-       region_idx = (start - sgen_nursery_start) >> SGEN_TO_SPACE_GRANULE_BITS;
-       region_start = &region_age [region_idx];
-       length = (end - start) >> SGEN_TO_SPACE_GRANULE_BITS;
-       memset (region_start, age, length);
-}
-
-static inline void
-mark_bit (char *space_bitmap, char *pos)
-{
-       size_t idx = (pos - sgen_nursery_start) >> SGEN_TO_SPACE_GRANULE_BITS;
-       size_t byte = idx / 8;
-       int bit = idx & 0x7;
-
-       g_assert (byte < sgen_space_bitmap_size);
-       space_bitmap [byte] |= 1 << bit;
-}
-
-static void
-mark_bits_in_range (char *space_bitmap, char *start, char *end)
-{
-       start = align_down (start, SGEN_TO_SPACE_GRANULE_BITS);
-       end = align_up (end, SGEN_TO_SPACE_GRANULE_BITS);
-
-       for (;start < end; start += SGEN_TO_SPACE_GRANULE_IN_BYTES)
-               mark_bit (space_bitmap, start);
-}
-
-/*
- * This splits the fragments at the point of the promotion barrier.
- * Two allocator are actually involved here: The mutator allocator and
- * the collector allocator.  This function is called with the
- * collector, but it's a copy of the mutator allocator and contains
- * all the fragments in the nursery.  The fragments below the
- * promotion barrier are left with the mutator allocator and the ones
- * above are put into the collector allocator.
- */
-static void
-fragment_list_split (SgenFragmentAllocator *allocator)
-{
-       SgenFragment *prev = NULL, *list = allocator->region_head;
-
-       while (list) {
-               if (list->fragment_end > promotion_barrier) {
-                       if (list->fragment_start < promotion_barrier) {
-                               SgenFragment *res = sgen_fragment_allocator_alloc ();
-
-                               res->fragment_start = promotion_barrier;
-                               res->fragment_next = promotion_barrier;
-                               res->fragment_end = list->fragment_end;
-                               res->next = list->next;
-                               res->next_in_order = list->next_in_order;
-                               g_assert (res->fragment_end > res->fragment_start);
-
-                               list->fragment_end = promotion_barrier;
-                               list->next = list->next_in_order = NULL;
-                               set_age_in_range (list->fragment_start, list->fragment_end, 0);
-
-                               allocator->region_head = allocator->alloc_head = res;
-                               return;
-                       } else {
-                               if (prev)
-                                       prev->next = prev->next_in_order = NULL;
-                               allocator->region_head = allocator->alloc_head = list;
-                               return;
-                       }
-               }
-               set_age_in_range (list->fragment_start, list->fragment_end, 0);
-               prev = list;
-               list = list->next;
-       }
-       allocator->region_head = allocator->alloc_head = NULL;
-}
-
-/******************************************Minor Collector API ************************************************/
-
-#define AGE_ALLOC_BUFFER_MIN_SIZE SGEN_TO_SPACE_GRANULE_IN_BYTES
-#define AGE_ALLOC_BUFFER_DESIRED_SIZE (SGEN_TO_SPACE_GRANULE_IN_BYTES * 8)
-
-static char*
-alloc_for_promotion_slow_path (int age, size_t objsize)
-{
-       char *p;
-       size_t allocated_size;
-       size_t aligned_objsize = (size_t)align_up (objsize, SGEN_TO_SPACE_GRANULE_BITS);
-
-       p = sgen_fragment_allocator_serial_range_alloc (
-               &collector_allocator,
-               MAX (aligned_objsize, AGE_ALLOC_BUFFER_DESIRED_SIZE),
-               MAX (aligned_objsize, AGE_ALLOC_BUFFER_MIN_SIZE),
-               &allocated_size);
-       if (p) {
-               set_age_in_range (p, p + allocated_size, age);
-               sgen_clear_range (age_alloc_buffers [age].next, age_alloc_buffers [age].end);
-               age_alloc_buffers [age].next = p + objsize;
-               age_alloc_buffers [age].end = p + allocated_size;
-       }
-       return p;
-}
-
-static inline char*
-alloc_for_promotion (GCVTable *vtable, char *obj, size_t objsize, gboolean has_references)
-{
-       char *p = NULL;
-       int age;
-
-       age = get_object_age (obj);
-       if (age >= promote_age)
-               return major_collector.alloc_object (vtable, objsize, has_references);
-
-       /* Promote! */
-       ++age;
-
-       p = age_alloc_buffers [age].next;
-       if (G_LIKELY (p + objsize <= age_alloc_buffers [age].end)) {
-        age_alloc_buffers [age].next += objsize;
-       } else {
-               p = alloc_for_promotion_slow_path (age, objsize);
-               if (!p)
-                       return major_collector.alloc_object (vtable, objsize, has_references);
-       }
-
-       /* FIXME: assumes object layout */
-       *(GCVTable**)p = vtable;
-
-       return p;
-}
-
-static char*
-minor_alloc_for_promotion (GCVTable *vtable, char *obj, size_t objsize, gboolean has_references)
-{
-       /*
-       We only need to check for a non-nursery object if we're doing a major collection.
-       */
-       if (!sgen_ptr_in_nursery (obj))
-               return major_collector.alloc_object (vtable, objsize, has_references);
-
-       return alloc_for_promotion (vtable, obj, objsize, has_references);
-}
-
-static SgenFragment*
-build_fragments_get_exclude_head (void)
-{
-       int i;
-       for (i = 0; i < MAX_AGE; ++i) {
-               /*If we OOM'd on the last collection ->end might be null while ->next not.*/
-               if (age_alloc_buffers [i].end)
-                       sgen_clear_range (age_alloc_buffers [i].next, age_alloc_buffers [i].end);
-       }
-
-       return collector_allocator.region_head;
-}
-
-static void
-build_fragments_release_exclude_head (void)
-{
-       sgen_fragment_allocator_release (&collector_allocator);
-}
-
-static void
-build_fragments_finish (SgenFragmentAllocator *allocator)
-{
-       /* We split the fragment list based on the promotion barrier. */
-       collector_allocator = *allocator;
-       fragment_list_split (&collector_allocator);
-}
-
-static void
-prepare_to_space (char *to_space_bitmap, size_t space_bitmap_size)
-{
-       SgenFragment **previous, *frag;
-
-       memset (to_space_bitmap, 0, space_bitmap_size);
-       memset (age_alloc_buffers, 0, sizeof (age_alloc_buffers));
-
-       previous = &collector_allocator.alloc_head;
-
-       for (frag = *previous; frag; frag = *previous) {
-               char *start = align_up (frag->fragment_next, SGEN_TO_SPACE_GRANULE_BITS);
-               char *end = align_down (frag->fragment_end, SGEN_TO_SPACE_GRANULE_BITS);
-
-               /* Fragment is too small to be usable. */
-               if ((end - start) < SGEN_MAX_NURSERY_WASTE) {
-                       sgen_clear_range (frag->fragment_next, frag->fragment_end);
-                       frag->fragment_next = frag->fragment_end = frag->fragment_start;
-                       *previous = frag->next;
-                       continue;
-               }
-
-               /*
-               We need to insert 3 phony objects so the fragments build step can correctly
-               walk the nursery.
-               */
-
-               /* Clean the fragment range. */
-               sgen_clear_range (start, end);
-               /* We need a phony object in between the original fragment start and the effective one. */
-               if (start != frag->fragment_next)
-                       sgen_clear_range (frag->fragment_next, start);
-               /* We need an phony object in between the new fragment end and the original fragment end. */
-               if (end != frag->fragment_end)
-                       sgen_clear_range (end, frag->fragment_end);
-
-               frag->fragment_start = frag->fragment_next = start;
-               frag->fragment_end = end;
-               mark_bits_in_range (to_space_bitmap, start, end);
-               previous = &frag->next;
-       }
-}
-
-static void
-clear_fragments (void)
-{
-       sgen_clear_allocator_fragments (&collector_allocator);
-}
-
-static void
-init_nursery (SgenFragmentAllocator *allocator, char *start, char *end)
-{
-       int alloc_quote = (int)((end - start) * alloc_ratio);
-       promotion_barrier = align_down (start + alloc_quote, 3);
-       sgen_fragment_allocator_add (allocator, start, promotion_barrier);
-       sgen_fragment_allocator_add (&collector_allocator, promotion_barrier, end);
-
-       region_age_size = (end - start) >> SGEN_TO_SPACE_GRANULE_BITS;
-       region_age = g_malloc0 (region_age_size);
-}
-
-static gboolean
-handle_gc_param (const char *opt)
-{
-       if (g_str_has_prefix (opt, "alloc-ratio=")) {
-               const char *arg = strchr (opt, '=') + 1;
-               int percentage = atoi (arg);
-               if (percentage < 1 || percentage > 100) {
-                       fprintf (stderr, "alloc-ratio must be an integer in the range 1-100.\n");
-                       exit (1);
-               }
-               alloc_ratio = (float)percentage / 100.0f;
-               return TRUE;
-       }
-
-       if (g_str_has_prefix (opt, "promotion-age=")) {
-               const char *arg = strchr (opt, '=') + 1;
-               promote_age = atoi (arg);
-               if (promote_age < 1 || promote_age >= MAX_AGE) {
-                       fprintf (stderr, "promotion-age must be an integer in the range 1-%d.\n", MAX_AGE - 1);
-                       exit (1);
-               }
-               return TRUE;
-       }
-       return FALSE;
-}
-
-static void
-print_gc_param_usage (void)
-{
-       fprintf (stderr,
-                       ""
-                       "  alloc-ratio=P (where P is a percentage, an integer in 1-100)\n"
-                       "  promotion-age=P (where P is a number, an integer in 1-%d)\n",
-                       MAX_AGE - 1
-                       );
-}
-
-/******************************************Copy/Scan functins ************************************************/
-
-#define SGEN_SPLIT_NURSERY
-
-#define SERIAL_COPY_OBJECT split_nursery_serial_copy_object
-#define SERIAL_COPY_OBJECT_FROM_OBJ split_nursery_serial_copy_object_from_obj
-
-#include "sgen-minor-copy-object.h"
-#include "sgen-minor-scan-object.h"
-
-void
-sgen_split_nursery_init (SgenMinorCollector *collector)
-{
-       collector->is_split = TRUE;
-
-       collector->alloc_for_promotion = minor_alloc_for_promotion;
-
-       collector->prepare_to_space = prepare_to_space;
-       collector->clear_fragments = clear_fragments;
-       collector->build_fragments_get_exclude_head = build_fragments_get_exclude_head;
-       collector->build_fragments_release_exclude_head = build_fragments_release_exclude_head;
-       collector->build_fragments_finish = build_fragments_finish;
-       collector->init_nursery = init_nursery;
-       collector->handle_gc_param = handle_gc_param;
-       collector->print_gc_param_usage = print_gc_param_usage;
-
-       FILL_MINOR_COLLECTOR_COPY_OBJECT (collector);
-       FILL_MINOR_COLLECTOR_SCAN_OBJECT (collector);
-}
-
-
-#endif
index 35d7b9d329d176c809f4f69b4a39d71acc7f3f2e..bc899f5d59c2fe1ebfda9eadfa9e99e890112ff9 100644 (file)
 #include "config.h"
 #ifdef HAVE_SGEN_GC
 
-#include "metadata/sgen-gc.h"
-#include "metadata/sgen-protocol.h"
-#include "metadata/sgen-memory-governor.h"
-#include "metadata/sgen-thread-pool.h"
+#include "sgen/sgen-gc.h"
+#include "sgen/sgen-protocol.h"
+#include "sgen/sgen-memory-governor.h"
+#include "sgen/sgen-thread-pool.h"
 #include "metadata/profiler-private.h"
-#include "metadata/sgen-client.h"
+#include "sgen/sgen-client.h"
 #include "metadata/sgen-bridge-internal.h"
 #include "metadata/gc-internal.h"
 
diff --git a/mono/metadata/sgen-tagged-pointer.h b/mono/metadata/sgen-tagged-pointer.h
deleted file mode 100644 (file)
index 2d55abb..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * sgen-tagged-pointer.h: Macros for tagging and untagging pointers.
- *
- * Copyright (C) 2014 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __MONO_SGEN_TAGGED_POINTER_H__
-#define __MONO_SGEN_TAGGED_POINTER_H__
-
-#define SGEN_TAGGED_POINTER_MASK       7
-
-#define SGEN_POINTER_IS_TAGGED_1(p)    ((mword)(p) & 1)
-#define SGEN_POINTER_TAG_1(p)          ((void*)((mword)(p) | 1))
-#define SGEN_POINTER_UNTAG_1(p)                ((void*)((mword)(p) & ~1))
-
-#define SGEN_POINTER_IS_TAGGED_2(p)    ((mword)(p) & 2)
-#define SGEN_POINTER_TAG_2(p)          ((void*)((mword)(p) | 2))
-#define SGEN_POINTER_UNTAG_2(p)                ((void*)((mword)(p) & ~2))
-
-#define SGEN_POINTER_TAG_12(p)         ((mword)(p) & 3)
-#define SGEN_POINTER_SET_TAG_12(p,t)   ((void*)(((mword)(p) & ~3) | (t)))
-
-#define SGEN_POINTER_IS_TAGGED_4(p)    ((mword)(p) & 4)
-#define SGEN_POINTER_TAG_4(p)          ((void*)((mword)(p) | 4))
-#define SGEN_POINTER_UNTAG_4(p)                ((void*)((mword)(p) & ~4))
-
-#define SGEN_POINTER_UNTAG_12(p)       ((void*)((mword)(p) & ~3))
-#define SGEN_POINTER_UNTAG_24(p)       ((void*)((mword)(p) & ~6))
-
-#define SGEN_POINTER_IS_TAGGED_ANY(p)  ((mword)(p) & SGEN_TAGGED_POINTER_MASK)
-#define SGEN_POINTER_UNTAG_ALL(p)      ((void*)((mword)(p) & ~SGEN_TAGGED_POINTER_MASK))
-
-#endif
index 38518cf5416a62adcf157effd9af12684a8fc396..270b091e0efdca8a45345d0a9d48f738c64647d9 100644 (file)
 
 #include <stdlib.h>
 
-#include "sgen-gc.h"
+#include "sgen/sgen-gc.h"
 #include "sgen-bridge-internal.h"
-#include "sgen-hash-table.h"
-#include "sgen-qsort.h"
+#include "sgen/sgen-hash-table.h"
+#include "sgen/sgen-qsort.h"
 #include "tabledefs.h"
 #include "utils/mono-logger-internal.h"
 
@@ -664,7 +664,7 @@ push_all (ScanData *data)
        printf ("**scanning %p %s\n", obj, safe_name_bridge (obj));
 #endif
 
-       #include "sgen-scan-object.h"
+       #include "sgen/sgen-scan-object.h"
 }
 
 
@@ -712,7 +712,7 @@ compute_low (ScanData *data)
        char *start = (char*)obj;
        mword desc = sgen_obj_get_descriptor_safe (start);
 
-       #include "sgen-scan-object.h"
+       #include "sgen/sgen-scan-object.h"
 }
 
 static ColorData*
diff --git a/mono/metadata/sgen-thread-pool.c b/mono/metadata/sgen-thread-pool.c
deleted file mode 100644 (file)
index d716038..0000000
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * sgen-thread-pool.c: Threadpool for all concurrent GC work.
- *
- * Copyright (C) 2015 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "config.h"
-#ifdef HAVE_SGEN_GC
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/sgen-thread-pool.h"
-#include "mono/metadata/sgen-pointer-queue.h"
-#include "mono/utils/mono-mutex.h"
-#ifndef SGEN_WITHOUT_MONO
-#include "mono/utils/mono-threads.h"
-#endif
-
-static mono_mutex_t lock;
-static mono_cond_t work_cond;
-static mono_cond_t done_cond;
-
-static MonoNativeThreadId thread;
-
-/* Only accessed with the lock held. */
-static SgenPointerQueue job_queue;
-
-static SgenThreadPoolThreadInitFunc thread_init_func;
-static SgenThreadPoolIdleJobFunc idle_job_func;
-static SgenThreadPoolContinueIdleJobFunc continue_idle_job_func;
-
-enum {
-       STATE_WAITING,
-       STATE_IN_PROGRESS,
-       STATE_DONE
-};
-
-/* Assumes that the lock is held. */
-static SgenThreadPoolJob*
-get_job_and_set_in_progress (void)
-{
-       for (size_t i = 0; i < job_queue.next_slot; ++i) {
-               SgenThreadPoolJob *job = job_queue.data [i];
-               if (job->state == STATE_WAITING) {
-                       job->state = STATE_IN_PROGRESS;
-                       return job;
-               }
-       }
-       return NULL;
-}
-
-/* Assumes that the lock is held. */
-static ssize_t
-find_job_in_queue (SgenThreadPoolJob *job)
-{
-       for (ssize_t i = 0; i < job_queue.next_slot; ++i) {
-               if (job_queue.data [i] == job)
-                       return i;
-       }
-       return -1;
-}
-
-/* Assumes that the lock is held. */
-static void
-remove_job (SgenThreadPoolJob *job)
-{
-       ssize_t index;
-       SGEN_ASSERT (0, job->state == STATE_DONE, "Why are we removing a job that's not done?");
-       index = find_job_in_queue (job);
-       SGEN_ASSERT (0, index >= 0, "Why is the job we're trying to remove not in the queue?");
-       job_queue.data [index] = NULL;
-       sgen_pointer_queue_remove_nulls (&job_queue);
-       sgen_thread_pool_job_free (job);
-}
-
-static gboolean
-continue_idle_job (void)
-{
-       if (!continue_idle_job_func)
-               return FALSE;
-       return continue_idle_job_func ();
-}
-
-static mono_native_thread_return_t
-thread_func (void *thread_data)
-{
-       thread_init_func (thread_data);
-
-       mono_mutex_lock (&lock);
-       for (;;) {
-               /*
-                * It's important that we check the continue idle flag with the lock held.
-                * Suppose we didn't check with the lock held, and the result is FALSE.  The
-                * main thread might then set continue idle and signal us before we can take
-                * the lock, and we'd lose the signal.
-                */
-               gboolean do_idle = continue_idle_job ();
-               SgenThreadPoolJob *job = get_job_and_set_in_progress ();
-
-               if (!job && !do_idle) {
-                       /*
-                        * pthread_cond_wait() can return successfully despite the condition
-                        * not being signalled, so we have to run this in a loop until we
-                        * really have work to do.
-                        */
-                       mono_cond_wait (&work_cond, &lock);
-                       continue;
-               }
-
-               mono_mutex_unlock (&lock);
-
-               if (job) {
-                       job->func (thread_data, job);
-
-                       mono_mutex_lock (&lock);
-
-                       SGEN_ASSERT (0, job->state == STATE_IN_PROGRESS, "The job should still be in progress.");
-                       job->state = STATE_DONE;
-                       remove_job (job);
-                       /*
-                        * Only the main GC thread will ever wait on the done condition, so we don't
-                        * have to broadcast.
-                        */
-                       mono_cond_signal (&done_cond);
-               } else {
-                       SGEN_ASSERT (0, do_idle, "Why did we unlock if we still have to wait for idle?");
-                       SGEN_ASSERT (0, idle_job_func, "Why do we have idle work when there's no idle job function?");
-                       do {
-                               idle_job_func (thread_data);
-                               do_idle = continue_idle_job ();
-                       } while (do_idle && !job_queue.next_slot);
-
-                       mono_mutex_lock (&lock);
-
-                       if (!do_idle)
-                               mono_cond_signal (&done_cond);
-               }
-       }
-
-       return 0;
-}
-
-void
-sgen_thread_pool_init (int num_threads, SgenThreadPoolThreadInitFunc init_func, SgenThreadPoolIdleJobFunc idle_func, SgenThreadPoolContinueIdleJobFunc continue_idle_func, void **thread_datas)
-{
-       SGEN_ASSERT (0, num_threads == 1, "We only support 1 thread pool thread for now.");
-
-       mono_mutex_init (&lock);
-       mono_cond_init (&work_cond, NULL);
-       mono_cond_init (&done_cond, NULL);
-
-       thread_init_func = init_func;
-       idle_job_func = idle_func;
-       continue_idle_job_func = continue_idle_func;
-
-       mono_native_thread_create (&thread, thread_func, thread_datas ? thread_datas [0] : NULL);
-}
-
-SgenThreadPoolJob*
-sgen_thread_pool_job_alloc (const char *name, SgenThreadPoolJobFunc func, size_t size)
-{
-       SgenThreadPoolJob *job = sgen_alloc_internal_dynamic (size, INTERNAL_MEM_THREAD_POOL_JOB, TRUE);
-       job->name = name;
-       job->size = size;
-       job->state = STATE_WAITING;
-       job->func = func;
-       return job;
-}
-
-void
-sgen_thread_pool_job_free (SgenThreadPoolJob *job)
-{
-       sgen_free_internal_dynamic (job, job->size, INTERNAL_MEM_THREAD_POOL_JOB);
-}
-
-void
-sgen_thread_pool_job_enqueue (SgenThreadPoolJob *job)
-{
-       mono_mutex_lock (&lock);
-
-       sgen_pointer_queue_add (&job_queue, job);
-       /*
-        * FIXME: We could check whether there is a job in progress.  If there is, there's
-        * no need to signal the condition, at least as long as we have only one thread.
-        */
-       mono_cond_signal (&work_cond);
-
-       mono_mutex_unlock (&lock);
-}
-
-void
-sgen_thread_pool_job_wait (SgenThreadPoolJob *job)
-{
-       SGEN_ASSERT (0, job, "Where's the job?");
-
-       mono_mutex_lock (&lock);
-
-       while (find_job_in_queue (job) >= 0)
-               mono_cond_wait (&done_cond, &lock);
-
-       mono_mutex_unlock (&lock);
-}
-
-void
-sgen_thread_pool_idle_signal (void)
-{
-       SGEN_ASSERT (0, idle_job_func, "Why are we signaling idle without an idle function?");
-
-       mono_mutex_lock (&lock);
-
-       if (continue_idle_job_func ())
-               mono_cond_signal (&work_cond);
-
-       mono_mutex_unlock (&lock);
-}
-
-void
-sgen_thread_pool_idle_wait (void)
-{
-       SGEN_ASSERT (0, idle_job_func, "Why are we waiting for idle without an idle function?");
-
-       mono_mutex_lock (&lock);
-
-       while (continue_idle_job_func ())
-               mono_cond_wait (&done_cond, &lock);
-
-       mono_mutex_unlock (&lock);
-}
-
-void
-sgen_thread_pool_wait_for_all_jobs (void)
-{
-       mono_mutex_lock (&lock);
-
-       while (!sgen_pointer_queue_is_empty (&job_queue))
-               mono_cond_wait (&done_cond, &lock);
-
-       mono_mutex_unlock (&lock);
-}
-
-gboolean
-sgen_thread_pool_is_thread_pool_thread (MonoNativeThreadId some_thread)
-{
-       return some_thread == thread;
-}
-
-#endif
diff --git a/mono/metadata/sgen-thread-pool.h b/mono/metadata/sgen-thread-pool.h
deleted file mode 100644 (file)
index 4dcb3a9..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * sgen-thread-pool.h: Threadpool for all concurrent GC work.
- *
- * Copyright (C) 2015 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __MONO_SGEN_THREAD_POOL_H__
-#define __MONO_SGEN_THREAD_POOL_H__
-
-typedef struct _SgenThreadPoolJob SgenThreadPoolJob;
-
-typedef void (*SgenThreadPoolJobFunc) (void *thread_data, SgenThreadPoolJob *job);
-
-struct _SgenThreadPoolJob {
-       const char *name;
-       SgenThreadPoolJobFunc func;
-       size_t size;
-       volatile gint32 state;
-};
-
-typedef void (*SgenThreadPoolThreadInitFunc) (void*);
-typedef void (*SgenThreadPoolIdleJobFunc) (void*);
-typedef gboolean (*SgenThreadPoolContinueIdleJobFunc) (void);
-
-void sgen_thread_pool_init (int num_threads, SgenThreadPoolThreadInitFunc init_func, SgenThreadPoolIdleJobFunc idle_func, SgenThreadPoolContinueIdleJobFunc continue_idle_func, void **thread_datas);
-
-SgenThreadPoolJob* sgen_thread_pool_job_alloc (const char *name, SgenThreadPoolJobFunc func, size_t size);
-/* This only needs to be called on jobs that are not enqueued. */
-void sgen_thread_pool_job_free (SgenThreadPoolJob *job);
-
-void sgen_thread_pool_job_enqueue (SgenThreadPoolJob *job);
-/* This must only be called after the job has been enqueued. */
-void sgen_thread_pool_job_wait (SgenThreadPoolJob *job);
-
-void sgen_thread_pool_idle_signal (void);
-void sgen_thread_pool_idle_wait (void);
-
-void sgen_thread_pool_wait_for_all_jobs (void);
-
-gboolean sgen_thread_pool_is_thread_pool_thread (MonoNativeThreadId thread);
-
-#endif
index 6d3c8da9fade86d05e0c7a05fe666a36f7c47ce3..94c11ddc0b612a5056ccb7ee5e74067930157885 100644 (file)
@@ -25,9 +25,9 @@
 
 #ifdef HAVE_SGEN_GC
 
-#include "sgen-gc.h"
+#include "sgen/sgen-gc.h"
 #include "sgen-toggleref.h"
-#include "sgen-client.h"
+#include "sgen/sgen-client.h"
 
 
 /*only one of the two can be non null at a given time*/
diff --git a/mono/metadata/sgen-workers.c b/mono/metadata/sgen-workers.c
deleted file mode 100644 (file)
index cbbd770..0000000
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * sgen-workers.c: Worker threads for parallel and concurrent GC.
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "config.h"
-#ifdef HAVE_SGEN_GC
-
-#include <string.h>
-
-#include "mono/metadata/sgen-gc.h"
-#include "mono/metadata/sgen-workers.h"
-#include "mono/metadata/sgen-thread-pool.h"
-#include "mono/utils/mono-membar.h"
-#include "mono/metadata/sgen-client.h"
-
-static int workers_num;
-static WorkerData *workers_data;
-
-static SgenSectionGrayQueue workers_distribute_gray_queue;
-static gboolean workers_distribute_gray_queue_inited;
-
-/*
- * Allowed transitions:
- *
- * | from \ to          | NOT WORKING | WORKING | WORK ENQUEUED | NURSERY COLLECTION |
- * |--------------------+-------------+---------+---------------+--------------------|
- * | NOT WORKING        | -           | -       | main          | main               |
- * | WORKING            | worker      | -       | main          | main               |
- * | WORK ENQUEUED      | -           | worker  | -             | main               |
- * | NURSERY COLLECTION | -           | -       | main          | -                  |
- *
- * The WORK ENQUEUED state guarantees that the worker thread will inspect the queue again at
- * least once.  Only after looking at the queue will it go back to WORKING, and then,
- * eventually, to NOT WORKING.  After enqueuing work the main thread transitions the state
- * to WORK ENQUEUED.  Signalling the worker thread to wake up is only necessary if the old
- * state was NOT WORKING.
- */
-
-enum {
-       STATE_NOT_WORKING,
-       STATE_WORKING,
-       STATE_WORK_ENQUEUED,
-       STATE_NURSERY_COLLECTION
-} WorkersStateName;
-
-typedef gint32 State;
-
-static volatile State workers_state;
-
-static SgenObjectOperations * volatile idle_func_object_ops;
-
-static guint64 stat_workers_num_finished;
-
-static gboolean
-set_state (State old_state, State new_state)
-{
-       SGEN_ASSERT (0, old_state != new_state, "Why are we transitioning to the same state?");
-       if (new_state == STATE_NOT_WORKING)
-               SGEN_ASSERT (0, old_state == STATE_WORKING, "We can only transition to NOT WORKING from WORKING");
-       else if (new_state == STATE_WORKING)
-               SGEN_ASSERT (0, old_state == STATE_WORK_ENQUEUED, "We can only transition to WORKING from WORK ENQUEUED");
-       if (new_state == STATE_NOT_WORKING || new_state == STATE_WORKING)
-               SGEN_ASSERT (6, sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ()), "Only the worker thread is allowed to transition to NOT_WORKING or WORKING");
-
-       return InterlockedCompareExchange (&workers_state, new_state, old_state) == old_state;
-}
-
-static void
-assert_nursery_collection (State state)
-{
-       SGEN_ASSERT (0, state == STATE_NURSERY_COLLECTION, "Must be in the nursery collection state");
-}
-
-static gboolean
-state_is_working_or_enqueued (State state)
-{
-       return state == STATE_WORKING || state == STATE_WORK_ENQUEUED;
-}
-
-static void
-workers_signal_enqueue_work (gboolean from_nursery_collection)
-{
-       State old_state;
-       gboolean did_set_state;
-
-       do {
-               old_state = workers_state;
-
-               if (from_nursery_collection)
-                       assert_nursery_collection (old_state);
-               else
-                       SGEN_ASSERT (0, old_state != STATE_NURSERY_COLLECTION, "If we're not in a nursery collection, how come the state is NURSERY COLLECTION?");
-
-               if (old_state == STATE_WORK_ENQUEUED)
-                       break;
-
-               did_set_state = set_state (old_state, STATE_WORK_ENQUEUED);
-               if (from_nursery_collection)
-                       SGEN_ASSERT (0, did_set_state, "Nobody else should be mutating the state");
-       } while (!did_set_state);
-
-       if (!state_is_working_or_enqueued (old_state))
-               sgen_thread_pool_idle_signal ();
-}
-
-void
-sgen_workers_ensure_awake (void)
-{
-       SGEN_ASSERT (0, workers_state != STATE_NURSERY_COLLECTION, "Can't wake workers during nursery collection");
-       workers_signal_enqueue_work (FALSE);
-}
-
-static void
-worker_try_finish (void)
-{
-       State old_state;
-
-       ++stat_workers_num_finished;
-
-       do {
-               old_state = workers_state;
-
-               SGEN_ASSERT (0, old_state != STATE_NOT_WORKING, "How did we get from doing idle work to NOT WORKING without setting it ourselves?");
-               if (old_state == STATE_NURSERY_COLLECTION)
-                       return;
-               if (old_state == STATE_WORK_ENQUEUED)
-                       return;
-               SGEN_ASSERT (0, old_state == STATE_WORKING, "What other possibility is there?");
-
-               /* We are the last thread to go to sleep. */
-       } while (!set_state (old_state, STATE_NOT_WORKING));
-}
-
-static gboolean
-collection_needs_workers (void)
-{
-       return sgen_collection_is_concurrent ();
-}
-
-void
-sgen_workers_enqueue_job (SgenThreadPoolJob *job)
-{
-       if (!collection_needs_workers ()) {
-               job->func (NULL, job);
-               sgen_thread_pool_job_free (job);
-               return;
-       }
-
-       sgen_thread_pool_job_enqueue (job);
-}
-
-void
-sgen_workers_wait_for_jobs_finished (void)
-{
-       sgen_thread_pool_wait_for_all_jobs ();
-       /*
-        * If the idle task was never triggered or it finished before the last job did and
-        * then didn't get triggered again, we might end up in the situation of having
-        * something in the gray queue yet the idle task not working.  The easiest way to
-        * make sure this doesn't stay that way is to just trigger it again after all jobs
-        * have finished.
-        */
-       sgen_workers_ensure_awake ();
-}
-
-void
-sgen_workers_signal_start_nursery_collection_and_wait (void)
-{
-       State old_state;
-
-       do {
-               old_state = workers_state;
-
-               if (old_state != STATE_NOT_WORKING)
-                       SGEN_ASSERT (0, old_state != STATE_NURSERY_COLLECTION, "Why are we transitioning to NURSERY COLLECTION when we're already there?");
-       } while (!set_state (old_state, STATE_NURSERY_COLLECTION));
-
-       sgen_thread_pool_idle_wait ();
-
-       assert_nursery_collection (workers_state);
-}
-
-void
-sgen_workers_signal_finish_nursery_collection (void)
-{
-       assert_nursery_collection (workers_state);
-       workers_signal_enqueue_work (TRUE);
-}
-
-static gboolean
-workers_get_work (WorkerData *data)
-{
-       SgenMajorCollector *major;
-
-       g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
-
-       /* If we're concurrent, steal from the workers distribute gray queue. */
-       major = sgen_get_major_collector ();
-       if (major->is_concurrent) {
-               GrayQueueSection *section = sgen_section_gray_queue_dequeue (&workers_distribute_gray_queue);
-               if (section) {
-                       sgen_gray_object_enqueue_section (&data->private_gray_queue, section);
-                       return TRUE;
-               }
-       }
-
-       /* Nobody to steal from */
-       g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
-       return FALSE;
-}
-
-static void
-concurrent_enqueue_check (char *obj)
-{
-       g_assert (sgen_concurrent_collection_in_progress ());
-       g_assert (!sgen_ptr_in_nursery (obj));
-       g_assert (SGEN_LOAD_VTABLE (obj));
-}
-
-static void
-init_private_gray_queue (WorkerData *data)
-{
-       sgen_gray_object_queue_init (&data->private_gray_queue,
-                       sgen_get_major_collector ()->is_concurrent ? concurrent_enqueue_check : NULL);
-}
-
-static void
-thread_pool_init_func (void *data_untyped)
-{
-       WorkerData *data = data_untyped;
-       SgenMajorCollector *major = sgen_get_major_collector ();
-
-       sgen_client_thread_register_worker ();
-
-       if (!major->is_concurrent)
-               return;
-
-       init_private_gray_queue (data);
-}
-
-static gboolean
-continue_idle_func (void)
-{
-       return state_is_working_or_enqueued (workers_state);
-}
-
-static void
-marker_idle_func (void *data_untyped)
-{
-       WorkerData *data = data_untyped;
-
-       if (!continue_idle_func ())
-               return;
-
-       SGEN_ASSERT (0, sgen_concurrent_collection_in_progress (), "The worker should only mark in concurrent collections.");
-       SGEN_ASSERT (0, sgen_get_current_collection_generation () != GENERATION_NURSERY, "Why are we doing work while there's a nursery collection happening?");
-
-       if (workers_state == STATE_WORK_ENQUEUED) {
-               set_state (STATE_WORK_ENQUEUED, STATE_WORKING);
-               SGEN_ASSERT (0, workers_state != STATE_NOT_WORKING, "How did we get from WORK ENQUEUED to NOT WORKING?");
-       }
-
-       if (!sgen_gray_object_queue_is_empty (&data->private_gray_queue) || workers_get_work (data)) {
-               ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (idle_func_object_ops, &data->private_gray_queue);
-
-               SGEN_ASSERT (0, !sgen_gray_object_queue_is_empty (&data->private_gray_queue), "How is our gray queue empty if we just got work?");
-
-               sgen_drain_gray_stack (32, ctx);
-       } else {
-               worker_try_finish ();
-       }
-}
-
-static void
-init_distribute_gray_queue (void)
-{
-       if (workers_distribute_gray_queue_inited) {
-               g_assert (sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue));
-               g_assert (workers_distribute_gray_queue.locked);
-               return;
-       }
-
-       sgen_section_gray_queue_init (&workers_distribute_gray_queue, TRUE,
-                       sgen_get_major_collector ()->is_concurrent ? concurrent_enqueue_check : NULL);
-       workers_distribute_gray_queue_inited = TRUE;
-}
-
-void
-sgen_workers_init_distribute_gray_queue (void)
-{
-       SGEN_ASSERT (0, sgen_get_major_collector ()->is_concurrent && collection_needs_workers (),
-                       "Why should we init the distribute gray queue if we don't need it?");
-       init_distribute_gray_queue ();
-}
-
-void
-sgen_workers_init (int num_workers)
-{
-       int i;
-       void *workers_data_ptrs [num_workers];
-
-       if (!sgen_get_major_collector ()->is_concurrent) {
-               sgen_thread_pool_init (num_workers, thread_pool_init_func, NULL, NULL, NULL);
-               return;
-       }
-
-       //g_print ("initing %d workers\n", num_workers);
-
-       workers_num = num_workers;
-
-       workers_data = sgen_alloc_internal_dynamic (sizeof (WorkerData) * num_workers, INTERNAL_MEM_WORKER_DATA, TRUE);
-       memset (workers_data, 0, sizeof (WorkerData) * num_workers);
-
-       init_distribute_gray_queue ();
-
-       for (i = 0; i < workers_num; ++i)
-               workers_data_ptrs [i] = &workers_data [i];
-
-       sgen_thread_pool_init (num_workers, thread_pool_init_func, marker_idle_func, continue_idle_func, workers_data_ptrs);
-
-       mono_counters_register ("# workers finished", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_workers_num_finished);
-}
-
-void
-sgen_workers_start_all_workers (SgenObjectOperations *object_ops)
-{
-       if (!collection_needs_workers ())
-               return;
-
-       idle_func_object_ops = object_ops;
-       mono_memory_write_barrier ();
-
-       workers_signal_enqueue_work (FALSE);
-}
-
-void
-sgen_workers_join (void)
-{
-       int i;
-
-       SGEN_ASSERT (0, workers_state != STATE_NURSERY_COLLECTION, "Can't be in nursery collection when joining");
-
-       if (!collection_needs_workers ())
-               return;
-
-       sgen_thread_pool_wait_for_all_jobs ();
-       sgen_thread_pool_idle_wait ();
-       SGEN_ASSERT (0, workers_state == STATE_NOT_WORKING, "Can only signal enqueue work when in no work state");
-
-       /* At this point all the workers have stopped. */
-
-       SGEN_ASSERT (0, sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue), "Why is there still work left to do?");
-       for (i = 0; i < workers_num; ++i)
-               SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (&workers_data [i].private_gray_queue), "Why is there still work left to do?");
-}
-
-gboolean
-sgen_workers_all_done (void)
-{
-       return workers_state == STATE_NOT_WORKING;
-}
-
-/* Must only be used for debugging */
-gboolean
-sgen_workers_are_working (void)
-{
-       return state_is_working_or_enqueued (workers_state);
-}
-
-void
-sgen_workers_wait (void)
-{
-       sgen_thread_pool_idle_wait ();
-       SGEN_ASSERT (0, sgen_workers_all_done (), "Why are the workers not done after we wait for them?");
-}
-
-SgenSectionGrayQueue*
-sgen_workers_get_distribute_section_gray_queue (void)
-{
-       return &workers_distribute_gray_queue;
-}
-
-#endif
diff --git a/mono/metadata/sgen-workers.h b/mono/metadata/sgen-workers.h
deleted file mode 100644 (file)
index 6755f9e..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * sgen-workers.c: Worker threads for parallel and concurrent GC.
- *
- * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __MONO_SGEN_WORKER_H__
-#define __MONO_SGEN_WORKER_H__
-
-#include "mono/metadata/sgen-thread-pool.h"
-
-typedef struct _WorkerData WorkerData;
-struct _WorkerData {
-       SgenGrayQueue private_gray_queue; /* only read/written by worker thread */
-};
-
-void sgen_workers_init (int num_workers);
-void sgen_workers_start_all_workers (SgenObjectOperations *object_ops);
-void sgen_workers_ensure_awake (void);
-void sgen_workers_init_distribute_gray_queue (void);
-void sgen_workers_enqueue_job (SgenThreadPoolJob *job);
-void sgen_workers_wait_for_jobs_finished (void);
-void sgen_workers_distribute_gray_queue_sections (void);
-void sgen_workers_reset_data (void);
-void sgen_workers_join (void);
-gboolean sgen_workers_all_done (void);
-gboolean sgen_workers_are_working (void);
-void sgen_workers_wait (void);
-SgenSectionGrayQueue* sgen_workers_get_distribute_section_gray_queue (void);
-
-void sgen_workers_signal_start_nursery_collection_and_wait (void);
-void sgen_workers_signal_finish_nursery_collection (void);
-
-#endif
index 74947390964043355d4b6c9bb16eeeef3f54516c..45a3e71b29cecab2c06c9bdf991cc9be5a9e3801 100755 (executable)
@@ -20,6 +20,7 @@ libs= \
 
 sgen_libs = \
        $(monodir)/mono/metadata/libmonoruntimesgen.la  \
+       $(monodir)/mono/sgen/libmonosgen.la     \
        $(monodir)/mono/io-layer/libwapi.la     \
        $(monodir)/mono/utils/libmonoutils.la \
        $(GLIB_LIBS) $(LIBICONV)
@@ -33,6 +34,7 @@ static_libs=  \
 
 sgenstatic_libs = \
        $(monodir)/mono/metadata/libmonoruntimesgen-static.la   \
+       $(monodir)/mono/sgen/libmonosgen-static.la      \
        $(monodir)/mono/io-layer/libwapi.la     \
        $(monodir)/mono/utils/libmonoutils.la \
        $(GLIB_LIBS) $(LIBICONV)
@@ -189,9 +191,9 @@ mono_boehm-main.$(OBJEXT): buildver-boehm.h
 endif
 
 if DISABLE_EXECUTABLES
-buildver-sgen.h: libmini-static.la $(monodir)/mono/metadata/libmonoruntimesgen.la
+buildver-sgen.h: libmini-static.la $(monodir)/mono/metadata/libmonoruntimesgen.la $(monodir)/mono/sgen/libmonosgen.la
 else
-buildver-sgen.h: libmini-static.la $(monodir)/mono/metadata/libmonoruntimesgen-static.la
+buildver-sgen.h: libmini-static.la $(monodir)/mono/metadata/libmonoruntimesgen-static.la $(monodir)/mono/sgen/libmonosgen-static.la
 endif
        @echo "const char *build_date = \"`date`\";" > buildver-sgen.h
 mono_sgen-main-sgen.$(OBJEXT): buildver-sgen.h
diff --git a/mono/sgen/.gitignore b/mono/sgen/.gitignore
new file mode 100644 (file)
index 0000000..70fc4fd
--- /dev/null
@@ -0,0 +1,8 @@
+/Makefile
+/Makefile.in
+/.libs
+/.deps
+/*.lo
+/*.la
+/*.o
+/*.a
diff --git a/mono/sgen/Makefile.am b/mono/sgen/Makefile.am
new file mode 100644 (file)
index 0000000..32673bf
--- /dev/null
@@ -0,0 +1,76 @@
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/mono $(LIBGC_CPPFLAGS) $(GLIB_CFLAGS) $(SHARED_CFLAGS)
+
+if SUPPORT_SGEN
+if DISABLE_EXECUTABLES
+shared_libraries = libmonosgen.la
+else
+if SHARED_MONO
+shared_libraries = libmonosgen.la
+endif
+endif
+libraries = $(shared_libraries) libmonosgen-static.la
+endif
+
+if DISABLE_EXECUTABLES
+noinst_LTLIBRARIES = $(shared_libraries)
+else
+noinst_LTLIBRARIES = $(libraries)
+endif
+
+monosgen_sources = \
+       gc-internal-agnostic.h \
+       sgen-alloc.c \
+       sgen-archdep.h \
+       sgen-cardtable.c \
+       sgen-cardtable.h \
+       sgen-client.h \
+       sgen-conf.h \
+       sgen-copy-object.h \
+       sgen-debug.c \
+       sgen-descriptor.c \
+       sgen-descriptor.h \
+       sgen-fin-weak-hash.c \
+       sgen-gc.c \
+       sgen-gc.h \
+       sgen-gray.c \
+       sgen-gray.h \
+       sgen-hash-table.c \
+       sgen-hash-table.h \
+       sgen-internal.c \
+       sgen-layout-stats.c \
+       sgen-layout-stats.h \
+       sgen-los.c \
+       sgen-major-copy-object.h \
+       sgen-marksweep-drain-gray-stack.h \
+       sgen-marksweep-scan-object-concurrent.h \
+       sgen-marksweep.c \
+       sgen-memory-governor.c \
+       sgen-memory-governor.h \
+       sgen-minor-copy-object.h \
+       sgen-minor-scan-object.h \
+       sgen-nursery-allocator.c \
+       sgen-pinning-stats.c \
+       sgen-pinning.c \
+       sgen-pinning.h \
+       sgen-pointer-queue.c \
+       sgen-pointer-queue.h \
+       sgen-protocol-def.h \
+       sgen-protocol.c \
+       sgen-protocol.h \
+       sgen-qsort.c \
+       sgen-qsort.h \
+       sgen-scan-object.h \
+       sgen-simple-nursery.c \
+       sgen-split-nursery.c \
+       sgen-tagged-pointer.h \
+       sgen-thread-pool.c \
+       sgen-thread-pool.h \
+       sgen-workers.c \
+       sgen-workers.h
+
+libmonosgen_la_SOURCES = $(monosgen_sources)
+libmonosgen_la_CFLAGS = $(SGEN_DEFINES)
+
+libmonosgen_static_la_SOURCES = $(libmonosgen_la_SOURCES)
+libmonosgen_static_la_CFLAGS = $(SGEN_DEFINES)
+libmonosgen_static_la_LDFLAGS = -static
diff --git a/mono/sgen/gc-internal-agnostic.h b/mono/sgen/gc-internal-agnostic.h
new file mode 100644 (file)
index 0000000..72eca3d
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * gc-internal-agnostic.h: Mono-agnostic GC interface.
+ *
+ * Copyright (C) 2015 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MONO_METADATA_GCINTERNALAGNOSTIC_H__
+#define __MONO_METADATA_GCINTERNALAGNOSTIC_H__
+
+#include <config.h>
+#include <glib.h>
+#include <stdio.h>
+
+#include "mono/utils/mono-compiler.h"
+#include "mono/utils/parse.h"
+#include "mono/utils/memfuncs.h"
+
+typedef struct {
+       guint minor_gc_count;
+       guint major_gc_count;
+       guint64 minor_gc_time;
+       guint64 major_gc_time;
+       guint64 major_gc_time_concurrent;
+} GCStats;
+
+extern GCStats gc_stats;
+
+/*
+ * Try to register a foreign thread with the GC, if we fail or the backend
+ * can't cope with this concept - we return FALSE.
+ */
+extern gboolean mono_gc_register_thread (void *baseptr);
+
+gboolean mono_gc_parse_environment_string_extract_number (const char *str, size_t *out);
+
+void* mono_gc_make_descr_for_object (gsize *bitmap, int numbits, size_t obj_size);
+void* mono_gc_make_descr_for_array (int vector, gsize *elem_bitmap, int numbits, size_t elem_size);
+
+/* simple interface for data structures needed in the runtime */
+void* mono_gc_make_descr_from_bitmap (gsize *bitmap, int numbits);
+
+/* Return a root descriptor for a root with all refs */
+void* mono_gc_make_root_descr_all_refs (int numbits);
+
+/* Return the bitmap encoded by a descriptor */
+gsize* mono_gc_get_bitmap_for_descr (void *descr, int *numbits);
+
+/*
+These functions must be used when it's possible that either destination is not
+word aligned or size is not a multiple of word size.
+*/
+void mono_gc_bzero_atomic (void *dest, size_t size);
+void mono_gc_bzero_aligned (void *dest, size_t size);
+void mono_gc_memmove_atomic (void *dest, const void *src, size_t size);
+void mono_gc_memmove_aligned (void *dest, const void *src, size_t size);
+
+FILE *mono_gc_get_logfile (void);
+
+#endif
diff --git a/mono/sgen/sgen-alloc.c b/mono/sgen/sgen-alloc.c
new file mode 100644 (file)
index 0000000..ac017dd
--- /dev/null
@@ -0,0 +1,569 @@
+/*
+ * sgen-alloc.c: Object allocation routines + managed allocators
+ *
+ * Author:
+ *     Paolo Molaro (lupus@ximian.com)
+ *  Rodrigo Kumpera (kumpera@gmail.com)
+ *
+ * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
+ * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ * Copyright 2011 Xamarin, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * ######################################################################
+ * ########  Object allocation
+ * ######################################################################
+ * This section of code deals with allocating memory for objects.
+ * There are several ways:
+ * *) allocate large objects
+ * *) allocate normal objects
+ * *) fast lock-free allocation
+ * *) allocation of pinned objects
+ */
+
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#include <string.h>
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-protocol.h"
+#include "mono/sgen/sgen-memory-governor.h"
+#include "mono/sgen/sgen-client.h"
+#include "mono/utils/mono-memory-model.h"
+
+#define ALIGN_UP               SGEN_ALIGN_UP
+#define ALLOC_ALIGN            SGEN_ALLOC_ALIGN
+#define MAX_SMALL_OBJ_SIZE     SGEN_MAX_SMALL_OBJ_SIZE
+
+#ifdef HEAVY_STATISTICS
+static guint64 stat_objects_alloced = 0;
+static guint64 stat_bytes_alloced = 0;
+static guint64 stat_bytes_alloced_los = 0;
+
+#endif
+
+/*
+ * Allocation is done from a Thread Local Allocation Buffer (TLAB). TLABs are allocated
+ * from nursery fragments.
+ * tlab_next is the pointer to the space inside the TLAB where the next object will 
+ * be allocated.
+ * tlab_temp_end is the pointer to the end of the temporary space reserved for
+ * the allocation: it allows us to set the scan starts at reasonable intervals.
+ * tlab_real_end points to the end of the TLAB.
+ */
+
+/*
+ * FIXME: What is faster, a TLS variable pointing to a structure, or separate TLS 
+ * variables for next+temp_end ?
+ */
+#ifdef HAVE_KW_THREAD
+static __thread char *tlab_start;
+static __thread char *tlab_next;
+static __thread char *tlab_temp_end;
+static __thread char *tlab_real_end;
+/* Used by the managed allocator/wbarrier */
+static __thread char **tlab_next_addr MONO_ATTR_USED;
+#endif
+
+#ifdef HAVE_KW_THREAD
+#define TLAB_START     tlab_start
+#define TLAB_NEXT      tlab_next
+#define TLAB_TEMP_END  tlab_temp_end
+#define TLAB_REAL_END  tlab_real_end
+#else
+#define TLAB_START     (__thread_info__->tlab_start)
+#define TLAB_NEXT      (__thread_info__->tlab_next)
+#define TLAB_TEMP_END  (__thread_info__->tlab_temp_end)
+#define TLAB_REAL_END  (__thread_info__->tlab_real_end)
+#endif
+
+static void*
+alloc_degraded (GCVTable *vtable, size_t size, gboolean for_mature)
+{
+       void *p;
+
+       if (!for_mature) {
+               sgen_client_degraded_allocation (size);
+               SGEN_ATOMIC_ADD_P (degraded_mode, size);
+               sgen_ensure_free_space (size);
+       } else {
+               if (sgen_need_major_collection (size))
+                       sgen_perform_collection (size, GENERATION_OLD, "mature allocation failure", !for_mature);
+       }
+
+
+       p = major_collector.alloc_degraded (vtable, size);
+
+       if (!for_mature)
+               binary_protocol_alloc_degraded (p, vtable, size, sgen_client_get_provenance ());
+
+       return p;
+}
+
+static void
+zero_tlab_if_necessary (void *p, size_t size)
+{
+       if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION || nursery_clear_policy == CLEAR_AT_TLAB_CREATION_DEBUG) {
+               memset (p, 0, size);
+       } else {
+               /*
+                * This function is called for all allocations in
+                * TLABs.  TLABs originate from fragments, which are
+                * initialized to be faux arrays.  The remainder of
+                * the fragments are zeroed out at initialization for
+                * CLEAR_AT_GC, so here we just need to make sure that
+                * the array header is zeroed.  Since we don't know
+                * whether we're called for the start of a fragment or
+                * for somewhere in between, we zero in any case, just
+                * to make sure.
+                */
+               sgen_client_zero_array_fill_header (p, size);
+       }
+}
+
+/*
+ * Provide a variant that takes just the vtable for small fixed-size objects.
+ * The aligned size is already computed and stored in vt->gc_descr.
+ * Note: every SGEN_SCAN_START_SIZE or so we are given the chance to do some special
+ * processing. We can keep track of where objects start, for example,
+ * so when we scan the thread stacks for pinned objects, we can start
+ * a search for the pinned object in SGEN_SCAN_START_SIZE chunks.
+ */
+void*
+sgen_alloc_obj_nolock (GCVTable *vtable, size_t size)
+{
+       /* FIXME: handle OOM */
+       void **p;
+       char *new_next;
+       size_t real_size = size;
+       TLAB_ACCESS_INIT;
+       
+       CANARIFY_SIZE(size);
+
+       HEAVY_STAT (++stat_objects_alloced);
+       if (real_size <= SGEN_MAX_SMALL_OBJ_SIZE)
+               HEAVY_STAT (stat_bytes_alloced += size);
+       else
+               HEAVY_STAT (stat_bytes_alloced_los += size);
+
+       size = ALIGN_UP (size);
+
+       SGEN_ASSERT (6, sgen_vtable_get_descriptor (vtable), "VTable without descriptor");
+
+       if (G_UNLIKELY (has_per_allocation_action)) {
+               static int alloc_count;
+               int current_alloc = InterlockedIncrement (&alloc_count);
+
+               if (collect_before_allocs) {
+                       if (((current_alloc % collect_before_allocs) == 0) && nursery_section) {
+                               sgen_perform_collection (0, GENERATION_NURSERY, "collect-before-alloc-triggered", TRUE);
+                               if (!degraded_mode && sgen_can_alloc_size (size) && real_size <= SGEN_MAX_SMALL_OBJ_SIZE) {
+                                       // FIXME:
+                                       g_assert_not_reached ();
+                               }
+                       }
+               } else if (verify_before_allocs) {
+                       if ((current_alloc % verify_before_allocs) == 0)
+                               sgen_check_whole_heap_stw ();
+               }
+       }
+
+       /*
+        * We must already have the lock here instead of after the
+        * fast path because we might be interrupted in the fast path
+        * (after confirming that new_next < TLAB_TEMP_END) by the GC,
+        * and we'll end up allocating an object in a fragment which
+        * no longer belongs to us.
+        *
+        * The managed allocator does not do this, but it's treated
+        * specially by the world-stopping code.
+        */
+
+       if (real_size > SGEN_MAX_SMALL_OBJ_SIZE) {
+               p = sgen_los_alloc_large_inner (vtable, ALIGN_UP (real_size));
+       } else {
+               /* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */
+
+               p = (void**)TLAB_NEXT;
+               /* FIXME: handle overflow */
+               new_next = (char*)p + size;
+               TLAB_NEXT = new_next;
+
+               if (G_LIKELY (new_next < TLAB_TEMP_END)) {
+                       /* Fast path */
+
+                       /* 
+                        * FIXME: We might need a memory barrier here so the change to tlab_next is 
+                        * visible before the vtable store.
+                        */
+
+                       CANARIFY_ALLOC(p,real_size);
+                       SGEN_LOG (6, "Allocated object %p, vtable: %p (%s), size: %zd", p, vtable, sgen_client_vtable_get_name (vtable), size);
+                       binary_protocol_alloc (p , vtable, size, sgen_client_get_provenance ());
+                       g_assert (*p == NULL);
+                       mono_atomic_store_seq (p, vtable);
+
+                       return p;
+               }
+
+               /* Slow path */
+
+               /* there are two cases: the object is too big or we run out of space in the TLAB */
+               /* we also reach here when the thread does its first allocation after a minor 
+                * collection, since the tlab_ variables are initialized to NULL.
+                * there can be another case (from ORP), if we cooperate with the runtime a bit:
+                * objects that need finalizers can have the high bit set in their size
+                * so the above check fails and we can readily add the object to the queue.
+                * This avoids taking again the GC lock when registering, but this is moot when
+                * doing thread-local allocation, so it may not be a good idea.
+                */
+               if (TLAB_NEXT >= TLAB_REAL_END) {
+                       int available_in_tlab;
+                       /* 
+                        * Run out of space in the TLAB. When this happens, some amount of space
+                        * remains in the TLAB, but not enough to satisfy the current allocation
+                        * request. Currently, we retire the TLAB in all cases, later we could
+                        * keep it if the remaining space is above a treshold, and satisfy the
+                        * allocation directly from the nursery.
+                        */
+                       TLAB_NEXT -= size;
+                       /* when running in degraded mode, we continue allocing that way
+                        * for a while, to decrease the number of useless nursery collections.
+                        */
+                       if (degraded_mode && degraded_mode < DEFAULT_NURSERY_SIZE)
+                               return alloc_degraded (vtable, size, FALSE);
+
+                       available_in_tlab = (int)(TLAB_REAL_END - TLAB_NEXT);//We'll never have tlabs > 2Gb
+                       if (size > tlab_size || available_in_tlab > SGEN_MAX_NURSERY_WASTE) {
+                               /* Allocate directly from the nursery */
+                               p = sgen_nursery_alloc (size);
+                               if (!p) {
+                                       /*
+                                        * We couldn't allocate from the nursery, so we try
+                                        * collecting.  Even after the collection, we might
+                                        * still not have enough memory to allocate the
+                                        * object.  The reason will most likely be that we've
+                                        * run out of memory, but there is the theoretical
+                                        * possibility that other threads might have consumed
+                                        * the freed up memory ahead of us.
+                                        *
+                                        * What we do in this case is allocate degraded, i.e.,
+                                        * from the major heap.
+                                        *
+                                        * Ideally we'd like to detect the case of other
+                                        * threads allocating ahead of us and loop (if we
+                                        * always loop we will loop endlessly in the case of
+                                        * OOM).
+                                        */
+                                       sgen_ensure_free_space (real_size);
+                                       if (!degraded_mode)
+                                               p = sgen_nursery_alloc (size);
+                               }
+                               if (!p)
+                                       return alloc_degraded (vtable, size, FALSE);
+
+                               zero_tlab_if_necessary (p, size);
+                       } else {
+                               size_t alloc_size = 0;
+                               if (TLAB_START)
+                                       SGEN_LOG (3, "Retire TLAB: %p-%p [%ld]", TLAB_START, TLAB_REAL_END, (long)(TLAB_REAL_END - TLAB_NEXT - size));
+                               sgen_nursery_retire_region (p, available_in_tlab);
+
+                               p = sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
+                               if (!p) {
+                                       /* See comment above in similar case. */
+                                       sgen_ensure_free_space (tlab_size);
+                                       if (!degraded_mode)
+                                               p = sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
+                               }
+                               if (!p)
+                                       return alloc_degraded (vtable, size, FALSE);
+
+                               /* Allocate a new TLAB from the current nursery fragment */
+                               TLAB_START = (char*)p;
+                               TLAB_NEXT = TLAB_START;
+                               TLAB_REAL_END = TLAB_START + alloc_size;
+                               TLAB_TEMP_END = TLAB_START + MIN (SGEN_SCAN_START_SIZE, alloc_size);
+
+                               zero_tlab_if_necessary (TLAB_START, alloc_size);
+
+                               /* Allocate from the TLAB */
+                               p = (void*)TLAB_NEXT;
+                               TLAB_NEXT += size;
+                               sgen_set_nursery_scan_start ((char*)p);
+                       }
+               } else {
+                       /* Reached tlab_temp_end */
+
+                       /* record the scan start so we can find pinned objects more easily */
+                       sgen_set_nursery_scan_start ((char*)p);
+                       /* we just bump tlab_temp_end as well */
+                       TLAB_TEMP_END = MIN (TLAB_REAL_END, TLAB_NEXT + SGEN_SCAN_START_SIZE);
+                       SGEN_LOG (5, "Expanding local alloc: %p-%p", TLAB_NEXT, TLAB_TEMP_END);
+               }
+               CANARIFY_ALLOC(p,real_size);
+       }
+
+       if (G_LIKELY (p)) {
+               SGEN_LOG (6, "Allocated object %p, vtable: %p (%s), size: %zd", p, vtable, sgen_client_vtable_get_name (vtable), size);
+               binary_protocol_alloc (p, vtable, size, sgen_client_get_provenance ());
+               mono_atomic_store_seq (p, vtable);
+       }
+
+       return p;
+}
+
+void*
+sgen_try_alloc_obj_nolock (GCVTable *vtable, size_t size)
+{
+       void **p;
+       char *new_next;
+       size_t real_size = size;
+       TLAB_ACCESS_INIT;
+
+       CANARIFY_SIZE(size);
+
+       size = ALIGN_UP (size);
+       SGEN_ASSERT (9, real_size >= SGEN_CLIENT_MINIMUM_OBJECT_SIZE, "Object too small");
+
+       SGEN_ASSERT (6, sgen_vtable_get_descriptor (vtable), "VTable without descriptor");
+
+       if (real_size > SGEN_MAX_SMALL_OBJ_SIZE)
+               return NULL;
+
+       if (G_UNLIKELY (size > tlab_size)) {
+               /* Allocate directly from the nursery */
+               p = sgen_nursery_alloc (size);
+               if (!p)
+                       return NULL;
+               sgen_set_nursery_scan_start ((char*)p);
+
+               /*FIXME we should use weak memory ops here. Should help specially on x86. */
+               zero_tlab_if_necessary (p, size);
+       } else {
+               int available_in_tlab;
+               char *real_end;
+               /* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */
+
+               p = (void**)TLAB_NEXT;
+               /* FIXME: handle overflow */
+               new_next = (char*)p + size;
+
+               real_end = TLAB_REAL_END;
+               available_in_tlab = (int)(real_end - (char*)p);//We'll never have tlabs > 2Gb
+
+               if (G_LIKELY (new_next < real_end)) {
+                       TLAB_NEXT = new_next;
+
+                       /* Second case, we overflowed temp end */
+                       if (G_UNLIKELY (new_next >= TLAB_TEMP_END)) {
+                               sgen_set_nursery_scan_start (new_next);
+                               /* we just bump tlab_temp_end as well */
+                               TLAB_TEMP_END = MIN (TLAB_REAL_END, TLAB_NEXT + SGEN_SCAN_START_SIZE);
+                               SGEN_LOG (5, "Expanding local alloc: %p-%p", TLAB_NEXT, TLAB_TEMP_END);
+                       }
+               } else if (available_in_tlab > SGEN_MAX_NURSERY_WASTE) {
+                       /* Allocate directly from the nursery */
+                       p = sgen_nursery_alloc (size);
+                       if (!p)
+                               return NULL;
+
+                       zero_tlab_if_necessary (p, size);
+               } else {
+                       size_t alloc_size = 0;
+
+                       sgen_nursery_retire_region (p, available_in_tlab);
+                       new_next = sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
+                       p = (void**)new_next;
+                       if (!p)
+                               return NULL;
+
+                       TLAB_START = (char*)new_next;
+                       TLAB_NEXT = new_next + size;
+                       TLAB_REAL_END = new_next + alloc_size;
+                       TLAB_TEMP_END = new_next + MIN (SGEN_SCAN_START_SIZE, alloc_size);
+                       sgen_set_nursery_scan_start ((char*)p);
+
+                       zero_tlab_if_necessary (new_next, alloc_size);
+               }
+       }
+
+       HEAVY_STAT (++stat_objects_alloced);
+       HEAVY_STAT (stat_bytes_alloced += size);
+
+       CANARIFY_ALLOC(p,real_size);
+       SGEN_LOG (6, "Allocated object %p, vtable: %p (%s), size: %zd", p, vtable, sgen_client_vtable_get_name (vtable), size);
+       binary_protocol_alloc (p, vtable, size, sgen_client_get_provenance ());
+       g_assert (*p == NULL); /* FIXME disable this in non debug builds */
+
+       mono_atomic_store_seq (p, vtable);
+
+       return p;
+}
+
+void*
+sgen_alloc_obj (GCVTable *vtable, size_t size)
+{
+       void *res;
+       TLAB_ACCESS_INIT;
+
+       if (!SGEN_CAN_ALIGN_UP (size))
+               return NULL;
+
+#ifndef DISABLE_CRITICAL_REGION
+
+       if (G_UNLIKELY (has_per_allocation_action)) {
+               static int alloc_count;
+               int current_alloc = InterlockedIncrement (&alloc_count);
+
+               if (verify_before_allocs) {
+                       if ((current_alloc % verify_before_allocs) == 0)
+                               sgen_check_whole_heap_stw ();
+               }
+               if (collect_before_allocs) {
+                       if (((current_alloc % collect_before_allocs) == 0) && nursery_section) {
+                               LOCK_GC;
+                               sgen_perform_collection (0, GENERATION_NURSERY, "collect-before-alloc-triggered", TRUE);
+                               UNLOCK_GC;
+                       }
+               }
+       }
+
+       ENTER_CRITICAL_REGION;
+       res = sgen_try_alloc_obj_nolock ((GCVTable*)vtable, size);
+       if (res) {
+               EXIT_CRITICAL_REGION;
+               return res;
+       }
+       EXIT_CRITICAL_REGION;
+#endif
+       LOCK_GC;
+       res = sgen_alloc_obj_nolock ((GCVTable*)vtable, size);
+       UNLOCK_GC;
+       if (G_UNLIKELY (!res))
+               sgen_client_out_of_memory (size);
+       return res;
+}
+
+/*
+ * To be used for interned strings and possibly MonoThread, reflection handles.
+ * We may want to explicitly free these objects.
+ */
+void*
+sgen_alloc_obj_pinned (GCVTable *vtable, size_t size)
+{
+       void **p;
+
+       if (!SGEN_CAN_ALIGN_UP (size))
+               return NULL;
+       size = ALIGN_UP (size);
+
+       LOCK_GC;
+
+       if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
+               /* large objects are always pinned anyway */
+               p = sgen_los_alloc_large_inner ((GCVTable*)vtable, size);
+       } else {
+               SGEN_ASSERT (9, sgen_client_vtable_is_inited (vtable), "class %s:%s is not initialized", sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable));
+               p = major_collector.alloc_small_pinned_obj ((GCVTable*)vtable, size, SGEN_VTABLE_HAS_REFERENCES ((GCVTable*)vtable));
+       }
+       if (G_LIKELY (p)) {
+               SGEN_LOG (6, "Allocated pinned object %p, vtable: %p (%s), size: %zd", p, vtable, sgen_client_vtable_get_name (vtable), size);
+               binary_protocol_alloc_pinned (p, vtable, size, sgen_client_get_provenance ());
+       }
+       UNLOCK_GC;
+       return p;
+}
+
+void*
+sgen_alloc_obj_mature (GCVTable *vtable, size_t size)
+{
+       void *res;
+
+       if (!SGEN_CAN_ALIGN_UP (size))
+               return NULL;
+       size = ALIGN_UP (size);
+
+       LOCK_GC;
+       res = alloc_degraded ((GCVTable*)vtable, size, TRUE);
+       UNLOCK_GC;
+
+       return res;
+}
+
+void
+sgen_init_tlab_info (SgenThreadInfo* info)
+{
+#ifndef HAVE_KW_THREAD
+       SgenThreadInfo *__thread_info__ = info;
+#endif
+
+       info->tlab_start_addr = &TLAB_START;
+       info->tlab_next_addr = &TLAB_NEXT;
+       info->tlab_temp_end_addr = &TLAB_TEMP_END;
+       info->tlab_real_end_addr = &TLAB_REAL_END;
+
+#ifdef HAVE_KW_THREAD
+       tlab_next_addr = &tlab_next;
+#endif
+}
+
+/*
+ * Clear the thread local TLAB variables for all threads.
+ */
+void
+sgen_clear_tlabs (void)
+{
+       SgenThreadInfo *info;
+
+       FOREACH_THREAD (info) {
+               /* A new TLAB will be allocated when the thread does its first allocation */
+               *info->tlab_start_addr = NULL;
+               *info->tlab_next_addr = NULL;
+               *info->tlab_temp_end_addr = NULL;
+               *info->tlab_real_end_addr = NULL;
+       } END_FOREACH_THREAD
+}
+
+void
+sgen_init_allocator (void)
+{
+#if defined(HAVE_KW_THREAD) && !defined(SGEN_WITHOUT_MONO)
+       int tlab_next_addr_offset = -1;
+       int tlab_temp_end_offset = -1;
+
+
+       MONO_THREAD_VAR_OFFSET (tlab_next_addr, tlab_next_addr_offset);
+       MONO_THREAD_VAR_OFFSET (tlab_temp_end, tlab_temp_end_offset);
+
+       mono_tls_key_set_offset (TLS_KEY_SGEN_TLAB_NEXT_ADDR, tlab_next_addr_offset);
+       mono_tls_key_set_offset (TLS_KEY_SGEN_TLAB_TEMP_END, tlab_temp_end_offset);
+
+       g_assert (tlab_next_addr_offset != -1);
+       g_assert (tlab_temp_end_offset != -1);
+#endif
+
+#ifdef HEAVY_STATISTICS
+       mono_counters_register ("# objects allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_alloced);
+       mono_counters_register ("bytes allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_bytes_alloced);
+       mono_counters_register ("bytes allocated in LOS", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_bytes_alloced_los);
+#endif
+}
+
+#endif /*HAVE_SGEN_GC*/
diff --git a/mono/sgen/sgen-archdep.h b/mono/sgen/sgen-archdep.h
new file mode 100644 (file)
index 0000000..410ba6a
--- /dev/null
@@ -0,0 +1,209 @@
+/*
+ * sgen-archdep.h: Architecture dependent parts of SGen.
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __MONO_SGENARCHDEP_H__
+#define __MONO_SGENARCHDEP_H__
+
+#include <mono/utils/mono-context.h>
+
+/*
+ * Define either USE_MONO_CTX, or
+ * ARCH_SIGCTX_SP/ARCH_SIGCTX_IP/ARCH_STORE_REGS/ARCH_COPY_SIGCTX_REGS.
+ * Define ARCH_NUM_REGS to be the number of general registers in MonoContext, or the
+ * number of registers stored by ARCH_STORE_REGS.
+ *
+ * REDZONE_SIZE is the per-ABI number of bytes below the stack pointer that may
+ * hold live data (e.g. 128 on AMD64) — presumably included in conservative
+ * stack scanning; confirm at the use sites.
+ */
+
+#if defined(MONO_CROSS_COMPILE)
+
+/* A cross compiler never runs the collector, so everything is stubbed out. */
+#define REDZONE_SIZE   0
+
+#define ARCH_NUM_REGS 0
+#define ARCH_STORE_REGS(ptr)
+#define ARCH_SIGCTX_SP(ctx) NULL
+#define ARCH_SIGCTX_IP(ctx) NULL
+#define ARCH_COPY_SIGCTX_REGS(a,ctx)
+
+#elif defined(TARGET_X86)
+
+#define REDZONE_SIZE   0
+
+#define ARCH_NUM_REGS 8
+
+#ifndef MONO_ARCH_HAS_MONO_CONTEXT
+/* NOTE(review): message-less "#error 0" — x86 requires MonoContext support;
+ * a descriptive message would make build failures clearer. */
+#error 0
+#endif
+
+#define USE_MONO_CTX
+
+#elif defined(TARGET_AMD64)
+
+#define REDZONE_SIZE   128
+
+#define ARCH_NUM_REGS 16
+#define USE_MONO_CTX
+
+#elif defined(TARGET_POWERPC)
+
+#define REDZONE_SIZE   224
+
+#define ARCH_NUM_REGS 32
+/* stmw stores r0..r31 at 0(ptr); Apple's assembler wants "rN" register names. */
+#ifdef __APPLE__
+#define ARCH_STORE_REGS(ptr)   \
+       __asm__ __volatile__(   \
+               "stmw r0, 0(%0)\n"      \
+               :                       \
+               : "b" (ptr)             \
+       )
+#else
+#define ARCH_STORE_REGS(ptr)   \
+       __asm__ __volatile__(   \
+               "stmw 0, 0(%0)\n"       \
+               :                       \
+               : "b" (ptr)             \
+       )
+#endif
+#define ARCH_SIGCTX_SP(ctx)    (UCONTEXT_REG_Rn((ctx), 1))
+#define ARCH_SIGCTX_IP(ctx)    (UCONTEXT_REG_NIP((ctx)))
+#define ARCH_COPY_SIGCTX_REGS(a,ctx) do {      \
+       int __i;        \
+       for (__i = 0; __i < 32; ++__i)  \
+               ((a)[__i]) = (gpointer) UCONTEXT_REG_Rn((ctx), __i);    \
+       } while (0)
+
+/* MS_BLOCK_SIZE must be a multiple of the system pagesize, which for some
+   archs is 64k.  */
+#if defined(TARGET_POWERPC64) && _CALL_ELF == 2
+#define ARCH_MIN_MS_BLOCK_SIZE (64*1024)
+#define ARCH_MIN_MS_BLOCK_SIZE_SHIFT   16
+#endif
+
+#elif defined(TARGET_ARM)
+
+#define REDZONE_SIZE   0
+#define USE_MONO_CTX
+
+/* We don't store ip, sp */
+#define ARCH_NUM_REGS 14
+
+#elif defined(TARGET_ARM64)
+
+#ifdef __linux__
+#define REDZONE_SIZE    0
+#elif defined(__APPLE__)
+#define REDZONE_SIZE   128
+#else
+#error "Not implemented."
+#endif
+#define USE_MONO_CTX
+#define ARCH_NUM_REGS 31
+
+#elif defined(__mips__)
+
+#define REDZONE_SIZE   0
+
+#define USE_MONO_CTX
+#define ARCH_NUM_REGS 32
+
+#elif defined(__s390x__)
+
+#define REDZONE_SIZE   0
+
+#define USE_MONO_CTX
+#define ARCH_NUM_REGS 16       
+
+#elif defined(__sparc__)
+
+#define REDZONE_SIZE   0
+
+/* Don't bother with %g0 (%r0), it's always hard-coded to zero */
+#define ARCH_NUM_REGS 15       
+/* Store %g1-%g7 and %o0-%o7; v9 uses 8-byte slots, v8 uses 4-byte slots. */
+#ifdef __sparcv9
+#define ARCH_STORE_REGS(ptr)   \
+       __asm__ __volatile__(   \
+               "st %%g1,[%0]\n\t"      \
+               "st %%g2,[%0+0x08]\n\t" \
+               "st %%g3,[%0+0x10]\n\t" \
+               "st %%g4,[%0+0x18]\n\t" \
+               "st %%g5,[%0+0x20]\n\t" \
+               "st %%g6,[%0+0x28]\n\t" \
+               "st %%g7,[%0+0x30]\n\t" \
+               "st %%o0,[%0+0x38]\n\t" \
+               "st %%o1,[%0+0x40]\n\t" \
+               "st %%o2,[%0+0x48]\n\t" \
+               "st %%o3,[%0+0x50]\n\t" \
+               "st %%o4,[%0+0x58]\n\t" \
+               "st %%o5,[%0+0x60]\n\t" \
+               "st %%o6,[%0+0x68]\n\t" \
+               "st %%o7,[%0+0x70]\n\t" \
+               :                       \
+               : "r" (ptr)             \
+               : "memory"                      \
+       )
+#else
+#define ARCH_STORE_REGS(ptr)   \
+       __asm__ __volatile__(   \
+               "st %%g1,[%0]\n\t"      \
+               "st %%g2,[%0+0x04]\n\t" \
+               "st %%g3,[%0+0x08]\n\t" \
+               "st %%g4,[%0+0x0c]\n\t" \
+               "st %%g5,[%0+0x10]\n\t" \
+               "st %%g6,[%0+0x14]\n\t" \
+               "st %%g7,[%0+0x18]\n\t" \
+               "st %%o0,[%0+0x1c]\n\t" \
+               "st %%o1,[%0+0x20]\n\t" \
+               "st %%o2,[%0+0x24]\n\t" \
+               "st %%o3,[%0+0x28]\n\t" \
+               "st %%o4,[%0+0x2c]\n\t" \
+               "st %%o5,[%0+0x30]\n\t" \
+               "st %%o6,[%0+0x34]\n\t" \
+               "st %%o7,[%0+0x38]\n\t" \
+               :                       \
+               : "r" (ptr)             \
+               : "memory"                      \
+       )
+#endif
+
+#ifndef REG_SP
+#define REG_SP REG_O6
+#endif
+
+#define ARCH_SIGCTX_SP(ctx)    (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_SP])
+#define ARCH_SIGCTX_IP(ctx)    (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_PC])
+#define ARCH_COPY_SIGCTX_REGS(a,ctx) do {      \
+       (a)[0] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_G1]);        \
+       (a)[1] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_G2]);        \
+       (a)[2] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_G3]);        \
+       (a)[3] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_G4]);        \
+       (a)[4] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_G5]);        \
+       (a)[5] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_G6]);        \
+       (a)[6] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_G7]);        \
+       (a)[7] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_O0]);        \
+       (a)[8] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_O1]);        \
+       (a)[9] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_O2]);        \
+       (a)[10] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_O3]);       \
+       (a)[11] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_O4]);       \
+       (a)[12] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_O5]);       \
+       (a)[13] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_O6]);       \
+       (a)[14] = (gpointer) (((ucontext_t *)(ctx))->uc_mcontext.gregs [REG_O7]);       \
+       } while (0)
+
+#endif
+
+#endif /* __MONO_SGENARCHDEP_H__ */
diff --git a/mono/sgen/sgen-cardtable.c b/mono/sgen/sgen-cardtable.c
new file mode 100644 (file)
index 0000000..7fcd0a8
--- /dev/null
@@ -0,0 +1,618 @@
+/*
+ * sgen-cardtable.c: Card table implementation for sgen
+ *
+ * Author:
+ *     Rodrigo Kumpera (rkumpera@novell.com)
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#include <string.h>
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-cardtable.h"
+#include "mono/sgen/sgen-memory-governor.h"
+#include "mono/sgen/sgen-protocol.h"
+#include "mono/sgen/sgen-layout-stats.h"
+#include "mono/sgen/sgen-client.h"
+#include "mono/sgen/gc-internal-agnostic.h"
+#include "mono/utils/mono-memory-model.h"
+
+//#define CARDTABLE_STATS
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
+#include <sys/types.h>
+
+/* The card table: one byte per CARD_SIZE_IN_BYTES of address space. */
+guint8 *sgen_cardtable;
+
+/* Whether stores must be recorded unconditionally (set when the major
+ * collector is concurrent, so the mod union stays complete — see init). */
+static gboolean need_mod_union;
+
+#ifdef HEAVY_STATISTICS
+guint64 marked_cards;
+guint64 scanned_cards;
+guint64 scanned_objects;
+guint64 remarked_cards;
+
+static guint64 los_marked_cards;
+static guint64 large_objects;
+static guint64 bloby_objects;
+static guint64 los_array_cards;
+static guint64 los_array_remsets;
+
+#endif
+/* Cumulative and last-collection card scan times, registered as counters. */
+static guint64 major_card_scan_time;
+static guint64 los_card_scan_time;
+
+static guint64 last_major_scan_time;
+static guint64 last_los_scan_time;
+
+static void sgen_card_tables_collect_stats (gboolean begin);
+
+/*
+ * Number of cards overlapped by [address, address + size).  A zero size is
+ * treated as 1 byte (MAX), so the result is always at least one card.
+ */
+mword
+sgen_card_table_number_of_cards_in_range (mword address, mword size)
+{
+       mword end = address + MAX (1, size) - 1;
+       return (end >> CARD_BITS) - (address >> CARD_BITS) + 1;
+}
+
+/*
+ * Write barrier for a single reference-field store: do the store, then mark
+ * the card when the value is a nursery pointer (or unconditionally while the
+ * concurrent collector needs the mod union).
+ */
+static void
+sgen_card_table_wbarrier_set_field (GCObject *obj, gpointer field_ptr, GCObject* value)
+{
+       *(void**)field_ptr = value;
+       if (need_mod_union || sgen_ptr_in_nursery (value))
+               sgen_card_table_mark_address ((mword)field_ptr);
+       sgen_dummy_use (value);
+}
+
+/*
+ * Write barrier for copying `count` reference slots from src to dest.
+ * Copies backwards when the ranges overlap with src below dest, so the
+ * source is never overwritten before being read; marks a card per slot
+ * that stores a nursery pointer (or every slot when need_mod_union).
+ */
+static void
+sgen_card_table_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
+{
+       gpointer *dest = dest_ptr;
+       gpointer *src = src_ptr;
+
+       /*overlapping that required backward copying*/
+       if (src < dest && (src + count) > dest) {
+               gpointer *start = dest;
+               dest += count - 1;
+               src += count - 1;
+
+               for (; dest >= start; --src, --dest) {
+                       gpointer value = *src;
+                       SGEN_UPDATE_REFERENCE_ALLOW_NULL (dest, value);
+                       if (need_mod_union || sgen_ptr_in_nursery (value))
+                               sgen_card_table_mark_address ((mword)dest);
+                       sgen_dummy_use (value);
+               }
+       } else {
+               gpointer *end = dest + count;
+               for (; dest < end; ++src, ++dest) {
+                       gpointer value = *src;
+                       SGEN_UPDATE_REFERENCE_ALLOW_NULL (dest, value);
+                       if (need_mod_union || sgen_ptr_in_nursery (value))
+                               sgen_card_table_mark_address ((mword)dest);
+                       sgen_dummy_use (value);
+               }
+       }       
+}
+
+/*
+ * Write barrier for copying `count` value-type elements of `element_size`
+ * bytes: the memmove and the card marking happen inside a critical region
+ * (or under the GC lock) so the collector cannot observe a half-copied range.
+ * Marks every card in the destination range unconditionally.
+ */
+static void
+sgen_card_table_wbarrier_value_copy (gpointer dest, gpointer src, int count, size_t element_size)
+{
+       size_t size = count * element_size;
+
+#ifdef DISABLE_CRITICAL_REGION
+       LOCK_GC;
+#else
+       TLAB_ACCESS_INIT;
+       ENTER_CRITICAL_REGION;
+#endif
+       mono_gc_memmove_atomic (dest, src, size);
+       sgen_card_table_mark_range ((mword)dest, size);
+#ifdef DISABLE_CRITICAL_REGION
+       UNLOCK_GC;
+#else
+       EXIT_CRITICAL_REGION;
+#endif
+}
+
+/*
+ * Write barrier for a whole-object copy (src -> obj): copies everything past
+ * the object header and marks the destination's cards, inside a critical
+ * region (or under the GC lock).  NOTE(review): the mark range starts at
+ * `obj` rather than past the header — conservatively marks one extra card
+ * at most; confirm that is intentional.
+ */
+static void
+sgen_card_table_wbarrier_object_copy (GCObject* obj, GCObject *src)
+{
+       size_t size = sgen_client_par_object_get_size (SGEN_LOAD_VTABLE_UNCHECKED (obj), obj);
+
+#ifdef DISABLE_CRITICAL_REGION
+       LOCK_GC;
+#else
+       TLAB_ACCESS_INIT;
+       ENTER_CRITICAL_REGION;
+#endif
+       mono_gc_memmove_aligned ((char*)obj + SGEN_CLIENT_OBJECT_HEADER_SIZE, (char*)src + SGEN_CLIENT_OBJECT_HEADER_SIZE,
+                       size - SGEN_CLIENT_OBJECT_HEADER_SIZE);
+       sgen_card_table_mark_range ((mword)obj, size);
+#ifdef DISABLE_CRITICAL_REGION
+       UNLOCK_GC;
+#else
+       EXIT_CRITICAL_REGION;
+#endif 
+}
+
+/* Generic barrier: the caller already did the store, only mark the card. */
+static void
+sgen_card_table_wbarrier_generic_nostore (gpointer ptr)
+{
+       sgen_card_table_mark_address ((mword)ptr);      
+}
+
+#ifdef SGEN_HAVE_OVERLAPPING_CARDS
+
+/* With overlapping cards the live cards were copied to the shadow table and
+ * the primary table already cleared, so scanning reads the shadow copy. */
+guint8 *sgen_shadow_cardtable;
+
+#define SGEN_CARDTABLE_END (sgen_cardtable + CARD_COUNT_IN_BYTES)
+
+/*
+ * Return TRUE if any card covering [start, start + size) is marked
+ * (in the shadow table); does not clear anything here.
+ */
+static gboolean
+sgen_card_table_region_begin_scanning (mword start, mword size)
+{
+       mword end = start + size;
+       /*XXX this can be improved to work on words and have a single loop induction var */
+       while (start < end) {
+               if (sgen_card_table_card_begin_scanning (start))
+                       return TRUE;
+               start += CARD_SIZE_IN_BYTES;
+       }
+       return FALSE;
+}
+
+#else
+
+/*
+ * Non-overlapping variant: check the cards directly and clear them so the
+ * region is ready for the next collection.
+ */
+static gboolean
+sgen_card_table_region_begin_scanning (mword start, mword size)
+{
+       gboolean res = FALSE;
+       guint8 *card = sgen_card_table_get_card_address (start);
+       guint8 *end = card + sgen_card_table_number_of_cards_in_range (start, size);
+
+       /*XXX this can be improved to work on words and have a branchless body */
+       while (card != end) {
+               if (*card++) {
+                       res = TRUE;
+                       break;
+               }
+       }
+
+       /* NOTE(review): `size >> CARD_BITS` rounds DOWN while the scan above
+        * used sgen_card_table_number_of_cards_in_range (rounds up), so this
+        * memset can clear one card fewer than was examined — confirm. */
+       memset (sgen_card_table_get_card_address (start), 0, size >> CARD_BITS);
+
+       return res;
+}
+
+#endif
+
+/*FIXME this assumes that major blocks are multiple of 4K which is pretty reasonable */
+/*
+ * Copy `cards` bytes of card data for `address` into data_dest, reading the
+ * scan address (shadow table when overlapping).  Clears the source cards in
+ * the non-overlapping configuration.  Returns TRUE if any card was marked.
+ * Reads/writes whole mwords, hence the 4K-multiple assumption above.
+ */
+gboolean
+sgen_card_table_get_card_data (guint8 *data_dest, mword address, mword cards)
+{
+       mword *start = (mword*)sgen_card_table_get_card_scan_address (address);
+       mword *dest = (mword*)data_dest;
+       mword *end = (mword*)(data_dest + cards);
+       mword mask = 0;
+
+       for (; dest < end; ++dest, ++start) {
+               mword v = *start;
+               *dest = v;
+               mask |= v;
+
+#ifndef SGEN_HAVE_OVERLAPPING_CARDS
+               *start = 0;
+#endif
+       }
+
+       return mask != 0;
+}
+
+/* Round `ptr` down to the start of the card it lies in. */
+void*
+sgen_card_table_align_pointer (void *ptr)
+{
+       return (void*)((mword)ptr & ~(CARD_SIZE_IN_BYTES - 1));
+}
+
+/*
+ * Mark every card covering [address, address + size).  With overlapping
+ * cards the marked range can wrap past the end of the table, in which case
+ * it is split into a tail segment and a head segment.
+ */
+void
+sgen_card_table_mark_range (mword address, mword size)
+{
+       mword num_cards = sgen_card_table_number_of_cards_in_range (address, size);
+       guint8 *start = sgen_card_table_get_card_address (address);
+
+#ifdef SGEN_HAVE_OVERLAPPING_CARDS
+       /*
+        * FIXME: There's a theoretical bug here, namely that the card table is allocated so
+        * far toward the end of the address space that start + num_cards overflows.
+        */
+       guint8 *end = start + num_cards;
+       SGEN_ASSERT (0, num_cards <= CARD_COUNT_IN_BYTES, "How did we get an object larger than the card table?");
+       if (end > SGEN_CARDTABLE_END) {
+               memset (start, 1, SGEN_CARDTABLE_END - start);
+               memset (sgen_cardtable, 1, end - sgen_cardtable);
+               return;
+       }
+#endif
+
+       memset (start, 1, num_cards);
+}
+
+/*
+ * Return TRUE if any of the given (pre-fetched, contiguous) card bytes
+ * covering [address, address + size) is non-zero.
+ */
+static gboolean
+sgen_card_table_is_range_marked (guint8 *cards, mword address, mword size)
+{
+       guint8 *end = cards + sgen_card_table_number_of_cards_in_range (address, size);
+
+       /*This is safe since this function is only called by code that only passes continuous card blocks*/
+       while (cards != end) {
+               if (*cards++)
+                       return TRUE;
+       }
+       return FALSE;
+
+}
+
+/* Remembered-set hook: record a pointer store by marking its card. */
+static void
+sgen_card_table_record_pointer (gpointer address)
+{
+       *sgen_card_table_get_card_address ((mword)address) = 1;
+}
+
+/* Remembered-set hook: is the card covering `addr` currently marked? */
+static gboolean
+sgen_card_table_find_address (char *addr)
+{
+       return sgen_card_table_address_is_marked ((mword)addr);
+}
+
+/*
+ * Like find_address, but against a caller-supplied card snapshot `cards`
+ * whose first byte corresponds to the card containing `cards_start`.
+ */
+static gboolean
+sgen_card_table_find_address_with_cards (char *cards_start, guint8 *cards, char *addr)
+{
+       cards_start = sgen_card_table_align_pointer (cards_start);
+       return cards [(addr - cards_start) >> CARD_BITS];
+}
+
+/* OR `num_cards` card bytes from start_card into the mod-union buffer. */
+static void
+update_mod_union (guint8 *dest, guint8 *start_card, size_t num_cards)
+{
+       int i;
+       for (i = 0; i < num_cards; ++i)
+               dest [i] |= start_card [i];
+}
+
+/* Allocate a zeroed mod-union buffer, one byte per card covering the object. */
+guint8*
+sgen_card_table_alloc_mod_union (char *obj, mword obj_size)
+{
+       size_t num_cards = sgen_card_table_number_of_cards_in_range ((mword) obj, obj_size);
+       guint8 *mod_union = sgen_alloc_internal_dynamic (num_cards, INTERNAL_MEM_CARDTABLE_MOD_UNION, TRUE);
+       memset (mod_union, 0, num_cards);
+       return mod_union;
+}
+
+/* Free a buffer obtained from sgen_card_table_alloc_mod_union for the same object/size. */
+void
+sgen_card_table_free_mod_union (guint8 *mod_union, char *obj, mword obj_size)
+{
+       size_t num_cards = sgen_card_table_number_of_cards_in_range ((mword) obj, obj_size);
+       sgen_free_internal_dynamic (mod_union, num_cards, INTERNAL_MEM_CARDTABLE_MOD_UNION);
+}
+
+/* OR a run of card bytes into an existing (non-NULL) mod union. */
+void
+sgen_card_table_update_mod_union_from_cards (guint8 *dest, guint8 *start_card, size_t num_cards)
+{
+       SGEN_ASSERT (0, dest, "Why don't we have a mod union?");
+       update_mod_union (dest, start_card, num_cards);
+}
+
+/*
+ * OR the object's current cards into its mod union, handling wrap-around of
+ * the card table when cards can overlap.  Optionally reports the number of
+ * cards processed in the final (non-wrapped) segment via out_num_cards.
+ */
+void
+sgen_card_table_update_mod_union (guint8 *dest, char *obj, mword obj_size, size_t *out_num_cards)
+{
+       guint8 *start_card = sgen_card_table_get_card_address ((mword)obj);
+#ifndef SGEN_HAVE_OVERLAPPING_CARDS
+       guint8 *end_card = sgen_card_table_get_card_address ((mword)obj + obj_size - 1) + 1;
+#endif
+       size_t num_cards;
+
+#ifdef SGEN_HAVE_OVERLAPPING_CARDS
+       size_t rest;
+
+       rest = num_cards = sgen_card_table_number_of_cards_in_range ((mword) obj, obj_size);
+
+       /* Process the tail of the table segment by segment until what remains
+        * fits without wrapping, then fall through to the common path. */
+       while (start_card + rest > SGEN_CARDTABLE_END) {
+               size_t count = SGEN_CARDTABLE_END - start_card;
+               sgen_card_table_update_mod_union_from_cards (dest, start_card, count);
+               dest += count;
+               rest -= count;
+               start_card = sgen_cardtable;
+       }
+       num_cards = rest;
+#else
+       num_cards = end_card - start_card;
+#endif
+
+       sgen_card_table_update_mod_union_from_cards (dest, start_card, num_cards);
+
+       if (out_num_cards)
+               *out_num_cards = num_cards;
+}
+
+#ifdef SGEN_HAVE_OVERLAPPING_CARDS
+
+/*
+ * Copy the cards covering [start, start + size) from the primary table into
+ * the shadow table so they can be scanned while mutators keep marking the
+ * primary one.  Handles wrap-around of the (aliased) card table.
+ */
+static void
+move_cards_to_shadow_table (mword start, mword size)
+{
+       guint8 *from = sgen_card_table_get_card_address (start);
+       guint8 *to = sgen_card_table_get_shadow_card_address (start);
+       size_t bytes = sgen_card_table_number_of_cards_in_range (start, size);
+
+       if (bytes >= CARD_COUNT_IN_BYTES) {
+               /* Range covers the whole (aliased) table: copy it all. */
+               memcpy (sgen_shadow_cardtable, sgen_cardtable, CARD_COUNT_IN_BYTES);
+       } else if (to + bytes > SGEN_SHADOW_CARDTABLE_END) {
+               /* Wraps past the end: copy the tail, then the head. */
+               size_t first_chunk = SGEN_SHADOW_CARDTABLE_END - to;
+               size_t second_chunk = MIN (CARD_COUNT_IN_BYTES, bytes) - first_chunk;
+
+               memcpy (to, from, first_chunk);
+               memcpy (sgen_shadow_cardtable, sgen_cardtable, second_chunk);
+       } else {
+               memcpy (to, from, bytes);
+       }
+}
+
+/*
+ * Zero the primary-table cards covering [start, start + size), with the same
+ * whole-table / wrap-around / plain cases as the copy above.
+ */
+static void
+clear_cards (mword start, mword size)
+{
+       guint8 *addr = sgen_card_table_get_card_address (start);
+       size_t bytes = sgen_card_table_number_of_cards_in_range (start, size);
+
+       if (bytes >= CARD_COUNT_IN_BYTES) {
+               memset (sgen_cardtable, 0, CARD_COUNT_IN_BYTES);
+       } else if (addr + bytes > SGEN_CARDTABLE_END) {
+               size_t first_chunk = SGEN_CARDTABLE_END - addr;
+
+               memset (addr, 0, first_chunk);
+               memset (sgen_cardtable, 0, bytes - first_chunk);
+       } else {
+               memset (addr, 0, bytes);
+       }
+}
+
+
+#else
+
+/* Without card aliasing there is no wrap-around to worry about. */
+static void
+clear_cards (mword start, mword size)
+{
+       memset (sgen_card_table_get_card_address (start), 0, sgen_card_table_number_of_cards_in_range (start, size));
+}
+
+
+#endif
+
+/* Clear the cards for every live major-heap block range and LOS object. */
+static void
+sgen_card_table_clear_cards (void)
+{
+       /*XXX we could do this in 2 ways. using mincore or iterating over all sections/los objects */
+       sgen_major_collector_iterate_live_block_ranges (clear_cards);
+       sgen_los_iterate_live_block_ranges (clear_cards);
+}
+
+/* End-of-minor-collection hook: only flushes the CARDTABLE_STATS report. */
+static void
+sgen_card_table_finish_minor_collection (void)
+{
+       sgen_card_tables_collect_stats (FALSE);
+}
+
+/*
+ * Scan all marked cards (major heap, then LOS) as the remembered set for a
+ * minor collection, timing each phase.  With overlapping cards the live
+ * cards are first snapshotted into the shadow table and the primary table
+ * cleared, so mutator marking can proceed concurrently with the scan.
+ */
+static void
+sgen_card_table_scan_remsets (ScanCopyContext ctx)
+{
+       SGEN_TV_DECLARE (atv);
+       SGEN_TV_DECLARE (btv);
+
+       sgen_card_tables_collect_stats (TRUE);
+
+#ifdef SGEN_HAVE_OVERLAPPING_CARDS
+       /*FIXME we should have a bit on each block/los object telling if the object have marked cards.*/
+       /*First we copy*/
+       sgen_major_collector_iterate_live_block_ranges (move_cards_to_shadow_table);
+       sgen_los_iterate_live_block_ranges (move_cards_to_shadow_table);
+
+       /*Then we clear*/
+       sgen_card_table_clear_cards ();
+#endif
+       SGEN_TV_GETTIME (atv);
+       sgen_get_major_collector ()->scan_card_table (FALSE, ctx);
+       SGEN_TV_GETTIME (btv);
+       last_major_scan_time = SGEN_TV_ELAPSED (atv, btv); 
+       major_card_scan_time += last_major_scan_time;
+       sgen_los_scan_card_table (FALSE, ctx);
+       /* atv is reused here as the LOS end timestamp. */
+       SGEN_TV_GETTIME (atv);
+       last_los_scan_time = SGEN_TV_ELAPSED (btv, atv);
+       los_card_scan_time += last_los_scan_time;
+}
+
+/*
+ * Expose the card-table geometry to the managed write barrier: returns the
+ * table base (NULL when no managed barrier is compiled in or the table is
+ * not yet allocated), the card shift, and the aliasing mask (NULL when
+ * cards do not overlap).
+ */
+guint8*
+sgen_get_card_table_configuration (int *shift_bits, gpointer *mask)
+{
+#ifndef MANAGED_WBARRIER
+       return NULL;
+#else
+       if (!sgen_cardtable)
+               return NULL;
+
+       *shift_bits = CARD_BITS;
+#ifdef SGEN_HAVE_OVERLAPPING_CARDS
+       *mask = (gpointer)CARD_MASK;
+#else
+       *mask = NULL;
+#endif
+
+       return sgen_cardtable;
+#endif
+}
+
+#if 0
+/* Debug-only dump of an object's card bytes; compiled out by default. */
+void
+sgen_card_table_dump_obj_card (char *object, size_t size, void *dummy)
+{
+       guint8 *start = sgen_card_table_get_card_scan_address (object);
+       guint8 *end = start + sgen_card_table_number_of_cards_in_range (object, size);
+       int cnt = 0;
+       printf ("--obj %p %d cards [%p %p]--", object, size, start, end);
+       for (; start < end; ++start) {
+               if (cnt == 0)
+                       printf ("\n\t[%p] ", start);
+               printf ("%x ", *start);
+               ++cnt;
+               if (cnt == 8)
+                       cnt = 0;
+       }
+       printf ("\n");
+}
+#endif
+
+/*
+ * Scan one large ("bloby") object driven by its cards.  The client gets the
+ * first chance (arrays are scanned card-by-card there); otherwise the whole
+ * object is scanned if any covering card is marked, either from the supplied
+ * `cards` snapshot (mod-union path) or from the live card table.
+ */
+void
+sgen_cardtable_scan_object (char *obj, mword block_obj_size, guint8 *cards, gboolean mod_union, ScanCopyContext ctx)
+{
+       HEAVY_STAT (++large_objects);
+
+       if (sgen_client_cardtable_scan_object (obj, block_obj_size, cards, mod_union, ctx))
+               return;
+
+       HEAVY_STAT (++bloby_objects);
+       if (cards) {
+               if (sgen_card_table_is_range_marked (cards, (mword)obj, block_obj_size))
+                       ctx.ops->scan_object (obj, sgen_obj_get_descriptor (obj), ctx.queue);
+       } else if (sgen_card_table_region_begin_scanning ((mword)obj, block_obj_size)) {
+               ctx.ops->scan_object (obj, sgen_obj_get_descriptor (obj), ctx.queue);
+       }
+
+       binary_protocol_card_scan (obj, sgen_safe_object_get_size ((GCObject*)obj));
+}
+
+#ifdef CARDTABLE_STATS
+
+/* Per-space card counters filled before/after a collection. */
+typedef struct {
+       int total, marked, remarked, gc_marked; 
+} card_stats;
+
+static card_stats major_stats, los_stats;
+static card_stats *cur_stats;           /* which space the iterators accumulate into */
+
+/*
+ * Count total/marked/gc_marked (== 2, i.e. re-marked in a previous cycle)
+ * cards in a live range.  NOTE(review): `while (start <= end)` with
+ * end = start + size also inspects the card just past the range — confirm
+ * whether that off-by-one is intentional.
+ */
+static void
+count_marked_cards (mword start, mword size)
+{
+       mword end = start + size;
+       while (start <= end) {
+               guint8 card = *sgen_card_table_get_card_address (start);
+               ++cur_stats->total;
+               if (card)
+                       ++cur_stats->marked;
+               if (card == 2)
+                       ++cur_stats->gc_marked;
+               start += CARD_SIZE_IN_BYTES;
+       }
+}
+
+/*
+ * Count cards re-marked by mutators during the collection and tag them with
+ * the sentinel value 2 so the next count_marked_cards pass can see them.
+ */
+static void
+count_remarked_cards (mword start, mword size)
+{
+       mword end = start + size;
+       while (start <= end) {
+               if (sgen_card_table_address_is_marked (start)) {
+                       ++cur_stats->remarked;
+                       *sgen_card_table_get_card_address (start) = 2;
+               }
+               start += CARD_SIZE_IN_BYTES;
+       }
+}
+
+#endif
+
+/*
+ * CARDTABLE_STATS reporting: at collection start (begin == TRUE) reset and
+ * count marked cards per space; at collection end count re-marked cards and
+ * print the summary.  No-op unless CARDTABLE_STATS is defined.
+ */
+static void
+sgen_card_tables_collect_stats (gboolean begin)
+{
+#ifdef CARDTABLE_STATS
+       if (begin) {
+               memset (&major_stats, 0, sizeof (card_stats));
+               memset (&los_stats, 0, sizeof (card_stats));
+               cur_stats = &major_stats;
+               sgen_major_collector_iterate_live_block_ranges (count_marked_cards);
+               cur_stats = &los_stats;
+               sgen_los_iterate_live_block_ranges (count_marked_cards);
+       } else {
+               cur_stats = &major_stats;
+               sgen_major_collector_iterate_live_block_ranges (count_remarked_cards);
+               cur_stats = &los_stats;
+               sgen_los_iterate_live_block_ranges (count_remarked_cards);
+               /* NOTE(review): the / 10000.0f presumably converts SGEN_TV ticks
+                * (100ns units?) to milliseconds — confirm against SGEN_TV_ELAPSED. */
+               printf ("cards major (t %d m %d g %d r %d)  los (t %d m %d g %d r %d) major_scan %.2fms los_scan %.2fms\n", 
+                       major_stats.total, major_stats.marked, major_stats.gc_marked, major_stats.remarked,
+                       los_stats.total, los_stats.marked, los_stats.gc_marked, los_stats.remarked,
+                       last_major_scan_time / 10000.0f, last_los_scan_time / 10000.0f);
+       }
+#endif
+}
+
+/*
+ * Allocate the card table (and shadow table when cards alias), register the
+ * statistics counters, and install the card-table implementation of every
+ * SgenRememberedSet callback.  need_mod_union is enabled iff the major
+ * collector is concurrent.
+ */
+void
+sgen_card_table_init (SgenRememberedSet *remset)
+{
+       sgen_cardtable = sgen_alloc_os_memory (CARD_COUNT_IN_BYTES, SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE, "card table");
+
+#ifdef SGEN_HAVE_OVERLAPPING_CARDS
+       sgen_shadow_cardtable = sgen_alloc_os_memory (CARD_COUNT_IN_BYTES, SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE, "shadow card table");
+#endif
+
+#ifdef HEAVY_STATISTICS
+       mono_counters_register ("marked cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &marked_cards);
+       mono_counters_register ("scanned cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &scanned_cards);
+       mono_counters_register ("remarked cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &remarked_cards);
+
+       mono_counters_register ("los marked cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_marked_cards);
+       mono_counters_register ("los array cards scanned ", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_cards);
+       mono_counters_register ("los array remsets", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_remsets);
+       mono_counters_register ("cardtable scanned objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &scanned_objects);
+       mono_counters_register ("cardtable large objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &large_objects);
+       mono_counters_register ("cardtable bloby objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &bloby_objects);
+#endif
+       mono_counters_register ("cardtable major scan time", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &major_card_scan_time);
+       mono_counters_register ("cardtable los scan time", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &los_card_scan_time);
+
+
+       remset->wbarrier_set_field = sgen_card_table_wbarrier_set_field;
+       remset->wbarrier_arrayref_copy = sgen_card_table_wbarrier_arrayref_copy;
+       remset->wbarrier_value_copy = sgen_card_table_wbarrier_value_copy;
+       remset->wbarrier_object_copy = sgen_card_table_wbarrier_object_copy;
+       remset->wbarrier_generic_nostore = sgen_card_table_wbarrier_generic_nostore;
+       remset->record_pointer = sgen_card_table_record_pointer;
+
+       remset->scan_remsets = sgen_card_table_scan_remsets;
+
+       remset->finish_minor_collection = sgen_card_table_finish_minor_collection;
+       remset->clear_cards = sgen_card_table_clear_cards;
+
+       remset->find_address = sgen_card_table_find_address;
+       remset->find_address_with_cards = sgen_card_table_find_address_with_cards;
+
+       need_mod_union = sgen_get_major_collector ()->is_concurrent;
+}
+
+#endif /*HAVE_SGEN_GC*/
diff --git a/mono/sgen/sgen-cardtable.h b/mono/sgen/sgen-cardtable.h
new file mode 100644 (file)
index 0000000..85a6924
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __MONO_SGEN_CARD_TABLE_INLINES_H__
+#define __MONO_SGEN_CARD_TABLE_INLINES_H__
+
+/*WARNING: This function returns the number of cards regardless of overflow in case of overlapping cards.*/
+mword sgen_card_table_number_of_cards_in_range (mword address, mword size);
+
+void sgen_card_table_reset_region (mword start, mword end);
+void* sgen_card_table_align_pointer (void *ptr);
+void sgen_card_table_mark_range (mword address, mword size);
+void sgen_cardtable_scan_object (char *obj, mword obj_size, guint8 *cards,
+               gboolean mod_union, ScanCopyContext ctx);
+
+gboolean sgen_card_table_get_card_data (guint8 *dest, mword address, mword cards);
+
+guint8* sgen_card_table_alloc_mod_union (char *obj, mword obj_size);
+void sgen_card_table_free_mod_union (guint8 *mod_union, char *obj, mword obj_size);
+
+void sgen_card_table_update_mod_union_from_cards (guint8 *dest, guint8 *start_card, size_t num_cards);
+void sgen_card_table_update_mod_union (guint8 *dest, char *obj, mword obj_size, size_t *out_num_cards);
+
+guint8* sgen_get_card_table_configuration (int *shift_bits, gpointer *mask);
+
+void sgen_card_table_init (SgenRememberedSet *remset);
+
+/*How many bytes a single card covers (1 << CARD_BITS == 512 bytes)*/
+#define CARD_BITS 9
+
+/* How many bits of the address space is covered by the card table.
+ * If this value is smaller than the number of address bits, card aliasing is required.
+ */
+#define CARD_TABLE_BITS 32
+
+#define CARD_SIZE_IN_BYTES (1 << CARD_BITS)
+#define CARD_COUNT_BITS (CARD_TABLE_BITS - CARD_BITS)
+#define CARD_COUNT_IN_BYTES (1 << CARD_COUNT_BITS)
+#define CARD_MASK ((1 << CARD_COUNT_BITS) - 1)
+
+/* Pointers wider than CARD_TABLE_BITS alias into the table (overlapping cards). */
+#if SIZEOF_VOID_P * 8 > CARD_TABLE_BITS
+#define SGEN_HAVE_OVERLAPPING_CARDS    1
+#endif
+
+extern guint8 *sgen_cardtable;
+
+
+#ifdef SGEN_HAVE_OVERLAPPING_CARDS
+
+static inline guint8*
+sgen_card_table_get_card_address (mword address)
+{
+       return sgen_cardtable + ((address >> CARD_BITS) & CARD_MASK);
+}
+
+extern guint8 *sgen_shadow_cardtable;
+
+#define SGEN_SHADOW_CARDTABLE_END (sgen_shadow_cardtable + CARD_COUNT_IN_BYTES)
+
+static inline guint8*
+sgen_card_table_get_shadow_card_address (mword address)
+{
+       return sgen_shadow_cardtable + ((address >> CARD_BITS) & CARD_MASK);
+}
+
+/* Scanning reads the shadow copy; the primary table was already cleared. */
+static inline gboolean
+sgen_card_table_card_begin_scanning (mword address)
+{
+       return *sgen_card_table_get_shadow_card_address (address) != 0;
+}
+
+/* Nothing to do: the primary card was cleared when cards were snapshotted. */
+static inline void
+sgen_card_table_prepare_card_for_scanning (guint8 *card)
+{
+}
+
+#define sgen_card_table_get_card_scan_address sgen_card_table_get_shadow_card_address
+
+#else
+
+static inline guint8*
+sgen_card_table_get_card_address (mword address)
+{
+       return sgen_cardtable + (address >> CARD_BITS);
+}
+
+/* Read-and-clear: scanning a card consumes its mark. */
+static inline gboolean
+sgen_card_table_card_begin_scanning (mword address)
+{
+       guint8 *card = sgen_card_table_get_card_address (address);
+       gboolean res = *card;
+       *card = 0;
+       return res;
+}
+
+static inline void
+sgen_card_table_prepare_card_for_scanning (guint8 *card)
+{
+       *card = 0;
+}
+
+#define sgen_card_table_get_card_scan_address sgen_card_table_get_card_address
+
+#endif
+
+static inline gboolean
+sgen_card_table_address_is_marked (mword address)
+{
+       return *sgen_card_table_get_card_address (address) != 0;
+}
+
+static inline void
+sgen_card_table_mark_address (mword address)
+{
+       *sgen_card_table_get_card_address (address) = 1;
+}
+
+/* Byte offset of ptr's card relative to the card covering base. */
+static inline size_t
+sgen_card_table_get_card_offset (char *ptr, char *base)
+{
+       return (ptr - base) >> CARD_BITS;
+}
+
+#endif
diff --git a/mono/sgen/sgen-client.h b/mono/sgen/sgen-client.h
new file mode 100644 (file)
index 0000000..5056f46
--- /dev/null
@@ -0,0 +1,299 @@
+/*
+ * sgen-client.h: SGen client interface.
+ *
+ * Copyright (C) 2014 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "mono/sgen/sgen-pointer-queue.h"
+
+/*
+ * Init whatever needs initing.  This is called relatively early in SGen initialization.
+ * Must initialize the small ID for the current thread.
+ */
+void sgen_client_init (void);
+
+/*
+ * The slow path for getting an object's size.  We're passing in the vtable because we've
+ * already fetched it.
+ */
+mword sgen_client_slow_object_get_size (GCVTable *vtable, GCObject* o);
+
+/*
+ * Fill the given range with a dummy object.  If the range is too short to be filled with an
+ * object, null it.  Return `TRUE` if the range was filled with an object, `FALSE` if it was
+ * nulled.
+ */
+gboolean sgen_client_array_fill_range (char *start, size_t size);
+
+/*
+ * This is called if the nursery clearing policy is `clear-at-gc`, which is usually only
+ * used for debugging.  If `size` is large enough for the memory to have been filled with a
+ * dummy object, zero its header.  Note that there might not actually be a header there.
+ */
+void sgen_client_zero_array_fill_header (void *p, size_t size);
+
+/*
+ * Return whether the given object is an array fill dummy object.
+ */
+gboolean sgen_client_object_is_array_fill (GCObject *o);
+
+/*
+ * Return whether the given finalizable object's finalizer is critical, i.e., needs to run
+ * after all non-critical finalizers have run.
+ */
+gboolean sgen_client_object_has_critical_finalizer (GCObject *obj);
+
+/*
+ * Called after an object is enqueued for finalization.  This is a very low-level callback.
+ * It should almost certainly be a NOP.
+ *
+ * FIXME: Can we merge this with `sgen_client_object_has_critical_finalizer()`?
+ */
+void sgen_client_object_queued_for_finalization (GCObject *obj);
+
+/*
+ * Run the given object's finalizer.
+ */
+void sgen_client_run_finalize (GCObject *obj);
+
+/*
+ * Called after a collection if there are objects to finalize.  The world is still
+ * stopped.  This will usually notify the finalizer thread that it needs to run.
+ */
+void sgen_client_finalize_notify (void);
+
+/*
+ * Returns TRUE if no ephemerons have been marked.  Will be called again if it returned
+ * FALSE.  If ephemerons are not supported, just return TRUE.
+ */
+gboolean sgen_client_mark_ephemerons (ScanCopyContext ctx);
+
+/*
+ * Clear ephemeron pairs with unreachable keys.
+ * We pass the copy func so we can figure out if an array was promoted or not.
+ */
+void sgen_client_clear_unreachable_ephemerons (ScanCopyContext ctx);
+
+/*
+ * This is called for objects that are larger than one card.  If it's possible to scan only
+ * parts of the object based on which cards are marked, do so and return TRUE.  Otherwise,
+ * return FALSE.
+ */
+gboolean sgen_client_cardtable_scan_object (char *obj, mword block_obj_size, guint8 *cards, gboolean mod_union, ScanCopyContext ctx);
+
+/*
+ * Called after nursery objects have been pinned.  No action is necessary.
+ */
+void sgen_client_nursery_objects_pinned (void **definitely_pinned, int count);
+
+/*
+ * Called at a semi-random point during minor collections.  No action is necessary.
+ */
+void sgen_client_collecting_minor (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue);
+
+/*
+ * Called at semi-random points during major collections.  No action is necessary.
+ */
+void sgen_client_collecting_major_1 (void);
+void sgen_client_collecting_major_2 (void);
+void sgen_client_collecting_major_3 (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue);
+
+/*
+ * Called after a LOS object has been pinned.  No action is necessary.
+ */
+void sgen_client_pinned_los_object (char *obj);
+
+/*
+ * Called for every degraded allocation.  No action is necessary.
+ */
+void sgen_client_degraded_allocation (size_t size);
+
+/*
+ * Called whenever the amount of memory allocated for the managed heap changes.  No action
+ * is necessary.
+ */
+void sgen_client_total_allocated_heap_changed (size_t allocated_heap_size);
+
+/*
+ * Called when an object allocation fails.  The suggested action is to abort the program.
+ *
+ * FIXME: Don't we want to return a BOOL here that indicates whether to retry the
+ * allocation?
+ */
+void sgen_client_out_of_memory (size_t size);
+
+/*
+ * If the client has registered any internal memory types, this must return a string
+ * describing the given type.  Only used for debugging.
+ */
+const char* sgen_client_description_for_internal_mem_type (int type);
+
+/*
+ * Only used for debugging.  `sgen_client_vtable_get_namespace()` may return NULL.
+ */
+gboolean sgen_client_vtable_is_inited (GCVTable *vtable);
+const char* sgen_client_vtable_get_namespace (GCVTable *vtable);
+const char* sgen_client_vtable_get_name (GCVTable *vtable);
+
+/*
+ * Called before starting collections.  The world is already stopped.  No action is
+ * necessary.
+ */
+void sgen_client_pre_collection_checks (void);
+
+/*
+ * Must set the thread's thread info to `info`.  If the thread's small ID was not already
+ * initialized in `sgen_client_init()` (for the main thread, usually), it must be done here.
+ *
+ * `stack_bottom_fallback` is the value passed through via `sgen_thread_register()`.
+ */
+void sgen_client_thread_register (SgenThreadInfo* info, void *stack_bottom_fallback);
+
+void sgen_client_thread_unregister (SgenThreadInfo *p);
+
+/*
+ * Called on each worker thread when it starts up.  Must initialize the thread's small ID.
+ */
+void sgen_client_thread_register_worker (void);
+
+/*
+ * The least this function needs to do is scan all registers and thread stacks.  To do this
+ * conservatively, use `sgen_conservatively_pin_objects_from()`.
+ */
+void sgen_client_scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, ScanCopyContext ctx);
+
+/*
+ * Stop and restart the world, i.e., all threads that interact with the managed heap.  For
+ * single-threaded programs this is a nop.
+ */
+void sgen_client_stop_world (int generation);
+void sgen_client_restart_world (int generation, GGTimingInfo *timing);
+
+/*
+ * Must return FALSE.  The bridge is not supported outside of Mono.
+ */
+gboolean sgen_client_bridge_need_processing (void);
+
+/*
+ * None of these should ever be called.
+ */
+void sgen_client_bridge_reset_data (void);
+void sgen_client_bridge_processing_stw_step (void);
+void sgen_client_bridge_wait_for_processing (void);
+void sgen_client_bridge_processing_finish (int generation);
+gboolean sgen_client_bridge_is_bridge_object (GCObject *obj);
+void sgen_client_bridge_register_finalized_object (GCObject *object);
+
+/*
+ * No action is necessary.
+ */
+void sgen_client_mark_togglerefs (char *start, char *end, ScanCopyContext ctx);
+void sgen_client_clear_togglerefs (char *start, char *end, ScanCopyContext ctx);
+
+/*
+ * Called after collections, reporting the amount of time they took.  No action is
+ * necessary.
+ */
+void sgen_client_log_timing (GGTimingInfo *info, mword last_major_num_sections, mword last_los_memory_usage);
+
+/*
+ * Called to handle `MONO_GC_PARAMS` and `MONO_GC_DEBUG` options.  The `handle` functions
+ * must return TRUE if they have recognized and processed the option, FALSE otherwise.
+ */
+gboolean sgen_client_handle_gc_param (const char *opt);
+void sgen_client_print_gc_params_usage (void);
+gboolean sgen_client_handle_gc_debug (const char *opt);
+void sgen_client_print_gc_debug_usage (void);
+
+/*
+ * Called to obtain an identifier for the current location, such as a method pointer. This
+ * is used for logging the provenances of allocations with the heavy binary protocol.
+ */
+gpointer sgen_client_get_provenance (void);
+
+/*
+ * These client binary protocol functions are called from the respective binary protocol
+ * functions.  No action is necessary.  We suggest implementing them as inline functions in
+ * the client header file so that no overhead is incurred if they don't actually do
+ * anything.
+ */
+
+#define TYPE_INT int
+#define TYPE_LONGLONG long long
+#define TYPE_SIZE size_t
+#define TYPE_POINTER gpointer
+#define TYPE_BOOL gboolean
+
+#define BEGIN_PROTOCOL_ENTRY0(method) \
+       void sgen_client_ ## method (void);
+#define BEGIN_PROTOCOL_ENTRY_HEAVY0(method) \
+       void sgen_client_ ## method (void);
+#define BEGIN_PROTOCOL_ENTRY1(method,t1,f1) \
+       void sgen_client_ ## method (t1 f1);
+#define BEGIN_PROTOCOL_ENTRY_HEAVY1(method,t1,f1) \
+       void sgen_client_ ## method (t1 f1);
+#define BEGIN_PROTOCOL_ENTRY2(method,t1,f1,t2,f2) \
+       void sgen_client_ ## method (t1 f1, t2 f2);
+#define BEGIN_PROTOCOL_ENTRY_HEAVY2(method,t1,f1,t2,f2) \
+       void sgen_client_ ## method (t1 f1, t2 f2);
+#define BEGIN_PROTOCOL_ENTRY3(method,t1,f1,t2,f2,t3,f3) \
+       void sgen_client_ ## method (t1 f1, t2 f2, t3 f3);
+#define BEGIN_PROTOCOL_ENTRY_HEAVY3(method,t1,f1,t2,f2,t3,f3) \
+       void sgen_client_ ## method (t1 f1, t2 f2, t3 f3);
+#define BEGIN_PROTOCOL_ENTRY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
+       void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4);
+#define BEGIN_PROTOCOL_ENTRY_HEAVY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
+       void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4);
+#define BEGIN_PROTOCOL_ENTRY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
+       void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5);
+#define BEGIN_PROTOCOL_ENTRY_HEAVY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
+       void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5);
+#define BEGIN_PROTOCOL_ENTRY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
+       void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5, t6 f6);
+#define BEGIN_PROTOCOL_ENTRY_HEAVY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
+       void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5, t6 f6);
+
+#define FLUSH()
+
+#define DEFAULT_PRINT()
+#define CUSTOM_PRINT(_)
+
+#define IS_ALWAYS_MATCH(_)
+#define MATCH_INDEX(_)
+#define IS_VTABLE_MATCH(_)
+
+#define END_PROTOCOL_ENTRY
+#define END_PROTOCOL_ENTRY_HEAVY
+
+#include "sgen-protocol-def.h"
+
+#undef TYPE_INT
+#undef TYPE_LONGLONG
+#undef TYPE_SIZE
+#undef TYPE_POINTER
+#undef TYPE_BOOL
+
+#ifdef SGEN_WITHOUT_MONO
+/*
+ * Get the current thread's thread info.  This will only be called on managed threads.
+ */
+SgenThreadInfo* mono_thread_info_current (void);
+
+/*
+ * Get the current thread's small ID.  This will be called on managed and worker threads.
+ */
+int mono_thread_info_get_small_id (void);
+#endif
diff --git a/mono/sgen/sgen-conf.h b/mono/sgen/sgen-conf.h
new file mode 100644 (file)
index 0000000..f139a98
--- /dev/null
@@ -0,0 +1,217 @@
+/*
+ * sgen-conf.h: Tunable parameters and debugging switches.
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __MONO_SGENCONF_H__
+#define __MONO_SGENCONF_H__
+
+#include <glib.h>
+
+/*Basic defines and static tunables */
+
+#if SIZEOF_VOID_P == 4
+typedef guint32 mword;
+#else
+typedef guint64 mword;
+#endif
+
+
+/*
+ * Turning on heavy statistics will turn off the managed allocator and
+ * the managed write barrier.
+ */
+// #define HEAVY_STATISTICS
+
+#ifdef HEAVY_STATISTICS
+#define HEAVY_STAT(x)  x
+#else
+#define HEAVY_STAT(x)
+#endif
+
+/*
+ * Define this to allow the user to change the nursery size by
+ * specifying its value in the MONO_GC_PARAMS environmental
+ * variable. See mono_gc_base_init for details.
+ */
+#define USER_CONFIG 1
+
+/*
+ * The binary protocol enables logging a lot of the GC activity in a way that is not very
+ * intrusive and produces a compact file that can be searched using a custom tool.  This
+ * option enables very fine-grained binary protocol events, which will make the GC a tiny
+ * bit less efficient even if no binary protocol file is generated.
+ */
+//#define SGEN_HEAVY_BINARY_PROTOCOL
+
+/*
+ * This extends the heavy binary protocol to record the provenance of an object
+ * for every allocation.
+ */
+//#define SGEN_OBJECT_PROVENANCE
+
+/*
+ * This enables checks whenever objects are enqueued in gray queues.
+ * Right now the only check done is that we never enqueue nursery
+ * pointers in the concurrent collector.
+ */
+//#define SGEN_CHECK_GRAY_OBJECT_ENQUEUE
+
+/*
+ * This keeps track of where a gray object queue section is and
+ * whether it is where it should be.
+ */
+//#define SGEN_CHECK_GRAY_OBJECT_SECTIONS
+
+/*
+ * Enable this to check every reference update for null references and whether the update is
+ * made in a worker thread.  In only a few cases do we potentially update references by
+ * writing nulls, so we assert in all the cases where it's not allowed.  The concurrent
+ * collector's worker thread is not allowed to update references at all, so we also assert
+ * that we're not in the worker thread.
+ */
+//#define SGEN_CHECK_UPDATE_REFERENCE
+
+/*
+ * Define this and use the "xdomain-checks" MONO_GC_DEBUG option to
+ * have cross-domain checks in the write barrier.
+ */
+//#define XDOMAIN_CHECKS_IN_WBARRIER
+
+/*
+ * Define this to get number of objects marked information in the
+ * concurrent GC DTrace probes.  Has a small performance impact, so
+ * it's disabled by default.
+ */
+//#define SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
+
+/*
+ * Object layout statistics gather a histogram of reference locations
+ * over all scanned objects.  We use this information to improve GC
+ * descriptors to speed up scanning.  This does not provide any
+ * troubleshooting assistance (unless you are troubled in highly
+ * unusual ways) and makes scanning slower.
+ */
+//#define SGEN_OBJECT_LAYOUT_STATISTICS
+
+#ifndef SGEN_HEAVY_BINARY_PROTOCOL
+#ifndef HEAVY_STATISTICS
+#define MANAGED_ALLOCATION
+#ifndef XDOMAIN_CHECKS_IN_WBARRIER
+#define MANAGED_WBARRIER
+#endif
+#endif
+#endif
+
+/*
+ * Maximum level of debug to enable on this build.
+ * Making this a constant enables us to put logging in a lot of places and
+ * not pay its cost on release builds.
+ */
+#define SGEN_MAX_DEBUG_LEVEL 2
+
+/*
+ * Maximum level of asserts to enable on this build.
+ * FIXME replace all magic numbers with defines.
+ */
+#define SGEN_MAX_ASSERT_LEVEL 5
+
+
+#define GC_BITS_PER_WORD (sizeof (mword) * 8)
+
+/*Size of the section used by the copying GC. */
+#define SGEN_SIZEOF_GC_MEM_SECTION     ((sizeof (GCMemSection) + 7) & ~7)
+
+/*
+ * to quickly find the head of an object pinned by a conservative
+ * address we keep track of the objects allocated for each
+ * SGEN_SCAN_START_SIZE memory chunk in the nursery or other memory
+ * sections. Larger values have less memory overhead and bigger
+ * runtime cost. 4-8 KB are reasonable values.
+ */
+#define SGEN_SCAN_START_SIZE (4096*2)
+
+/*
+ * Objects bigger than this go into the large object space.  This size has a few
+ * constraints.  At least two of them must fit into a major heap block.  It must also play
+ * well with the run length GC descriptor, which encodes the object size.
+ */
+#define SGEN_MAX_SMALL_OBJ_SIZE 8000
+
+/*
+ * This is the maximum amount of memory we're willing to waste in order to speed up allocation.
+ * Wastage comes in three forms:
+ *
+ * -when building the nursery fragment list, small regions are discarded;
+ * -when allocating memory from a fragment if it ends up below the threshold, we remove it from the fragment list; and
+ * -when allocating a new tlab, we discard the remaining space of the old one
+ *
+ * Increasing this value speeds up allocation but will cause more frequent nursery collections as less space will be used.
+ * Decreasing this value will cause allocation to be slower since we'll have to cycle through more fragments.
+ * 512 anecdotally keeps wastage under control and doesn't impact allocation performance too much.
+*/
+#define SGEN_MAX_NURSERY_WASTE 512
+
+
+/*
+ * Minimum allowance for nursery allocations, as a multiple of the size of nursery.
+ *
+ * We allow at least this much allocation to happen to the major heap from multiple
+ * minor collections before triggering a major collection.
+ *
+ * Bigger values increase throughput by allowing more garbage to sit in the major heap.
+ * Smaller values lead to better memory efficiency but more frequent major collections.
+ */
+#define SGEN_DEFAULT_ALLOWANCE_NURSERY_SIZE_RATIO 4.0
+
+#define SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO 1.0
+#define SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO 10.0
+
+/*
+ * Default ratio of memory we want to release in a major collection in relation to the current heap size.
+ *
+ * A major collection target is to free a given amount of memory. This amount is a ratio of the major heap size.
+ *
+ * Values above 0.5 cause the heap to aggressively grow when it's small and waste memory when it's big.
+ * Lower values will produce more reasonable sized heaps when it's small, but will be suboptimal at large
+ * sizes as they will use a small fraction only.
+ *
+ */
+#define SGEN_DEFAULT_SAVE_TARGET_RATIO 0.5
+
+#define SGEN_MIN_SAVE_TARGET_RATIO 0.1
+#define SGEN_MAX_SAVE_TARGET_RATIO 2.0
+
+/*
+ * Configurable cementing parameters.
+ *
+ * If there are too many pinned nursery objects with many references
+ * from the major heap, the hash table size must be increased.
+ *
+ * The threshold is the number of references from the major heap to a
+ * pinned nursery object which triggers cementing: if there are more
+ * than that number of references, the pinned object is cemented until
+ * the next major collection.
+ */
+#define SGEN_CEMENT_HASH_SHIFT 6
+#define SGEN_CEMENT_HASH_SIZE  (1 << SGEN_CEMENT_HASH_SHIFT)
+#define SGEN_CEMENT_HASH(hv)   (((hv) ^ ((hv) >> SGEN_CEMENT_HASH_SHIFT)) & (SGEN_CEMENT_HASH_SIZE - 1))
+#define SGEN_CEMENT_THRESHOLD  1000
+
+#endif
diff --git a/mono/sgen/sgen-copy-object.h b/mono/sgen/sgen-copy-object.h
new file mode 100644 (file)
index 0000000..99fd0cc
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * sgen-copy-object.h: This is where objects are copied.
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+extern guint64 stat_copy_object_called_nursery;
+extern guint64 stat_objects_copied_nursery;
+
+extern guint64 stat_nursery_copy_object_failed_from_space;
+extern guint64 stat_nursery_copy_object_failed_forwarded;
+extern guint64 stat_nursery_copy_object_failed_pinned;
+
+extern guint64 stat_slots_allocated_in_vain;
+
+/*
+ * Copies an object and enqueues it if a queue is given.
+ *
+ * This function can be used even if the vtable of obj is not valid
+ * anymore, which is the case in the parallel collector.
+ */
+static MONO_ALWAYS_INLINE void
+par_copy_object_no_checks (char *destination, GCVTable *vt, void *obj, mword objsize, SgenGrayQueue *queue)
+{
+       sgen_client_pre_copy_checks (destination, vt, obj, objsize);
+       binary_protocol_copy (obj, destination, vt, objsize);
+
+       /* FIXME: assumes object layout */
+       memcpy (destination + sizeof (mword), (char*)obj + sizeof (mword), objsize - sizeof (mword));
+
+       /* adjust array->bounds */
+       SGEN_ASSERT (9, sgen_vtable_get_descriptor (vt), "vtable %p has no gc descriptor", vt);
+
+       sgen_client_update_copied_object (destination, vt, obj, objsize);
+       obj = destination;
+       if (queue) {
+               SGEN_LOG (9, "Enqueuing gray object %p (%s)", obj, sgen_client_vtable_get_name (vt));
+               GRAY_OBJECT_ENQUEUE (queue, obj, sgen_vtable_get_descriptor (vt));
+       }
+}
+
+/*
+ * This can return OBJ itself on OOM.
+ */
+static MONO_NEVER_INLINE void*
+copy_object_no_checks (void *obj, SgenGrayQueue *queue)
+{
+       GCVTable *vt = SGEN_LOAD_VTABLE_UNCHECKED (obj);
+       gboolean has_references = SGEN_VTABLE_HAS_REFERENCES (vt);
+       mword objsize = SGEN_ALIGN_UP (sgen_client_par_object_get_size (vt, obj));
+       /* FIXME: Does this not mark the newly allocated object? */
+       char *destination = COLLECTOR_SERIAL_ALLOC_FOR_PROMOTION (vt, obj, objsize, has_references);
+
+       if (G_UNLIKELY (!destination)) {
+               /* FIXME: Is this path ever tested? */
+               collector_pin_object (obj, queue);
+               sgen_set_pinned_from_failed_allocation (objsize);
+               return obj;
+       }
+
+       if (!has_references)
+               queue = NULL;
+
+       par_copy_object_no_checks (destination, vt, obj, objsize, queue);
+       /* FIXME: mark mod union cards if necessary */
+
+       /* set the forwarding pointer */
+       SGEN_FORWARD_OBJECT (obj, destination);
+
+       return destination;
+}
diff --git a/mono/sgen/sgen-debug.c b/mono/sgen/sgen-debug.c
new file mode 100644 (file)
index 0000000..e713a81
--- /dev/null
@@ -0,0 +1,1401 @@
+/*
+ * sgen-debug.c: Collector debugging
+ *
+ * Author:
+ *     Paolo Molaro (lupus@ximian.com)
+ *  Rodrigo Kumpera (kumpera@gmail.com)
+ *
+ * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
+ * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ * Copyright 2011 Xamarin, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#include <string.h>
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-cardtable.h"
+#include "mono/sgen/sgen-protocol.h"
+#include "mono/sgen/sgen-memory-governor.h"
+#include "mono/sgen/sgen-pinning.h"
+#include "mono/sgen/sgen-client.h"
+#ifndef SGEN_WITHOUT_MONO
+#include "mono/metadata/sgen-bridge-internal.h"
+#endif
+
+#define LOAD_VTABLE    SGEN_LOAD_VTABLE
+
+#define object_is_forwarded    SGEN_OBJECT_IS_FORWARDED
+#define object_is_pinned       SGEN_OBJECT_IS_PINNED
+#define safe_object_get_size   sgen_safe_object_get_size
+
+void describe_ptr (char *ptr);
+void check_object (char *start);
+
+/*
+ * ######################################################################
+ * ########  Collector debugging
+ * ######################################################################
+ */
+
+static const char*descriptor_types [] = {
+       "INVALID",
+       "run length",
+       "bitmap",
+       "small pointer-free",
+       "complex",
+       "vector",
+       "complex arrray",
+       "complex pointer-free"
+};
+
+static char* describe_nursery_ptr (char *ptr, gboolean need_setup);
+
+static void
+describe_pointer (char *ptr, gboolean need_setup)
+{
+       GCVTable *vtable;
+       mword desc;
+       int type;
+       char *start;
+       char *forwarded;
+       mword size;
+
+ restart:
+       if (sgen_ptr_in_nursery (ptr)) {
+               start = describe_nursery_ptr (ptr, need_setup);
+               if (!start)
+                       return;
+               ptr = start;
+               vtable = (GCVTable*)LOAD_VTABLE (ptr);
+       } else {
+               if (sgen_ptr_is_in_los (ptr, &start)) {
+                       if (ptr == start)
+                               printf ("Pointer is the start of object %p in LOS space.\n", start);
+                       else
+                               printf ("Pointer is at offset 0x%x of object %p in LOS space.\n", (int)(ptr - start), start);
+                       ptr = start;
+                       mono_sgen_los_describe_pointer (ptr);
+                       vtable = (GCVTable*)LOAD_VTABLE (ptr);
+               } else if (major_collector.ptr_is_in_non_pinned_space (ptr, &start)) {
+                       if (ptr == start)
+                               printf ("Pointer is the start of object %p in oldspace.\n", start);
+                       else if (start)
+                               printf ("Pointer is at offset 0x%x of object %p in oldspace.\n", (int)(ptr - start), start);
+                       else
+                               printf ("Pointer inside oldspace.\n");
+                       if (start)
+                               ptr = start;
+                       vtable = (GCVTable*)major_collector.describe_pointer (ptr);
+               } else if (major_collector.obj_is_from_pinned_alloc (ptr)) {
+                       // FIXME: Handle pointers to the inside of objects
+                       printf ("Pointer is inside a pinned chunk.\n");
+                       vtable = (GCVTable*)LOAD_VTABLE (ptr);
+               } else {
+                       printf ("Pointer unknown.\n");
+                       return;
+               }
+       }
+
+       if (object_is_pinned (ptr))
+               printf ("Object is pinned.\n");
+
+       if ((forwarded = object_is_forwarded (ptr))) {
+               printf ("Object is forwarded to %p:\n", forwarded);
+               ptr = forwarded;
+               goto restart;
+       }
+
+       printf ("VTable: %p\n", vtable);
+       if (vtable == NULL) {
+               printf ("VTable is invalid (empty).\n");
+               goto bridge;
+       }
+       if (sgen_ptr_in_nursery (vtable)) {
+               printf ("VTable is invalid (points inside nursery).\n");
+               goto bridge;
+       }
+       printf ("Class: %s.%s\n", sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable));
+
+       desc = sgen_vtable_get_descriptor ((GCVTable*)vtable);
+       printf ("Descriptor: %lx\n", (long)desc);
+
+       type = desc & DESC_TYPE_MASK;
+       printf ("Descriptor type: %d (%s)\n", type, descriptor_types [type]);
+
+       size = sgen_safe_object_get_size ((GCObject*)ptr);
+       printf ("Size: %d\n", (int)size);
+
+ bridge:
+       ;
+#ifndef SGEN_WITHOUT_MONO
+       sgen_bridge_describe_pointer ((GCObject*)ptr);
+#endif
+}
+
+void
+describe_ptr (char *ptr)
+{
+       describe_pointer (ptr, TRUE);
+}
+
+static gboolean missing_remsets;
+
+/*
+ * We let a missing remset slide if the target object is pinned,
+ * because the store might have happened but the remset not yet added,
+ * but in that case the target must be pinned.  We might theoretically
+ * miss some missing remsets this way, but it's very unlikely.
+ */
+#undef HANDLE_PTR
+#define HANDLE_PTR(ptr,obj)    do {    \
+       if (*(ptr) && sgen_ptr_in_nursery ((char*)*(ptr))) { \
+               if (!sgen_get_remset ()->find_address ((char*)(ptr)) && !sgen_cement_lookup (*(ptr))) { \
+                       GCVTable *__vt = SGEN_LOAD_VTABLE ((obj));      \
+                       SGEN_LOG (0, "Oldspace->newspace reference %p at offset %td in object %p (%s.%s) not found in remsets.", *(ptr), (char*)(ptr) - (char*)(obj), (obj), sgen_client_vtable_get_namespace (__vt), sgen_client_vtable_get_name (__vt)); \
+                       binary_protocol_missing_remset ((obj), __vt, (int) ((char*)(ptr) - (char*)(obj)), *(ptr), (gpointer)LOAD_VTABLE(*(ptr)), object_is_pinned (*(ptr))); \
+                       if (!object_is_pinned (*(ptr)))                                                         \
+                               missing_remsets = TRUE;                                                                 \
+               }                                                                                                                               \
+       }                                                                                                                                       \
+       } while (0)
+
+/*
+ * Check that each object reference which points into the nursery can
+ * be found in the remembered sets.
+ */
+static void
+check_consistency_callback (char *start, size_t size, void *dummy)
+{
+       GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
+       mword desc = sgen_vtable_get_descriptor ((GCVTable*)vt);
+       SGEN_LOG (8, "Scanning object %p, vtable: %p (%s)", start, vt, sgen_client_vtable_get_name (vt));
+
+#include "sgen-scan-object.h"
+}
+
+/*
+ * Perform consistency check of the heap.
+ *
+ * Assumes the world is stopped.
+ */
+void
+sgen_check_consistency (void)
+{
+       // Need to add more checks
+
+       missing_remsets = FALSE;
+
+       SGEN_LOG (1, "Begin heap consistency check...");
+
+       // Check that oldspace->newspace pointers are registered with the collector
+       major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)check_consistency_callback, NULL);
+
+       sgen_los_iterate_objects ((IterateObjectCallbackFunc)check_consistency_callback, NULL);
+
+       SGEN_LOG (1, "Heap consistency check done.");
+
+       if (!binary_protocol_is_enabled ())
+               g_assert (!missing_remsets);
+}
+
+static gboolean
+is_major_or_los_object_marked (char *obj)
+{
+       if (sgen_safe_object_get_size ((GCObject*)obj) > SGEN_MAX_SMALL_OBJ_SIZE) {
+               return sgen_los_object_is_pinned (obj);
+       } else {
+               return sgen_get_major_collector ()->is_object_live (obj);
+       }
+}
+
+/*
+ * HANDLE_PTR variant used by check_mod_union_callback (through the
+ * sgen-scan-object.h include): for each major->major reference out of a
+ * marked object, verify the slot is covered by the card-table mod union
+ * (`start' and `cards' are locals of the enclosing callback).  Missing
+ * coverage is logged, recorded in the binary protocol, and flagged via
+ * missing_remsets.
+ */
+#undef HANDLE_PTR
+#define HANDLE_PTR(ptr,obj)	do {	\
+	if (*(ptr) && !sgen_ptr_in_nursery ((char*)*(ptr)) && !is_major_or_los_object_marked ((char*)*(ptr))) { \
+		if (!sgen_get_remset ()->find_address_with_cards (start, cards, (char*)(ptr))) { \
+			GCVTable *__vt = SGEN_LOAD_VTABLE ((obj));	\
+			SGEN_LOG (0, "major->major reference %p at offset %td in object %p (%s.%s) not found in remsets.", *(ptr), (char*)(ptr) - (char*)(obj), (obj), sgen_client_vtable_get_namespace (__vt), sgen_client_vtable_get_name (__vt)); \
+			binary_protocol_missing_remset ((obj), __vt, (int) ((char*)(ptr) - (char*)(obj)), *(ptr), (gpointer)LOAD_VTABLE(*(ptr)), object_is_pinned (*(ptr))); \
+			missing_remsets = TRUE;				\
+		}												\
+	}													\
+	} while (0)
+
+/*
+ * Per-object callback for sgen_check_mod_union_consistency.
+ *
+ * @dummy encodes the space: TRUE = LOS, FALSE = major heap.  Unmarked
+ * objects are skipped.  For marked ones the object's mod-union card data
+ * is fetched and each reference is checked by the HANDLE_PTR macro above,
+ * applied by the included sgen-scan-object.h (which consumes the locals
+ * `start', `desc' and `cards').
+ */
+static void
+check_mod_union_callback (char *start, size_t size, void *dummy)
+{
+	gboolean in_los = (gboolean) (size_t) dummy;
+	GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
+	mword desc = sgen_vtable_get_descriptor ((GCVTable*)vt);
+	guint8 *cards;
+	SGEN_LOG (8, "Scanning object %p, vtable: %p (%s)", start, vt, sgen_client_vtable_get_name (vt));
+
+	if (!is_major_or_los_object_marked (start))
+		return;
+
+	if (in_los)
+		cards = sgen_los_header_for_object (start)->cardtable_mod_union;
+	else
+		cards = sgen_get_major_collector ()->get_cardtable_mod_union_for_object (start);
+
+	SGEN_ASSERT (0, cards, "we must have mod union for marked major objects");
+
+#include "sgen-scan-object.h"
+}
+
+/*
+ * Verify that every major->major reference out of marked objects is
+ * recorded in the card-table mod union.  Walks major-heap objects
+ * (FALSE) and LOS objects (TRUE) with check_mod_union_callback; asserts
+ * no remsets are missing unless the binary protocol is recording.
+ */
+void
+sgen_check_mod_union_consistency (void)
+{
+	missing_remsets = FALSE;
+
+	major_collector.iterate_objects (ITERATE_OBJECTS_ALL, (IterateObjectCallbackFunc)check_mod_union_callback, (void*)FALSE);
+
+	sgen_los_iterate_objects ((IterateObjectCallbackFunc)check_mod_union_callback, (void*)TRUE);
+
+	if (!binary_protocol_is_enabled ())
+		g_assert (!missing_remsets);
+}
+
+/*
+ * HANDLE_PTR variant for check_major_refs_callback: every non-NULL
+ * reference must have a loadable vtable, otherwise abort with the slot
+ * offset and object size.
+ */
+#undef HANDLE_PTR
+#define HANDLE_PTR(ptr,obj)	do {					\
+		if (*(ptr) && !LOAD_VTABLE (*(ptr)))						\
+			g_error ("Could not load vtable for obj %p slot %zd (size %zd)", obj, (char*)ptr - (char*)obj, (size_t)safe_object_get_size ((GCObject*)obj)); \
+	} while (0)
+
+/*
+ * Per-object callback for sgen_check_major_refs: scans the object's
+ * reference slots (via sgen-scan-object.h and the HANDLE_PTR above),
+ * aborting on any reference whose vtable cannot be loaded.
+ */
+static void
+check_major_refs_callback (char *start, size_t size, void *dummy)
+{
+	mword desc = sgen_obj_get_descriptor (start);
+
+#include "sgen-scan-object.h"
+}
+
+/*
+ * Check that all references out of major-heap and LOS objects point at
+ * objects with valid (loadable) vtables.
+ */
+void
+sgen_check_major_refs (void)
+{
+	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)check_major_refs_callback, NULL);
+	sgen_los_iterate_objects ((IterateObjectCallbackFunc)check_major_refs_callback, NULL);
+}
+
+/* Check that the reference is valid */
+/* HANDLE_PTR variant for check_object below: asserts each non-NULL
+ * reference has a vtable with a readable namespace string. */
+#undef HANDLE_PTR
+#define HANDLE_PTR(ptr,obj)	do {	\
+		if (*(ptr)) {	\
+			g_assert (sgen_client_vtable_get_namespace (SGEN_LOAD_VTABLE_UNCHECKED (*(ptr))));	\
+		}	\
+	} while (0)
+
+/*
+ * check_object:
+ *
+ *   Perform consistency check on an object. Currently we only check that the
+ * reference fields are valid.
+ *
+ * A NULL @start is accepted and ignored.
+ */
+void
+check_object (char *start)
+{
+	mword desc;
+
+	if (!start)
+		return;
+
+	desc = sgen_obj_get_descriptor (start);
+
+	/* Scans the reference slots using the HANDLE_PTR defined above. */
+#include "sgen-scan-object.h"
+}
+
+
+static char **valid_nursery_objects;
+static int valid_nursery_object_count;
+static gboolean broken_heap;
+
+static void 
+setup_mono_sgen_scan_area_with_callback (char *object, size_t size, void *data)
+{
+       valid_nursery_objects [valid_nursery_object_count++] = object;
+}
+
+/*
+ * (Re)build the sorted dump of valid nursery objects.  The backing array
+ * is lazily allocated once, sized DEFAULT_NURSERY_SIZE bytes — enough
+ * since each nursery object occupies at least one pointer-sized slot.
+ */
+static void
+setup_valid_nursery_objects (void)
+{
+	if (!valid_nursery_objects)
+		valid_nursery_objects = sgen_alloc_os_memory (DEFAULT_NURSERY_SIZE, SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE, "debugging data");
+	valid_nursery_object_count = 0;
+	sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, setup_mono_sgen_scan_area_with_callback, NULL, FALSE);
+}
+
+/*
+ * Binary-search the sorted nursery dump for an exact object address.
+ * Returns TRUE only if @object is the start of a recorded object.
+ */
+static gboolean
+find_object_in_nursery_dump (char *object)
+{
+	int lo = 0, hi = valid_nursery_object_count;
+
+	while (lo < hi) {
+		int mid = lo + ((hi - lo) >> 1);
+		char *candidate = valid_nursery_objects [mid];
+
+		if (object == candidate)
+			return TRUE;
+		if (object < candidate)
+			hi = mid;
+		else
+			lo = mid + 1;
+	}
+	g_assert (lo == hi);
+	return FALSE;
+}
+
+/* Invoke @callback on every object recorded in the nursery dump. */
+static void
+iterate_valid_nursery_objects (IterateObjectCallbackFunc callback, void *data)
+{
+	int i;
+	for (i = 0; i < valid_nursery_object_count; ++i) {
+		char *obj = valid_nursery_objects [i];
+		callback (obj, safe_object_get_size ((GCObject*)obj), data);
+	}
+}
+
+/*
+ * Log a description of a nursery pointer: the containing object (exact
+ * or interior) or "unalloc'd-memory".  Returns the object start, or NULL
+ * if @ptr does not fall inside any recorded object.  Rebuilds the
+ * nursery dump first when @need_setup is TRUE.
+ */
+static char*
+describe_nursery_ptr (char *ptr, gboolean need_setup)
+{
+	int i;
+
+	if (need_setup)
+		setup_valid_nursery_objects ();
+
+	/* Linear scan for the last recorded object starting at or before ptr. */
+	for (i = 0; i < valid_nursery_object_count - 1; ++i) {
+		if (valid_nursery_objects [i + 1] > ptr)
+			break;
+	}
+
+	if (i >= valid_nursery_object_count || valid_nursery_objects [i] + safe_object_get_size ((GCObject *)valid_nursery_objects [i]) < ptr) {
+		SGEN_LOG (0, "nursery-ptr (unalloc'd-memory)");
+		return NULL;
+	} else {
+		char *obj = valid_nursery_objects [i];
+		if (obj == ptr)
+			SGEN_LOG (0, "nursery-ptr %p", obj);
+		else
+			SGEN_LOG (0, "nursery-ptr %p (interior-ptr offset %td)", obj, ptr - obj);
+		return obj;
+	}
+}
+
+/*
+ * TRUE when @object is the start of a live heap object: nursery pointers
+ * are checked against the pre-built dump, others against the LOS and the
+ * major collector.
+ */
+static gboolean
+is_valid_object_pointer (char *object)
+{
+	if (sgen_ptr_in_nursery (object))
+		return find_object_in_nursery_dump (object);
+
+	return sgen_los_is_valid_object (object)
+		|| major_collector.is_valid_object (object);
+}
+
+/*
+ * Report an invalid reference found at @slot inside @obj, describe the
+ * bogus pointer, and mark the heap as broken.
+ */
+static void
+bad_pointer_spew (char *obj, char **slot)
+{
+	char *ptr = *slot;
+	GCVTable *vtable = (GCVTable*)LOAD_VTABLE (obj);
+
+	SGEN_LOG (0, "Invalid object pointer %p at offset %td in object %p (%s.%s):", ptr,
+			(char*)slot - obj,
+			obj, sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable));
+	describe_pointer (ptr, FALSE);
+	broken_heap = TRUE;
+}
+
+/*
+ * Report an old->new reference at @slot inside @obj that is not covered
+ * by any remset, and mark the heap as broken.
+ */
+static void
+missing_remset_spew (char *obj, char **slot)
+{
+	char *ptr = *slot;
+	GCVTable *vtable = (GCVTable*)LOAD_VTABLE (obj);
+
+	SGEN_LOG (0, "Oldspace->newspace reference %p at offset %td in object %p (%s.%s) not found in remsets.",
+			ptr, (char*)slot - obj, obj, 
+			sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable));
+
+	broken_heap = TRUE;
+}
+
+/*
+FIXME Flag missing remsets due to pinning as non fatal
+*/
+/*
+ * HANDLE_PTR variant for verify_object_pointers_callback: flags invalid
+ * pointers, and old->new references that have neither a remset entry nor
+ * a cement entry (pinned targets may be excused via the callback's local
+ * `allow_missing_pinned').
+ */
+#undef HANDLE_PTR
+#define HANDLE_PTR(ptr,obj)	do {	\
+		if (*(char**)ptr) {	\
+			if (!is_valid_object_pointer (*(char**)ptr)) {	\
+				bad_pointer_spew ((char*)obj, (char**)ptr);	\
+			} else if (!sgen_ptr_in_nursery (obj) && sgen_ptr_in_nursery ((char*)*ptr)) {	\
+				if (!sgen_get_remset ()->find_address ((char*)(ptr)) && !sgen_cement_lookup ((char*)*(ptr)) && (!allow_missing_pinned || !SGEN_OBJECT_IS_PINNED ((char*)*(ptr)))) \
+				missing_remset_spew ((char*)obj, (char**)ptr);	\
+			}	\
+        } \
+	} while (0)
+
+/*
+ * Per-object callback for sgen_check_whole_heap: verifies every
+ * reference slot via the HANDLE_PTR above.  @data carries the
+ * allow_missing_pinned flag consumed by that macro.
+ */
+static void
+verify_object_pointers_callback (char *start, size_t size, void *data)
+{
+	gboolean allow_missing_pinned = (gboolean) (size_t) data;
+	mword desc = sgen_obj_get_descriptor (start);
+
+#include "sgen-scan-object.h"
+}
+
+/*
+FIXME:
+-This heap checker is racy regarding inlined write barriers and other JIT tricks that
+depend on OP_DUMMY_USE.
+*/
+/*
+ * Validate every reference in the whole heap (nursery, major, LOS).
+ * When @allow_missing_pinned is TRUE, missing remsets whose target is
+ * pinned are not reported.  Aborts if any problem was found.
+ */
+void
+sgen_check_whole_heap (gboolean allow_missing_pinned)
+{
+	setup_valid_nursery_objects ();
+
+	broken_heap = FALSE;
+	sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, verify_object_pointers_callback, (void*) (size_t) allow_missing_pinned, FALSE);
+	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, verify_object_pointers_callback, (void*) (size_t) allow_missing_pinned);
+	sgen_los_iterate_objects (verify_object_pointers_callback, (void*) (size_t) allow_missing_pinned);
+
+	g_assert (!broken_heap);
+}
+
+/*
+ * TRUE when @object lies in any GC-managed space: the nursery (any
+ * address), the LOS, or the major heap.
+ */
+static gboolean
+ptr_in_heap (char *object)
+{
+	return sgen_ptr_in_nursery (object)
+		|| sgen_los_is_valid_object (object)
+		|| major_collector.is_valid_object (object);
+}
+
+/*
+ * sgen_check_objref:
+ *   Do consistency checks on the object reference OBJ. Assert on failure.
+ */
+void
+sgen_check_objref (char *obj)
+{
+	g_assert (ptr_in_heap (obj));
+}
+
+/*
+ * Scan every registered thread's stack and saved register context for
+ * conservative references into [obj, obj+size), logging each hit.
+ * No-op when built without the Mono client (SGEN_WITHOUT_MONO).
+ */
+static void
+find_pinning_ref_from_thread (char *obj, size_t size)
+{
+#ifndef SGEN_WITHOUT_MONO
+	int j;
+	SgenThreadInfo *info;
+	char *endobj = obj + size;
+
+	FOREACH_THREAD (info) {
+		char **start = (char**)info->client_info.stack_start;
+		/* Threads that are skipped or have GC disabled don't pin. */
+		if (info->client_info.skip || info->client_info.gc_disabled)
+			continue;
+		while (start < (char**)info->client_info.stack_end) {
+			if (*start >= obj && *start < endobj)
+				SGEN_LOG (0, "Object %p referenced in thread %p (id %p) at %p, stack: %p-%p", obj, info, (gpointer)mono_thread_info_get_tid (info), start, info->client_info.stack_start, info->client_info.stack_end);
+			start++;
+		}
+
+		/* Also check the thread's saved registers. */
+		for (j = 0; j < ARCH_NUM_REGS; ++j) {
+#ifdef USE_MONO_CTX
+			mword w = ((mword*)&info->client_info.ctx) [j];
+#else
+			mword w = (mword)&info->client_info.regs [j];
+#endif
+
+			if (w >= (mword)obj && w < (mword)obj + size)
+				SGEN_LOG (0, "Object %p referenced in saved reg %d of thread %p (id %p)", obj, j, info, (gpointer)mono_thread_info_get_tid (info));
+		} END_FOREACH_THREAD
+	}
+#endif
+}
+
+/*
+ * Debugging function: find in the conservative roots where @obj is being pinned.
+ *
+ * Scans conservatively-registered normal roots (those with no precise
+ * descriptor) and then thread stacks/registers, logging every slot whose
+ * value points into [obj, obj+size).
+ */
+static G_GNUC_UNUSED void
+find_pinning_reference (char *obj, size_t size)
+{
+	char **start;
+	RootRecord *root;
+	char *endobj = obj + size;
+
+	SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_NORMAL], start, root) {
+		/* if desc is non-null it has precise info */
+		if (!root->root_desc) {
+			while (start < (char**)root->end_root) {
+				if (*start >= obj && *start < endobj) {
+					SGEN_LOG (0, "Object %p referenced in pinned roots %p-%p\n", obj, start, root->end_root);
+				}
+				start++;
+			}
+		}
+	} SGEN_HASH_TABLE_FOREACH_END;
+
+	find_pinning_ref_from_thread (obj, size);
+}
+
+/*
+ * HANDLE_PTR variant for check_marked_callback: asserts every reference
+ * target is in a valid marked state — nursery targets must not be
+ * forwarded, small old-space targets must be live in the major
+ * collector, LOS targets must be pinned.
+ */
+#undef HANDLE_PTR
+#define HANDLE_PTR(ptr,obj)	do {					\
+		char* __target = *(char**)ptr;				\
+		if (__target) {						\
+			if (sgen_ptr_in_nursery (__target)) {		\
+				g_assert (!SGEN_OBJECT_IS_FORWARDED (__target)); \
+			} else {					\
+				mword __size = sgen_safe_object_get_size ((GCObject*)__target); \
+				if (__size <= SGEN_MAX_SMALL_OBJ_SIZE)	\
+					g_assert (major_collector.is_object_live (__target)); \
+				else					\
+					g_assert (sgen_los_object_is_pinned (__target)); \
+			}						\
+		}							\
+	} while (0)
+
+/*
+ * Per-object callback for sgen_check_heap_marked.
+ *
+ * The meaning of the boolean packed in @dummy depends on the space being
+ * iterated: for nursery objects it is nursery_must_be_pinned, for LOS it
+ * is TRUE (skip unpinned objects), for the major heap it is FALSE (skip
+ * non-live objects).  Surviving objects have their references validated
+ * by the HANDLE_PTR above.
+ */
+static void
+check_marked_callback (char *start, size_t size, void *dummy)
+{
+	gboolean flag = (gboolean) (size_t) dummy;
+	mword desc;
+
+	if (sgen_ptr_in_nursery (start)) {
+		if (flag)
+			SGEN_ASSERT (0, SGEN_OBJECT_IS_PINNED (start), "All objects remaining in the nursery must be pinned");
+	} else if (flag) {
+		if (!sgen_los_object_is_pinned (start))
+			return;
+	} else {
+		if (!major_collector.is_object_live (start))
+			return;
+	}
+
+	desc = sgen_obj_get_descriptor_safe (start);
+
+#include "sgen-scan-object.h"
+}
+
+/*
+ * Verify that all references out of surviving objects point at properly
+ * marked/pinned targets.  @nursery_must_be_pinned additionally requires
+ * every remaining nursery object to be pinned.
+ */
+void
+sgen_check_heap_marked (gboolean nursery_must_be_pinned)
+{
+	setup_valid_nursery_objects ();
+
+	iterate_valid_nursery_objects (check_marked_callback, (void*)(size_t)nursery_must_be_pinned);
+	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, check_marked_callback, (void*)FALSE);
+	sgen_los_iterate_objects (check_marked_callback, (void*)TRUE);
+}
+
+/*
+ * Per-object callback for sgen_check_nursery_objects_pinned: no nursery
+ * object may be forwarded, and its pinned bit must match the expectation
+ * packed in @data.
+ */
+static void
+check_nursery_objects_pinned_callback (char *obj, size_t size, void *data /* ScanCopyContext *ctx */)
+{
+	gboolean pinned = (gboolean) (size_t) data;
+
+	g_assert (!SGEN_OBJECT_IS_FORWARDED (obj));
+	if (pinned)
+		g_assert (SGEN_OBJECT_IS_PINNED (obj));
+	else
+		g_assert (!SGEN_OBJECT_IS_PINNED (obj));
+}
+
+/*
+ * Assert that every nursery object's pinned state equals @pinned and
+ * that none is forwarded.  Clears nursery fragments first so the scan
+ * sees a contiguous object sequence.
+ */
+void
+sgen_check_nursery_objects_pinned (gboolean pinned)
+{
+	sgen_clear_nursery_fragments ();
+	sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
+			(IterateObjectCallbackFunc)check_nursery_objects_pinned_callback, (void*) (size_t) pinned /* (void*)&ctx */, FALSE);
+}
+
+/*
+ * Log every nursery scan-start entry that falls strictly inside the
+ * object span (start, end) — such an entry would point into the middle
+ * of an object.
+ */
+static void
+verify_scan_starts (char *start, char *end)
+{
+	size_t idx;
+
+	for (idx = 0; idx < nursery_section->num_scan_start; ++idx) {
+		char *scan_addr = nursery_section->scan_starts [idx];
+		if (scan_addr > start && scan_addr < end)
+			SGEN_LOG (0, "NFC-BAD SCAN START [%zu] %p for obj [%p %p]", idx, scan_addr, start, end);
+	}
+}
+
+/*
+ * Walk the nursery object by object: check scan starts, verify object
+ * canaries when enabled, and optionally dump every object and hole
+ * (@do_dump_nursery_content).  Prepares the nursery allocator for
+ * pinning first so unused fragments are cleaned up.
+ */
+void
+sgen_debug_verify_nursery (gboolean do_dump_nursery_content)
+{
+	char *start, *end, *cur, *hole_start;
+
+	if (nursery_canaries_enabled ())
+		SGEN_LOG (0, "Checking nursery canaries...");
+
+	/*This cleans up unused fragments */
+	sgen_nursery_allocator_prepare_for_pinning ();
+
+	hole_start = start = cur = sgen_get_nursery_start ();
+	end = sgen_get_nursery_end ();
+
+	while (cur < end) {
+		size_t ss, size;
+		gboolean is_array_fill;
+
+		/* A NULL word means unallocated space; skip one pointer at a time. */
+		if (!*(void**)cur) {
+			cur += sizeof (void*);
+			continue;
+		}
+
+		if (object_is_forwarded (cur))
+			SGEN_LOG (0, "FORWARDED OBJ %p", cur);
+		else if (object_is_pinned (cur))
+			SGEN_LOG (0, "PINNED OBJ %p", cur);
+
+		ss = safe_object_get_size ((GCObject*)cur);
+		size = SGEN_ALIGN_UP (ss);
+		verify_scan_starts (cur, cur + size);
+		is_array_fill = sgen_client_object_is_array_fill ((GCObject*)cur);
+		if (do_dump_nursery_content) {
+			GCVTable *vtable = SGEN_LOAD_VTABLE (cur);
+			if (cur > hole_start)
+				SGEN_LOG (0, "HOLE [%p %p %d]", hole_start, cur, (int)(cur - hole_start));
+			SGEN_LOG (0, "OBJ  [%p %p %d %d %s.%s %d]", cur, cur + size, (int)size, (int)ss,
+					sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable),
+					is_array_fill);
+		}
+		/* Array-fill objects carry no canary; CANARIFY_SIZE adjusts the
+		 * advance so the canary bytes are stepped over. */
+		if (nursery_canaries_enabled () && !is_array_fill) {
+			CHECK_CANARY_FOR_OBJECT (cur);
+			CANARIFY_SIZE (size);
+		}
+		cur += size;
+		hole_start = cur;
+	}
+}
+
+/*
+ * Checks that no objects in the nursery are fowarded or pinned.  This
+ * is a precondition to restarting the mutator while doing a
+ * concurrent collection.  Note that we don't clear fragments because
+ * we depend on that having happened earlier.
+ */
+void
+sgen_debug_check_nursery_is_clean (void)
+{
+	char *end, *cur;
+
+	cur = sgen_get_nursery_start ();
+	end = sgen_get_nursery_end ();
+
+	while (cur < end) {
+		size_t size;
+
+		/* NULL word = unallocated space. */
+		if (!*(void**)cur) {
+			cur += sizeof (void*);
+			continue;
+		}
+
+		g_assert (!object_is_forwarded (cur));
+		g_assert (!object_is_pinned (cur));
+
+		size = SGEN_ALIGN_UP (safe_object_get_size ((GCObject*)cur));
+		verify_scan_starts (cur, cur + size);
+
+		cur += size;
+	}
+}
+
+/* Whether mono_gc_scan_for_specific_ref uses precise descriptors (TRUE)
+ * or a conservative word-by-word scan (FALSE). */
+static gboolean scan_object_for_specific_ref_precise = TRUE;
+
+/* HANDLE_PTR variant for scan_object_for_specific_ref: prints every
+ * slot whose value equals the searched `key' (local of the scanner). */
+#undef HANDLE_PTR
+#define HANDLE_PTR(ptr,obj) do {					\
+		if ((GCObject*)*(ptr) == key) {				\
+			GCVTable *vtable = SGEN_LOAD_VTABLE (*(ptr));	\
+			g_print ("found ref to %p in object %p (%s.%s) at offset %td\n", \
+					key, (obj), sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable), ((char*)(ptr) - (char*)(obj))); \
+		}							\
+	} while (0)
+
+/*
+ * Search one object for references to @key, following a forwarding
+ * pointer first.  Precise mode scans only reference slots (via the
+ * HANDLE_PTR above); conservative mode compares every word of the
+ * object against @key.
+ */
+static void
+scan_object_for_specific_ref (char *start, GCObject *key)
+{
+	char *forwarded;
+
+	if ((forwarded = SGEN_OBJECT_IS_FORWARDED (start)))
+		start = forwarded;
+
+	if (scan_object_for_specific_ref_precise) {
+		mword desc = sgen_obj_get_descriptor_safe (start);
+		#include "sgen-scan-object.h"
+	} else {
+		mword *words = (mword*)start;
+		size_t size = safe_object_get_size ((GCObject*)start);
+		int i;
+		for (i = 0; i < size / sizeof (mword); ++i) {
+			if (words [i] == (mword)key) {
+				GCVTable *vtable = SGEN_LOAD_VTABLE (start);
+				g_print ("found possible ref to %p in object %p (%s.%s) at offset %td\n",
+						key, start, sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable), i * sizeof (mword));
+			}
+		}
+	}
+}
+
+/* IterateObjectCallbackFunc adapter around scan_object_for_specific_ref. */
+static void
+scan_object_for_specific_ref_callback (char *obj, size_t size, GCObject *key)
+{
+	scan_object_for_specific_ref (obj, key);
+}
+
+/* Print a match when a root record slot (@obj) holds the searched @key. */
+static void
+check_root_obj_specific_ref (RootRecord *root, GCObject *key, GCObject *obj)
+{
+	if (key == obj)
+		g_print ("found ref to %p in root record %p\n", key, root);
+}
+
+/* Current search key / root record for the user-marker callback below
+ * (set by scan_roots_for_specific_ref while it runs). */
+static GCObject *check_key = NULL;
+static RootRecord *check_root = NULL;
+
+/* SgenUserRootMarkFunc adapter: checks one root slot against check_key. */
+static void
+check_root_obj_specific_ref_from_marker (void **obj, void *gc_data)
+{
+	check_root_obj_specific_ref (check_root, check_key, *obj);
+}
+
+/*
+ * Search every registered root of @root_type for slots containing @key,
+ * decoding each root's descriptor (bitmap, complex bitmap, or
+ * user-marker).  ROOT_DESC_RUN_LEN roots are not expected here.
+ */
+static void
+scan_roots_for_specific_ref (GCObject *key, int root_type)
+{
+	void **start_root;
+	RootRecord *root;
+	check_key = key;
+
+	SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
+		mword desc = root->root_desc;
+
+		check_root = root;
+
+		switch (desc & ROOT_DESC_TYPE_MASK) {
+		case ROOT_DESC_BITMAP:
+			/* One bit per pointer-sized slot, shifted out low to high. */
+			desc >>= ROOT_DESC_TYPE_SHIFT;
+			while (desc) {
+				if (desc & 1)
+					check_root_obj_specific_ref (root, key, *start_root);
+				desc >>= 1;
+				start_root++;
+			}
+			return;
+		case ROOT_DESC_COMPLEX: {
+			/* First word of the bitmap data is its length + 1. */
+			gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
+			int bwords = (int) ((*bitmap_data) - 1);
+			void **start_run = start_root;
+			bitmap_data++;
+			while (bwords-- > 0) {
+				gsize bmap = *bitmap_data++;
+				void **objptr = start_run;
+				while (bmap) {
+					if (bmap & 1)
+						check_root_obj_specific_ref (root, key, *objptr);
+					bmap >>= 1;
+					++objptr;
+				}
+				start_run += GC_BITS_PER_WORD;
+			}
+			break;
+		}
+		case ROOT_DESC_USER: {
+			SgenUserRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
+			marker (start_root, check_root_obj_specific_ref_from_marker, NULL);
+			break;
+		}
+		case ROOT_DESC_RUN_LEN:
+			g_assert_not_reached ();
+		default:
+			g_assert_not_reached ();
+		}
+	} SGEN_HASH_TABLE_FOREACH_END;
+
+	check_key = NULL;
+	check_root = NULL;
+}
+
+/*
+ * Debugging entry point: print every location in the whole heap, the
+ * registered roots, the pinned roots, and (if the world is stopped) the
+ * thread stacks that references @key.  @precise selects descriptor-based
+ * vs. conservative object scanning.
+ */
+void
+mono_gc_scan_for_specific_ref (GCObject *key, gboolean precise)
+{
+	void **ptr;
+	RootRecord *root;
+
+	scan_object_for_specific_ref_precise = precise;
+
+	sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
+			(IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key, TRUE);
+
+	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
+
+	sgen_los_iterate_objects ((IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
+
+	scan_roots_for_specific_ref (key, ROOT_TYPE_NORMAL);
+	scan_roots_for_specific_ref (key, ROOT_TYPE_WBARRIER);
+
+	/* NOTE(review): *ptr and key are passed in swapped positions relative
+	 * to check_root_obj_specific_ref's (root, key, obj) signature; the
+	 * comparison is symmetric so the result is the same — confirm intent. */
+	SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], ptr, root) {
+		while (ptr < (void**)root->end_root) {
+			check_root_obj_specific_ref (root, *ptr, key);
+			++ptr;
+		}
+	} SGEN_HASH_TABLE_FOREACH_END;
+
+	if (sgen_is_world_stopped ())
+		find_pinning_ref_from_thread ((char*)key, sizeof (MonoObject));
+}
+
+#ifndef SGEN_WITHOUT_MONO
+
+/* Domain being torn down; set while sgen_scan_for_registered_roots_in_domain runs. */
+static MonoDomain *check_domain = NULL;
+
+/* Assert a root slot's object does not belong to check_domain.
+ * NOTE(review): callers pass the object pointer itself (*start_root),
+ * so the MonoObject** parameter type and the double dereference look
+ * inconsistent — verify against the root layout before relying on it. */
+static void
+check_obj_not_in_domain (MonoObject **o)
+{
+	g_assert (((*o))->vtable->domain != check_domain);
+}
+
+
+/* SgenUserRootMarkFunc adapter for the same check. */
+static void
+check_obj_not_in_domain_callback (void **o, void *gc_data)
+{
+	g_assert (((MonoObject*)(*o))->vtable->domain != check_domain);
+}
+
+/*
+ * Assert that no registered root of @root_type (outside the MonoDomain
+ * struct itself) still references an object belonging to @domain —
+ * used when unloading a domain.  Decodes bitmap, complex, and
+ * user-marker descriptors; run-length roots are not expected.
+ */
+void
+sgen_scan_for_registered_roots_in_domain (MonoDomain *domain, int root_type)
+{
+	void **start_root;
+	RootRecord *root;
+	check_domain = domain;
+	SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
+		mword desc = root->root_desc;
+
+		/* The MonoDomain struct is allowed to hold
+		   references to objects in its own domain. */
+		if (start_root == (void**)domain)
+			continue;
+
+		switch (desc & ROOT_DESC_TYPE_MASK) {
+		case ROOT_DESC_BITMAP:
+			desc >>= ROOT_DESC_TYPE_SHIFT;
+			while (desc) {
+				if ((desc & 1) && *start_root)
+					check_obj_not_in_domain (*start_root);
+				desc >>= 1;
+				start_root++;
+			}
+			break;
+		case ROOT_DESC_COMPLEX: {
+			/* First word of the bitmap data is its length + 1. */
+			gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
+			int bwords = (int)((*bitmap_data) - 1);
+			void **start_run = start_root;
+			bitmap_data++;
+			while (bwords-- > 0) {
+				gsize bmap = *bitmap_data++;
+				void **objptr = start_run;
+				while (bmap) {
+					if ((bmap & 1) && *objptr)
+						check_obj_not_in_domain (*objptr);
+					bmap >>= 1;
+					++objptr;
+				}
+				start_run += GC_BITS_PER_WORD;
+			}
+			break;
+		}
+		case ROOT_DESC_USER: {
+			SgenUserRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
+			marker (start_root, check_obj_not_in_domain_callback, NULL);
+			break;
+		}
+		case ROOT_DESC_RUN_LEN:
+			g_assert_not_reached ();
+		default:
+			g_assert_not_reached ();
+		}
+	} SGEN_HASH_TABLE_FOREACH_END;
+
+	check_domain = NULL;
+}
+
+/*
+ * Whitelist of known-benign cross-domain references: specific runtime
+ * fields (MonoThread.internal_thread, MonoInternalThread.current_appcontext,
+ * MonoRealProxy.unwrapped_server) and two class-pair patterns matched by
+ * namespace/name strings.  Returns TRUE when the reference at @ptr in
+ * @obj is allowed to cross domains.
+ */
+static gboolean
+is_xdomain_ref_allowed (gpointer *ptr, char *obj, MonoDomain *domain)
+{
+	MonoObject *o = (MonoObject*)(obj);
+	MonoObject *ref = (MonoObject*)*(ptr);
+	size_t offset = (char*)(ptr) - (char*)o;
+
+	if (o->vtable->klass == mono_defaults.thread_class && offset == G_STRUCT_OFFSET (MonoThread, internal_thread))
+		return TRUE;
+	if (o->vtable->klass == mono_defaults.internal_thread_class && offset == G_STRUCT_OFFSET (MonoInternalThread, current_appcontext))
+		return TRUE;
+
+#ifndef DISABLE_REMOTING
+	if (mono_defaults.real_proxy_class->supertypes && mono_class_has_parent_fast (o->vtable->klass, mono_defaults.real_proxy_class) &&
+			offset == G_STRUCT_OFFSET (MonoRealProxy, unwrapped_server))
+		return TRUE;
+#endif
+	/* Thread.cached_culture_info */
+	if (!strcmp (ref->vtable->klass->name_space, "System.Globalization") &&
+			!strcmp (ref->vtable->klass->name, "CultureInfo") &&
+			!strcmp(o->vtable->klass->name_space, "System") &&
+			!strcmp(o->vtable->klass->name, "Object[]"))
+		return TRUE;
+	/*
+	 *  at System.IO.MemoryStream.InternalConstructor (byte[],int,int,bool,bool) [0x0004d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:121
+	 * at System.IO.MemoryStream..ctor (byte[]) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:81
+	 * at (wrapper remoting-invoke-with-check) System.IO.MemoryStream..ctor (byte[]) <IL 0x00020, 0xffffffff>
+	 * at System.Runtime.Remoting.Messaging.CADMethodCallMessage.GetArguments () [0x0000d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/CADMessages.cs:327
+	 * at System.Runtime.Remoting.Messaging.MethodCall..ctor (System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/MethodCall.cs:87
+	 * at System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) [0x00018] in /home/schani/Work/novell/trunk/mcs/class/corlib/System/AppDomain.cs:1213
+	 * at (wrapper remoting-invoke-with-check) System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) <IL 0x0003d, 0xffffffff>
+	 * at System.Runtime.Remoting.Channels.CrossAppDomainSink.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00008] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Channels/CrossAppDomainChannel.cs:198
+	 * at (wrapper runtime-invoke) object.runtime_invoke_CrossAppDomainSink/ProcessMessageRes_object_object (object,intptr,intptr,intptr) <IL 0x0004c, 0xffffffff>
+	 */
+	if (!strcmp (ref->vtable->klass->name_space, "System") &&
+			!strcmp (ref->vtable->klass->name, "Byte[]") &&
+			!strcmp (o->vtable->klass->name_space, "System.IO") &&
+			!strcmp (o->vtable->klass->name, "MemoryStream"))
+		return TRUE;
+	return FALSE;
+}
+
+/*
+ * If the reference at @ptr inside @obj crosses into another domain and
+ * is not whitelisted, print a diagnostic (resolving the field name by
+ * offset and dumping string contents) and then search the heap for
+ * everything pointing at @obj.
+ */
+static void
+check_reference_for_xdomain (gpointer *ptr, char *obj, MonoDomain *domain)
+{
+	MonoObject *o = (MonoObject*)(obj);
+	MonoObject *ref = (MonoObject*)*(ptr);
+	size_t offset = (char*)(ptr) - (char*)o;
+	MonoClass *class;
+	MonoClassField *field;
+	char *str;
+
+	if (!ref || ref->vtable->domain == domain)
+		return;
+	if (is_xdomain_ref_allowed (ptr, obj, domain))
+		return;
+
+	/* Walk the class hierarchy to find the field at this offset. */
+	field = NULL;
+	for (class = o->vtable->klass; class; class = class->parent) {
+		int i;
+
+		for (i = 0; i < class->field.count; ++i) {
+			if (class->fields[i].offset == offset) {
+				field = &class->fields[i];
+				break;
+			}
+		}
+		if (field)
+			break;
+	}
+
+	if (ref->vtable->klass == mono_defaults.string_class)
+		str = mono_string_to_utf8 ((MonoString*)ref);
+	else
+		str = NULL;
+	g_print ("xdomain reference in %p (%s.%s) at offset %d (%s) to %p (%s.%s) (%s)  -  pointed to by:\n",
+			o, o->vtable->klass->name_space, o->vtable->klass->name,
+			offset, field ? field->name : "",
+			ref, ref->vtable->klass->name_space, ref->vtable->klass->name, str ? str : "");
+	mono_gc_scan_for_specific_ref (o, TRUE);
+	if (str)
+		g_free (str);
+}
+
+/* HANDLE_PTR variant: delegate every slot to the xdomain checker
+ * (`domain' is a local of scan_object_for_xdomain_refs). */
+#undef HANDLE_PTR
+#define HANDLE_PTR(ptr,obj)	check_reference_for_xdomain ((ptr), (obj), domain)
+
+/* Scan one object's references against its own vtable's domain. */
+static void
+scan_object_for_xdomain_refs (char *start, mword size, void *data)
+{
+	MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (start);
+	MonoDomain *domain = vt->domain;
+	mword desc = sgen_vtable_get_descriptor ((GCVTable*)vt);
+
+	#include "sgen-scan-object.h"
+}
+
+/*
+ * Report every disallowed cross-domain reference in the whole heap:
+ * nursery and major heap via the iterators, LOS via a direct walk of
+ * los_object_list.
+ */
+void
+sgen_check_for_xdomain_refs (void)
+{
+	LOSObject *bigobj;
+
+	sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
+			(IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL, FALSE);
+
+	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);
+
+	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
+		scan_object_for_xdomain_refs (bigobj->data, sgen_los_object_size (bigobj), NULL);
+}
+
+#endif
+
+/* If not null, dump the heap after each collection into this file */
+static FILE *heap_dump_file = NULL;
+
+/* Emit an <occupied> XML element for a run of live data within a section. */
+void
+sgen_dump_occupied (char *start, char *end, char *section_start)
+{
+	fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
+}
+
+/*
+ * Write a <section> element for `section` to the heap dump file, emitting
+ * <occupied> ranges for contiguous runs of live objects.  A NULL vtable
+ * word marks a free pointer-sized slot.
+ */
+void
+sgen_dump_section (GCMemSection *section, const char *type)
+{
+       char *start = section->data;
+       char *end = section->data + section->size;
+       char *occ_start = NULL;         /* start of the current occupied run, or NULL */
+
+       fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
+
+       while (start < end) {
+               guint size;
+               //GCVTable *vt;
+               //MonoClass *class;
+
+               if (!*(void**)start) {
+                       /* Free slot: close the current occupied run, if any. */
+                       if (occ_start) {
+                               sgen_dump_occupied (occ_start, start, section->data);
+                               occ_start = NULL;
+                       }
+                       start += sizeof (void*); /* should be ALLOC_ALIGN, really */
+                       continue;
+               }
+               g_assert (start < section->next_data);
+
+               if (!occ_start)
+                       occ_start = start;
+
+               //vt = (GCVTable*)SGEN_LOAD_VTABLE (start);
+               //class = vt->klass;
+
+               size = SGEN_ALIGN_UP (safe_object_get_size ((GCObject*) start));
+
+               /*
+               fprintf (heap_dump_file, "<object offset=\"%d\" class=\"%s.%s\" size=\"%d\"/>\n",
+                               start - section->data,
+                               vt->klass->name_space, vt->klass->name,
+                               size);
+               */
+
+               start += size;
+       }
+       /* Flush a run that extends to the end of the section. */
+       if (occ_start)
+               sgen_dump_occupied (occ_start, start, section->data);
+
+       fprintf (heap_dump_file, "</section>\n");
+}
+
+/*
+ * Write a single <object> element for `obj` to the heap dump file.  The
+ * class name is copied with angle brackets and quotes stripped (see the
+ * comment below).  If dump_location is set, also record whether the object
+ * lives in the nursery, the major heap or LOS, decided by its address and
+ * size.  Compiled to a no-op when building without Mono client support.
+ */
+static void
+dump_object (GCObject *obj, gboolean dump_location)
+{
+#ifndef SGEN_WITHOUT_MONO
+       static char class_name [1024];
+
+       MonoClass *class = mono_object_class (obj);
+       int i, j;
+
+       /*
+        * Python's XML parser is too stupid to parse angle brackets
+        * in strings, so we just ignore them;
+        */
+       i = j = 0;
+       while (class->name [i] && j < sizeof (class_name) - 1) {
+               if (!strchr ("<>\"", class->name [i]))
+                       class_name [j++] = class->name [i];
+               ++i;
+       }
+       g_assert (j < sizeof (class_name));
+       class_name [j] = 0;
+
+       fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%zd\"",
+                       class->name_space, class_name,
+                       safe_object_get_size (obj));
+       if (dump_location) {
+               const char *location;
+               if (sgen_ptr_in_nursery (obj))
+                       location = "nursery";
+               else if (safe_object_get_size (obj) <= SGEN_MAX_SMALL_OBJ_SIZE)
+                       location = "major";
+               else
+                       location = "LOS";
+               fprintf (heap_dump_file, " location=\"%s\"", location);
+       }
+       fprintf (heap_dump_file, "/>\n");
+#endif
+}
+
+/*
+ * Open `filename` as the heap dump file and emit the document root.  Also
+ * enables pin statistics, which the dump includes.  If the file cannot be
+ * opened, dumping stays silently disabled (heap_dump_file remains NULL).
+ */
+void
+sgen_debug_enable_heap_dump (const char *filename)
+{
+       heap_dump_file = fopen (filename, "w");
+       if (heap_dump_file) {
+               fprintf (heap_dump_file, "<sgen-dump>\n");
+               sgen_pin_stats_enable ();
+       }
+}
+
+/*
+ * Write one <collection> element describing the current heap state to the
+ * dump file: memory usage, pinned-byte counts, the pinned objects, the
+ * nursery section, the major heap (delegated to the collector) and the LOS
+ * list.  No-op unless sgen_debug_enable_heap_dump was called successfully.
+ */
+void
+sgen_debug_dump_heap (const char *type, int num, const char *reason)
+{
+       SgenPointerQueue *pinned_objects;
+       LOSObject *bigobj;
+       int i;
+
+       if (!heap_dump_file)
+               return;
+
+       fprintf (heap_dump_file, "<collection type=\"%s\" num=\"%d\"", type, num);
+       if (reason)
+               fprintf (heap_dump_file, " reason=\"%s\"", reason);
+       fprintf (heap_dump_file, ">\n");
+#ifndef SGEN_WITHOUT_MONO
+       fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
+#endif
+       sgen_dump_internal_mem_usage (heap_dump_file);
+       fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_STACK));
+       /* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
+       fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_OTHER));
+
+       fprintf (heap_dump_file, "<pinned-objects>\n");
+       pinned_objects = sgen_pin_stats_get_object_list ();
+       for (i = 0; i < pinned_objects->next_slot; ++i)
+               dump_object (pinned_objects->data [i], TRUE);
+       fprintf (heap_dump_file, "</pinned-objects>\n");
+
+       sgen_dump_section (nursery_section, "nursery");
+
+       major_collector.dump_heap (heap_dump_file);
+
+       fprintf (heap_dump_file, "<los>\n");
+       for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
+               dump_object ((GCObject*)bigobj->data, FALSE);
+       fprintf (heap_dump_file, "</los>\n");
+
+       fprintf (heap_dump_file, "</collection>\n");
+}
+
+static char *found_obj;
+
+/*
+ * Iteration callback: if `user_data` (the pointer searched for) falls
+ * inside [obj, obj + size), record `obj` in the file-static found_obj.
+ * Asserts that at most one object matches per scan.
+ */
+static void
+find_object_for_ptr_callback (char *obj, size_t size, void *user_data)
+{
+       char *ptr = user_data;
+
+       if (ptr >= obj && ptr < obj + size) {
+               g_assert (!found_obj);
+               found_obj = obj;
+       }
+}
+
+/*
+ * For use in the debugger: return the start of the object containing
+ * `ptr`, or NULL if none.  Searches the nursery first (only when `ptr`
+ * lies within it), then LOS, then the major heap.
+ */
+char*
+sgen_find_object_for_ptr (char *ptr)
+{
+       if (ptr >= nursery_section->data && ptr < nursery_section->end_data) {
+               found_obj = NULL;
+               sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
+                               find_object_for_ptr_callback, ptr, TRUE);
+               if (found_obj)
+                       return found_obj;
+       }
+
+       found_obj = NULL;
+       sgen_los_iterate_objects (find_object_for_ptr_callback, ptr);
+       if (found_obj)
+               return found_obj;
+
+       /*
+        * Very inefficient, but this is debugging code, supposed to
+        * be called from gdb, so we don't care.
+        */
+       found_obj = NULL;
+       major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, find_object_for_ptr_callback, ptr);
+       return found_obj;
+}
+
+#ifndef SGEN_WITHOUT_MONO
+
+/*
+ * qsort comparator for MonoGCBridgeXRef: order by source SCC index first,
+ * then by destination SCC index.
+ */
+static int
+compare_xrefs (const void *a_ptr, const void *b_ptr)
+{
+       const MonoGCBridgeXRef *a = a_ptr;
+       const MonoGCBridgeXRef *b = b_ptr;
+
+       if (a->src_scc_index < b->src_scc_index)
+               return -1;
+       if (a->src_scc_index > b->src_scc_index)
+               return 1;
+
+       if (a->dst_scc_index < b->dst_scc_index)
+               return -1;
+       if (a->dst_scc_index > b->dst_scc_index)
+               return 1;
+
+       return 0;
+}
+
+/*
+static void
+dump_processor_state (SgenBridgeProcessor *p)
+{
+       int i;
+
+       printf ("------\n");
+       printf ("SCCS %d\n", p->num_sccs);
+       for (i = 0; i < p->num_sccs; ++i) {
+               int j;
+               MonoGCBridgeSCC *scc = p->api_sccs [i];
+               printf ("\tSCC %d:", i);
+               for (j = 0; j < scc->num_objs; ++j) {
+                       MonoObject *obj = scc->objs [j];
+                       printf (" %p", obj);
+               }
+               printf ("\n");
+       }
+
+       printf ("XREFS %d\n", p->num_xrefs);
+       for (i = 0; i < p->num_xrefs; ++i)
+               printf ("\t%d -> %d\n", p->api_xrefs [i].src_scc_index, p->api_xrefs [i].dst_scc_index);
+
+       printf ("-------\n");
+}
+*/
+
+/*
+ * Debug check: verify that two bridge processors computed equivalent
+ * results -- the same partitioning of bridge objects into SCCs and the
+ * same set of xrefs between SCCs, modulo SCC numbering.  Aborts via
+ * g_error/g_assert on any mismatch; returns TRUE otherwise.
+ *
+ * Fix: the xref-count mismatch message used to say "SCCS count"
+ * (copy-paste from the check above); it now reports "XRefs count".
+ */
+gboolean
+sgen_compare_bridge_processor_results (SgenBridgeProcessor *a, SgenBridgeProcessor *b)
+{
+       int i;
+       SgenHashTable obj_to_a_scc = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_BRIDGE_DEBUG, INTERNAL_MEM_BRIDGE_DEBUG, sizeof (int), mono_aligned_addr_hash, NULL);
+       SgenHashTable b_scc_to_a_scc = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_BRIDGE_DEBUG, INTERNAL_MEM_BRIDGE_DEBUG, sizeof (int), g_direct_hash, NULL);
+       MonoGCBridgeXRef *a_xrefs, *b_xrefs;
+       size_t xrefs_alloc_size;
+
+       // dump_processor_state (a);
+       // dump_processor_state (b);
+
+       if (a->num_sccs != b->num_sccs)
+               g_error ("SCCS count expected %d but got %d", a->num_sccs, b->num_sccs);
+       if (a->num_xrefs != b->num_xrefs)
+               g_error ("XRefs count expected %d but got %d", a->num_xrefs, b->num_xrefs);
+
+       /*
+        * First we build a hash of each object in `a` to its respective SCC index within
+        * `a`.  Along the way we also assert that no object is in more than one SCC.
+        */
+       for (i = 0; i < a->num_sccs; ++i) {
+               int j;
+               MonoGCBridgeSCC *scc = a->api_sccs [i];
+
+               g_assert (scc->num_objs > 0);
+
+               for (j = 0; j < scc->num_objs; ++j) {
+                       GCObject *obj = scc->objs [j];
+                       gboolean new_entry = sgen_hash_table_replace (&obj_to_a_scc, obj, &i, NULL);
+                       g_assert (new_entry);
+               }
+       }
+
+       /*
+        * Now we check whether each of the objects in `b` are in `a`, and whether the SCCs
+        * of `b` contain the same sets of objects as those of `a`.
+        *
+        * While we're doing this, build a hash table to map from `b` SCC indexes to `a` SCC
+        * indexes.
+        */
+       for (i = 0; i < b->num_sccs; ++i) {
+               MonoGCBridgeSCC *scc = b->api_sccs [i];
+               MonoGCBridgeSCC *a_scc;
+               int *a_scc_index_ptr;
+               int a_scc_index;
+               int j;
+               gboolean new_entry;
+
+               g_assert (scc->num_objs > 0);
+               a_scc_index_ptr = sgen_hash_table_lookup (&obj_to_a_scc, scc->objs [0]);
+               g_assert (a_scc_index_ptr);
+               a_scc_index = *a_scc_index_ptr;
+
+               //g_print ("A SCC %d -> B SCC %d\n", a_scc_index, i);
+
+               a_scc = a->api_sccs [a_scc_index];
+               g_assert (a_scc->num_objs == scc->num_objs);
+
+               for (j = 1; j < scc->num_objs; ++j) {
+                       a_scc_index_ptr = sgen_hash_table_lookup (&obj_to_a_scc, scc->objs [j]);
+                       g_assert (a_scc_index_ptr);
+                       g_assert (*a_scc_index_ptr == a_scc_index);
+               }
+
+               new_entry = sgen_hash_table_replace (&b_scc_to_a_scc, GINT_TO_POINTER (i), &a_scc_index, NULL);
+               g_assert (new_entry);
+       }
+
+       /*
+        * Finally, check that we have the same xrefs.  We do this by making copies of both
+        * xref arrays, and replacing the SCC indexes in the copy for `b` with the
+        * corresponding indexes in `a`.  Then we sort both arrays and assert that they're
+        * the same.
+        *
+        * At the same time, check that no xref is self-referential and that there are no
+        * duplicate ones.
+        */
+
+       xrefs_alloc_size = a->num_xrefs * sizeof (MonoGCBridgeXRef);
+       a_xrefs = sgen_alloc_internal_dynamic (xrefs_alloc_size, INTERNAL_MEM_BRIDGE_DEBUG, TRUE);
+       b_xrefs = sgen_alloc_internal_dynamic (xrefs_alloc_size, INTERNAL_MEM_BRIDGE_DEBUG, TRUE);
+
+       memcpy (a_xrefs, a->api_xrefs, xrefs_alloc_size);
+       for (i = 0; i < b->num_xrefs; ++i) {
+               MonoGCBridgeXRef *xref = &b->api_xrefs [i];
+               int *scc_index_ptr;
+
+               g_assert (xref->src_scc_index != xref->dst_scc_index);
+
+               scc_index_ptr = sgen_hash_table_lookup (&b_scc_to_a_scc, GINT_TO_POINTER (xref->src_scc_index));
+               g_assert (scc_index_ptr);
+               b_xrefs [i].src_scc_index = *scc_index_ptr;
+
+               scc_index_ptr = sgen_hash_table_lookup (&b_scc_to_a_scc, GINT_TO_POINTER (xref->dst_scc_index));
+               g_assert (scc_index_ptr);
+               b_xrefs [i].dst_scc_index = *scc_index_ptr;
+       }
+
+       /* a->num_xrefs == b->num_xrefs here (checked above), so sorting both by it is fine. */
+       qsort (a_xrefs, a->num_xrefs, sizeof (MonoGCBridgeXRef), compare_xrefs);
+       qsort (b_xrefs, a->num_xrefs, sizeof (MonoGCBridgeXRef), compare_xrefs);
+
+       for (i = 0; i < a->num_xrefs; ++i) {
+               g_assert (a_xrefs [i].src_scc_index == b_xrefs [i].src_scc_index);
+               g_assert (a_xrefs [i].dst_scc_index == b_xrefs [i].dst_scc_index);
+       }
+
+       sgen_hash_table_clean (&obj_to_a_scc);
+       sgen_hash_table_clean (&b_scc_to_a_scc);
+       sgen_free_internal_dynamic (a_xrefs, xrefs_alloc_size, INTERNAL_MEM_BRIDGE_DEBUG);
+       sgen_free_internal_dynamic (b_xrefs, xrefs_alloc_size, INTERNAL_MEM_BRIDGE_DEBUG);
+
+       return TRUE;
+}
+
+#endif
+
+#endif /*HAVE_SGEN_GC*/
diff --git a/mono/sgen/sgen-descriptor.c b/mono/sgen/sgen-descriptor.c
new file mode 100644 (file)
index 0000000..4747c7c
--- /dev/null
@@ -0,0 +1,377 @@
+/*
+ * sgen-descriptor.c: GC descriptors describe object layout.
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef HAVE_PTHREAD_H
+#include <pthread.h>
+#endif
+#ifdef HAVE_SEMAPHORE_H
+#include <semaphore.h>
+#endif
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#ifdef __MACH__
+#undef _XOPEN_SOURCE
+#endif
+#ifdef __MACH__
+#define _XOPEN_SOURCE
+#endif
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/gc-internal-agnostic.h"
+
+#define MAX_USER_DESCRIPTORS 16
+
+#define MAKE_ROOT_DESC(type,val) ((type) | ((val) << ROOT_DESC_TYPE_SHIFT))
+#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
+
+
+static gsize* complex_descriptors = NULL;
+static int complex_descriptors_size = 0;
+static int complex_descriptors_next = 0;
+static SgenUserRootMarkFunc user_descriptors [MAX_USER_DESCRIPTORS];
+static int user_descriptors_next = 0;
+static void *all_ref_root_descrs [32];
+
+#ifdef HEAVY_STATISTICS
+static guint64 stat_scanned_count_per_descriptor [DESC_TYPE_MAX];
+static guint64 stat_copied_count_per_descriptor [DESC_TYPE_MAX];
+#endif
+
+/*
+ * Store `bitmap` (numbits bits) as a complex descriptor and return its
+ * index into the shared complex_descriptors array.  Each entry's first
+ * word holds the entry's total word count, followed by the bitmap words;
+ * existing entries are searched linearly so identical bitmaps are shared.
+ * Takes the GC lock; grows the array with g_realloc when needed.
+ */
+static int
+alloc_complex_descriptor (gsize *bitmap, int numbits)
+{
+       int nwords, res, i;
+
+       numbits = ALIGN_TO (numbits, GC_BITS_PER_WORD);
+       nwords = numbits / GC_BITS_PER_WORD + 1;
+
+       sgen_gc_lock ();
+       res = complex_descriptors_next;
+       /* linear search, so we don't have duplicates with domain load/unload
+        * this should not be performance critical or we'd have bigger issues
+        * (the number and size of complex descriptors should be small).
+        */
+       for (i = 0; i < complex_descriptors_next; ) {
+               if (complex_descriptors [i] == nwords) {
+                       int j, found = TRUE;
+                       for (j = 0; j < nwords - 1; ++j) {
+                               if (complex_descriptors [i + 1 + j] != bitmap [j]) {
+                                       found = FALSE;
+                                       break;
+                               }
+                       }
+                       if (found) {
+                               sgen_gc_unlock ();
+                               return i;
+                       }
+               }
+               i += (int)complex_descriptors [i];      /* skip to the next entry */
+       }
+       if (complex_descriptors_next + nwords > complex_descriptors_size) {
+               int new_size = complex_descriptors_size * 2 + nwords;
+               complex_descriptors = g_realloc (complex_descriptors, new_size * sizeof (gsize));
+               complex_descriptors_size = new_size;
+       }
+       SGEN_LOG (6, "Complex descriptor %d, size: %d (total desc memory: %d)", res, nwords, complex_descriptors_size);
+       complex_descriptors_next += nwords;
+       complex_descriptors [res] = nwords;
+       for (i = 0; i < nwords - 1; ++i) {
+               complex_descriptors [res + 1 + i] = bitmap [i];
+               SGEN_LOG (6, "\tvalue: %p", (void*)complex_descriptors [res + 1 + i]);
+       }
+       sgen_gc_unlock ();
+       return res;
+}
+
+/* Return a pointer to the complex-descriptor entry encoded in `desc` (index stored above the type bits). */
+gsize*
+sgen_get_complex_descriptor (mword desc)
+{
+       return complex_descriptors + (desc >> LOW_TYPE_BITS);
+}
+
+/*
+ * Descriptor builders.
+ */
+/*
+ * Build a GC descriptor for an object of `obj_size` bytes whose reference
+ * slots are given by `bitmap` (numbits bits, one per pointer-sized word).
+ * Picks the most compact encoding that fits: ptr-free, inline bitmap,
+ * run-length, and finally a complex descriptor.  The size is rounded up
+ * to the allocation alignment first.
+ */
+void*
+mono_gc_make_descr_for_object (gsize *bitmap, int numbits, size_t obj_size)
+{
+       int first_set = -1, num_set = 0, last_set = -1, i;
+       mword desc = 0;
+       size_t stored_size = obj_size;
+
+       stored_size += SGEN_ALLOC_ALIGN - 1;
+       stored_size &= ~(SGEN_ALLOC_ALIGN - 1);
+
+       /* Find the first/last set bits and count them. */
+       for (i = 0; i < numbits; ++i) {
+               if (bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) {
+                       if (first_set < 0)
+                               first_set = i;
+                       last_set = i;
+                       num_set++;
+               }
+       }
+
+       if (first_set < 0) {
+               /* No references at all: ptr-free encodings. */
+               SGEN_LOG (6, "Ptrfree descriptor %p, size: %zd", (void*)desc, stored_size);
+               if (stored_size <= MAX_RUNLEN_OBJECT_SIZE && stored_size <= SGEN_MAX_SMALL_OBJ_SIZE)
+                       return (void*)(DESC_TYPE_SMALL_PTRFREE | stored_size);
+               return (void*)DESC_TYPE_COMPLEX_PTRFREE;
+       }
+
+       g_assert (!(stored_size & 0x7));
+
+       SGEN_ASSERT (5, stored_size == SGEN_ALIGN_UP (stored_size), "Size is not aligned");
+
+       /* we know the 2-word header is ptr-free */
+       if (last_set < BITMAP_NUM_BITS + OBJECT_HEADER_WORDS && stored_size <= SGEN_MAX_SMALL_OBJ_SIZE) {
+               desc = DESC_TYPE_BITMAP | ((*bitmap >> OBJECT_HEADER_WORDS) << LOW_TYPE_BITS);
+               SGEN_LOG (6, "Largebitmap descriptor %p, size: %zd, last set: %d", (void*)desc, stored_size, last_set);
+               return (void*) desc;
+       }
+
+       if (stored_size <= MAX_RUNLEN_OBJECT_SIZE && stored_size <= SGEN_MAX_SMALL_OBJ_SIZE) {
+               /* check run-length encoding first: one byte offset, one byte number of pointers
+                * on 64 bit archs, we can have 3 runs, just one on 32.
+                * It may be better to use nibbles.
+                */
+               if (first_set < 256 && num_set < 256 && (first_set + num_set == last_set + 1)) {
+                       desc = DESC_TYPE_RUN_LENGTH | stored_size | (first_set << 16) | (num_set << 24);
+                       SGEN_LOG (6, "Runlen descriptor %p, size: %zd, first set: %d, num set: %d", (void*)desc, stored_size, first_set, num_set);
+                       return (void*) desc;
+               }
+       }
+
+       /* it's a complex object ... */
+       desc = DESC_TYPE_COMPLEX | (alloc_complex_descriptor (bitmap, last_set + 1) << LOW_TYPE_BITS);
+       return (void*) desc;
+}
+
+/* If the array holds references, numbits == 1 and the first bit is set in elem_bitmap */
+void*
+mono_gc_make_descr_for_array (int vector, gsize *elem_bitmap, int numbits, size_t elem_size)
+{
+       int first_set = -1, num_set = 0, last_set = -1, i;
+       mword desc = DESC_TYPE_VECTOR | (vector ? VECTOR_KIND_SZARRAY : VECTOR_KIND_ARRAY);
+       for (i = 0; i < numbits; ++i) {
+               if (elem_bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) {
+                       if (first_set < 0)
+                               first_set = i;
+                       last_set = i;
+                       num_set++;
+               }
+       }
+
+       if (first_set < 0) {
+               if (elem_size <= MAX_ELEMENT_SIZE)
+                       return (void*)(desc | VECTOR_SUBTYPE_PTRFREE | (elem_size << VECTOR_ELSIZE_SHIFT));
+               return (void*)DESC_TYPE_COMPLEX_PTRFREE;
+       }
+
+       if (elem_size <= MAX_ELEMENT_SIZE) {
+               desc |= elem_size << VECTOR_ELSIZE_SHIFT;
+               if (!num_set) {
+                       return (void*)(desc | VECTOR_SUBTYPE_PTRFREE);
+               }
+               /* Note: we also handle structs with just ref fields */
+               if (num_set * sizeof (gpointer) == elem_size) {
+                       return (void*)(desc | VECTOR_SUBTYPE_REFS | ((gssize)(-1) << 16));
+               }
+               /* FIXME: try run-len first */
+               /* Note: we can't skip the object header here, because it's not present */
+               if (last_set < VECTOR_BITMAP_SIZE) {
+                       return (void*)(desc | VECTOR_SUBTYPE_BITMAP | (*elem_bitmap << 16));
+               }
+       }
+       /* it's am array of complex structs ... */
+       desc = DESC_TYPE_COMPLEX_ARR;
+       desc |= alloc_complex_descriptor (elem_bitmap, last_set + 1) << LOW_TYPE_BITS;
+       return (void*) desc;
+}
+
+/* Return the bitmap encoded by a descriptor */
+/*
+ * Decode `descr` back into a freshly g_new0-allocated reference bitmap;
+ * the bit count is returned through *numbits.  Only RUN_LENGTH, BITMAP
+ * and COMPLEX descriptors are decodable; anything else aborts.
+ * Caller owns the returned array (g_free).
+ */
+gsize*
+mono_gc_get_bitmap_for_descr (void *descr, int *numbits)
+{
+       mword d = (mword)descr;
+       gsize *bitmap;
+
+       switch (d & DESC_TYPE_MASK) {
+       case DESC_TYPE_RUN_LENGTH: {            
+               int first_set = (d >> 16) & 0xff;
+               int num_set = (d >> 24) & 0xff;
+               int i;
+
+               /* NOTE(review): element count computed as (bits + 7) / 8, i.e. bytes,
+                * but used as a gsize count -- over-allocates on 64-bit; confirm intent. */
+               bitmap = g_new0 (gsize, (first_set + num_set + 7) / 8);
+
+               for (i = first_set; i < first_set + num_set; ++i)
+                       bitmap [i / GC_BITS_PER_WORD] |= ((gsize)1 << (i % GC_BITS_PER_WORD));
+
+               *numbits = first_set + num_set;
+
+               return bitmap;
+       }
+
+       case DESC_TYPE_BITMAP: {
+               gsize bmap = (d >> LOW_TYPE_BITS) << OBJECT_HEADER_WORDS;
+
+               bitmap = g_new0 (gsize, 1);
+               bitmap [0] = bmap;
+               /* numbits = position of the highest set bit + 1 */
+               *numbits = 0;
+               while (bmap) {
+                       (*numbits) ++;
+                       bmap >>= 1;
+               }
+               return bitmap;
+       }
+
+       case DESC_TYPE_COMPLEX: {
+               gsize *bitmap_data = sgen_get_complex_descriptor (d);
+               int bwords = (int)(*bitmap_data) - 1;//Max scalar object size is 1Mb, which means up to 32k descriptor words
+               int i;
+
+               bitmap = g_new0 (gsize, bwords);
+               *numbits = bwords * GC_BITS_PER_WORD;
+
+               for (i = 0; i < bwords; ++i) {
+                       bitmap [i] = bitmap_data [i + 1];
+               }
+
+               return bitmap;
+       }
+
+       default:
+               g_assert_not_reached ();
+       }
+}
+
+/*
+ * Build a root descriptor from `bitmap`: empty or small bitmaps are
+ * encoded inline (ROOT_DESC_BITMAP); anything wider than a word minus the
+ * type-shift bits becomes a ROOT_DESC_COMPLEX referencing the shared
+ * complex-descriptor table.
+ */
+void*
+mono_gc_make_descr_from_bitmap (gsize *bitmap, int numbits)
+{
+       if (numbits == 0) {
+               return (void*)MAKE_ROOT_DESC (ROOT_DESC_BITMAP, 0);
+       } else if (numbits < ((sizeof (*bitmap) * 8) - ROOT_DESC_TYPE_SHIFT)) {
+               return (void*)MAKE_ROOT_DESC (ROOT_DESC_BITMAP, bitmap [0]);
+       } else {
+               mword complex = alloc_complex_descriptor (bitmap, numbits);
+               return (void*)MAKE_ROOT_DESC (ROOT_DESC_COMPLEX, complex);
+       }
+}
+
+/*
+ * Build (and cache, for numbits < 32) a root descriptor describing a root
+ * area of `numbits` consecutive reference slots, i.e. an all-ones bitmap.
+ */
+void*
+mono_gc_make_root_descr_all_refs (int numbits)
+{
+       gsize *gc_bitmap;
+       void *descr;
+       int num_bytes = numbits / 8;
+
+       if (numbits < 32 && all_ref_root_descrs [numbits])
+               return all_ref_root_descrs [numbits];
+
+       gc_bitmap = g_malloc0 (ALIGN_TO (ALIGN_TO (numbits, 8) + 1, sizeof (gsize)));
+       memset (gc_bitmap, 0xff, num_bytes);
+       /* NOTE(review): the index arithmetic below mixes byte counts with gsize
+        * element indexing (num_bytes / 8, numbits / 8) and assumes 64-bit gsize
+        * for GUINT64_TO_LE -- confirm on 32-bit / big-endian targets. */
+       if (numbits < ((sizeof (*gc_bitmap) * 8) - ROOT_DESC_TYPE_SHIFT)) 
+               gc_bitmap[0] = GUINT64_TO_LE(gc_bitmap[0]);
+       else if (numbits && num_bytes % (sizeof (*gc_bitmap)))
+               gc_bitmap[num_bytes / 8] = GUINT64_TO_LE(gc_bitmap [num_bytes / 8]);
+       if (numbits % 8)
+               gc_bitmap [numbits / 8] = (1 << (numbits % 8)) - 1;
+       descr = mono_gc_make_descr_from_bitmap (gc_bitmap, numbits);
+       g_free (gc_bitmap);
+
+       if (numbits < 32)
+               all_ref_root_descrs [numbits] = descr;
+
+       return descr;
+}
+
+/*
+ * Register `marker` as a user root-marking callback and return a
+ * ROOT_DESC_USER descriptor encoding its slot index.  At most
+ * MAX_USER_DESCRIPTORS callbacks may be registered (asserted).
+ */
+void*
+sgen_make_user_root_descriptor (SgenUserRootMarkFunc marker)
+{
+       void *descr;
+
+       g_assert (user_descriptors_next < MAX_USER_DESCRIPTORS);
+       descr = (void*)MAKE_ROOT_DESC (ROOT_DESC_USER, (mword)user_descriptors_next);
+       user_descriptors [user_descriptors_next ++] = marker;
+
+       return descr;
+}
+
+/* Return the complex-descriptor entry referenced by a ROOT_DESC_COMPLEX root descriptor. */
+void*
+sgen_get_complex_descriptor_bitmap (mword desc)
+{
+       return complex_descriptors + (desc >> ROOT_DESC_TYPE_SHIFT);
+}
+
+/* Return the user marking callback registered for a ROOT_DESC_USER descriptor. */
+SgenUserRootMarkFunc
+sgen_get_user_descriptor_func (mword desc)
+{
+       return user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
+}
+
+#ifdef HEAVY_STATISTICS
+/* Bump the per-descriptor-type scanned-object counter (type 0 is invalid). */
+void
+sgen_descriptor_count_scanned_object (mword desc)
+{
+       int type = desc & DESC_TYPE_MASK;
+       SGEN_ASSERT (0, type, "Descriptor type can't be zero");
+       ++stat_scanned_count_per_descriptor [type - 1];
+}
+
+/* Bump the per-descriptor-type copied-object counter (type 0 is invalid). */
+void
+sgen_descriptor_count_copied_object (mword desc)
+{
+       int type = desc & DESC_TYPE_MASK;
+       SGEN_ASSERT (0, type, "Descriptor type can't be zero");
+       ++stat_copied_count_per_descriptor [type - 1];
+}
+#endif
+
+/*
+ * One-time initialization for this module: registers the per-descriptor-
+ * type scanned/copied counters with the Mono counters facility.  Only does
+ * anything when built with HEAVY_STATISTICS.
+ */
+void
+sgen_init_descriptors (void)
+{
+#ifdef HEAVY_STATISTICS
+       mono_counters_register ("# scanned RUN_LENGTH", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scanned_count_per_descriptor [DESC_TYPE_RUN_LENGTH - 1]);
+       mono_counters_register ("# scanned SMALL_PTRFREE", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scanned_count_per_descriptor [DESC_TYPE_SMALL_PTRFREE - 1]);
+       mono_counters_register ("# scanned COMPLEX", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scanned_count_per_descriptor [DESC_TYPE_COMPLEX - 1]);
+       mono_counters_register ("# scanned VECTOR", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scanned_count_per_descriptor [DESC_TYPE_VECTOR - 1]);
+       mono_counters_register ("# scanned BITMAP", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scanned_count_per_descriptor [DESC_TYPE_BITMAP - 1]);
+       mono_counters_register ("# scanned COMPLEX_ARR", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scanned_count_per_descriptor [DESC_TYPE_COMPLEX_ARR - 1]);
+       mono_counters_register ("# scanned COMPLEX_PTRFREE", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scanned_count_per_descriptor [DESC_TYPE_COMPLEX_PTRFREE - 1]);
+
+       mono_counters_register ("# copied RUN_LENGTH", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copied_count_per_descriptor [DESC_TYPE_RUN_LENGTH - 1]);
+       mono_counters_register ("# copied SMALL_PTRFREE", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copied_count_per_descriptor [DESC_TYPE_SMALL_PTRFREE - 1]);
+       mono_counters_register ("# copied COMPLEX", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copied_count_per_descriptor [DESC_TYPE_COMPLEX - 1]);
+       mono_counters_register ("# copied VECTOR", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copied_count_per_descriptor [DESC_TYPE_VECTOR - 1]);
+       mono_counters_register ("# copied BITMAP", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copied_count_per_descriptor [DESC_TYPE_BITMAP - 1]);
+       mono_counters_register ("# copied COMPLEX_ARR", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copied_count_per_descriptor [DESC_TYPE_COMPLEX_ARR - 1]);
+       mono_counters_register ("# copied COMPLEX_PTRFREE", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copied_count_per_descriptor [DESC_TYPE_COMPLEX_PTRFREE - 1]);
+#endif
+}
+
+#endif
diff --git a/mono/sgen/sgen-descriptor.h b/mono/sgen/sgen-descriptor.h
new file mode 100644 (file)
index 0000000..1a9d8cc
--- /dev/null
@@ -0,0 +1,331 @@
+/*
+ * sgen-descriptor.h: GC descriptors describe object layout.
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ *
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __MONO_SGEN_DESCRIPTOR_H__
+#define __MONO_SGEN_DESCRIPTOR_H__
+
+#include <mono/sgen/sgen-conf.h>
+
+
+/*
+ * ######################################################################
+ * ########  GC descriptors
+ * ######################################################################
+ * Used to quickly get the info the GC needs about an object: size and
+ * where the references are held.
+ */
+#define OBJECT_HEADER_WORDS (SGEN_CLIENT_OBJECT_HEADER_SIZE / sizeof(gpointer))
+#define LOW_TYPE_BITS 3 /* number of descriptor-type tag bits at the low end of every descriptor */
+#define DESC_TYPE_MASK ((1 << LOW_TYPE_BITS) - 1) /* extracts the DESC_TYPE_* tag */
+#define MAX_RUNLEN_OBJECT_SIZE 0xFFFF /* RUN_LENGTH descriptors store a 16-bit byte size */
+#define VECTOR_INFO_SHIFT 14
+#define VECTOR_KIND_SHIFT 13
+#define VECTOR_ELSIZE_SHIFT 3
+#define VECTOR_BITMAP_SHIFT 16
+#define VECTOR_BITMAP_SIZE (GC_BITS_PER_WORD - VECTOR_BITMAP_SHIFT)
+#define BITMAP_NUM_BITS (GC_BITS_PER_WORD - LOW_TYPE_BITS)
+#define MAX_ELEMENT_SIZE 0x3ff /* the element size is a 10-bit field */
+#define VECTOR_SUBTYPE_PTRFREE (DESC_TYPE_V_PTRFREE << VECTOR_INFO_SHIFT)
+#define VECTOR_SUBTYPE_REFS    (DESC_TYPE_V_REFS << VECTOR_INFO_SHIFT)
+#define VECTOR_SUBTYPE_BITMAP  (DESC_TYPE_V_BITMAP << VECTOR_INFO_SHIFT)
+
+#define VECTOR_KIND_SZARRAY  (DESC_TYPE_V_SZARRAY << VECTOR_KIND_SHIFT)
+#define VECTOR_KIND_ARRAY  (DESC_TYPE_V_ARRAY << VECTOR_KIND_SHIFT)
+
+/*
+ * Objects are aligned to 8 bytes boundaries.
+ *
+ * A descriptor is a pointer in GCVTable, so 32 or 64 bits of size.
+ * The low 3 bits define the type of the descriptor. The other bits
+ * depend on the type.
+ *
+ * It's important to be able to quickly identify two properties of classes from their
+ * descriptors: whether they are small enough to live in the regular major heap (size <=
+ * SGEN_MAX_SMALL_OBJ_SIZE), and whether they contain references.
+ *
+ * To that end we have three descriptor types that only apply to small classes: RUN_LENGTH,
+ * BITMAP, and SMALL_PTRFREE.  We also have the type COMPLEX_PTRFREE, which applies to
+ * classes that are either not small or of unknown size (those being strings and arrays).
+ * The lowest two bits of the SMALL_PTRFREE and COMPLEX_PTRFREE tags are the same, so we can
+ * quickly check for references.
+ *
+ * As a general rule the 13 remaining low bits define the size, either
+ * of the whole object or of the elements in the arrays. While for objects
+ * the size is already in bytes, for arrays we need to shift, because
+ * array elements might be smaller than 8 bytes. In case of arrays, we
+ * use two bits to describe what the additional high bits represent,
+ * so the default behaviour can handle element sizes less than 2048 bytes.
+ * The high 16 bits, if 0 it means the object is pointer-free.
+ * This design should make it easy and fast to skip over ptr-free data.
+ * The first 4 types should cover >95% of the objects.
+ * Note that since the size of objects is limited to 64K, larger objects
+ * will be allocated in the large object heap.
+ * If we want 4-bytes alignment, we need to put vector and small bitmap
+ * inside complex.
+ *
+ * We don't use 0 so that 0 isn't a valid GC descriptor.  No deep reason for this other than
+ * to be able to identify a non-inited descriptor for debugging.
+ */
+enum {
+       /* Keep in sync with `descriptor_types` in sgen-debug.c! */
+       DESC_TYPE_RUN_LENGTH = 1,   /* 16 bits aligned byte size | 1-3 (offset, numptr) bytes tuples */
+       DESC_TYPE_BITMAP = 2,       /* | 29-61 bitmap bits */
+       DESC_TYPE_SMALL_PTRFREE = 3,
+       DESC_TYPE_MAX_SMALL_OBJ = 3,
+       DESC_TYPE_COMPLEX = 4,      /* index for bitmap into complex_descriptors */
+       DESC_TYPE_VECTOR = 5,       /* 10 bits element size | 1 bit kind | 2 bits desc | element desc */
+       DESC_TYPE_COMPLEX_ARR = 6,  /* index for bitmap into complex_descriptors */
+       DESC_TYPE_COMPLEX_PTRFREE = 7, /* Nothing, used to encode large ptr objects and strings. */
+       DESC_TYPE_MAX = 7,
+
+       DESC_TYPE_PTRFREE_MASK = 3,
+       DESC_TYPE_PTRFREE_BITS = 3
+};
+
+/* values for array kind */
+enum {
+       DESC_TYPE_V_SZARRAY = 0, /*vector with no bounds data */
+       DESC_TYPE_V_ARRAY = 1, /* array with bounds data */
+};
+
+/* subtypes for arrays and vectors */
+enum {
+       DESC_TYPE_V_PTRFREE = 0,/* there are no refs: keep first so it has a zero value  */
+       DESC_TYPE_V_REFS,       /* all the array elements are refs */
+       DESC_TYPE_V_RUN_LEN,    /* elements are run-length encoded as DESC_TYPE_RUN_LENGTH */
+       DESC_TYPE_V_BITMAP      /* elements are as the bitmap in DESC_TYPE_SMALL_BITMAP */
+};
+
+#define SGEN_DESC_STRING       (DESC_TYPE_COMPLEX_PTRFREE | (1 << LOW_TYPE_BITS)) /* COMPLEX_PTRFREE with a payload of 1 */
+
+/* Root bitmap descriptors are simpler: the lower three bits describe the type
+ * and we either have 30/62 bitmap bits or nibble-based run-length,
+ * or a complex descriptor, or a user defined marker function.
+ */
+enum {
+       ROOT_DESC_CONSERVATIVE, /* 0, so matches NULL value */
+       ROOT_DESC_BITMAP,
+       ROOT_DESC_RUN_LEN, 
+       ROOT_DESC_COMPLEX,
+       ROOT_DESC_USER,
+       ROOT_DESC_TYPE_MASK = 0x7,
+       ROOT_DESC_TYPE_SHIFT = 3,
+};
+
+typedef void (*SgenUserMarkFunc)     (void **addr, void *gc_data);
+typedef void (*SgenUserRootMarkFunc) (void *addr, SgenUserMarkFunc mark_func, void *gc_data);
+
+void* sgen_make_user_root_descriptor (SgenUserRootMarkFunc marker);
+
+gsize* sgen_get_complex_descriptor (mword desc);
+void* sgen_get_complex_descriptor_bitmap (mword desc);
+SgenUserRootMarkFunc sgen_get_user_descriptor_func (mword desc);
+
+void sgen_init_descriptors (void);
+
+#ifdef HEAVY_STATISTICS
+void sgen_descriptor_count_scanned_object (mword desc);
+void sgen_descriptor_count_copied_object (mword desc);
+#endif
+
+static inline gboolean
+sgen_gc_descr_has_references (mword desc)
+{
+       /* This covers SMALL_PTRFREE and COMPLEX_PTRFREE */
+       if ((desc & DESC_TYPE_PTRFREE_MASK) == DESC_TYPE_PTRFREE_BITS)
+               return FALSE;
+
+       /* The array is ptr-free */
+       if ((desc & 0xC007) == (DESC_TYPE_VECTOR | VECTOR_SUBTYPE_PTRFREE)) /* 0xC007 = low type tag + the two VECTOR_INFO bits */
+               return FALSE;
+
+       return TRUE;
+}
+
+#define SGEN_VTABLE_HAS_REFERENCES(vt) (sgen_gc_descr_has_references (sgen_vtable_get_descriptor ((vt))))
+#define SGEN_OBJECT_HAS_REFERENCES(o)  (SGEN_VTABLE_HAS_REFERENCES (SGEN_LOAD_VTABLE ((o))))
+
+/* helper macros to scan and traverse objects, macros because we reuse them in many functions */
+#ifdef __GNUC__
+#define PREFETCH_READ(addr)    __builtin_prefetch ((addr), 0, 1)
+#define PREFETCH_WRITE(addr)   __builtin_prefetch ((addr), 1, 1)
+#else
+#define PREFETCH_READ(addr)
+#define PREFETCH_WRITE(addr)
+#endif
+
+#if defined(__GNUC__) && SIZEOF_VOID_P==4
+#define GNUC_BUILTIN_CTZ(bmap) __builtin_ctz(bmap)
+#elif defined(__GNUC__) && SIZEOF_VOID_P==8
+#define GNUC_BUILTIN_CTZ(bmap) __builtin_ctzl(bmap)
+#endif
+
+/* code using these macros must define a HANDLE_PTR(ptr) macro that does the work */
+#define OBJ_RUN_LEN_FOREACH_PTR(desc,obj)      do {    \
+               if ((desc) & 0xffff0000) {      \
+                       /* there are pointers */        \
+                       void **_objptr_end;     \
+                       void **_objptr = (void**)(obj); \
+                       _objptr += ((desc) >> 16) & 0xff;       \
+                       _objptr_end = _objptr + (((desc) >> 24) & 0xff);        \
+                       while (_objptr < _objptr_end) { \
+                               HANDLE_PTR (_objptr, (obj));    \
+                               _objptr++;      \
+                       };      \
+               }       \
+       } while (0)
+
+/* a bitmap desc means that there are pointer references or we'd have
+ * chosen run-length, instead: add an assert to check.
+ */
+#ifdef __GNUC__
+#define OBJ_BITMAP_FOREACH_PTR(desc,obj)       do {            \
+               /* there are pointers */                        \
+               void **_objptr = (void**)(obj);                 \
+               gsize _bmap = (desc) >> LOW_TYPE_BITS;          \
+               _objptr += OBJECT_HEADER_WORDS;                 \
+               do {                                            \
+                       int _index = GNUC_BUILTIN_CTZ (_bmap);  \
+                       _objptr += _index;                      \
+                       _bmap >>= (_index + 1);                 \
+                       HANDLE_PTR (_objptr, (obj));            \
+                       ++_objptr;                              \
+               } while (_bmap);                                \
+       } while (0)
+#else
+#define OBJ_BITMAP_FOREACH_PTR(desc,obj)       do {    \
+               /* there are pointers */        \
+               void **_objptr = (void**)(obj); \
+               gsize _bmap = (desc) >> LOW_TYPE_BITS;  \
+               _objptr += OBJECT_HEADER_WORDS; \
+               do {    \
+                       if ((_bmap & 1)) {      \
+                               HANDLE_PTR (_objptr, (obj));    \
+                       }       \
+                       _bmap >>= 1;    \
+                       ++_objptr;      \
+               } while (_bmap);        \
+       } while (0)
+#endif
+/* NOTE(review): the parameter is named `vt` but the body reads `desc` — it relies on a `desc` variable being in scope at the expansion site; confirm this is intended. */
+#define OBJ_COMPLEX_FOREACH_PTR(vt,obj)        do {    \
+               /* there are pointers */        \
+               void **_objptr = (void**)(obj); \
+               gsize *bitmap_data = sgen_get_complex_descriptor ((desc)); \
+               gsize bwords = (*bitmap_data) - 1;      \
+               void **start_run = _objptr;     \
+               bitmap_data++;  \
+               while (bwords-- > 0) {  \
+                       gsize _bmap = *bitmap_data++;   \
+                       _objptr = start_run;    \
+                       /*g_print ("bitmap: 0x%x/%d at %p\n", _bmap, bwords, _objptr);*/        \
+                       while (_bmap) { \
+                               if ((_bmap & 1)) {      \
+                                       HANDLE_PTR (_objptr, (obj));    \
+                               }       \
+                               _bmap >>= 1;    \
+                               ++_objptr;      \
+                       }       \
+                       start_run += GC_BITS_PER_WORD;  \
+               }       \
+       } while (0)
+
+/* this one is untested: it walks each array element, applying the complex bitmap to it */
+#define OBJ_COMPLEX_ARR_FOREACH_PTR(desc,obj)  do {    \
+               /* there are pointers */        \
+               GCVTable *vt = (GCVTable*)SGEN_LOAD_VTABLE (obj); \
+               gsize *mbitmap_data = sgen_get_complex_descriptor ((desc)); \
+               gsize mbwords = (*mbitmap_data++) - 1;  \
+               gsize el_size = sgen_client_array_element_size (vt);    \
+               char *e_start = sgen_client_array_data_start ((GCObject*)(obj));        \
+               char *e_end = e_start + el_size * sgen_client_array_length ((GCObject*)(obj));  \
+               while (e_start < e_end) {       \
+                       void **_objptr = (void**)e_start;       \
+                       gsize *bitmap_data = mbitmap_data;      \
+                       gsize bwords = mbwords; \
+                       while (bwords-- > 0) {  \
+                               gsize _bmap = *bitmap_data++;   \
+                               void **start_run = _objptr;     \
+                               /*g_print ("bitmap: 0x%x\n", _bmap);*/  \
+                               while (_bmap) { \
+                                       if ((_bmap & 1)) {      \
+                                               HANDLE_PTR (_objptr, (obj));    \
+                                       }       \
+                                       _bmap >>= 1;    \
+                                       ++_objptr;      \
+                               }       \
+                               _objptr = start_run + GC_BITS_PER_WORD; \
+                       }       \
+                       e_start += el_size;     \
+               }       \
+       } while (0)
+
+#define OBJ_VECTOR_FOREACH_PTR(desc,obj)       do {    \
+               /* note: 0xffffc000 excludes DESC_TYPE_V_PTRFREE */     \
+               if ((desc) & 0xffffc000) {                              \
+                       int el_size = ((desc) >> 3) & MAX_ELEMENT_SIZE; \
+                       /* there are pointers */        \
+                       int etype = (desc) & 0xc000;                    \
+                       if (etype == (DESC_TYPE_V_REFS << 14)) {        \
+                               void **p = (void**)sgen_client_array_data_start ((GCObject*)(obj));     \
+                               void **end_refs = (void**)((char*)p + el_size * sgen_client_array_length ((GCObject*)(obj)));   \
+                               /* Note: this code can handle also arrays of struct with only references in them */     \
+                               while (p < end_refs) {  \
+                                       HANDLE_PTR (p, (obj));  \
+                                       ++p;    \
+                               }       \
+                       } else if (etype == DESC_TYPE_V_RUN_LEN << 14) {        \
+                               int offset = ((desc) >> 16) & 0xff;     \
+                               int num_refs = ((desc) >> 24) & 0xff;   \
+                               char *e_start = sgen_client_array_data_start ((GCObject*)(obj));        \
+                               char *e_end = e_start + el_size * sgen_client_array_length ((GCObject*)(obj));  \
+                               while (e_start < e_end) {       \
+                                       void **p = (void**)e_start;     \
+                                       int i;  \
+                                       p += offset;    \
+                                       for (i = 0; i < num_refs; ++i) {        \
+                                               HANDLE_PTR (p + i, (obj));      \
+                                       }       \
+                                       e_start += el_size;     \
+                               }       \
+                       } else if (etype == DESC_TYPE_V_BITMAP << 14) { \
+                               char *e_start = sgen_client_array_data_start ((GCObject*)(obj));        \
+                               char *e_end = e_start + el_size * sgen_client_array_length ((GCObject*)(obj));  \
+                               while (e_start < e_end) {       \
+                                       void **p = (void**)e_start;     \
+                                       gsize _bmap = (desc) >> 16;     \
+                                       /* Note: there is no object header here to skip */      \
+                                       while (_bmap) { \
+                                               if ((_bmap & 1)) {      \
+                                                       HANDLE_PTR (p, (obj));  \
+                                               }       \
+                                               _bmap >>= 1;    \
+                                               ++p;    \
+                                       }       \
+                                       e_start += el_size;     \
+                               }       \
+                       }       \
+               }       \
+       } while (0)
+
+
+#endif
diff --git a/mono/sgen/sgen-fin-weak-hash.c b/mono/sgen/sgen-fin-weak-hash.c
new file mode 100644 (file)
index 0000000..047d9a4
--- /dev/null
@@ -0,0 +1,871 @@
+/*
+ * sgen-fin-weak-hash.c: Finalizers and weak links.
+ *
+ * Author:
+ *     Paolo Molaro (lupus@ximian.com)
+ *  Rodrigo Kumpera (kumpera@gmail.com)
+ *
+ * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
+ * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ * Copyright 2011 Xamarin, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-gray.h"
+#include "mono/sgen/sgen-protocol.h"
+#include "mono/sgen/sgen-pointer-queue.h"
+#include "mono/sgen/sgen-client.h"
+#include "mono/utils/mono-membar.h"
+
+#define ptr_in_nursery sgen_ptr_in_nursery /* local shorthand */
+
+typedef SgenGrayQueue GrayQueue;
+
+static int no_finalize = 0; /* when nonzero, finalization is disabled entirely */
+
+#define DISLINK_OBJECT(l)      (REVEAL_POINTER (*(void**)(l))) /* object behind a disappearing link */
+#define DISLINK_TRACK(l)       ((~(size_t)(*(void**)(l))) & 1) /* inverted low bit of the stored pointer */
+
+/*
+ * The finalizable hash has the object as the key; the
+ * disappearing_link hash has the link address as key.
+ *
+ * Copyright 2011 Xamarin Inc.
+ */
+
+#define TAG_MASK ((mword)0x1) /* the low bit of a hash key carries a tag (see BRIDGE_OBJECT_MARKED) */
+
+static inline GCObject*
+tagged_object_get_object (GCObject *object)
+{
+       return (GCObject*)(((mword)object) & ~TAG_MASK); /* strip the tag bit */
+}
+
+static inline int
+tagged_object_get_tag (GCObject *object)
+{
+       return ((mword)object) & TAG_MASK; /* extract the tag bit */
+}
+
+static inline GCObject*
+tagged_object_apply (void *object, int tag_bits)
+{
+       return (GCObject*)((mword)object | (mword)tag_bits); /* OR the tag into the pointer */
+}
+
+static int
+tagged_object_hash (GCObject *o)
+{
+       return sgen_aligned_addr_hash (tagged_object_get_object (o)); /* hash the untagged address so the tag doesn't affect lookup */
+}
+
+static gboolean
+tagged_object_equals (GCObject *a, GCObject *b)
+{
+       return tagged_object_get_object (a) == tagged_object_get_object (b); /* equality also ignores the tag bit */
+}
+
+static SgenHashTable minor_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals); /* finalizable objects still in the nursery */
+static SgenHashTable major_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals); /* finalizable objects in the major heap */
+
+static SgenHashTable*
+get_finalize_entry_hash_table (int generation) /* map a GC generation to its finalizable-object hash */
+{
+       switch (generation) {
+       case GENERATION_NURSERY: return &minor_finalizable_hash;
+       case GENERATION_OLD: return &major_finalizable_hash;
+       default: g_assert_not_reached ();
+       }
+}
+
+#define BRIDGE_OBJECT_MARKED 0x1 /* tag value: bridge code told us to ignore this object */
+
+/* LOCKING: requires that the GC lock is held */
+void
+sgen_mark_bridge_object (GCObject *obj)
+{
+       SgenHashTable *hash_table = get_finalize_entry_hash_table (ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD);
+
+       sgen_hash_table_set_key (hash_table, obj, tagged_object_apply (obj, BRIDGE_OBJECT_MARKED)); /* re-tag the key in place; sgen_collect_bridge_objects () skips tagged entries */
+}
+
+/* LOCKING: requires that the GC lock is held */
+void
+sgen_collect_bridge_objects (int generation, ScanCopyContext ctx)
+{
+       CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
+       GrayQueue *queue = ctx.queue;
+       SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
+       GCObject *object;
+       gpointer dummy G_GNUC_UNUSED;
+       char *copy;
+       SgenPointerQueue moved_fin_objects;
+
+       sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);
+
+       if (no_finalize)
+               return; /* NOTE(review): returns without the sgen_pointer_queue_free () below — confirm init does not allocate */
+
+       SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
+               int tag = tagged_object_get_tag (object);
+               object = tagged_object_get_object (object);
+
+               /* Bridge code told us to ignore this one */
+               if (tag == BRIDGE_OBJECT_MARKED)
+                       continue;
+
+               /* Object is a bridge object and major heap says it's dead  */
+               if (major_collector.is_object_live ((char*)object))
+                       continue;
+
+               /* Nursery says the object is dead. */
+               if (!sgen_gc_is_object_ready_for_finalization (object))
+                       continue;
+
+               if (!sgen_client_bridge_is_bridge_object (object))
+                       continue;
+
+               copy = (char*)object;
+               copy_func ((void**)&copy, queue); /* copy or mark so the object survives this collection */
+
+               sgen_client_bridge_register_finalized_object ((GCObject*)copy);
+               
+               if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
+                       /* remove from the list */
+                       SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
+
+                       /* insert it into the major hash */
+                       sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);
+
+                       SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
+
+                       continue;
+               } else if (copy != (char*)object) {
+                       /* update pointer */
+                       SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
+
+                       /* register for reinsertion */
+                       sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));
+
+                       SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
+
+                       continue;
+               }
+       } SGEN_HASH_TABLE_FOREACH_END;
+
+       while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
+               sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
+       }
+
+       sgen_pointer_queue_free (&moved_fin_objects);
+}
+
+
+/* LOCKING: requires that the GC lock is held */
+void
+sgen_finalize_in_range (int generation, ScanCopyContext ctx)
+{
+       CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
+       GrayQueue *queue = ctx.queue;
+       SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
+       GCObject *object;
+       gpointer dummy G_GNUC_UNUSED;
+       SgenPointerQueue moved_fin_objects;
+
+       sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);
+
+       if (no_finalize)
+               return; /* NOTE(review): returns without the sgen_pointer_queue_free () below — confirm init does not allocate */
+       SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
+               int tag = tagged_object_get_tag (object);
+               object = tagged_object_get_object (object);
+               if (!major_collector.is_object_live ((char*)object)) {
+                       gboolean is_fin_ready = sgen_gc_is_object_ready_for_finalization (object);
+                       GCObject *copy = object;
+                       copy_func ((void**)&copy, queue);
+                       if (is_fin_ready) {
+                               /* remove and put in fin_ready_list */
+                               SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
+                               sgen_queue_finalization_entry (copy);
+                               /* Make it survive */
+                               SGEN_LOG (5, "Queueing object for finalization: %p (%s) (was at %p) (%d)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object, sgen_hash_table_num_entries (hash_table));
+                               continue;
+                       } else {
+                               if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
+                                       /* remove from the list */
+                                       SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
+
+                                       /* insert it into the major hash */
+                                       sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);
+
+                                       SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
+
+                                       continue;
+                               } else if (copy != object) {
+                                       /* update pointer */
+                                       SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
+
+                                       /* register for reinsertion */
+                                       sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));
+
+                                       SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
+
+                                       continue;
+                               }
+                       }
+               }
+       } SGEN_HASH_TABLE_FOREACH_END;
+
+       while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
+               sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
+       }
+
+       sgen_pointer_queue_free (&moved_fin_objects);
+}
+
+/* LOCKING: requires that the GC lock is held */
+static void
+register_for_finalization (GCObject *obj, void *user_data, int generation)
+{
+       SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
+
+       if (no_finalize)
+               return;
+
+       if (user_data) { /* non-NULL user_data: register the finalizer */
+               if (sgen_hash_table_replace (hash_table, obj, NULL, NULL)) { /* logs only when a new entry was added */
+                       GCVTable *vt = SGEN_LOAD_VTABLE_UNCHECKED (obj);
+                       SGEN_LOG (5, "Added finalizer for object: %p (%s) (%d) to %s table", obj, sgen_client_vtable_get_name (vt), hash_table->num_entries, sgen_generation_name (generation));
+               }
+       } else { /* NULL user_data: unregister */
+               if (sgen_hash_table_remove (hash_table, obj, NULL)) { /* logs only when an entry was actually removed */
+                       GCVTable *vt = SGEN_LOAD_VTABLE_UNCHECKED (obj);
+                       SGEN_LOG (5, "Removed finalizer for object: %p (%s) (%d)", obj, sgen_client_vtable_get_name (vt), hash_table->num_entries);
+               }
+       }
+}
+
+/*
+ * We're using (mostly) non-locking staging queues for finalizers and weak links to speed
+ * up registering them.  Otherwise we'd have to take the GC lock.
+ *
+ * The queues are arrays of `StageEntry`, plus a `next_entry` index.  Threads add entries to
+ * the queue via `add_stage_entry()` in a linear fashion until it fills up, in which case
+ * `process_stage_entries()` is called to drain it.  A garbage collection will also drain
+ * the queues via the same function.  That implies that `add_stage_entry()`, since it
+ * doesn't take a lock, must be able to run concurrently with `process_stage_entries()`,
+ * though it doesn't have to make progress while the queue is drained.  In fact, once it
+ * detects that the queue is being drained, it blocks until the draining is done.
+ *
+ * The protocol must guarantee that entries in the queue are causally ordered, otherwise two
+ * entries for the same location might get switched, resulting in the earlier one being
+ * committed and the later one ignored.
+ *
+ * `next_entry` is the index of the next entry to be filled, or `-1` if the queue is
+ * currently being drained.  Each entry has a state:
+ *
+ * `STAGE_ENTRY_FREE`: The entry is free.  Its data fields must be `NULL`.
+ *
+ * `STAGE_ENTRY_BUSY`: The entry is currently being filled in.
+ *
+ * `STAGE_ENTRY_USED`: The entry is completely filled in and must be processed in the next
+ * draining round.
+ *
+ * `STAGE_ENTRY_INVALID`: The entry was busy during queue draining and therefore
+ * invalidated.  Entries that are `BUSY` can obviously not be processed during a drain, but
+ * we can't leave them in place because new entries might be inserted before them, including
+ * from the same thread, violating causality.  An alternative would be not to reset
+ * `next_entry` to `0` after a drain, but to the index of the last `BUSY` entry plus one,
+ * but that can potentially waste the whole queue.
+ *
+ * State transitions:
+ *
+ * | from    | to      | filler? | drainer? |
+ * +---------+---------+---------+----------+
+ * | FREE    | BUSY    | X       |          |
+ * | BUSY    | FREE    | X       |          |
+ * | BUSY    | USED    | X       |          |
+ * | BUSY    | INVALID |         | X        |
+ * | USED    | FREE    |         | X        |
+ * | INVALID | FREE    | X       |          |
+ *
+ * `next_entry` can be incremented either by the filler thread that set the corresponding
+ * entry to `BUSY`, or by another filler thread that's trying to get a `FREE` slot.  If that
+ * other thread wasn't allowed to increment, it would block on the first filler thread.
+ *
+ * An entry's state, once it's set from `FREE` to `BUSY` by a filler thread, can only be
+ * changed by that same thread or by the drainer.  The drainer can only set a `BUSY` entry
+ * to `INVALID`, so it needs to be set to `FREE` again by the original filler thread.
+ */
+
+#define STAGE_ENTRY_FREE       0 /* unused; obj/user_data must be NULL */
+#define STAGE_ENTRY_BUSY       1 /* a filler thread is writing the entry */
+#define STAGE_ENTRY_USED       2 /* fully filled in; processed on the next drain */
+#define STAGE_ENTRY_INVALID    3 /* was BUSY during a drain; filler resets it to FREE */
+
+typedef struct {
+       volatile gint32 state; /* one of the STAGE_ENTRY_* values above */
+       GCObject *obj;
+       void *user_data;
+} StageEntry;
+
+#define NUM_FIN_STAGE_ENTRIES  1024
+
+static volatile gint32 next_fin_stage_entry = 0; /* next index to fill, or -1 while draining */
+static StageEntry fin_stage_entries [NUM_FIN_STAGE_ENTRIES];
+
+/*
+ * This is used to lock the stage when processing is forced, i.e. when it's triggered by a
+ * garbage collection.  In that case, the world is already stopped and there's only one
+ * thread operating on the queue.
+ */
+static void
+lock_stage_for_processing (volatile gint32 *next_entry)
+{
+       *next_entry = -1; /* -1 marks the stage as being drained */
+}
+
+/*
+ * When processing is triggered by an overflow, we don't want to take the GC lock
+ * immediately, and then set `next_entry` to `-1`, because another thread might have drained
+ * the queue in the mean time.  Instead, we make sure the overflow is still there, we
+ * atomically set `next_entry`, and only once that happened do we take the GC lock.
+ */
+static gboolean
+try_lock_stage_for_processing (int num_entries, volatile gint32 *next_entry)
+{
+       gint32 old = *next_entry;
+       if (old < num_entries)
+               return FALSE; /* queue no longer overflowed — someone else drained it */
+       return InterlockedCompareExchange (next_entry, -1, old) == old; /* only the CAS winner drains */
+}
+
+/* LOCKING: requires that the GC lock is held */
+static void
+process_stage_entries (int num_entries, volatile gint32 *next_entry, StageEntry *entries, void (*process_func) (GCObject*, void*, int))
+{
+       int i;
+
+       /*
+        * This can happen if after setting `next_entry` to `-1` in
+        * `try_lock_stage_for_processing()`, a GC was triggered, which then drained the
+        * queue and reset `next_entry`.
+        *
+        * We have the GC lock now, so if it's still `-1`, we can't be interrupted by a GC.
+        */
+       if (*next_entry != -1)
+               return;
+
+       for (i = 0; i < num_entries; ++i) {
+               gint32 state;
+
+       retry:
+               state = entries [i].state;
+
+               switch (state) {
+               case STAGE_ENTRY_FREE:
+               case STAGE_ENTRY_INVALID:
+                       continue;
+               case STAGE_ENTRY_BUSY:
+                       /* BUSY -> INVALID */
+                       /*
+                        * This must be done atomically, because the filler thread can set
+                        * the entry to `USED`, in which case we must process it, so we must
+                        * detect that eventuality.
+                        */
+                       if (InterlockedCompareExchange (&entries [i].state, STAGE_ENTRY_INVALID, STAGE_ENTRY_BUSY) != STAGE_ENTRY_BUSY)
+                               goto retry;
+                       continue;
+               case STAGE_ENTRY_USED:
+                       break;
+               default:
+                       SGEN_ASSERT (0, FALSE, "Invalid stage entry state");
+                       break;
+               }
+
+               /* state is USED */
+
+               process_func (entries [i].obj, entries [i].user_data, i); /* third argument is the entry's index */
+
+               entries [i].obj = NULL;
+               entries [i].user_data = NULL;
+
+               mono_memory_write_barrier ();
+
+               /* USED -> FREE */
+               /*
+                * This transition only happens here, so we don't have to do it atomically.
+                */
+               entries [i].state = STAGE_ENTRY_FREE;
+       }
+
+       mono_memory_write_barrier ();
+
+       *next_entry = 0;
+}
+
+#ifdef HEAVY_STATISTICS
+/* Counters for the staging queue's contended paths; registered in sgen_init_fin_weak_hash (). */
+static guint64 stat_overflow_abort = 0;
+static guint64 stat_wait_for_processing = 0;
+static guint64 stat_increment_other_thread = 0;
+static guint64 stat_index_decremented = 0;
+static guint64 stat_entry_invalidated = 0;
+static guint64 stat_success = 0;
+#endif
+
+/*
+ * Stage (obj, user_data) in a free slot of `entries` without taking the GC
+ * lock.  Returns the slot index on success, or -1 if the queue is full and
+ * the caller should drain it.  Spins (with back-off) while a drainer holds
+ * the stage locked (`*next_entry` == -1).
+ */
+static int
+add_stage_entry (int num_entries, volatile gint32 *next_entry, StageEntry *entries, GCObject *obj, void *user_data)
+{
+       gint32 index, new_next_entry, old_next_entry;
+       gint32 previous_state;
+
+ retry:
+       for (;;) {
+               index = *next_entry;
+               if (index >= num_entries) {
+                       HEAVY_STAT (++stat_overflow_abort);
+                       return -1;
+               }
+               if (index < 0) {
+                       /*
+                        * Backed-off waiting is way more efficient than even using a
+                        * dedicated lock for this.
+                        */
+                       while ((index = *next_entry) < 0) {
+                               /*
+                                * This seems like a good value.  Determined by timing
+                                * sgen-weakref-stress.exe.
+                                */
+                               g_usleep (200);
+                               HEAVY_STAT (++stat_wait_for_processing);
+                       }
+                       continue;
+               }
+               /* FREE -> BUSY */
+               if (entries [index].state != STAGE_ENTRY_FREE ||
+                               InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_BUSY, STAGE_ENTRY_FREE) != STAGE_ENTRY_FREE) {
+                       /*
+                        * If we can't get the entry it must be because another thread got
+                        * it first.  We don't want to wait for that thread to increment
+                        * `next_entry`, so we try to do it ourselves.  Whether we succeed
+                        * or not, we start over.
+                        */
+                       if (*next_entry == index) {
+                               InterlockedCompareExchange (next_entry, index + 1, index);
+                               //g_print ("tried increment for other thread\n");
+                               HEAVY_STAT (++stat_increment_other_thread);
+                       }
+                       continue;
+               }
+               /* state is BUSY now */
+               mono_memory_write_barrier ();
+               /*
+                * Incrementing `next_entry` must happen after setting the state to `BUSY`.
+                * If it were the other way around, it would be possible that after a filler
+                * incremented the index, other threads fill up the queue, the queue is
+                * drained, the original filler finally fills in the slot, but `next_entry`
+                * ends up at the start of the queue, and new entries are written in the
+                * queue in front of, not behind, the original filler's entry.
+                *
+                * We don't actually require that the CAS succeeds, but we do require that
+                * the value of `next_entry` is not lower than our index.  Since the drainer
+                * sets it to `-1`, that also takes care of the case that the drainer is
+                * currently running.
+                */
+               old_next_entry = InterlockedCompareExchange (next_entry, index + 1, index);
+               if (old_next_entry < index) {
+                       /* BUSY -> FREE */
+                       /* INVALID -> FREE */
+                       /*
+                        * The state might still be `BUSY`, or the drainer could have set it
+                        * to `INVALID`.  In either case, there's no point in CASing.  Set
+                        * it to `FREE` and start over.
+                        */
+                       entries [index].state = STAGE_ENTRY_FREE;
+                       HEAVY_STAT (++stat_index_decremented);
+                       continue;
+               }
+               break;
+       }
+
+       SGEN_ASSERT (0, index >= 0 && index < num_entries, "Invalid index");
+
+       /* We own the slot - publish the payload before flipping the state to USED. */
+       entries [index].obj = obj;
+       entries [index].user_data = user_data;
+
+       mono_memory_write_barrier ();
+
+       new_next_entry = *next_entry;
+       mono_memory_read_barrier ();
+       /* BUSY -> USED */
+       /*
+        * A `BUSY` entry will either still be `BUSY` or the drainer will have set it to
+        * `INVALID`.  In the former case, we set it to `USED` and we're finished.  In the
+        * latter case, we reset it to `FREE` and start over.
+        */
+       previous_state = InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_USED, STAGE_ENTRY_BUSY);
+       if (previous_state == STAGE_ENTRY_BUSY) {
+               SGEN_ASSERT (0, new_next_entry >= index || new_next_entry < 0, "Invalid next entry index - as long as we're busy, other thread can only increment or invalidate it");
+               HEAVY_STAT (++stat_success);
+               return index;
+       }
+
+       SGEN_ASSERT (0, previous_state == STAGE_ENTRY_INVALID, "Invalid state transition - other thread can only make busy state invalid");
+       entries [index].obj = NULL;
+       entries [index].user_data = NULL;
+       mono_memory_write_barrier ();
+       /* INVALID -> FREE */
+       entries [index].state = STAGE_ENTRY_FREE;
+
+       HEAVY_STAT (++stat_entry_invalidated);
+
+       goto retry;
+}
+
+/* LOCKING: requires that the GC lock is held */
+/*
+ * Stage-drain callback: register `obj` for finalization in the generation
+ * it currently resides in.
+ */
+static void
+process_fin_stage_entry (GCObject *obj, void *user_data, int index)
+{
+       if (ptr_in_nursery (obj))
+               register_for_finalization (obj, user_data, GENERATION_NURSERY);
+       else
+               register_for_finalization (obj, user_data, GENERATION_OLD);
+}
+
+/* LOCKING: requires that the GC lock is held */
+/* Force-drain the finalization staging queue (world is stopped, single drainer). */
+void
+sgen_process_fin_stage_entries (void)
+{
+       lock_stage_for_processing (&next_fin_stage_entry);
+       process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
+}
+
+/*
+ * Queue `obj` for finalization registration.  Lock-free in the common case;
+ * on overflow the thread that wins the stage lock drains the queue under
+ * the GC lock and everybody retries.
+ */
+void
+sgen_object_register_for_finalization (GCObject *obj, void *user_data)
+{
+       while (add_stage_entry (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, obj, user_data) == -1) {
+               if (try_lock_stage_for_processing (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry)) {
+                       LOCK_GC;
+                       process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
+                       UNLOCK_GC;
+               }
+       }
+}
+
+/* LOCKING: requires that the GC lock is held */
+/*
+ * Remove from `hash_table` up to `out_size` finalizable objects matching
+ * `predicate` and store them in `out_array`.  Returns the number stored.
+ */
+static int
+finalizers_with_predicate (SgenObjectPredicateFunc predicate, void *user_data, GCObject **out_array, int out_size, SgenHashTable *hash_table)
+{
+       GCObject *object;
+       gpointer dummy G_GNUC_UNUSED;
+       int count;
+
+       if (no_finalize || !out_size || !out_array)
+               return 0;
+       count = 0;
+       SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
+               /* Keys carry tag bits - strip them to get the actual object. */
+               object = tagged_object_get_object (object);
+
+               if (predicate (object, user_data)) {
+                       /* remove and put in out_array */
+                       SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
+                       out_array [count ++] = object;
+                       SGEN_LOG (5, "Collecting object for finalization: %p (%s) (%d)", object, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (object)), sgen_hash_table_num_entries (hash_table));
+                       if (count == out_size)
+                               return count;
+                       continue;
+               }
+       } SGEN_HASH_TABLE_FOREACH_END;
+       return count;
+}
+
+/**
+ * sgen_gather_finalizers_if:
+ * @predicate: predicate function
+ * @user_data: predicate function data argument
+ * @out_array: output array
+ * @out_size: size of output array
+ *
+ * Store inside @out_array up to @out_size objects that match @predicate. Returns the number
+ * of stored items. Can be called repeatedly until it returns 0.
+ *
+ * The items are removed from the finalizer data structure, so the caller is supposed
+ * to finalize them.
+ *
+ * @out_array may be on the stack, or registered as a root, to allow the GC to know the
+ * objects are still alive.
+ */
+int
+sgen_gather_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, GCObject **out_array, int out_size)
+{
+       int result;
+
+       LOCK_GC;
+       /* Apply staged registrations first so the hash tables are up to date. */
+       sgen_process_fin_stage_entries ();
+       result = finalizers_with_predicate (predicate, user_data, (GCObject**)out_array, out_size, &minor_finalizable_hash);
+       if (result < out_size) {
+               result += finalizers_with_predicate (predicate, user_data, (GCObject**)out_array + result, out_size - result,
+                       &major_finalizable_hash);
+       }
+       UNLOCK_GC;
+
+       return result;
+}
+
+/* Disappearing-link (weak reference) tables, one per generation. */
+static SgenHashTable minor_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, sgen_aligned_addr_hash, NULL);
+static SgenHashTable major_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, sgen_aligned_addr_hash, NULL);
+
+/* Map a generation to its disappearing-link hash table. */
+static SgenHashTable*
+get_dislink_hash_table (int generation)
+{
+       switch (generation) {
+       case GENERATION_NURSERY: return &minor_disappearing_link_hash;
+       case GENERATION_OLD: return &major_disappearing_link_hash;
+       default: g_assert_not_reached ();
+       }
+}
+
+/* LOCKING: assumes the GC lock is held */
+/*
+ * With a NULL `obj`, remove `link` from the given generation's table;
+ * otherwise (re-)insert it.  The link address is the key, no value is stored.
+ */
+static void
+add_or_remove_disappearing_link (GCObject *obj, void **link, int generation)
+{
+       SgenHashTable *hash_table = get_dislink_hash_table (generation);
+
+       if (!obj) {
+               if (sgen_hash_table_remove (hash_table, link, NULL)) {
+                       SGEN_LOG (5, "Removed dislink %p (%d) from %s table",
+                                       link, hash_table->num_entries, sgen_generation_name (generation));
+               }
+               return;
+       }
+
+       sgen_hash_table_replace (hash_table, link, NULL, NULL);
+       SGEN_LOG (5, "Added dislink for object: %p (%s) at %p to %s table",
+                       obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE_UNCHECKED (obj)), link, sgen_generation_name (generation));
+}
+
+/* LOCKING: requires that the GC lock is held */
+/*
+ * Walk the disappearing links of `generation` and null, update or promote
+ * each one depending on whether its target survived the collection.  Only
+ * links whose `track` flag differs from `before_finalization` are
+ * processed (see the comment inside on why).
+ */
+void
+sgen_null_link_in_range (int generation, gboolean before_finalization, ScanCopyContext ctx)
+{
+       CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
+       GrayQueue *queue = ctx.queue;
+       void **link;
+       gpointer dummy G_GNUC_UNUSED;
+       SgenHashTable *hash = get_dislink_hash_table (generation);
+
+       SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
+               char *object;
+               gboolean track;
+
+               /*
+               We null a weak link before unregistering it, so it's possible that a thread is
+               suspended right in between setting the content to null and staging the unregister.
+
+               The rest of this code cannot handle null links as DISLINK_OBJECT (NULL) produces an invalid address.
+
+               We should simply skip the entry as the staged removal will take place during the next GC.
+               */
+               if (!*link) {
+                       SGEN_LOG (5, "Dislink %p was externally nullified", link);
+                       continue;
+               }
+
+               track = DISLINK_TRACK (link);
+               /*
+                * Tracked references are processed after
+                * finalization handling whereas standard weak
+                * references are processed before.  If an
+                * object is still not marked after finalization
+                * handling it means that it either doesn't have
+                * a finalizer or the finalizer has already run,
+                * so we must null a tracking reference.
+                */
+               if (track != before_finalization) {
+                       object = DISLINK_OBJECT (link);
+                       /*
+                       We should guard against a null object being hidden. This can sometimes happen.
+                       */
+                       if (!object) {
+                               SGEN_LOG (5, "Dislink %p with a hidden null object", link);
+                               continue;
+                       }
+
+                       if (!major_collector.is_object_live (object)) {
+                               if (sgen_gc_is_object_ready_for_finalization (object)) {
+                                       /* Target is dead - null the weak reference and drop the entry. */
+                                       *link = NULL;
+                                       binary_protocol_dislink_update (link, NULL, 0, 0);
+                                       SGEN_LOG (5, "Dislink nullified at %p to GCed object %p", link, object);
+                                       SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
+                                       continue;
+                               } else {
+                                       char *copy = object;
+                                       copy_func ((void**)&copy, queue);
+
+                                       /* Update pointer if it's moved.  If the object
+                                        * has been moved out of the nursery, we need to
+                                        * remove the link from the minor hash table to
+                                        * the major one.
+                                        *
+                                        * FIXME: what if an object is moved earlier?
+                                        */
+
+                                       if (hash == &minor_disappearing_link_hash && !ptr_in_nursery (copy)) {
+                                               SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
+
+                                               g_assert (copy);
+                                               *link = HIDE_POINTER (copy, track);
+                                               add_or_remove_disappearing_link ((GCObject*)copy, link, GENERATION_OLD);
+                                               binary_protocol_dislink_update (link, copy, track, 0);
+
+                                               SGEN_LOG (5, "Upgraded dislink at %p to major because object %p moved to %p", link, object, copy);
+
+                                               continue;
+                                       } else {
+                                               *link = HIDE_POINTER (copy, track);
+                                               binary_protocol_dislink_update (link, copy, track, 0);
+                                               SGEN_LOG (5, "Updated dislink at %p to %p", link, DISLINK_OBJECT (link));
+                                       }
+                               }
+                       }
+               }
+       } SGEN_HASH_TABLE_FOREACH_END;
+}
+
+/* LOCKING: requires that the GC lock is held */
+/*
+ * Null every disappearing link of `generation` whose target matches
+ * `predicate`.  Already-nullified links are skipped.
+ */
+void
+sgen_null_links_if (SgenObjectPredicateFunc predicate, void *data, int generation)
+{
+       void **link;
+       gpointer dummy G_GNUC_UNUSED;
+       SgenHashTable *hash = get_dislink_hash_table (generation);
+       SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
+               char *object = DISLINK_OBJECT (link);
+
+               if (!*link)
+                       continue;
+
+               if (predicate ((GCObject*)object, data)) {
+                       *link = NULL;
+                       binary_protocol_dislink_update (link, NULL, 0, 0);
+                       SGEN_LOG (5, "Dislink nullified by predicate at %p to GCed object %p", link, object);
+                       SGEN_HASH_TABLE_FOREACH_REMOVE (FALSE /* TRUE */);
+                       continue;
+               }
+       } SGEN_HASH_TABLE_FOREACH_END;
+}
+
+/*
+ * Drop the finalization registration of every object in `generation`'s
+ * table that matches `predicate`.  The objects are not finalized.
+ */
+void
+sgen_remove_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, int generation)
+{
+       SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
+       GCObject *object;
+       gpointer dummy G_GNUC_UNUSED;
+
+       SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
+               object = tagged_object_get_object (object);
+
+               if (predicate (object, user_data)) {
+                       SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
+                       continue;
+               }
+       } SGEN_HASH_TABLE_FOREACH_END;  
+}
+
+/* LOCKING: requires that the GC lock is held */
+/*
+ * Stage-drain callback: apply a staged (un)register of weak link `_link`.
+ * The link is first removed from both generations' tables, then, if `obj`
+ * is non-NULL, re-added to the table of the generation `obj` lives in.
+ */
+static void
+process_dislink_stage_entry (GCObject *obj, void *_link, int index)
+{
+       /* index is -1 when applied directly rather than via the staging queue. */
+       void **link = _link;
+
+       if (index >= 0)
+               binary_protocol_dislink_process_staged (link, obj, index);
+
+       add_or_remove_disappearing_link (NULL, link, GENERATION_NURSERY);
+       add_or_remove_disappearing_link (NULL, link, GENERATION_OLD);
+       if (obj) {
+               if (ptr_in_nursery (obj))
+                       add_or_remove_disappearing_link (obj, link, GENERATION_NURSERY);
+               else
+                       add_or_remove_disappearing_link (obj, link, GENERATION_OLD);
+       }
+}
+
+#define NUM_DISLINK_STAGE_ENTRIES      1024
+
+/* Staging queue for disappearing-link updates, same scheme as the fin stage. */
+static volatile gint32 next_dislink_stage_entry = 0;
+static StageEntry dislink_stage_entries [NUM_DISLINK_STAGE_ENTRIES];
+
+/* LOCKING: requires that the GC lock is held */
+/* Force-drain the disappearing-link staging queue. */
+void
+sgen_process_dislink_stage_entries (void)
+{
+       lock_stage_for_processing (&next_dislink_stage_entry);
+       process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
+}
+
+/*
+ * Register (obj non-NULL) or unregister (obj NULL) the weak link `link`.
+ * When `in_gc` the update is applied immediately (caller holds the GC
+ * lock); otherwise it is staged lock-free, draining on overflow.
+ */
+void
+sgen_register_disappearing_link (GCObject *obj, void **link, gboolean track, gboolean in_gc)
+{
+       /* The link itself stores the object pointer hidden, with the track bit encoded. */
+       if (obj)
+               *link = HIDE_POINTER (obj, track);
+       else
+               *link = NULL;
+
+#if 1
+       if (in_gc) {
+               binary_protocol_dislink_update (link, obj, track, 0);
+               process_dislink_stage_entry (obj, link, -1);
+       } else {
+               int index;
+               binary_protocol_dislink_update (link, obj, track, 1);
+               while ((index = add_stage_entry (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, obj, link)) == -1) {
+                       if (try_lock_stage_for_processing (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry)) {
+                               LOCK_GC;
+                               process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
+                               UNLOCK_GC;
+                       }
+               }
+               binary_protocol_dislink_update_staged (link, obj, track, index);
+       }
+#else
+       /* Simpler, lock-based fallback path kept for debugging the staging scheme. */
+       if (!in_gc)
+               LOCK_GC;
+       binary_protocol_dislink_update (link, obj, track, 0);
+       process_dislink_stage_entry (obj, link, -1);
+       if (!in_gc)
+               UNLOCK_GC;
+#endif
+}
+
+/* Register the heavy-statistics counters for the staging queues. */
+void
+sgen_init_fin_weak_hash (void)
+{
+#ifdef HEAVY_STATISTICS
+       mono_counters_register ("FinWeak Successes", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_success);
+       mono_counters_register ("FinWeak Overflow aborts", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_overflow_abort);
+       mono_counters_register ("FinWeak Wait for processing", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wait_for_processing);
+       mono_counters_register ("FinWeak Increment other thread", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_increment_other_thread);
+       mono_counters_register ("FinWeak Index decremented", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_index_decremented);
+       mono_counters_register ("FinWeak Entry invalidated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_entry_invalidated);
+#endif
+}
+
+#endif /* HAVE_SGEN_GC */
diff --git a/mono/sgen/sgen-gc.c b/mono/sgen/sgen-gc.c
new file mode 100644 (file)
index 0000000..b152646
--- /dev/null
@@ -0,0 +1,3460 @@
+/*
+ * sgen-gc.c: Simple generational GC.
+ *
+ * Author:
+ *     Paolo Molaro (lupus@ximian.com)
+ *  Rodrigo Kumpera (kumpera@gmail.com)
+ *
+ * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
+ * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ *
+ * Thread start/stop adapted from Boehm's GC:
+ * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+ * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
+ * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
+ * Copyright (c) 2000-2004 by Hewlett-Packard Company.  All rights reserved.
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright 2011 Xamarin, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Important: allocation provides always zeroed memory, having to do
+ * a memset after allocation is deadly for performance.
+ * Memory usage at startup is currently as follows:
+ * 64 KB pinned space
+ * 64 KB internal space
+ * size of nursery
+ * We should provide a small memory config with half the sizes
+ *
+ * We currently try to make as few mono assumptions as possible:
+ * 1) 2-word header with no GC pointers in it (first vtable, second to store the
+ *    forwarding ptr)
+ * 2) gc descriptor is the second word in the vtable (first word in the class)
+ * 3) 8 byte alignment is the minimum and enough (not true for special structures (SIMD), FIXME)
+ * 4) there is a function to get an object's size and the number of
+ *    elements in an array.
+ * 5) we know the special way bounds are allocated for complex arrays
+ * 6) we know about proxies and how to treat them when domains are unloaded
+ *
+ * Always try to keep stack usage to a minimum: no recursive behaviour
+ * and no large stack allocs.
+ *
+ * General description.
+ * Objects are initially allocated in a nursery using a fast bump-pointer technique.
+ * When the nursery is full we start a nursery collection: this is performed with a
+ * copying GC.
+ * When the old generation is full we start a copying GC of the old generation as well:
+ * this will be changed to mark&sweep with copying when fragmentation becomes too severe
+ * in the future.  Maybe we'll even do both during the same collection like IMMIX.
+ *
+ * The things that complicate this description are:
+ * *) pinned objects: we can't move them so we need to keep track of them
+ * *) no precise info of the thread stacks and registers: we need to be able to
+ *    quickly find the objects that may be referenced conservatively and pin them
+ *    (this makes the first issues more important)
+ * *) large objects are too expensive to be dealt with using copying GC: we handle them
+ *    with mark/sweep during major collections
+ * *) some objects need to not move even if they are small (interned strings, Type handles):
+ *    we use mark/sweep for them, too: they are not allocated in the nursery, but inside
+ *    PinnedChunks regions
+ */
+
+/*
+ * TODO:
+
+ *) we could have a function pointer in MonoClass to implement
+  customized write barriers for value types
+
+ *) investigate the stuff needed to advance a thread to a GC-safe
+  point (single-stepping, read from unmapped memory etc) and implement it.
+  This would enable us to inline allocations and write barriers, for example,
+  or at least parts of them, like the write barrier checks.
+  We may need this also for handling precise info on stacks, even simple things
+  as having uninitialized data on the stack and having to wait for the prolog
+  to zero it. Not an issue for the last frame that we scan conservatively.
+  We could always not trust the value in the slots anyway.
+
+ *) modify the jit to save info about references in stack locations:
+  this can be done just for locals as a start, so that at least
+  part of the stack is handled precisely.
+
+ *) test/fix endianness issues
+
+ *) Implement a card table as the write barrier instead of remembered
+    sets?  Card tables are not easy to implement with our current
+    memory layout.  We have several different kinds of major heap
+    objects: Small objects in regular blocks, small objects in pinned
+    chunks and LOS objects.  If we just have a pointer we have no way
+    to tell which kind of object it points into, therefore we cannot
+    know where its card table is.  The least we have to do to make
+    this happen is to get rid of write barriers for indirect stores.
+    (See next item)
+
+ *) Get rid of write barriers for indirect stores.  We can do this by
+    telling the GC to wbarrier-register an object once we do an ldloca
+    or ldelema on it, and to unregister it once it's not used anymore
+    (it can only travel downwards on the stack).  The problem with
+    unregistering is that it needs to happen eventually no matter
+    what, even if exceptions are thrown, the thread aborts, etc.
+    Rodrigo suggested that we could do only the registering part and
+    let the collector find out (pessimistically) when it's safe to
+    unregister, namely when the stack pointer of the thread that
+    registered the object is higher than it was when the registering
+    happened.  This might make for a good first implementation to get
+    some data on performance.
+
+ *) Some sort of blacklist support?  Blacklists is a concept from the
+    Boehm GC: if during a conservative scan we find pointers to an
+    area which we might use as heap, we mark that area as unusable, so
+    pointer retention by random pinning pointers is reduced.
+
+ *) experiment with max small object size (very small right now - 2kb,
+    because it's tied to the max freelist size)
+
+  *) add an option to mmap the whole heap in one chunk: it makes for many
+     simplifications in the checks (put the nursery at the top and just use a single
+     check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
+     not flexible (too much of the address space may be used by default or we can't
+     increase the heap as needed) and we'd need a race-free mechanism to return memory
+     back to the system (mprotect(PROT_NONE) will still keep the memory allocated if it
+     was written to, munmap is needed, but the following mmap may not find the same segment
+     free...)
+
+ *) memzero the major fragments after restarting the world and optionally a smaller
+    chunk at a time
+
+ *) investigate having fragment zeroing threads
+
+ *) separate locks for finalization and other minor stuff to reduce
+    lock contention
+
+ *) try a different copying order to improve memory locality
+
+ *) a thread abort after a store but before the write barrier will
+    prevent the write barrier from executing
+
+ *) specialized dynamically generated markers/copiers
+
+ *) Dynamically adjust TLAB size to the number of threads.  If we have
+    too many threads that do allocation, we might need smaller TLABs,
+    and we might get better performance with larger TLABs if we only
+    have a handful of threads.  We could sum up the space left in all
+    assigned TLABs and if that's more than some percentage of the
+    nursery size, reduce the TLAB size.
+
+ *) Explore placing unreachable objects on unused nursery memory.
+       Instead of memset'ng a region to zero, place an int[] covering it.
+       A good place to start is add_nursery_frag. The tricky thing here is
+       placing those objects atomically outside of a collection.
+
+ *) Allocation should use asymmetric Dekker synchronization:
+       http://blogs.oracle.com/dave/resource/Asymmetric-Dekker-Synchronization.txt
+       This should help weak consistency archs.
+ */
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#ifdef __MACH__
+#undef _XOPEN_SOURCE
+#define _XOPEN_SOURCE
+#define _DARWIN_C_SOURCE
+#endif
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef HAVE_PTHREAD_H
+#include <pthread.h>
+#endif
+#ifdef HAVE_PTHREAD_NP_H
+#include <pthread_np.h>
+#endif
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#include <stdlib.h>
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-cardtable.h"
+#include "mono/sgen/sgen-protocol.h"
+#include "mono/sgen/sgen-memory-governor.h"
+#include "mono/sgen/sgen-hash-table.h"
+#include "mono/sgen/sgen-cardtable.h"
+#include "mono/sgen/sgen-pinning.h"
+#include "mono/sgen/sgen-workers.h"
+#include "mono/sgen/sgen-client.h"
+#include "mono/sgen/sgen-pointer-queue.h"
+#include "mono/sgen/gc-internal-agnostic.h"
+#include "mono/utils/mono-proclib.h"
+#include "mono/utils/mono-memory-model.h"
+#include "mono/utils/hazard-pointer.h"
+
+#include <mono/utils/memcheck.h>
+
+#undef pthread_create
+#undef pthread_join
+#undef pthread_detach
+
/*
 * ######################################################################
 * ########  Types and constants used by the GC.
 * ######################################################################
 */

/* 0 means not initialized, 1 is initialized, -1 means in progress */
static int gc_initialized = 0;
/* If set, check if we need to do something every X allocations */
gboolean has_per_allocation_action;
/* If set, do a heap check every X allocation */
guint32 verify_before_allocs = 0;
/* If set, do a minor collection before every X allocation */
guint32 collect_before_allocs = 0;
/* If set, do a whole heap check before each collection */
static gboolean whole_heap_check_before_collection = FALSE;
/* If set, do a heap consistency check before each minor collection */
static gboolean consistency_check_at_minor_collection = FALSE;
/* If set, do a mod union consistency check before each finishing collection pause */
static gboolean mod_union_consistency_check = FALSE;
/* If set, check whether mark bits are consistent after major collections */
static gboolean check_mark_bits_after_major_collection = FALSE;
/* If set, check that all nursery objects are pinned/not pinned, depending on context */
static gboolean check_nursery_objects_pinned = FALSE;
/* If set, do a few checks when the concurrent collector is used */
static gboolean do_concurrent_checks = FALSE;
/* If set, do a plausibility check on the scan_starts before and after
   each collection */
static gboolean do_scan_starts_check = FALSE;

/*
 * If the major collector is concurrent and this is FALSE, we will
 * never initiate a synchronous major collection, unless requested via
 * GC.Collect().
 */
static gboolean allow_synchronous_major = TRUE;
/* Debug option: if set, minor collections are never performed */
static gboolean disable_minor_collections = FALSE;
/* Debug option: if set, major collections are never performed */
static gboolean disable_major_collections = FALSE;
/* If set, verify the nursery before minor collections */
static gboolean do_verify_nursery = FALSE;
/* If set, dump nursery contents during nursery verification */
static gboolean do_dump_nursery_content = FALSE;
/* If set, nursery objects are delimited by canary words to detect overruns */
static gboolean enable_nursery_canaries = FALSE;

#ifdef HEAVY_STATISTICS
/* Counters for degraded allocation (allocating directly in the major heap) */
guint64 stat_objects_alloced_degraded = 0;
guint64 stat_bytes_alloced_degraded = 0;

/* Copy/scan counters, split by generation */
guint64 stat_copy_object_called_nursery = 0;
guint64 stat_objects_copied_nursery = 0;
guint64 stat_copy_object_called_major = 0;
guint64 stat_objects_copied_major = 0;

guint64 stat_scan_object_called_nursery = 0;
guint64 stat_scan_object_called_major = 0;

guint64 stat_slots_allocated_in_vain;

/* Reasons why a nursery object could not be copied out of the nursery */
guint64 stat_nursery_copy_object_failed_from_space = 0;
guint64 stat_nursery_copy_object_failed_forwarded = 0;
guint64 stat_nursery_copy_object_failed_pinned = 0;
guint64 stat_nursery_copy_object_failed_to_space = 0;

/* Write-barrier invocation counters, one per barrier entry point */
static guint64 stat_wbarrier_add_to_global_remset = 0;
static guint64 stat_wbarrier_set_arrayref = 0;
static guint64 stat_wbarrier_arrayref_copy = 0;
static guint64 stat_wbarrier_generic_store = 0;
static guint64 stat_wbarrier_generic_store_atomic = 0;
static guint64 stat_wbarrier_set_root = 0;
static guint64 stat_wbarrier_value_copy = 0;
static guint64 stat_wbarrier_object_copy = 0;
#endif

static guint64 stat_pinned_objects = 0;

/* Wall-clock time spent in each phase of minor collections */
static guint64 time_minor_pre_collection_fragment_clear = 0;
static guint64 time_minor_pinning = 0;
static guint64 time_minor_scan_remsets = 0;
static guint64 time_minor_scan_pinned = 0;
static guint64 time_minor_scan_roots = 0;
static guint64 time_minor_finish_gray_stack = 0;
static guint64 time_minor_fragment_creation = 0;

/* Wall-clock time spent in each phase of major collections */
static guint64 time_major_pre_collection_fragment_clear = 0;
static guint64 time_major_pinning = 0;
static guint64 time_major_scan_pinned = 0;
static guint64 time_major_scan_roots = 0;
static guint64 time_major_scan_mod_union = 0;
static guint64 time_major_finish_gray_stack = 0;
static guint64 time_major_free_bigobjs = 0;
static guint64 time_major_los_sweep = 0;
static guint64 time_major_sweep = 0;
static guint64 time_major_fragment_creation = 0;

/* Longest pause observed so far */
static guint64 time_max = 0;

static SGEN_TV_DECLARE (time_major_conc_collection_start);
static SGEN_TV_DECLARE (time_major_conc_collection_end);

static SGEN_TV_DECLARE (last_minor_collection_start_tv);
static SGEN_TV_DECLARE (last_minor_collection_end_tv);

/* Verbosity of GC debug output (0 = off); stream it is written to */
int gc_debug_level = 0;
FILE* gc_debug_file;

/*
void
mono_gc_flush_info (void)
{
	fflush (gc_debug_file);
}
*/

/* Shorthands for the SGen timing primitives */
#define TV_DECLARE SGEN_TV_DECLARE
#define TV_GETTIME SGEN_TV_GETTIME
#define TV_ELAPSED SGEN_TV_ELAPSED

static SGEN_TV_DECLARE (sgen_init_timestamp);

NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;

/* Shorthands for common SGen object-header operations */
#define object_is_forwarded	SGEN_OBJECT_IS_FORWARDED
#define object_is_pinned	SGEN_OBJECT_IS_PINNED
#define pin_object		SGEN_PIN_OBJECT

#define ptr_in_nursery sgen_ptr_in_nursery

#define LOAD_VTABLE	SGEN_LOAD_VTABLE
+
+gboolean
+nursery_canaries_enabled (void)
+{
+       return enable_nursery_canaries;
+}
+
#define safe_object_get_size	sgen_safe_object_get_size

/*
 * ######################################################################
 * ########  Global data.
 * ######################################################################
 */
/* Big GC lock: protects global collector state */
LOCK_DECLARE (gc_mutex);
gboolean sgen_try_free_some_memory;

#define SCAN_START_SIZE	SGEN_SCAN_START_SIZE

/* Non-zero while allocating directly in the major heap because the nursery is exhausted */
size_t degraded_mode = 0;

static mword bytes_pinned_from_failed_allocation = 0;

/* The nursery memory section; set up by alloc_nursery () */
GCMemSection *nursery_section = NULL;
/* Conservative bounds of all GC-managed memory; maintained by sgen_update_heap_boundaries () */
static volatile mword lowest_heap_address = ~(mword)0;
static volatile mword highest_heap_address = 0;

LOCK_DECLARE (sgen_interruption_mutex);

/* Generation being collected while a collection is in progress, -1 otherwise */
int current_collection_generation = -1;
static volatile gboolean concurrent_collection_in_progress = FALSE;

/* objects that are ready to be finalized */
static SgenPointerQueue fin_ready_queue = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_FINALIZE_READY);
static SgenPointerQueue critical_fin_queue = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_FINALIZE_READY);

/* registered roots: the key to the hash is the root start address */
/* 
 * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
 */
SgenHashTable roots_hash [ROOT_TYPE_NUM] = {
	SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), sgen_aligned_addr_hash, NULL),
	SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), sgen_aligned_addr_hash, NULL),
	SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), sgen_aligned_addr_hash, NULL)
};
static mword roots_size = 0; /* amount of memory in the root set */

/* The size of a TLAB */
/* The bigger the value, the less often we have to go to the slow path to allocate a new 
 * one, but the more space is wasted by threads not allocating much memory.
 * FIXME: Tune this.
 * FIXME: Make this self-tuning for each thread.
 */
guint32 tlab_size = (1024 * 4);

#define MAX_SMALL_OBJ_SIZE	SGEN_MAX_SMALL_OBJ_SIZE

#define ALLOC_ALIGN		SGEN_ALLOC_ALIGN

#define ALIGN_UP		SGEN_ALIGN_UP

#ifdef SGEN_DEBUG_INTERNAL_ALLOC
MonoNativeThreadId main_gc_thread = NULL;
#endif

/*Object was pinned during the current collection*/
static mword objects_pinned;

/*
 * ######################################################################
 * ########  Macros and function declarations.
 * ######################################################################
 */

typedef SgenGrayQueue GrayQueue;

/* forward declarations */
static void scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx);

static void pin_from_roots (void *start_nursery, void *end_nursery, ScanCopyContext ctx);
static void finish_gray_stack (int generation, ScanCopyContext ctx);


SgenMajorCollector major_collector;
SgenMinorCollector sgen_minor_collector;
/* FIXME: get rid of this */
static GrayQueue gray_queue;

static SgenRememberedSet remset;

/* The gray queue to use from the main collection thread. */
#define WORKERS_DISTRIBUTE_GRAY_QUEUE	(&gray_queue)
+
+/*
+ * The gray queue a worker job must use.  If we're not parallel or
+ * concurrent, we use the main gray queue.
+ */
+static SgenGrayQueue*
+sgen_workers_get_job_gray_queue (WorkerData *worker_data)
+{
+       return worker_data ? &worker_data->private_gray_queue : WORKERS_DISTRIBUTE_GRAY_QUEUE;
+}
+
+static void
+gray_queue_redirect (SgenGrayQueue *queue)
+{
+       gboolean wake = FALSE;
+
+       for (;;) {
+               GrayQueueSection *section = sgen_gray_object_dequeue_section (queue);
+               if (!section)
+                       break;
+               sgen_section_gray_queue_enqueue (queue->alloc_prepare_data, section);
+               wake = TRUE;
+       }
+
+       if (wake) {
+               g_assert (concurrent_collection_in_progress);
+               sgen_workers_ensure_awake ();
+       }
+}
+
+static void
+gray_queue_enable_redirect (SgenGrayQueue *queue)
+{
+       if (!concurrent_collection_in_progress)
+               return;
+
+       sgen_gray_queue_set_alloc_prepare (queue, gray_queue_redirect, sgen_workers_get_distribute_section_gray_queue ());
+       gray_queue_redirect (queue);
+}
+
/*
 * sgen_scan_area_with_callback:
 *
 *   Walk the objects in the memory area [start,end) and call @callback
 * with (object, aligned-size, @data) for each one.  The area must consist
 * of objects separated by runs of zeroed pointer-sized words.  If
 * @allow_flags is set, a forwarded object is followed to its forwarding
 * address before the callback runs.  Array-fill dummy objects are skipped
 * without invoking the callback.
 */
void
sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags)
{
	while (start < end) {
		size_t size;
		char *obj;

		/* A zero word is inter-object padding: step over it one pointer at a time. */
		if (!*(void**)start) {
			start += sizeof (void*); /* should be ALLOC_ALIGN, really */
			continue;
		}

		if (allow_flags) {
			/* Follow the forwarding pointer, if the object carries one. */
			if (!(obj = SGEN_OBJECT_IS_FORWARDED (start)))
				obj = start;
		} else {
			obj = start;
		}

		if (!sgen_client_object_is_array_fill ((GCObject*)obj)) {
			CHECK_CANARY_FOR_OBJECT (obj);
			size = ALIGN_UP (safe_object_get_size ((GCObject*)obj));
			callback (obj, size, data);
			/* Include the trailing canary when advancing past the object. */
			CANARIFY_SIZE (size);
		} else {
			/* Array-fill objects carry no canaries; just skip them. */
			size = ALIGN_UP (safe_object_get_size ((GCObject*)obj));
		}

		start += size;
	}
}
+
/*
 * sgen_add_to_global_remset:
 *
 *   The global remset contains locations which point into newspace after
 * a minor collection. This can happen if the objects they point to are pinned.
 *
 * @ptr is the location holding the reference; @obj is the nursery object
 * it points to.
 *
 * LOCKING: If called from a parallel collector, the global remset
 * lock must be held.  For serial collectors that is not necessary.
 */
void
sgen_add_to_global_remset (gpointer ptr, gpointer obj)
{
	SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Target pointer of global remset must be in the nursery");

	HEAVY_STAT (++stat_wbarrier_add_to_global_remset);

	/* Sanity-check that we are only called at legitimate times. */
	if (!major_collector.is_concurrent) {
		SGEN_ASSERT (5, current_collection_generation != -1, "Global remsets can only be added during collections");
	} else {
		if (current_collection_generation == -1)
			SGEN_ASSERT (5, sgen_concurrent_collection_in_progress (), "Global remsets outside of collection pauses can only be added by the concurrent collector");
	}

	if (!object_is_pinned (obj))
		SGEN_ASSERT (5, sgen_minor_collector.is_split || sgen_concurrent_collection_in_progress (), "Non-pinned objects can only remain in nursery if it is a split nursery");
	else if (sgen_cement_lookup_or_register (obj))
		/* Object is handled by cementing: no remset entry needed. */
		return;

	remset.record_pointer (ptr);

	sgen_pin_stats_register_global_remset (obj);

	SGEN_LOG (8, "Adding global remset for %p", ptr);
	binary_protocol_global_remset (ptr, obj, (gpointer)SGEN_LOAD_VTABLE (obj));
}
+
/*
 * sgen_drain_gray_stack:
 *
 *   Scan objects in the gray stack until the stack is empty. This should be called
 * frequently after each object is copied, to achieve better locality and cache
 * usage.
 *
 * max_objs is the maximum number of objects to scan, or -1 to scan until the stack is
 * empty.
 *
 * Returns TRUE when the stack was drained, FALSE when the object budget
 * ran out first.
 */
gboolean
sgen_drain_gray_stack (int max_objs, ScanCopyContext ctx)
{
	ScanObjectFunc scan_func = ctx.ops->scan_object;
	GrayQueue *queue = ctx.queue;

	/* The major collector may supply a specialized drain routine for itself. */
	if (current_collection_generation == GENERATION_OLD && major_collector.drain_gray_stack)
		return major_collector.drain_gray_stack (ctx);

	do {
		int i;
		/* With max_objs < 0 the condition `i != max_objs` never becomes
		 * false, so we keep going until the queue yields NULL. */
		for (i = 0; i != max_objs; ++i) {
			char *obj;
			mword desc;
			GRAY_OBJECT_DEQUEUE (queue, &obj, &desc);
			if (!obj)
				return TRUE;
			SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj)));
			scan_func (obj, desc, queue);
		}
	} while (max_objs < 0);
	return FALSE;
}
+
/*
 * Addresses in the pin queue are already sorted. This function finds
 * the object header for each address and pins the object. The
 * addresses must be inside the nursery section.  The (start of the)
 * address array is overwritten with the addresses of the actually
 * pinned objects.  Return the number of pinned objects.
 *
 * If @do_scan_objects is set, the found objects are scanned with the
 * context's scan function instead of being pinned and gray-queued.
 */
static int
pin_objects_from_nursery_pin_queue (gboolean do_scan_objects, ScanCopyContext ctx)
{
	GCMemSection *section = nursery_section;
	void **start =  sgen_pinning_get_entry (section->pin_queue_first_entry);
	void **end = sgen_pinning_get_entry (section->pin_queue_last_entry);
	void *start_nursery = section->data;
	void *end_nursery = section->next_data;
	void *last = NULL;
	int count = 0;
	void *search_start;
	void *addr;
	/* Everything below pinning_front has already been examined. */
	void *pinning_front = start_nursery;
	size_t idx;
	void **definitely_pinned = start;
	ScanObjectFunc scan_func = ctx.ops->scan_object;
	SgenGrayQueue *queue = ctx.queue;

	sgen_nursery_allocator_prepare_for_pinning ();

	while (start < end) {
		void *obj_to_pin = NULL;
		size_t obj_to_pin_size = 0;
		mword desc;

		addr = *start;

		SGEN_ASSERT (0, addr >= start_nursery && addr < end_nursery, "Potential pinning address out of range");
		SGEN_ASSERT (0, addr >= last, "Pin queue not sorted");

		/* Duplicate entry (queue is sorted, so duplicates are adjacent). */
		if (addr == last) {
			++start;
			continue;
		}

		SGEN_LOG (5, "Considering pinning addr %p", addr);
		/* We've already processed everything up to pinning_front. */
		if (addr < pinning_front) {
			start++;
			continue;
		}

		/*
		 * Find the closest scan start <= addr.  We might search backward in the
		 * scan_starts array because entries might be NULL.  In the worst case we
		 * start at start_nursery.
		 */
		idx = ((char*)addr - (char*)section->data) / SCAN_START_SIZE;
		SGEN_ASSERT (0, idx < section->num_scan_start, "Scan start index out of range");
		search_start = (void*)section->scan_starts [idx];
		if (!search_start || search_start > addr) {
			while (idx) {
				--idx;
				search_start = section->scan_starts [idx];
				if (search_start && search_start <= addr)
					break;
			}
			if (!search_start || search_start > addr)
				search_start = start_nursery;
		}

		/*
		 * If the pinning front is closer than the scan start we found, start
		 * searching at the front.
		 */
		if (search_start < pinning_front)
			search_start = pinning_front;

		/*
		 * Now addr should be in an object a short distance from search_start.
		 *
		 * search_start must point to zeroed mem or point to an object.
		 */
		do {
			size_t obj_size, canarified_obj_size;

			/* Skip zeros. */
			if (!*(void**)search_start) {
				search_start = (void*)ALIGN_UP ((mword)search_start + sizeof (gpointer));
				/* The loop condition makes sure we don't overrun addr. */
				continue;
			}

			canarified_obj_size = obj_size = ALIGN_UP (safe_object_get_size ((GCObject*)search_start));

			/*
			 * Filler arrays are marked by an invalid sync word.  We don't
			 * consider them for pinning.  They are not delimited by canaries,
			 * either.
			 */
			if (!sgen_client_object_is_array_fill ((GCObject*)search_start)) {
				CHECK_CANARY_FOR_OBJECT (search_start);
				CANARIFY_SIZE (canarified_obj_size);

				if (addr >= search_start && (char*)addr < (char*)search_start + obj_size) {
					/* This is the object we're looking for. */
					obj_to_pin = search_start;
					obj_to_pin_size = canarified_obj_size;
					break;
				}
			}

			/* Skip to the next object */
			search_start = (void*)((char*)search_start + canarified_obj_size);
		} while (search_start <= addr);

		/* We've searched past the address we were looking for. */
		if (!obj_to_pin) {
			pinning_front = search_start;
			goto next_pin_queue_entry;
		}

		/*
		 * We've found an object to pin.  It might still be a dummy array, but we
		 * can advance the pinning front in any case.
		 */
		pinning_front = (char*)obj_to_pin + obj_to_pin_size;

		/*
		 * If this is a dummy array marking the beginning of a nursery
		 * fragment, we don't pin it.
		 */
		if (sgen_client_object_is_array_fill ((GCObject*)obj_to_pin))
			goto next_pin_queue_entry;

		/*
		 * Finally - pin the object!
		 */
		desc = sgen_obj_get_descriptor_safe (obj_to_pin);
		if (do_scan_objects) {
			scan_func (obj_to_pin, desc, queue);
		} else {
			SGEN_LOG (4, "Pinned object %p, vtable %p (%s), count %d\n",
					obj_to_pin, *(void**)obj_to_pin, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj_to_pin)), count);
			binary_protocol_pin (obj_to_pin,
					(gpointer)LOAD_VTABLE (obj_to_pin),
					safe_object_get_size (obj_to_pin));

			pin_object (obj_to_pin);
			GRAY_OBJECT_ENQUEUE (queue, obj_to_pin, desc);
			sgen_pin_stats_register_object (obj_to_pin, obj_to_pin_size);
			definitely_pinned [count] = obj_to_pin;
			count++;
		}

	next_pin_queue_entry:
		last = addr;
		++start;
	}
	sgen_client_nursery_objects_pinned (definitely_pinned, count);
	stat_pinned_objects += count;
	return count;
}
+
+static void
+pin_objects_in_nursery (gboolean do_scan_objects, ScanCopyContext ctx)
+{
+       size_t reduced_to;
+
+       if (nursery_section->pin_queue_first_entry == nursery_section->pin_queue_last_entry)
+               return;
+
+       reduced_to = pin_objects_from_nursery_pin_queue (do_scan_objects, ctx);
+       nursery_section->pin_queue_last_entry = nursery_section->pin_queue_first_entry + reduced_to;
+}
+
/*
 * This function is only ever called (via `collector_pin_object()` in `sgen-copy-object.h`)
 * when we can't promote an object because we're out of memory.
 */
void
sgen_pin_object (void *object, GrayQueue *queue)
{
	/*
	 * All pinned objects are assumed to have been staged, so we need to stage as well.
	 * Also, the count of staged objects shows that "late pinning" happened.
	 */
	sgen_pin_stage_ptr (object);

	SGEN_PIN_OBJECT (object);
	binary_protocol_pin (object, (gpointer)LOAD_VTABLE (object), safe_object_get_size (object));

	++objects_pinned;
	sgen_pin_stats_register_object (object, safe_object_get_size (object));

	/* The pinned object still needs to be scanned, so push it on the gray queue. */
	GRAY_OBJECT_ENQUEUE (queue, object, sgen_obj_get_descriptor_safe (object));
}
+
/* Sort the addresses in array in increasing order.
 * Done using a by-the book heap sort. Which has decent and stable performance, is pretty cache efficient.
 */
void
sgen_sort_addresses (void **array, size_t size)
{
	size_t i;
	void *tmp;

	/*
	 * Nothing to sort for 0 or 1 elements.  This guard also prevents the
	 * `size - 1` below from wrapping around (size_t is unsigned) when
	 * size == 0, which would make the second loop start at SIZE_MAX and
	 * read far out of bounds.
	 */
	if (size < 2)
		return;

	/* Phase 1: build a max-heap by sifting each element up. */
	for (i = 1; i < size; ++i) {
		size_t child = i;
		while (child > 0) {
			size_t parent = (child - 1) / 2;

			if (array [parent] >= array [child])
				break;

			tmp = array [parent];
			array [parent] = array [child];
			array [child] = tmp;

			child = parent;
		}
	}

	/* Phase 2: repeatedly move the max to the end and sift the new root down. */
	for (i = size - 1; i > 0; --i) {
		size_t end, root;
		tmp = array [i];
		array [i] = array [0];
		array [0] = tmp;

		end = i - 1;
		root = 0;

		while (root * 2 + 1 <= end) {
			size_t child = root * 2 + 1;

			/* Pick the larger of the two children. */
			if (child < end && array [child] < array [child + 1])
				++child;
			if (array [root] >= array [child])
				break;

			tmp = array [root];
			array [root] = array [child];
			array [child] = tmp;

			root = child;
		}
	}
}
+
/* 
 * Scan the memory between start and end and queue values which could be pointers
 * to the area between start_nursery and end_nursery for later consideration.
 * Typically used for thread stacks.
 *
 * The candidates are only staged (sgen_pin_stage_ptr); the actual
 * pinning decision happens later, after sorting and deduplication.
 */
void
sgen_conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type)
{
	int count = 0;

#ifdef VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE
	VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE (start, (char*)end - (char*)start);
#endif

	while (start < end) {
		if (*start >= start_nursery && *start < end_nursery) {
			/*
			 * *start can point to the middle of an object
			 * note: should we handle pointing at the end of an object?
			 * pinning in C# code disallows pointing at the end of an object
			 * but there is some small chance that an optimizing C compiler
			 * may keep the only reference to an object by pointing
			 * at the end of it. We ignore this small chance for now.
			 * Pointers to the end of an object are indistinguishable
			 * from pointers to the start of the next object in memory
			 * so if we allow that we'd need to pin two objects...
			 * We queue the pointer in an array, the
			 * array will then be sorted and uniqued. This way
			 * we can coalesce several pinning pointers and it should
			 * be faster since we'd do a memory scan with increasing
			 * addresses. Note: we can align the address to the allocation
			 * alignment, so the unique process is more effective.
			 */
			mword addr = (mword)*start;
			addr &= ~(ALLOC_ALIGN - 1);
			if (addr >= (mword)start_nursery && addr < (mword)end_nursery) {
				SGEN_LOG (6, "Pinning address %p from %p", (void*)addr, start);
				sgen_pin_stage_ptr ((void*)addr);
				binary_protocol_pin_stage (start, (void*)addr);
				count++;
			}

			/*
			 * FIXME: It seems we're registering objects from all over the heap
			 * (at least from the nursery and the LOS), but we're only
			 * registering pinned addresses in the nursery.  What's up with
			 * that?
			 *
			 * Also, why wouldn't we register addresses once the pinning queue
			 * is sorted and uniqued?
			 */
			if (ptr_in_nursery ((void*)addr))
				sgen_pin_stats_register_address ((char*)addr, pin_type);
		}
		start++;
	}
	if (count)
		SGEN_LOG (7, "found %d potential pinned heap pointers", count);
}
+
/*
 * The first thing we do in a collection is to identify pinned objects.
 * This function considers all the areas of memory that need to be
 * conservatively scanned: the registered pinned roots and the thread
 * stacks/registers (via the client callback).
 */
static void
pin_from_roots (void *start_nursery, void *end_nursery, ScanCopyContext ctx)
{
	void **start_root;
	RootRecord *root;
	SGEN_LOG (2, "Scanning pinned roots (%d bytes, %d/%d entries)", (int)roots_size, roots_hash [ROOT_TYPE_NORMAL].num_entries, roots_hash [ROOT_TYPE_PINNED].num_entries);
	/* objects pinned from the API are inside these roots */
	SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], start_root, root) {
		SGEN_LOG (6, "Pinned roots %p-%p", start_root, root->end_root);
		sgen_conservatively_pin_objects_from (start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
	} SGEN_HASH_TABLE_FOREACH_END;
	/* now deal with the thread stacks
	 * in the future we should be able to conservatively scan only:
	 * *) the cpu registers
	 * *) the unmanaged stack frames
	 * *) the _last_ managed stack frame
	 * *) pointers slots in managed frames
	 */
	sgen_client_scan_thread_data (start_nursery, end_nursery, FALSE, ctx);
}
+
+static void
+unpin_objects_from_queue (SgenGrayQueue *queue)
+{
+       for (;;) {
+               char *addr;
+               mword desc;
+               GRAY_OBJECT_DEQUEUE (queue, &addr, &desc);
+               if (!addr)
+                       break;
+               g_assert (SGEN_OBJECT_IS_PINNED (addr));
+               SGEN_UNPIN_OBJECT (addr);
+       }
+}
+
+static void
+single_arg_user_copy_or_mark (void **obj, void *gc_data)
+{
+       ScanCopyContext *ctx = gc_data;
+       ctx->ops->copy_or_mark_object (obj, ctx->queue);
+}
+
/*
 * The memory area from start_root to end_root contains pointers to objects.
 * Their position is precisely described by @desc (this means that the pointer
 * can be either NULL or the pointer to the start of an object).
 * This functions copies them to to_space updates them.
 *
 * This function is not thread-safe!
 */
static void
precisely_scan_objects_from (void** start_root, void** end_root, char* n_start, char *n_end, mword desc, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	SgenGrayQueue *queue = ctx.queue;

	switch (desc & ROOT_DESC_TYPE_MASK) {
	case ROOT_DESC_BITMAP:
		/* The descriptor itself is a bitmap: bit N set means slot N holds a reference. */
		desc >>= ROOT_DESC_TYPE_SHIFT;
		while (desc) {
			if ((desc & 1) && *start_root) {
				copy_func (start_root, queue);
				SGEN_LOG (9, "Overwrote root at %p with %p", start_root, *start_root);
			}
			desc >>= 1;
			start_root++;
		}
		return;
	case ROOT_DESC_COMPLEX: {
		/* Large roots use an out-of-line bitmap: first word is the word count + 1. */
		gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
		gsize bwords = (*bitmap_data) - 1;
		void **start_run = start_root;
		bitmap_data++;
		while (bwords-- > 0) {
			gsize bmap = *bitmap_data++;
			void **objptr = start_run;
			while (bmap) {
				if ((bmap & 1) && *objptr) {
					copy_func (objptr, queue);
					SGEN_LOG (9, "Overwrote root at %p with %p", objptr, *objptr);
				}
				bmap >>= 1;
				++objptr;
			}
			/* Each bitmap word covers one machine word's worth of slots. */
			start_run += GC_BITS_PER_WORD;
		}
		break;
	}
	case ROOT_DESC_USER: {
		/* User-supplied marking function; our adapter passes the context through. */
		SgenUserRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
		marker (start_root, single_arg_user_copy_or_mark, &ctx);
		break;
	}
	case ROOT_DESC_RUN_LEN:
		g_assert_not_reached ();
	default:
		g_assert_not_reached ();
	}
}
+
+static void
+reset_heap_boundaries (void)
+{
+       lowest_heap_address = ~(mword)0;
+       highest_heap_address = 0;
+}
+
/*
 * sgen_update_heap_boundaries:
 *
 *   Grow the global [lowest_heap_address, highest_heap_address) range to
 * include [low, high).  Lock-free: each bound is updated with a CAS loop
 * that retries until either the bound already covers the new value or
 * the CAS succeeds.
 */
void
sgen_update_heap_boundaries (mword low, mword high)
{
	mword old;

	do {
		old = lowest_heap_address;
		if (low >= old)
			break;	/* another thread already recorded a lower bound */
	} while (SGEN_CAS_PTR ((gpointer*)&lowest_heap_address, (gpointer)low, (gpointer)old) != (gpointer)old);

	do {
		old = highest_heap_address;
		if (high <= old)
			break;	/* another thread already recorded a higher bound */
	} while (SGEN_CAS_PTR ((gpointer*)&highest_heap_address, (gpointer)high, (gpointer)old) != (gpointer)old);
}
+
/*
 * Allocate and setup the data structures needed to be able to allocate objects
 * in the nursery. The nursery is stored in nursery_section.
 * Idempotent: does nothing if the nursery section already exists.
 */
static void
alloc_nursery (void)
{
	GCMemSection *section;
	char *data;
	size_t scan_starts;
	size_t alloc_size;

	if (nursery_section)
		return;
	SGEN_LOG (2, "Allocating nursery size: %zu", (size_t)sgen_nursery_size);
	/* later we will alloc a larger area for the nursery but only activate
	 * what we need. The rest will be used as expansion if we have too many pinned
	 * objects in the existing nursery.
	 */
	/* FIXME: handle OOM */
	section = sgen_alloc_internal (INTERNAL_MEM_SECTION);

	alloc_size = sgen_nursery_size;

	/* If there isn't enough space even for the nursery we should simply abort. */
	g_assert (sgen_memgov_try_alloc_space (alloc_size, SPACE_NURSERY));

	data = major_collector.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
	sgen_update_heap_boundaries ((mword)data, (mword)(data + sgen_nursery_size));
	SGEN_LOG (4, "Expanding nursery size (%p-%p): %lu, total: %lu", data, data + alloc_size, (unsigned long)sgen_nursery_size, (unsigned long)sgen_gc_get_total_heap_allocation ());
	section->data = section->next_data = data;
	section->size = alloc_size;
	section->end_data = data + sgen_nursery_size;
	/* One scan-start slot per SCAN_START_SIZE bytes, rounded up. */
	scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
	section->scan_starts = sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
	section->num_scan_start = scan_starts;

	nursery_section = section;

	sgen_nursery_allocator_set_nursery_bounds (data, data + sgen_nursery_size);
}
+
+FILE *
+mono_gc_get_logfile (void)
+{
+       return gc_debug_file;
+}
+
+/*
+ * Copy or mark every non-NULL entry of the given finalizer queue, updating
+ * each queue slot in place so it points at the (possibly relocated) object.
+ */
+static void
+scan_finalizer_entries (SgenPointerQueue *fin_queue, ScanCopyContext ctx)
+{
+	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
+	SgenGrayQueue *queue = ctx.queue;
+	size_t idx;
+
+	for (idx = 0; idx < fin_queue->next_slot; ++idx) {
+		void *obj = fin_queue->data [idx];
+		if (obj) {
+			SGEN_LOG (5, "Scan of fin ready object: %p (%s)\n", obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj)));
+			copy_func (&fin_queue->data [idx], queue);
+		}
+	}
+}
+
+/* Human-readable name of a generation; asserts on any other value. */
+static const char*
+generation_name (int generation)
+{
+	if (generation == GENERATION_NURSERY)
+		return "nursery";
+	if (generation == GENERATION_OLD)
+		return "old";
+	g_assert_not_reached ();
+}
+
+/* Public wrapper around the static generation_name () helper. */
+const char*
+sgen_generation_name (int generation)
+{
+	return generation_name (generation);
+}
+
+/*
+ * Empty the gray stack for the given generation, then run the phases that
+ * need complete liveness information in their required order: ephemerons,
+ * togglerefs, bridge processing, weak links that don't track resurrection,
+ * finalizers, a second ephemeron pass, toggleref clearing and finally the
+ * remaining weak links.  Each phase that can gray more objects is followed
+ * by another drain.  On return the gray queue is empty.
+ *
+ * NOTE(review): the phase ordering below is load-bearing (see the inline
+ * comments) -- do not reorder.
+ */
+static void
+finish_gray_stack (int generation, ScanCopyContext ctx)
+{
+	TV_DECLARE (atv);
+	TV_DECLARE (btv);
+	int done_with_ephemerons, ephemeron_rounds = 0;
+	char *start_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_start () : NULL;
+	char *end_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_end () : (char*)-1;
+	SgenGrayQueue *queue = ctx.queue;
+
+	/*
+	 * We copied all the reachable objects. Now it's the time to copy
+	 * the objects that were not referenced by the roots, but by the copied objects.
+	 * we built a stack of objects pointed to by gray_start: they are
+	 * additional roots and we may add more items as we go.
+	 * We loop until gray_start == gray_objects which means no more objects have
+	 * been added. Note this is iterative: no recursion is involved.
+	 * We need to walk the LO list as well in search of marked big objects
+	 * (use a flag since this is needed only on major collections). We need to loop
+	 * here as well, so keep a counter of marked LO (increasing it in copy_object).
+	 *   To achieve better cache locality and cache usage, we drain the gray stack 
+	 * frequently, after each object is copied, and just finish the work here.
+	 */
+	sgen_drain_gray_stack (-1, ctx);
+	TV_GETTIME (atv);
+	SGEN_LOG (2, "%s generation done", generation_name (generation));
+
+	/*
+	Reset bridge data, we might have lingering data from a previous collection if this is a major
+	collection trigged by minor overflow.
+
+	We must reset the gathered bridges since their original block might be evacuated due to major
+	fragmentation in the meanwhile and the bridge code should not have to deal with that.
+	*/
+	if (sgen_client_bridge_need_processing ())
+		sgen_client_bridge_reset_data ();
+
+	/*
+	 * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
+	 * before processing finalizable objects and non-tracking weak links to avoid finalizing/clearing
+	 * objects that are in fact reachable.
+	 */
+	done_with_ephemerons = 0;
+	do {
+		done_with_ephemerons = sgen_client_mark_ephemerons (ctx);
+		sgen_drain_gray_stack (-1, ctx);
+		++ephemeron_rounds;
+	} while (!done_with_ephemerons);
+
+	sgen_client_mark_togglerefs (start_addr, end_addr, ctx);
+
+	if (sgen_client_bridge_need_processing ()) {
+		/*Make sure the gray stack is empty before we process bridge objects so we get liveness right*/
+		sgen_drain_gray_stack (-1, ctx);
+		sgen_collect_bridge_objects (generation, ctx);
+		if (generation == GENERATION_OLD)
+			sgen_collect_bridge_objects (GENERATION_NURSERY, ctx);
+
+		/*
+		Do the first bridge step here, as the collector liveness state will become useless after that.
+
+		An important optimization is to only proccess the possibly dead part of the object graph and skip
+		over all live objects as we transitively know everything they point must be alive too.
+
+		The above invariant is completely wrong if we let the gray queue be drained and mark/copy everything.
+
+		This has the unfortunate side effect of making overflow collections perform the first step twice, but
+		given we now have heuristics that perform major GC in anticipation of minor overflows this should not
+		be a big deal.
+		*/
+		sgen_client_bridge_processing_stw_step ();
+	}
+
+	/*
+	Make sure we drain the gray stack before processing disappearing links and finalizers.
+	If we don't make sure it is empty we might wrongly see a live object as dead.
+	*/
+	sgen_drain_gray_stack (-1, ctx);
+
+	/*
+	We must clear weak links that don't track resurrection before processing object ready for
+	finalization so they can be cleared before that.
+	*/
+	sgen_null_link_in_range (generation, TRUE, ctx);
+	if (generation == GENERATION_OLD)
+		sgen_null_link_in_range (GENERATION_NURSERY, TRUE, ctx);
+
+
+	/* walk the finalization queue and move also the objects that need to be
+	 * finalized: use the finalized objects as new roots so the objects they depend
+	 * on are also not reclaimed. As with the roots above, only objects in the nursery
+	 * are marked/copied.
+	 */
+	sgen_finalize_in_range (generation, ctx);
+	if (generation == GENERATION_OLD)
+		sgen_finalize_in_range (GENERATION_NURSERY, ctx);
+	/* drain the new stack that might have been created */
+	SGEN_LOG (6, "Precise scan of gray area post fin");
+	sgen_drain_gray_stack (-1, ctx);
+
+	/*
+	 * This must be done again after processing finalizable objects since CWL slots are cleared only after the key is finalized.
+	 */
+	done_with_ephemerons = 0;
+	do {
+		done_with_ephemerons = sgen_client_mark_ephemerons (ctx);
+		sgen_drain_gray_stack (-1, ctx);
+		++ephemeron_rounds;
+	} while (!done_with_ephemerons);
+
+	sgen_client_clear_unreachable_ephemerons (ctx);
+
+	/*
+	 * We clear togglerefs only after all possible chances of revival are done. 
+	 * This is semantically more inline with what users expect and it allows for
+	 * user finalizers to correctly interact with TR objects.
+	*/
+	sgen_client_clear_togglerefs (start_addr, end_addr, ctx);
+
+	TV_GETTIME (btv);
+	SGEN_LOG (2, "Finalize queue handling scan for %s generation: %ld usecs %d ephemeron rounds", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds);
+
+	/*
+	 * handle disappearing links
+	 * Note we do this after checking the finalization queue because if an object
+	 * survives (at least long enough to be finalized) we don't clear the link.
+	 * This also deals with a possible issue with the monitor reclamation: with the Boehm
+	 * GC a finalized object my lose the monitor because it is cleared before the finalizer is
+	 * called.
+	 */
+	g_assert (sgen_gray_object_queue_is_empty (queue));
+	for (;;) {
+		sgen_null_link_in_range (generation, FALSE, ctx);
+		if (generation == GENERATION_OLD)
+			sgen_null_link_in_range (GENERATION_NURSERY, FALSE, ctx);
+		if (sgen_gray_object_queue_is_empty (queue))
+			break;
+		sgen_drain_gray_stack (-1, ctx);
+	}
+
+	g_assert (sgen_gray_object_queue_is_empty (queue));
+
+	/* Release unused gray-queue sections back to the free list. */
+	sgen_gray_object_queue_trim_free_list (queue);
+}
+
+/*
+ * Debug check: every non-NULL scan-start entry of the section must point at
+ * an object whose size lies within the small-object range.
+ */
+void
+sgen_check_section_scan_starts (GCMemSection *section)
+{
+	size_t idx;
+
+	for (idx = 0; idx < section->num_scan_start; ++idx) {
+		char *start = section->scan_starts [idx];
+		mword size;
+
+		if (!start)
+			continue;
+		size = safe_object_get_size ((GCObject*) start);
+		SGEN_ASSERT (0, size >= SGEN_CLIENT_MINIMUM_OBJECT_SIZE && size <= MAX_SMALL_OBJ_SIZE, "Weird object size at scan starts.");
+	}
+}
+
+/* Run scan-start consistency checks on the nursery section and the major
+ * heap, but only when the scan-starts debug check option is enabled. */
+static void
+check_scan_starts (void)
+{
+	if (!do_scan_starts_check)
+		return;
+	sgen_check_section_scan_starts (nursery_section);
+	major_collector.check_scan_starts ();
+}
+
+/*
+ * Precisely scan every registered root of the given type, using each root's
+ * descriptor and restricting the scan to pointers into [addr_start, addr_end).
+ */
+static void
+scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx)
+{
+	void **start_root;
+	RootRecord *root;
+	SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
+		SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
+		precisely_scan_objects_from (start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, ctx);
+	} SGEN_HASH_TABLE_FOREACH_END;
+}
+
+/*
+ * Register SGen's timing and event counters with the mono counters
+ * machinery.  Safe to call repeatedly; only the first call registers.
+ * The HEAVY_STATISTICS counters are compiled in only when that macro
+ * is defined.
+ */
+static void
+init_stats (void)
+{
+	static gboolean inited = FALSE;
+
+	if (inited)
+		return;
+
+	mono_counters_register ("Collection max time",  MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME | MONO_COUNTER_MONOTONIC, &time_max);
+
+	mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_pre_collection_fragment_clear);
+	mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_pinning);
+	mono_counters_register ("Minor scan remembered set", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_scan_remsets);
+	mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_scan_pinned);
+	mono_counters_register ("Minor scan roots", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_scan_roots);
+	mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_fragment_creation);
+
+	mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_pre_collection_fragment_clear);
+	mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_pinning);
+	mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_pinned);
+	mono_counters_register ("Major scan roots", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_roots);
+	mono_counters_register ("Major scan mod union", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_mod_union);
+	mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_finish_gray_stack);
+	mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_free_bigobjs);
+	mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_los_sweep);
+	mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_sweep);
+	mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_fragment_creation);
+
+	mono_counters_register ("Number of pinned objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_pinned_objects);
+
+#ifdef HEAVY_STATISTICS
+	mono_counters_register ("WBarrier remember pointer", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_add_to_global_remset);
+	mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_set_arrayref);
+	mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_arrayref_copy);
+	mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_generic_store);
+	mono_counters_register ("WBarrier generic atomic store called", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_generic_store_atomic);
+	mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_set_root);
+	mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_value_copy);
+	mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_object_copy);
+
+	mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_alloced_degraded);
+	mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_bytes_alloced_degraded);
+
+	mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copy_object_called_nursery);
+	mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_copied_nursery);
+	mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copy_object_called_major);
+	mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_copied_major);
+
+	mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scan_object_called_nursery);
+	mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scan_object_called_major);
+
+	mono_counters_register ("Slots allocated in vain", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_slots_allocated_in_vain);
+
+	mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_from_space);
+	mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_forwarded);
+	mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_pinned);
+	mono_counters_register ("# nursery copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_to_space);
+
+	sgen_nursery_allocator_init_heavy_stats ();
+#endif
+
+	inited = TRUE;
+}
+
+
+/* Reset the per-collection count of bytes pinned due to failed allocation. */
+static void
+reset_pinned_from_failed_allocation (void)
+{
+	bytes_pinned_from_failed_allocation = 0;
+}
+
+/* Record that objsize bytes were pinned because an allocation failed. */
+void
+sgen_set_pinned_from_failed_allocation (mword objsize)
+{
+	bytes_pinned_from_failed_allocation += objsize;
+}
+
+/*
+ * Whether the collection currently in progress is concurrent.  Nursery
+ * collections never are; major collections are concurrent iff a concurrent
+ * collection is in progress.  Any other generation value (e.g. when no
+ * collection is running) is a fatal error.
+ */
+gboolean
+sgen_collection_is_concurrent (void)
+{
+	if (current_collection_generation == GENERATION_NURSERY)
+		return FALSE;
+	if (current_collection_generation == GENERATION_OLD)
+		return concurrent_collection_in_progress;
+	g_error ("Invalid current generation %d", current_collection_generation);
+	return FALSE;
+}
+
+/* Whether a concurrent (major) collection is currently in progress. */
+gboolean
+sgen_concurrent_collection_in_progress (void)
+{
+	return concurrent_collection_in_progress;
+}
+
+/* Thread-pool job that carries only the object operations to scan with. */
+typedef struct {
+	SgenThreadPoolJob job;
+	SgenObjectOperations *ops;
+} ScanJob;
+
+/* Worker entry point: scan the remembered set into this worker's gray queue. */
+static void
+job_remembered_set_scan (void *worker_data_untyped, SgenThreadPoolJob *job)
+{
+	WorkerData *worker_data = worker_data_untyped;
+	ScanJob *job_data = (ScanJob*)job;
+	ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
+	remset.scan_remsets (ctx);
+}
+
+/* Thread-pool job: scan registered roots of one root type over a heap range. */
+typedef struct {
+	SgenThreadPoolJob job;
+	SgenObjectOperations *ops;
+	char *heap_start;
+	char *heap_end;
+	int root_type;
+} ScanFromRegisteredRootsJob;
+
+/* Worker entry point for ScanFromRegisteredRootsJob. */
+static void
+job_scan_from_registered_roots (void *worker_data_untyped, SgenThreadPoolJob *job)
+{
+	WorkerData *worker_data = worker_data_untyped;
+	ScanFromRegisteredRootsJob *job_data = (ScanFromRegisteredRootsJob*)job;
+	ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
+
+	scan_from_registered_roots (job_data->heap_start, job_data->heap_end, job_data->root_type, ctx);
+}
+
+/* Thread-pool job: scan thread stacks/registers over a heap range.
+ * NOTE(review): ops must be assigned by the enqueuing code -- the worker
+ * below builds its scan context from it. */
+typedef struct {
+	SgenThreadPoolJob job;
+	SgenObjectOperations *ops;
+	char *heap_start;
+	char *heap_end;
+} ScanThreadDataJob;
+
+/* Worker entry point for ScanThreadDataJob. */
+static void
+job_scan_thread_data (void *worker_data_untyped, SgenThreadPoolJob *job)
+{
+	WorkerData *worker_data = worker_data_untyped;
+	ScanThreadDataJob *job_data = (ScanThreadDataJob*)job;
+	ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
+
+	sgen_client_scan_thread_data (job_data->heap_start, job_data->heap_end, TRUE, ctx);
+}
+
+/* Thread-pool job: scan one finalizer pointer queue. */
+typedef struct {
+	SgenThreadPoolJob job;
+	SgenObjectOperations *ops;
+	SgenPointerQueue *queue;
+} ScanFinalizerEntriesJob;
+
+/* Worker entry point for ScanFinalizerEntriesJob. */
+static void
+job_scan_finalizer_entries (void *worker_data_untyped, SgenThreadPoolJob *job)
+{
+	WorkerData *worker_data = worker_data_untyped;
+	ScanFinalizerEntriesJob *job_data = (ScanFinalizerEntriesJob*)job;
+	ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
+
+	scan_finalizer_entries (job_data->queue, ctx);
+}
+
+/* Worker entry point: scan the major heap's mod-union card table.
+ * Only valid while a concurrent collection is in progress. */
+static void
+job_scan_major_mod_union_card_table (void *worker_data_untyped, SgenThreadPoolJob *job)
+{
+	WorkerData *worker_data = worker_data_untyped;
+	ScanJob *job_data = (ScanJob*)job;
+	ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
+
+	g_assert (concurrent_collection_in_progress);
+	major_collector.scan_card_table (TRUE, ctx);
+}
+
+/* Worker entry point: scan the LOS mod-union card table.
+ * Only valid while a concurrent collection is in progress. */
+static void
+job_scan_los_mod_union_card_table (void *worker_data_untyped, SgenThreadPoolJob *job)
+{
+	WorkerData *worker_data = worker_data_untyped;
+	ScanJob *job_data = (ScanJob*)job;
+	ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
+
+	g_assert (concurrent_collection_in_progress);
+	sgen_los_scan_card_table (TRUE, ctx);
+}
+
+/* Initialize the collector's gray queue before marking starts; for
+ * concurrent collections also set up the workers' distribute gray queue. */
+static void
+init_gray_queue (void)
+{
+	if (sgen_collection_is_concurrent ())
+		sgen_workers_init_distribute_gray_queue ();
+	sgen_gray_object_queue_init (&gray_queue, NULL);
+}
+
+/*
+ * Enqueue the root-scanning jobs on the worker thread pool: registered roots
+ * (normal and write-barrier), thread stacks/registers, and the regular and
+ * critical finalizer queues.  Every job scans with the given object
+ * operations over [heap_start, heap_end).
+ */
+static void
+enqueue_scan_from_roots_jobs (char *heap_start, char *heap_end, SgenObjectOperations *ops)
+{
+	ScanFromRegisteredRootsJob *scrrj;
+	ScanThreadDataJob *stdj;
+	ScanFinalizerEntriesJob *sfej;
+
+	/* registered roots, this includes static fields */
+
+	scrrj = (ScanFromRegisteredRootsJob*)sgen_thread_pool_job_alloc ("scan from registered roots normal", job_scan_from_registered_roots, sizeof (ScanFromRegisteredRootsJob));
+	scrrj->ops = ops;
+	scrrj->heap_start = heap_start;
+	scrrj->heap_end = heap_end;
+	scrrj->root_type = ROOT_TYPE_NORMAL;
+	sgen_workers_enqueue_job (&scrrj->job);
+
+	scrrj = (ScanFromRegisteredRootsJob*)sgen_thread_pool_job_alloc ("scan from registered roots wbarrier", job_scan_from_registered_roots, sizeof (ScanFromRegisteredRootsJob));
+	scrrj->ops = ops;
+	scrrj->heap_start = heap_start;
+	scrrj->heap_end = heap_end;
+	scrrj->root_type = ROOT_TYPE_WBARRIER;
+	sgen_workers_enqueue_job (&scrrj->job);
+
+	/* Threads */
+
+	stdj = (ScanThreadDataJob*)sgen_thread_pool_job_alloc ("scan thread data", job_scan_thread_data, sizeof (ScanThreadDataJob));
+	/* Fix: ops was previously left unset here, so job_scan_thread_data built
+	 * its scan context from an uninitialized operations pointer. */
+	stdj->ops = ops;
+	stdj->heap_start = heap_start;
+	stdj->heap_end = heap_end;
+	sgen_workers_enqueue_job (&stdj->job);
+
+	/* Scan the list of objects ready for finalization. */
+
+	sfej = (ScanFinalizerEntriesJob*)sgen_thread_pool_job_alloc ("scan finalizer entries", job_scan_finalizer_entries, sizeof (ScanFinalizerEntriesJob));
+	sfej->queue = &fin_ready_queue;
+	sfej->ops = ops;
+	sgen_workers_enqueue_job (&sfej->job);
+
+	sfej = (ScanFinalizerEntriesJob*)sgen_thread_pool_job_alloc ("scan critical finalizer entries", job_scan_finalizer_entries, sizeof (ScanFinalizerEntriesJob));
+	sfej->queue = &critical_fin_queue;
+	sfej->ops = ops;
+	sgen_workers_enqueue_job (&sfej->job);
+}
+
+/*
+ * Perform a nursery collection.  The world must already be stopped.
+ *
+ * unpin_queue is passed through to sgen_build_nursery_fragments ();
+ * NOTE(review): it appears to collect objects that should remain pinned
+ * (non-NULL when finishing a concurrent mark) -- confirm against callers.
+ *
+ * Return whether any objects were late-pinned due to being out of memory,
+ * i.e. whether a major collection should follow.
+ */
+static gboolean
+collect_nursery (SgenGrayQueue *unpin_queue, gboolean finish_up_concurrent_mark)
+{
+	gboolean needs_major;
+	size_t max_garbage_amount;
+	char *nursery_next;
+	mword fragment_total;
+	ScanJob *sj;
+	SgenObjectOperations *object_ops = &sgen_minor_collector.serial_ops;
+	ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (object_ops, &gray_queue);
+	TV_DECLARE (atv);
+	TV_DECLARE (btv);
+
+	if (disable_minor_collections)
+		return TRUE;
+
+	TV_GETTIME (last_minor_collection_start_tv);
+	atv = last_minor_collection_start_tv;
+
+	binary_protocol_collection_begin (gc_stats.minor_gc_count, GENERATION_NURSERY);
+
+	if (do_verify_nursery || do_dump_nursery_content)
+		sgen_debug_verify_nursery (do_dump_nursery_content);
+
+	current_collection_generation = GENERATION_NURSERY;
+
+	SGEN_ASSERT (0, !sgen_collection_is_concurrent (), "Why is the nursery collection concurrent?");
+
+	reset_pinned_from_failed_allocation ();
+
+	check_scan_starts ();
+
+	sgen_nursery_alloc_prepare_for_minor ();
+
+	degraded_mode = 0;
+	objects_pinned = 0;
+	nursery_next = sgen_nursery_alloc_get_upper_alloc_bound ();
+	/* FIXME: optimize later to use the higher address where an object can be present */
+	nursery_next = MAX (nursery_next, sgen_get_nursery_end ());
+
+	SGEN_LOG (1, "Start nursery collection %d %p-%p, size: %d", gc_stats.minor_gc_count, sgen_get_nursery_start (), nursery_next, (int)(nursery_next - sgen_get_nursery_start ()));
+	max_garbage_amount = nursery_next - sgen_get_nursery_start ();
+	g_assert (nursery_section->size >= max_garbage_amount);
+
+	/* world must be stopped already */
+	TV_GETTIME (btv);
+	time_minor_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
+
+	sgen_client_pre_collection_checks ();
+
+	nursery_section->next_data = nursery_next;
+
+	major_collector.start_nursery_collection ();
+
+	sgen_memgov_minor_collection_start ();
+
+	init_gray_queue ();
+
+	gc_stats.minor_gc_count ++;
+
+	/* Optional debug passes over the whole heap/nursery before collecting. */
+	if (whole_heap_check_before_collection) {
+		sgen_clear_nursery_fragments ();
+		sgen_check_whole_heap (finish_up_concurrent_mark);
+	}
+	if (consistency_check_at_minor_collection)
+		sgen_check_consistency ();
+
+	sgen_process_fin_stage_entries ();
+	sgen_process_dislink_stage_entries ();
+
+	/* pin from pinned handles */
+	sgen_init_pinning ();
+	sgen_client_binary_protocol_mark_start (GENERATION_NURSERY);
+	pin_from_roots (sgen_get_nursery_start (), nursery_next, ctx);
+	/* pin cemented objects */
+	sgen_pin_cemented_objects ();
+	/* identify pinned objects */
+	sgen_optimize_pin_queue ();
+	sgen_pinning_setup_section (nursery_section);
+
+	pin_objects_in_nursery (FALSE, ctx);
+	sgen_pinning_trim_queue_to_section (nursery_section);
+
+	TV_GETTIME (atv);
+	time_minor_pinning += TV_ELAPSED (btv, atv);
+	SGEN_LOG (2, "Finding pinned pointers: %zd in %ld usecs", sgen_get_pinned_count (), TV_ELAPSED (btv, atv));
+	SGEN_LOG (4, "Start scan with %zd pinned objects", sgen_get_pinned_count ());
+
+	/*
+	 * FIXME: When we finish a concurrent collection we do a nursery collection first,
+	 * as part of which we scan the card table.  Then, later, we scan the mod union
+	 * cardtable.  We should only have to do one.
+	 */
+	sj = (ScanJob*)sgen_thread_pool_job_alloc ("scan remset", job_remembered_set_scan, sizeof (ScanJob));
+	sj->ops = object_ops;
+	sgen_workers_enqueue_job (&sj->job);
+
+	/* we don't have complete write barrier yet, so we scan all the old generation sections */
+	TV_GETTIME (btv);
+	time_minor_scan_remsets += TV_ELAPSED (atv, btv);
+	SGEN_LOG (2, "Old generation scan: %ld usecs", TV_ELAPSED (atv, btv));
+
+	sgen_drain_gray_stack (-1, ctx);
+
+	/* FIXME: Why do we do this at this specific, seemingly random, point? */
+	sgen_client_collecting_minor (&fin_ready_queue, &critical_fin_queue);
+
+	TV_GETTIME (atv);
+	time_minor_scan_pinned += TV_ELAPSED (btv, atv);
+
+	enqueue_scan_from_roots_jobs (sgen_get_nursery_start (), nursery_next, object_ops);
+
+	TV_GETTIME (btv);
+	time_minor_scan_roots += TV_ELAPSED (atv, btv);
+
+	finish_gray_stack (GENERATION_NURSERY, ctx);
+
+	TV_GETTIME (atv);
+	time_minor_finish_gray_stack += TV_ELAPSED (btv, atv);
+	sgen_client_binary_protocol_mark_end (GENERATION_NURSERY);
+
+	/* Late pinning may have invalidated the earlier pin-queue setup; redo it. */
+	if (objects_pinned) {
+		sgen_optimize_pin_queue ();
+		sgen_pinning_setup_section (nursery_section);
+	}
+
+	/* walk the pin_queue, build up the fragment list of free memory, unmark
+	 * pinned objects as we go, memzero() the empty fragments so they are ready for the
+	 * next allocations.
+	 */
+	sgen_client_binary_protocol_reclaim_start (GENERATION_NURSERY);
+	fragment_total = sgen_build_nursery_fragments (nursery_section, unpin_queue);
+	if (!fragment_total)
+		degraded_mode = 1;
+
+	/* Clear TLABs for all threads */
+	sgen_clear_tlabs ();
+
+	sgen_client_binary_protocol_reclaim_end (GENERATION_NURSERY);
+	TV_GETTIME (btv);
+	time_minor_fragment_creation += TV_ELAPSED (atv, btv);
+	SGEN_LOG (2, "Fragment creation: %ld usecs, %lu bytes available", TV_ELAPSED (atv, btv), (unsigned long)fragment_total);
+
+	if (consistency_check_at_minor_collection)
+		sgen_check_major_refs ();
+
+	major_collector.finish_nursery_collection ();
+
+	TV_GETTIME (last_minor_collection_end_tv);
+	gc_stats.minor_gc_time += TV_ELAPSED (last_minor_collection_start_tv, last_minor_collection_end_tv);
+
+	sgen_debug_dump_heap ("minor", gc_stats.minor_gc_count - 1, NULL);
+
+	/* prepare the pin queue for the next collection */
+	sgen_finish_pinning ();
+	if (sgen_have_pending_finalizers ()) {
+		SGEN_LOG (4, "Finalizer-thread wakeup");
+		sgen_client_finalize_notify ();
+	}
+	sgen_pin_stats_reset ();
+	/* clear cemented hash */
+	sgen_cement_clear_below_threshold ();
+
+	g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
+
+	remset.finish_minor_collection ();
+
+	check_scan_starts ();
+
+	binary_protocol_flush_buffers (FALSE);
+
+	sgen_memgov_minor_collection_end ();
+
+	/*objects are late pinned because of lack of memory, so a major is a good call*/
+	needs_major = objects_pinned > 0;
+	current_collection_generation = -1;
+	objects_pinned = 0;
+
+	binary_protocol_collection_end (gc_stats.minor_gc_count - 1, GENERATION_NURSERY, 0, 0);
+
+	if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
+		sgen_check_nursery_objects_pinned (unpin_queue != NULL);
+
+	return needs_major;
+}
+
+/* Per-object callback used by scan_nursery_objects (); size is unused. */
+static void
+scan_nursery_objects_callback (char *obj, size_t size, ScanCopyContext *ctx)
+{
+	/*
+	 * This is called on all objects in the nursery, including pinned ones, so we need
+	 * to use sgen_obj_get_descriptor_safe(), which masks out the vtable tag bits.
+	 */
+	ctx->ops->scan_object (obj, sgen_obj_get_descriptor_safe (obj), ctx->queue);
+}
+
+/* Walk every object in the nursery section and scan it with the given context. */
+static void
+scan_nursery_objects (ScanCopyContext ctx)
+{
+	sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
+			(IterateObjectCallbackFunc)scan_nursery_objects_callback, (void*)&ctx, FALSE);
+}
+
+/* How major_copy_or_mark_from_roots () is being invoked: a serial
+ * (stop-the-world) major collection, the start of a concurrent major,
+ * or the pause that finishes a concurrent major. */
+typedef enum {
+	COPY_OR_MARK_FROM_ROOTS_SERIAL,
+	COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT,
+	COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT
+} CopyOrMarkFromRootsMode;
+
+static void
+major_copy_or_mark_from_roots (size_t *old_next_pin_slot, CopyOrMarkFromRootsMode mode, gboolean scan_whole_nursery, SgenObjectOperations *object_ops)
+{
+       LOSObject *bigobj;
+       TV_DECLARE (atv);
+       TV_DECLARE (btv);
+       /* FIXME: only use these values for the precise scan
+        * note that to_space pointers should be excluded anyway...
+        */
+       char *heap_start = NULL;
+       char *heap_end = (char*)-1;
+       ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (object_ops, WORKERS_DISTRIBUTE_GRAY_QUEUE);
+       gboolean concurrent = mode != COPY_OR_MARK_FROM_ROOTS_SERIAL;
+
+       SGEN_ASSERT (0, !!concurrent == !!concurrent_collection_in_progress, "We've been called with the wrong mode.");
+
+       if (scan_whole_nursery)
+               SGEN_ASSERT (0, mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT, "Scanning whole nursery only makes sense when we're finishing a concurrent collection.");
+
+       if (concurrent) {
+               /*This cleans up unused fragments */
+               sgen_nursery_allocator_prepare_for_pinning ();
+
+               if (do_concurrent_checks)
+                       sgen_debug_check_nursery_is_clean ();
+       } else {
+               /* The concurrent collector doesn't touch the nursery. */
+               sgen_nursery_alloc_prepare_for_major ();
+       }
+
+       init_gray_queue ();
+
+       TV_GETTIME (atv);
+
+       /* Pinning depends on this */
+       sgen_clear_nursery_fragments ();
+
+       if (whole_heap_check_before_collection)
+               sgen_check_whole_heap (mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT);
+
+       TV_GETTIME (btv);
+       time_major_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
+
+       if (!sgen_collection_is_concurrent ())
+               nursery_section->next_data = sgen_get_nursery_end ();
+       /* we should also coalesce scanning from sections close to each other
+        * and deal with pointers outside of the sections later.
+        */
+
+       objects_pinned = 0;
+
+       sgen_client_pre_collection_checks ();
+
+       if (!concurrent) {
+               /* Remsets are not useful for a major collection */
+               remset.clear_cards ();
+       }
+
+       sgen_process_fin_stage_entries ();
+       sgen_process_dislink_stage_entries ();
+
+       TV_GETTIME (atv);
+       sgen_init_pinning ();
+       SGEN_LOG (6, "Collecting pinned addresses");
+       pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, ctx);
+
+       if (mode != COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT) {
+               if (major_collector.is_concurrent) {
+                       /*
+                        * The concurrent major collector cannot evict
+                        * yet, so we need to pin cemented objects to
+                        * not break some asserts.
+                        *
+                        * FIXME: We could evict now!
+                        */
+                       sgen_pin_cemented_objects ();
+               }
+       }
+
+       sgen_optimize_pin_queue ();
+
+       sgen_client_collecting_major_1 ();
+
+       /*
+        * pin_queue now contains all candidate pointers, sorted and
+        * uniqued.  We must do two passes now to figure out which
+        * objects are pinned.
+        *
+        * The first is to find within the pin_queue the area for each
+        * section.  This requires that the pin_queue be sorted.  We
+        * also process the LOS objects and pinned chunks here.
+        *
+        * The second, destructive, pass is to reduce the section
+        * areas to pointers to the actually pinned objects.
+        */
+       SGEN_LOG (6, "Pinning from sections");
+       /* first pass for the sections */
+       sgen_find_section_pin_queue_start_end (nursery_section);
+       /* identify possible pointers to the inside of large objects */
+       SGEN_LOG (6, "Pinning from large objects");
+       for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
+               size_t dummy;
+               if (sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + sgen_los_object_size (bigobj), &dummy, &dummy)) {
+                       binary_protocol_pin (bigobj->data, (gpointer)LOAD_VTABLE (bigobj->data), safe_object_get_size (((GCObject*)(bigobj->data))));
+
+                       if (sgen_los_object_is_pinned (bigobj->data)) {
+                               SGEN_ASSERT (0, mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT, "LOS objects can only be pinned here after concurrent marking.");
+                               continue;
+                       }
+                       sgen_los_pin_object (bigobj->data);
+                       if (SGEN_OBJECT_HAS_REFERENCES (bigobj->data))
+                               GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data, sgen_obj_get_descriptor (bigobj->data));
+                       sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((GCObject*) bigobj->data));
+                       SGEN_LOG (6, "Marked large object %p (%s) size: %lu from roots", bigobj->data,
+                                       sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (bigobj->data)),
+                                       (unsigned long)sgen_los_object_size (bigobj));
+
+                       sgen_client_pinned_los_object (bigobj->data);
+               }
+       }
+       /* second pass for the sections */
+
+       /*
+        * Concurrent mark never follows references into the nursery.  In the start and
+        * finish pauses we must scan live nursery objects, though.
+        *
+        * In the finish pause we do this conservatively by scanning all nursery objects.
+        * Previously we would only scan pinned objects here.  We assumed that all objects
+        * that were pinned during the nursery collection immediately preceding this finish
+        * mark would be pinned again here.  Due to the way we get the stack end for the GC
+        * thread, however, that's not necessarily the case: we scan part of the stack used
+        * by the GC itself, which changes constantly, so pinning isn't entirely
+        * deterministic.
+        *
+        * The split nursery also complicates things because non-pinned objects can survive
+        * in the nursery.  That's why we need to do a full scan of the nursery for it, too.
+        *
+        * In the future we shouldn't do a preceding nursery collection at all and instead
+        * do the finish pause with promotion from the nursery.
+        *
+        * A further complication arises when we have late-pinned objects from the preceding
+        * nursery collection.  Those are the result of being out of memory when trying to
+        * evacuate objects.  They won't be found from the roots, so we just scan the whole
+        * nursery.
+        *
+        * Non-concurrent mark evacuates from the nursery, so it's
+        * sufficient to just scan pinned nursery objects.
+        */
+       if (scan_whole_nursery || mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT || (concurrent && sgen_minor_collector.is_split)) {
+               scan_nursery_objects (ctx);
+       } else {
+               pin_objects_in_nursery (concurrent, ctx);
+               if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
+                       sgen_check_nursery_objects_pinned (mode != COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT);
+       }
+
+       major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
+       if (old_next_pin_slot)
+               *old_next_pin_slot = sgen_get_pinned_count ();
+
+       TV_GETTIME (btv);
+       time_major_pinning += TV_ELAPSED (atv, btv);
+       SGEN_LOG (2, "Finding pinned pointers: %zd in %ld usecs", sgen_get_pinned_count (), TV_ELAPSED (atv, btv));
+       SGEN_LOG (4, "Start scan with %zd pinned objects", sgen_get_pinned_count ());
+
+       major_collector.init_to_space ();
+
+       /*
+        * The concurrent collector doesn't move objects, neither on
+        * the major heap nor in the nursery, so we can mark even
+        * before pinning has finished.  For the non-concurrent
+        * collector we start the workers after pinning.
+        */
+       if (mode != COPY_OR_MARK_FROM_ROOTS_SERIAL) {
+               SGEN_ASSERT (0, sgen_workers_all_done (), "Why are the workers not done when we start or finish a major collection?");
+               sgen_workers_start_all_workers (object_ops);
+               gray_queue_enable_redirect (WORKERS_DISTRIBUTE_GRAY_QUEUE);
+       }
+
+#ifdef SGEN_DEBUG_INTERNAL_ALLOC
+       main_gc_thread = mono_native_thread_self ();
+#endif
+
+       sgen_client_collecting_major_2 ();
+
+       TV_GETTIME (atv);
+       time_major_scan_pinned += TV_ELAPSED (btv, atv);
+
+       sgen_client_collecting_major_3 (&fin_ready_queue, &critical_fin_queue);
+
+       /*
+        * FIXME: is this the right context?  It doesn't seem to contain a copy function
+        * unless we're concurrent.
+        */
+       enqueue_scan_from_roots_jobs (heap_start, heap_end, object_ops);
+
+       TV_GETTIME (btv);
+       time_major_scan_roots += TV_ELAPSED (atv, btv);
+
+       if (mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT) {
+               ScanJob *sj;
+
+               /* Mod union card table */
+               sj = (ScanJob*)sgen_thread_pool_job_alloc ("scan mod union cardtable", job_scan_major_mod_union_card_table, sizeof (ScanJob));
+               sj->ops = object_ops;
+               sgen_workers_enqueue_job (&sj->job);
+
+               sj = (ScanJob*)sgen_thread_pool_job_alloc ("scan LOS mod union cardtable", job_scan_los_mod_union_card_table, sizeof (ScanJob));
+               sj->ops = object_ops;
+               sgen_workers_enqueue_job (&sj->job);
+
+               TV_GETTIME (atv);
+               time_major_scan_mod_union += TV_ELAPSED (btv, atv);
+       }
+}
+
+/*
+ * Finish the copy/mark phase of a concurrent major collection: wait for the
+ * worker jobs to complete, then reset the pin queue and pin statistics for
+ * the next collection.  No-op when no concurrent collection is in progress.
+ */
+static void
+major_finish_copy_or_mark (void)
+{
+       if (!concurrent_collection_in_progress)
+               return;
+
+       /*
+        * Prepare the pin queue for the next collection.  Since pinning runs on the worker
+        * threads we must wait for the jobs to finish before we can reset it.
+        */
+       sgen_workers_wait_for_jobs_finished ();
+       sgen_finish_pinning ();
+
+       sgen_pin_stats_reset ();
+
+       /* Optional debug consistency check of the nursery. */
+       if (do_concurrent_checks)
+               sgen_debug_check_nursery_is_clean ();
+}
+
+/*
+ * Start a major collection: switch the current generation to OLD, reset
+ * cementing and memory-governor state, pick the object operations
+ * (concurrent-start vs. serial), and run the initial copy/mark-from-roots
+ * phase.
+ *
+ * @concurrent: start a concurrent major collection; requires a
+ * concurrent-capable major collector.
+ * @old_next_pin_slot: out parameter receiving the pin count after pinning
+ * (may be NULL for concurrent collections).
+ */
+static void
+major_start_collection (gboolean concurrent, size_t *old_next_pin_slot)
+{
+       SgenObjectOperations *object_ops;
+
+       binary_protocol_collection_begin (gc_stats.major_gc_count, GENERATION_OLD);
+
+       current_collection_generation = GENERATION_OLD;
+
+       g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
+
+       sgen_cement_reset ();
+
+       /* Select which scan/copy operations drive this collection. */
+       if (concurrent) {
+               g_assert (major_collector.is_concurrent);
+               concurrent_collection_in_progress = TRUE;
+
+               object_ops = &major_collector.major_ops_concurrent_start;
+       } else {
+               object_ops = &major_collector.major_ops_serial;
+       }
+
+       reset_pinned_from_failed_allocation ();
+
+       sgen_memgov_major_collection_start ();
+
+       //count_ref_nonref_objs ();
+       //consistency_check ();
+
+       check_scan_starts ();
+
+       degraded_mode = 0;
+       SGEN_LOG (1, "Start major collection %d", gc_stats.major_gc_count);
+       gc_stats.major_gc_count ++;
+
+       /* Collector-specific hook, optional. */
+       if (major_collector.start_major_collection)
+               major_collector.start_major_collection ();
+
+       major_copy_or_mark_from_roots (old_next_pin_slot, concurrent ? COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT : COPY_OR_MARK_FROM_ROOTS_SERIAL, FALSE, object_ops);
+       major_finish_copy_or_mark ();
+}
+
+/*
+ * Finish a major collection.  For a concurrent collection this runs the
+ * finishing copy/mark pause and joins the workers first.  Then the gray
+ * stack is drained (finalization etc.), nursery fragments are rebuilt, the
+ * LOS and the major heap are swept, and timing/binary-protocol records are
+ * updated.
+ *
+ * @old_next_pin_slot is only consulted when objects were pinned because of
+ * a failed (OOM) allocation, to discard stale pin-queue entries.
+ * @scan_whole_nursery only applies to concurrent collections (asserted
+ * below).
+ */
+static void
+major_finish_collection (const char *reason, size_t old_next_pin_slot, gboolean forced, gboolean scan_whole_nursery)
+{
+       ScannedObjectCounts counts;
+       SgenObjectOperations *object_ops;
+       TV_DECLARE (atv);
+       TV_DECLARE (btv);
+
+       TV_GETTIME (btv);
+
+       if (concurrent_collection_in_progress) {
+               object_ops = &major_collector.major_ops_concurrent_finish;
+
+               major_copy_or_mark_from_roots (NULL, COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT, scan_whole_nursery, object_ops);
+
+               major_finish_copy_or_mark ();
+
+               sgen_workers_join ();
+
+               SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (&gray_queue), "Why is the gray queue not empty after workers have finished working?");
+
+#ifdef SGEN_DEBUG_INTERNAL_ALLOC
+               main_gc_thread = NULL;
+#endif
+
+               if (do_concurrent_checks)
+                       sgen_debug_check_nursery_is_clean ();
+       } else {
+               SGEN_ASSERT (0, !scan_whole_nursery, "scan_whole_nursery only applies to concurrent collections");
+               object_ops = &major_collector.major_ops_serial;
+       }
+
+       /*
+        * The workers have stopped so we need to finish gray queue
+        * work that might result from finalization in the main GC
+        * thread.  Redirection must therefore be turned off.
+        */
+       sgen_gray_object_queue_disable_alloc_prepare (&gray_queue);
+       g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
+
+       /* all the objects in the heap */
+       finish_gray_stack (GENERATION_OLD, CONTEXT_FROM_OBJECT_OPERATIONS (object_ops, &gray_queue));
+       TV_GETTIME (atv);
+       time_major_finish_gray_stack += TV_ELAPSED (btv, atv);
+
+       SGEN_ASSERT (0, sgen_workers_all_done (), "Can't have workers working after joining");
+
+       if (objects_pinned) {
+               g_assert (!concurrent_collection_in_progress);
+
+               /*
+                * This is slow, but we just OOM'd.
+                *
+                * See comment at `sgen_pin_queue_clear_discarded_entries` for how the pin
+                * queue is laid out at this point.
+                */
+               sgen_pin_queue_clear_discarded_entries (nursery_section, old_next_pin_slot);
+               /*
+                * We need to reestablish all pinned nursery objects in the pin queue
+                * because they're needed for fragment creation.  Unpinning happens by
+                * walking the whole queue, so it's not necessary to reestablish where major
+                * heap block pins are - all we care is that they're still in there
+                * somewhere.
+                */
+               sgen_optimize_pin_queue ();
+               sgen_find_section_pin_queue_start_end (nursery_section);
+               objects_pinned = 0;
+       }
+
+       reset_heap_boundaries ();
+       sgen_update_heap_boundaries ((mword)sgen_get_nursery_start (), (mword)sgen_get_nursery_end ());
+
+       if (!concurrent_collection_in_progress) {
+               /* walk the pin_queue, build up the fragment list of free memory, unmark
+                * pinned objects as we go, memzero() the empty fragments so they are ready for the
+                * next allocations.
+                */
+               if (!sgen_build_nursery_fragments (nursery_section, NULL))
+                       degraded_mode = 1;
+
+               /* prepare the pin queue for the next collection */
+               sgen_finish_pinning ();
+
+               /* Clear TLABs for all threads */
+               sgen_clear_tlabs ();
+
+               sgen_pin_stats_reset ();
+       }
+
+       sgen_cement_clear_below_threshold ();
+
+       if (check_mark_bits_after_major_collection)
+               sgen_check_heap_marked (concurrent_collection_in_progress);
+
+       TV_GETTIME (btv);
+       time_major_fragment_creation += TV_ELAPSED (atv, btv);
+
+       /* Sweep phase: first the large-object space, then the major heap. */
+       binary_protocol_sweep_begin (GENERATION_OLD, !major_collector.sweeps_lazily);
+
+       TV_GETTIME (atv);
+       time_major_free_bigobjs += TV_ELAPSED (btv, atv);
+
+       sgen_los_sweep ();
+
+       TV_GETTIME (btv);
+       time_major_los_sweep += TV_ELAPSED (atv, btv);
+
+       major_collector.sweep ();
+
+       binary_protocol_sweep_end (GENERATION_OLD, !major_collector.sweeps_lazily);
+
+       TV_GETTIME (atv);
+       time_major_sweep += TV_ELAPSED (btv, atv);
+
+       sgen_debug_dump_heap ("major", gc_stats.major_gc_count - 1, reason);
+
+       if (sgen_have_pending_finalizers ()) {
+               SGEN_LOG (4, "Finalizer-thread wakeup");
+               sgen_client_finalize_notify ();
+       }
+
+       g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
+
+       sgen_memgov_major_collection_end (forced);
+       current_collection_generation = -1;
+
+       memset (&counts, 0, sizeof (ScannedObjectCounts));
+       major_collector.finish_major_collection (&counts);
+
+       g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
+
+       SGEN_ASSERT (0, sgen_workers_all_done (), "Can't have workers working after major collection has finished");
+       if (concurrent_collection_in_progress)
+               concurrent_collection_in_progress = FALSE;
+
+       check_scan_starts ();
+
+       binary_protocol_flush_buffers (FALSE);
+
+       //consistency_check ();
+
+       binary_protocol_collection_end (gc_stats.major_gc_count - 1, GENERATION_OLD, counts.num_scanned_objects, counts.num_unique_scanned_objects);
+}
+
+/*
+ * Perform a full, synchronous major collection (start + finish) and account
+ * its time against gc_stats.major_gc_time.
+ *
+ * Returns TRUE if objects were pinned because of a failed allocation during
+ * the collection, in which case the caller should run an overflow nursery
+ * collection.  Returns FALSE immediately when major collections are
+ * disabled.
+ */
+static gboolean
+major_do_collection (const char *reason, gboolean forced)
+{
+       TV_DECLARE (time_start);
+       TV_DECLARE (time_end);
+       size_t old_next_pin_slot;
+
+       if (disable_major_collections)
+               return FALSE;
+
+       /* Sanity check: no leftover marked-object count from a previous cycle. */
+       if (major_collector.get_and_reset_num_major_objects_marked) {
+               long long num_marked = major_collector.get_and_reset_num_major_objects_marked ();
+               g_assert (!num_marked);
+       }
+
+       /* world must be stopped already */
+       TV_GETTIME (time_start);
+
+       major_start_collection (FALSE, &old_next_pin_slot);
+       major_finish_collection (reason, old_next_pin_slot, forced, FALSE);
+
+       TV_GETTIME (time_end);
+       gc_stats.major_gc_time += TV_ELAPSED (time_start, time_end);
+
+       /* FIXME: also report this to the user, preferably in gc-end. */
+       if (major_collector.get_and_reset_num_major_objects_marked)
+               major_collector.get_and_reset_num_major_objects_marked ();
+
+       return bytes_pinned_from_failed_allocation > 0;
+}
+
+/*
+ * Kick off a concurrent major collection: run the start pause via
+ * major_start_collection(TRUE, ...), then redirect the gray queue so the
+ * worker threads continue marking concurrently.  The wall-clock start time
+ * is recorded so the concurrent duration can be accounted at finish time.
+ * No-op when major collections are disabled.
+ */
+static void
+major_start_concurrent_collection (const char *reason)
+{
+       TV_DECLARE (time_start);
+       TV_DECLARE (time_end);
+       long long num_objects_marked;
+
+       if (disable_major_collections)
+               return;
+
+       TV_GETTIME (time_start);
+       SGEN_TV_GETTIME (time_major_conc_collection_start);
+
+       num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
+       g_assert (num_objects_marked == 0);
+
+       binary_protocol_concurrent_start ();
+
+       // FIXME: store reason and pass it when finishing
+       major_start_collection (TRUE, NULL);
+
+       /* Hand remaining gray-queue work over to the workers. */
+       gray_queue_redirect (&gray_queue);
+
+       num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
+
+       TV_GETTIME (time_end);
+       gc_stats.major_gc_time += TV_ELAPSED (time_start, time_end);
+
+       current_collection_generation = -1;
+}
+
+/*
+ * Returns whether the concurrent major collection has finished marking,
+ * i.e. whether all worker threads are done.  Asserts that the main gray
+ * queue is empty before anything else has happened.
+ */
+static gboolean
+major_should_finish_concurrent_collection (void)
+{
+       SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (&gray_queue), "Why is the gray queue not empty before we have started doing anything?");
+       return sgen_workers_all_done ();
+}
+
+/*
+ * Intermediate pause while a concurrent major collection is running:
+ * update the mod-union card tables for the major heap and the LOS, and
+ * account the pause time against major GC time.
+ */
+static void
+major_update_concurrent_collection (void)
+{
+       TV_DECLARE (total_start);
+       TV_DECLARE (total_end);
+
+       TV_GETTIME (total_start);
+
+       binary_protocol_concurrent_update ();
+
+       major_collector.update_cardtable_mod_union ();
+       sgen_los_update_cardtable_mod_union ();
+
+       TV_GETTIME (total_end);
+       gc_stats.major_gc_time += TV_ELAPSED (total_start, total_end);
+}
+
+/*
+ * Final pause of a concurrent major collection: wait for the workers,
+ * update mod-union card tables, run a finishing nursery collection (whose
+ * pinned objects are recorded in @unpin_queue and unpinned afterwards),
+ * then run major_finish_collection.
+ *
+ * @forced is passed through to major_finish_collection as the memory
+ * governor's "forced" flag.
+ */
+static void
+major_finish_concurrent_collection (gboolean forced)
+{
+       TV_DECLARE (total_start);
+       TV_DECLARE (total_end);
+       gboolean late_pinned;
+       SgenGrayQueue unpin_queue;
+       memset (&unpin_queue, 0, sizeof (unpin_queue));
+
+       TV_GETTIME (total_start);
+
+       binary_protocol_concurrent_finish ();
+
+       /*
+        * The major collector can add global remsets which are processed in the finishing
+        * nursery collection, below.  That implies that the workers must have finished
+        * marking before the nursery collection is allowed to run, otherwise we might miss
+        * some remsets.
+        */
+       sgen_workers_wait ();
+
+       SGEN_TV_GETTIME (time_major_conc_collection_end);
+       gc_stats.major_gc_time_concurrent += SGEN_TV_ELAPSED (time_major_conc_collection_start, time_major_conc_collection_end);
+
+       major_collector.update_cardtable_mod_union ();
+       sgen_los_update_cardtable_mod_union ();
+
+       /* If the nursery collection late-pinned objects, the finish pause
+        * must scan the whole nursery (passed as scan_whole_nursery below). */
+       late_pinned = collect_nursery (&unpin_queue, TRUE);
+
+       if (mod_union_consistency_check)
+               sgen_check_mod_union_consistency ();
+
+       current_collection_generation = GENERATION_OLD;
+       major_finish_collection ("finishing", -1, forced, late_pinned);
+
+       if (whole_heap_check_before_collection)
+               sgen_check_whole_heap (FALSE);
+
+       unpin_objects_from_queue (&unpin_queue);
+       sgen_gray_object_queue_deinit (&unpin_queue);
+
+       /* Don't double-count the embedded nursery collection's time. */
+       TV_GETTIME (total_end);
+       gc_stats.major_gc_time += TV_ELAPSED (total_start, total_end) - TV_ELAPSED (last_minor_collection_start_tv, last_minor_collection_end_tv);
+
+       current_collection_generation = -1;
+}
+
+/*
+ * Ensure an allocation request for @size will succeed by freeing enough memory.
+ *
+ * Picks the generation to collect: large objects and degraded-mode or
+ * over-allowance small allocations trigger a major collection, otherwise a
+ * nursery collection.  If nothing needs collecting but a concurrent major
+ * collection has finished marking, it is finished here instead.
+ *
+ * LOCKING: The GC lock MUST be held.
+ */
+void
+sgen_ensure_free_space (size_t size)
+{
+       int generation_to_collect = -1;
+       const char *reason = NULL;
+
+       if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
+               if (sgen_need_major_collection (size)) {
+                       reason = "LOS overflow";
+                       generation_to_collect = GENERATION_OLD;
+               }
+       } else {
+               if (degraded_mode) {
+                       if (sgen_need_major_collection (size)) {
+                               reason = "Degraded mode overflow";
+                               generation_to_collect = GENERATION_OLD;
+                       }
+               } else if (sgen_need_major_collection (size)) {
+                       reason = "Minor allowance";
+                       generation_to_collect = GENERATION_OLD;
+               } else {
+                       generation_to_collect = GENERATION_NURSERY;
+                       reason = "Nursery full";
+               }
+       }
+
+       /* No collection needed on its own merits; finish a done concurrent one. */
+       if (generation_to_collect == -1) {
+               if (concurrent_collection_in_progress && sgen_workers_all_done ()) {
+                       generation_to_collect = GENERATION_OLD;
+                       reason = "Finish concurrent collection";
+               }
+       }
+
+       if (generation_to_collect == -1)
+               return;
+       sgen_perform_collection (size, generation_to_collect, reason, FALSE);
+}
+
+/*
+ * Perform a garbage collection of @generation_to_collect (nursery or old),
+ * stopping the world for the duration.  @requested_size is the allocation
+ * request that triggered the collection (used afterwards to decide whether
+ * to enter degraded mode); @reason is a human-readable cause for logging;
+ * @wait_to_finish forces a major collection to run/complete synchronously.
+ *
+ * If a concurrent collection is already in progress it is either finished
+ * or updated here instead of starting a new one.  An overflow collection
+ * may be run if the first collection OOM'd or left the nursery overly
+ * pinned.
+ *
+ * LOCKING: Assumes the GC lock is held.
+ */
+void
+sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish)
+{
+       TV_DECLARE (gc_start);
+       TV_DECLARE (gc_end);
+       TV_DECLARE (gc_total_start);
+       TV_DECLARE (gc_total_end);
+       GGTimingInfo infos [2];
+       int overflow_generation_to_collect = -1;
+       int oldest_generation_collected = generation_to_collect;
+       const char *overflow_reason = NULL;
+
+       binary_protocol_collection_requested (generation_to_collect, requested_size, wait_to_finish ? 1 : 0);
+
+       SGEN_ASSERT (0, generation_to_collect == GENERATION_NURSERY || generation_to_collect == GENERATION_OLD, "What generation is this?");
+
+       TV_GETTIME (gc_start);
+
+       sgen_stop_world (generation_to_collect);
+
+       TV_GETTIME (gc_total_start);
+
+       if (concurrent_collection_in_progress) {
+               /*
+                * We update the concurrent collection.  If it finished, we're done.  If
+                * not, and we've been asked to do a nursery collection, we do that.
+                */
+               gboolean finish = major_should_finish_concurrent_collection () || (wait_to_finish && generation_to_collect == GENERATION_OLD);
+
+               if (finish) {
+                       major_finish_concurrent_collection (wait_to_finish);
+                       oldest_generation_collected = GENERATION_OLD;
+               } else {
+                       sgen_workers_signal_start_nursery_collection_and_wait ();
+
+                       major_update_concurrent_collection ();
+                       if (generation_to_collect == GENERATION_NURSERY)
+                               collect_nursery (NULL, FALSE);
+
+                       sgen_workers_signal_finish_nursery_collection ();
+               }
+
+               goto done;
+       }
+
+       /*
+        * If we've been asked to do a major collection, and the major collector wants to
+        * run synchronously (to evacuate), we set the flag to do that.
+        */
+       if (generation_to_collect == GENERATION_OLD &&
+                       allow_synchronous_major &&
+                       major_collector.want_synchronous_collection &&
+                       *major_collector.want_synchronous_collection) {
+               wait_to_finish = TRUE;
+       }
+
+       SGEN_ASSERT (0, !concurrent_collection_in_progress, "Why did this not get handled above?");
+
+       /*
+        * There's no concurrent collection in progress.  Collect the generation we're asked
+        * to collect.  If the major collector is concurrent and we're not forced to wait,
+        * start a concurrent collection.
+        */
+       // FIXME: extract overflow reason
+       if (generation_to_collect == GENERATION_NURSERY) {
+               if (collect_nursery (NULL, FALSE)) {
+                       overflow_generation_to_collect = GENERATION_OLD;
+                       overflow_reason = "Minor overflow";
+               }
+       } else {
+               if (major_collector.is_concurrent && !wait_to_finish) {
+                       /* A nursery collection first, so the concurrent start
+                        * pause doesn't have to deal with nursery survivors. */
+                       collect_nursery (NULL, FALSE);
+                       major_start_concurrent_collection (reason);
+                       // FIXME: set infos[0] properly
+                       goto done;
+               }
+
+               if (major_do_collection (reason, wait_to_finish)) {
+                       overflow_generation_to_collect = GENERATION_NURSERY;
+                       overflow_reason = "Excessive pinning";
+               }
+       }
+
+       TV_GETTIME (gc_end);
+
+       /* Fill in the timing info handed to sgen_restart_world below. */
+       memset (infos, 0, sizeof (infos));
+       infos [0].generation = generation_to_collect;
+       infos [0].reason = reason;
+       infos [0].is_overflow = FALSE;
+       infos [1].generation = -1;
+       infos [0].total_time = SGEN_TV_ELAPSED (gc_start, gc_end);
+
+       SGEN_ASSERT (0, !concurrent_collection_in_progress, "Why did this not get handled above?");
+
+       if (overflow_generation_to_collect != -1) {
+               /*
+                * We need to do an overflow collection, either because we ran out of memory
+                * or the nursery is fully pinned.
+                */
+
+               infos [1].generation = overflow_generation_to_collect;
+               infos [1].reason = overflow_reason;
+               infos [1].is_overflow = TRUE;
+               gc_start = gc_end;
+
+               if (overflow_generation_to_collect == GENERATION_NURSERY)
+                       collect_nursery (NULL, FALSE);
+               else
+                       major_do_collection (overflow_reason, wait_to_finish);
+
+               TV_GETTIME (gc_end);
+               infos [1].total_time = SGEN_TV_ELAPSED (gc_start, gc_end);
+
+               oldest_generation_collected = MAX (oldest_generation_collected, overflow_generation_to_collect);
+       }
+
+       SGEN_LOG (2, "Heap size: %lu, LOS size: %lu", (unsigned long)sgen_gc_get_total_heap_allocation (), (unsigned long)los_memory_usage);
+
+       /* this also sets the proper pointers for the next allocation */
+       if (generation_to_collect == GENERATION_NURSERY && !sgen_can_alloc_size (requested_size)) {
+               /* TypeBuilder and MonoMethod are killing mcs with fragmentation */
+               SGEN_LOG (1, "nursery collection didn't find enough room for %zd alloc (%zd pinned)", requested_size, sgen_get_pinned_count ());
+               sgen_dump_pin_queue ();
+               degraded_mode = 1;
+       }
+
+ done:
+       g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
+
+       TV_GETTIME (gc_total_end);
+       time_max = MAX (time_max, TV_ELAPSED (gc_total_start, gc_total_end));
+
+       sgen_restart_world (oldest_generation_collected, infos);
+}
+
+/*
+ * ######################################################################
+ * ########  Memory allocation from the OS
+ * ######################################################################
+ * This section of code deals with getting memory from the OS and
+ * allocating memory for GC-internal data structures.
+ * Internal memory can be handled with a freelist for small objects.
+ */
+
+/*
+ * Debug reporting: print the GC's internal and pinned memory usage to
+ * stdout.  Kept around for manual debugging (hence G_GNUC_UNUSED).
+ */
+G_GNUC_UNUSED static void
+report_internal_mem_usage (void)
+{
+       printf ("Internal memory usage:\n");
+       sgen_report_internal_mem_usage ();
+       printf ("Pinned memory usage:\n");
+       major_collector.report_pinned_memory_usage ();
+}
+
+/*
+ * ######################################################################
+ * ########  Finalization support
+ * ######################################################################
+ */
+
+/*
+ * Returns TRUE if @object is alive, dispatching to the nursery or major
+ * liveness check depending on where the object lives.
+ *
+ * If the object has been forwarded it means it's still referenced from a root.
+ * If it is pinned it's still alive as well.
+ * A LOS object is only alive if we have pinned it.
+ */
+static inline gboolean
+sgen_is_object_alive (void *object)
+{
+       if (ptr_in_nursery (object))
+               return sgen_nursery_is_object_alive (object);
+
+       return sgen_major_is_object_alive (object);
+}
+
+/*
+ * Returns TRUE if @object is alive and belongs to the generation being
+ * collected.  Major collections are full-heap, so old-generation objects
+ * are never considered here during a minor collection.
+ */
+static inline int
+sgen_is_object_alive_and_on_current_collection (char *object)
+{
+       if (ptr_in_nursery (object))
+               return sgen_nursery_is_object_alive (object);
+
+       /* Old-gen objects are not part of a nursery collection. */
+       if (current_collection_generation == GENERATION_NURSERY)
+               return FALSE;
+
+       return sgen_major_is_object_alive (object);
+}
+
+
+/* Return TRUE if @object is dead and therefore ready to be finalized. */
+gboolean
+sgen_gc_is_object_ready_for_finalization (void *object)
+{
+       return !sgen_is_object_alive (object);
+}
+
+/*
+ * Queue @obj for finalization, on the critical queue if the client reports
+ * it has a critical finalizer, otherwise on the ready queue, and notify the
+ * client that the object was queued.
+ */
+void
+sgen_queue_finalization_entry (GCObject *obj)
+{
+       gboolean critical = sgen_client_object_has_critical_finalizer (obj);
+
+       sgen_pointer_queue_add (critical ? &critical_fin_queue : &fin_ready_queue, obj);
+
+       sgen_client_object_queued_for_finalization (obj);
+}
+
+/* Public wrapper: is @obj alive with respect to the current collection? */
+gboolean
+sgen_object_is_live (void *obj)
+{
+       return sgen_is_object_alive_and_on_current_collection (obj);
+}
+
+/*
+ * `System.GC.WaitForPendingFinalizers` first checks `sgen_have_pending_finalizers()` to
+ * determine whether it can exit quickly.  The latter must therefore only return FALSE if
+ * all finalizers have really finished running.
+ *
+ * `sgen_gc_invoke_finalizers()` first dequeues a finalizable object, and then finalizes it.
+ * This means that just checking whether the queues are empty leaves the possibility that an
+ * object might have been dequeued but not yet finalized.  That's why we need the additional
+ * flag `pending_unqueued_finalizer`.
+ */
+
+/* TRUE while an object has been popped from a queue but not yet finalized. */
+static volatile gboolean pending_unqueued_finalizer = FALSE;
+
+/*
+ * Run pending finalizers: repeatedly pop an object from the ready queue
+ * (critical queue second) under the GC lock and invoke its finalizer with
+ * the lock released.  Returns the number of finalizers run.
+ */
+int
+sgen_gc_invoke_finalizers (void)
+{
+       int count = 0;
+
+       g_assert (!pending_unqueued_finalizer);
+
+       /* FIXME: batch to reduce lock contention */
+       while (sgen_have_pending_finalizers ()) {
+               void *obj;
+
+               LOCK_GC;
+
+               /*
+                * We need to set `pending_unqueued_finalizer` before dequeing the
+                * finalizable object.
+                */
+               if (!sgen_pointer_queue_is_empty (&fin_ready_queue)) {
+                       pending_unqueued_finalizer = TRUE;
+                       mono_memory_write_barrier ();
+                       obj = sgen_pointer_queue_pop (&fin_ready_queue);
+               } else if (!sgen_pointer_queue_is_empty (&critical_fin_queue)) {
+                       pending_unqueued_finalizer = TRUE;
+                       mono_memory_write_barrier ();
+                       obj = sgen_pointer_queue_pop (&critical_fin_queue);
+               } else {
+                       obj = NULL;
+               }
+
+               if (obj)
+                       SGEN_LOG (7, "Finalizing object %p (%s)", obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj)));
+
+               UNLOCK_GC;
+
+               if (!obj)
+                       break;
+
+               count++;
+               /* the object is on the stack so it is pinned */
+               /*g_print ("Calling finalizer for object: %p (%s)\n", obj, sgen_client_object_safe_name (obj));*/
+               sgen_client_run_finalize (obj);
+       }
+
+       /* Clear the flag only after the last finalizer has actually run. */
+       if (pending_unqueued_finalizer) {
+               mono_memory_write_barrier ();
+               pending_unqueued_finalizer = FALSE;
+       }
+
+       return count;
+}
+
+/*
+ * Return TRUE if any finalizer still has to run: either an object sits in
+ * one of the finalization queues, or one has been dequeued but its
+ * finalizer hasn't finished yet (see comment above).
+ */
+gboolean
+sgen_have_pending_finalizers (void)
+{
+       return pending_unqueued_finalizer || !sgen_pointer_queue_is_empty (&fin_ready_queue) || !sgen_pointer_queue_is_empty (&critical_fin_queue);
+}
+
+/*
+ * ######################################################################
+ * ########  registered roots support
+ * ######################################################################
+ */
+
+/*
+ * We do not coalesce roots.
+ *
+ * Register the memory range [@start, @start + @size) as a GC root of
+ * @root_type with descriptor @descr.  If a root starting at @start already
+ * exists (in any root type), its size and descriptor are updated in place
+ * (used for thread statics etc.).  Always returns TRUE.
+ *
+ * LOCKING: Takes the GC lock.
+ */
+int
+sgen_register_root (char *start, size_t size, void *descr, int root_type)
+{
+       RootRecord new_root;
+       int i;
+       LOCK_GC;
+       for (i = 0; i < ROOT_TYPE_NUM; ++i) {
+               RootRecord *root = sgen_hash_table_lookup (&roots_hash [i], start);
+               /* we allow changing the size and the descriptor (for thread statics etc) */
+               if (root) {
+                       size_t old_size = root->end_root - start;
+                       root->end_root = start + size;
+                       /* A root must not flip between having and not having a descriptor. */
+                       g_assert (((root->root_desc != 0) && (descr != NULL)) ||
+                                         ((root->root_desc == 0) && (descr == NULL)));
+                       root->root_desc = (mword)descr;
+                       roots_size += size;
+                       roots_size -= old_size;
+                       UNLOCK_GC;
+                       return TRUE;
+               }
+       }
+
+       new_root.end_root = start + size;
+       new_root.root_desc = (mword)descr;
+
+       sgen_hash_table_replace (&roots_hash [root_type], start, &new_root, NULL);
+       roots_size += size;
+
+       SGEN_LOG (3, "Added root for range: %p-%p, descr: %p  (%d/%d bytes)", start, new_root.end_root, descr, (int)size, (int)roots_size);
+
+       UNLOCK_GC;
+       return TRUE;
+}
+
+/*
+ * Remove the root starting at `addr` from every root type's hash table,
+ * adjusting the global accounting of registered root bytes.
+ */
+void
+sgen_deregister_root (char* addr)
+{
+       RootRecord removed;
+       int type;
+
+       LOCK_GC;
+       for (type = 0; type < ROOT_TYPE_NUM; ++type) {
+               gboolean found = sgen_hash_table_remove (&roots_hash [type], addr, &removed);
+               if (found)
+                       roots_size -= (removed.end_root - addr);
+       }
+       UNLOCK_GC;
+}
+
+/*
+ * ######################################################################
+ * ########  Thread handling (stop/start code)
+ * ######################################################################
+ */
+
+/*
+ * Returns the generation currently being collected.  NOTE(review): the
+ * value held outside of a collection is not visible in this chunk --
+ * confirm against the definition of current_collection_generation.
+ */
+int
+sgen_get_current_collection_generation (void)
+{
+       return current_collection_generation;
+}
+
+/*
+ * Register a thread with the collector: reset its per-thread TLAB fields
+ * (when they live in the thread-info struct rather than in compiler TLS),
+ * initialize its TLAB bookkeeping, then hand off to the client layer for
+ * embedder-specific registration.  Returns `info` itself.
+ */
+void*
+sgen_thread_register (SgenThreadInfo* info, void *stack_bottom_fallback)
+{
+#ifndef HAVE_KW_THREAD
+       /* Without __thread support the TLAB pointers are stored in `info`. */
+       info->tlab_start = info->tlab_next = info->tlab_temp_end = info->tlab_real_end = NULL;
+#endif
+
+       sgen_init_tlab_info (info);
+
+       sgen_client_thread_register (info, stack_bottom_fallback);
+
+       return info;
+}
+
+/*
+ * Unregister a thread from the collector; all of the actual work is
+ * delegated to the client layer.
+ */
+void
+sgen_thread_unregister (SgenThreadInfo *p)
+{
+       sgen_client_thread_unregister (p);
+}
+
+/*
+ * ######################################################################
+ * ########  Write barriers
+ * ######################################################################
+ */
+
+/*
+ * Note: the write barriers first do the needed GC work and then do the actual store:
+ * this way the value is visible to the conservative GC scan after the write barrier
+ * itself. If a GC interrupts the barrier in the middle, value will be kept alive by
+ * the conservative scan, otherwise by the remembered set scan.
+ */
+
+void
+mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
+{
+       HEAVY_STAT (++stat_wbarrier_arrayref_copy);
+       /*This check can be done without taking a lock since dest_ptr array is pinned*/
+       if (ptr_in_nursery (dest_ptr) || count <= 0) {
+               mono_gc_memmove_aligned (dest_ptr, src_ptr, count * sizeof (gpointer));
+               return;
+       }
+
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+       if (binary_protocol_is_heavy_enabled ()) {
+               int i;
+               for (i = 0; i < count; ++i) {
+                       gpointer dest = (gpointer*)dest_ptr + i;
+                       gpointer obj = *((gpointer*)src_ptr + i);
+                       if (obj)
+                               binary_protocol_wbarrier (dest, obj, (gpointer)LOAD_VTABLE (obj));
+               }
+       }
+#endif
+
+       remset.wbarrier_arrayref_copy (dest_ptr, src_ptr, count);
+}
+
+/*
+ * Write barrier for a reference store that has already happened: `ptr` is
+ * the slot whose current contents are the just-stored object.  The slot is
+ * remembered when the stored object lives in the nursery, or -- so that
+ * old->old locations are recorded -- whenever a concurrent collection is
+ * in progress.
+ */
+void
+mono_gc_wbarrier_generic_nostore (gpointer ptr)
+{
+       gpointer obj;
+
+       HEAVY_STAT (++stat_wbarrier_generic_store);
+
+       sgen_client_wbarrier_generic_nostore_check (ptr);
+
+       /* Read back the stored value to decide whether to remember the slot. */
+       obj = *(gpointer*)ptr;
+       if (obj)
+               binary_protocol_wbarrier (ptr, obj, (gpointer)LOAD_VTABLE (obj));
+
+       /*
+        * We need to record old->old pointer locations for the
+        * concurrent collector.
+        */
+       if (!ptr_in_nursery (obj) && !concurrent_collection_in_progress) {
+               SGEN_LOG (8, "Skipping remset at %p", ptr);
+               return;
+       }
+
+       SGEN_LOG (8, "Adding remset at %p", ptr);
+
+       remset.wbarrier_generic_nostore (ptr);
+}
+
+/*
+ * Write barrier for storing the reference `value` into the slot `ptr`.
+ * Per the note above, the store happens *first* so the value is visible
+ * to a conservative scan; the remembered-set work only runs when the
+ * stored object lives in the nursery.
+ */
+void
+mono_gc_wbarrier_generic_store (gpointer ptr, GCObject* value)
+{
+       SGEN_LOG (8, "Wbarrier store at %p to %p (%s)", ptr, value, value ? sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (value)) : "null");
+       SGEN_UPDATE_REFERENCE_ALLOW_NULL (ptr, value);
+       if (ptr_in_nursery (value))
+               mono_gc_wbarrier_generic_nostore (ptr);
+       /* presumably keeps `value` live until the barrier completes -- confirm
+        * against the sgen_dummy_use definition */
+       sgen_dummy_use (value);
+}
+
+/* Same as mono_gc_wbarrier_generic_store () but performs the store
+ * as an atomic operation with release semantics.
+ */
+void
+mono_gc_wbarrier_generic_store_atomic (gpointer ptr, GCObject *value)
+{
+       HEAVY_STAT (++stat_wbarrier_generic_store_atomic);
+
+       SGEN_LOG (8, "Wbarrier atomic store at %p to %p (%s)", ptr, value, value ? sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (value)) : "null");
+
+       /* The atomic store replaces SGEN_UPDATE_REFERENCE_ALLOW_NULL from the
+        * non-atomic variant; it still happens before the remset work. */
+       InterlockedWritePointer (ptr, value);
+
+       if (ptr_in_nursery (value))
+               mono_gc_wbarrier_generic_nostore (ptr);
+
+       /* presumably keeps `value` live until the barrier completes -- confirm
+        * against the sgen_dummy_use definition */
+       sgen_dummy_use (value);
+}
+
+/*
+ * Copy `size` bytes' worth of pointer-sized slots from `_src` to `_dest`,
+ * using the full write barrier for each slot whose corresponding bit is
+ * set in `bitmap` (bit 0 = first slot) and a plain store otherwise.
+ *
+ * FIX: the loop condition used to be `while (size)`, which never
+ * terminates (and scribbles past the destination) if `size` is not an
+ * exact multiple of SIZEOF_VOID_P; `size > 0` makes it stop.
+ */
+void
+sgen_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
+{
+       GCObject **dest = _dest;
+       GCObject **src = _src;
+
+       while (size > 0) {
+               if (bitmap & 0x1)
+                       mono_gc_wbarrier_generic_store (dest, *src);
+               else
+                       *dest = *src;
+               ++src;
+               ++dest;
+               size -= SIZEOF_VOID_P;
+               bitmap >>= 1;
+       }
+}
+
+/*
+ * ######################################################################
+ * ########  Other mono public interface functions.
+ * ######################################################################
+ */
+
+/*
+ * Perform a user-requested collection of the given generation, clamped to
+ * the highest generation we support (1, the major heap).
+ */
+void
+sgen_gc_collect (int generation)
+{
+       int gen = generation > 1 ? 1 : generation;
+
+       LOCK_GC;
+       sgen_perform_collection (0, gen, "user request", TRUE);
+       UNLOCK_GC;
+}
+
+/*
+ * Number of collections performed so far: minor collections for
+ * generation 0, major collections for anything else.
+ */
+int
+sgen_gc_collection_count (int generation)
+{
+       return generation == 0 ? gc_stats.minor_gc_count : gc_stats.major_gc_count;
+}
+
+/*
+ * Approximate number of bytes currently in use on the GC heap: LOS usage,
+ * plus the consumed part of the nursery, plus the major collector's own
+ * accounting.  Pinned objects are not accounted for (see FIXME below).
+ */
+size_t
+sgen_gc_get_used_size (void)
+{
+       gint64 tot = 0;
+       LOCK_GC;
+       tot = los_memory_usage;
+       tot += nursery_section->next_data - nursery_section->data;
+       tot += major_collector.get_used_size ();
+       /* FIXME: account for pinned objects */
+       UNLOCK_GC;
+       return tot;
+}
+
+/*
+ * Dereference the hidden (masked) pointer stored in the weak-link slot
+ * `link_addr` and return the object it refers to, or NULL if the link is
+ * empty.  The slot is re-read after revealing the pointer -- and after
+ * waiting for bridge processing -- to make sure the object was not freed
+ * or the link nulled in the meantime; see the comments below for the
+ * exact protocol.
+ */
+GCObject*
+sgen_weak_link_get (void **link_addr)
+{
+       void * volatile *link_addr_volatile;
+       void *ptr;
+       GCObject *obj;
+ retry:
+       link_addr_volatile = link_addr;
+       ptr = (void*)*link_addr_volatile;
+       /*
+        * At this point we have a hidden pointer.  If the GC runs
+        * here, it will not recognize the hidden pointer as a
+        * reference, and if the object behind it is not referenced
+        * elsewhere, it will be freed.  Once the world is restarted
+        * we reveal the pointer, giving us a pointer to a freed
+        * object.  To make sure we don't return it, we load the
+        * hidden pointer again.  If it's still the same, we can be
+        * sure the object reference is valid.
+        */
+       if (ptr)
+               obj = (GCObject*) REVEAL_POINTER (ptr);
+       else
+               return NULL;
+
+       mono_memory_barrier ();
+
+       /*
+        * During the second bridge processing step the world is
+        * running again.  That step processes all weak links once
+        * more to null those that refer to dead objects.  Before that
+        * is completed, those links must not be followed, so we
+        * conservatively wait for bridge processing when any weak
+        * link is dereferenced.
+        */
+       sgen_client_bridge_wait_for_processing ();
+
+       if ((void*)*link_addr_volatile != ptr)
+               goto retry;
+
+       return obj;
+}
+
+/*
+ * Enable or disable synchronous major collections.  Only meaningful for a
+ * concurrent major collector: for non-concurrent collectors the setting
+ * is ignored and `flag` itself is returned (so disabling reads as
+ * failure).  Returns TRUE when the setting was applied.
+ */
+gboolean
+sgen_set_allow_synchronous_major (gboolean flag)
+{
+       if (!major_collector.is_concurrent)
+               return flag;
+
+       allow_synchronous_major = flag;
+       return TRUE;
+}
+
+/*
+ * Report a problem with the value of environment variable `env_var` to
+ * stderr.  The printf-style `description_format` (plus varargs) describes
+ * the problem; `fallback`, if non-NULL, describes what the GC will do
+ * instead.
+ */
+void
+sgen_env_var_error (const char *env_var, const char *fallback, const char *description_format, ...)
+{
+       va_list args;
+
+       fprintf (stderr, "Warning: In environment variable `%s': ", env_var);
+
+       va_start (args, description_format);
+       vfprintf (stderr, description_format, args);
+       va_end (args);
+
+       if (fallback)
+               fprintf (stderr, " - %s", fallback);
+       fprintf (stderr, "\n");
+}
+
+/*
+ * Parse `opt` as a double and require it to lie within [min, max].  On
+ * success the value is stored in *result and TRUE is returned; otherwise
+ * the problem is reported against `env_var`/`opt_name` and FALSE is
+ * returned, leaving *result untouched.
+ */
+static gboolean
+parse_double_in_interval (const char *env_var, const char *opt_name, const char *opt, double min, double max, double *result)
+{
+       char *end;
+       double parsed;
+
+       parsed = strtod (opt, &end);
+       if (end == opt) {
+               /* No characters were consumed: not a number at all. */
+               sgen_env_var_error (env_var, "Using default value.", "`%s` must be a number.", opt_name);
+               return FALSE;
+       }
+       if (parsed < min || parsed > max) {
+               sgen_env_var_error (env_var, "Using default value.", "`%s` must be between %.2f - %.2f.", opt_name, min, max);
+               return FALSE;
+       }
+
+       *result = parsed;
+       return TRUE;
+}
+
+void
+sgen_gc_init (void)
+{
+       const char *env;
+       char **opts, **ptr;
+       char *major_collector_opt = NULL;
+       char *minor_collector_opt = NULL;
+       size_t max_heap = 0;
+       size_t soft_limit = 0;
+       int result;
+       gboolean debug_print_allowance = FALSE;
+       double allowance_ratio = 0, save_target = 0;
+       gboolean cement_enabled = TRUE;
+
+       do {
+               result = InterlockedCompareExchange (&gc_initialized, -1, 0);
+               switch (result) {
+               case 1:
+                       /* already inited */
+                       return;
+               case -1:
+                       /* being inited by another thread */
+                       g_usleep (1000);
+                       break;
+               case 0:
+                       /* we will init it */
+                       break;
+               default:
+                       g_assert_not_reached ();
+               }
+       } while (result != 0);
+
+       SGEN_TV_GETTIME (sgen_init_timestamp);
+
+#ifdef SGEN_WITHOUT_MONO
+       mono_thread_smr_init ();
+#endif
+
+       LOCK_INIT (gc_mutex);
+
+       gc_debug_file = stderr;
+
+       LOCK_INIT (sgen_interruption_mutex);
+
+       if ((env = g_getenv (MONO_GC_PARAMS_NAME))) {
+               opts = g_strsplit (env, ",", -1);
+               for (ptr = opts; *ptr; ++ptr) {
+                       char *opt = *ptr;
+                       if (g_str_has_prefix (opt, "major=")) {
+                               opt = strchr (opt, '=') + 1;
+                               major_collector_opt = g_strdup (opt);
+                       } else if (g_str_has_prefix (opt, "minor=")) {
+                               opt = strchr (opt, '=') + 1;
+                               minor_collector_opt = g_strdup (opt);
+                       }
+               }
+       } else {
+               opts = NULL;
+       }
+
+       init_stats ();
+       sgen_init_internal_allocator ();
+       sgen_init_nursery_allocator ();
+       sgen_init_fin_weak_hash ();
+       sgen_init_hash_table ();
+       sgen_init_descriptors ();
+       sgen_init_gray_queues ();
+       sgen_init_allocator ();
+
+       sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION, SGEN_SIZEOF_GC_MEM_SECTION);
+       sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE, sizeof (GrayQueueSection));
+
+       sgen_client_init ();
+
+       if (!minor_collector_opt) {
+               sgen_simple_nursery_init (&sgen_minor_collector);
+       } else {
+               if (!strcmp (minor_collector_opt, "simple")) {
+               use_simple_nursery:
+                       sgen_simple_nursery_init (&sgen_minor_collector);
+               } else if (!strcmp (minor_collector_opt, "split")) {
+                       sgen_split_nursery_init (&sgen_minor_collector);
+               } else {
+                       sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `simple` instead.", "Unknown minor collector `%s'.", minor_collector_opt);
+                       goto use_simple_nursery;
+               }
+       }
+
+       if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep")) {
+       use_marksweep_major:
+               sgen_marksweep_init (&major_collector);
+       } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-conc")) {
+               sgen_marksweep_conc_init (&major_collector);
+       } else {
+               sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `marksweep` instead.", "Unknown major collector `%s'.", major_collector_opt);
+               goto use_marksweep_major;
+       }
+
+       sgen_nursery_size = DEFAULT_NURSERY_SIZE;
+
+       if (major_collector.is_concurrent)
+               cement_enabled = FALSE;
+
+       if (opts) {
+               gboolean usage_printed = FALSE;
+
+               for (ptr = opts; *ptr; ++ptr) {
+                       char *opt = *ptr;
+                       if (!strcmp (opt, ""))
+                               continue;
+                       if (g_str_has_prefix (opt, "major="))
+                               continue;
+                       if (g_str_has_prefix (opt, "minor="))
+                               continue;
+                       if (g_str_has_prefix (opt, "max-heap-size=")) {
+                               size_t page_size = mono_pagesize ();
+                               size_t max_heap_candidate = 0;
+                               opt = strchr (opt, '=') + 1;
+                               if (*opt && mono_gc_parse_environment_string_extract_number (opt, &max_heap_candidate)) {
+                                       max_heap = (max_heap_candidate + page_size - 1) & ~(size_t)(page_size - 1);
+                                       if (max_heap != max_heap_candidate)
+                                               sgen_env_var_error (MONO_GC_PARAMS_NAME, "Rounding up.", "`max-heap-size` size must be a multiple of %d.", page_size);
+                               } else {
+                                       sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`max-heap-size` must be an integer.");
+                               }
+                               continue;
+                       }
+                       if (g_str_has_prefix (opt, "soft-heap-limit=")) {
+                               opt = strchr (opt, '=') + 1;
+                               if (*opt && mono_gc_parse_environment_string_extract_number (opt, &soft_limit)) {
+                                       if (soft_limit <= 0) {
+                                               sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be positive.");
+                                               soft_limit = 0;
+                                       }
+                               } else {
+                                       sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be an integer.");
+                               }
+                               continue;
+                       }
+
+#ifdef USER_CONFIG
+                       if (g_str_has_prefix (opt, "nursery-size=")) {
+                               size_t val;
+                               opt = strchr (opt, '=') + 1;
+                               if (*opt && mono_gc_parse_environment_string_extract_number (opt, &val)) {
+                                       if ((val & (val - 1))) {
+                                               sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be a power of two.");
+                                               continue;
+                                       }
+
+                                       if (val < SGEN_MAX_NURSERY_WASTE) {
+                                               sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.",
+                                                               "`nursery-size` must be at least %d bytes.", SGEN_MAX_NURSERY_WASTE);
+                                               continue;
+                                       }
+
+                                       sgen_nursery_size = val;
+                                       sgen_nursery_bits = 0;
+                                       while (ONE_P << (++ sgen_nursery_bits) != sgen_nursery_size)
+                                               ;
+                               } else {
+                                       sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be an integer.");
+                                       continue;
+                               }
+                               continue;
+                       }
+#endif
+                       if (g_str_has_prefix (opt, "save-target-ratio=")) {
+                               double val;
+                               opt = strchr (opt, '=') + 1;
+                               if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "save-target-ratio", opt,
+                                               SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO, &val)) {
+                                       save_target = val;
+                               }
+                               continue;
+                       }
+                       if (g_str_has_prefix (opt, "default-allowance-ratio=")) {
+                               double val;
+                               opt = strchr (opt, '=') + 1;
+                               if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "default-allowance-ratio", opt,
+                                               SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, &val)) {
+                                       allowance_ratio = val;
+                               }
+                               continue;
+                       }
+                       if (g_str_has_prefix (opt, "allow-synchronous-major=")) {
+                               if (!major_collector.is_concurrent) {
+                                       sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`allow-synchronous-major` is only valid for the concurrent major collector.");
+                                       continue;
+                               }
+
+                               opt = strchr (opt, '=') + 1;
+
+                               if (!strcmp (opt, "yes")) {
+                                       allow_synchronous_major = TRUE;
+                               } else if (!strcmp (opt, "no")) {
+                                       allow_synchronous_major = FALSE;
+                               } else {
+                                       sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`allow-synchronous-major` must be either `yes' or `no'.");
+                                       continue;
+                               }
+                       }
+
+                       if (!strcmp (opt, "cementing")) {
+                               cement_enabled = TRUE;
+                               continue;
+                       }
+                       if (!strcmp (opt, "no-cementing")) {
+                               cement_enabled = FALSE;
+                               continue;
+                       }
+
+                       if (major_collector.handle_gc_param && major_collector.handle_gc_param (opt))
+                               continue;
+
+                       if (sgen_minor_collector.handle_gc_param && sgen_minor_collector.handle_gc_param (opt))
+                               continue;
+
+                       if (sgen_client_handle_gc_param (opt))
+                               continue;
+
+                       sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "Unknown option `%s`.", opt);
+
+                       if (usage_printed)
+                               continue;
+
+                       fprintf (stderr, "\n%s must be a comma-delimited list of one or more of the following:\n", MONO_GC_PARAMS_NAME);
+                       fprintf (stderr, "  max-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
+                       fprintf (stderr, "  soft-heap-limit=n (where N is an integer, possibly with a k, m or a g suffix)\n");
+                       fprintf (stderr, "  nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
+                       fprintf (stderr, "  major=COLLECTOR (where COLLECTOR is `marksweep', `marksweep-conc', `marksweep-par')\n");
+                       fprintf (stderr, "  minor=COLLECTOR (where COLLECTOR is `simple' or `split')\n");
+                       fprintf (stderr, "  wbarrier=WBARRIER (where WBARRIER is `remset' or `cardtable')\n");
+                       fprintf (stderr, "  [no-]cementing\n");
+                       if (major_collector.is_concurrent)
+                               fprintf (stderr, "  allow-synchronous-major=FLAG (where FLAG is `yes' or `no')\n");
+                       if (major_collector.print_gc_param_usage)
+                               major_collector.print_gc_param_usage ();
+                       if (sgen_minor_collector.print_gc_param_usage)
+                               sgen_minor_collector.print_gc_param_usage ();
+                       sgen_client_print_gc_params_usage ();
+                       fprintf (stderr, " Experimental options:\n");
+                       fprintf (stderr, "  save-target-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO);
+                       fprintf (stderr, "  default-allowance-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO);
+                       fprintf (stderr, "\n");
+
+                       usage_printed = TRUE;
+               }
+               g_strfreev (opts);
+       }
+
+       if (major_collector_opt)
+               g_free (major_collector_opt);
+
+       if (minor_collector_opt)
+               g_free (minor_collector_opt);
+
+       alloc_nursery ();
+
+       if (major_collector.is_concurrent && cement_enabled) {
+               sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`cementing` is not supported on concurrent major collectors.");
+               cement_enabled = FALSE;
+       }
+
+       sgen_cement_init (cement_enabled);
+
+       if ((env = g_getenv (MONO_GC_DEBUG_NAME))) {
+               gboolean usage_printed = FALSE;
+
+               opts = g_strsplit (env, ",", -1);
+               for (ptr = opts; ptr && *ptr; ptr ++) {
+                       char *opt = *ptr;
+                       if (!strcmp (opt, ""))
+                               continue;
+                       if (opt [0] >= '0' && opt [0] <= '9') {
+                               gc_debug_level = atoi (opt);
+                               opt++;
+                               if (opt [0] == ':')
+                                       opt++;
+                               if (opt [0]) {
+                                       char *rf = g_strdup_printf ("%s.%d", opt, mono_process_current_pid ());
+                                       gc_debug_file = fopen (rf, "wb");
+                                       if (!gc_debug_file)
+                                               gc_debug_file = stderr;
+                                       g_free (rf);
+                               }
+                       } else if (!strcmp (opt, "print-allowance")) {
+                               debug_print_allowance = TRUE;
+                       } else if (!strcmp (opt, "print-pinning")) {
+                               sgen_pin_stats_enable ();
+                       } else if (!strcmp (opt, "verify-before-allocs")) {
+                               verify_before_allocs = 1;
+                               has_per_allocation_action = TRUE;
+                       } else if (g_str_has_prefix (opt, "verify-before-allocs=")) {
+                               char *arg = strchr (opt, '=') + 1;
+                               verify_before_allocs = atoi (arg);
+                               has_per_allocation_action = TRUE;
+                       } else if (!strcmp (opt, "collect-before-allocs")) {
+                               collect_before_allocs = 1;
+                               has_per_allocation_action = TRUE;
+                       } else if (g_str_has_prefix (opt, "collect-before-allocs=")) {
+                               char *arg = strchr (opt, '=') + 1;
+                               has_per_allocation_action = TRUE;
+                               collect_before_allocs = atoi (arg);
+                       } else if (!strcmp (opt, "verify-before-collections")) {
+                               whole_heap_check_before_collection = TRUE;
+                       } else if (!strcmp (opt, "check-at-minor-collections")) {
+                               consistency_check_at_minor_collection = TRUE;
+                               nursery_clear_policy = CLEAR_AT_GC;
+                       } else if (!strcmp (opt, "mod-union-consistency-check")) {
+                               if (!major_collector.is_concurrent) {
+                                       sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`mod-union-consistency-check` only works with concurrent major collector.");
+                                       continue;
+                               }
+                               mod_union_consistency_check = TRUE;
+                       } else if (!strcmp (opt, "check-mark-bits")) {
+                               check_mark_bits_after_major_collection = TRUE;
+                       } else if (!strcmp (opt, "check-nursery-pinned")) {
+                               check_nursery_objects_pinned = TRUE;
+                       } else if (!strcmp (opt, "clear-at-gc")) {
+                               nursery_clear_policy = CLEAR_AT_GC;
+                       } else if (!strcmp (opt, "clear-nursery-at-gc")) {
+                               nursery_clear_policy = CLEAR_AT_GC;
+                       } else if (!strcmp (opt, "clear-at-tlab-creation")) {
+                               nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
+                       } else if (!strcmp (opt, "debug-clear-at-tlab-creation")) {
+                               nursery_clear_policy = CLEAR_AT_TLAB_CREATION_DEBUG;
+                       } else if (!strcmp (opt, "check-scan-starts")) {
+                               do_scan_starts_check = TRUE;
+                       } else if (!strcmp (opt, "verify-nursery-at-minor-gc")) {
+                               do_verify_nursery = TRUE;
+                       } else if (!strcmp (opt, "check-concurrent")) {
+                               if (!major_collector.is_concurrent) {
+                                       sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`check-concurrent` only works with concurrent major collectors.");
+                                       continue;
+                               }
+                               do_concurrent_checks = TRUE;
+                       } else if (!strcmp (opt, "dump-nursery-at-minor-gc")) {
+                               do_dump_nursery_content = TRUE;
+                       } else if (!strcmp (opt, "disable-minor")) {
+                               disable_minor_collections = TRUE;
+                       } else if (!strcmp (opt, "disable-major")) {
+                               disable_major_collections = TRUE;
+                       } else if (g_str_has_prefix (opt, "heap-dump=")) {
+                               char *filename = strchr (opt, '=') + 1;
+                               nursery_clear_policy = CLEAR_AT_GC;
+                               sgen_debug_enable_heap_dump (filename);
+                       } else if (g_str_has_prefix (opt, "binary-protocol=")) {
+                               char *filename = strchr (opt, '=') + 1;
+                               char *colon = strrchr (filename, ':');
+                               size_t limit = -1;
+                               if (colon) {
+                                       if (!mono_gc_parse_environment_string_extract_number (colon + 1, &limit)) {
+                                               sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring limit.", "Binary protocol file size limit must be an integer.");
+                                               limit = -1;
+                                       }
+                                       *colon = '\0';
+                               }
+                               binary_protocol_init (filename, (long long)limit);
+                       } else if (!strcmp (opt, "nursery-canaries")) {
+                               do_verify_nursery = TRUE;
+                               enable_nursery_canaries = TRUE;
+                       } else if (!sgen_client_handle_gc_debug (opt)) {
+                               sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "Unknown option `%s`.", opt);
+
+                               if (usage_printed)
+                                       continue;
+
+                               fprintf (stderr, "\n%s must be of the format [<l>[:<filename>]|<option>]+ where <l> is a debug level 0-9.\n", MONO_GC_DEBUG_NAME);
+                               fprintf (stderr, "Valid <option>s are:\n");
+                               fprintf (stderr, "  collect-before-allocs[=<n>]\n");
+                               fprintf (stderr, "  verify-before-allocs[=<n>]\n");
+                               fprintf (stderr, "  check-at-minor-collections\n");
+                               fprintf (stderr, "  check-mark-bits\n");
+                               fprintf (stderr, "  check-nursery-pinned\n");
+                               fprintf (stderr, "  verify-before-collections\n");
+                               fprintf (stderr, "  verify-nursery-at-minor-gc\n");
+                               fprintf (stderr, "  dump-nursery-at-minor-gc\n");
+                               fprintf (stderr, "  disable-minor\n");
+                               fprintf (stderr, "  disable-major\n");
+                               fprintf (stderr, "  check-concurrent\n");
+                               fprintf (stderr, "  clear-[nursery-]at-gc\n");
+                               fprintf (stderr, "  clear-at-tlab-creation\n");
+                               fprintf (stderr, "  debug-clear-at-tlab-creation\n");
+                               fprintf (stderr, "  check-scan-starts\n");
+                               fprintf (stderr, "  print-allowance\n");
+                               fprintf (stderr, "  print-pinning\n");
+                               fprintf (stderr, "  heap-dump=<filename>\n");
+                               fprintf (stderr, "  binary-protocol=<filename>[:<file-size-limit>]\n");
+                               fprintf (stderr, "  nursery-canaries\n");
+                               sgen_client_print_gc_debug_usage ();
+                               fprintf (stderr, "\n");
+
+                               usage_printed = TRUE;
+                       }
+               }
+               g_strfreev (opts);
+       }
+
+       if (check_mark_bits_after_major_collection)
+               nursery_clear_policy = CLEAR_AT_GC;
+
+       if (major_collector.post_param_init)
+               major_collector.post_param_init (&major_collector);
+
+       if (major_collector.needs_thread_pool)
+               sgen_workers_init (1);
+
+       sgen_memgov_init (max_heap, soft_limit, debug_print_allowance, allowance_ratio, save_target);
+
+       memset (&remset, 0, sizeof (remset));
+
+       sgen_card_table_init (&remset);
+
+       gc_initialized = 1;
+}
+
+/*
+ * Returns the active policy for when nursery memory is zeroed; configured
+ * through the MONO_GC_DEBUG options in sgen_gc_init ().
+ */
+NurseryClearPolicy
+sgen_get_nursery_clear_policy (void)
+{
+       return nursery_clear_policy;
+}
+
+/*
+ * Acquire the global GC lock (see LOCK_GC).  Paired with sgen_gc_unlock ().
+ */
+void
+sgen_gc_lock (void)
+{
+       LOCK_GC;
+}
+
+/*
+ * Release the global GC lock.  The "try free some memory" flag is sampled
+ * and cleared while the lock is still held, but the hazard-pointer
+ * reclamation itself runs after unlocking so it does not extend the
+ * critical section.
+ */
+void
+sgen_gc_unlock (void)
+{
+       gboolean try_free = sgen_try_free_some_memory;
+       sgen_try_free_some_memory = FALSE;
+       mono_mutex_unlock (&gc_mutex);
+       if (try_free)
+               mono_thread_hazardous_try_free_some ();
+}
+
+/*
+ * Invoke `callback` for the major heap's live block ranges; simply
+ * forwards to the major collector's implementation.
+ */
+void
+sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
+{
+       major_collector.iterate_live_block_ranges (callback);
+}
+
+/*
+ * Accessor for the (file-static) major collector vtable.
+ */
+SgenMajorCollector*
+sgen_get_major_collector (void)
+{
+       return &major_collector;
+}
+
+/*
+ * Accessor for the (file-static) remembered-set vtable.
+ */
+SgenRememberedSet*
+sgen_get_remset (void)
+{
+       return &remset;
+}
+
+/*
+ * Gather card-table statistics (total and marked card counts) from both
+ * the major collector's heap and the large-object space.
+ */
+static void
+count_cards (long long *major_total, long long *major_marked, long long *los_total, long long *los_marked)
+{
+       SgenMajorCollector *major = sgen_get_major_collector ();
+
+       major->count_cards (major_total, major_marked);
+       sgen_los_count_cards (los_total, los_marked);
+}
+
+static gboolean world_is_stopped = FALSE;
+
+/* LOCKING: assumes the GC lock is held */
+/*
+ * Stop all mutator threads for a collection of `generation`.  The actual
+ * stopping is delegated to the client layer; binary-protocol events are
+ * emitted before and after, with card-count statistics gathered only when
+ * the heavy protocol is enabled (otherwise they stay at -1).
+ */
+void
+sgen_stop_world (int generation)
+{
+       long long major_total = -1, major_marked = -1, los_total = -1, los_marked = -1;
+
+       SGEN_ASSERT (0, !world_is_stopped, "Why are we stopping a stopped world?");
+
+       binary_protocol_world_stopping (generation, sgen_timestamp (), (gpointer)mono_native_thread_id_get ());
+
+       sgen_client_stop_world (generation);
+
+       world_is_stopped = TRUE;
+
+       /* Cards are only counted after the world is stopped, so the numbers
+        * reflect a quiescent heap. */
+       if (binary_protocol_is_heavy_enabled ())
+               count_cards (&major_total, &major_marked, &los_total, &los_marked);
+       binary_protocol_world_stopped (generation, sgen_timestamp (), major_total, major_marked, los_total, los_marked);
+}
+
+/* LOCKING: assumes the GC lock is held */
+/*
+ * Restart the mutator threads after a collection of `generation`.  Card
+ * statistics are gathered (heavy protocol only) while the world is still
+ * stopped; bridge finishing and memory-governor end-of-collection work run
+ * after the restart.  `timing` may be NULL.
+ */
+void
+sgen_restart_world (int generation, GGTimingInfo *timing)
+{
+       long long major_total = -1, major_marked = -1, los_total = -1, los_marked = -1;
+
+       SGEN_ASSERT (0, world_is_stopped, "Why are we restarting a running world?");
+
+       if (binary_protocol_is_heavy_enabled ())
+               count_cards (&major_total, &major_marked, &los_total, &los_marked);
+       binary_protocol_world_restarting (generation, sgen_timestamp (), major_total, major_marked, los_total, los_marked);
+
+       sgen_client_restart_world (generation, timing);
+
+       world_is_stopped = FALSE;
+
+       binary_protocol_world_restarted (generation, sgen_timestamp ());
+
+       /* Ask the next sgen_gc_unlock () to attempt hazard-pointer frees. */
+       sgen_try_free_some_memory = TRUE;
+
+       if (sgen_client_bridge_need_processing ())
+               sgen_client_bridge_processing_finish (generation);
+
+       /* NOTE(review): the last argument looks like a timing-entry count
+        * (2 when timing info is supplied) -- confirm against
+        * sgen_memgov_collection_end. */
+       sgen_memgov_collection_end (generation, timing, timing ? 2 : 0);
+}
+
+/*
+ * Returns TRUE between sgen_stop_world () and sgen_restart_world ().
+ */
+gboolean
+sgen_is_world_stopped (void)
+{
+       return world_is_stopped;
+}
+
+/*
+ * Consistency-check the entire heap with the world stopped.  Nursery
+ * fragments are cleared first so the checker does not trip over unused
+ * fragment memory.
+ */
+void
+sgen_check_whole_heap_stw (void)
+{
+       sgen_stop_world (0);
+       sgen_clear_nursery_fragments ();
+       sgen_check_whole_heap (FALSE);
+       sgen_restart_world (0, NULL);
+}
+
+/*
+ * Time elapsed since sgen_init_timestamp was taken in sgen_gc_init (), in
+ * whatever units SGEN_TV_ELAPSED yields (presumably nanoseconds -- confirm
+ * against the SGEN_TV_* definitions).
+ */
+gint64
+sgen_timestamp (void)
+{
+       SGEN_TV_DECLARE (timestamp);
+       SGEN_TV_GETTIME (timestamp);
+       return SGEN_TV_ELAPSED (sgen_init_timestamp, timestamp);
+}
+
+#endif /* HAVE_SGEN_GC */
diff --git a/mono/sgen/sgen-gc.h b/mono/sgen/sgen-gc.h
new file mode 100644 (file)
index 0000000..35d4cfe
--- /dev/null
@@ -0,0 +1,1071 @@
+/*
+ * sgen-gc.h: Simple generational GC.
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __MONO_SGENGC_H__
+#define __MONO_SGENGC_H__
+
+/* pthread impl */
+#include "config.h"
+
+#ifdef HAVE_SGEN_GC
+
+typedef struct _SgenThreadInfo SgenThreadInfo;
+#undef THREAD_INFO_TYPE
+#define THREAD_INFO_TYPE SgenThreadInfo
+
+#include <glib.h>
+#include <stdio.h>
+#ifdef HAVE_PTHREAD_H
+#include <pthread.h>
+#endif
+#include <stdint.h>
+#include "mono/utils/mono-compiler.h"
+#include "mono/utils/atomic.h"
+#include "mono/utils/mono-mutex.h"
+#include "mono/sgen/sgen-conf.h"
+#include "mono/sgen/sgen-descriptor.h"
+#include "mono/sgen/sgen-gray.h"
+#include "mono/sgen/sgen-hash-table.h"
+#include "mono/sgen/sgen-protocol.h"
+
+/* The method used to clear the nursery */
+/* Clearing at nursery collections is the safest, but has bad interactions with caches.
+ * Clearing at TLAB creation is much faster, but more complex and it might expose hard
+ * to find bugs.
+ */
+typedef enum {
+       CLEAR_AT_GC,
+       CLEAR_AT_TLAB_CREATION,
+       CLEAR_AT_TLAB_CREATION_DEBUG
+} NurseryClearPolicy;
+
+NurseryClearPolicy sgen_get_nursery_clear_policy (void);
+
+#if !defined(__MACH__) && !MONO_MACH_ARCH_SUPPORTED && defined(HAVE_PTHREAD_KILL)
+#define SGEN_POSIX_STW 1
+#endif
+
+/*
+ * The nursery section uses this struct.
+ */
+typedef struct _GCMemSection GCMemSection;
+struct _GCMemSection {
+       char *data;
+       mword size;
+       /* pointer where more data could be allocated if it fits */
+       char *next_data;
+       char *end_data;
+       /*
+        * scan_starts is an array of pointers to objects equally spaced in the allocation area.
+        * They let us quickly find pinned objects from pinning pointers.
+        */
+       char **scan_starts;
+       /* in major collections indexes in the pin_queue for objects that pin this section */
+       size_t pin_queue_first_entry;
+       size_t pin_queue_last_entry;
+       size_t num_scan_start;
+};
+
+/*
+ * Recursion is not allowed for the thread lock.
+ */
+#define LOCK_DECLARE(name) mono_mutex_t name
+/* if changing LOCK_INIT to something that isn't idempotent, look at
+   its use in mono_gc_base_init in sgen-gc.c */
+#define LOCK_INIT(name)        mono_mutex_init (&(name))
+#define LOCK_GC do {                                           \
+               MONO_TRY_BLOCKING       \
+               mono_mutex_lock (&gc_mutex);                    \
+               MONO_FINISH_TRY_BLOCKING        \
+       } while (0)
+#define UNLOCK_GC do { sgen_gc_unlock (); } while (0)
+
+extern LOCK_DECLARE (sgen_interruption_mutex);
+
+#define LOCK_INTERRUPTION mono_mutex_lock (&sgen_interruption_mutex)
+#define UNLOCK_INTERRUPTION mono_mutex_unlock (&sgen_interruption_mutex)
+
+/* FIXME: Use InterlockedAdd & InterlockedAdd64 to reduce the CAS cost. */
+#define SGEN_CAS       InterlockedCompareExchange
+#define SGEN_CAS_PTR   InterlockedCompareExchangePointer
+#define SGEN_ATOMIC_ADD(x,i)   do {                                    \
+               int __old_x;                                            \
+               do {                                                    \
+                       __old_x = (x);                                  \
+               } while (InterlockedCompareExchange (&(x), __old_x + (i), __old_x) != __old_x); \
+       } while (0)
+#define SGEN_ATOMIC_ADD_P(x,i) do { \
+               size_t __old_x;                                            \
+               do {                                                    \
+                       __old_x = (x);                                  \
+               } while (InterlockedCompareExchangePointer ((void**)&(x), (void*)(__old_x + (i)), (void*)__old_x) != (void*)__old_x); \
+       } while (0)
+
+
+#ifndef HOST_WIN32
+/* we intercept pthread_create calls to know which threads exist */
+#define USE_PTHREAD_INTERCEPT 1
+#endif
+
+#ifdef HEAVY_STATISTICS
+extern guint64 stat_objects_alloced_degraded;
+extern guint64 stat_bytes_alloced_degraded;
+extern guint64 stat_copy_object_called_major;
+extern guint64 stat_objects_copied_major;
+#endif
+
+#define SGEN_ASSERT(level, a, ...) do {        \
+       if (G_UNLIKELY ((level) <= SGEN_MAX_ASSERT_LEVEL && !(a))) {    \
+               g_error (__VA_ARGS__);  \
+} } while (0)
+
+
+#define SGEN_LOG(level, format, ...) do {      \
+       if (G_UNLIKELY ((level) <= SGEN_MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) {        \
+               mono_gc_printf (gc_debug_file, format, ##__VA_ARGS__);  \
+} } while (0)
+
+#define SGEN_COND_LOG(level, cond, format, ...) do {   \
+       if (G_UNLIKELY ((level) <= SGEN_MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) {        \
+               if (cond)       \
+                       mono_gc_printf (gc_debug_file, format, ##__VA_ARGS__);  \
+} } while (0)
+
+extern int gc_debug_level;
+extern FILE* gc_debug_file;
+
+extern int current_collection_generation;
+
+extern unsigned int sgen_global_stop_count;
+
+#define SGEN_ALLOC_ALIGN               8
+#define SGEN_ALLOC_ALIGN_BITS  3
+
+/* s must be non-negative */
+#define SGEN_CAN_ALIGN_UP(s)           ((s) <= SIZE_MAX - (SGEN_ALLOC_ALIGN - 1))
+#define SGEN_ALIGN_UP(s)               (((s)+(SGEN_ALLOC_ALIGN-1)) & ~(SGEN_ALLOC_ALIGN-1))
+
+#if SIZEOF_VOID_P == 4
+#define ONE_P 1
+#else
+#define ONE_P 1ll
+#endif
+
+/* Hash an 8-byte-aligned address: the low three bits are always zero, so drop them. */
+static inline guint
+sgen_aligned_addr_hash (gconstpointer ptr)
+{
+       return GPOINTER_TO_UINT (ptr) >> 3;
+}
+
+/*
+ * The link pointer is hidden by negating each bit.  We use the lowest
+ * bit of the link (before negation) to store whether it needs
+ * resurrection tracking.
+ */
+#define HIDE_POINTER(p,t)      ((gpointer)(~((size_t)(p)|((t)?1:0))))
+#define REVEAL_POINTER(p)      ((gpointer)((~(size_t)(p))&~3L))
+
+#define SGEN_PTR_IN_NURSERY(p,bits,start,end)  (((mword)(p) & ~((1 << (bits)) - 1)) == (mword)(start))
+
+#ifdef USER_CONFIG
+
+/* good sizes are 512KB-1MB: larger ones increase a lot memzeroing time */
+#define DEFAULT_NURSERY_SIZE (sgen_nursery_size)
+extern size_t sgen_nursery_size;
+/* The number of trailing 0 bits in DEFAULT_NURSERY_SIZE */
+#define DEFAULT_NURSERY_BITS (sgen_nursery_bits)
+extern int sgen_nursery_bits;
+
+#else
+
+#define DEFAULT_NURSERY_SIZE (4*1024*1024)
+#define DEFAULT_NURSERY_BITS 22
+
+#endif
+
+extern char *sgen_nursery_start;
+extern char *sgen_nursery_end;
+
+/* TRUE if p points into the nursery [sgen_nursery_start, sgen_nursery_end). */
+static inline MONO_ALWAYS_INLINE gboolean
+sgen_ptr_in_nursery (void *p)
+{
+       return SGEN_PTR_IN_NURSERY ((p), DEFAULT_NURSERY_BITS, sgen_nursery_start, sgen_nursery_end);
+}
+
+/* Accessor for the nursery start address. */
+static inline MONO_ALWAYS_INLINE char*
+sgen_get_nursery_start (void)
+{
+       return sgen_nursery_start;
+}
+
+/* Accessor for the nursery end address. */
+static inline MONO_ALWAYS_INLINE char*
+sgen_get_nursery_end (void)
+{
+       return sgen_nursery_end;
+}
+
+/*
+ * We use the lowest three bits in the vtable pointer of objects to tag whether they're
+ * forwarded, pinned, and/or cemented.  These are the valid states:
+ *
+ * | State            | bits |
+ * |------------------+------+
+ * | default          |  000 |
+ * | forwarded        |  001 |
+ * | pinned           |  010 |
+ * | pinned, cemented |  110 |
+ *
+ * We store them in the vtable slot because the bits are used in the sync block for other
+ * purposes: if we merge them and alloc the sync blocks aligned to 8 bytes, we can change
+ * this and use bit 3 in the syncblock (with the lower two bits both set for forwarded, that
+ * would be an invalid combination for the monitor and hash code).
+ */
+
+#include "sgen-tagged-pointer.h"
+
+#define SGEN_VTABLE_BITS_MASK  SGEN_TAGGED_POINTER_MASK
+
+#define SGEN_POINTER_IS_TAGGED_FORWARDED(p)    SGEN_POINTER_IS_TAGGED_1((p))
+#define SGEN_POINTER_TAG_FORWARDED(p)          SGEN_POINTER_TAG_1((p))
+
+#define SGEN_POINTER_IS_TAGGED_PINNED(p)       SGEN_POINTER_IS_TAGGED_2((p))
+#define SGEN_POINTER_TAG_PINNED(p)             SGEN_POINTER_TAG_2((p))
+
+#define SGEN_POINTER_IS_TAGGED_CEMENTED(p)     SGEN_POINTER_IS_TAGGED_4((p))
+#define SGEN_POINTER_TAG_CEMENTED(p)           SGEN_POINTER_TAG_4((p))
+
+#define SGEN_POINTER_UNTAG_VTABLE(p)           SGEN_POINTER_UNTAG_ALL((p))
+
+/* returns NULL if not forwarded, or the forwarded address */
+#define SGEN_VTABLE_IS_FORWARDED(vtable) (SGEN_POINTER_IS_TAGGED_FORWARDED ((vtable)) ? SGEN_POINTER_UNTAG_VTABLE ((vtable)) : NULL)
+#define SGEN_OBJECT_IS_FORWARDED(obj) (SGEN_VTABLE_IS_FORWARDED (((mword*)(obj))[0]))
+
+#define SGEN_VTABLE_IS_PINNED(vtable) SGEN_POINTER_IS_TAGGED_PINNED ((vtable))
+#define SGEN_OBJECT_IS_PINNED(obj) (SGEN_VTABLE_IS_PINNED (((mword*)(obj))[0]))
+
+#define SGEN_OBJECT_IS_CEMENTED(obj) (SGEN_POINTER_IS_TAGGED_CEMENTED (((mword*)(obj))[0]))
+
+/* set the forwarded address fw_addr for object obj */
+#define SGEN_FORWARD_OBJECT(obj,fw_addr) do {                          \
+               *(void**)(obj) = SGEN_POINTER_TAG_FORWARDED ((fw_addr));        \
+       } while (0)
+#define SGEN_PIN_OBJECT(obj) do {      \
+               *(void**)(obj) = SGEN_POINTER_TAG_PINNED (*(void**)(obj)); \
+       } while (0)
+#define SGEN_CEMENT_OBJECT(obj) do {   \
+               *(void**)(obj) = SGEN_POINTER_TAG_CEMENTED (*(void**)(obj)); \
+       } while (0)
+/* Unpins and uncements */
+#define SGEN_UNPIN_OBJECT(obj) do {    \
+               *(void**)(obj) = SGEN_POINTER_UNTAG_VTABLE (*(void**)(obj)); \
+       } while (0)
+
+/*
+ * Since we set bits in the vtable, use the macro to load it from the pointer to
+ * an object that is potentially pinned.
+ */
+#define SGEN_LOAD_VTABLE(obj)          SGEN_POINTER_UNTAG_ALL (SGEN_LOAD_VTABLE_UNCHECKED ((obj)))
+
+/*
+List of what each of the vtable GC bits means.
+*/
+enum {
+       SGEN_GC_BIT_BRIDGE_OBJECT = 1,
+       SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT = 2,
+       SGEN_GC_BIT_FINALIZER_AWARE = 4,
+};
+
+/* the runtime can register areas of memory as roots: we keep two lists of roots,
+ * a pinned root set for conservatively scanned roots and a normal one for
+ * precisely scanned roots (currently implemented as a single list).
+ */
+typedef struct _RootRecord RootRecord;
+struct _RootRecord {
+       char *end_root;
+       mword root_desc;
+};
+
+enum {
+       ROOT_TYPE_NORMAL = 0, /* "normal" roots */
+       ROOT_TYPE_PINNED = 1, /* roots without a GC descriptor */
+       ROOT_TYPE_WBARRIER = 2, /* roots with a write barrier */
+       ROOT_TYPE_NUM
+};
+
+extern SgenHashTable roots_hash [ROOT_TYPE_NUM];
+
+int sgen_register_root (char *start, size_t size, void *descr, int root_type);
+void sgen_deregister_root (char* addr);
+
+typedef void (*IterateObjectCallbackFunc) (char*, size_t, void*);
+
+void sgen_gc_init (void);
+
+void sgen_os_init (void);
+
+void sgen_update_heap_boundaries (mword low, mword high);
+
+void sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags);
+void sgen_check_section_scan_starts (GCMemSection *section);
+
+void sgen_conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type);
+
+/* Keep in sync with description_for_type() in sgen-internal.c! */
+enum {
+       INTERNAL_MEM_PIN_QUEUE,
+       INTERNAL_MEM_FRAGMENT,
+       INTERNAL_MEM_SECTION,
+       INTERNAL_MEM_SCAN_STARTS,
+       INTERNAL_MEM_FIN_TABLE,
+       INTERNAL_MEM_FINALIZE_ENTRY,
+       INTERNAL_MEM_FINALIZE_READY,
+       INTERNAL_MEM_DISLINK_TABLE,
+       INTERNAL_MEM_DISLINK,
+       INTERNAL_MEM_ROOTS_TABLE,
+       INTERNAL_MEM_ROOT_RECORD,
+       INTERNAL_MEM_STATISTICS,
+       INTERNAL_MEM_STAT_PINNED_CLASS,
+       INTERNAL_MEM_STAT_REMSET_CLASS,
+       INTERNAL_MEM_GRAY_QUEUE,
+       INTERNAL_MEM_MS_TABLES,
+       INTERNAL_MEM_MS_BLOCK_INFO,
+       INTERNAL_MEM_MS_BLOCK_INFO_SORT,
+       INTERNAL_MEM_WORKER_DATA,
+       INTERNAL_MEM_THREAD_POOL_JOB,
+       INTERNAL_MEM_BRIDGE_DATA,
+       INTERNAL_MEM_OLD_BRIDGE_HASH_TABLE,
+       INTERNAL_MEM_OLD_BRIDGE_HASH_TABLE_ENTRY,
+       INTERNAL_MEM_BRIDGE_HASH_TABLE,
+       INTERNAL_MEM_BRIDGE_HASH_TABLE_ENTRY,
+       INTERNAL_MEM_BRIDGE_ALIVE_HASH_TABLE,
+       INTERNAL_MEM_BRIDGE_ALIVE_HASH_TABLE_ENTRY,
+       INTERNAL_MEM_TARJAN_BRIDGE_HASH_TABLE,
+       INTERNAL_MEM_TARJAN_BRIDGE_HASH_TABLE_ENTRY,
+       INTERNAL_MEM_TARJAN_OBJ_BUCKET,
+       INTERNAL_MEM_BRIDGE_DEBUG,
+       INTERNAL_MEM_TOGGLEREF_DATA,
+       INTERNAL_MEM_CARDTABLE_MOD_UNION,
+       INTERNAL_MEM_BINARY_PROTOCOL,
+       INTERNAL_MEM_TEMPORARY,
+       INTERNAL_MEM_FIRST_CLIENT
+};
+
+enum {
+       GENERATION_NURSERY,
+       GENERATION_OLD,
+       GENERATION_MAX
+};
+
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+#define BINARY_PROTOCOL_ARG(x) ,x
+#else
+#define BINARY_PROTOCOL_ARG(x)
+#endif
+
+void sgen_init_internal_allocator (void);
+
+#define SGEN_DEFINE_OBJECT_VTABLE
+#ifdef SGEN_CLIENT_HEADER
+#include SGEN_CLIENT_HEADER
+#else
+#include "metadata/sgen-client-mono.h"
+#endif
+#undef SGEN_DEFINE_OBJECT_VTABLE
+
+/* eventually share with MonoThread? */
+/*
+ * This structure extends the MonoThreadInfo structure.
+ */
+struct _SgenThreadInfo {
+       SgenClientThreadInfo client_info;
+
+       char **tlab_next_addr;
+       char **tlab_start_addr;
+       char **tlab_temp_end_addr;
+       char **tlab_real_end_addr;
+
+#ifndef HAVE_KW_THREAD
+       char *tlab_start;
+       char *tlab_next;
+       char *tlab_temp_end;
+       char *tlab_real_end;
+#endif
+};
+
+gboolean sgen_is_worker_thread (MonoNativeThreadId thread);
+
+typedef void (*CopyOrMarkObjectFunc) (void**, SgenGrayQueue*);
+typedef void (*ScanObjectFunc) (char *obj, mword desc, SgenGrayQueue*);
+typedef void (*ScanVTypeFunc) (char *full_object, char *start, mword desc, SgenGrayQueue* BINARY_PROTOCOL_ARG (size_t size));
+
+typedef struct {
+       CopyOrMarkObjectFunc copy_or_mark_object;
+       ScanObjectFunc scan_object;
+       ScanVTypeFunc scan_vtype;
+       /*FIXME add allocation function? */
+} SgenObjectOperations;
+
+typedef struct
+{
+       SgenObjectOperations *ops;
+       SgenGrayQueue *queue;
+} ScanCopyContext;
+
+#define CONTEXT_FROM_OBJECT_OPERATIONS(ops, queue) ((ScanCopyContext) { (ops), (queue) })
+
+void sgen_report_internal_mem_usage (void);
+void sgen_dump_internal_mem_usage (FILE *heap_dump_file);
+void sgen_dump_section (GCMemSection *section, const char *type);
+void sgen_dump_occupied (char *start, char *end, char *section_start);
+
+void sgen_register_fixed_internal_mem_type (int type, size_t size);
+
+void* sgen_alloc_internal (int type);
+void sgen_free_internal (void *addr, int type);
+
+void* sgen_alloc_internal_dynamic (size_t size, int type, gboolean assert_on_failure);
+void sgen_free_internal_dynamic (void *addr, size_t size, int type);
+
+void sgen_pin_stats_enable (void);
+void sgen_pin_stats_register_object (char *obj, size_t size);
+void sgen_pin_stats_register_global_remset (char *obj);
+void sgen_pin_stats_print_class_stats (void);
+
+void sgen_sort_addresses (void **array, size_t size);
+void sgen_add_to_global_remset (gpointer ptr, gpointer obj);
+
+int sgen_get_current_collection_generation (void);
+gboolean sgen_collection_is_concurrent (void);
+gboolean sgen_concurrent_collection_in_progress (void);
+
+typedef struct _SgenFragment SgenFragment;
+
+struct _SgenFragment {
+       SgenFragment *next;
+       char *fragment_start;
+       char *fragment_next; /* the current soft limit for allocation */
+       char *fragment_end;
+       SgenFragment *next_in_order; /* We use a different entry for all active fragments so we can avoid SMR. */
+};
+
+typedef struct {
+       SgenFragment *alloc_head; /* List head to be used when allocating memory. Walk with fragment_next. */
+       SgenFragment *region_head; /* List head of the region used by this allocator. Walk with next_in_order. */
+} SgenFragmentAllocator;
+
+void sgen_fragment_allocator_add (SgenFragmentAllocator *allocator, char *start, char *end);
+void sgen_fragment_allocator_release (SgenFragmentAllocator *allocator);
+void* sgen_fragment_allocator_serial_alloc (SgenFragmentAllocator *allocator, size_t size);
+void* sgen_fragment_allocator_par_alloc (SgenFragmentAllocator *allocator, size_t size);
+void* sgen_fragment_allocator_serial_range_alloc (SgenFragmentAllocator *allocator, size_t desired_size, size_t minimum_size, size_t *out_alloc_size);
+void* sgen_fragment_allocator_par_range_alloc (SgenFragmentAllocator *allocator, size_t desired_size, size_t minimum_size, size_t *out_alloc_size);
+SgenFragment* sgen_fragment_allocator_alloc (void);
+void sgen_clear_allocator_fragments (SgenFragmentAllocator *allocator);
+void sgen_clear_range (char *start, char *end);
+
+
+/*
+This is a space/speed compromise as we need to make sure the from/to space check is both O(1)
+and only hit cache hot memory. On a 4Mb nursery it requires 1024 bytes, or 3% of your average
+L1 cache. On small configs with a 512kb nursery, this goes to 0.4%.
+
+Experimental results on how much space we waste with a 4Mb nursery:
+
+Note that the wastage applies to the half nursery, or 2Mb:
+
+Test 1 (compiling corlib):
+9: avg: 3.1k
+8: avg: 1.6k
+
+*/
+#define SGEN_TO_SPACE_GRANULE_BITS 9
+#define SGEN_TO_SPACE_GRANULE_IN_BYTES (1 << SGEN_TO_SPACE_GRANULE_BITS)
+
+extern char *sgen_space_bitmap;
+extern size_t sgen_space_bitmap_size;
+
+/*
+ * TRUE if object lies in a to-space granule of the nursery, according to
+ * sgen_space_bitmap (one bit per SGEN_TO_SPACE_GRANULE_IN_BYTES granule).
+ */
+static inline gboolean
+sgen_nursery_is_to_space (char *object)
+{
+       size_t idx = (object - sgen_nursery_start) >> SGEN_TO_SPACE_GRANULE_BITS;
+       size_t byte = idx >> 3;
+       size_t bit = idx & 0x7;
+
+       SGEN_ASSERT (4, sgen_ptr_in_nursery (object), "object %p is not in nursery [%p - %p]", object, sgen_get_nursery_start (), sgen_get_nursery_end ());
+       SGEN_ASSERT (4, byte < sgen_space_bitmap_size, "byte index %zd out of range (%zd)", byte, sgen_space_bitmap_size);
+
+       return (sgen_space_bitmap [byte] & (1 << bit)) != 0;
+}
+
+/* Inverse of sgen_nursery_is_to_space (). */
+static inline gboolean
+sgen_nursery_is_from_space (char *object)
+{
+       return !sgen_nursery_is_to_space (object);
+}
+
+/* A nursery object is alive if it is in to-space, pinned, or already forwarded. */
+static inline gboolean
+sgen_nursery_is_object_alive (char *obj)
+{
+       /* FIXME: put these asserts under a non-default assertion level */
+       g_assert (sgen_ptr_in_nursery (obj));
+
+       if (sgen_nursery_is_to_space (obj))
+               return TRUE;
+
+       if (SGEN_OBJECT_IS_PINNED (obj) || SGEN_OBJECT_IS_FORWARDED (obj))
+               return TRUE;
+
+       return FALSE;
+}
+
+typedef struct {
+       gboolean is_split;
+
+       char* (*alloc_for_promotion) (GCVTable *vtable, char *obj, size_t objsize, gboolean has_references);
+
+       SgenObjectOperations serial_ops;
+
+       void (*prepare_to_space) (char *to_space_bitmap, size_t space_bitmap_size);
+       void (*clear_fragments) (void);
+       SgenFragment* (*build_fragments_get_exclude_head) (void);
+       void (*build_fragments_release_exclude_head) (void);
+       void (*build_fragments_finish) (SgenFragmentAllocator *allocator);
+       void (*init_nursery) (SgenFragmentAllocator *allocator, char *start, char *end);
+
+       gboolean (*handle_gc_param) (const char *opt); /* Optional */
+       void (*print_gc_param_usage) (void); /* Optional */
+} SgenMinorCollector;
+
+extern SgenMinorCollector sgen_minor_collector;
+
+void sgen_simple_nursery_init (SgenMinorCollector *collector);
+void sgen_split_nursery_init (SgenMinorCollector *collector);
+
+/* Updating references */
+
+#ifdef SGEN_CHECK_UPDATE_REFERENCE
+gboolean sgen_thread_pool_is_thread_pool_thread (MonoNativeThreadId some_thread) MONO_INTERNAL;
+/*
+ * Debug helper (SGEN_CHECK_UPDATE_REFERENCE builds only): store o into *p,
+ * asserting that o is non-NULL unless allow_null, and that the caller is not
+ * a worker thread.
+ */
+static inline void
+sgen_update_reference (void **p, void *o, gboolean allow_null)
+{
+       if (!allow_null)
+               SGEN_ASSERT (0, o, "Cannot update a reference with a NULL pointer");
+       SGEN_ASSERT (0, !sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ()), "Can't update a reference in the worker thread");
+       *p = o;
+}
+
+#define SGEN_UPDATE_REFERENCE_ALLOW_NULL(p,o)  sgen_update_reference ((void**)(p), (void*)(o), TRUE)
+#define SGEN_UPDATE_REFERENCE(p,o)             sgen_update_reference ((void**)(p), (void*)(o), FALSE)
+#else
+#define SGEN_UPDATE_REFERENCE_ALLOW_NULL(p,o)  (*(void**)(p) = (void*)(o))
+#define SGEN_UPDATE_REFERENCE(p,o)             SGEN_UPDATE_REFERENCE_ALLOW_NULL ((p), (o))
+#endif
+
+/* Major collector */
+
+typedef void (*sgen_cardtable_block_callback) (mword start, mword size);
+void sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback);
+
+typedef enum {
+       ITERATE_OBJECTS_SWEEP = 1,
+       ITERATE_OBJECTS_NON_PINNED = 2,
+       ITERATE_OBJECTS_PINNED = 4,
+       ITERATE_OBJECTS_ALL = ITERATE_OBJECTS_NON_PINNED | ITERATE_OBJECTS_PINNED,
+       ITERATE_OBJECTS_SWEEP_NON_PINNED = ITERATE_OBJECTS_SWEEP | ITERATE_OBJECTS_NON_PINNED,
+       ITERATE_OBJECTS_SWEEP_PINNED = ITERATE_OBJECTS_SWEEP | ITERATE_OBJECTS_PINNED,
+       ITERATE_OBJECTS_SWEEP_ALL = ITERATE_OBJECTS_SWEEP | ITERATE_OBJECTS_NON_PINNED | ITERATE_OBJECTS_PINNED
+} IterateObjectsFlags;
+
+typedef struct
+{
+       size_t num_scanned_objects;
+       size_t num_unique_scanned_objects;
+} ScannedObjectCounts;
+
+typedef struct _SgenMajorCollector SgenMajorCollector;
+struct _SgenMajorCollector {
+       size_t section_size;
+       gboolean is_concurrent;
+       gboolean needs_thread_pool;
+       gboolean supports_cardtable;
+       gboolean sweeps_lazily;
+
+       /*
+        * This is set to TRUE by the sweep if the next major
+        * collection should be synchronous (for evacuation).  For
+        * non-concurrent collectors, this should be NULL.
+        */
+       gboolean *want_synchronous_collection;
+
+       void* (*alloc_heap) (mword nursery_size, mword nursery_align, int nursery_bits);
+       gboolean (*is_object_live) (char *obj);
+       void* (*alloc_small_pinned_obj) (GCVTable *vtable, size_t size, gboolean has_references);
+       void* (*alloc_degraded) (GCVTable *vtable, size_t size);
+
+       SgenObjectOperations major_ops_serial;
+       SgenObjectOperations major_ops_concurrent_start;
+       SgenObjectOperations major_ops_concurrent;
+       SgenObjectOperations major_ops_concurrent_finish;
+
+       void* (*alloc_object) (GCVTable *vtable, size_t size, gboolean has_references);
+       void (*free_pinned_object) (char *obj, size_t size);
+
+       /*
+        * This is used for domain unloading, heap walking from the logging profiler, and
+        * debugging.  Can assume the world is stopped.
+        */
+       void (*iterate_objects) (IterateObjectsFlags flags, IterateObjectCallbackFunc callback, void *data);
+
+       void (*free_non_pinned_object) (char *obj, size_t size);
+       void (*pin_objects) (SgenGrayQueue *queue);
+       void (*pin_major_object) (char *obj, SgenGrayQueue *queue);
+       void (*scan_card_table) (gboolean mod_union, ScanCopyContext ctx);
+       void (*iterate_live_block_ranges) (sgen_cardtable_block_callback callback);
+       void (*update_cardtable_mod_union) (void);
+       void (*init_to_space) (void);
+       void (*sweep) (void);
+       gboolean (*have_swept) (void);
+       void (*finish_sweeping) (void);
+       void (*free_swept_blocks) (size_t allowance);
+       void (*check_scan_starts) (void);
+       void (*dump_heap) (FILE *heap_dump_file);
+       gint64 (*get_used_size) (void);
+       void (*start_nursery_collection) (void);
+       void (*finish_nursery_collection) (void);
+       void (*start_major_collection) (void);
+       void (*finish_major_collection) (ScannedObjectCounts *counts);
+       gboolean (*drain_gray_stack) (ScanCopyContext ctx);
+       gboolean (*ptr_is_in_non_pinned_space) (char *ptr, char **start);
+       gboolean (*obj_is_from_pinned_alloc) (char *obj);
+       void (*report_pinned_memory_usage) (void);
+       size_t (*get_num_major_sections) (void);
+       size_t (*get_bytes_survived_last_sweep) (void);
+       gboolean (*handle_gc_param) (const char *opt);
+       void (*print_gc_param_usage) (void);
+       void (*post_param_init) (SgenMajorCollector *collector);
+       gboolean (*is_valid_object) (char *object);
+       GCVTable* (*describe_pointer) (char *pointer);
+       guint8* (*get_cardtable_mod_union_for_object) (char *object);
+       long long (*get_and_reset_num_major_objects_marked) (void);
+       void (*count_cards) (long long *num_total_cards, long long *num_marked_cards);
+};
+
+extern SgenMajorCollector major_collector;
+
+void sgen_marksweep_init (SgenMajorCollector *collector);
+void sgen_marksweep_fixed_init (SgenMajorCollector *collector);
+void sgen_marksweep_par_init (SgenMajorCollector *collector);
+void sgen_marksweep_fixed_par_init (SgenMajorCollector *collector);
+void sgen_marksweep_conc_init (SgenMajorCollector *collector);
+SgenMajorCollector* sgen_get_major_collector (void);
+
+
+typedef struct _SgenRememberedSet {
+       void (*wbarrier_set_field) (GCObject *obj, gpointer field_ptr, GCObject* value);
+       void (*wbarrier_arrayref_copy) (gpointer dest_ptr, gpointer src_ptr, int count);
+       void (*wbarrier_value_copy) (gpointer dest, gpointer src, int count, size_t element_size);
+       void (*wbarrier_object_copy) (GCObject* obj, GCObject *src);
+       void (*wbarrier_generic_nostore) (gpointer ptr);
+       void (*record_pointer) (gpointer ptr);
+
+       void (*scan_remsets) (ScanCopyContext ctx);
+
+       void (*clear_cards) (void);
+
+       void (*finish_minor_collection) (void);
+       gboolean (*find_address) (char *addr);
+       gboolean (*find_address_with_cards) (char *cards_start, guint8 *cards, char *addr);
+} SgenRememberedSet;
+
+SgenRememberedSet *sgen_get_remset (void);
+
+/*
+ * These must be kept in sync with object.h.  They're here for using SGen independently of
+ * Mono.
+ */
+void mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count);
+void mono_gc_wbarrier_generic_nostore (gpointer ptr);
+void mono_gc_wbarrier_generic_store (gpointer ptr, GCObject* value);
+void mono_gc_wbarrier_generic_store_atomic (gpointer ptr, GCObject *value);
+
+void sgen_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap);
+
+/* Descriptor of obj's vtable; asserts the vtable pointer carries no tag bits. */
+static inline mword
+sgen_obj_get_descriptor (char *obj)
+{
+       GCVTable *vtable = SGEN_LOAD_VTABLE_UNCHECKED (obj);
+       SGEN_ASSERT (9, !SGEN_POINTER_IS_TAGGED_ANY (vtable), "Object can't be tagged");
+       return sgen_vtable_get_descriptor (vtable);
+}
+
+/* Like sgen_obj_get_descriptor (), but strips any pin/forward/cement tag bits first. */
+static inline mword
+sgen_obj_get_descriptor_safe (char *obj)
+{
+       GCVTable *vtable = (GCVTable*)SGEN_LOAD_VTABLE (obj);
+       return sgen_vtable_get_descriptor (vtable);
+}
+
+/* Size of obj, following the forwarding pointer if the object has been moved. */
+static inline mword
+sgen_safe_object_get_size (GCObject *obj)
+{
+       char *forwarded;
+
+       if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj)))
+               obj = (GCObject*)forwarded;
+
+       return sgen_client_par_object_get_size ((GCVTable*)SGEN_LOAD_VTABLE (obj), obj);
+}
+
+/* TRUE if obj is small; small descriptor types skip the size lookup entirely. */
+static inline gboolean
+sgen_safe_object_is_small (GCObject *obj, int type)
+{
+       if (type <= DESC_TYPE_MAX_SMALL_OBJ)
+               return TRUE;
+       return SGEN_ALIGN_UP (sgen_safe_object_get_size ((GCObject*)obj)) <= SGEN_MAX_SMALL_OBJ_SIZE;
+}
+
+/*
+ * This variant guarantees to return the exact size of the object
+ * before alignment. Needed for canary support.
+ */
+static inline guint
+sgen_safe_object_get_size_unaligned (GCObject *obj)
+{
+       char *forwarded;
+
+       if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
+               obj = (GCObject*)forwarded;
+       }
+
+       return sgen_client_slow_object_get_size ((GCVTable*)SGEN_LOAD_VTABLE (obj), obj);
+}
+
+#ifdef SGEN_CLIENT_HEADER
+#include SGEN_CLIENT_HEADER
+#else
+#include "metadata/sgen-client-mono.h"
+#endif
+
+gboolean sgen_object_is_live (void *obj);
+
+void  sgen_init_fin_weak_hash (void);
+
+/* FIXME: move the toggleref stuff out of here */
+void sgen_mark_togglerefs (char *start, char *end, ScanCopyContext ctx);
+void sgen_clear_togglerefs (char *start, char *end, ScanCopyContext ctx);
+
+void sgen_process_togglerefs (void);
+void sgen_register_test_toggleref_callback (void);
+
+void sgen_mark_bridge_object (GCObject *obj);
+void sgen_collect_bridge_objects (int generation, ScanCopyContext ctx);
+
+typedef gboolean (*SgenObjectPredicateFunc) (GCObject *obj, void *user_data);
+
+void sgen_null_links_if (SgenObjectPredicateFunc predicate, void *data, int generation);
+
+gboolean sgen_gc_is_object_ready_for_finalization (void *object);
+void sgen_gc_lock (void);
+void sgen_gc_unlock (void);
+
+void sgen_queue_finalization_entry (GCObject *obj);
+const char* sgen_generation_name (int generation);
+
+void sgen_finalize_in_range (int generation, ScanCopyContext ctx);
+void sgen_null_link_in_range (int generation, gboolean before_finalization, ScanCopyContext ctx);
+void sgen_process_fin_stage_entries (void);
+gboolean sgen_have_pending_finalizers (void);
+void sgen_object_register_for_finalization (GCObject *obj, void *user_data);
+
+int sgen_gather_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, GCObject **out_array, int out_size);
+void sgen_remove_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, int generation);
+
+void sgen_process_dislink_stage_entries (void);
+void sgen_register_disappearing_link (GCObject *obj, void **link, gboolean track, gboolean in_gc);
+
+GCObject* sgen_weak_link_get (void **link_addr);
+
+gboolean sgen_drain_gray_stack (int max_objs, ScanCopyContext ctx);
+
+enum {
+       SPACE_NURSERY,
+       SPACE_MAJOR,
+       SPACE_LOS
+};
+
+void sgen_pin_object (void *object, SgenGrayQueue *queue);
+void sgen_set_pinned_from_failed_allocation (mword objsize);
+
+void sgen_ensure_free_space (size_t size);
+void sgen_gc_collect (int generation);
+void sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish);
+
+int sgen_gc_collection_count (int generation);
+/* FIXME: what exactly does this return? */
+size_t sgen_gc_get_used_size (void);
+size_t sgen_gc_get_total_heap_allocation (void);
+
+/* STW */
+
+typedef struct {
+       int generation;
+       const char *reason;
+       gboolean is_overflow;
+       gint64 total_time;
+       gint64 stw_time;
+       gint64 bridge_time;
+} GGTimingInfo;
+
+void sgen_stop_world (int generation);
+void sgen_restart_world (int generation, GGTimingInfo *timing);
+gboolean sgen_is_world_stopped (void);
+
+gboolean sgen_set_allow_synchronous_major (gboolean flag);
+
+/* LOS */
+
+typedef struct _LOSObject LOSObject;
+struct _LOSObject {
+       LOSObject *next;
+       mword size; /* this is the object size, lowest bit used for pin/mark */
+       guint8 * volatile cardtable_mod_union; /* only used by the concurrent collector */
+#if SIZEOF_VOID_P < 8
+       mword dummy;            /* to align object to sizeof (double) */
+#endif
+       char data [MONO_ZERO_LEN_ARRAY];
+};
+
+extern LOSObject *los_object_list;
+extern mword los_memory_usage;
+
+void sgen_los_free_object (LOSObject *obj);
+void* sgen_los_alloc_large_inner (GCVTable *vtable, size_t size);
+void sgen_los_sweep (void);
+gboolean sgen_ptr_is_in_los (char *ptr, char **start);
+void sgen_los_iterate_objects (IterateObjectCallbackFunc cb, void *user_data);
+void sgen_los_iterate_live_block_ranges (sgen_cardtable_block_callback callback);
+void sgen_los_scan_card_table (gboolean mod_union, ScanCopyContext ctx);
+void sgen_los_update_cardtable_mod_union (void);
+void sgen_los_count_cards (long long *num_total_cards, long long *num_marked_cards);
+gboolean sgen_los_is_valid_object (char *object);
+gboolean mono_sgen_los_describe_pointer (char *ptr);
+LOSObject* sgen_los_header_for_object (char *data);
+mword sgen_los_object_size (LOSObject *obj);
+void sgen_los_pin_object (char *obj);
+gboolean sgen_los_object_is_pinned (char *obj);
+void sgen_los_mark_mod_union_card (GCObject *mono_obj, void **ptr);
+
+
+/* nursery allocator */
+
+void sgen_clear_nursery_fragments (void);
+void sgen_nursery_allocator_prepare_for_pinning (void);
+void sgen_nursery_allocator_set_nursery_bounds (char *nursery_start, char *nursery_end);
+mword sgen_build_nursery_fragments (GCMemSection *nursery_section, SgenGrayQueue *unpin_queue);
+void sgen_init_nursery_allocator (void);
+void sgen_nursery_allocator_init_heavy_stats (void);
+void sgen_init_allocator (void);
+char* sgen_nursery_alloc_get_upper_alloc_bound (void);
+void* sgen_nursery_alloc (size_t size);
+void* sgen_nursery_alloc_range (size_t size, size_t min_size, size_t *out_alloc_size);
+gboolean sgen_can_alloc_size (size_t size);
+void sgen_nursery_retire_region (void *address, ptrdiff_t size);
+
+void sgen_nursery_alloc_prepare_for_minor (void);
+void sgen_nursery_alloc_prepare_for_major (void);
+
+char* sgen_alloc_for_promotion (char *obj, size_t objsize, gboolean has_references);
+
+void* sgen_alloc_obj_nolock (GCVTable *vtable, size_t size);
+void* sgen_try_alloc_obj_nolock (GCVTable *vtable, size_t size);
+
+/* Threads */
+
+void* sgen_thread_register (SgenThreadInfo* info, void *addr);
+void sgen_thread_unregister (SgenThreadInfo *p);
+
+/* Finalization/ephemeron support */
+
+static inline gboolean /* TRUE if @object (assumed to be in the major heap or LOS) is live in the current major collection. */
+sgen_major_is_object_alive (void *object)
+{
+       mword objsize;
+
+       /* Oldgen objects can be pinned and forwarded too */
+       if (SGEN_OBJECT_IS_PINNED (object) || SGEN_OBJECT_IS_FORWARDED (object))
+               return TRUE;
+
+       /*
+        * FIXME: major_collector.is_object_live() also calculates the
+        * size.  Avoid the double calculation.
+        */
+       objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((GCObject*)object));
+       if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
+               return sgen_los_object_is_pinned (object); /* large objects live in the LOS; there "pinned" doubles as the mark bit */
+
+       return major_collector.is_object_live (object); /* small object: ask the active major collector */
+}
+
+/*
+ * This function returns true if @object is either alive or it belongs to the old gen
+ * and we're currently doing a minor collection.
+ */
+static inline int /* Liveness check scoped to the generation being collected (see comment above). */
+sgen_is_object_alive_for_current_gen (char *object)
+{
+       if (sgen_ptr_in_nursery (object))
+               return sgen_nursery_is_object_alive (object); /* nursery objects always get a real liveness check */
+
+       if (current_collection_generation == GENERATION_NURSERY)
+               return TRUE; /* minor collection: old-gen objects are conservatively treated as alive */
+
+       return sgen_major_is_object_alive (object);
+}
+
+int sgen_gc_invoke_finalizers (void);
+
+/* Other globals */
+
+extern GCMemSection *nursery_section;
+extern guint32 collect_before_allocs;
+extern guint32 verify_before_allocs;
+extern gboolean has_per_allocation_action;
+extern size_t degraded_mode;
+extern int default_nursery_size;
+extern guint32 tlab_size;
+extern NurseryClearPolicy nursery_clear_policy;
+extern gboolean sgen_try_free_some_memory;
+
+extern LOCK_DECLARE (gc_mutex);
+
+/* Nursery helpers. */
+
+static inline void /* Record @p as a scan-start candidate for its SGEN_SCAN_START_SIZE-sized nursery chunk. */
+sgen_set_nursery_scan_start (char *p)
+{
+       size_t idx = (p - (char*)nursery_section->data) / SGEN_SCAN_START_SIZE; /* chunk index of p within the nursery */
+       char *old = nursery_section->scan_starts [idx];
+       if (!old || old > p)
+               nursery_section->scan_starts [idx] = p; /* keep the lowest object address seen per chunk */
+}
+
+
+/* Object Allocation */
+
+typedef enum {
+       ATYPE_NORMAL,
+       ATYPE_VECTOR,
+       ATYPE_SMALL,
+       ATYPE_STRING,
+       ATYPE_NUM
+} SgenAllocatorType;
+
+void sgen_init_tlab_info (SgenThreadInfo* info);
+void sgen_clear_tlabs (void);
+
+void* sgen_alloc_obj (GCVTable *vtable, size_t size);
+void* sgen_alloc_obj_pinned (GCVTable *vtable, size_t size);
+void* sgen_alloc_obj_mature (GCVTable *vtable, size_t size);
+
+/* Debug support */
+
+void sgen_check_consistency (void);
+void sgen_check_mod_union_consistency (void);
+void sgen_check_major_refs (void);
+void sgen_check_whole_heap (gboolean allow_missing_pinning);
+void sgen_check_whole_heap_stw (void);
+void sgen_check_objref (char *obj);
+void sgen_check_heap_marked (gboolean nursery_must_be_pinned);
+void sgen_check_nursery_objects_pinned (gboolean pinned);
+void sgen_check_for_xdomain_refs (void);
+char* sgen_find_object_for_ptr (char *ptr);
+
+void mono_gc_scan_for_specific_ref (GCObject *key, gboolean precise);
+
+void sgen_debug_enable_heap_dump (const char *filename);
+void sgen_debug_dump_heap (const char *type, int num, const char *reason);
+
+void sgen_debug_verify_nursery (gboolean do_dump_nursery_content);
+void sgen_debug_check_nursery_is_clean (void);
+
+/* Write barrier support */
+
+/*
+ * This causes the compiler to extend the liveness of 'v' till the call to dummy_use
+ */
+static inline void /* Compiler barrier: keeps 'v' observably live up to this point (see comment above). */
+sgen_dummy_use (gpointer v) {
+#if defined(__GNUC__)
+       __asm__ volatile ("" : "=r"(v) : "r"(v)); /* empty asm with v as in/out forces the compiler to materialize it */
+#elif defined(_MSC_VER)
+       static volatile gpointer ptr;
+       ptr = v; /* volatile store cannot be optimized away */
+#else
+#error "Implement sgen_dummy_use for your compiler"
+#endif
+}
+
+/* Environment variable parsing */
+
+#define MONO_GC_PARAMS_NAME    "MONO_GC_PARAMS"
+#define MONO_GC_DEBUG_NAME     "MONO_GC_DEBUG"
+
+void sgen_env_var_error (const char *env_var, const char *fallback, const char *description_format, ...);
+
+/* Utilities */
+
+void sgen_qsort (void *base, size_t nel, size_t width, int (*compar) (const void*, const void*));
+gint64 sgen_timestamp (void);
+
+/*
+ * Canary (guard word) support
+ * Notes:
+ * - CANARY_SIZE must be multiple of word size in bytes
+ * - Canary space is not included on checks against SGEN_MAX_SMALL_OBJ_SIZE
+ */
+gboolean nursery_canaries_enabled (void);
+
+#define CANARY_SIZE 8
+#define CANARY_STRING  "koupepia"
+
+#define CANARIFY_SIZE(size) if (nursery_canaries_enabled ()) { \
+                       size = size + CANARY_SIZE;      \
+               }
+
+#define CANARIFY_ALLOC(addr,size) if (nursery_canaries_enabled ()) {   \
+                               memcpy ((char*) (addr) + (size), CANARY_STRING, CANARY_SIZE);   \
+                       }
+
+#define CANARY_VALID(addr) (strncmp ((char*) (addr), CANARY_STRING, CANARY_SIZE) == 0)
+
+#define CHECK_CANARY_FOR_OBJECT(addr) if (nursery_canaries_enabled ()) {       \
+                               char* canary_ptr = (char*) (addr) + sgen_safe_object_get_size_unaligned ((GCObject *) (addr));  \
+                               if (!CANARY_VALID(canary_ptr)) {        \
+                                       char canary_copy[CANARY_SIZE +1];       \
+                                       strncpy (canary_copy, canary_ptr, CANARY_SIZE); \
+                                       canary_copy[CANARY_SIZE] = 0;   \
+                                       g_error ("CORRUPT CANARY:\naddr->%p\ntype->%s\nexcepted->'%s'\nfound->'%s'\n", (char*) addr, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE ((addr))), CANARY_STRING, canary_copy);      \
+                               } }
+
+#endif /* HAVE_SGEN_GC */
+
+#endif /* __MONO_SGENGC_H__ */
diff --git a/mono/sgen/sgen-gray.c b/mono/sgen/sgen-gray.c
new file mode 100644 (file)
index 0000000..331484a
--- /dev/null
@@ -0,0 +1,383 @@
+/*
+ * sgen-gray.c: Gray queue management.
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-protocol.h"
+
+#ifdef HEAVY_STATISTICS
+guint64 stat_gray_queue_section_alloc;
+guint64 stat_gray_queue_section_free;
+guint64 stat_gray_queue_enqueue_fast_path;
+guint64 stat_gray_queue_dequeue_fast_path;
+guint64 stat_gray_queue_enqueue_slow_path;
+guint64 stat_gray_queue_dequeue_slow_path;
+#endif
+
+#define GRAY_QUEUE_LENGTH_LIMIT        64
+
+#ifdef SGEN_CHECK_GRAY_OBJECT_SECTIONS
+#define STATE_TRANSITION(s,o,n)        do {                                    \
+               int __old = (o);                                        \
+               if (InterlockedCompareExchange ((volatile int*)&(s)->state, (n), __old) != __old) \
+                       g_assert_not_reached ();                        \
+       } while (0)
+#define STATE_SET(s,v)         (s)->state = (v)
+#define STATE_ASSERT(s,v)      g_assert ((s)->state == (v))
+#else
+#define STATE_TRANSITION(s,o,n)
+#define STATE_SET(s,v)
+#define STATE_ASSERT(s,v)
+#endif
+
+void /* Push a fresh (recycled or newly allocated) section onto the front of the gray stack. */
+sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue)
+{
+       GrayQueueSection *section;
+
+       HEAVY_STAT (stat_gray_queue_section_alloc ++);
+
+       if (queue->alloc_prepare_func)
+               queue->alloc_prepare_func (queue); /* client hook, invoked before the queue grows */
+
+       if (queue->free_list) {
+               /* Use the previously allocated queue sections if possible */
+               section = queue->free_list;
+               queue->free_list = section->next;
+               STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
+       } else {
+               /* Allocate a new section */
+               section = sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
+               STATE_SET (section, GRAY_QUEUE_SECTION_STATE_FLOATING);
+       }
+
+       section->size = SGEN_GRAY_QUEUE_SECTION_SIZE; /* reset to full capacity; dequeue_section may have shrunk it */
+
+       STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
+
+       /* Link it with the others */
+       section->next = queue->first;
+       queue->first = section;
+       queue->cursor = section->entries - 1; /* cursor sits one slot before the first entry; pushes pre-increment it */
+}
+
+void /* Return a floating section to the internal allocator for good. */
+sgen_gray_object_free_queue_section (GrayQueueSection *section)
+{
+       HEAVY_STAT (stat_gray_queue_section_free ++);
+
+       STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_FREED); /* section must not be on any queue/free list */
+       sgen_free_internal (section, INTERNAL_MEM_GRAY_QUEUE);
+}
+
+/*
+ * The following two functions are called in the inner loops of the
+ * collector, so they need to be as fast as possible.  We have macros
+ * for them in sgen-gc.h.
+ */
+
+void /* Slow-path push of (obj, desc); GRAY_OBJECT_ENQUEUE in sgen-gray.h is the fast path. */
+sgen_gray_object_enqueue (SgenGrayQueue *queue, char *obj, mword desc)
+{
+       GrayQueueEntry entry = SGEN_GRAY_QUEUE_ENTRY (obj, desc);
+
+       HEAVY_STAT (stat_gray_queue_enqueue_slow_path ++);
+
+       SGEN_ASSERT (9, obj, "enqueueing a null object");
+       //sgen_check_objref (obj);
+
+#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
+       if (queue->enqueue_check_func)
+               queue->enqueue_check_func (obj);
+#endif
+
+       if (G_UNLIKELY (!queue->first || queue->cursor == GRAY_LAST_CURSOR_POSITION (queue->first))) { /* no section yet, or current one is full */
+               if (queue->first) {
+                       /* Set the current section size back to default, might have been changed by sgen_gray_object_dequeue_section */
+                       queue->first->size = SGEN_GRAY_QUEUE_SECTION_SIZE;
+               }
+
+               sgen_gray_object_alloc_queue_section (queue);
+       }
+       STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
+       SGEN_ASSERT (9, queue->cursor <= GRAY_LAST_CURSOR_POSITION (queue->first), "gray queue %p overflow, first %p, cursor %p", queue, queue->first, queue->cursor);
+       *++queue->cursor = entry; /* pre-increment: cursor always points at the last valid entry */
+
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+       binary_protocol_gray_enqueue (queue, queue->cursor, obj);
+#endif
+}
+
+GrayQueueEntry /* Slow-path pop; returns entry with .obj == NULL when the queue is empty. */
+sgen_gray_object_dequeue (SgenGrayQueue *queue)
+{
+       GrayQueueEntry entry;
+
+       HEAVY_STAT (stat_gray_queue_dequeue_slow_path ++);
+
+       if (sgen_gray_object_queue_is_empty (queue)) {
+               entry.obj = NULL; /* NOTE(review): entry.desc is left uninitialized here — callers must test obj first */
+               return entry;
+       }
+
+       STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
+       SGEN_ASSERT (9, queue->cursor >= GRAY_FIRST_CURSOR_POSITION (queue->first), "gray queue %p underflow", queue);
+
+       entry = *queue->cursor--; /* post-decrement: take the entry the cursor points at, then retreat */
+
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+       binary_protocol_gray_dequeue (queue, queue->cursor + 1, entry.obj);
+#endif
+
+       if (G_UNLIKELY (queue->cursor < GRAY_FIRST_CURSOR_POSITION (queue->first))) { /* drained the whole section */
+               GrayQueueSection *section = queue->first;
+               queue->first = section->next;
+               section->next = queue->free_list;
+
+               STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FREE_LIST);
+
+               queue->free_list = section; /* recycle the empty section for later pushes */
+               queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL; /* reposition on the next section's last entry */
+       }
+
+       return entry;
+}
+
+GrayQueueSection* /* Detach the whole front section (or NULL); its 'size' records how many entries it holds. */
+sgen_gray_object_dequeue_section (SgenGrayQueue *queue)
+{
+       GrayQueueSection *section;
+
+       if (!queue->first)
+               return NULL;
+
+       section = queue->first;
+       queue->first = section->next;
+
+       section->next = NULL;
+       section->size = queue->cursor - section->entries + 1; /* persist the cursor position, since the cursor does not travel with the section */
+
+       queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL; /* resume at the next section's recorded end */
+
+       STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);
+
+       return section;
+}
+
+void /* Push a previously dequeued section (with valid 'size') back onto the front of the queue. */
+sgen_gray_object_enqueue_section (SgenGrayQueue *queue, GrayQueueSection *section)
+{
+       STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
+
+       if (queue->first)
+               queue->first->size = queue->cursor - queue->first->entries + 1; /* save the cursor into the section it is leaving */
+
+       section->next = queue->first;
+       queue->first = section;
+       queue->cursor = queue->first->entries + queue->first->size - 1; /* cursor on the incoming section's last entry */
+#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
+       if (queue->enqueue_check_func) {
+               int i;
+               for (i = 0; i < section->size; ++i)
+                       queue->enqueue_check_func (section->entries [i].obj); /* re-validate every entry being re-enqueued */
+       }
+#endif
+}
+
+void /* Cap the free list at GRAY_QUEUE_LENGTH_LIMIT sections, freeing the surplus. */
+sgen_gray_object_queue_trim_free_list (SgenGrayQueue *queue)
+{
+       GrayQueueSection *section, *next;
+       int i = 0;
+       for (section = queue->free_list; section && i < GRAY_QUEUE_LENGTH_LIMIT - 1; section = section->next) { /* walk to the last section we intend to keep */
+               STATE_ASSERT (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST);
+               i ++;
+       }
+       if (!section)
+               return; /* free list already within the limit */
+       while (section->next) {
+               next = section->next;
+               section->next = next->next; /* unlink and free everything past the cutoff */
+               STATE_TRANSITION (next, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
+               sgen_gray_object_free_queue_section (next);
+       }
+}
+
+void /* (Re)initialize an empty gray queue; enqueue_check_func is only stored under SGEN_CHECK_GRAY_OBJECT_ENQUEUE. */
+sgen_gray_object_queue_init (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func)
+{
+       g_assert (sgen_gray_object_queue_is_empty (queue)); /* must not discard pending entries */
+
+       queue->alloc_prepare_func = NULL;
+       queue->alloc_prepare_data = NULL;
+#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
+       queue->enqueue_check_func = enqueue_check_func;
+#endif
+
+       /* Free the extra sections allocated during the last collection */
+       sgen_gray_object_queue_trim_free_list (queue);
+}
+
+static void /* Poison alloc-prepare hook: growing an "invalid" queue is a bug. */
+invalid_prepare_func (SgenGrayQueue *queue)
+{
+       g_assert_not_reached ();
+}
+
+void /* Initialize a queue that must never be pushed to; any growth triggers invalid_prepare_func's assert. */
+sgen_gray_object_queue_init_invalid (SgenGrayQueue *queue)
+{
+       sgen_gray_object_queue_init (queue, NULL);
+       queue->alloc_prepare_func = invalid_prepare_func; /* set after init, which clears the hook */
+       queue->alloc_prepare_data = NULL;
+}
+
+void /* Install the hook called before each new section allocation; may only be set once. */
+sgen_gray_queue_set_alloc_prepare (SgenGrayQueue *queue, GrayQueueAllocPrepareFunc alloc_prepare_func, void *data)
+{
+       SGEN_ASSERT (0, !queue->alloc_prepare_func && !queue->alloc_prepare_data, "Can't set gray queue alloc-prepare twice");
+       queue->alloc_prepare_func = alloc_prepare_func;
+       queue->alloc_prepare_data = data;
+}
+
+void /* Convenience: init the queue and install the alloc-prepare hook in one call. */
+sgen_gray_object_queue_init_with_alloc_prepare (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func,
+               GrayQueueAllocPrepareFunc alloc_prepare_func, void *data)
+{
+       sgen_gray_object_queue_init (queue, enqueue_check_func);
+       sgen_gray_queue_set_alloc_prepare (queue, alloc_prepare_func, data);
+}
+
+void /* Tear down an empty queue, releasing every cached free-list section. */
+sgen_gray_object_queue_deinit (SgenGrayQueue *queue)
+{
+       g_assert (!queue->first); /* queue must have been fully drained */
+       while (queue->free_list) {
+               GrayQueueSection *next = queue->free_list->next;
+               STATE_TRANSITION (queue->free_list, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
+               sgen_gray_object_free_queue_section (queue->free_list);
+               queue->free_list = next;
+       }
+}
+
+void /* Clear the alloc-prepare hook (also re-enables sgen_gray_queue_set_alloc_prepare). */
+sgen_gray_object_queue_disable_alloc_prepare (SgenGrayQueue *queue)
+{
+       queue->alloc_prepare_func = NULL;
+       queue->alloc_prepare_data = NULL;
+}
+
+static void /* Take the section-queue mutex, but only for queues created with locked == TRUE. */
+lock_section_queue (SgenSectionGrayQueue *queue)
+{
+       if (!queue->locked)
+               return; /* single-threaded use: locking elided */
+
+       mono_mutex_lock (&queue->lock);
+}
+
+static void /* Release the section-queue mutex; no-op for unlocked queues (mirrors lock_section_queue). */
+unlock_section_queue (SgenSectionGrayQueue *queue)
+{
+       if (!queue->locked)
+               return;
+
+       mono_mutex_unlock (&queue->lock);
+}
+
+void /* Initialize an empty section-level queue; @locked selects mutex-protected operation. */
+sgen_section_gray_queue_init (SgenSectionGrayQueue *queue, gboolean locked, GrayQueueEnqueueCheckFunc enqueue_check_func)
+{
+       g_assert (sgen_section_gray_queue_is_empty (queue));
+
+       queue->locked = locked;
+       if (locked) {
+               mono_mutex_init_recursive (&queue->lock); /* recursive: lock/unlock helpers may nest — TODO confirm nesting actually occurs */
+       }
+
+#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
+       queue->enqueue_check_func = enqueue_check_func;
+#endif
+}
+
+gboolean /* TRUE when the section queue holds no sections; reads 'first' without taking the lock. */
+sgen_section_gray_queue_is_empty (SgenSectionGrayQueue *queue)
+{
+       return !queue->first;
+}
+
+GrayQueueSection* /* Pop one section under the (optional) lock; returns NULL when empty. */
+sgen_section_gray_queue_dequeue (SgenSectionGrayQueue *queue)
+{
+       GrayQueueSection *section;
+
+       lock_section_queue (queue);
+
+       if (queue->first) {
+               section = queue->first;
+               queue->first = section->next;
+
+               STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);
+
+               section->next = NULL; /* caller owns a detached, floating section */
+       } else {
+               section = NULL;
+       }
+
+       unlock_section_queue (queue);
+
+       return section;
+}
+
+void /* Push a floating section (with valid 'size') onto the front, under the (optional) lock. */
+sgen_section_gray_queue_enqueue (SgenSectionGrayQueue *queue, GrayQueueSection *section)
+{
+       STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED); /* done before taking the lock */
+
+       lock_section_queue (queue);
+
+       section->next = queue->first;
+       queue->first = section;
+#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
+       if (queue->enqueue_check_func) {
+               int i;
+               for (i = 0; i < section->size; ++i)
+                       queue->enqueue_check_func (section->entries [i].obj); /* validate every entry entering the queue */
+       }
+#endif
+
+       unlock_section_queue (queue);
+}
+
+void /* One-time init: register the gray-queue statistics counters (HEAVY_STATISTICS builds only). */
+sgen_init_gray_queues (void)
+{
+#ifdef HEAVY_STATISTICS
+       mono_counters_register ("Gray Queue alloc section", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_section_alloc);
+       mono_counters_register ("Gray Queue free section", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_section_free);
+       mono_counters_register ("Gray Queue enqueue fast path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_enqueue_fast_path);
+       mono_counters_register ("Gray Queue dequeue fast path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_dequeue_fast_path);
+       mono_counters_register ("Gray Queue enqueue slow path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_enqueue_slow_path);
+       mono_counters_register ("Gray Queue dequeue slow path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_dequeue_slow_path);
+#endif
+}
+#endif
diff --git a/mono/sgen/sgen-gray.h b/mono/sgen/sgen-gray.h
new file mode 100644 (file)
index 0000000..966a377
--- /dev/null
@@ -0,0 +1,219 @@
+/*
+ * sgen-gray.h: Gray queue management.
+ *
+ * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __MONO_SGEN_GRAY_H__
+#define __MONO_SGEN_GRAY_H__
+
+#include "mono/sgen/sgen-protocol.h"
+
+/*
+ * This gray queue has to be as optimized as possible, because it is in the core of
+ * the mark/copy phase of the garbage collector. The memory access has then to be as
+ * cache friendly as possible. That's why we use a cursor based implementation.
+ * 
+ * This simply consists of maintaining a pointer to the current element in the
+ * queue. In addition to using this cursor, we use a simple linked list of arrays,
+ * called sections, so that we have the cache friendliness of arrays without having
+ * the cost of memory reallocation of a dynamic array, nor the cost of memory
+ * indirection of a linked list.
+ * 
+ * This implementation also allows the dequeuing of a whole section at a time. This is
+ * for example used in the parallel GC because it would be too costly to take one element 
+ * at a time. This implies the main constraint that, because we don't carry the cursor
+ * with the section, we still have to store the index of the last element. This is done
+ * through the 'size' field on the section, whose default value is its maximum value
+ * SGEN_GRAY_QUEUE_SECTION_SIZE. This field is updated in multiple cases:
+ *  - section allocation : default value
+ *  - object push : default value if we fill the current queue first
+ *  - section dequeue : position of the cursor in the dequeued section
+ *  - section enqueue : position of the cursor in the previously first section in the queue
+ * 
+ * The previous implementation was an index based access where we would store the index
+ * of the last element in the section. This was less efficient because we would have
+ * to make 1 memory access for the index value, 1 for the base address of the objects
+ * array and another 1 for the actual value in the array.
+ */
+
+/* SGEN_GRAY_QUEUE_HEADER_SIZE is number of machine words */
+#ifdef SGEN_CHECK_GRAY_OBJECT_SECTIONS
+#define SGEN_GRAY_QUEUE_HEADER_SIZE    4
+#else
+#define SGEN_GRAY_QUEUE_HEADER_SIZE    2
+#endif
+
+#define SGEN_GRAY_QUEUE_SECTION_SIZE   (128 - SGEN_GRAY_QUEUE_HEADER_SIZE)
+
+#ifdef SGEN_CHECK_GRAY_OBJECT_SECTIONS
+typedef enum {
+       GRAY_QUEUE_SECTION_STATE_FLOATING,
+       GRAY_QUEUE_SECTION_STATE_ENQUEUED,
+       GRAY_QUEUE_SECTION_STATE_FREE_LIST,
+       GRAY_QUEUE_SECTION_STATE_FREED
+} GrayQueueSectionState;
+#endif
+
+typedef struct _GrayQueueEntry GrayQueueEntry;
+struct _GrayQueueEntry {
+       char *obj;
+       mword desc;
+};
+
+#define SGEN_GRAY_QUEUE_ENTRY(obj,desc)        { (obj), (desc) }
+
+/*
+ * This is a stack now instead of a queue, so the most recently added items are removed
+ * first, improving cache locality, and keeping the stack size manageable.
+ */
+typedef struct _GrayQueueSection GrayQueueSection;
+struct _GrayQueueSection {
+#ifdef SGEN_CHECK_GRAY_OBJECT_SECTIONS
+       /*
+        * The dummy is here so that the state doesn't get overwritten
+        * by the internal allocator once the section is freed.
+        */
+       int dummy;
+       GrayQueueSectionState state;
+#endif
+       int size;
+       GrayQueueSection *next;
+       GrayQueueEntry entries [SGEN_GRAY_QUEUE_SECTION_SIZE];
+};
+
+typedef struct _SgenGrayQueue SgenGrayQueue;
+
+typedef void (*GrayQueueAllocPrepareFunc) (SgenGrayQueue*);
+typedef void (*GrayQueueEnqueueCheckFunc) (char*);
+
+struct _SgenGrayQueue {
+       GrayQueueEntry *cursor;
+       GrayQueueSection *first;
+       GrayQueueSection *free_list;
+       GrayQueueAllocPrepareFunc alloc_prepare_func;
+#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
+       GrayQueueEnqueueCheckFunc enqueue_check_func;
+#endif
+       void *alloc_prepare_data;
+};
+
+typedef struct _SgenSectionGrayQueue SgenSectionGrayQueue;
+
+struct _SgenSectionGrayQueue {
+       GrayQueueSection *first;
+       gboolean locked;
+       mono_mutex_t lock;
+#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
+       GrayQueueEnqueueCheckFunc enqueue_check_func;
+#endif
+};
+
+#define GRAY_LAST_CURSOR_POSITION(s) ((s)->entries + SGEN_GRAY_QUEUE_SECTION_SIZE - 1)
+#define GRAY_FIRST_CURSOR_POSITION(s) ((s)->entries)
+
+#ifdef HEAVY_STATISTICS
+extern guint64 stat_gray_queue_section_alloc;
+extern guint64 stat_gray_queue_section_free;
+extern guint64 stat_gray_queue_enqueue_fast_path;
+extern guint64 stat_gray_queue_dequeue_fast_path;
+extern guint64 stat_gray_queue_enqueue_slow_path;
+extern guint64 stat_gray_queue_dequeue_slow_path;
+#endif
+
+void sgen_init_gray_queues (void);
+
+void sgen_gray_object_enqueue (SgenGrayQueue *queue, char *obj, mword desc);
+GrayQueueEntry sgen_gray_object_dequeue (SgenGrayQueue *queue);
+GrayQueueSection* sgen_gray_object_dequeue_section (SgenGrayQueue *queue);
+void sgen_gray_object_enqueue_section (SgenGrayQueue *queue, GrayQueueSection *section);
+void sgen_gray_object_queue_trim_free_list (SgenGrayQueue *queue);
+void sgen_gray_object_queue_init (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func);
+void sgen_gray_object_queue_init_invalid (SgenGrayQueue *queue);
+void sgen_gray_queue_set_alloc_prepare (SgenGrayQueue *queue, GrayQueueAllocPrepareFunc alloc_prepare_func, void *data);
+void sgen_gray_object_queue_init_with_alloc_prepare (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func,
+               GrayQueueAllocPrepareFunc func, void *data);
+void sgen_gray_object_queue_deinit (SgenGrayQueue *queue);
+void sgen_gray_object_queue_disable_alloc_prepare (SgenGrayQueue *queue);
+void sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue);
+void sgen_gray_object_free_queue_section (GrayQueueSection *section);
+
+void sgen_section_gray_queue_init (SgenSectionGrayQueue *queue, gboolean locked,
+               GrayQueueEnqueueCheckFunc enqueue_check_func);
+gboolean sgen_section_gray_queue_is_empty (SgenSectionGrayQueue *queue);
+GrayQueueSection* sgen_section_gray_queue_dequeue (SgenSectionGrayQueue *queue);
+void sgen_section_gray_queue_enqueue (SgenSectionGrayQueue *queue, GrayQueueSection *section);
+
+gboolean sgen_gray_object_fill_prefetch (SgenGrayQueue *queue);
+
+static inline gboolean /* Empty iff no section is linked in; drained sections are unlinked eagerly on dequeue. */
+sgen_gray_object_queue_is_empty (SgenGrayQueue *queue)
+{
+       return queue->first == NULL;
+}
+
+static inline MONO_ALWAYS_INLINE void /* Fast-path push: inline store when room exists, else the slow path in sgen-gray.c. */
+GRAY_OBJECT_ENQUEUE (SgenGrayQueue *queue, char* obj, mword desc)
+{
+#if SGEN_MAX_DEBUG_LEVEL >= 9
+       sgen_gray_object_enqueue (queue, obj, desc); /* debug builds always take the checked slow path */
+#else
+       if (G_UNLIKELY (!queue->first || queue->cursor == GRAY_LAST_CURSOR_POSITION (queue->first))) {
+               sgen_gray_object_enqueue (queue, obj, desc); /* no section or section full: grow via slow path */
+       } else {
+               GrayQueueEntry entry = SGEN_GRAY_QUEUE_ENTRY (obj, desc);
+
+               HEAVY_STAT (stat_gray_queue_enqueue_fast_path ++);
+
+               *++queue->cursor = entry; /* same pre-increment protocol as the slow path */
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+               binary_protocol_gray_enqueue (queue, queue->cursor, obj);
+#endif
+       }
+#endif
+}
+
+static inline MONO_ALWAYS_INLINE void /* Fast-path pop into *obj/*desc; *obj is set to NULL when the queue is empty. */
+GRAY_OBJECT_DEQUEUE (SgenGrayQueue *queue, char** obj, mword *desc)
+{
+       GrayQueueEntry entry;
+#if SGEN_MAX_DEBUG_LEVEL >= 9
+       entry = sgen_gray_object_dequeue (queue); /* debug builds always take the checked slow path */
+       *obj = entry.obj;
+       *desc = entry.desc;
+#else
+       if (!queue->first) {
+               HEAVY_STAT (stat_gray_queue_dequeue_fast_path ++);
+
+               *obj = NULL; /* NOTE(review): *desc is not written on the empty path — callers must test *obj first */
+       } else if (G_UNLIKELY (queue->cursor == GRAY_FIRST_CURSOR_POSITION (queue->first))) {
+               entry = sgen_gray_object_dequeue (queue); /* last entry of the section: slow path recycles it */
+               *obj = entry.obj;
+               *desc = entry.desc;
+       } else {
+               HEAVY_STAT (stat_gray_queue_dequeue_fast_path ++);
+
+               entry = *queue->cursor--; /* same post-decrement protocol as the slow path */
+               *obj = entry.obj;
+               *desc = entry.desc;
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+               binary_protocol_gray_dequeue (queue, queue->cursor + 1, *obj);
+#endif
+       }
+#endif
+}
+
+#endif
diff --git a/mono/sgen/sgen-hash-table.c b/mono/sgen/sgen-hash-table.c
new file mode 100644 (file)
index 0000000..61c78e9
--- /dev/null
@@ -0,0 +1,251 @@
+/*
+ * sgen-hash-table.c
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "config.h"
+
+#ifdef HAVE_SGEN_GC
+
+#include <string.h>
+
+#include <mono/sgen/sgen-gc.h>
+#include <mono/sgen/sgen-hash-table.h>
+
+#ifdef HEAVY_STATISTICS
+static guint64 stat_lookups;
+static guint64 stat_lookup_iterations;
+static guint64 stat_lookup_max_iterations;
+#endif
+
+/*
+ * Grow (or initially create) the bucket array and redistribute all
+ * existing entries.  On first use (no table yet) this also registers
+ * the fixed-size internal-memory type used for entries and starts with
+ * 13 buckets; afterwards the new size is a prime near the current
+ * entry count.  Entry nodes themselves are reused, only relinked.
+ */
+static void
+rehash (SgenHashTable *hash_table)
+{
+	SgenHashTableEntry **old_hash = hash_table->table;
+	guint old_hash_size = hash_table->size;
+	guint i, hash, new_size;
+	SgenHashTableEntry **new_hash;
+	SgenHashTableEntry *entry, *next;
+
+	if (!old_hash) {
+		/* First allocation: entry size = next pointer + key + payload. */
+		sgen_register_fixed_internal_mem_type (hash_table->entry_mem_type,
+				sizeof (SgenHashTableEntry*) + sizeof (gpointer) + hash_table->data_size);
+		new_size = 13;
+	} else {
+		new_size = g_spaced_primes_closest (hash_table->num_entries);
+	}
+
+	new_hash = sgen_alloc_internal_dynamic (new_size * sizeof (SgenHashTableEntry*), hash_table->table_mem_type, TRUE);
+	for (i = 0; i < old_hash_size; ++i) {
+		for (entry = old_hash [i]; entry; entry = next) {
+			/* Relink each node into its new bucket; no reallocation. */
+			hash = hash_table->hash_func (entry->key) % new_size;
+			next = entry->next;
+			entry->next = new_hash [hash];
+			new_hash [hash] = entry;
+		}
+	}
+	sgen_free_internal_dynamic (old_hash, old_hash_size * sizeof (SgenHashTableEntry*), hash_table->table_mem_type);
+	hash_table->table = new_hash;
+	hash_table->size = new_size;
+}
+
+/*
+ * Rehash when the load factor reaches 2 entries per bucket.  Also
+ * covers the empty-table case (0 >= 0), so after this call the table
+ * is guaranteed to have a non-zero bucket count.
+ */
+static void
+rehash_if_necessary (SgenHashTable *hash_table)
+{
+	if (hash_table->num_entries >= hash_table->size * 2)
+		rehash (hash_table);
+
+	SGEN_ASSERT (1, hash_table->size, "rehash guarantees size > 0");
+}
+
+/*
+ * Find the entry for @key, or NULL.  If @_hash is non-NULL the bucket
+ * index is written to it (useful for a subsequent insert).  Keys are
+ * compared with equal_func when set, otherwise by pointer identity.
+ */
+static SgenHashTableEntry*
+lookup (SgenHashTable *hash_table, gpointer key, guint *_hash)
+{
+	SgenHashTableEntry *entry;
+	guint hash;
+	GEqualFunc equal = hash_table->equal_func;
+#ifdef HEAVY_STATISTICS
+	guint64 iterations = 0;
+	++stat_lookups;
+#endif
+
+	/* Empty table: nothing can match, and we must not divide by size. */
+	if (!hash_table->size)
+		return NULL;
+
+	hash = hash_table->hash_func (key) % hash_table->size;
+	if (_hash)
+		*_hash = hash;
+
+	for (entry = hash_table->table [hash]; entry; entry = entry->next) {
+#ifdef HEAVY_STATISTICS
+		++stat_lookup_iterations;
+		++iterations;
+		if (iterations > stat_lookup_max_iterations)
+			stat_lookup_max_iterations = iterations;
+#endif
+		if ((equal && equal (entry->key, key)) || (!equal && entry->key == key))
+			return entry;
+	}
+	return NULL;
+}
+
+/*
+ * Return a pointer to the payload stored for @key, or NULL if absent.
+ * The returned pointer aliases the entry's inline data; it stays valid
+ * until the entry is removed or the table is cleaned.
+ */
+gpointer
+sgen_hash_table_lookup (SgenHashTable *hash_table, gpointer key)
+{
+	SgenHashTableEntry *entry = lookup (hash_table, key, NULL);
+	if (!entry)
+		return NULL;
+	return entry->data;
+}
+
+/*
+ * Insert or overwrite the value for @key.  @new_value is copied
+ * (data_size bytes) into the entry.  If the key already existed, the
+ * previous payload is optionally copied out to @old_value and FALSE is
+ * returned; a fresh insert returns TRUE.
+ */
+gboolean
+sgen_hash_table_replace (SgenHashTable *hash_table, gpointer key, gpointer new_value, gpointer old_value)
+{
+	guint hash;
+	SgenHashTableEntry *entry;
+
+	/* Ensures size > 0, so `hash` from lookup is a valid bucket index. */
+	rehash_if_necessary (hash_table);
+	entry = lookup (hash_table, key, &hash);
+
+	if (entry) {
+		if (old_value)
+			memcpy (old_value, entry->data, hash_table->data_size); 
+		memcpy (entry->data, new_value, hash_table->data_size);
+		return FALSE;
+	}
+
+	entry = sgen_alloc_internal (hash_table->entry_mem_type);
+	entry->key = key;
+	memcpy (entry->data, new_value, hash_table->data_size);
+
+	/* Prepend into the bucket chain. */
+	entry->next = hash_table->table [hash];
+	hash_table->table [hash] = entry;
+
+	hash_table->num_entries++;
+
+	return TRUE;
+}
+
+/*
+ * Update the value for an existing @key only; never inserts.  Returns
+ * TRUE if the key was found (old payload optionally copied to
+ * @old_value first), FALSE otherwise.
+ */
+gboolean
+sgen_hash_table_set_value (SgenHashTable *hash_table, gpointer key, gpointer new_value, gpointer old_value)
+{
+	guint hash;
+	SgenHashTableEntry *entry;
+
+	entry = lookup (hash_table, key, &hash);
+
+	if (entry) {
+		if (old_value)
+			memcpy (old_value, entry->data, hash_table->data_size);
+		memcpy (entry->data, new_value, hash_table->data_size);
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+/*
+ * Replace the key of an existing entry in place.  Returns TRUE if
+ * @old_key was found.  NOTE(review): the entry is not re-bucketed, so
+ * this is only safe when @new_key hashes to the same bucket as
+ * @old_key (e.g. when keys are moved objects tracked elsewhere) —
+ * callers must guarantee that.
+ */
+gboolean
+sgen_hash_table_set_key (SgenHashTable *hash_table, gpointer old_key, gpointer new_key)
+{
+	guint hash;
+	SgenHashTableEntry *entry;
+
+	entry = lookup (hash_table, old_key, &hash);
+
+	if (entry) {
+		entry->key = new_key;
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+/*
+ * Remove @key from the table.  If found, the payload is optionally
+ * copied to @data_return, the entry freed, and TRUE returned;
+ * otherwise FALSE.
+ */
+gboolean
+sgen_hash_table_remove (SgenHashTable *hash_table, gpointer key, gpointer data_return)
+{
+	SgenHashTableEntry *entry, *prev;
+	guint hash;
+	GEqualFunc equal = hash_table->equal_func;
+
+	/* Guarantees size > 0 before the modulo below. */
+	rehash_if_necessary (hash_table);
+	hash = hash_table->hash_func (key) % hash_table->size;
+
+	prev = NULL;
+	for (entry = hash_table->table [hash]; entry; entry = entry->next) {
+		if ((equal && equal (entry->key, key)) || (!equal && entry->key == key)) {
+			/* Unlink from the chain, keeping the bucket head correct. */
+			if (prev)
+				prev->next = entry->next;
+			else
+				hash_table->table [hash] = entry->next;
+
+			hash_table->num_entries--;
+
+			if (data_return)
+				memcpy (data_return, entry->data, hash_table->data_size);
+
+			sgen_free_internal (entry, hash_table->entry_mem_type);
+
+			return TRUE;
+		}
+		prev = entry;
+	}
+
+	return FALSE;
+}
+
+/*
+ * Free every entry and the bucket array, resetting the table to its
+ * pristine (unallocated) state so it can be reused.
+ */
+void
+sgen_hash_table_clean (SgenHashTable *hash_table)
+{
+	guint i;
+
+	if (!hash_table->size) {
+		/* Never allocated (or already cleaned): nothing to free. */
+		SGEN_ASSERT (1, !hash_table->table, "clean should reset hash_table->table");
+		SGEN_ASSERT (1, !hash_table->num_entries, "clean should reset hash_table->num_entries");
+		return;
+	}
+
+	for (i = 0; i < hash_table->size; ++i) {
+		SgenHashTableEntry *entry = hash_table->table [i];
+		while (entry) {
+			SgenHashTableEntry *next = entry->next;
+			sgen_free_internal (entry, hash_table->entry_mem_type);
+			entry = next;
+		}
+	}
+
+	sgen_free_internal_dynamic (hash_table->table, hash_table->size * sizeof (SgenHashTableEntry*), hash_table->table_mem_type);
+
+	hash_table->table = NULL;
+	hash_table->size = 0;
+	hash_table->num_entries = 0;
+}
+
+/*
+ * One-time module init: registers the heavy-statistics counters with
+ * the Mono counters facility.  A no-op unless HEAVY_STATISTICS is on.
+ */
+void
+sgen_init_hash_table (void)
+{
+#ifdef HEAVY_STATISTICS
+	mono_counters_register ("Hash table lookups", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_lookups);
+	mono_counters_register ("Hash table lookup iterations", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_lookup_iterations);
+	mono_counters_register ("Hash table lookup max iterations", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_lookup_max_iterations);
+#endif
+}
+
+#endif
diff --git a/mono/sgen/sgen-hash-table.h b/mono/sgen/sgen-hash-table.h
new file mode 100644 (file)
index 0000000..e6cfe43
--- /dev/null
@@ -0,0 +1,77 @@
+#ifndef __MONO_SGENHASHTABLE_H__
+#define __MONO_SGENHASHTABLE_H__
+
+#include "config.h"
+
+#ifdef HAVE_SGEN_GC
+
+#include <glib.h>
+
+/* hash tables */
+
+/*
+ * Chained hash table with fixed-size, inline payloads.  Entries and
+ * the bucket array come from SGen's internal allocator (mem types
+ * below), so the table is usable from GC code.
+ */
+typedef struct _SgenHashTableEntry SgenHashTableEntry;
+struct _SgenHashTableEntry {
+	SgenHashTableEntry *next;
+	gpointer key;
+	char data [MONO_ZERO_LEN_ARRAY]; /* data is pointer-aligned */
+};
+
+typedef struct {
+	int table_mem_type;	/* internal-mem type for the bucket array */
+	int entry_mem_type;	/* internal-mem type for entry nodes */
+	size_t data_size;	/* bytes of inline payload per entry */
+	GHashFunc hash_func;
+	GEqualFunc equal_func;	/* NULL means compare keys by pointer */
+	SgenHashTableEntry **table;
+	guint size;		/* bucket count; 0 until first insert */
+	guint num_entries;
+} SgenHashTable;
+
+/* Static initializer; the table allocates lazily on first insert. */
+#define SGEN_HASH_TABLE_INIT(table_type,entry_type,data_size,hash_func,equal_func)	{ (table_type), (entry_type), (data_size), (hash_func), (equal_func), NULL, 0, 0 }
+#define SGEN_HASH_TABLE_ENTRY_SIZE(data_size)			((data_size) + sizeof (SgenHashTableEntry*) + sizeof (gpointer))
+
+gpointer sgen_hash_table_lookup (SgenHashTable *table, gpointer key);
+gboolean sgen_hash_table_replace (SgenHashTable *table, gpointer key, gpointer new_value, gpointer old_value);
+gboolean sgen_hash_table_set_value (SgenHashTable *table, gpointer key, gpointer new_value, gpointer old_value);
+gboolean sgen_hash_table_set_key (SgenHashTable *hash_table, gpointer old_key, gpointer new_key);
+gboolean sgen_hash_table_remove (SgenHashTable *table, gpointer key, gpointer data_return);
+
+void sgen_hash_table_clean (SgenHashTable *table);
+
+void sgen_init_hash_table (void);
+
+#define sgen_hash_table_num_entries(h)	((h)->num_entries)
+
+/* Recover the entry's key from a pointer to its inline payload. */
+#define sgen_hash_table_key_for_value_pointer(v)	(((SgenHashTableEntry*)((char*)(v) - G_STRUCT_OFFSET (SgenHashTableEntry, data)))->key)
+
+/*
+ * Iterate all (key, value) pairs.  Tracks an iterator slot (__iter)
+ * rather than just the node so FOREACH_REMOVE can unlink in place.
+ */
+#define SGEN_HASH_TABLE_FOREACH(h,k,v) do {				\
+		SgenHashTable *__hash_table = (h);			\
+		SgenHashTableEntry **__table = __hash_table->table;	\
+		guint __i;						\
+		for (__i = 0; __i < (h)->size; ++__i) {			\
+			SgenHashTableEntry **__iter, **__next;			\
+			for (__iter = &__table [__i]; *__iter; __iter = __next) {	\
+				SgenHashTableEntry *__entry = *__iter;	\
+				__next = &__entry->next;	\
+				(k) = __entry->key;			\
+				(v) = (gpointer)__entry->data;
+
+/* The loop must be continue'd after using this! */
+#define SGEN_HASH_TABLE_FOREACH_REMOVE(free)	do {			\
+		*__iter = *__next;	\
+		__next = __iter;	\
+		--__hash_table->num_entries;				\
+		if ((free))						\
+			sgen_free_internal (__entry, __hash_table->entry_mem_type); \
+	} while (0)
+
+#define SGEN_HASH_TABLE_FOREACH_SET_KEY(k)	((__entry)->key = (k))
+
+#define SGEN_HASH_TABLE_FOREACH_END					\
+			}						\
+		}							\
+	} while (0)
+
+#endif
+
+#endif
diff --git a/mono/sgen/sgen-internal.c b/mono/sgen/sgen-internal.c
new file mode 100644 (file)
index 0000000..4ccff74
--- /dev/null
@@ -0,0 +1,284 @@
+/*
+ * sgen-internal.c: Internal lock-free memory allocator.
+ *
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "config.h"
+
+#ifdef HAVE_SGEN_GC
+
+#include <string.h>
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/utils/lock-free-alloc.h"
+#include "mono/sgen/sgen-memory-governor.h"
+#include "mono/sgen/sgen-client.h"
+
+/* keep each size a multiple of ALLOC_ALIGN */
+/*
+ * Size classes for the lock-free internal allocator, per pointer
+ * width.  The largest entry also bounds sgen_alloc_internal_dynamic's
+ * fast path; bigger requests go straight to OS memory.
+ */
+#if SIZEOF_VOID_P == 4
+static const int allocator_sizes [] = {
+	   8,   16,   24,   32,   40,   48,   64,   80,
+	  96,  128,  160,  192,  224,  248,  296,  320,
+	 384,  448,  504,  528,  584,  680,  816, 1088,
+	1360, 2044, 2336, 2728, 3272, 4092, 5456, 8188 };
+#else
+static const int allocator_sizes [] = {
+	   8,   16,   24,   32,   40,   48,   64,   80,
+	  96,  128,  160,  192,  224,  248,  320,  328,
+	 384,  448,  528,  584,  680,  816, 1016, 1088,
+	1360, 2040, 2336, 2728, 3272, 4088, 5456, 8184 };
+#endif
+
+#define NUM_ALLOCATORS	(sizeof (allocator_sizes) / sizeof (int))
+
+/* Per-size-class superblock sizes, filled in by sgen_init_internal_allocator. */
+static int allocator_block_sizes [NUM_ALLOCATORS];
+
+static MonoLockFreeAllocSizeClass size_classes [NUM_ALLOCATORS];
+static MonoLockFreeAllocator allocators [NUM_ALLOCATORS];
+
+#ifdef HEAVY_STATISTICS
+static int allocator_sizes_stats [NUM_ALLOCATORS];
+#endif
+
+/*
+ * Pick the smallest superblock size (power-of-two multiple of the page
+ * size, up to LOCK_FREE_ALLOC_SB_MAX_SIZE) whose usable area fits at
+ * least two slots of @slot_size.
+ */
+static size_t
+block_size (size_t slot_size)
+{
+	static int pagesize = -1;
+
+	int size;
+
+	/* Query the page size once and cache it. */
+	if (pagesize == -1)
+		pagesize = mono_pagesize ();
+
+	for (size = pagesize; size < LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) {
+		if (slot_size * 2 <= LOCK_FREE_ALLOC_SB_USABLE_SIZE (size))
+			return size;
+	}
+	return LOCK_FREE_ALLOC_SB_MAX_SIZE;
+}
+
+/*
+ * Find the allocator index for memory chunks that can contain @size
+ * objects, i.e. the smallest size class >= @size.  Asserts if @size
+ * exceeds the largest class.
+ */
+static int
+index_for_size (size_t size)
+{
+	int slot;
+	/* do a binary search or lookup table later. */
+	for (slot = 0; slot < NUM_ALLOCATORS; ++slot) {
+		if (allocator_sizes [slot] >= size)
+			return slot;
+	}
+	g_assert_not_reached ();
+	return -1;
+}
+
+/*
+ * Allocator indexes for the fixed INTERNAL_MEM_XXX types.  -1 if that
+ * type is dynamic.
+ */
+static int fixed_type_allocator_indexes [INTERNAL_MEM_MAX];
+
+/*
+ * Bind the fixed internal-memory @type to the size class for @size.
+ * Idempotent, but asserts if the same type is re-registered with a
+ * size that maps to a different class.
+ */
+void
+sgen_register_fixed_internal_mem_type (int type, size_t size)
+{
+	int slot;
+
+	g_assert (type >= 0 && type < INTERNAL_MEM_MAX);
+	g_assert (size <= allocator_sizes [NUM_ALLOCATORS - 1]);
+
+	slot = index_for_size (size);
+	g_assert (slot >= 0);
+
+	if (fixed_type_allocator_indexes [type] == -1)
+		fixed_type_allocator_indexes [type] = slot;
+	else
+		g_assert (fixed_type_allocator_indexes [type] == slot);
+}
+
+/*
+ * Human-readable name for an INTERNAL_MEM_* type, used in diagnostics
+ * and allocation-failure messages.  Types not known here are delegated
+ * to the embedding client, which must provide a description.
+ */
+static const char*
+description_for_type (int type)
+{
+	switch (type) {
+	case INTERNAL_MEM_PIN_QUEUE: return "pin-queue";
+	case INTERNAL_MEM_FRAGMENT: return "fragment";
+	case INTERNAL_MEM_SECTION: return "section";
+	case INTERNAL_MEM_SCAN_STARTS: return "scan-starts";
+	case INTERNAL_MEM_FIN_TABLE: return "fin-table";
+	case INTERNAL_MEM_FINALIZE_ENTRY: return "finalize-entry";
+	case INTERNAL_MEM_FINALIZE_READY: return "finalize-ready";
+	case INTERNAL_MEM_DISLINK_TABLE: return "dislink-table";
+	case INTERNAL_MEM_DISLINK: return "dislink";
+	case INTERNAL_MEM_ROOTS_TABLE: return "roots-table";
+	case INTERNAL_MEM_ROOT_RECORD: return "root-record";
+	case INTERNAL_MEM_STATISTICS: return "statistics";
+	case INTERNAL_MEM_STAT_PINNED_CLASS: return "pinned-class";
+	case INTERNAL_MEM_STAT_REMSET_CLASS: return "remset-class";
+	case INTERNAL_MEM_GRAY_QUEUE: return "gray-queue";
+	case INTERNAL_MEM_MS_TABLES: return "marksweep-tables";
+	case INTERNAL_MEM_MS_BLOCK_INFO: return "marksweep-block-info";
+	case INTERNAL_MEM_MS_BLOCK_INFO_SORT: return "marksweep-block-info-sort";
+	case INTERNAL_MEM_WORKER_DATA: return "worker-data";
+	case INTERNAL_MEM_THREAD_POOL_JOB: return "thread-pool-job";
+	case INTERNAL_MEM_BRIDGE_DATA: return "bridge-data";
+	case INTERNAL_MEM_OLD_BRIDGE_HASH_TABLE: return "old-bridge-hash-table";
+	case INTERNAL_MEM_OLD_BRIDGE_HASH_TABLE_ENTRY: return "old-bridge-hash-table-entry";
+	case INTERNAL_MEM_BRIDGE_HASH_TABLE: return "bridge-hash-table";
+	case INTERNAL_MEM_BRIDGE_HASH_TABLE_ENTRY: return "bridge-hash-table-entry";
+	case INTERNAL_MEM_TARJAN_BRIDGE_HASH_TABLE: return "tarjan-bridge-hash-table";
+	case INTERNAL_MEM_TARJAN_BRIDGE_HASH_TABLE_ENTRY: return "tarjan-bridge-hash-table-entry";
+	case INTERNAL_MEM_TARJAN_OBJ_BUCKET: return "tarjan-bridge-object-buckets";
+	case INTERNAL_MEM_BRIDGE_ALIVE_HASH_TABLE: return "bridge-alive-hash-table";
+	case INTERNAL_MEM_BRIDGE_ALIVE_HASH_TABLE_ENTRY: return "bridge-alive-hash-table-entry";
+	case INTERNAL_MEM_BRIDGE_DEBUG: return "bridge-debug";
+	case INTERNAL_MEM_TOGGLEREF_DATA: return "toggleref-data";
+	case INTERNAL_MEM_CARDTABLE_MOD_UNION: return "cardtable-mod-union";
+	case INTERNAL_MEM_BINARY_PROTOCOL: return "binary-protocol";
+	case INTERNAL_MEM_TEMPORARY: return "temporary";
+	default: {
+		const char *description = sgen_client_description_for_internal_mem_type (type);
+		SGEN_ASSERT (0, description, "Unknown internal mem type");
+		return description;
+	}
+	}
+}
+
+/*
+ * Allocate @size zeroed bytes of internal memory.  Requests larger
+ * than the biggest size class go directly to OS memory (which is
+ * already zeroed); smaller ones use the lock-free allocator and are
+ * memset here.  On failure, sgen_assert_memory_alloc is invoked with
+ * the type's description; @assert_on_failure is currently not
+ * consulted on the failure paths.
+ */
+void*
+sgen_alloc_internal_dynamic (size_t size, int type, gboolean assert_on_failure)
+{
+	int index;
+	void *p;
+
+	if (size > allocator_sizes [NUM_ALLOCATORS - 1]) {
+		p = sgen_alloc_os_memory (size, SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE, NULL);
+		if (!p)
+			sgen_assert_memory_alloc (NULL, size, description_for_type (type));
+	} else {
+		index = index_for_size (size);
+
+#ifdef HEAVY_STATISTICS
+		++ allocator_sizes_stats [index];
+#endif
+
+		p = mono_lock_free_alloc (&allocators [index]);
+		if (!p)
+			sgen_assert_memory_alloc (NULL, size, description_for_type (type));
+		memset (p, 0, size);
+	}
+	return p;
+}
+
+/*
+ * Free memory from sgen_alloc_internal_dynamic.  @size must match the
+ * original request so the same large-vs-size-class route is taken.
+ * NULL is a no-op.
+ */
+void
+sgen_free_internal_dynamic (void *addr, size_t size, int type)
+{
+	if (!addr)
+		return;
+
+	if (size > allocator_sizes [NUM_ALLOCATORS - 1])
+		sgen_free_os_memory (addr, size, SGEN_ALLOC_INTERNAL);
+	else
+		mono_lock_free_free (addr, block_size (size));
+}
+
+/*
+ * Allocate one zeroed object for a fixed INTERNAL_MEM_* @type, which
+ * must have been registered via sgen_register_fixed_internal_mem_type
+ * (asserts otherwise, since the index would be -1).
+ */
+void*
+sgen_alloc_internal (int type)
+{
+	int index, size;
+	void *p;
+
+	index = fixed_type_allocator_indexes [type];
+	g_assert (index >= 0 && index < NUM_ALLOCATORS);
+
+#ifdef HEAVY_STATISTICS
+	++ allocator_sizes_stats [index];
+#endif
+
+	size = allocator_sizes [index];
+
+	/* NOTE(review): no NULL check before memset — assumes the
+	 * lock-free allocator aborts rather than returns NULL here. */
+	p = mono_lock_free_alloc (&allocators [index]);
+	memset (p, 0, size);
+
+	return p;
+}
+
+/*
+ * Free an object allocated with sgen_alloc_internal for the same
+ * fixed @type.  NULL is a no-op.
+ */
+void
+sgen_free_internal (void *addr, int type)
+{
+	int index;
+
+	if (!addr)
+		return;
+
+	index = fixed_type_allocator_indexes [type];
+	g_assert (index >= 0 && index < NUM_ALLOCATORS);
+
+	mono_lock_free_free (addr, allocator_block_sizes [index]);
+}
+
+/*
+ * Dump internal-memory usage to the heap-dump file.  Currently a stub:
+ * the body is commented out because the per-type byte counters it
+ * referenced no longer exist in this allocator.
+ */
+void
+sgen_dump_internal_mem_usage (FILE *heap_dump_file)
+{
+	/*
+	int i;
+
+	fprintf (heap_dump_file, "<other-mem-usage type=\"large-internal\" size=\"%lld\"/>\n", large_internal_bytes_alloced);
+	fprintf (heap_dump_file, "<other-mem-usage type=\"pinned-chunks\" size=\"%lld\"/>\n", pinned_chunk_bytes_alloced);
+	for (i = 0; i < INTERNAL_MEM_MAX; ++i) {
+		fprintf (heap_dump_file, "<other-mem-usage type=\"%s\" size=\"%ld\"/>\n",
+				description_for_type (i), unmanaged_allocator.small_internal_mem_bytes [i]);
+	}
+	*/
+}
+
+/*
+ * Print a per-size-class allocation-count histogram to stdout.  Only
+ * produces output when built with HEAVY_STATISTICS.
+ */
+void
+sgen_report_internal_mem_usage (void)
+{
+	int i G_GNUC_UNUSED;
+#ifdef HEAVY_STATISTICS
+	printf ("size -> # allocations\n");
+	for (i = 0; i < NUM_ALLOCATORS; ++i)
+		printf ("%d -> %d\n", allocator_sizes [i], allocator_sizes_stats [i]);
+#endif
+}
+
+/*
+ * Initialize the internal allocator: reset the fixed-type index map,
+ * set up one lock-free allocator per size class, and sanity-check that
+ * the size-class table covers every superblock's maximum object size.
+ */
+void
+sgen_init_internal_allocator (void)
+{
+	int i, size;
+
+	/* -1 marks "not yet registered" for fixed INTERNAL_MEM_* types. */
+	for (i = 0; i < INTERNAL_MEM_MAX; ++i)
+		fixed_type_allocator_indexes [i] = -1;
+
+	for (i = 0; i < NUM_ALLOCATORS; ++i) {
+		allocator_block_sizes [i] = block_size (allocator_sizes [i]);
+		mono_lock_free_allocator_init_size_class (&size_classes [i], allocator_sizes [i], allocator_block_sizes [i]);
+		mono_lock_free_allocator_init_allocator (&allocators [i], &size_classes [i]);
+	}
+
+	for (size = mono_pagesize (); size <= LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) {
+		int max_size = LOCK_FREE_ALLOC_SB_USABLE_SIZE (size) / 2;
+		/*
+		 * we assert that allocator_sizes contains the biggest possible object size
+		 * per block (4K => 4080 / 2 = 2040, 8k => 8176 / 2 = 4088, 16k => 16368 / 2 = 8184 on 64bits),
+		 * so that we do not get different block sizes for sizes that should go to the same one
+		 */
+		g_assert (allocator_sizes [index_for_size (max_size)] == max_size);
+	}
+}
+
+#endif
diff --git a/mono/sgen/sgen-layout-stats.c b/mono/sgen/sgen-layout-stats.c
new file mode 100644 (file)
index 0000000..2f9ca11
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Copyright Xamarin Inc (http://www.xamarin.com)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#include "sgen/sgen-gc.h"
+#include "sgen/sgen-layout-stats.h"
+
+#ifdef SGEN_OBJECT_LAYOUT_STATISTICS
+
+#define NUM_HISTOGRAM_ENTRIES  (1 << SGEN_OBJECT_LAYOUT_BITMAP_BITS)
+
+static unsigned long histogram [NUM_HISTOGRAM_ENTRIES];
+static unsigned long count_bitmap_overflow;
+static unsigned long count_ref_array;
+static unsigned long count_vtype_array;
+
+/*
+ * Record one scanned object's reference-layout bitmap in the
+ * histogram.  The bitmap must fit in SGEN_OBJECT_LAYOUT_BITMAP_BITS.
+ */
+void
+sgen_object_layout_scanned_bitmap (unsigned int bitmap)
+{
+	g_assert (!(bitmap >> SGEN_OBJECT_LAYOUT_BITMAP_BITS));
+	++histogram [bitmap];
+}
+
+/* Count an object whose layout did not fit in the bitmap. */
+void
+sgen_object_layout_scanned_bitmap_overflow (void)
+{
+	++count_bitmap_overflow;
+}
+
+/* Count a scanned reference array. */
+void
+sgen_object_layout_scanned_ref_array (void)
+{
+	++count_ref_array;
+}
+
+/* Count a scanned value-type array. */
+void
+sgen_object_layout_scanned_vtype_array (void)
+{
+	++count_vtype_array;
+}
+
+/*
+ * Write the collected layout statistics to @out: one "bitmap count"
+ * line per non-empty histogram bucket, followed by the overflow and
+ * array counters.
+ */
+void
+sgen_object_layout_dump (FILE *out)
+{
+	int i;
+
+	for (i = 0; i < NUM_HISTOGRAM_ENTRIES; ++i) {
+		if (!histogram [i])
+			continue;
+		fprintf (out, "%d %lu\n", i, histogram [i]);
+	}
+	fprintf (out, "bitmap-overflow %lu\n", count_bitmap_overflow);
+	fprintf (out, "ref-array %lu\n", count_ref_array);
+	fprintf (out, "vtype-array %lu\n", count_vtype_array);
+}
+
+#endif
+#endif
diff --git a/mono/sgen/sgen-layout-stats.h b/mono/sgen/sgen-layout-stats.h
new file mode 100644 (file)
index 0000000..3853d34
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright Xamarin Inc (http://www.xamarin.com)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __MONO_SGEN_LAYOUT_STATS_H__
+#define __MONO_SGEN_LAYOUT_STATS_H__
+
+#ifdef SGEN_OBJECT_LAYOUT_STATISTICS
+
+/* Number of reference slots trackable per object in the layout bitmap. */
+#define SGEN_OBJECT_LAYOUT_BITMAP_BITS	16
+
+void sgen_object_layout_scanned_bitmap (unsigned int bitmap);
+void sgen_object_layout_scanned_bitmap_overflow (void);
+void sgen_object_layout_scanned_ref_array (void);
+void sgen_object_layout_scanned_vtype_array (void);
+
+void sgen_object_layout_dump (FILE *out);
+
+/* Declare a per-scan bitmap local; all-ones (-1) marks overflow. */
+#define SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP	unsigned int __object_layout_bitmap = 0
+/* Set the bit for reference slot @p within object @o, or mark overflow. */
+#define SGEN_OBJECT_LAYOUT_STATISTICS_MARK_BITMAP(o,p)	do {		\
+		int __index = ((void**)(p)) - ((void**)(((char*)(o)) + SGEN_CLIENT_OBJECT_HEADER_SIZE)); \
+		if (__index >= SGEN_OBJECT_LAYOUT_BITMAP_BITS)		\
+			__object_layout_bitmap = (unsigned int)-1;	\
+		else if (__object_layout_bitmap != (unsigned int)-1)	\
+			__object_layout_bitmap |= (1 << __index);	\
+	} while (0)
+/* Record the finished bitmap (or the overflow counter) after a scan. */
+#define SGEN_OBJECT_LAYOUT_STATISTICS_COMMIT_BITMAP do {		\
+		if (__object_layout_bitmap == (unsigned int)-1)		\
+			sgen_object_layout_scanned_bitmap_overflow ();	\
+		else							\
+			sgen_object_layout_scanned_bitmap (__object_layout_bitmap); \
+	} while (0)
+
+#else
+
+/* Statistics disabled: everything compiles away to nothing. */
+#define sgen_object_layout_scanned_bitmap(bitmap)
+#define sgen_object_layout_scanned_bitmap_overflow()
+#define sgen_object_layout_scanned_ref_array()
+#define sgen_object_layout_scanned_vtype_array()
+
+#define sgen_object_layout_dump(out)
+
+#define SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP
+#define SGEN_OBJECT_LAYOUT_STATISTICS_MARK_BITMAP(o,p)
+#define SGEN_OBJECT_LAYOUT_STATISTICS_COMMIT_BITMAP
+
+#endif
+
+#endif
diff --git a/mono/sgen/sgen-los.c b/mono/sgen/sgen-los.c
new file mode 100644 (file)
index 0000000..8ec09c5
--- /dev/null
@@ -0,0 +1,711 @@
+/*
+ * sgen-los.c: Large objects space.
+ *
+ * Author:
+ *     Paolo Molaro (lupus@ximian.com)
+ *
+ * Copyright 2005-2010 Novell, Inc (http://www.novell.com)
+ *
+ * Thread start/stop adapted from Boehm's GC:
+ * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+ * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
+ * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
+ * Copyright (c) 2000-2004 by Hewlett-Packard Company.  All rights reserved.
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "config.h"
+
+#ifdef HAVE_SGEN_GC
+
+#include <string.h>
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-protocol.h"
+#include "mono/sgen/sgen-cardtable.h"
+#include "mono/sgen/sgen-memory-governor.h"
+#include "mono/sgen/sgen-client.h"
+
+#define LOS_SECTION_SIZE       (1024 * 1024)
+
+/*
+ * This shouldn't be much smaller or larger than MAX_SMALL_OBJ_SIZE.
+ * Must be at least sizeof (LOSSection).
+ */
+#define LOS_CHUNK_SIZE         4096
+#define LOS_CHUNK_BITS         12
+
+/* Largest object that can be allocated in a section. */
+#define LOS_SECTION_OBJECT_LIMIT       (LOS_SECTION_SIZE - LOS_CHUNK_SIZE - sizeof (LOSObject))
+//#define LOS_SECTION_OBJECT_LIMIT     0
+#define LOS_SECTION_NUM_CHUNKS         ((LOS_SECTION_SIZE >> LOS_CHUNK_BITS) - 1)
+
+#define LOS_SECTION_FOR_OBJ(obj)       ((LOSSection*)((mword)(obj) & ~(mword)(LOS_SECTION_SIZE - 1)))
+#define LOS_CHUNK_INDEX(obj,section)   (((char*)(obj) - (char*)(section)) >> LOS_CHUNK_BITS)
+
+#define LOS_NUM_FAST_SIZES             32
+
+typedef struct _LOSFreeChunks LOSFreeChunks;
+struct _LOSFreeChunks {
+       LOSFreeChunks *next_size;
+       size_t size;
+};
+
+typedef struct _LOSSection LOSSection;
+struct _LOSSection {
+       LOSSection *next;
+       size_t num_free_chunks;
+       unsigned char *free_chunk_map;
+};
+
+LOSObject *los_object_list = NULL;
+mword los_memory_usage = 0;
+
+static LOSSection *los_sections = NULL;
+static LOSFreeChunks *los_fast_free_lists [LOS_NUM_FAST_SIZES]; /* 0 is for larger sizes */
+static mword los_num_objects = 0;
+static int los_num_sections = 0;
+
+//#define USE_MALLOC
+//#define LOS_CONSISTENCY_CHECK
+//#define LOS_DUMMY
+
+#ifdef LOS_DUMMY
+#define LOS_SEGMENT_SIZE       (4096 * 1024)
+
+static char *los_segment = NULL;
+static int los_segment_index = 0;
+#endif
+
+#ifdef LOS_CONSISTENCY_CHECK
+/*
+ * Debug-only invariant check for the LOS:
+ *  - every section-allocated live object's chunks are marked used in its
+ *    section's free_chunk_map;
+ *  - every run on a fast free list has a size matching its list and its
+ *    chunks are marked free in the map;
+ *  - los_memory_usage equals the sum of all live object sizes.
+ */
+static void
+los_consistency_check (void)
+{
+       LOSSection *section;
+       LOSObject *obj;
+       int i;
+       mword memory_usage = 0;
+
+       for (obj = los_object_list; obj; obj = obj->next) {
+               char *end = obj->data + obj->size;
+               int start_index, num_chunks;
+
+               memory_usage += obj->size;
+
+               /* Huge objects live outside sections; nothing to map-check. */
+               if (obj->size > LOS_SECTION_OBJECT_LIMIT)
+                       continue;
+
+               section = LOS_SECTION_FOR_OBJ (obj);
+
+               g_assert (end <= (char*)section + LOS_SECTION_SIZE);
+
+               start_index = LOS_CHUNK_INDEX (obj, section);
+               num_chunks = (obj->size + sizeof (LOSObject) + LOS_CHUNK_SIZE - 1) >> LOS_CHUNK_BITS;
+               for (i = start_index; i < start_index + num_chunks; ++i)
+                       g_assert (!section->free_chunk_map [i]);
+       }
+
+       for (i = 0; i < LOS_NUM_FAST_SIZES; ++i) {
+               LOSFreeChunks *size_chunks;
+               for (size_chunks = los_fast_free_lists [i]; size_chunks; size_chunks = size_chunks->next_size) {
+                       LOSSection *section = LOS_SECTION_FOR_OBJ (size_chunks);
+                       int j, num_chunks, start_index;
+
+                       /* List 0 holds all runs too large for the sized lists. */
+                       if (i == 0)
+                               g_assert (size_chunks->size >= LOS_NUM_FAST_SIZES * LOS_CHUNK_SIZE);
+                       else
+                               g_assert (size_chunks->size == i * LOS_CHUNK_SIZE);
+
+                       num_chunks = size_chunks->size >> LOS_CHUNK_BITS;
+                       start_index = LOS_CHUNK_INDEX (size_chunks, section);
+                       for (j = start_index; j < start_index + num_chunks; ++j)
+                               g_assert (section->free_chunk_map [j]);
+               }
+       }
+
+       g_assert (los_memory_usage == memory_usage);
+}
+#endif
+
+/*
+ * File the free run starting at FREE_CHUNKS (SIZE bytes, a multiple of
+ * LOS_CHUNK_SIZE) on the fast free list matching its chunk count; runs of
+ * LOS_NUM_FAST_SIZES chunks or more all go on list 0.
+ */
+static void
+add_free_chunk (LOSFreeChunks *free_chunks, size_t size)
+{
+       size_t num_chunks = size >> LOS_CHUNK_BITS;
+
+       free_chunks->size = size;
+
+       /* Slot 0 is the catch-all list for oversized runs. */
+       if (num_chunks >= LOS_NUM_FAST_SIZES)
+               num_chunks = 0;
+       free_chunks->next_size = los_fast_free_lists [num_chunks];
+       los_fast_free_lists [num_chunks] = free_chunks;
+}
+
+/*
+ * Pop the first run of at least SIZE bytes off LIST, splitting any unused
+ * tail back onto the appropriate free list and clearing the corresponding
+ * entries in the owning section's free_chunk_map.  SIZE must be a multiple
+ * of LOS_CHUNK_SIZE.  Returns NULL if no sufficiently large run is on LIST.
+ */
+static LOSFreeChunks*
+get_from_size_list (LOSFreeChunks **list, size_t size)
+{
+       LOSFreeChunks *free_chunks = NULL;
+       LOSSection *section;
+       size_t i, num_chunks, start_index;
+
+       g_assert ((size & (LOS_CHUNK_SIZE - 1)) == 0);
+
+       /* First-fit walk of the list. */
+       while (*list) {
+               free_chunks = *list;
+               if (free_chunks->size >= size)
+                       break;
+               list = &(*list)->next_size;
+       }
+
+       if (!*list)
+               return NULL;
+
+       *list = free_chunks->next_size;
+
+       /* Split off the unused tail of the run and re-file it by size. */
+       if (free_chunks->size > size)
+               add_free_chunk ((LOSFreeChunks*)((char*)free_chunks + size), free_chunks->size - size);
+
+       num_chunks = size >> LOS_CHUNK_BITS;
+
+       section = LOS_SECTION_FOR_OBJ (free_chunks);
+
+       start_index = LOS_CHUNK_INDEX (free_chunks, section);
+       for (i = start_index; i < start_index + num_chunks; ++i) {
+               g_assert (section->free_chunk_map [i]);
+               section->free_chunk_map [i] = 0;
+       }
+
+       /*
+        * num_free_chunks is unsigned (size_t), so asserting `>= 0` after the
+        * subtraction would be vacuous; check for underflow beforehand instead.
+        */
+       g_assert (section->num_free_chunks >= num_chunks);
+       section->num_free_chunks -= num_chunks;
+
+       return free_chunks;
+}
+
+/*
+ * Allocate SIZE bytes (rounded up to a chunk multiple) from the LOS
+ * sections, consulting the sized free lists first and the catch-all list
+ * last.  If nothing fits, maps a fresh LOS_SECTION_SIZE section (memory
+ * governor permitting) and retries.  Returns NULL on OOM or when the
+ * memory governor refuses space.
+ */
+static LOSObject*
+get_los_section_memory (size_t size)
+{
+       LOSSection *section;
+       LOSFreeChunks *free_chunks;
+       size_t num_chunks;
+
+       /* Round the request up to a whole number of chunks. */
+       size += LOS_CHUNK_SIZE - 1;
+       size &= ~(LOS_CHUNK_SIZE - 1);
+
+       num_chunks = size >> LOS_CHUNK_BITS;
+
+       g_assert (size > 0 && size - sizeof (LOSObject) <= LOS_SECTION_OBJECT_LIMIT);
+       g_assert (num_chunks > 0);
+
+ retry:
+       if (num_chunks >= LOS_NUM_FAST_SIZES) {
+               free_chunks = get_from_size_list (&los_fast_free_lists [0], size);
+       } else {
+               size_t i;
+               /* Try exact-and-larger sized lists before the catch-all. */
+               for (i = num_chunks; i < LOS_NUM_FAST_SIZES; ++i) {
+                       free_chunks = get_from_size_list (&los_fast_free_lists [i], size);
+                       if (free_chunks)
+                               break;
+               }
+               if (!free_chunks)
+                       free_chunks = get_from_size_list (&los_fast_free_lists [0], size);
+       }
+
+       if (free_chunks)
+               return (LOSObject*)free_chunks;
+
+       if (!sgen_memgov_try_alloc_space (LOS_SECTION_SIZE, SPACE_LOS))
+               return NULL;
+
+       /* Sections are size-aligned so LOS_SECTION_FOR_OBJ can mask pointers. */
+       section = sgen_alloc_os_memory_aligned (LOS_SECTION_SIZE, LOS_SECTION_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, NULL);
+
+       if (!section)
+               return NULL;
+
+       /* The first chunk holds the section header and free map; the rest
+        * becomes one big free run on the catch-all list. */
+       free_chunks = (LOSFreeChunks*)((char*)section + LOS_CHUNK_SIZE);
+       free_chunks->size = LOS_SECTION_SIZE - LOS_CHUNK_SIZE;
+       free_chunks->next_size = los_fast_free_lists [0];
+       los_fast_free_lists [0] = free_chunks;
+
+       section->num_free_chunks = LOS_SECTION_NUM_CHUNKS;
+
+       section->free_chunk_map = (unsigned char*)section + sizeof (LOSSection);
+       g_assert (sizeof (LOSSection) + LOS_SECTION_NUM_CHUNKS + 1 <= LOS_CHUNK_SIZE);
+       section->free_chunk_map [0] = 0;
+       memset (section->free_chunk_map + 1, 1, LOS_SECTION_NUM_CHUNKS);
+
+       section->next = los_sections;
+       los_sections = section;
+
+       ++los_num_sections;
+
+       goto retry;
+}
+
+/*
+ * Return the SIZE bytes (rounded up to a chunk multiple) occupied by OBJ to
+ * its section: mark the chunks free in the section's map and file the run
+ * on the fast free lists.  Empty sections are not unmapped here — see the
+ * comment below — but in sgen_los_sweep ().
+ */
+static void
+free_los_section_memory (LOSObject *obj, size_t size)
+{
+       LOSSection *section = LOS_SECTION_FOR_OBJ (obj);
+       size_t num_chunks, i, start_index;
+
+       /* Round up to a whole number of chunks, matching the allocation. */
+       size += LOS_CHUNK_SIZE - 1;
+       size &= ~(LOS_CHUNK_SIZE - 1);
+
+       num_chunks = size >> LOS_CHUNK_BITS;
+
+       g_assert (size > 0 && size - sizeof (LOSObject) <= LOS_SECTION_OBJECT_LIMIT);
+       g_assert (num_chunks > 0);
+
+       section->num_free_chunks += num_chunks;
+       g_assert (section->num_free_chunks <= LOS_SECTION_NUM_CHUNKS);
+
+       /*
+        * We could free the LOS section here if it's empty, but we
+        * can't unless we also remove its free chunks from the fast
+        * free lists.  Instead, we do it in los_sweep().
+        */
+
+       start_index = LOS_CHUNK_INDEX (obj, section);
+       for (i = start_index; i < start_index + num_chunks; ++i) {
+               g_assert (!section->free_chunk_map [i]);
+               section->free_chunk_map [i] = 1;
+       }
+
+       add_free_chunk ((LOSFreeChunks*)obj, size);
+}
+
+/* Cached result of mono_pagesize (), used to round huge-object sizes. */
+static int pagesize;
+
+/*
+ * Free OBJ and return its memory to the LOS: huge objects (larger than
+ * LOS_SECTION_OBJECT_LIMIT) go straight back to the OS, section-allocated
+ * objects back to their section's free chunks.  The object must no longer
+ * have a card table mod-union table.
+ */
+void
+sgen_los_free_object (LOSObject *obj)
+{
+       SGEN_ASSERT (0, !obj->cardtable_mod_union, "We should never free a LOS object with a mod-union table.");
+
+#ifndef LOS_DUMMY
+       size_t size = obj->size;
+       SGEN_LOG (4, "Freed large object %p, size %lu", obj->data, (unsigned long)obj->size);
+       binary_protocol_empty (obj->data, obj->size);
+
+       los_memory_usage -= size;
+       los_num_objects--;
+
+#ifdef USE_MALLOC
+       free (obj);
+#else
+       if (size > LOS_SECTION_OBJECT_LIMIT) {
+               if (!pagesize)
+                       pagesize = mono_pagesize ();
+               /* Huge objects were allocated page-rounded; recompute the same
+                * rounded size before handing the memory back to the OS. */
+               size += sizeof (LOSObject);
+               size += pagesize - 1;
+               size &= ~(pagesize - 1);
+               sgen_free_os_memory (obj, size, SGEN_ALLOC_HEAP);
+               sgen_memgov_release_space (size, SPACE_LOS);
+       } else {
+               free_los_section_memory (obj, size + sizeof (LOSObject));
+#ifdef LOS_CONSISTENCY_CHECK
+               /* Fixed: this guard was misspelled `LOS_CONSISTENCY_CHECKS`
+                * (trailing S), so the check never compiled in even with
+                * LOS_CONSISTENCY_CHECK defined. */
+               los_consistency_check ();
+#endif
+       }
+#endif
+#endif
+}
+
+/*
+ * Objects with size >= MAX_SMALL_SIZE are allocated in the large object space.
+ * They are currently kept track of with a linked list.
+ * They don't move, so there is no need to pin them during collection
+ * and we avoid the memcpy overhead.
+ *
+ * Returns a pointer to the zeroed object payload with its vtable slot set,
+ * or NULL if the request is too large or the memory governor / OS refuses
+ * space.  SIZE must be even: the low bit of LOSObject.size is the pin flag.
+ */
+void*
+sgen_los_alloc_large_inner (GCVTable *vtable, size_t size)
+{
+       LOSObject *obj = NULL;
+       void **vtslot;
+
+       g_assert (size > SGEN_MAX_SMALL_OBJ_SIZE);
+       g_assert ((size & 1) == 0);
+
+       /*
+        * size + sizeof (LOSObject) <= SSIZE_MAX - (mono_pagesize () - 1)
+        *
+        * therefore:
+        *
+        * size <= SSIZE_MAX - (mono_pagesize () - 1) - sizeof (LOSObject)
+        */
+       if (size > SSIZE_MAX - (mono_pagesize () - 1) - sizeof (LOSObject))
+               return NULL;
+
+#ifdef LOS_DUMMY
+       /* Debug mode: bump-allocate from one fixed segment, never free. */
+       if (!los_segment)
+               los_segment = sgen_alloc_os_memory (LOS_SEGMENT_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, NULL);
+       los_segment_index = ALIGN_UP (los_segment_index);
+
+       obj = (LOSObject*)(los_segment + los_segment_index);
+       los_segment_index += size + sizeof (LOSObject);
+       g_assert (los_segment_index <= LOS_SEGMENT_SIZE);
+#else
+       sgen_ensure_free_space (size);
+
+#ifdef USE_MALLOC
+       obj = malloc (size + sizeof (LOSObject));
+       memset (obj, 0, size + sizeof (LOSObject));
+#else
+       if (size > LOS_SECTION_OBJECT_LIMIT) {
+               /* Huge object: page-rounded allocation straight from the OS. */
+               size_t alloc_size = size;
+               if (!pagesize)
+                       pagesize = mono_pagesize ();
+               alloc_size += sizeof (LOSObject);
+               alloc_size += pagesize - 1;
+               alloc_size &= ~(pagesize - 1);
+               if (sgen_memgov_try_alloc_space (alloc_size, SPACE_LOS)) {
+                       obj = sgen_alloc_os_memory (alloc_size, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, NULL);
+               }
+       } else {
+               /* Section-allocated memory is recycled, so it must be zeroed;
+                * fresh OS memory above is already zero. */
+               obj = get_los_section_memory (size + sizeof (LOSObject));
+               if (obj)
+                       memset (obj, 0, size + sizeof (LOSObject));
+       }
+#endif
+#endif
+       if (!obj)
+               return NULL;
+       g_assert (!((mword)obj->data & (SGEN_ALLOC_ALIGN - 1)));
+       obj->size = size;
+       vtslot = (void**)obj->data;
+       *vtslot = vtable;
+       sgen_update_heap_boundaries ((mword)obj->data, (mword)obj->data + size);
+       obj->next = los_object_list;
+       los_object_list = obj;
+       los_memory_usage += size;
+       los_num_objects++;
+       SGEN_LOG (4, "Allocated large object %p, vtable: %p (%s), size: %zd", obj->data, vtable, sgen_client_vtable_get_name (vtable), size);
+       binary_protocol_alloc (obj->data, vtable, size, sgen_client_get_provenance ());
+
+#ifdef LOS_CONSISTENCY_CHECK
+       los_consistency_check ();
+#endif
+
+       return obj->data;
+}
+
+static void sgen_los_unpin_object (char *data);
+
+/*
+ * Major-collection sweep of the LOS: free all unpinned (unreachable)
+ * objects, unpin the survivors, unmap fully-empty sections, and rebuild
+ * the fast free lists from scratch out of the sections' free chunk maps
+ * (coalescing adjacent free chunks in the process).
+ */
+void
+sgen_los_sweep (void)
+{
+       LOSObject *bigobj, *prevbo;
+       LOSSection *section, *prev;
+       int i;
+       int num_sections = 0;
+
+       /* sweep the big objects list */
+       prevbo = NULL;
+       for (bigobj = los_object_list; bigobj;) {
+               SGEN_ASSERT (0, !SGEN_OBJECT_IS_PINNED (bigobj->data), "Who pinned a LOS object?");
+
+               /* Mod-union tables only live for the duration of a concurrent
+                * collection; drop them here. */
+               if (bigobj->cardtable_mod_union) {
+                       sgen_card_table_free_mod_union (bigobj->cardtable_mod_union, bigobj->data, bigobj->size);
+                       bigobj->cardtable_mod_union = NULL;
+               }
+
+               if (sgen_los_object_is_pinned (bigobj->data)) {
+                       sgen_los_unpin_object (bigobj->data);
+                       sgen_update_heap_boundaries ((mword)bigobj->data, (mword)bigobj->data + sgen_los_object_size (bigobj));
+               } else {
+                       LOSObject *to_free;
+                       /* not referenced anywhere, so we can free it */
+                       if (prevbo)
+                               prevbo->next = bigobj->next;
+                       else
+                               los_object_list = bigobj->next;
+                       to_free = bigobj;
+                       bigobj = bigobj->next;
+                       sgen_los_free_object (to_free);
+                       continue;
+               }
+               prevbo = bigobj;
+               bigobj = bigobj->next;
+       }
+
+       /* Try to free memory */
+       for (i = 0; i < LOS_NUM_FAST_SIZES; ++i)
+               los_fast_free_lists [i] = NULL;
+
+       prev = NULL;
+       section = los_sections;
+       while (section) {
+               /* Entirely free section: unmap it and return the space. */
+               if (section->num_free_chunks == LOS_SECTION_NUM_CHUNKS) {
+                       LOSSection *next = section->next;
+                       if (prev)
+                               prev->next = next;
+                       else
+                               los_sections = next;
+                       sgen_free_os_memory (section, LOS_SECTION_SIZE, SGEN_ALLOC_HEAP);
+                       sgen_memgov_release_space (LOS_SECTION_SIZE, SPACE_LOS);
+                       section = next;
+                       --los_num_sections;
+                       continue;
+               }
+
+               /* Rebuild the free lists: scan the map for maximal runs of
+                * free chunks and file each run once. */
+               for (i = 0; i <= LOS_SECTION_NUM_CHUNKS; ++i) {
+                       if (section->free_chunk_map [i]) {
+                               int j;
+                               for (j = i + 1; j <= LOS_SECTION_NUM_CHUNKS && section->free_chunk_map [j]; ++j)
+                                       ;
+                               add_free_chunk ((LOSFreeChunks*)((char*)section + (i << LOS_CHUNK_BITS)), (j - i) << LOS_CHUNK_BITS);
+                               i = j - 1;
+                       }
+               }
+
+               prev = section;
+               section = section->next;
+
+               ++num_sections;
+       }
+
+#ifdef LOS_CONSISTENCY_CHECK
+       los_consistency_check ();
+#endif
+
+       /*
+       g_print ("LOS sections: %d  objects: %d  usage: %d\n", num_sections, los_num_objects, los_memory_usage);
+       for (i = 0; i < LOS_NUM_FAST_SIZES; ++i) {
+               int num_chunks = 0;
+               LOSFreeChunks *free_chunks;
+               for (free_chunks = los_fast_free_lists [i]; free_chunks; free_chunks = free_chunks->next_size)
+                       ++num_chunks;
+               g_print ("  %d: %d\n", i, num_chunks);
+       }
+       */
+
+       g_assert (los_num_sections == num_sections);
+}
+
+/*
+ * Return TRUE if PTR points into any live LOS object (interior pointers
+ * included), storing the object's payload start in *START; otherwise
+ * return FALSE with *START set to NULL.  Linear in the number of objects.
+ */
+gboolean
+sgen_ptr_is_in_los (char *ptr, char **start)
+{
+       LOSObject *obj;
+
+       *start = NULL;
+       for (obj = los_object_list; obj; obj = obj->next) {
+               char *end = obj->data + obj->size;
+
+               if (ptr >= obj->data && ptr < end) {
+                       *start = obj->data;
+                       return TRUE;
+               }
+       }
+       return FALSE;
+}
+
+/* Invoke CB (payload, size, user_data) on every live LOS object. */
+void
+sgen_los_iterate_objects (IterateObjectCallbackFunc cb, void *user_data)
+{
+       LOSObject *obj;
+
+       for (obj = los_object_list; obj; obj = obj->next)
+               cb (obj->data, obj->size, user_data);
+}
+
+/* Return TRUE iff OBJECT is exactly the payload start of a live LOS object. */
+gboolean
+sgen_los_is_valid_object (char *object)
+{
+       LOSObject *obj;
+
+       for (obj = los_object_list; obj; obj = obj->next) {
+               if (obj->data == object)
+                       return TRUE;
+       }
+       return FALSE;
+}
+
+/*
+ * Debugging aid: if PTR points into a LOS object, log a description
+ * (kind, size, pin state, interior offset) and return TRUE; otherwise
+ * return FALSE.
+ */
+gboolean
+mono_sgen_los_describe_pointer (char *ptr)
+{
+       LOSObject *obj;
+
+       for (obj = los_object_list; obj; obj = obj->next) {
+               const char *los_kind;
+               mword size;
+               gboolean pinned;
+
+               if (obj->data > ptr || obj->data + obj->size <= ptr)
+                       continue;
+
+               size = sgen_los_object_size (obj);
+               pinned = sgen_los_object_is_pinned (obj->data);
+
+               /* Huge objects were allocated outside sections. */
+               if (size > LOS_SECTION_OBJECT_LIMIT)
+                       los_kind = "huge-los-ptr";
+               else
+                       los_kind = "los-ptr";
+
+               if (obj->data == ptr) {
+                       SGEN_LOG (0, "%s (size %d pin %d)\n", los_kind, (int)size, pinned ? 1 : 0);
+               } else {
+                       SGEN_LOG (0, "%s (interior-ptr offset %td size %d pin %d)",
+                                         los_kind, ptr - obj->data, (int)size, pinned ? 1 : 0);
+               }
+
+               return TRUE;
+       }
+       return FALSE;
+}
+
+/*
+ * Invoke CALLBACK (start, size) for each LOS object whose vtable says it
+ * contains references — i.e. the ranges the card table has to cover.
+ */
+void
+sgen_los_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
+{
+       LOSObject *obj;
+       for (obj = los_object_list; obj; obj = obj->next) {
+               GCVTable *vt = (GCVTable*)SGEN_LOAD_VTABLE (obj->data);
+               if (SGEN_VTABLE_HAS_REFERENCES (vt))
+                       callback ((mword)obj->data, (mword)obj->size);
+       }
+}
+
+/*
+ * Get OBJ's card table mod-union table, lazily allocating it on first use.
+ * Concurrent callers race via CAS on obj->cardtable_mod_union; the loser
+ * frees its freshly allocated table and returns the winner's.
+ */
+static guint8*
+get_cardtable_mod_union_for_object (LOSObject *obj)
+{
+       guint8 *mod_union = obj->cardtable_mod_union;
+       guint8 *other;
+       if (mod_union)
+               return mod_union;
+       mod_union = sgen_card_table_alloc_mod_union (obj->data, obj->size);
+       other = SGEN_CAS_PTR ((gpointer*)&obj->cardtable_mod_union, mod_union, NULL);
+       if (!other) {
+               SGEN_ASSERT (0, obj->cardtable_mod_union == mod_union, "Why did CAS not replace?");
+               return mod_union;
+       }
+       /* Lost the race: discard ours and use the installed table. */
+       sgen_card_table_free_mod_union (mod_union, obj->data, obj->size);
+       return other;
+}
+
+/*
+ * Scan the card table (or, if MOD_UNION, the mod-union tables of pinned
+ * objects) for every reference-bearing LOS object, copying/marking the
+ * referenced objects through CTX.
+ */
+void
+sgen_los_scan_card_table (gboolean mod_union, ScanCopyContext ctx)
+{
+       LOSObject *obj;
+
+       for (obj = los_object_list; obj; obj = obj->next) {
+               guint8 *cards;
+
+               if (!SGEN_OBJECT_HAS_REFERENCES (obj->data))
+                       continue;
+
+               if (mod_union) {
+                       /* Only pinned (live) objects matter for the mod-union pass. */
+                       if (!sgen_los_object_is_pinned (obj->data))
+                               continue;
+
+                       cards = get_cardtable_mod_union_for_object (obj);
+                       g_assert (cards);
+               } else {
+                       /* NULL tells the scanner to read the global card table. */
+                       cards = NULL;
+               }
+
+               sgen_cardtable_scan_object (obj->data, obj->size, cards, mod_union, ctx);
+       }
+}
+
+/*
+ * Statistics helper: count the total and marked card-table cards covering
+ * reference-bearing LOS objects, returned via the two out parameters.
+ */
+void
+sgen_los_count_cards (long long *num_total_cards, long long *num_marked_cards)
+{
+       LOSObject *obj;
+       long long total_cards = 0;
+       long long marked_cards = 0;
+
+       for (obj = los_object_list; obj; obj = obj->next) {
+               int i;
+               guint8 *cards = sgen_card_table_get_card_scan_address ((mword) obj->data);
+               guint8 *cards_end = sgen_card_table_get_card_scan_address ((mword) obj->data + obj->size - 1);
+               mword num_cards = (cards_end - cards) + 1;
+
+               if (!SGEN_OBJECT_HAS_REFERENCES (obj->data))
+                       continue;
+
+               total_cards += num_cards;
+               for (i = 0; i < num_cards; ++i) {
+                       if (cards [i])
+                               ++marked_cards;
+               }
+       }
+
+       *num_total_cards = total_cards;
+       *num_marked_cards = marked_cards;
+}
+
+/*
+ * Fold the current card table state into the mod-union table of every
+ * reference-bearing LOS object (allocating tables as needed).
+ */
+void
+sgen_los_update_cardtable_mod_union (void)
+{
+       LOSObject *obj;
+
+       for (obj = los_object_list; obj; obj = obj->next) {
+               if (!SGEN_OBJECT_HAS_REFERENCES (obj->data))
+                       continue;
+               sgen_card_table_update_mod_union (get_cardtable_mod_union_for_object (obj),
+                               obj->data, obj->size, NULL);
+       }
+}
+
+/* Object size with the pin flag (low bit of `size`) masked off. */
+mword
+sgen_los_object_size (LOSObject *obj)
+{
+       return obj->size & ~1L;
+}
+
+/*
+ * Map an object payload pointer back to its LOSObject header, which sits
+ * immediately before the payload.
+ */
+LOSObject*
+sgen_los_header_for_object (char *data)
+{
+#if _MSC_VER
+       /* MSVC: compute the offset of the `data` field explicitly. */
+       return (LOSObject*)(data - (int)(&(((LOSObject*)0)->data)));
+#else
+       return (LOSObject*)(data - sizeof (LOSObject));
+#endif
+}
+
+/* Pin the LOS object at DATA by setting the low bit of its size field. */
+void
+sgen_los_pin_object (char *data)
+{
+       LOSObject *obj = sgen_los_header_for_object (data);
+       obj->size = obj->size | 1;
+       binary_protocol_pin (data, (gpointer)SGEN_LOAD_VTABLE (data), sgen_safe_object_get_size ((GCObject*)data));
+}
+
+/* Clear the pin flag (low bit of the size field) of the object at DATA. */
+static void
+sgen_los_unpin_object (char *data)
+{
+       LOSObject *obj = sgen_los_header_for_object (data);
+       obj->size = sgen_los_object_size (obj);
+}
+
+/* Return the pin flag (low bit of the size field) of the object at DATA. */
+gboolean
+sgen_los_object_is_pinned (char *data)
+{
+       LOSObject *obj = sgen_los_header_for_object (data);
+       return obj->size & 1;
+}
+
+/*
+ * Mark, in MONO_OBJ's mod-union table, the card covering the reference
+ * slot PTR.  Relies on LOS objects being card-aligned so the object start
+ * can serve as the card base.
+ */
+void
+sgen_los_mark_mod_union_card (GCObject *mono_obj, void **ptr)
+{
+       LOSObject *obj = sgen_los_header_for_object ((char*)mono_obj);
+       guint8 *mod_union = get_cardtable_mod_union_for_object (obj);
+       size_t offset = sgen_card_table_get_card_offset ((char*)ptr, (char*)sgen_card_table_align_pointer ((char*)obj));
+       SGEN_ASSERT (0, mod_union, "FIXME: optionally allocate the mod union if it's not here and CAS it in.");
+       SGEN_ASSERT (0, (char*)obj == (char*)sgen_card_table_align_pointer ((char*)obj), "Why are LOS objects not card aligned?");
+       mod_union [offset] = 1;
+}
+
+#endif /* HAVE_SGEN_GC */
diff --git a/mono/sgen/sgen-major-copy-object.h b/mono/sgen/sgen-major-copy-object.h
new file mode 100644 (file)
index 0000000..a4b10ff
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * sgen-major-copy-object.h: Object copying in the major collectors.
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Pin OBJ during a major collection: nursery objects via sgen_pin_object,
+ * major-heap objects via the collector's pin_major_object.  A local
+ * `objsize` must be in scope at the expansion site.
+ */
+#define collector_pin_object(obj, queue) do { \
+       if (sgen_ptr_in_nursery (obj)) {        \
+               sgen_pin_object (obj, queue);   \
+       } else {        \
+               g_assert (objsize <= SGEN_MAX_SMALL_OBJ_SIZE);  \
+               pin_major_object (obj, queue);  \
+       }       \
+} while (0)
+
+/* Serial promotion allocates out of the minor collector's space. */
+#define COLLECTOR_SERIAL_ALLOC_FOR_PROMOTION sgen_minor_collector.alloc_for_promotion
+
+#include "sgen-copy-object.h"
diff --git a/mono/sgen/sgen-marksweep-drain-gray-stack.h b/mono/sgen/sgen-marksweep-drain-gray-stack.h
new file mode 100644 (file)
index 0000000..ebfb250
--- /dev/null
@@ -0,0 +1,254 @@
+/*
+ * sgen-marksweep-drain-gray-stack.h: The copy/mark and gray stack
+ *     draining functions of the M&S major collector.
+ *
+ * Copyright (C) 2014 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * COPY_OR_MARK_FUNCTION_NAME must be defined to be the function name of the copy/mark
+ * function.
+ *
+ * SCAN_OBJECT_FUNCTION_NAME must be defined to be the function name of the object scanning
+ * function.
+ *
+ * DRAIN_GRAY_STACK_FUNCTION_NAME must be defined to be the function name of the gray stack
+ * draining function.
+ *
+ * Define COPY_OR_MARK_WITH_EVACUATION to support evacuation.
+ */
+
+/* Returns whether the object is still in the nursery.
+ *
+ * Copy or mark OBJ (referenced from slot PTR), updating *PTR if the object
+ * moves and graying newly marked objects onto QUEUE.  Nursery objects are
+ * copied (or pinned/forwarded state is honored); major-heap small objects
+ * are mark-bit marked (optionally evacuated); LOS objects are pinned. */
+static inline MONO_ALWAYS_INLINE gboolean
+COPY_OR_MARK_FUNCTION_NAME (void **ptr, void *obj, SgenGrayQueue *queue)
+{
+       MSBlockInfo *block;
+
+#ifdef HEAVY_STATISTICS
+       ++stat_optimized_copy;
+       {
+               char *forwarded;
+               mword desc;
+               if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj)))
+                       desc = sgen_obj_get_descriptor_safe (forwarded);
+               else
+                       desc = sgen_obj_get_descriptor_safe (obj);
+
+               sgen_descriptor_count_copied_object (desc);
+       }
+#endif
+
+       SGEN_ASSERT (9, obj, "null object from pointer %p", ptr);
+       SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);
+
+       if (sgen_ptr_in_nursery (obj)) {
+               int word, bit;
+               char *forwarded, *old_obj;
+               mword vtable_word = *(mword*)obj;
+
+               HEAVY_STAT (++stat_optimized_copy_nursery);
+
+#if SGEN_MAX_DEBUG_LEVEL >= 9
+               if (sgen_nursery_is_to_space (obj))
+                       SGEN_ASSERT (9, !SGEN_VTABLE_IS_PINNED (vtable_word) && !SGEN_VTABLE_IS_FORWARDED (vtable_word), "To-space object can't be pinned or forwarded.");
+#endif
+
+               /* Pinned nursery objects stay in place; nothing to update. */
+               if (SGEN_VTABLE_IS_PINNED (vtable_word)) {
+                       SGEN_ASSERT (9, !SGEN_VTABLE_IS_FORWARDED (vtable_word), "Cannot be both pinned and forwarded.");
+                       HEAVY_STAT (++stat_optimized_copy_nursery_pinned);
+                       return TRUE;
+               }
+               /* Already copied by someone else: just chase the forwarder. */
+               if ((forwarded = SGEN_VTABLE_IS_FORWARDED (vtable_word))) {
+                       HEAVY_STAT (++stat_optimized_copy_nursery_forwarded);
+                       SGEN_UPDATE_REFERENCE (ptr, forwarded);
+                       return sgen_ptr_in_nursery (forwarded);
+               }
+
+               /* An object in the nursery To Space has already been copied and grayed. Nothing to do. */
+               if (sgen_nursery_is_to_space (obj))
+                       return TRUE;
+
+#ifdef COPY_OR_MARK_WITH_EVACUATION
+       do_copy_object:
+#endif
+               old_obj = obj;
+               obj = copy_object_no_checks (obj, queue);
+               if (G_UNLIKELY (old_obj == obj)) {
+                       /*
+                        * If we fail to evacuate an object we just stop doing it for a
+                        * given block size as all other will surely fail too.
+                        */
+                       /* FIXME: test this case somehow. */
+                       if (!sgen_ptr_in_nursery (obj)) {
+                               int size_index;
+                               block = MS_BLOCK_FOR_OBJ (obj);
+                               size_index = block->obj_size_index;
+                               evacuate_block_obj_sizes [size_index] = FALSE;
+                               MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
+                               return FALSE;
+                       }
+                       return TRUE;
+               }
+               HEAVY_STAT (++stat_objects_copied_major);
+               SGEN_UPDATE_REFERENCE (ptr, obj);
+
+               if (sgen_ptr_in_nursery (obj))
+                       return TRUE;
+
+               /*
+                * FIXME: See comment for copy_object_no_checks().  If
+                * we have that, we can let the allocation function
+                * give us the block info, too, and we won't have to
+                * re-fetch it.
+                *
+                * FIXME (2): We should rework this to avoid all those nursery checks.
+                */
+               /*
+                * For the split nursery allocator the object might
+                * still be in the nursery despite having being
+                * promoted, in which case we can't mark it.
+                */
+               block = MS_BLOCK_FOR_OBJ (obj);
+               MS_CALC_MARK_BIT (word, bit, obj);
+               SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p already marked", obj);
+               MS_SET_MARK_BIT (block, word, bit);
+               binary_protocol_mark (obj, (gpointer)LOAD_VTABLE (obj), sgen_safe_object_get_size ((GCObject*)obj));
+
+               return FALSE;
+       } else {
+               /* Major-heap object: mark (or evacuate) rather than copy. */
+               mword vtable_word = *(mword*)obj;
+               mword desc;
+               int type;
+
+               HEAVY_STAT (++stat_optimized_copy_major);
+
+#ifdef COPY_OR_MARK_WITH_EVACUATION
+               {
+                       char *forwarded;
+                       if ((forwarded = SGEN_VTABLE_IS_FORWARDED (vtable_word))) {
+                               HEAVY_STAT (++stat_optimized_copy_major_forwarded);
+                               SGEN_UPDATE_REFERENCE (ptr, forwarded);
+                               SGEN_ASSERT (9, !sgen_ptr_in_nursery (forwarded), "Cannot be forwarded to nursery.");
+                               return FALSE;
+                       }
+               }
+#endif
+
+               SGEN_ASSERT (9, !SGEN_VTABLE_IS_PINNED (vtable_word), "Pinned object in non-pinned block?");
+
+               desc = sgen_vtable_get_descriptor ((GCVTable*)vtable_word);
+               type = desc & DESC_TYPE_MASK;
+
+               if (sgen_safe_object_is_small ((GCObject*)obj, type)) {
+#ifdef HEAVY_STATISTICS
+                       if (type <= DESC_TYPE_MAX_SMALL_OBJ)
+                               ++stat_optimized_copy_major_small_fast;
+                       else
+                               ++stat_optimized_copy_major_small_slow;
+#endif
+
+                       block = MS_BLOCK_FOR_OBJ (obj);
+
+#ifdef COPY_OR_MARK_WITH_EVACUATION
+                       {
+                               int size_index = block->obj_size_index;
+
+                               /* Evacuate objects of this size class unless the block
+                                * has pinned objects or is itself a to-space block. */
+                               if (evacuate_block_obj_sizes [size_index] && !block->has_pinned) {
+                                       HEAVY_STAT (++stat_optimized_copy_major_small_evacuate);
+                                       if (block->is_to_space)
+                                               return FALSE;
+                                       goto do_copy_object;
+                               }
+                       }
+#endif
+
+                       MS_MARK_OBJECT_AND_ENQUEUE (obj, desc, block, queue);
+               } else {
+                       /* LOS objects never move; pin them and gray if needed. */
+                       HEAVY_STAT (++stat_optimized_copy_major_large);
+
+                       if (sgen_los_object_is_pinned (obj))
+                               return FALSE;
+                       binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size ((GCObject*)obj));
+
+                       sgen_los_pin_object (obj);
+                       if (SGEN_OBJECT_HAS_REFERENCES (obj))
+                               GRAY_OBJECT_ENQUEUE (queue, obj, sgen_obj_get_descriptor (obj));
+               }
+               return FALSE;
+       }
+       SGEN_ASSERT (0, FALSE, "How is this happening?");
+       return FALSE;
+}
+
+/*
+ * Scan every reference slot of `obj` (layout described by `desc`), invoking
+ * COPY_OR_MARK_FUNCTION_NAME on each non-NULL reference.  References that are
+ * still in the nursery after copy/mark, held by a non-nursery, non-cemented
+ * location, are recorded in the global remembered set.  The actual field
+ * iteration is generated by including sgen-scan-object.h with HANDLE_PTR set.
+ */
+static void
+SCAN_OBJECT_FUNCTION_NAME (char *obj, mword desc, SgenGrayQueue *queue)
+{
+       char *start = obj;
+
+#ifdef HEAVY_STATISTICS
+       ++stat_optimized_major_scan;
+       if (!sgen_gc_descr_has_references (desc))
+               ++stat_optimized_major_scan_no_refs;
+       sgen_descriptor_count_scanned_object (desc);
+#endif
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+       add_scanned_object (start);
+#endif
+
+       /* Now scan the object. */
+
+#undef HANDLE_PTR
+#define HANDLE_PTR(ptr,obj)    do {                                    \
+               void *__old = *(ptr);                                   \
+               binary_protocol_scan_process_reference ((obj), (ptr), __old); \
+               if (__old) {                                            \
+                       gboolean __still_in_nursery = COPY_OR_MARK_FUNCTION_NAME ((ptr), __old, queue); \
+                       if (G_UNLIKELY (__still_in_nursery && !sgen_ptr_in_nursery ((ptr)) && !SGEN_OBJECT_IS_CEMENTED (*(ptr)))) { \
+                               void *__copy = *(ptr);                  \
+                               sgen_add_to_global_remset ((ptr), __copy); \
+                       }                                               \
+               }                                                       \
+       } while (0)
+
+#define SCAN_OBJECT_PROTOCOL
+#include "sgen-scan-object.h"
+}
+
+/*
+ * Drain the gray queue: dequeue objects and scan them with
+ * SCAN_OBJECT_FUNCTION_NAME until the queue is empty.  Returns TRUE when the
+ * queue has been fully drained (the only way this loop exits).
+ */
+static gboolean
+DRAIN_GRAY_STACK_FUNCTION_NAME (ScanCopyContext ctx)
+{
+       SgenGrayQueue *queue = ctx.queue;
+
+       SGEN_ASSERT (0, ctx.ops->scan_object == major_scan_object_with_evacuation, "Wrong scan function");
+
+       for (;;) {
+               char *obj;
+               mword desc;
+
+               HEAVY_STAT (++stat_drain_loops);
+
+               /* A NULL dequeue result means the gray queue is empty. */
+               GRAY_OBJECT_DEQUEUE (queue, &obj, &desc);
+               if (!obj)
+                       return TRUE;
+
+               SCAN_OBJECT_FUNCTION_NAME (obj, desc, ctx.queue);
+       }
+}
+
+#undef COPY_OR_MARK_FUNCTION_NAME
+#undef COPY_OR_MARK_WITH_EVACUATION
+#undef SCAN_OBJECT_FUNCTION_NAME
+#undef DRAIN_GRAY_STACK_FUNCTION_NAME
diff --git a/mono/sgen/sgen-marksweep-scan-object-concurrent.h b/mono/sgen/sgen-marksweep-scan-object-concurrent.h
new file mode 100644 (file)
index 0000000..bdad973
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * sgen-major-scan-object.h: Object scanning in the major collectors.
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+extern guint64 stat_scan_object_called_major;
+
+/*
+ * FIXME: We use the same scanning function in the concurrent collector whether we scan
+ * during the starting/finishing collection pause (with the world stopped) or from the
+ * concurrent worker thread.
+ *
+ * As long as the world is stopped, we should just follow pointers into the nursery and
+ * evict if possible.  In that case we also don't need the ALWAYS_ADD_TO_GLOBAL_REMSET case,
+ * which only seems to make sense for when the world is stopped, in which case we only need
+ * it because we don't follow into the nursery.
+ */
+
+#undef HANDLE_PTR
+#define HANDLE_PTR(ptr,obj)    do {                                    \
+               void *__old = *(ptr);                                   \
+               SGEN_OBJECT_LAYOUT_STATISTICS_MARK_BITMAP ((obj), (ptr)); \
+               binary_protocol_scan_process_reference ((obj), (ptr), __old); \
+               if (__old && !sgen_ptr_in_nursery (__old)) {            \
+                       PREFETCH_READ (__old);                  \
+                       major_copy_or_mark_object_concurrent ((ptr), __old, queue); \
+               } else {                                                \
+                       if (G_UNLIKELY (sgen_ptr_in_nursery (__old) && !sgen_ptr_in_nursery ((ptr)))) \
+                               ADD_TO_GLOBAL_REMSET ((GCObject*)(full_object), (ptr), __old); \
+               }                                                       \
+       } while (0)
+
+/* FIXME: Unify this with optimized code in sgen-marksweep.c. */
+
+#undef ADD_TO_GLOBAL_REMSET
+#define ADD_TO_GLOBAL_REMSET(object,ptr,target)        mark_mod_union_card ((object), (ptr))
+
+/*
+ * Shared worker for the concurrent scanning entry points: scans the reference
+ * fields of `full_object` using the HANDLE_PTR defined above (which only
+ * follows non-nursery references; nursery references held outside the nursery
+ * are recorded via ADD_TO_GLOBAL_REMSET, i.e. the mod-union card table here).
+ */
+static void
+major_scan_object_no_mark_concurrent_anywhere (char *full_object, mword desc, SgenGrayQueue *queue)
+{
+       char *start = full_object;
+
+       SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP;
+
+#ifdef HEAVY_STATISTICS
+       sgen_descriptor_count_scanned_object (desc);
+#endif
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+       add_scanned_object (start);
+#endif
+
+#define SCAN_OBJECT_PROTOCOL
+#include "sgen-scan-object.h"
+
+       SGEN_OBJECT_LAYOUT_STATISTICS_COMMIT_BITMAP;
+       HEAVY_STAT (++stat_scan_object_called_major);
+}
+
+/*
+ * Concurrent-scan entry used during the starting pause; unlike the
+ * worker-thread variant below it does not assert against nursery objects.
+ */
+static void
+major_scan_object_no_mark_concurrent_start (char *start, mword desc, SgenGrayQueue *queue)
+{
+       major_scan_object_no_mark_concurrent_anywhere (start, desc, queue);
+}
+
+/*
+ * Concurrent-scan entry for the concurrent worker thread; nursery objects
+ * must never reach it, which the assertion enforces.
+ */
+static void
+major_scan_object_no_mark_concurrent (char *start, mword desc, SgenGrayQueue *queue)
+{
+       SGEN_ASSERT (0, !sgen_ptr_in_nursery (start), "Why are we scanning nursery objects in the concurrent collector?");
+       major_scan_object_no_mark_concurrent_anywhere (start, desc, queue);
+}
+
+#undef ADD_TO_GLOBAL_REMSET
+#define ADD_TO_GLOBAL_REMSET(object,ptr,target)        sgen_add_to_global_remset ((ptr), (target))
+
+/*
+ * Scan an embedded value type at `start` inside `full_object` during the
+ * finishing pause.  Uses the regular global remset (ADD_TO_GLOBAL_REMSET was
+ * redefined just above) and the NOVTABLE variant of sgen-scan-object.h since
+ * a vtype has no vtable of its own.
+ */
+static void
+major_scan_vtype_concurrent_finish (char *full_object, char *start, mword desc, SgenGrayQueue *queue BINARY_PROTOCOL_ARG (size_t size))
+{
+       SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP;
+
+#ifdef HEAVY_STATISTICS
+       /* FIXME: We're half scanning this object.  How do we account for that? */
+       //add_scanned_object (start);
+#endif
+
+       /* The descriptors include info about the object header as well */
+       start -= SGEN_CLIENT_OBJECT_HEADER_SIZE;
+
+#define SCAN_OBJECT_NOVTABLE
+#define SCAN_OBJECT_PROTOCOL
+#include "sgen-scan-object.h"
+
+       SGEN_OBJECT_LAYOUT_STATISTICS_COMMIT_BITMAP;
+}
diff --git a/mono/sgen/sgen-marksweep.c b/mono/sgen/sgen-marksweep.c
new file mode 100644 (file)
index 0000000..cbc8064
--- /dev/null
@@ -0,0 +1,2548 @@
+/*
+ * sgen-marksweep.c: The Mark & Sweep major collector.
+ *
+ * Author:
+ *     Mark Probst <mark.probst@gmail.com>
+ *
+ * Copyright 2009-2010 Novell, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "config.h"
+
+#ifdef HAVE_SGEN_GC
+
+#include <math.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-protocol.h"
+#include "mono/sgen/sgen-cardtable.h"
+#include "mono/sgen/sgen-memory-governor.h"
+#include "mono/sgen/sgen-layout-stats.h"
+#include "mono/sgen/sgen-pointer-queue.h"
+#include "mono/sgen/sgen-pinning.h"
+#include "mono/sgen/sgen-workers.h"
+#include "mono/sgen/sgen-thread-pool.h"
+#include "mono/sgen/sgen-client.h"
+#include "mono/utils/mono-membar.h"
+
+#if defined(ARCH_MIN_MS_BLOCK_SIZE) && defined(ARCH_MIN_MS_BLOCK_SIZE_SHIFT)
+#define MS_BLOCK_SIZE  ARCH_MIN_MS_BLOCK_SIZE
+#define MS_BLOCK_SIZE_SHIFT    ARCH_MIN_MS_BLOCK_SIZE_SHIFT
+#else
+#define MS_BLOCK_SIZE_SHIFT     14      /* INT FASTENABLE */
+#define MS_BLOCK_SIZE           (1 << MS_BLOCK_SIZE_SHIFT)
+#endif
+#define MAJOR_SECTION_SIZE     MS_BLOCK_SIZE
+#define CARDS_PER_BLOCK (MS_BLOCK_SIZE / CARD_SIZE_IN_BYTES)
+
+/*
+ * Don't allocate single blocks, but alloc a contingent of this many
+ * blocks in one swoop.  This must be a power of two.
+ */
+#define MS_BLOCK_ALLOC_NUM     32
+
+/*
+ * Number of bytes before the first object in a block.  At the start
+ * of a block is the MSBlockHeader, then optional padding, then come
+ * the objects, so this must be >= sizeof (MSBlockHeader).
+ */
+#define MS_BLOCK_SKIP  ((sizeof (MSBlockHeader) + 15) & ~15)
+
+#define MS_BLOCK_FREE  (MS_BLOCK_SIZE - MS_BLOCK_SKIP)
+
+#define MS_NUM_MARK_WORDS      ((MS_BLOCK_SIZE / SGEN_ALLOC_ALIGN + sizeof (mword) * 8 - 1) / (sizeof (mword) * 8))
+
+/*
+ * Blocks progress from one state to the next:
+ *
+ * SWEPT           The block is fully swept.  It might or might not be in
+ *                 a free list.
+ *
+ * MARKING         The block might or might not contain live objects.  If
+ *                 we're in between an initial collection pause and the
+ *                 finishing pause, the block might or might not be in a
+ *                 free list.
+ *
+ * CHECKING        The sweep thread is investigating the block to determine
+ *                 whether or not it contains live objects.  The block is
+ *                 not in a free list.
+ *
+ * NEED_SWEEPING   The block contains live objects but has not yet been
+ *                 swept.  It also contains free slots.  It is in a block
+ *                 free list.
+ *
+ * SWEEPING        The block is being swept.  It might be in a free list.
+ */
+
+enum {
+       BLOCK_STATE_SWEPT,
+       BLOCK_STATE_MARKING,
+       BLOCK_STATE_CHECKING,
+       BLOCK_STATE_NEED_SWEEPING,
+       BLOCK_STATE_SWEEPING
+};
+
+/* Per-block metadata, stored at the start of each MS_BLOCK_SIZE block. */
+typedef struct _MSBlockInfo MSBlockInfo;
+struct _MSBlockInfo {
+       /* size of each object slot in this block */
+       guint16 obj_size;
+       /*
+        * FIXME: Do we even need this? It's only used during sweep and might be worth
+        * recalculating to save the space.
+        */
+       guint16 obj_size_index;
+       /* FIXME: Reduce this - it only needs a byte. */
+       volatile gint32 state;
+       unsigned int pinned : 1;
+       unsigned int has_references : 1;
+       unsigned int has_pinned : 1;    /* means cannot evacuate */
+       unsigned int is_to_space : 1;
+       /* singly-linked list of free slots inside this block */
+       void ** volatile free_list;
+       /* next block in the per-size free-block list */
+       MSBlockInfo * volatile next_free;
+       /* lazily allocated mod-union card table for concurrent marking */
+       guint8 * volatile cardtable_mod_union;
+       /* one mark bit per SGEN_ALLOC_ALIGN-sized unit in the block */
+       mword mark_words [MS_NUM_MARK_WORDS];
+};
+
+#define MS_BLOCK_FOR_BLOCK_INFO(b)     ((char*)(b))
+
+#define MS_BLOCK_OBJ(b,i)              (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (b)->obj_size * (i))
+#define MS_BLOCK_OBJ_FOR_SIZE(b,i,obj_size)            (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (obj_size) * (i))
+#define MS_BLOCK_DATA_FOR_OBJ(o)       ((char*)((mword)(o) & ~(mword)(MS_BLOCK_SIZE - 1)))
+
+typedef struct {
+       MSBlockInfo info;
+} MSBlockHeader;
+
+#define MS_BLOCK_FOR_OBJ(o)            (&((MSBlockHeader*)MS_BLOCK_DATA_FOR_OBJ ((o)))->info)
+
+/* object index will always be small */
+#define MS_BLOCK_OBJ_INDEX(o,b)        ((int)(((char*)(o) - (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP)) / (b)->obj_size))
+
+//casting to int is fine since blocks are 32k
+#define MS_CALC_MARK_BIT(w,b,o)        do {                            \
+               int i = ((int)((char*)(o) - MS_BLOCK_DATA_FOR_OBJ ((o)))) >> SGEN_ALLOC_ALIGN_BITS; \
+               if (sizeof (mword) == 4) {                              \
+                       (w) = i >> 5;                                   \
+                       (b) = i & 31;                                   \
+               } else {                                                \
+                       (w) = i >> 6;                                   \
+                       (b) = i & 63;                                   \
+               }                                                       \
+       } while (0)
+
+#define MS_MARK_BIT(bl,w,b)    ((bl)->mark_words [(w)] & (ONE_P << (b)))
+#define MS_SET_MARK_BIT(bl,w,b)        ((bl)->mark_words [(w)] |= (ONE_P << (b)))
+
+#define MS_OBJ_ALLOCED(o,b)    (*(void**)(o) && (*(char**)(o) < MS_BLOCK_FOR_BLOCK_INFO (b) || *(char**)(o) >= MS_BLOCK_FOR_BLOCK_INFO (b) + MS_BLOCK_SIZE))
+
+#define MS_BLOCK_OBJ_SIZE_FACTOR       (pow (2.0, 1.0 / 3))
+
+/*
+ * This way we can lookup block object size indexes for sizes up to
+ * 256 bytes with a single load.
+ */
+#define MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES     32
+
+static int *block_obj_sizes;
+static int num_block_obj_sizes;
+static int fast_block_obj_size_indexes [MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES];
+
+#define MS_BLOCK_FLAG_PINNED   1
+#define MS_BLOCK_FLAG_REFS     2
+
+#define MS_BLOCK_TYPE_MAX      4
+
+static gboolean *evacuate_block_obj_sizes;
+static float evacuation_threshold = 0.666f;
+static float concurrent_evacuation_threshold = 0.666f;
+static gboolean want_evacuation = FALSE;
+
+static gboolean lazy_sweep = FALSE;
+
+enum {
+       SWEEP_STATE_SWEPT,
+       SWEEP_STATE_NEED_SWEEPING,
+       SWEEP_STATE_SWEEPING,
+       SWEEP_STATE_SWEEPING_AND_ITERATING,
+       SWEEP_STATE_COMPACTING
+};
+
+static volatile int sweep_state = SWEEP_STATE_SWEPT;
+
+static gboolean concurrent_mark;
+static gboolean concurrent_sweep = TRUE;
+
+#define BLOCK_IS_TAGGED_HAS_REFERENCES(bl)     SGEN_POINTER_IS_TAGGED_1 ((bl))
+#define BLOCK_TAG_HAS_REFERENCES(bl)           SGEN_POINTER_TAG_1 ((bl))
+
+#define BLOCK_IS_TAGGED_CHECKING(bl)           SGEN_POINTER_IS_TAGGED_2 ((bl))
+#define BLOCK_TAG_CHECKING(bl)                 SGEN_POINTER_TAG_2 ((bl))
+
+#define BLOCK_UNTAG(bl)                                SGEN_POINTER_UNTAG_12 ((bl))
+
+#define BLOCK_TAG(bl)                          ((bl)->has_references ? BLOCK_TAG_HAS_REFERENCES ((bl)) : (bl))
+
+/* all allocated blocks in the system */
+static SgenPointerQueue allocated_blocks;
+
+/* non-allocated block free-list */
+static void *empty_blocks = NULL;
+static size_t num_empty_blocks = 0;
+
+#define FOREACH_BLOCK_NO_LOCK_CONDITION(cond,bl) {                     \
+       size_t __index;                                                 \
+       SGEN_ASSERT (0, (cond) && !sweep_in_progress (), "Can't iterate blocks while the world is running or sweep is in progress."); \
+       for (__index = 0; __index < allocated_blocks.next_slot; ++__index) { \
+               (bl) = BLOCK_UNTAG (allocated_blocks.data [__index]);
+#define FOREACH_BLOCK_NO_LOCK(bl)                                      \
+       FOREACH_BLOCK_NO_LOCK_CONDITION(sgen_is_world_stopped (), bl)
+#define FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK(bl,hr) {                  \
+       size_t __index;                                                 \
+       SGEN_ASSERT (0, sgen_is_world_stopped () && !sweep_in_progress (), "Can't iterate blocks while the world is running or sweep is in progress."); \
+       for (__index = 0; __index < allocated_blocks.next_slot; ++__index) { \
+               (bl) = allocated_blocks.data [__index];                 \
+               (hr) = BLOCK_IS_TAGGED_HAS_REFERENCES ((bl));           \
+               (bl) = BLOCK_UNTAG ((bl));
+#define END_FOREACH_BLOCK_NO_LOCK      } }
+
+static volatile size_t num_major_sections = 0;
+/*
+ * One free block list for each block object size.  We add and remove blocks from these
+ * lists lock-free via CAS.
+ *
+ * Blocks accessed/removed from `free_block_lists`:
+ *   from the mutator (with GC lock held)
+ *   in nursery collections
+ *   in non-concurrent major collections
+ *   in the finishing pause of concurrent major collections (whole list is cleared)
+ *
+ * Blocks added to `free_block_lists`:
+ *   in the sweeping thread
+ *   during nursery collections
+ *   from domain clearing (with the world stopped and no sweeping happening)
+ *
+ * The only item of those that doesn't require the GC lock is the sweep thread.  The sweep
+ * thread only ever adds blocks to the free list, so the ABA problem can't occur.
+ */
+static MSBlockInfo * volatile *free_block_lists [MS_BLOCK_TYPE_MAX];
+
+static guint64 stat_major_blocks_alloced = 0;
+static guint64 stat_major_blocks_freed = 0;
+static guint64 stat_major_blocks_lazy_swept = 0;
+static guint64 stat_major_objects_evacuated = 0;
+
+#if SIZEOF_VOID_P != 8
+static guint64 stat_major_blocks_freed_ideal = 0;
+static guint64 stat_major_blocks_freed_less_ideal = 0;
+static guint64 stat_major_blocks_freed_individual = 0;
+static guint64 stat_major_blocks_alloced_less_ideal = 0;
+#endif
+
+#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
+static guint64 num_major_objects_marked = 0;
+#define INC_NUM_MAJOR_OBJECTS_MARKED() (++num_major_objects_marked)
+#else
+#define INC_NUM_MAJOR_OBJECTS_MARKED()
+#endif
+
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+static mono_mutex_t scanned_objects_list_lock;
+static SgenPointerQueue scanned_objects_list;
+
+/*
+ * Record `ptr` in the scanned-objects list for heavy binary-protocol
+ * debugging.  No-op unless the binary protocol is enabled; the list is
+ * protected by scanned_objects_list_lock since scanners run concurrently.
+ */
+static void
+add_scanned_object (void *ptr)
+{
+       if (!binary_protocol_is_enabled ())
+               return;
+
+       mono_mutex_lock (&scanned_objects_list_lock);
+       sgen_pointer_queue_add (&scanned_objects_list, ptr);
+       mono_mutex_unlock (&scanned_objects_list_lock);
+}
+
+static gboolean sweep_block (MSBlockInfo *block);
+
+/*
+ * Linear search for the smallest block object size class that fits `size`.
+ * Slow path behind the MS_BLOCK_OBJ_SIZE_INDEX fast-lookup table; aborts if
+ * no size class is large enough.
+ */
+static int
+ms_find_block_obj_size_index (size_t size)
+{
+       int i;
+       SGEN_ASSERT (9, size <= SGEN_MAX_SMALL_OBJ_SIZE, "size %zd is bigger than max small object size %d", size, SGEN_MAX_SMALL_OBJ_SIZE);
+       for (i = 0; i < num_block_obj_sizes; ++i)
+               if (block_obj_sizes [i] >= size)
+                       return i;
+       g_error ("no object of size %zd\n", size);
+       return -1;
+}
+
+#define FREE_BLOCKS_FROM(lists,p,r)    (lists [((p) ? MS_BLOCK_FLAG_PINNED : 0) | ((r) ? MS_BLOCK_FLAG_REFS : 0)])
+#define FREE_BLOCKS(p,r)               (FREE_BLOCKS_FROM (free_block_lists, (p), (r)))
+
+#define MS_BLOCK_OBJ_SIZE_INDEX(s)                             \
+       (((s)+7)>>3 < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES ?      \
+        fast_block_obj_size_indexes [((s)+7)>>3] :             \
+        ms_find_block_obj_size_index ((s)))
+
+/*
+ * Allocate the nursery memory for this major collector, aligned if
+ * `nursery_align` is non-zero.  `the_nursery_bits` is unused here.
+ * Returns the start address of the nursery.
+ */
+static void*
+major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
+{
+       char *start;
+       if (nursery_align)
+               start = sgen_alloc_os_memory_aligned (nursery_size, nursery_align, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");
+       else
+               start = sgen_alloc_os_memory (nursery_size, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");
+
+       return start;
+}
+
+/* Extend the recorded heap boundaries to cover `block`'s address range. */
+static void
+update_heap_boundaries_for_block (MSBlockInfo *block)
+{
+       sgen_update_heap_boundaries ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), (mword)MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE);
+}
+
+/*
+ * Thread safe
+ */
+/*
+ * Pop one block off the lock-free `empty_blocks` list, allocating a batch of
+ * fresh OS memory (up to MS_BLOCK_ALLOC_NUM blocks, halving on failure) when
+ * the list is empty.  Thread safe: both refill and pop use CAS loops.
+ * Returns a MS_BLOCK_SIZE-aligned block with its link word cleared.
+ */
+static void*
+ms_get_empty_block (void)
+{
+       char *p;
+       int i;
+       void *block, *empty, *next;
+
+ retry:
+       if (!empty_blocks) {
+               /*
+                * We try allocating MS_BLOCK_ALLOC_NUM blocks first.  If that's
+                * unsuccessful, we halve the number of blocks and try again, until we're at
+                * 1.  If that doesn't work, either, we assert.
+                */
+               int alloc_num = MS_BLOCK_ALLOC_NUM;
+               for (;;) {
+                       p = sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * alloc_num, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE,
+                                       alloc_num == 1 ? "major heap section" : NULL);
+                       if (p)
+                               break;
+                       alloc_num >>= 1;
+               }
+
+               for (i = 0; i < alloc_num; ++i) {
+                       block = p;
+                       /*
+                        * We do the free list update one after the
+                        * other so that other threads can use the new
+                        * blocks as quickly as possible.
+                        */
+                       do {
+                               empty = empty_blocks;
+                               *(void**)block = empty;
+                       } while (SGEN_CAS_PTR ((gpointer*)&empty_blocks, block, empty) != empty);
+                       p += MS_BLOCK_SIZE;
+               }
+
+               SGEN_ATOMIC_ADD_P (num_empty_blocks, alloc_num);
+
+               stat_major_blocks_alloced += alloc_num;
+#if SIZEOF_VOID_P != 8
+               if (alloc_num != MS_BLOCK_ALLOC_NUM)
+                       stat_major_blocks_alloced_less_ideal += alloc_num;
+#endif
+       }
+
+       /* Pop the list head; retry the refill if another thread emptied the list. */
+       do {
+               empty = empty_blocks;
+               if (!empty)
+                       goto retry;
+               block = empty;
+               next = *(void**)block;
+       } while (SGEN_CAS_PTR (&empty_blocks, next, empty) != empty);
+
+       SGEN_ATOMIC_ADD_P (num_empty_blocks, -1);
+
+       /* Clear the link word so the block starts out fully zeroed. */
+       *(void**)block = NULL;
+
+       g_assert (!((mword)block & (MS_BLOCK_SIZE - 1)));
+
+       return block;
+}
+
+/*
+ * This doesn't actually free a block immediately, but enqueues it into the `empty_blocks`
+ * list, where it will either be freed later on, or reused in nursery collections.
+ */
+/*
+ * This doesn't actually free a block immediately, but enqueues it into the `empty_blocks`
+ * list, where it will either be freed later on, or reused in nursery collections.
+ * The block is zeroed before being pushed (lock-free, via CAS).
+ */
+static void
+ms_free_block (void *block)
+{
+       void *empty;
+
+       sgen_memgov_release_space (MS_BLOCK_SIZE, SPACE_MAJOR);
+       memset (block, 0, MS_BLOCK_SIZE);
+
+       do {
+               empty = empty_blocks;
+               *(void**)block = empty;
+       } while (SGEN_CAS_PTR (&empty_blocks, block, empty) != empty);
+
+       SGEN_ATOMIC_ADD_P (num_empty_blocks, 1);
+
+       binary_protocol_block_free (block, MS_BLOCK_SIZE);
+}
+
+/*
+ * TRUE while a sweep is underway in any of its phases.  `sweep_state` is read
+ * once into a local so the three comparisons see a consistent value.
+ */
+static gboolean
+sweep_in_progress (void)
+{
+       int state = sweep_state;
+       return state == SWEEP_STATE_SWEEPING ||
+               state == SWEEP_STATE_SWEEPING_AND_ITERATING ||
+               state == SWEEP_STATE_COMPACTING;
+}
+
+/* TRUE if the block is in a stable state (fully swept, or being marked). */
+static inline gboolean
+block_is_swept_or_marking (MSBlockInfo *block)
+{
+       gint32 state = block->state;
+       return state == BLOCK_STATE_SWEPT || state == BLOCK_STATE_MARKING;
+}
+
+//#define MARKSWEEP_CONSISTENCY_CHECK
+
+#ifdef MARKSWEEP_CONSISTENCY_CHECK
+/*
+ * Consistency check (MARKSWEEP_CONSISTENCY_CHECK builds only): walk one free
+ * list and verify every block has the expected object size and pinnedness,
+ * has at least one free slot, and is registered in `allocated_blocks`.
+ */
+static void
+check_block_free_list (MSBlockInfo *block, int size, gboolean pinned)
+{
+       SGEN_ASSERT (0, !sweep_in_progress (), "Can't examine allocated blocks during sweep");
+       for (; block; block = block->next_free) {
+               SGEN_ASSERT (0, block->state != BLOCK_STATE_CHECKING, "Can't have a block we're checking in a free list.");
+               g_assert (block->obj_size == size);
+               g_assert ((pinned && block->pinned) || (!pinned && !block->pinned));
+
+               /* blocks in the free lists must have at least
+                  one free slot */
+               g_assert (block->free_list);
+
+               /* the block must be in the allocated_blocks array */
+               g_assert (sgen_pointer_queue_find (&allocated_blocks, BLOCK_TAG (block)) != (size_t)-1);
+       }
+}
+
+/* Verify that the `empty_blocks` list length matches `num_empty_blocks`. */
+static void
+check_empty_blocks (void)
+{
+       void *p;
+       size_t i = 0;
+       for (p = empty_blocks; p; p = *(void**)p)
+               ++i;
+       g_assert (i == num_empty_blocks);
+}
+
+/*
+ * Full heap consistency check (MARKSWEEP_CONSISTENCY_CHECK builds only):
+ * for every allocated block, the number of unallocated slots must equal the
+ * free-list length, and mark words must be clear outside of marking; then
+ * all free-block lists and the empty-block count are validated.
+ */
+static void
+consistency_check (void)
+{
+       MSBlockInfo *block;
+       int i;
+
+       /* check all blocks */
+       FOREACH_BLOCK_NO_LOCK (block) {
+               int count = MS_BLOCK_FREE / block->obj_size;
+               int num_free = 0;
+               void **free;
+
+               /* count number of free slots */
+               for (i = 0; i < count; ++i) {
+                       void **obj = (void**) MS_BLOCK_OBJ (block, i);
+                       if (!MS_OBJ_ALLOCED (obj, block))
+                               ++num_free;
+               }
+
+               /* check free list */
+               for (free = block->free_list; free; free = (void**)*free) {
+                       g_assert (MS_BLOCK_FOR_OBJ (free) == block);
+                       --num_free;
+               }
+               g_assert (num_free == 0);
+
+               /* check all mark words are zero */
+               if (!sgen_concurrent_collection_in_progress () && block_is_swept_or_marking (block)) {
+                       for (i = 0; i < MS_NUM_MARK_WORDS; ++i)
+                               g_assert (block->mark_words [i] == 0);
+               }
+       } END_FOREACH_BLOCK_NO_LOCK;
+
+       /* check free blocks */
+       for (i = 0; i < num_block_obj_sizes; ++i) {
+               int j;
+               for (j = 0; j < MS_BLOCK_TYPE_MAX; ++j)
+                       check_block_free_list (free_block_lists [j][i], block_obj_sizes [i], j & MS_BLOCK_FLAG_PINNED);
+       }
+
+       check_empty_blocks ();
+}
+#endif
+
+/*
+ * Lock-free push of `block` onto the free list for `size_index`.
+ * See the comment at `free_block_lists` for why ABA is not a concern.
+ */
+static void
+add_free_block (MSBlockInfo * volatile *free_blocks, int size_index, MSBlockInfo *block)
+{
+       MSBlockInfo *old;
+       do {
+               block->next_free = old = free_blocks [size_index];
+       } while (SGEN_CAS_PTR ((gpointer)&free_blocks [size_index], block, old) != old);
+}
+
+static void major_finish_sweep_checking (void);
+
+/*
+ * Obtain a fresh block for the given size class / pinnedness / reference-ness,
+ * initialize its header and intra-block free list, and publish it on the
+ * appropriate free-block list and in `allocated_blocks`.  Returns FALSE if
+ * the memory governor refuses the space.
+ */
+static gboolean
+ms_alloc_block (int size_index, gboolean pinned, gboolean has_references)
+{
+       int size = block_obj_sizes [size_index];
+       int count = MS_BLOCK_FREE / size;
+       MSBlockInfo *info;
+       MSBlockInfo * volatile * free_blocks = FREE_BLOCKS (pinned, has_references);
+       char *obj_start;
+       int i;
+
+       if (!sgen_memgov_try_alloc_space (MS_BLOCK_SIZE, SPACE_MAJOR))
+               return FALSE;
+
+       info = (MSBlockInfo*)ms_get_empty_block ();
+
+       SGEN_ASSERT (9, count >= 2, "block with %d objects, it must hold at least 2", count);
+
+       info->obj_size = size;
+       info->obj_size_index = size_index;
+       info->pinned = pinned;
+       info->has_references = has_references;
+       info->has_pinned = pinned;
+       /*
+        * Blocks that are to-space are not evacuated from.  During a major collection
+        * blocks are allocated for two reasons: evacuating objects from the nursery and
+        * evacuating them from major blocks marked for evacuation.  In both cases we don't
+        * want further evacuation.
+        */
+       info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD);
+       info->state = (info->is_to_space || sgen_concurrent_collection_in_progress ()) ? BLOCK_STATE_MARKING : BLOCK_STATE_SWEPT;
+       SGEN_ASSERT (6, !sweep_in_progress () || info->state == BLOCK_STATE_SWEPT, "How do we add a new block to be swept while sweeping?");
+       info->cardtable_mod_union = NULL;
+
+       update_heap_boundaries_for_block (info);
+
+       binary_protocol_block_alloc (info, MS_BLOCK_SIZE);
+
+       /* build free list */
+       obj_start = MS_BLOCK_FOR_BLOCK_INFO (info) + MS_BLOCK_SKIP;
+       info->free_list = (void**)obj_start;
+       /* we're skipping the last one - it must be nulled */
+       for (i = 0; i < count - 1; ++i) {
+               char *next_obj_start = obj_start + size;
+               *(void**)obj_start = next_obj_start;
+               obj_start = next_obj_start;
+       }
+       /* the last one */
+       *(void**)obj_start = NULL;
+
+       add_free_block (free_blocks, size_index, info);
+
+       /*
+        * This is the only place where the `allocated_blocks` array can potentially grow.
+        * We need to make sure concurrent sweep isn't running when that happens, so in that
+        * specific case we just wait for sweep to finish.
+        */
+       if (sgen_pointer_queue_will_grow (&allocated_blocks))
+               major_finish_sweep_checking ();
+
+       sgen_pointer_queue_add (&allocated_blocks, BLOCK_TAG (info));
+
+       SGEN_ATOMIC_ADD_P (num_major_sections, 1);
+       return TRUE;
+}
+
+/*
+ * TRUE if `ptr` lies within a pinned block.  Linear scan over all allocated
+ * blocks, so only usable where FOREACH_BLOCK_NO_LOCK's preconditions hold
+ * (world stopped, no sweep in progress).
+ */
+static gboolean
+obj_is_from_pinned_alloc (char *ptr)
+{
+       MSBlockInfo *block;
+
+       FOREACH_BLOCK_NO_LOCK (block) {
+               if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE)
+                       return block->pinned;
+       } END_FOREACH_BLOCK_NO_LOCK;
+       return FALSE;
+}
+
+/*
+ * Block until `block`'s free list is safe to use: lazily sweep it if it still
+ * needs sweeping, or spin-wait (with g_usleep) while the sweep thread is
+ * working on it.  Returns once the block is SWEPT or MARKING.
+ */
+static void
+ensure_can_access_block_free_list (MSBlockInfo *block)
+{
+ retry:
+       for (;;) {
+               switch (block->state) {
+               case BLOCK_STATE_SWEPT:
+               case BLOCK_STATE_MARKING:
+                       return;
+               case BLOCK_STATE_CHECKING:
+                       SGEN_ASSERT (0, FALSE, "How did we get a block that's being checked from a free list?");
+                       break;
+               case BLOCK_STATE_NEED_SWEEPING:
+                       /* Sweep it ourselves rather than wait for the sweep thread. */
+                       if (sweep_block (block))
+                               ++stat_major_blocks_lazy_swept;
+                       break;
+               case BLOCK_STATE_SWEEPING:
+                       /* FIXME: do this more elegantly */
+                       g_usleep (100);
+                       goto retry;
+               default:
+                       SGEN_ASSERT (0, FALSE, "Illegal block state");
+                       break;
+               }
+       }
+}
+
+/*
+ * Take one object slot from the head block of the free list for `size_index`.
+ * If that slot was the block's last free one, the block is unlinked from the
+ * free list via CAS; a CAS failure means the list head changed, so retry.
+ * Called with the GC lock held (see the comment at `free_block_lists`).
+ */
+static void*
+unlink_slot_from_free_list_uncontested (MSBlockInfo * volatile *free_blocks, int size_index)
+{
+       MSBlockInfo *block, *next_free_block;
+       void *obj, *next_free_slot;
+
+ retry:
+       block = free_blocks [size_index];
+       SGEN_ASSERT (9, block, "no free block to unlink from free_blocks %p size_index %d", free_blocks, size_index);
+
+       ensure_can_access_block_free_list (block);
+
+       obj = block->free_list;
+       SGEN_ASSERT (6, obj, "block %p in free list had no available object to alloc from", block);
+
+       next_free_slot = *(void**)obj;
+       if (next_free_slot) {
+               /* Fast path: more slots remain, just advance the free list. */
+               block->free_list = next_free_slot;
+               return obj;
+       }
+
+       /* Last slot: remove the block from the free list before returning it. */
+       next_free_block = block->next_free;
+       if (SGEN_CAS_PTR ((gpointer)&free_blocks [size_index], next_free_block, block) != block)
+               goto retry;
+
+       block->free_list = NULL;
+       block->next_free = NULL;
+
+       return obj;
+}
+
+static void*
+alloc_obj (GCVTable *vtable, size_t size, gboolean pinned, gboolean has_references)
+{
+       int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
+       MSBlockInfo * volatile * free_blocks = FREE_BLOCKS (pinned, has_references);
+       void *obj;
+
+       if (!free_blocks [size_index]) {
+               if (G_UNLIKELY (!ms_alloc_block (size_index, pinned, has_references)))
+                       return NULL;
+       }
+
+       obj = unlink_slot_from_free_list_uncontested (free_blocks, size_index);
+
+       *(GCVTable**)obj = vtable;
+
+       return obj;
+}
+
+static void*
+major_alloc_object (GCVTable *vtable, size_t size, gboolean has_references)
+{
+       return alloc_obj (vtable, size, FALSE, has_references);
+}
+
+/*
+ * We're not freeing the block if it's empty.  We leave that work for
+ * the next major collection.
+ *
+ * This is just called from the domain clearing code, which runs in a
+ * single thread and has the GC lock, so we don't need an extra lock.
+ */
+static void
+free_object (char *obj, size_t size, gboolean pinned)
+{
+       MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
+       int word, bit;
+       gboolean in_free_list;
+
+       SGEN_ASSERT (9, sweep_state == SWEEP_STATE_SWEPT, "Should have waited for sweep to free objects.");
+
+       ensure_can_access_block_free_list (block);
+       SGEN_ASSERT (9, (pinned && block->pinned) || (!pinned && !block->pinned), "free-object pinning mixup object %p pinned %d block %p pinned %d", obj, pinned, block, block->pinned);
+       SGEN_ASSERT (9, MS_OBJ_ALLOCED (obj, block), "object %p is already free", obj);
+       MS_CALC_MARK_BIT (word, bit, obj);
+       SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p has mark bit set", obj);
+
+       memset (obj, 0, size);
+
+       in_free_list = !!block->free_list;
+       *(void**)obj = block->free_list;
+       block->free_list = (void**)obj;
+
+       if (!in_free_list) {
+               MSBlockInfo * volatile *free_blocks = FREE_BLOCKS (pinned, block->has_references);
+               int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
+               SGEN_ASSERT (9, !block->next_free, "block %p doesn't have a free-list of object but belongs to a free-list of blocks", block);
+               add_free_block (free_blocks, size_index, block);
+       }
+}
+
+static void
+major_free_non_pinned_object (char *obj, size_t size)
+{
+       free_object (obj, size, FALSE);
+}
+
+/* size is a multiple of SGEN_ALLOC_ALIGN */
+static void*
+major_alloc_small_pinned_obj (GCVTable *vtable, size_t size, gboolean has_references)
+{
+       void *res;
+
+       res = alloc_obj (vtable, size, TRUE, has_references);
+        /*If we failed to alloc memory, we better try releasing memory
+         *as pinned alloc is requested by the runtime.
+         */
+        if (!res) {
+               sgen_perform_collection (0, GENERATION_OLD, "pinned alloc failure", TRUE);
+               res = alloc_obj (vtable, size, TRUE, has_references);
+        }
+        return res;
+}
+
+static void
+free_pinned_object (char *obj, size_t size)
+{
+       free_object (obj, size, TRUE);
+}
+
+/*
+ * size is already rounded up and we hold the GC lock.
+ */
+static void*
+major_alloc_degraded (GCVTable *vtable, size_t size)
+{
+       void *obj = alloc_obj (vtable, size, FALSE, SGEN_VTABLE_HAS_REFERENCES (vtable));
+       if (G_LIKELY (obj)) {
+               HEAVY_STAT (++stat_objects_alloced_degraded);
+               HEAVY_STAT (stat_bytes_alloced_degraded += size);
+       }
+       return obj;
+}
+
+/*
+ * obj is some object.  If it's not in the major heap (i.e. if it's in
+ * the nursery or LOS), return FALSE.  Otherwise return whether it's
+ * been marked or copied.
+ */
+static gboolean
+major_is_object_live (char *obj)
+{
+       MSBlockInfo *block;
+       int word, bit;
+       mword objsize;
+
+       if (sgen_ptr_in_nursery (obj))
+               return FALSE;
+
+       objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((GCObject*)obj));
+
+       /* LOS */
+       if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
+               return FALSE;
+
+       /* now we know it's in a major block */
+       block = MS_BLOCK_FOR_OBJ (obj);
+       SGEN_ASSERT (9, !block->pinned, "block %p is pinned, BTW why is this bad?", block);
+       MS_CALC_MARK_BIT (word, bit, obj);
+       return MS_MARK_BIT (block, word, bit) ? TRUE : FALSE;
+}
+
/*
 * If `ptr` falls inside a major block, return whether that block is
 * non-pinned, and store the start of the containing object slot in `*start`
 * (NULL if no slot matched).  Returns FALSE when `ptr` is not in the major
 * heap at all, in which case `*start` is untouched.
 */
static gboolean
major_ptr_is_in_non_pinned_space (char *ptr, char **start)
{
	MSBlockInfo *block;

	FOREACH_BLOCK_NO_LOCK (block) {
		if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) {
			int count = MS_BLOCK_FREE / block->obj_size;
			int i;

			*start = NULL;
			/* Find the slot whose [start, next-start) range contains `ptr`. */
			for (i = 0; i <= count; ++i) {
				if (ptr >= MS_BLOCK_OBJ (block, i) && ptr < MS_BLOCK_OBJ (block, i + 1)) {
					*start = MS_BLOCK_OBJ (block, i);
					break;
				}
			}
			return !block->pinned;
		}
	} END_FOREACH_BLOCK_NO_LOCK;
	return FALSE;
}
+
+static gboolean
+try_set_sweep_state (int new, int expected)
+{
+       int old = SGEN_CAS (&sweep_state, new, expected);
+       return old == expected;
+}
+
+static void
+set_sweep_state (int new, int expected)
+{
+       gboolean success = try_set_sweep_state (new, expected);
+       SGEN_ASSERT (0, success, "Could not set sweep state.");
+}
+
/* Defined after the sweep code below. */
static gboolean ensure_block_is_checked_for_sweeping (int block_index, gboolean wait, gboolean *have_checked);

/* The currently running sweep job, or NULL; the job NULLs this itself when done. */
static SgenThreadPoolJob * volatile sweep_job;
+
/*
 * Make sure the checking part of sweeping is finished: every block in
 * `allocated_blocks` has been checked and any running sweep job has
 * completed.  No-op if no sweep is in progress.
 */
static void
major_finish_sweep_checking (void)
{
	int block_index;
	SgenThreadPoolJob *job;

 retry:
	switch (sweep_state) {
	case SWEEP_STATE_SWEPT:
	case SWEEP_STATE_NEED_SWEEPING:
		return;
	case SWEEP_STATE_SWEEPING:
		/* Claim the iterating state so we can walk the block array ourselves. */
		if (try_set_sweep_state (SWEEP_STATE_SWEEPING_AND_ITERATING, SWEEP_STATE_SWEEPING))
			break;
		goto retry;
	case SWEEP_STATE_SWEEPING_AND_ITERATING:
		SGEN_ASSERT (0, FALSE, "Is there another minor collection running?");
		goto retry;
	case SWEEP_STATE_COMPACTING:
		/* Checking is past; just wait for the sweep job to finish. */
		goto wait;
	default:
		SGEN_ASSERT (0, FALSE, "Invalid sweep state.");
		break;
	}

	/*
	 * We're running with the world stopped and the only other thread doing work is the
	 * sweep thread, which doesn't add blocks to the array, so we can safely access
	 * `next_slot`.
	 */
	for (block_index = 0; block_index < allocated_blocks.next_slot; ++block_index)
		ensure_block_is_checked_for_sweeping (block_index, FALSE, NULL);

	set_sweep_state (SWEEP_STATE_SWEEPING, SWEEP_STATE_SWEEPING_AND_ITERATING);

 wait:
	job = sweep_job;
	if (job)
		sgen_thread_pool_job_wait (job);
	SGEN_ASSERT (0, !sweep_job, "Why did the sweep job not null itself?");
	SGEN_ASSERT (0, sweep_state == SWEEP_STATE_SWEPT, "How is the sweep job done but we're not swept?");
}
+
/*
 * Invoke `callback` for every allocated object in the major heap, selected
 * by `flags`: pinned and/or non-pinned blocks, optionally sweeping each
 * block first when lazy sweep is on.
 */
static void
major_iterate_objects (IterateObjectsFlags flags, IterateObjectCallbackFunc callback, void *data)
{
	gboolean sweep = flags & ITERATE_OBJECTS_SWEEP;
	gboolean non_pinned = flags & ITERATE_OBJECTS_NON_PINNED;
	gboolean pinned = flags & ITERATE_OBJECTS_PINNED;
	MSBlockInfo *block;

	/* Make sure every block's sweep check is done before iterating. */
	major_finish_sweep_checking ();
	FOREACH_BLOCK_NO_LOCK (block) {
		int count = MS_BLOCK_FREE / block->obj_size;
		int i;

		if (block->pinned && !pinned)
			continue;
		if (!block->pinned && !non_pinned)
			continue;
		if (sweep && lazy_sweep) {
			sweep_block (block);
			SGEN_ASSERT (6, block->state == BLOCK_STATE_SWEPT, "Block must be swept after sweeping");
		}

		for (i = 0; i < count; ++i) {
			void **obj = (void**) MS_BLOCK_OBJ (block, i);
			/*
			 * We've finished sweep checking, but if we're sweeping lazily and
			 * the flags don't require us to sweep, the block might still need
			 * sweeping.  In that case, we need to consult the mark bits to tell
			 * us whether an object slot is live.
			 */
			if (!block_is_swept_or_marking (block)) {
				int word, bit;
				SGEN_ASSERT (6, !sweep && block->state == BLOCK_STATE_NEED_SWEEPING, "Has sweeping not finished?");
				MS_CALC_MARK_BIT (word, bit, obj);
				if (!MS_MARK_BIT (block, word, bit))
					continue;
			}
			if (MS_OBJ_ALLOCED (obj, block))
				callback ((char*)obj, block->obj_size, data);
		}
	} END_FOREACH_BLOCK_NO_LOCK;
}
+
/*
 * Returns whether `object` is exactly the start of an allocated object slot
 * in the major heap.  Interior pointers and free slots yield FALSE.
 */
static gboolean
major_is_valid_object (char *object)
{
	MSBlockInfo *block;

	FOREACH_BLOCK_NO_LOCK (block) {
		int idx;
		char *obj;

		if ((MS_BLOCK_FOR_BLOCK_INFO (block) > object) || ((MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) <= object))
			continue;

		/* Round down to the slot start; must match exactly. */
		idx = MS_BLOCK_OBJ_INDEX (object, block);
		obj = (char*)MS_BLOCK_OBJ (block, idx);
		if (obj != object)
			return FALSE;
		return MS_OBJ_ALLOCED (obj, block);
	} END_FOREACH_BLOCK_NO_LOCK;

	return FALSE;
}
+
+
/*
 * Debugging aid: log a description of what `ptr` is in the major heap
 * (object start, interior pointer, dead slot, and its mark state).
 * Returns the slot's vtable if allocated, NULL otherwise or when `ptr`
 * is not in any major block.
 */
static GCVTable*
major_describe_pointer (char *ptr)
{
	MSBlockInfo *block;

	FOREACH_BLOCK_NO_LOCK (block) {
		int idx;
		char *obj;
		gboolean live;
		GCVTable *vtable;
		int w, b;
		gboolean marked;

		if ((MS_BLOCK_FOR_BLOCK_INFO (block) > ptr) || ((MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) <= ptr))
			continue;

		SGEN_LOG (0, "major-ptr (block %p sz %d pin %d ref %d)\n",
			MS_BLOCK_FOR_BLOCK_INFO (block), block->obj_size, block->pinned, block->has_references);

		/* Round down to the containing object slot. */
		idx = MS_BLOCK_OBJ_INDEX (ptr, block);
		obj = (char*)MS_BLOCK_OBJ (block, idx);
		live = MS_OBJ_ALLOCED (obj, block);
		vtable = live ? (GCVTable*)SGEN_LOAD_VTABLE (obj) : NULL;

		MS_CALC_MARK_BIT (w, b, obj);
		marked = MS_MARK_BIT (block, w, b);

		if (obj == ptr) {
			SGEN_LOG (0, "\t(");
			if (live)
				SGEN_LOG (0, "object");
			else
				SGEN_LOG (0, "dead-object");
		} else {
			if (live)
				SGEN_LOG (0, "interior-ptr offset %td", ptr - obj);
			else
				SGEN_LOG (0, "dead-interior-ptr offset %td", ptr - obj);
		}

		SGEN_LOG (0, " marked %d)\n", marked ? 1 : 0);

		return vtable;
	} END_FOREACH_BLOCK_NO_LOCK;

	return NULL;
}
+
/* No-op for this collector. */
static void
major_check_scan_starts (void)
{
}
+
/*
 * Write per-size occupancy statistics and, per block, the runs of occupied
 * slots to `heap_dump_file` in the heap-dump XML format.
 */
static void
major_dump_heap (FILE *heap_dump_file)
{
	MSBlockInfo *block;
	int *slots_available = alloca (sizeof (int) * num_block_obj_sizes);
	int *slots_used = alloca (sizeof (int) * num_block_obj_sizes);
	int i;

	for (i = 0; i < num_block_obj_sizes; ++i)
		slots_available [i] = slots_used [i] = 0;

	/* First pass: tally available and used slots per size class. */
	FOREACH_BLOCK_NO_LOCK (block) {
		int index = ms_find_block_obj_size_index (block->obj_size);
		int count = MS_BLOCK_FREE / block->obj_size;

		slots_available [index] += count;
		for (i = 0; i < count; ++i) {
			if (MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block))
				++slots_used [index];
		}
	} END_FOREACH_BLOCK_NO_LOCK;

	fprintf (heap_dump_file, "<occupancies>\n");
	for (i = 0; i < num_block_obj_sizes; ++i) {
		fprintf (heap_dump_file, "<occupancy size=\"%d\" available=\"%d\" used=\"%d\" />\n",
				block_obj_sizes [i], slots_available [i], slots_used [i]);
	}
	fprintf (heap_dump_file, "</occupancies>\n");

	/* Second pass: dump each block's contiguous runs of allocated slots. */
	FOREACH_BLOCK_NO_LOCK (block) {
		int count = MS_BLOCK_FREE / block->obj_size;
		int i;
		int start = -1;		/* first slot of the current occupied run, or -1 */

		fprintf (heap_dump_file, "<section type=\"%s\" size=\"%zu\">\n", "old", (size_t)MS_BLOCK_FREE);

		/* `i == count` acts as a sentinel that flushes the last run. */
		for (i = 0; i <= count; ++i) {
			if ((i < count) && MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block)) {
				if (start < 0)
					start = i;
			} else {
				if (start >= 0) {
					sgen_dump_occupied (MS_BLOCK_OBJ (block, start), MS_BLOCK_OBJ (block, i), MS_BLOCK_FOR_BLOCK_INFO (block));
					start = -1;
				}
			}
		}

		fprintf (heap_dump_file, "</section>\n");
	} END_FOREACH_BLOCK_NO_LOCK;
}
+
/*
 * Return `block`'s mod-union card table, allocating and CAS-installing one
 * if `allocate` is TRUE and none exists yet.  If we lose the install race,
 * free our copy and return the winner's table.  Returns NULL only when the
 * table is absent and `allocate` is FALSE.
 */
static guint8*
get_cardtable_mod_union_for_block (MSBlockInfo *block, gboolean allocate)
{
	guint8 *mod_union = block->cardtable_mod_union;
	guint8 *other;
	if (mod_union)
		return mod_union;
	else if (!allocate)
		return NULL;
	mod_union = sgen_card_table_alloc_mod_union (MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
	other = SGEN_CAS_PTR ((gpointer*)&block->cardtable_mod_union, mod_union, NULL);
	if (!other) {
		/* We won the race: our table is now installed. */
		SGEN_ASSERT (0, block->cardtable_mod_union == mod_union, "Why did CAS not replace?");
		return mod_union;
	}
	/* Somebody else installed one first: discard ours and use theirs. */
	sgen_card_table_free_mod_union (mod_union, MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
	return other;
}
+
/*
 * Return a pointer to the mod-union card byte covering the reference at
 * `ptr`, allocating the block's mod-union table on demand.
 */
static inline guint8*
major_get_cardtable_mod_union_for_reference (char *ptr)
{
	MSBlockInfo *block = MS_BLOCK_FOR_OBJ (ptr);
	size_t offset = sgen_card_table_get_card_offset (ptr, (char*)sgen_card_table_align_pointer (MS_BLOCK_FOR_BLOCK_INFO (block)));
	guint8 *mod_union = get_cardtable_mod_union_for_block (block, TRUE);
	SGEN_ASSERT (0, mod_union, "FIXME: optionally allocate the mod union if it's not here and CAS it in.");
	return &mod_union [offset];
}
+
+/*
+ * Mark the mod-union card for `ptr`, which must be a reference within the object `obj`.
+ */
+static void
+mark_mod_union_card (GCObject *obj, void **ptr)
+{
+       int type = sgen_obj_get_descriptor ((char*)obj) & DESC_TYPE_MASK;
+       if (sgen_safe_object_is_small (obj, type)) {
+               guint8 *card_byte = major_get_cardtable_mod_union_for_reference ((char*)ptr);
+               SGEN_ASSERT (0, MS_BLOCK_FOR_OBJ (obj) == MS_BLOCK_FOR_OBJ (ptr), "How can an object and a reference inside it not be in the same block?");
+               *card_byte = 1;
+       } else {
+               sgen_los_mark_mod_union_card (obj, ptr);
+       }
+}
+
#define LOAD_VTABLE	SGEN_LOAD_VTABLE

/*
 * Set the mark bit of `obj` (which lives in `block`) and, if it has
 * references, push it on the gray queue.  The CHECKED variant also
 * tolerates unallocated slots and skips them (used for pin-queue entries,
 * which may point at free slots).
 */
#define MS_MARK_OBJECT_AND_ENQUEUE_CHECKED(obj,desc,block,queue) do {	\
		int __word, __bit;					\
		MS_CALC_MARK_BIT (__word, __bit, (obj));		\
		if (!MS_MARK_BIT ((block), __word, __bit) && MS_OBJ_ALLOCED ((obj), (block))) { \
			MS_SET_MARK_BIT ((block), __word, __bit);	\
			if (sgen_gc_descr_has_references (desc))			\
				GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
			binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((GCObject*)(obj))); \
			INC_NUM_MAJOR_OBJECTS_MARKED ();		\
		}							\
	} while (0)
/* Like the CHECKED variant, but `obj` must be an allocated slot. */
#define MS_MARK_OBJECT_AND_ENQUEUE(obj,desc,block,queue) do {		\
		int __word, __bit;					\
		MS_CALC_MARK_BIT (__word, __bit, (obj));		\
		SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj); \
		if (!MS_MARK_BIT ((block), __word, __bit)) {		\
			MS_SET_MARK_BIT ((block), __word, __bit);	\
			if (sgen_gc_descr_has_references (desc))			\
				GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
			binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((GCObject*)(obj))); \
			INC_NUM_MAJOR_OBJECTS_MARKED ();		\
		}							\
	} while (0)
+
/*
 * Pin the major-heap object `obj`: mark it (enqueueing it if it has
 * references) and flag its block as containing pinned objects.  Not
 * supported while concurrent mark is on.
 */
static void
pin_major_object (char *obj, SgenGrayQueue *queue)
{
	MSBlockInfo *block;

	if (concurrent_mark)
		g_assert_not_reached ();

	block = MS_BLOCK_FOR_OBJ (obj);
	block->has_pinned = TRUE;
	MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
}
+
+#include "sgen-major-copy-object.h"
+
/*
 * Concurrent-mark variant of copy-or-mark: never copies (hence the forward
 * assertion).  Major objects are marked in place, LOS objects are pinned,
 * and nursery objects are skipped here.
 */
static void
major_copy_or_mark_object_concurrent (void **ptr, void *obj, SgenGrayQueue *queue)
{
	SGEN_ASSERT (9, sgen_concurrent_collection_in_progress (), "Why are we scanning concurrently when there's no concurrent collection on?");
	SGEN_ASSERT (9, !sgen_workers_are_working () || sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ()), "We must not scan from two threads at the same time!");

	g_assert (!SGEN_OBJECT_IS_FORWARDED (obj));

	if (!sgen_ptr_in_nursery (obj)) {
		mword objsize;

		objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((GCObject*)obj));

		if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE) {
			/* Small object: mark it in its major block. */
			MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
			MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
		} else {
			/* Large object: "marking" means pinning it in the LOS. */
			if (sgen_los_object_is_pinned (obj))
				return;

			binary_protocol_mark (obj, SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size (obj));

			sgen_los_pin_object (obj);
			if (SGEN_OBJECT_HAS_REFERENCES (obj))
				GRAY_OBJECT_ENQUEUE (queue, obj, sgen_obj_get_descriptor (obj));
			INC_NUM_MAJOR_OBJECTS_MARKED ();
		}
	}
}
+
/*
 * Return the number of major objects marked since the last call and reset
 * the counter (always 0 unless SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED).
 * NOTE(review): the read+reset is not atomic — presumably only called while
 * no marking is running; confirm.
 */
static long long
major_get_and_reset_num_major_objects_marked (void)
{
#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
	long long num = num_major_objects_marked;
	num_major_objects_marked = 0;
	return num;
#else
	return 0;
#endif
}
+
/* Whether to prefetch card data; defined = on, undefined = off. */
#define PREFETCH_CARDS		1	/* BOOL FASTENABLE */
#if !PREFETCH_CARDS
#undef PREFETCH_CARDS
#endif

/* gcc 4.2.1 from xcode4 crashes on sgen_card_table_get_card_address () when this is enabled */
#if defined(PLATFORM_MACOSX)
#define GCC_VERSION (__GNUC__ * 10000 \
				+ __GNUC_MINOR__ * 100 \
				+ __GNUC_PATCHLEVEL__)
#if GCC_VERSION <= 40300
#undef PREFETCH_CARDS
#endif
#endif
+
#ifdef HEAVY_STATISTICS
/* Counters for the optimized copy/scan/drain paths; compiled only with HEAVY_STATISTICS. */
static guint64 stat_optimized_copy;
static guint64 stat_optimized_copy_nursery;
static guint64 stat_optimized_copy_nursery_forwarded;
static guint64 stat_optimized_copy_nursery_pinned;
static guint64 stat_optimized_copy_major;
static guint64 stat_optimized_copy_major_small_fast;
static guint64 stat_optimized_copy_major_small_slow;
static guint64 stat_optimized_copy_major_large;
static guint64 stat_optimized_copy_major_forwarded;
static guint64 stat_optimized_copy_major_small_evacuate;
static guint64 stat_optimized_major_scan;
static guint64 stat_optimized_major_scan_no_refs;

static guint64 stat_drain_prefetch_fills;
static guint64 stat_drain_prefetch_fill_failures;
static guint64 stat_drain_loops;
#endif
+
static void major_scan_object_with_evacuation (char *start, mword desc, SgenGrayQueue *queue);

/*
 * Instantiate the copy/scan/drain template twice: once without and once
 * with evacuation.  The *_FUNCTION_NAME macros name the functions the
 * included header generates.
 */
#define COPY_OR_MARK_FUNCTION_NAME	major_copy_or_mark_object_no_evacuation
#define SCAN_OBJECT_FUNCTION_NAME	major_scan_object_no_evacuation
#define DRAIN_GRAY_STACK_FUNCTION_NAME	drain_gray_stack_no_evacuation
#include "sgen-marksweep-drain-gray-stack.h"

#define COPY_OR_MARK_WITH_EVACUATION
#define COPY_OR_MARK_FUNCTION_NAME	major_copy_or_mark_object_with_evacuation
#define SCAN_OBJECT_FUNCTION_NAME	major_scan_object_with_evacuation
#define DRAIN_GRAY_STACK_FUNCTION_NAME	drain_gray_stack_with_evacuation
#include "sgen-marksweep-drain-gray-stack.h"
+
+static gboolean
+drain_gray_stack (ScanCopyContext ctx)
+{
+       gboolean evacuation = FALSE;
+       int i;
+       for (i = 0; i < num_block_obj_sizes; ++i) {
+               if (evacuate_block_obj_sizes [i]) {
+                       evacuation = TRUE;
+                       break;
+               }
+       }
+
+       if (evacuation)
+               return drain_gray_stack_with_evacuation (ctx);
+       else
+               return drain_gray_stack_no_evacuation (ctx);
+}
+
+#include "sgen-marksweep-scan-object-concurrent.h"
+
+static void
+major_copy_or_mark_object_canonical (void **ptr, SgenGrayQueue *queue)
+{
+       major_copy_or_mark_object_with_evacuation (ptr, *ptr, queue);
+}
+
+static void
+major_copy_or_mark_object_concurrent_canonical (void **ptr, SgenGrayQueue *queue)
+{
+       major_copy_or_mark_object_concurrent (ptr, *ptr, queue);
+}
+
+static void
+major_copy_or_mark_object_concurrent_finish_canonical (void **ptr, SgenGrayQueue *queue)
+{
+       major_copy_or_mark_object_no_evacuation (ptr, *ptr, queue);
+}
+
/*
 * Mark and enqueue the objects in `block` referenced by the pin-queue
 * entries in [first_entry, last_entry).  Entries may be interior pointers;
 * multiple entries hitting the same slot are de-duplicated via `last_index`
 * (the pin queue is sorted, so duplicates are adjacent).
 */
static void
mark_pinned_objects_in_block (MSBlockInfo *block, size_t first_entry, size_t last_entry, SgenGrayQueue *queue)
{
	void **entry, **end;
	int last_index = -1;

	if (first_entry == last_entry)
		return;

	block->has_pinned = TRUE;

	entry = sgen_pinning_get_entry (first_entry);
	end = sgen_pinning_get_entry (last_entry);

	for (; entry < end; ++entry) {
		int index = MS_BLOCK_OBJ_INDEX (*entry, block);
		char *obj;
		SGEN_ASSERT (9, index >= 0 && index < MS_BLOCK_FREE / block->obj_size, "invalid object %p index %d max-index %d", *entry, index, (int)(MS_BLOCK_FREE / block->obj_size));
		if (index == last_index)
			continue;
		obj = MS_BLOCK_OBJ (block, index);
		/* CHECKED: the entry might point at a free slot, which is skipped. */
		MS_MARK_OBJECT_AND_ENQUEUE_CHECKED (obj, sgen_obj_get_descriptor (obj), block, queue);
		last_index = index;
	}
}
+
/*
 * Core of sweeping one block: walk all `count` slots of `obj_size` bytes,
 * zero every allocated-but-unmarked slot and thread it onto the block's
 * free list.  `obj_size` is passed explicitly so callers can instantiate
 * constant-size specializations (see sweep_block ()).
 */
static inline void
sweep_block_for_size (MSBlockInfo *block, int count, int obj_size)
{
	int obj_index;

	for (obj_index = 0; obj_index < count; ++obj_index) {
		int word, bit;
		void *obj = MS_BLOCK_OBJ_FOR_SIZE (block, obj_index, obj_size);

		MS_CALC_MARK_BIT (word, bit, obj);
		if (MS_MARK_BIT (block, word, bit)) {
			SGEN_ASSERT (9, MS_OBJ_ALLOCED (obj, block), "object %p not allocated", obj);
		} else {
			/* an unmarked object */
			if (MS_OBJ_ALLOCED (obj, block)) {
				/*
				 * FIXME: Merge consecutive
				 * slots for lower reporting
				 * overhead.  Maybe memset
				 * will also benefit?
				 */
				binary_protocol_empty (obj, obj_size);
				memset (obj, 0, obj_size);
			}
			/* Push the slot (dead or already free) onto the free list. */
			*(void**)obj = block->free_list;
			block->free_list = obj;
		}
	}
}
+
+static inline gboolean
+try_set_block_state (MSBlockInfo *block, gint32 new_state, gint32 expected_state)
+{
+       gint32 old_state = SGEN_CAS (&block->state, new_state, expected_state);
+       gboolean success = old_state == expected_state;
+       if (success)
+               binary_protocol_block_set_state (block, MS_BLOCK_SIZE, old_state, new_state);
+       return success;
+}
+
/*
 * Non-atomic state transition for `block`; asserts the expected current
 * state.  Note that, unlike try_set_block_state (), this does not record
 * the transition in the binary protocol.
 */
static inline void
set_block_state (MSBlockInfo *block, gint32 new_state, gint32 expected_state)
{
	SGEN_ASSERT (6, block->state == expected_state, "Block state incorrect before set");
	block->state = new_state;
}
+
+/*
+ * If `block` needs sweeping, sweep it and return TRUE.  Otherwise return FALSE.
+ *
+ * Sweeping means iterating through the block's slots and building the free-list from the
+ * unmarked ones.  They will also be zeroed.  The mark bits will be reset.
+ */
+static gboolean
+sweep_block (MSBlockInfo *block)
+{
+       int count;
+       void *reversed = NULL;
+
+ retry:
+       switch (block->state) {
+       case BLOCK_STATE_SWEPT:
+               return FALSE;
+       case BLOCK_STATE_MARKING:
+       case BLOCK_STATE_CHECKING:
+               SGEN_ASSERT (0, FALSE, "How did we get to sweep a block that's being marked or being checked?");
+               goto retry;
+       case BLOCK_STATE_SWEEPING:
+               /* FIXME: Do this more elegantly */
+               g_usleep (100);
+               goto retry;
+       case BLOCK_STATE_NEED_SWEEPING:
+               if (!try_set_block_state (block, BLOCK_STATE_SWEEPING, BLOCK_STATE_NEED_SWEEPING))
+                       goto retry;
+               break;
+       default:
+               SGEN_ASSERT (0, FALSE, "Illegal block state");
+       }
+
+       SGEN_ASSERT (6, block->state == BLOCK_STATE_SWEEPING, "How did we get here without setting state to sweeping?");
+
+       count = MS_BLOCK_FREE / block->obj_size;
+
+       block->free_list = NULL;
+
+       /* Use inline instances specialized to constant sizes, this allows the compiler to replace the memset calls with inline code */
+       // FIXME: Add more sizes
+       switch (block->obj_size) {
+       case 16:
+               sweep_block_for_size (block, count, 16);
+               break;
+       default:
+               sweep_block_for_size (block, count, block->obj_size);
+               break;
+       }
+
+       /* reset mark bits */
+       memset (block->mark_words, 0, sizeof (mword) * MS_NUM_MARK_WORDS);
+
+       /* Reverse free list so that it's in address order */
+       reversed = NULL;
+       while (block->free_list) {
+               void *next = *(void**)block->free_list;
+               *(void**)block->free_list = reversed;
+               reversed = block->free_list;
+               block->free_list = next;
+       }
+       block->free_list = reversed;
+
+       mono_memory_write_barrier ();
+
+       set_block_state (block, BLOCK_STATE_SWEPT, BLOCK_STATE_SWEEPING);
+
+       return TRUE;
+}
+
+static inline int
+bitcount (mword d)
+{
+       int count = 0;
+
+#ifdef __GNUC__
+       if (sizeof (mword) == sizeof (unsigned long))
+               count += __builtin_popcountl (d);
+       else
+               count += __builtin_popcount (d);
+#else
+       while (d) {
+               count ++;
+               d &= (d - 1);
+       }
+#endif
+       return count;
+}
+
/* statistics for evacuation, gathered while checking blocks (non-pinned blocks only) */
static size_t *sweep_slots_available;	/* per size class: total slots in checked blocks */
static size_t *sweep_slots_used;	/* per size class: marked (live) slots */
static size_t *sweep_num_blocks;	/* per size class: number of checked blocks */

/* Major-section counts sampled around sweep. */
static volatile size_t num_major_sections_before_sweep;
static volatile size_t num_major_sections_freed_in_sweep;
+
+static void
+sweep_start (void)
+{
+       int i;
+
+       for (i = 0; i < num_block_obj_sizes; ++i)
+               sweep_slots_available [i] = sweep_slots_used [i] = sweep_num_blocks [i] = 0;
+
+       /* clear all the free lists */
+       for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
+               MSBlockInfo * volatile *free_blocks = free_block_lists [i];
+               int j;
+               for (j = 0; j < num_block_obj_sizes; ++j)
+                       free_blocks [j] = NULL;
+       }
+}
+
+static void sweep_finish (void);
+
+/*
+ * If `wait` is TRUE and the block is currently being checked, this function will wait until
+ * the checking has finished.
+ *
+ * Returns whether the block is still there.  If `wait` is FALSE, the return value will not
+ * be correct, i.e. must not be used.
+ */
+static gboolean
+/*
+ * Make sure the block at @block_index in allocated_blocks has been checked
+ * for sweeping during the current sweep.  Returns whether the block is still
+ * present afterwards (FALSE if it was freed, or if the slot was already
+ * empty).  If @wait is FALSE we bail out instead of spinning when another
+ * thread is already checking this block.  *@have_checked is set to TRUE only
+ * if this call performed the checking work itself.
+ */
+ensure_block_is_checked_for_sweeping (int block_index, gboolean wait, gboolean *have_checked)
+{
+       int count;
+       gboolean have_live = FALSE;
+       gboolean have_free = FALSE;
+       int nused = 0;
+       int block_state;
+       int i;
+       void *tagged_block;
+       MSBlockInfo *block;
+
+       SGEN_ASSERT (6, sweep_in_progress (), "Why do we call this function if there's no sweep in progress?");
+
+       if (have_checked)
+               *have_checked = FALSE;
+
+ retry:
+       /* Volatile reload: other threads may tag, untag or clear this slot concurrently. */
+       tagged_block = *(void * volatile *)&allocated_blocks.data [block_index];
+       if (!tagged_block)
+               return FALSE;
+
+       if (BLOCK_IS_TAGGED_CHECKING (tagged_block)) {
+               if (!wait)
+                       return FALSE;
+               /* FIXME: do this more elegantly */
+               g_usleep (100);
+               goto retry;
+       }
+
+       /* Claim the block by tagging it CHECKING; if another thread won the race, retry. */
+       if (SGEN_CAS_PTR (&allocated_blocks.data [block_index], BLOCK_TAG_CHECKING (tagged_block), tagged_block) != tagged_block)
+               goto retry;
+
+       block = BLOCK_UNTAG (tagged_block);
+       block_state = block->state;
+
+       if (!sweep_in_progress ()) {
+               SGEN_ASSERT (6, block_state != BLOCK_STATE_SWEEPING && block_state != BLOCK_STATE_CHECKING, "Invalid block state.");
+               if (!lazy_sweep)
+                       SGEN_ASSERT (6, block_state != BLOCK_STATE_NEED_SWEEPING, "Invalid block state.");
+       }
+
+       switch (block_state) {
+       case BLOCK_STATE_SWEPT:
+       case BLOCK_STATE_NEED_SWEEPING:
+       case BLOCK_STATE_SWEEPING:
+               /* Already past the checking stage - nothing for us to do. */
+               goto done;
+       case BLOCK_STATE_MARKING:
+               break;
+       case BLOCK_STATE_CHECKING:
+               SGEN_ASSERT (0, FALSE, "We set the CHECKING bit - how can the stage be CHECKING?");
+               goto done;
+       default:
+               SGEN_ASSERT (0, FALSE, "Illegal block state");
+               break;
+       }
+
+       SGEN_ASSERT (6, block->state == BLOCK_STATE_MARKING, "When we sweep all blocks must start out marking.");
+       set_block_state (block, BLOCK_STATE_CHECKING, BLOCK_STATE_MARKING);
+
+       if (have_checked)
+               *have_checked = TRUE;
+
+       block->has_pinned = block->pinned;
+
+       block->is_to_space = FALSE;
+
+       count = MS_BLOCK_FREE / block->obj_size;
+
+       if (block->cardtable_mod_union) {
+               sgen_card_table_free_mod_union (block->cardtable_mod_union, MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
+               block->cardtable_mod_union = NULL;
+       }
+
+       /* Count marked objects in the block */
+       for (i = 0; i < MS_NUM_MARK_WORDS; ++i)
+               nused += bitcount (block->mark_words [i]);
+
+       if (nused)
+               have_live = TRUE;
+       if (nused < count)
+               have_free = TRUE;
+
+       if (have_live) {
+               int obj_size_index = block->obj_size_index;
+               gboolean has_pinned = block->has_pinned;
+
+               set_block_state (block, BLOCK_STATE_NEED_SWEEPING, BLOCK_STATE_CHECKING);
+
+               /*
+                * FIXME: Go straight to SWEPT if there are no free slots.  We need
+                * to set the free slot list to NULL, though, and maybe update some
+                * statistics.
+                */
+               if (!lazy_sweep)
+                       sweep_block (block);
+
+               if (!has_pinned) {
+                       /* Record occupancy stats used by sweep_finish () to decide evacuation. */
+                       ++sweep_num_blocks [obj_size_index];
+                       sweep_slots_used [obj_size_index] += nused;
+                       sweep_slots_available [obj_size_index] += count;
+               }
+
+               /*
+                * If there are free slots in the block, add
+                * the block to the corresponding free list.
+                */
+               if (have_free) {
+                       MSBlockInfo * volatile *free_blocks = FREE_BLOCKS (block->pinned, block->has_references);
+
+                       if (!lazy_sweep)
+                               SGEN_ASSERT (6, block->free_list, "How do we not have a free list when there are free slots?");
+
+                       add_free_block (free_blocks, obj_size_index, block);
+               }
+
+               /* FIXME: Do we need the heap boundaries while we do nursery collections? */
+               update_heap_boundaries_for_block (block);
+       } else {
+               /*
+                * Blocks without live objects are removed from the
+                * block list and freed.
+                */
+               SGEN_ASSERT (6, block_index < allocated_blocks.next_slot, "How did the number of blocks shrink?");
+               SGEN_ASSERT (6, allocated_blocks.data [block_index] == BLOCK_TAG_CHECKING (tagged_block), "How did the block move?");
+
+               binary_protocol_empty (MS_BLOCK_OBJ (block, 0), (char*)MS_BLOCK_OBJ (block, count) - (char*)MS_BLOCK_OBJ (block, 0));
+               ms_free_block (block);
+
+               SGEN_ATOMIC_ADD_P (num_major_sections, -1);
+
+               tagged_block = NULL;
+       }
+
+ done:
+       /* Publish the result, dropping the CHECKING tag (or NULL if the block was freed). */
+       allocated_blocks.data [block_index] = tagged_block;
+       return !!tagged_block;
+}
+
+/*
+ * Thread-pool job that performs the concurrent sweep: checks every block that
+ * existed when the sweep started, counts the ones that got freed, then
+ * transitions the sweep state to COMPACTING and finally finishes the sweep.
+ */
+static void
+sweep_job_func (void *thread_data_untyped, SgenThreadPoolJob *job)
+{
+       int block_index;
+       int num_blocks = num_major_sections_before_sweep;
+
+       SGEN_ASSERT (0, sweep_in_progress (), "Sweep thread called with wrong state");
+       SGEN_ASSERT (0, num_blocks <= allocated_blocks.next_slot, "How did we lose blocks?");
+
+       /*
+        * We traverse the block array from high to low.  Nursery collections will have to
+        * cooperate with the sweep thread to finish sweeping, and they will traverse from
+        * low to high, to avoid constantly colliding on the same blocks.
+        */
+       for (block_index = num_blocks - 1; block_index >= 0; --block_index) {
+               gboolean have_checked;
+
+               /*
+                * The block might have been freed by another thread doing some checking
+                * work.
+                */
+               if (!ensure_block_is_checked_for_sweeping (block_index, TRUE, &have_checked))
+                       ++num_major_sections_freed_in_sweep;
+       }
+
+       while (!try_set_sweep_state (SWEEP_STATE_COMPACTING, SWEEP_STATE_SWEEPING)) {
+               /*
+                * The main GC thread is currently iterating over the block array to help us
+                * finish the sweep.  We have already finished, but we don't want to mess up
+                * that iteration, so we just wait for it.
+                */
+               g_usleep (100);
+       }
+
+       if (SGEN_MAX_ASSERT_LEVEL >= 6) {
+               /* Blocks added after the sweep started must already be swept. */
+               for (block_index = num_blocks; block_index < allocated_blocks.next_slot; ++block_index) {
+                       MSBlockInfo *block = BLOCK_UNTAG (allocated_blocks.data [block_index]);
+                       SGEN_ASSERT (6, block && block->state == BLOCK_STATE_SWEPT, "How did a new block to be swept get added while swept?");
+               }
+       }
+
+       /* Compact the array: freed blocks left NULL slots behind. */
+       sgen_pointer_queue_remove_nulls (&allocated_blocks);
+
+       sweep_finish ();
+
+       sweep_job = NULL;
+}
+
+/*
+ * Finish the sweep: using the per-size-class occupancy statistics gathered
+ * during checking, decide which block sizes will be evacuated in the next
+ * major collection, compute whether evacuation is worthwhile overall, and
+ * transition the sweep state from COMPACTING to SWEPT.
+ */
+static void
+sweep_finish (void)
+{
+       mword total_evacuate_heap = 0;
+       mword total_evacuate_saved = 0;
+       int i;
+
+       for (i = 0; i < num_block_obj_sizes; ++i) {
+               float usage = (float)sweep_slots_used [i] / (float)sweep_slots_available [i];
+               /* Evacuate sparsely used size classes (but only with enough blocks to matter). */
+               if (sweep_num_blocks [i] > 5 && usage < evacuation_threshold) {
+                       evacuate_block_obj_sizes [i] = TRUE;
+                       /*
+                       g_print ("slot size %d - %d of %d used\n",
+                                       block_obj_sizes [i], slots_used [i], slots_available [i]);
+                       */
+               } else {
+                       evacuate_block_obj_sizes [i] = FALSE;
+               }
+               {
+                       mword total_bytes = block_obj_sizes [i] * sweep_slots_available [i];
+                       total_evacuate_heap += total_bytes;
+                       if (evacuate_block_obj_sizes [i])
+                               total_evacuate_saved += total_bytes - block_obj_sizes [i] * sweep_slots_used [i];
+               }
+       }
+
+       /* Evacuate only if the expected savings exceed the configured fraction of the heap. */
+       want_evacuation = (float)total_evacuate_saved / (float)total_evacuate_heap > (1 - concurrent_evacuation_threshold);
+
+       set_sweep_state (SWEEP_STATE_SWEPT, SWEEP_STATE_COMPACTING);
+}
+
+/*
+ * Start a sweep of the major heap.  Depending on the `concurrent_sweep`
+ * setting the actual work is either enqueued as a thread-pool job or done
+ * synchronously on the calling thread.
+ */
+static void
+major_sweep (void)
+{
+       set_sweep_state (SWEEP_STATE_SWEEPING, SWEEP_STATE_NEED_SWEEPING);
+
+       sweep_start ();
+
+       SGEN_ASSERT (0, num_major_sections == allocated_blocks.next_slot, "We don't know how many blocks we have?");
+
+       /* Snapshot counters so get_bytes_survived_last_sweep () can report afterwards. */
+       num_major_sections_before_sweep = num_major_sections;
+       num_major_sections_freed_in_sweep = 0;
+
+       SGEN_ASSERT (0, !sweep_job, "We haven't finished the last sweep?");
+       if (concurrent_sweep) {
+               sweep_job = sgen_thread_pool_job_alloc ("sweep", sweep_job_func, sizeof (SgenThreadPoolJob));
+               sgen_thread_pool_job_enqueue (sweep_job);
+       } else {
+               sweep_job_func (NULL, NULL);
+       }
+}
+
+/* Returns TRUE iff the last sweep has fully completed. */
+static gboolean
+major_have_swept (void)
+{
+       return sweep_state == SWEEP_STATE_SWEPT;
+}
+
+/* Counters filled in by count_ref_nonref_objs () / the two callbacks below. */
+static int count_pinned_ref;
+static int count_pinned_nonref;
+static int count_nonpinned_ref;
+static int count_nonpinned_nonref;
+
+/* Iteration callback: bump the non-pinned counter matching whether the object's vtable has reference fields. */
+static void
+count_nonpinned_callback (char *obj, size_t size, void *data)
+{
+       GCVTable *vtable = (GCVTable*)LOAD_VTABLE (obj);
+
+       if (SGEN_VTABLE_HAS_REFERENCES (vtable))
+               ++count_nonpinned_ref;
+       else
+               ++count_nonpinned_nonref;
+}
+
+/* Iteration callback: bump the pinned counter matching whether the object's vtable has reference fields. */
+static void
+count_pinned_callback (char *obj, size_t size, void *data)
+{
+       GCVTable *vtable = (GCVTable*)LOAD_VTABLE (obj);
+
+       if (SGEN_VTABLE_HAS_REFERENCES (vtable))
+               ++count_pinned_ref;
+       else
+               ++count_pinned_nonref;
+}
+
+/*
+ * Debugging helper (unused by default): walks the major heap and prints how
+ * many pinned/non-pinned objects do and do not contain references.
+ */
+static G_GNUC_UNUSED void
+count_ref_nonref_objs (void)
+{
+       int total;
+
+       count_pinned_ref = 0;
+       count_pinned_nonref = 0;
+       count_nonpinned_ref = 0;
+       count_nonpinned_nonref = 0;
+
+       major_iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, count_nonpinned_callback, NULL);
+       major_iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, count_pinned_callback, NULL);
+
+       total = count_pinned_nonref + count_nonpinned_nonref + count_pinned_ref + count_nonpinned_ref;
+
+       g_print ("ref: %d pinned %d non-pinned   non-ref: %d pinned %d non-pinned  --  %.1f\n",
+                       count_pinned_ref, count_nonpinned_ref,
+                       count_pinned_nonref, count_nonpinned_nonref,
+                       (count_pinned_nonref + count_nonpinned_nonref) * 100.0 / total);
+}
+
+/*
+ * Compute the table of per-block object slot sizes.  If @arr is NULL the
+ * function only counts how many distinct sizes there are; otherwise it also
+ * stores each size into @arr.  Returns the number of sizes.
+ */
+static int
+ms_calculate_block_obj_sizes (double factor, int *arr)
+{
+       double target_size;
+       int num_sizes = 0;
+       int last_size = 0;
+
+       /*
+        * Have every possible slot size starting with the minimal
+        * object size up to and including four times that size.  Then
+        * proceed by increasing geometrically with the given factor.
+        */
+
+       for (int size = SGEN_CLIENT_MINIMUM_OBJECT_SIZE; size <= 4 * SGEN_CLIENT_MINIMUM_OBJECT_SIZE; size += SGEN_ALLOC_ALIGN) {
+               if (arr)
+                       arr [num_sizes] = size;
+               ++num_sizes;
+               last_size = size;
+       }
+       target_size = (double)last_size;
+
+       do {
+               /* Round the size down so the slots fill the block evenly, aligned to SGEN_ALLOC_ALIGN. */
+               int target_count = (int)floor (MS_BLOCK_FREE / target_size);
+               int size = MIN ((MS_BLOCK_FREE / target_count) & ~(SGEN_ALLOC_ALIGN - 1), SGEN_MAX_SMALL_OBJ_SIZE);
+
+               if (size != last_size) {
+                       if (arr)
+                               arr [num_sizes] = size;
+                       ++num_sizes;
+                       last_size = size;
+               }
+
+               target_size *= factor;
+       } while (last_size < SGEN_MAX_SMALL_OBJ_SIZE);
+
+       return num_sizes;
+}
+
+/* only valid during minor collections */
+static mword old_num_major_sections;
+
+/*
+ * Snapshot the major-section count at the start of a nursery collection.
+ * NOTE(review): the consumer of old_num_major_sections is not visible in this
+ * chunk - presumably it is compared after the minor collection; confirm.
+ */
+static void
+major_start_nursery_collection (void)
+{
+#ifdef MARKSWEEP_CONSISTENCY_CHECK
+       consistency_check ();
+#endif
+
+       old_num_major_sections = num_major_sections;
+}
+
+/* Hook run after a nursery collection; only performs the optional consistency check. */
+static void
+major_finish_nursery_collection (void)
+{
+#ifdef MARKSWEEP_CONSISTENCY_CHECK
+       consistency_check ();
+#endif
+}
+
+/*
+ * Prepare the major heap for a major collection: finish any pending sweep,
+ * reset the free lists of size classes that will be evacuated, lazily sweep
+ * any unswept blocks, and move every block into the MARKING state.
+ */
+static void
+major_start_major_collection (void)
+{
+       MSBlockInfo *block;
+       int i;
+
+       major_finish_sweep_checking ();
+
+       /*
+        * Clear the free lists for block sizes where we do evacuation.  For those block
+        * sizes we will have to allocate new blocks.
+        */
+       for (i = 0; i < num_block_obj_sizes; ++i) {
+               if (!evacuate_block_obj_sizes [i])
+                       continue;
+
+               free_block_lists [0][i] = NULL;
+               free_block_lists [MS_BLOCK_FLAG_REFS][i] = NULL;
+       }
+
+       if (lazy_sweep)
+               binary_protocol_sweep_begin (GENERATION_OLD, TRUE);
+
+       /* Sweep all unswept blocks and set them to MARKING */
+       FOREACH_BLOCK_NO_LOCK (block) {
+               if (lazy_sweep)
+                       sweep_block (block);
+               SGEN_ASSERT (0, block->state == BLOCK_STATE_SWEPT, "All blocks must be swept when we're pinning.");
+               set_block_state (block, BLOCK_STATE_MARKING, BLOCK_STATE_SWEPT);
+       } END_FOREACH_BLOCK_NO_LOCK;
+
+       if (lazy_sweep)
+               binary_protocol_sweep_end (GENERATION_OLD, TRUE);
+
+       /* The heap now needs a sweep once marking finishes. */
+       set_sweep_state (SWEEP_STATE_NEED_SWEEPING, SWEEP_STATE_SWEPT);
+}
+
+/*
+ * Hook run after a major collection.  With the heavy binary protocol enabled
+ * it records the total and unique number of scanned objects, then clears the
+ * scanned-objects list for the next collection.
+ */
+static void
+major_finish_major_collection (ScannedObjectCounts *counts)
+{
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+       if (binary_protocol_is_enabled ()) {
+               counts->num_scanned_objects = scanned_objects_list.next_slot;
+
+               sgen_pointer_queue_sort_uniq (&scanned_objects_list);
+               counts->num_unique_scanned_objects = scanned_objects_list.next_slot;
+
+               sgen_pointer_queue_clear (&scanned_objects_list);
+       }
+#endif
+}
+
+#if SIZEOF_VOID_P != 8
+/* qsort comparator ordering raw pointers ascending; only needed on 32-bit (see major_free_swept_blocks). */
+static int
+compare_pointers (const void *va, const void *vb) {
+       char *a = *(char**)va, *b = *(char**)vb;
+       if (a < b)
+               return -1;
+       if (a > b)
+               return 1;
+       return 0;
+}
+#endif
+
+/*
+ * This is called with sweep completed and the world stopped.
+ *
+ * Releases empty blocks back to the OS until only @allowance bytes worth of
+ * empty blocks (the "section reserve") remain.  On 32-bit we first try to
+ * free runs of contiguous blocks (to fight address-space fragmentation)
+ * before falling back to freeing individual blocks.
+ */
+static void
+major_free_swept_blocks (size_t allowance)
+{
+       /* FIXME: This is probably too much.  It's assuming all objects are small. */
+       size_t section_reserve = allowance / MS_BLOCK_SIZE;
+
+       SGEN_ASSERT (0, sweep_state == SWEEP_STATE_SWEPT, "Sweeping must have finished before freeing blocks");
+
+#if SIZEOF_VOID_P != 8
+       {
+               int i, num_empty_blocks_orig, num_blocks, arr_length;
+               void *block;
+               void **empty_block_arr;
+               void **rebuild_next;
+
+#ifdef TARGET_WIN32
+               /*
+                * sgen_free_os_memory () asserts in mono_vfree () because windows doesn't like freeing the middle of
+                * a VirtualAlloc ()-ed block.
+                */
+               return;
+#endif
+
+               if (num_empty_blocks <= section_reserve)
+                       return;
+               SGEN_ASSERT (0, num_empty_blocks > 0, "section reserve can't be negative");
+
+               num_empty_blocks_orig = num_empty_blocks;
+               empty_block_arr = (void**)sgen_alloc_internal_dynamic (sizeof (void*) * num_empty_blocks_orig,
+                               INTERNAL_MEM_MS_BLOCK_INFO_SORT, FALSE);
+               if (!empty_block_arr)
+                       goto fallback;
+
+               /* Flatten the empty-blocks free list into an array and sort it by address. */
+               i = 0;
+               for (block = empty_blocks; block; block = *(void**)block)
+                       empty_block_arr [i++] = block;
+               SGEN_ASSERT (0, i == num_empty_blocks, "empty block count wrong");
+
+               sgen_qsort (empty_block_arr, num_empty_blocks, sizeof (void*), compare_pointers);
+
+               /*
+                * We iterate over the free blocks, trying to find MS_BLOCK_ALLOC_NUM
+                * contiguous ones.  If we do, we free them.  If that's not enough to get to
+                * section_reserve, we halve the number of contiguous blocks we're looking
+                * for and have another go, until we're done with looking for pairs of
+                * blocks, at which point we give up and go to the fallback.
+                */
+               arr_length = num_empty_blocks_orig;
+               num_blocks = MS_BLOCK_ALLOC_NUM;
+               while (num_empty_blocks > section_reserve && num_blocks > 1) {
+                       int first = -1;
+                       int dest = 0;
+
+                       dest = 0;
+                       for (i = 0; i < arr_length; ++i) {
+                               int d = dest;
+                               void *block = empty_block_arr [i];
+                               SGEN_ASSERT (6, block, "we're not shifting correctly");
+                               /* Compact the array in place, shifting surviving entries left. */
+                               if (i != dest) {
+                                       empty_block_arr [dest] = block;
+                                       /*
+                                        * This is not strictly necessary, but we're
+                                        * cautious.
+                                        */
+                                       empty_block_arr [i] = NULL;
+                               }
+                               ++dest;
+
+                               if (first < 0) {
+                                       first = d;
+                                       continue;
+                               }
+
+                               SGEN_ASSERT (6, first >= 0 && d > first, "algorithm is wrong");
+
+                               /* Run broken - this block is not adjacent to the previous one. */
+                               if ((char*)block != ((char*)empty_block_arr [d-1]) + MS_BLOCK_SIZE) {
+                                       first = d;
+                                       continue;
+                               }
+
+                               if (d + 1 - first == num_blocks) {
+                                       /*
+                                        * We found num_blocks contiguous blocks.  Free them
+                                        * and null their array entries.  As an optimization
+                                        * we could, instead of nulling the entries, shift
+                                        * the following entries over to the left, while
+                                        * we're iterating.
+                                        */
+                                       int j;
+                                       sgen_free_os_memory (empty_block_arr [first], MS_BLOCK_SIZE * num_blocks, SGEN_ALLOC_HEAP);
+                                       for (j = first; j <= d; ++j)
+                                               empty_block_arr [j] = NULL;
+                                       dest = first;
+                                       first = -1;
+
+                                       num_empty_blocks -= num_blocks;
+
+                                       stat_major_blocks_freed += num_blocks;
+                                       if (num_blocks == MS_BLOCK_ALLOC_NUM)
+                                               stat_major_blocks_freed_ideal += num_blocks;
+                                       else
+                                               stat_major_blocks_freed_less_ideal += num_blocks;
+
+                               }
+                       }
+
+                       SGEN_ASSERT (6, dest <= i && dest <= arr_length, "array length is off");
+                       arr_length = dest;
+                       SGEN_ASSERT (6, arr_length == num_empty_blocks, "array length is off");
+
+                       num_blocks >>= 1;
+               }
+
+               /* rebuild empty_blocks free list */
+               rebuild_next = (void**)&empty_blocks;
+               for (i = 0; i < arr_length; ++i) {
+                       void *block = empty_block_arr [i];
+                       SGEN_ASSERT (6, block, "we're missing blocks");
+                       *rebuild_next = block;
+                       rebuild_next = (void**)block;
+               }
+               *rebuild_next = NULL;
+
+               /* free array */
+               sgen_free_internal_dynamic (empty_block_arr, sizeof (void*) * num_empty_blocks_orig, INTERNAL_MEM_MS_BLOCK_INFO_SORT);
+       }
+
+       SGEN_ASSERT (0, num_empty_blocks >= 0, "we freed more blocks than we had in the first place?");
+
+ fallback:
+       /*
+        * This is our threshold.  If there's not more empty than used blocks, we won't
+        * release uncontiguous blocks, in fear of fragmenting the address space.
+        */
+       if (num_empty_blocks <= num_major_sections)
+               return;
+#endif
+
+       /* Free individual blocks from the head of the free list until we hit the reserve. */
+       while (num_empty_blocks > section_reserve) {
+               void *next = *(void**)empty_blocks;
+               sgen_free_os_memory (empty_blocks, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP);
+               empty_blocks = next;
+               /*
+                * Needs not be atomic because this is running
+                * single-threaded.
+                */
+               --num_empty_blocks;
+
+               ++stat_major_blocks_freed;
+#if SIZEOF_VOID_P != 8
+               ++stat_major_blocks_freed_individual;
+#endif
+       }
+}
+
+/*
+ * For each major block, find the portion of the optimized pin queue that
+ * falls inside the block's object area and mark/pin those objects.
+ */
+static void
+major_pin_objects (SgenGrayQueue *queue)
+{
+       MSBlockInfo *block;
+
+       FOREACH_BLOCK_NO_LOCK (block) {
+               size_t first_entry, last_entry;
+               SGEN_ASSERT (6, block_is_swept_or_marking (block), "All blocks must be swept when we're pinning.");
+               sgen_find_optimized_pin_queue_area (MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SKIP, MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE,
+                               &first_entry, &last_entry);
+               mark_pinned_objects_in_block (block, first_entry, last_entry, queue);
+       } END_FOREACH_BLOCK_NO_LOCK;
+}
+
+/* Nothing to do: mark-sweep has no separate to-space to initialize. */
+static void
+major_init_to_space (void)
+{
+}
+
+/* Not implemented for the mark-sweep major collector. */
+static void
+major_report_pinned_memory_usage (void)
+{
+       g_assert_not_reached ();
+}
+
+/*
+ * Returns the number of bytes occupied by live objects in the major heap:
+ * total slot bytes per block minus the bytes still on each block's free list.
+ */
+static gint64
+major_get_used_size (void)
+{
+       gint64 size = 0;
+       MSBlockInfo *block;
+
+       /*
+        * We're holding the GC lock, but the sweep thread might be running.  Make sure it's
+        * finished, then we can iterate over the block array.
+        */
+       major_finish_sweep_checking ();
+
+       FOREACH_BLOCK_NO_LOCK_CONDITION (TRUE, block) {
+               int count = MS_BLOCK_FREE / block->obj_size;
+               void **iter;
+               size += count * block->obj_size;
+               /* Walk the free list, subtracting one slot per entry. */
+               for (iter = block->free_list; iter; iter = (void**)*iter)
+                       size -= block->obj_size;
+       } END_FOREACH_BLOCK_NO_LOCK;
+
+       return size;
+}
+
+/* FIXME: return number of bytes, not of sections */
+/* Current count of allocated major blocks. */
+static size_t
+get_num_major_sections (void)
+{
+       return num_major_sections;
+}
+
+/*
+ * Returns the number of bytes in blocks that were present when the last sweep was
+ * initiated, and were not freed during the sweep.  They are the basis for calculating the
+ * allowance.
+ */
+static size_t
+get_bytes_survived_last_sweep (void)
+{
+       SGEN_ASSERT (0, sweep_state == SWEEP_STATE_SWEPT, "Can only query unswept sections after sweep");
+       return (num_major_sections_before_sweep - num_major_sections_freed_in_sweep) * MS_BLOCK_SIZE;
+}
+
+/*
+ * Parse one collector-specific option from MONO_GC_PARAMS.  Returns TRUE if
+ * @opt was recognized and applied, FALSE otherwise.  Exits the process on an
+ * out-of-range evacuation-threshold value.
+ */
+static gboolean
+major_handle_gc_param (const char *opt)
+{
+       if (g_str_has_prefix (opt, "evacuation-threshold=")) {
+               const char *arg = strchr (opt, '=') + 1;
+               int percentage = atoi (arg);
+               if (percentage < 0 || percentage > 100) {
+                       fprintf (stderr, "evacuation-threshold must be an integer in the range 0-100.\n");
+                       exit (1);
+               }
+               evacuation_threshold = (float)percentage / 100.0f;
+               return TRUE;
+       } else if (!strcmp (opt, "lazy-sweep")) {
+               lazy_sweep = TRUE;
+               return TRUE;
+       } else if (!strcmp (opt, "no-lazy-sweep")) {
+               lazy_sweep = FALSE;
+               return TRUE;
+       } else if (!strcmp (opt, "concurrent-sweep")) {
+               concurrent_sweep = TRUE;
+               return TRUE;
+       } else if (!strcmp (opt, "no-concurrent-sweep")) {
+               concurrent_sweep = FALSE;
+               return TRUE;
+       }
+
+       return FALSE;
+}
+
+/* Print the options recognized by major_handle_gc_param () to stderr. */
+static void
+major_print_gc_param_usage (void)
+{
+       fprintf (stderr,
+                       ""
+                       "  evacuation-threshold=P (where P is a percentage, an integer in 0-100)\n"
+                       "  (no-)lazy-sweep\n"
+                       "  (no-)concurrent-sweep\n"
+                       );
+}
+
+/*
+ * This callback is used to clear cards, move cards to the shadow table and do counting.
+ * Invokes @callback on the address range of every block that contains references.
+ */
+static void
+major_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
+{
+       MSBlockInfo *block;
+       gboolean has_references;
+
+       major_finish_sweep_checking ();
+       FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK (block, has_references) {
+               if (has_references)
+                       callback ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
+       } END_FOREACH_BLOCK_NO_LOCK;
+}
+
+#ifdef HEAVY_STATISTICS
+/* Card-scanning counters, defined elsewhere. */
+extern guint64 marked_cards;
+extern guint64 scanned_cards;
+extern guint64 scanned_objects;
+extern guint64 remarked_cards;
+#endif
+
+/* Number of machine words covering one block's cards. */
+#define CARD_WORDS_PER_BLOCK (CARDS_PER_BLOCK / SIZEOF_VOID_P)
+/*
+ * MS blocks are 16K aligned.
+ * Cardtables are 4K aligned, at least.
+ * This means that the cardtable of a given block is 32 bytes aligned.
+ */
+/*
+ * Skip ahead to the first non-zero card byte in @card_data, scanning a word
+ * at a time.  Returns card_data + CARDS_PER_BLOCK if all cards are clear.
+ */
+static guint8*
+initial_skip_card (guint8 *card_data)
+{
+       mword *cards = (mword*)card_data;
+       mword card;
+       int i;
+       for (i = 0; i < CARD_WORDS_PER_BLOCK; ++i) {
+               card = cards [i];
+               if (card)
+                       break;
+       }
+
+       if (i == CARD_WORDS_PER_BLOCK)
+               return card_data + CARDS_PER_BLOCK;
+
+#if defined(__i386__) && defined(__GNUC__)
+       /* ffs gives the first set bit (1-based); /8 converts bit index to byte offset in the word. */
+       return card_data + i * 4 +  (__builtin_ffs (card) - 1) / 8;
+#elif defined(__x86_64__) && defined(__GNUC__)
+       return card_data + i * 8 +  (__builtin_ffsll (card) - 1) / 8;
+#elif defined(__s390x__) && defined(__GNUC__)
+       return card_data + i * 8 +  (__builtin_ffsll (GUINT64_TO_LE(card)) - 1) / 8;
+#else
+       /* Generic fallback: byte-wise scan from the non-zero word onward. */
+       for (i = i * SIZEOF_VOID_P; i < CARDS_PER_BLOCK; ++i) {
+               if (card_data [i])
+                       return &card_data [i];
+       }
+       return card_data;
+#endif
+}
+
+/* Slot index of object @o within block starting at @b, given slot size @os. */
+#define MS_BLOCK_OBJ_INDEX_FAST(o,b,os)        (((char*)(o) - ((b) + MS_BLOCK_SKIP)) / (os))
+/* Address of slot @i in block starting at @b with slot size @os. */
+#define MS_BLOCK_OBJ_FAST(b,os,i)                      ((b) + MS_BLOCK_SKIP + (os) * (i))
+/*
+ * TRUE if slot @o in block @b holds an allocated object: first word non-NULL
+ * and pointing outside the block.  NOTE(review): presumably free slots hold a
+ * free-list link that points within the same block - confirm against the
+ * free-list layout.
+ */
+#define MS_OBJ_ALLOCED_FAST(o,b)               (*(void**)(o) && (*(char**)(o) < (b) || *(char**)(o) >= (b) + MS_BLOCK_SIZE))
+
+/*
+ * Scan the marked cards of one block, invoking ctx.ops->scan_object on each
+ * allocated object covered by a dirty card.  With @mod_union the block's
+ * mod-union card table is consulted instead of the regular card table, and
+ * only marked objects are scanned.
+ */
+static void
+scan_card_table_for_block (MSBlockInfo *block, gboolean mod_union, ScanCopyContext ctx)
+{
+       SgenGrayQueue *queue = ctx.queue;
+       ScanObjectFunc scan_func = ctx.ops->scan_object;
+#ifndef SGEN_HAVE_OVERLAPPING_CARDS
+       guint8 cards_copy [CARDS_PER_BLOCK];
+#endif
+       gboolean small_objects;
+       int block_obj_size;
+       char *block_start;
+       guint8 *card_data, *card_base;
+       guint8 *card_data_end;
+       char *scan_front = NULL;
+
+       block_obj_size = block->obj_size;
+       /* "Small" objects fit within one card; large ones span several cards. */
+       small_objects = block_obj_size < CARD_SIZE_IN_BYTES;
+
+       block_start = MS_BLOCK_FOR_BLOCK_INFO (block);
+
+       /*
+        * This is safe in face of card aliasing for the following reason:
+        *
+        * Major blocks are 16k aligned, or 32 cards aligned.
+        * Cards aliasing happens in powers of two, so as long as major blocks are aligned to their
+        * sizes, they won't overflow the cardtable overlap modulus.
+        */
+       if (mod_union) {
+               card_data = card_base = block->cardtable_mod_union;
+               /*
+                * This happens when the nursery collection that precedes finishing
+                * the concurrent collection allocates new major blocks.
+                */
+               if (!card_data)
+                       return;
+       } else {
+#ifdef SGEN_HAVE_OVERLAPPING_CARDS
+               card_data = card_base = sgen_card_table_get_card_scan_address ((mword)block_start);
+#else
+               if (!sgen_card_table_get_card_data (cards_copy, (mword)block_start, CARDS_PER_BLOCK))
+                       return;
+               card_data = card_base = cards_copy;
+#endif
+       }
+       card_data_end = card_data + CARDS_PER_BLOCK;
+
+       /* Skip the cards covering the block header. */
+       card_data += MS_BLOCK_SKIP >> CARD_BITS;
+
+       card_data = initial_skip_card (card_data);
+       while (card_data < card_data_end) {
+               size_t card_index, first_object_index;
+               char *start;
+               char *end;
+               char *first_obj, *obj;
+
+               HEAVY_STAT (++scanned_cards);
+
+               if (!*card_data) {
+                       ++card_data;
+                       continue;
+               }
+
+               card_index = card_data - card_base;
+               start = (char*)(block_start + card_index * CARD_SIZE_IN_BYTES);
+               end = start + CARD_SIZE_IN_BYTES;
+
+               if (!block_is_swept_or_marking (block))
+                       sweep_block (block);
+
+               HEAVY_STAT (++marked_cards);
+
+               if (small_objects)
+                       sgen_card_table_prepare_card_for_scanning (card_data);
+
+               /*
+                * If the card we're looking at starts at or in the block header, we
+                * must start at the first object in the block, without calculating
+                * the index of the object we're hypothetically starting at, because
+                * it would be negative.
+                */
+               if (card_index <= (MS_BLOCK_SKIP >> CARD_BITS))
+                       first_object_index = 0;
+               else
+                       first_object_index = MS_BLOCK_OBJ_INDEX_FAST (start, block_start, block_obj_size);
+
+               obj = first_obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, first_object_index);
+
+               binary_protocol_card_scan (first_obj, end - first_obj);
+
+               while (obj < end) {
+                       /* scan_front guards against re-scanning objects already covered by a previous card. */
+                       if (obj < scan_front || !MS_OBJ_ALLOCED_FAST (obj, block_start))
+                               goto next_object;
+
+                       if (mod_union) {
+                               /* FIXME: do this more efficiently */
+                               int w, b;
+                               MS_CALC_MARK_BIT (w, b, obj);
+                               if (!MS_MARK_BIT (block, w, b))
+                                       goto next_object;
+                       }
+
+                       if (small_objects) {
+                               HEAVY_STAT (++scanned_objects);
+                               scan_func (obj, sgen_obj_get_descriptor (obj), queue);
+                       } else {
+                               /* Large object: scan only the parts covered by dirty cards. */
+                               size_t offset = sgen_card_table_get_card_offset (obj, block_start);
+                               sgen_cardtable_scan_object (obj, block_obj_size, card_base + offset, mod_union, ctx);
+                       }
+               next_object:
+                       obj += block_obj_size;
+                       g_assert (scan_front <= obj);
+                       scan_front = obj;
+               }
+
+               HEAVY_STAT (if (*card_data) ++remarked_cards);
+
+               if (small_objects)
+                       ++card_data;
+               else
+                       /* Jump to the card covering the next object, skipping the cards it spanned. */
+                       card_data = card_base + sgen_card_table_get_card_offset (obj, block_start);
+       }
+}
+
+/*
+ * Scan the card table (or the mod-union tables when @mod_union) of every
+ * major block that contains references, prefetching a few blocks ahead when
+ * PREFETCH_CARDS is enabled.
+ */
+static void
+major_scan_card_table (gboolean mod_union, ScanCopyContext ctx)
+{
+       MSBlockInfo *block;
+       gboolean has_references;
+
+       /* mod-union scanning only makes sense for the concurrent collector. */
+       if (!concurrent_mark)
+               g_assert (!mod_union);
+
+       major_finish_sweep_checking ();
+       FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK (block, has_references) {
+#ifdef PREFETCH_CARDS
+               int prefetch_index = __index + 6;
+               if (prefetch_index < allocated_blocks.next_slot) {
+                       MSBlockInfo *prefetch_block = BLOCK_UNTAG (allocated_blocks.data [prefetch_index]);
+                       guint8 *prefetch_cards = sgen_card_table_get_card_scan_address ((mword)MS_BLOCK_FOR_BLOCK_INFO (prefetch_block));
+                       PREFETCH_READ (prefetch_block);
+                       PREFETCH_WRITE (prefetch_cards);
+                       PREFETCH_WRITE (prefetch_cards + 32);
+                }
+#endif
+
+               if (!has_references)
+                       continue;
+
+               scan_card_table_for_block (block, mod_union, ctx);
+       } END_FOREACH_BLOCK_NO_LOCK;
+}
+
+/*
+ * Count the total and marked cards over all reference-containing major
+ * blocks.  If a sweep is in progress both outputs are set to -1, since the
+ * block array cannot be iterated reliably.
+ */
+static void
+major_count_cards (long long *num_total_cards, long long *num_marked_cards)
+{
+       MSBlockInfo *block;
+       gboolean has_references;
+       long long total_cards = 0;
+       long long marked_cards = 0;
+
+       if (sweep_in_progress ()) {
+               *num_total_cards = -1;
+               *num_marked_cards = -1;
+               return;
+       }
+
+       FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK (block, has_references) {
+               guint8 *cards = sgen_card_table_get_card_scan_address ((mword) MS_BLOCK_FOR_BLOCK_INFO (block));
+               int i;
+
+               if (!has_references)
+                       continue;
+
+               total_cards += CARDS_PER_BLOCK;
+               for (i = 0; i < CARDS_PER_BLOCK; ++i) {
+                       if (cards [i])
+                               ++marked_cards;
+               }
+       } END_FOREACH_BLOCK_NO_LOCK;
+
+       *num_total_cards = total_cards;
+       *num_marked_cards = marked_cards;
+}
+
+/*
+ * Fold the current card-table state of every major-heap block into that
+ * block's mod-union card set (allocating the set on demand).  Used by the
+ * concurrent collector to remember cards dirtied while marking runs.
+ */
+static void
+update_cardtable_mod_union (void)
+{
+       MSBlockInfo *block;
+
+       FOREACH_BLOCK_NO_LOCK (block) {
+               size_t num_cards;
+               guint8 *mod_union = get_cardtable_mod_union_for_block (block, TRUE);
+               sgen_card_table_update_mod_union (mod_union, MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE, &num_cards);
+               SGEN_ASSERT (6, num_cards == CARDS_PER_BLOCK, "Number of cards calculation is wrong");
+       } END_FOREACH_BLOCK_NO_LOCK;
+}
+
+/* NOTE(review): presumably undone here because a wrapper macro for
+ * pthread_create is defined by an earlier include — confirm which header. */
+#undef pthread_create
+
+/*
+ * Re-derive collector capabilities that depend on GC parameters parsed
+ * after sgen_marksweep_init_internal () has already run.
+ */
+static void
+post_param_init (SgenMajorCollector *collector)
+{
+       collector->sweeps_lazily = lazy_sweep;
+       collector->needs_thread_pool = concurrent_mark || concurrent_sweep;
+}
+
+/*
+ * Shared initialization for the serial and the concurrent mark&sweep
+ * major collector: builds the block object-size tables, registers the
+ * statistics counters and fills in the SgenMajorCollector function table.
+ */
+static void
+sgen_marksweep_init_internal (SgenMajorCollector *collector, gboolean is_concurrent)
+{
+       int i;
+
+       sgen_register_fixed_internal_mem_type (INTERNAL_MEM_MS_BLOCK_INFO, sizeof (MSBlockInfo));
+
+       /* First call counts the number of size classes, second call fills the table. */
+       num_block_obj_sizes = ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, NULL);
+       block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (int) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
+       ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, block_obj_sizes);
+
+       evacuate_block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (gboolean) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
+       for (i = 0; i < num_block_obj_sizes; ++i)
+               evacuate_block_obj_sizes [i] = FALSE;
+
+       sweep_slots_available = sgen_alloc_internal_dynamic (sizeof (size_t) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
+       sweep_slots_used = sgen_alloc_internal_dynamic (sizeof (size_t) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
+       sweep_num_blocks = sgen_alloc_internal_dynamic (sizeof (size_t) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
+
+       /*
+       {
+               int i;
+               g_print ("block object sizes:\n");
+               for (i = 0; i < num_block_obj_sizes; ++i)
+                       g_print ("%d\n", block_obj_sizes [i]);
+       }
+       */
+
+       for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i)
+               free_block_lists [i] = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
+
+       /* Build the fast size->index lookup and cross-check it against the slow path. */
+       for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES; ++i)
+               fast_block_obj_size_indexes [i] = ms_find_block_obj_size_index (i * 8);
+       for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES * 8; ++i)
+               g_assert (MS_BLOCK_OBJ_SIZE_INDEX (i) == ms_find_block_obj_size_index (i));
+
+       mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced);
+       mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed);
+       mono_counters_register ("# major blocks lazy swept", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_lazy_swept);
+       mono_counters_register ("# major objects evacuated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_objects_evacuated);
+/* NOTE(review): these extra counters are only registered on non-64-bit
+ * pointer targets — confirm that is intentional and not an inverted test. */
+#if SIZEOF_VOID_P != 8
+       mono_counters_register ("# major blocks freed ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_ideal);
+       mono_counters_register ("# major blocks freed less ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_less_ideal);
+       mono_counters_register ("# major blocks freed individually", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_individual);
+       mono_counters_register ("# major blocks allocated less ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced_less_ideal);
+#endif
+
+       collector->section_size = MAJOR_SECTION_SIZE;
+
+       concurrent_mark = is_concurrent;
+       collector->is_concurrent = is_concurrent;
+       collector->needs_thread_pool = is_concurrent || concurrent_sweep;
+       if (is_concurrent)
+               collector->want_synchronous_collection = &want_evacuation;
+       else
+               collector->want_synchronous_collection = NULL;
+       collector->get_and_reset_num_major_objects_marked = major_get_and_reset_num_major_objects_marked;
+       collector->supports_cardtable = TRUE;
+
+       /* Fill in the collector's function table. */
+       collector->alloc_heap = major_alloc_heap;
+       collector->is_object_live = major_is_object_live;
+       collector->alloc_small_pinned_obj = major_alloc_small_pinned_obj;
+       collector->alloc_degraded = major_alloc_degraded;
+
+       collector->alloc_object = major_alloc_object;
+       collector->free_pinned_object = free_pinned_object;
+       collector->iterate_objects = major_iterate_objects;
+       collector->free_non_pinned_object = major_free_non_pinned_object;
+       collector->pin_objects = major_pin_objects;
+       collector->pin_major_object = pin_major_object;
+       collector->scan_card_table = major_scan_card_table;
+       collector->iterate_live_block_ranges = (void*)(void*) major_iterate_live_block_ranges;
+       if (is_concurrent) {
+               /* Mod-union tracking only exists for the concurrent collector. */
+               collector->update_cardtable_mod_union = update_cardtable_mod_union;
+               collector->get_cardtable_mod_union_for_object = major_get_cardtable_mod_union_for_reference;
+       }
+       collector->init_to_space = major_init_to_space;
+       collector->sweep = major_sweep;
+       collector->have_swept = major_have_swept;
+       collector->finish_sweeping = major_finish_sweep_checking;
+       collector->free_swept_blocks = major_free_swept_blocks;
+       collector->check_scan_starts = major_check_scan_starts;
+       collector->dump_heap = major_dump_heap;
+       collector->get_used_size = major_get_used_size;
+       collector->start_nursery_collection = major_start_nursery_collection;
+       collector->finish_nursery_collection = major_finish_nursery_collection;
+       collector->start_major_collection = major_start_major_collection;
+       collector->finish_major_collection = major_finish_major_collection;
+       collector->ptr_is_in_non_pinned_space = major_ptr_is_in_non_pinned_space;
+       collector->obj_is_from_pinned_alloc = obj_is_from_pinned_alloc;
+       collector->report_pinned_memory_usage = major_report_pinned_memory_usage;
+       collector->get_num_major_sections = get_num_major_sections;
+       collector->get_bytes_survived_last_sweep = get_bytes_survived_last_sweep;
+       collector->handle_gc_param = major_handle_gc_param;
+       collector->print_gc_param_usage = major_print_gc_param_usage;
+       collector->post_param_init = post_param_init;
+       collector->is_valid_object = major_is_valid_object;
+       collector->describe_pointer = major_describe_pointer;
+       collector->count_cards = major_count_cards;
+
+       collector->major_ops_serial.copy_or_mark_object = major_copy_or_mark_object_canonical;
+       collector->major_ops_serial.scan_object = major_scan_object_with_evacuation;
+       if (is_concurrent) {
+               /* Separate op sets for the start, middle and finish phases of concurrent marking. */
+               collector->major_ops_concurrent_start.copy_or_mark_object = major_copy_or_mark_object_concurrent_canonical;
+               collector->major_ops_concurrent_start.scan_object = major_scan_object_no_mark_concurrent_start;
+
+               collector->major_ops_concurrent.copy_or_mark_object = major_copy_or_mark_object_concurrent_canonical;
+               collector->major_ops_concurrent.scan_object = major_scan_object_no_mark_concurrent;
+
+               collector->major_ops_concurrent_finish.copy_or_mark_object = major_copy_or_mark_object_concurrent_finish_canonical;
+               collector->major_ops_concurrent_finish.scan_object = major_scan_object_no_evacuation;
+               collector->major_ops_concurrent_finish.scan_vtype = major_scan_vtype_concurrent_finish;
+       }
+
+#if !defined (FIXED_HEAP) && !defined (SGEN_PARALLEL_MARK)
+       if (!is_concurrent)
+               collector->drain_gray_stack = drain_gray_stack;
+
+#ifdef HEAVY_STATISTICS
+       mono_counters_register ("Optimized copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy);
+       mono_counters_register ("Optimized copy nursery", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery);
+       mono_counters_register ("Optimized copy nursery forwarded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery_forwarded);
+       mono_counters_register ("Optimized copy nursery pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery_pinned);
+       mono_counters_register ("Optimized copy major", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major);
+       mono_counters_register ("Optimized copy major small fast", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_fast);
+       mono_counters_register ("Optimized copy major small slow", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_slow);
+       mono_counters_register ("Optimized copy major large", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_large);
+       mono_counters_register ("Optimized major scan", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_major_scan);
+       mono_counters_register ("Optimized major scan no refs", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_major_scan_no_refs);
+
+       mono_counters_register ("Gray stack drain loops", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_loops);
+       mono_counters_register ("Gray stack prefetch fills", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_prefetch_fills);
+       mono_counters_register ("Gray stack prefetch failures", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_prefetch_fill_failures);
+#endif
+#endif
+
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+       mono_mutex_init (&scanned_objects_list_lock);
+#endif
+
+       SGEN_ASSERT (0, SGEN_MAX_SMALL_OBJ_SIZE <= MS_BLOCK_FREE / 2, "MAX_SMALL_OBJ_SIZE must be at most MS_BLOCK_FREE / 2");
+
+       /*cardtable requires major pages to be 8 cards aligned*/
+       g_assert ((MS_BLOCK_SIZE % (8 * CARD_SIZE_IN_BYTES)) == 0);
+}
+
+/* Public entry point: initialize the serial mark&sweep major collector. */
+void
+sgen_marksweep_init (SgenMajorCollector *collector)
+{
+       sgen_marksweep_init_internal (collector, FALSE);
+}
+
+/* Public entry point: initialize the concurrent mark&sweep major collector. */
+void
+sgen_marksweep_conc_init (SgenMajorCollector *collector)
+{
+       sgen_marksweep_init_internal (collector, TRUE);
+}
+
+#endif
diff --git a/mono/sgen/sgen-memory-governor.c b/mono/sgen/sgen-memory-governor.c
new file mode 100644 (file)
index 0000000..6144417
--- /dev/null
@@ -0,0 +1,326 @@
+/*
+ * sgen-memory-governor.c: When to schedule collections based on
+ * memory usage.
+ *
+ * Author:
+ *     Rodrigo Kumpera (rkumpera@novell.com)
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#include <stdlib.h>
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-memory-governor.h"
+#include "mono/sgen/sgen-thread-pool.h"
+#include "mono/sgen/sgen-client.h"
+
+/* Lower bound on the heap-growth allowance granted between major collections. */
+#define MIN_MINOR_COLLECTION_ALLOWANCE ((mword)(DEFAULT_NURSERY_SIZE * default_allowance_nursery_size_ratio))
+
+/*Heap limits and allocation knobs*/
+/* Both limits default to "unlimited" (all bits set). */
+static mword max_heap_size = ((mword)0)- ((mword)1);
+static mword soft_heap_limit = ((mword)0) - ((mword)1);
+
+static double default_allowance_nursery_size_ratio = SGEN_DEFAULT_ALLOWANCE_NURSERY_SIZE_RATIO;
+static double save_target_ratio = SGEN_DEFAULT_SAVE_TARGET_RATIO;
+
+/* Total memory accounting. */
+static mword allocated_heap;
+static mword total_alloc = 0;
+static mword total_alloc_max = 0;
+
+/* GC triggers. */
+
+static gboolean debug_print_allowance = FALSE;
+
+
+/* use this to tune when to do a major/minor collection */
+static mword major_collection_trigger_size;
+
+static mword last_major_num_sections = 0;
+static mword last_los_memory_usage = 0;
+
+/* Set at major-collection start; cleared once the allowance is recomputed. */
+static gboolean need_calculate_minor_collection_allowance;
+
+/* The size of the LOS after the last major collection, after sweeping. */
+static mword last_collection_los_memory_usage = 0;
+
+static mword sgen_memgov_available_free_space (void);
+
+
+/* GC trigger heuristics. */
+
+/*
+ * Recompute the heap-growth allowance and thus the heap size that will
+ * trigger the next major collection.  Requires the heap to be fully
+ * swept; no-op unless a major collection has happened since the last
+ * computation.
+ */
+static void
+sgen_memgov_calculate_minor_collection_allowance (void)
+{
+       size_t new_major, new_heap_size, allowance_target, allowance;
+
+       if (!need_calculate_minor_collection_allowance)
+               return;
+
+       SGEN_ASSERT (0, major_collector.have_swept (), "Can only calculate allowance if heap is swept");
+
+       new_major = major_collector.get_bytes_survived_last_sweep ();
+       new_heap_size = new_major + last_collection_los_memory_usage;
+
+       /*
+        * We allow the heap to grow by one third its current size before we start the next
+        * major collection.
+        */
+       allowance_target = new_heap_size / 3;
+
+       allowance = MAX (allowance_target, MIN_MINOR_COLLECTION_ALLOWANCE);
+
+       /* Clamp the allowance so the heap stays under the soft limit if possible. */
+       if (new_heap_size + allowance > soft_heap_limit) {
+               if (new_heap_size > soft_heap_limit)
+                       allowance = MIN_MINOR_COLLECTION_ALLOWANCE;
+               else
+                       allowance = MAX (soft_heap_limit - new_heap_size, MIN_MINOR_COLLECTION_ALLOWANCE);
+       }
+
+       /* FIXME: Why is this here? */
+       if (major_collector.free_swept_blocks)
+               major_collector.free_swept_blocks (allowance);
+
+       major_collection_trigger_size = new_heap_size + allowance;
+
+       need_calculate_minor_collection_allowance = FALSE;
+
+       if (debug_print_allowance) {
+               SGEN_LOG (0, "Surviving sweep: %ld bytes (%ld major, %ld LOS)", (long)new_heap_size, (long)new_major, (long)last_collection_los_memory_usage);
+               SGEN_LOG (0, "Allowance: %ld bytes", (long)allowance);
+               SGEN_LOG (0, "Trigger size: %ld bytes", (long)major_collection_trigger_size);
+       }
+}
+
+/*
+ * Decide whether a major collection should be started: either because
+ * `space_needed` bytes cannot be satisfied from the remaining heap
+ * budget, or because the heap has grown past the trigger size.  Never
+ * requests one while a concurrent collection is already running.
+ */
+gboolean
+sgen_need_major_collection (mword space_needed)
+{
+       size_t heap_size;
+
+       if (sgen_concurrent_collection_in_progress ())
+               return FALSE;
+
+       /* FIXME: This is a cop-out.  We should have some way of figuring this out. */
+       if (!major_collector.have_swept ())
+               return FALSE;
+
+       if (space_needed > sgen_memgov_available_free_space ())
+               return TRUE;
+
+       sgen_memgov_calculate_minor_collection_allowance ();
+
+       heap_size = major_collector.get_num_major_sections () * major_collector.section_size + los_memory_usage;
+
+       return heap_size > major_collection_trigger_size;
+}
+
+/* Hook called when a minor collection starts.  Currently a no-op. */
+void
+sgen_memgov_minor_collection_start (void)
+{
+}
+
+/* Hook called when a minor collection ends.  Currently a no-op. */
+void
+sgen_memgov_minor_collection_end (void)
+{
+}
+
+/*
+ * Hook called when a major collection starts: schedules a recomputation
+ * of the collection allowance for after the sweep.
+ */
+void
+sgen_memgov_major_collection_start (void)
+{
+       need_calculate_minor_collection_allowance = TRUE;
+
+       if (debug_print_allowance) {
+               SGEN_LOG (0, "Starting collection with heap size %ld bytes", (long)(major_collector.get_num_major_sections () * major_collector.section_size + los_memory_usage));
+       }
+}
+
+/*
+ * Hook called when a major collection ends.  Records the LOS size; for a
+ * forced (explicit) collection the sweep is finished synchronously so the
+ * allowance can be recomputed immediately.
+ */
+void
+sgen_memgov_major_collection_end (gboolean forced)
+{
+       last_collection_los_memory_usage = los_memory_usage;
+
+       if (forced) {
+               sgen_get_major_collector ()->finish_sweeping ();
+               sgen_memgov_calculate_minor_collection_allowance ();
+       }
+}
+
+/* Hook called when any collection starts.  Currently a no-op. */
+void
+sgen_memgov_collection_start (int generation)
+{
+}
+
+/*
+ * Hook called when any collection ends: forwards each valid timing entry
+ * to the client's timing logger.
+ */
+void
+sgen_memgov_collection_end (int generation, GGTimingInfo* info, int info_count)
+{
+       int i;
+       for (i = 0; i < info_count; ++i) {
+               /* generation == -1 marks an unused entry. */
+               if (info[i].generation != -1)
+                       sgen_client_log_timing (&info [i], last_major_num_sections, last_los_memory_usage);
+       }
+}
+
+/*
+Global GC memory tracking.
+This tracks the total usage of memory by the GC. This includes
+managed and unmanaged memory.
+*/
+
+/*
+ * Map the `activate` flag to mmap protection flags: read/write when
+ * activated, inaccessible (reserve-only) otherwise.  Always private and
+ * anonymous.
+ */
+static unsigned long
+prot_flags_for_activate (int activate)
+{
+       unsigned long prot_flags = activate? MONO_MMAP_READ|MONO_MMAP_WRITE: MONO_MMAP_NONE;
+       return prot_flags | MONO_MMAP_PRIVATE | MONO_MMAP_ANON;
+}
+
+/*
+ * Abort the process with a diagnostic if `ptr` is NULL and a description
+ * was supplied; a NULL description makes the failure non-fatal for the
+ * caller to handle.
+ */
+void
+sgen_assert_memory_alloc (void *ptr, size_t requested_size, const char *assert_description)
+{
+       if (ptr || !assert_description)
+               return;
+       fprintf (stderr, "Error: Garbage collector could not allocate %zu bytes of memory for %s.\n", requested_size, assert_description);
+       exit (1);
+}
+
+/*
+ * Allocate a big chunk of memory from the OS (usually 64KB to several megabytes).
+ * This must not require any lock.
+ */
+void*
+sgen_alloc_os_memory (size_t size, SgenAllocFlags flags, const char *assert_description)
+{
+       void *ptr;
+
+       g_assert (!(flags & ~(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE)));
+
+       ptr = mono_valloc (0, size, prot_flags_for_activate (flags & SGEN_ALLOC_ACTIVATE));
+       sgen_assert_memory_alloc (ptr, size, assert_description);
+       if (ptr) {
+               SGEN_ATOMIC_ADD_P (total_alloc, size);
+               total_alloc_max = MAX (total_alloc_max, total_alloc);
+       }
+       return ptr;
+}
+
+/* size must be a power of 2 */
+/*
+ * Like sgen_alloc_os_memory () but the returned address is aligned to
+ * `alignment` bytes.
+ */
+void*
+sgen_alloc_os_memory_aligned (size_t size, mword alignment, SgenAllocFlags flags, const char *assert_description)
+{
+       void *ptr;
+
+       g_assert (!(flags & ~(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE)));
+
+       ptr = mono_valloc_aligned (size, alignment, prot_flags_for_activate (flags & SGEN_ALLOC_ACTIVATE));
+       sgen_assert_memory_alloc (ptr, size, assert_description);
+       if (ptr) {
+               SGEN_ATOMIC_ADD_P (total_alloc, size);
+               total_alloc_max = MAX (total_alloc_max, total_alloc);
+       }
+       return ptr;
+}
+
+/*
+ * Free the memory returned by sgen_alloc_os_memory (), returning it to the OS.
+ */
+void
+sgen_free_os_memory (void *addr, size_t size, SgenAllocFlags flags)
+{
+       g_assert (!(flags & ~SGEN_ALLOC_HEAP));
+
+       mono_vfree (addr, size);
+       SGEN_ATOMIC_ADD_P (total_alloc, -(gssize)size);
+       total_alloc_max = MAX (total_alloc_max, total_alloc);
+}
+
+/* Total bytes currently obtained from the OS by the GC (managed + unmanaged). */
+size_t
+sgen_gc_get_total_heap_allocation (void)
+{
+       return total_alloc;
+}
+
+
+/*
+Heap Sizing limits.
+This limit the max size of the heap. It takes into account
+only memory actively in use to hold heap objects and not
+for other parts of the GC.
+ */
+static mword
+sgen_memgov_available_free_space (void)
+{
+       return max_heap_size - MIN (allocated_heap, max_heap_size);
+}
+
+/* Return `size` bytes of heap budget; `space` identifies the heap space (currently unused here). */
+void
+sgen_memgov_release_space (mword size, int space)
+{
+       SGEN_ATOMIC_ADD_P (allocated_heap, -(gssize)size);
+}
+
+/*
+ * Try to reserve `size` bytes of heap budget.  Returns FALSE when the
+ * max-heap limit would be exceeded; on success updates the accounting and
+ * notifies the client of the new total.
+ */
+gboolean
+sgen_memgov_try_alloc_space (mword size, int space)
+{
+       if (sgen_memgov_available_free_space () < size) {
+               SGEN_ASSERT (4, !sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ()), "Memory shouldn't run out in worker thread");
+               return FALSE;
+       }
+
+       SGEN_ATOMIC_ADD_P (allocated_heap, size);
+       sgen_client_total_allocated_heap_changed (allocated_heap);
+       return TRUE;
+}
+
+/*
+ * Initialize the memory governor from the user-supplied GC parameters.
+ * `max_heap` == 0 means no hard limit; otherwise it is validated against
+ * the soft limit and the nursery size, and the nursery is excluded from
+ * the tracked major-heap budget.
+ */
+void
+sgen_memgov_init (size_t max_heap, size_t soft_limit, gboolean debug_allowance, double allowance_ratio, double save_target)
+{
+       if (soft_limit)
+               soft_heap_limit = soft_limit;
+
+       debug_print_allowance = debug_allowance;
+       major_collection_trigger_size = MIN_MINOR_COLLECTION_ALLOWANCE;
+
+       mono_counters_register ("Memgov alloc", MONO_COUNTER_GC | MONO_COUNTER_WORD | MONO_COUNTER_BYTES | MONO_COUNTER_VARIABLE, &total_alloc);
+       mono_counters_register ("Memgov max alloc", MONO_COUNTER_GC | MONO_COUNTER_WORD | MONO_COUNTER_BYTES | MONO_COUNTER_MONOTONIC, &total_alloc_max);
+
+       if (max_heap == 0)
+               return;
+
+       if (max_heap < soft_limit) {
+               sgen_env_var_error (MONO_GC_PARAMS_NAME, "Setting to minimum.", "`max-heap-size` must be at least as large as `soft-heap-limit`.");
+               max_heap = soft_limit;
+       }
+
+       if (max_heap < sgen_nursery_size * 4) {
+               sgen_env_var_error (MONO_GC_PARAMS_NAME, "Setting to minimum.", "`max-heap-size` must be at least 4 times as large as `nursery size`.");
+               max_heap = sgen_nursery_size * 4;
+       }
+       /* The nursery is budgeted separately from the major heap. */
+       max_heap_size = max_heap - sgen_nursery_size;
+
+       if (allowance_ratio)
+               default_allowance_nursery_size_ratio = allowance_ratio;
+
+       if (save_target)
+               save_target_ratio = save_target;
+}
+
+#endif
diff --git a/mono/sgen/sgen-memory-governor.h b/mono/sgen/sgen-memory-governor.h
new file mode 100644 (file)
index 0000000..0115ec6
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __MONO_SGEN_MEMORY_GOVERNOR_H__
+#define __MONO_SGEN_MEMORY_GOVERNOR_H__
+
+/* Heap limits */
+void sgen_memgov_init (size_t max_heap, size_t soft_limit, gboolean debug_allowance, double min_allowance_ratio, double save_target);
+void sgen_memgov_release_space (mword size, int space);
+gboolean sgen_memgov_try_alloc_space (mword size, int space);
+
+/* GC trigger heuristics */
+void sgen_memgov_minor_collection_start (void);
+void sgen_memgov_minor_collection_end (void);
+
+void sgen_memgov_major_collection_start (void);
+void sgen_memgov_major_collection_end (gboolean forced);
+
+void sgen_memgov_collection_start (int generation);
+void sgen_memgov_collection_end (int generation, GGTimingInfo* info, int info_count);
+
+/* Returns TRUE when the governor wants a major collection started. */
+gboolean sgen_need_major_collection (mword space_needed);
+
+
+/* Flags for the OS-memory allocators below; HEAP and ACTIVATE may be combined. */
+typedef enum {
+       SGEN_ALLOC_INTERNAL = 0,
+       SGEN_ALLOC_HEAP = 1,
+       SGEN_ALLOC_ACTIVATE = 2
+} SgenAllocFlags;
+
+/* OS memory allocation */
+void* sgen_alloc_os_memory (size_t size, SgenAllocFlags flags, const char *assert_description);
+void* sgen_alloc_os_memory_aligned (size_t size, mword alignment, SgenAllocFlags flags, const char *assert_description);
+void sgen_free_os_memory (void *addr, size_t size, SgenAllocFlags flags);
+
+/* Error handling */
+void sgen_assert_memory_alloc (void *ptr, size_t requested_size, const char *assert_description);
+
+#endif
+
diff --git a/mono/sgen/sgen-minor-copy-object.h b/mono/sgen/sgen-minor-copy-object.h
new file mode 100644 (file)
index 0000000..e323218
--- /dev/null
@@ -0,0 +1,216 @@
+/*
+ * sgen-minor-copy-object.h: Copy functions for nursery collections.
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Configuration consumed by the generic sgen-copy-object.h template below. */
+#define collector_pin_object(obj, queue) sgen_pin_object (obj, queue);
+#define COLLECTOR_SERIAL_ALLOC_FOR_PROMOTION alloc_for_promotion
+
+extern guint64 stat_nursery_copy_object_failed_to_space; /* from sgen-gc.c */
+
+#include "sgen-copy-object.h"
+
+/*
+ * This is how the copying happens from the nursery to the old generation.
+ * We assume that at this time all the pinned objects have been identified and
+ * marked as such.
+ * We run scan_object() for each pinned object so that each referenced
+ * objects if possible are copied. The new gray objects created can have
+ * scan_object() run on them right away, too.
+ * Then we run copy_object() for the precisely tracked roots. At this point
+ * all the roots are either gray or black. We run scan_object() on the gray
+ * objects until no more gray objects are created.
+ * At the end of the process we walk again the pinned list and we unmark
+ * the pinned flag. As we go we also create the list of free space for use
+ * in the next allocation runs.
+ *
+ * We need to remember objects from the old generation that point to the new one
+ * (or just addresses?).
+ *
+ * copy_object could be made into a macro once debugged (use inline for now).
+ */
+
+/*
+ * Copy one nursery object referenced through `obj_slot`, updating the
+ * slot to the new location.  Pointers outside the nursery, pinned
+ * objects, already-forwarded objects and (for the split nursery)
+ * to-space objects are left where they are.
+ */
+static MONO_ALWAYS_INLINE void
+SERIAL_COPY_OBJECT (void **obj_slot, SgenGrayQueue *queue) 
+{
+       char *forwarded;
+       char *copy;
+       char *obj = *obj_slot;
+
+       SGEN_ASSERT (9, current_collection_generation == GENERATION_NURSERY, "calling minor-serial-copy from a %d generation collection", current_collection_generation);
+
+       HEAVY_STAT (++stat_copy_object_called_nursery);
+
+       if (!sgen_ptr_in_nursery (obj)) {
+               HEAVY_STAT (++stat_nursery_copy_object_failed_from_space);
+               return;
+       }
+
+       SGEN_LOG (9, "Precise copy of %p from %p", obj, obj_slot);
+
+       /*
+        * Before we can copy the object we must make sure that we are
+        * allowed to, i.e. that the object is not pinned, not already
+        * forwarded or belongs to the nursery To Space.
+        */
+
+       if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
+               SGEN_ASSERT (9, sgen_obj_get_descriptor (forwarded),  "forwarded object %p has no gc descriptor", forwarded);
+               SGEN_LOG (9, " (already forwarded to %p)", forwarded);
+               HEAVY_STAT (++stat_nursery_copy_object_failed_forwarded);
+               SGEN_UPDATE_REFERENCE (obj_slot, forwarded);
+               return;
+       }
+       if (G_UNLIKELY (SGEN_OBJECT_IS_PINNED (obj))) {
+               SGEN_ASSERT (9, sgen_vtable_get_descriptor ((GCVTable*)SGEN_LOAD_VTABLE(obj)), "pinned object %p has no gc descriptor", obj);
+               SGEN_LOG (9, " (pinned, no change)");
+               HEAVY_STAT (++stat_nursery_copy_object_failed_pinned);
+               return;
+       }
+
+#ifndef SGEN_SIMPLE_NURSERY
+       /* Only the split nursery has a distinct to-space to test for. */
+       if (sgen_nursery_is_to_space (obj)) {
+               SGEN_ASSERT (9, sgen_vtable_get_descriptor ((GCVTable*)SGEN_LOAD_VTABLE(obj)), "to space object %p has no gc descriptor", obj);
+               SGEN_LOG (9, " (tospace, no change)");
+               HEAVY_STAT (++stat_nursery_copy_object_failed_to_space);                
+               return;
+       }
+#endif
+
+       HEAVY_STAT (++stat_objects_copied_nursery);
+
+       copy = copy_object_no_checks (obj, queue);
+       SGEN_UPDATE_REFERENCE (obj_slot, copy);
+}
+
+/*
+ * SERIAL_COPY_OBJECT_FROM_OBJ:
+ *
+ *   Similar to SERIAL_COPY_OBJECT, but assumes that OBJ_SLOT is part of an object, so it handles global remsets as well.
+ */
+/*
+ * Like SERIAL_COPY_OBJECT, but `obj_slot` lives inside a heap object, so
+ * whenever the (possibly updated) referent stays in the nursery while the
+ * slot is outside it, the slot must be registered in the global remset.
+ */
+static MONO_ALWAYS_INLINE void
+SERIAL_COPY_OBJECT_FROM_OBJ (void **obj_slot, SgenGrayQueue *queue) 
+{
+       char *forwarded;
+       char *obj = *obj_slot;
+       void *copy;
+
+       SGEN_ASSERT (9, current_collection_generation == GENERATION_NURSERY, "calling minor-serial-copy-from-obj from a %d generation collection", current_collection_generation);
+
+       HEAVY_STAT (++stat_copy_object_called_nursery);
+
+       if (!sgen_ptr_in_nursery (obj)) {
+               HEAVY_STAT (++stat_nursery_copy_object_failed_from_space);
+               return;
+       }
+
+       SGEN_LOG (9, "Precise copy of %p from %p", obj, obj_slot);
+
+       /*
+        * Before we can copy the object we must make sure that we are
+        * allowed to, i.e. that the object is not pinned, not already
+        * forwarded or belongs to the nursery To Space.
+        */
+
+       if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
+               SGEN_ASSERT (9, sgen_obj_get_descriptor (forwarded),  "forwarded object %p has no gc descriptor", forwarded);
+               SGEN_LOG (9, " (already forwarded to %p)", forwarded);
+               HEAVY_STAT (++stat_nursery_copy_object_failed_forwarded);
+               SGEN_UPDATE_REFERENCE (obj_slot, forwarded);
+#ifndef SGEN_SIMPLE_NURSERY
+               /* Forwarding target still in the nursery: remember the old->new reference. */
+               if (G_UNLIKELY (sgen_ptr_in_nursery (forwarded) && !sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (forwarded)))
+                       sgen_add_to_global_remset (obj_slot, forwarded);
+#endif
+               return;
+       }
+       if (G_UNLIKELY (SGEN_OBJECT_IS_PINNED (obj))) {
+               SGEN_ASSERT (9, sgen_vtable_get_descriptor ((GCVTable*)SGEN_LOAD_VTABLE(obj)), "pinned object %p has no gc descriptor", obj);
+               SGEN_LOG (9, " (pinned, no change)");
+               HEAVY_STAT (++stat_nursery_copy_object_failed_pinned);
+               if (!sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (obj))
+                       sgen_add_to_global_remset (obj_slot, obj);
+               return;
+       }
+
+#ifndef SGEN_SIMPLE_NURSERY
+       if (sgen_nursery_is_to_space (obj)) {
+               /* FIXME: all of these could just use `sgen_obj_get_descriptor_safe()` */
+               SGEN_ASSERT (9, sgen_vtable_get_descriptor ((GCVTable*)SGEN_LOAD_VTABLE(obj)), "to space object %p has no gc descriptor", obj);
+               SGEN_LOG (9, " (tospace, no change)");
+               HEAVY_STAT (++stat_nursery_copy_object_failed_to_space);                
+
+               /*
+                * FIXME:
+                *
+                * The card table scanning code sometimes clears cards
+                * that have just been set for a global remset.  In
+                * the split nursery the following situation can
+                * occur:
+                *
+                * Let's say object A starts in card C but continues
+                * into C+1.  Within A, at offset O there's a
+                * reference to a new nursery object X.  A+O is in
+                * card C+1.  Now card C is scanned, and as part of
+                * it, object A.  The reference at A+O is processed by
+                * copying X into nursery to-space at Y.  Since it's
+                * still in the nursery, a global remset must be added
+                * for A+O, so card C+1 is marked.  Now, however, card
+                * C+1 is scanned, which means that it's cleared
+                * first.  This wouldn't be terribly bad if reference
+                * A+O were re-scanned and the global remset re-added,
+                * but since the reference points to to-space, that
+                * doesn't happen, and C+1 remains cleared: the remset
+                * is lost.
+                *
+                * There's at least two ways to fix this.  The easy
+                * one is to re-add the remset on the re-scan.  This
+                * is that - the following two lines of code.
+                *
+                * The proper solution appears to be to first make a
+                * copy of the cards before scanning a block, then to
+                * clear all the cards and scan from the copy, so no
+                * remsets will be overwritten.  Scanning objects at
+                * most once would be the icing on the cake.
+                */
+               if (!sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (obj))
+                       sgen_add_to_global_remset (obj_slot, obj);
+
+               return;
+       }
+#endif
+
+       HEAVY_STAT (++stat_objects_copied_nursery);
+
+       copy = copy_object_no_checks (obj, queue);
+       SGEN_UPDATE_REFERENCE (obj_slot, copy);
+#ifndef SGEN_SIMPLE_NURSERY
+       if (G_UNLIKELY (sgen_ptr_in_nursery (copy) && !sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (copy)))
+               sgen_add_to_global_remset (obj_slot, copy);
+#else
+       /* copy_object_no_checks () can return obj on OOM */
+       if (G_UNLIKELY (obj == copy)) {
+               if (G_UNLIKELY (sgen_ptr_in_nursery (copy) && !sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (copy)))
+                       sgen_add_to_global_remset (obj_slot, copy);
+       }
+#endif
+}
+
+/* Install this flavor's serial copy function into a minor collector's op table. */
+#define FILL_MINOR_COLLECTOR_COPY_OBJECT(collector)    do {                    \
+               (collector)->serial_ops.copy_or_mark_object = SERIAL_COPY_OBJECT;                       \
+       } while (0)
diff --git a/mono/sgen/sgen-minor-scan-object.h b/mono/sgen/sgen-minor-scan-object.h
new file mode 100644 (file)
index 0000000..efe782f
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * sgen-minor-scan-object.h: Object scanning in the nursery collectors.
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Heavy-statistics counter: number of objects scanned by the nursery scanner. */
+extern guint64 stat_scan_object_called_nursery;
+
+/*
+ * This header is included once per nursery configuration; the macros
+ * below give each configuration its own uniquely-named scan functions.
+ */
+#if defined(SGEN_SIMPLE_NURSERY)
+#define SERIAL_SCAN_OBJECT simple_nursery_serial_scan_object
+#define SERIAL_SCAN_VTYPE simple_nursery_serial_scan_vtype
+
+#elif defined (SGEN_SPLIT_NURSERY)
+#define SERIAL_SCAN_OBJECT split_nursery_serial_scan_object
+#define SERIAL_SCAN_VTYPE split_nursery_serial_scan_vtype
+
+#else
+/* NOTE(review): the message mentions GC_CONF_NAME but what is actually
+ * missing here is a nursery configuration define (SGEN_SIMPLE_NURSERY or
+ * SGEN_SPLIT_NURSERY) — confirm before relying on the wording. */
+#error "Please define GC_CONF_NAME"
+#endif
+
+#undef HANDLE_PTR
+/* Global remsets are handled in SERIAL_COPY_OBJECT_FROM_OBJ */
+/* Per-reference-slot action expanded by sgen-scan-object.h: copy/forward
+ * the referent at *ptr (skipping NULL slots) and log the overwrite when
+ * verbose logging is enabled. */
+#define HANDLE_PTR(ptr,obj)    do {    \
+               void *__old = *(ptr);   \
+               SGEN_OBJECT_LAYOUT_STATISTICS_MARK_BITMAP ((obj), (ptr)); \
+               binary_protocol_scan_process_reference ((obj), (ptr), __old); \
+               if (__old) {    \
+                       SERIAL_COPY_OBJECT_FROM_OBJ ((ptr), queue);     \
+                       SGEN_COND_LOG (9, __old != *(ptr), "Overwrote field at %p with %p (was: %p)", (ptr), *(ptr), __old); \
+               }       \
+       } while (0)
+
+/*
+ * Scan every reference field of the object at START, whose layout is
+ * encoded in DESC, applying HANDLE_PTR (above) to each slot.  The field
+ * walk itself is generated by including sgen-scan-object.h, which
+ * expands HANDLE_PTR per reference.  Must only run during a nursery
+ * collection (asserted below).
+ */
+static void
+SERIAL_SCAN_OBJECT (char *start, mword desc, SgenGrayQueue *queue)
+{
+       SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP;
+
+#ifdef HEAVY_STATISTICS
+       sgen_descriptor_count_scanned_object (desc);
+#endif
+
+       SGEN_ASSERT (9, sgen_get_current_collection_generation () == GENERATION_NURSERY, "Must not use minor scan during major collection.");
+
+#define SCAN_OBJECT_PROTOCOL
+#include "sgen-scan-object.h"
+
+       SGEN_OBJECT_LAYOUT_STATISTICS_COMMIT_BITMAP;
+       HEAVY_STAT (++stat_scan_object_called_nursery);
+}
+
+/*
+ * Scan the reference fields of a value-type instance embedded at START
+ * inside FULL_OBJECT.  DESC describes the layout including the
+ * MonoObject header, so START is rewound by the client object header
+ * size before the generated scan loop runs.
+ */
+static void
+SERIAL_SCAN_VTYPE (char *full_object, char *start, mword desc, SgenGrayQueue *queue BINARY_PROTOCOL_ARG (size_t size))
+{
+       SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP;
+
+       SGEN_ASSERT (9, sgen_get_current_collection_generation () == GENERATION_NURSERY, "Must not use minor scan during major collection.");
+
+       /* The descriptors include info about the MonoObject header as well */
+       start -= SGEN_CLIENT_OBJECT_HEADER_SIZE;
+
+#define SCAN_OBJECT_NOVTABLE
+#define SCAN_OBJECT_PROTOCOL
+#include "sgen-scan-object.h"
+}
+
+/* Install the serial scan routines defined above into the minor
+ * collector's serial operation table. */
+#define FILL_MINOR_COLLECTOR_SCAN_OBJECT(collector)    do {                    \
+               (collector)->serial_ops.scan_object = SERIAL_SCAN_OBJECT;       \
+               (collector)->serial_ops.scan_vtype = SERIAL_SCAN_VTYPE; \
+       } while (0)
diff --git a/mono/sgen/sgen-nursery-allocator.c b/mono/sgen/sgen-nursery-allocator.c
new file mode 100644 (file)
index 0000000..ab12803
--- /dev/null
@@ -0,0 +1,927 @@
+/*
+ * sgen-nursery-allocator.c: Nursery allocation code.
+ *
+ * Copyright 2009-2010 Novell, Inc.
+ *           2011 Rodrigo Kumpera
+ * 
+ * Copyright 2011 Xamarin Inc  (http://www.xamarin.com)
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * The young generation is divided into fragments. This is because
+ * we can hand one fragment to a thread for lock-less fast alloc and
+ * because the young generation ends up fragmented anyway by pinned objects.
+ * Once a collection is done, a list of fragments is created. When doing
+ * thread local alloc we use smallish nurseries so we allow new threads to
+ * allocate memory from gen0 without triggering a collection. Threads that
+ * are found to allocate lots of memory are given bigger fragments. This
+ * should make the finalizer thread use little nursery memory after a while.
+ * We should start assigning threads very small fragments: if there are many
+ * threads the nursery will be full of reserved space that the threads may not
+ * use at all, slowing down allocation speed.
+ * Thread local allocation is done from areas of memory Hotspot calls Thread Local 
+ * Allocation Buffers (TLABs).
+ */
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef HAVE_PTHREAD_H
+#include <pthread.h>
+#endif
+#ifdef HAVE_SEMAPHORE_H
+#include <semaphore.h>
+#endif
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#ifdef __MACH__
+#undef _XOPEN_SOURCE
+#endif
+#ifdef __MACH__
+#define _XOPEN_SOURCE
+#endif
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-cardtable.h"
+#include "mono/sgen/sgen-protocol.h"
+#include "mono/sgen/sgen-memory-governor.h"
+#include "mono/sgen/sgen-pinning.h"
+#include "mono/sgen/sgen-client.h"
+#include "mono/utils/mono-membar.h"
+
+/* Enable it so nursery allocation diagnostic data is collected */
+//#define NALLOC_DEBUG 1
+
+/* The mutator allocs from here. */
+static SgenFragmentAllocator mutator_allocator;
+
+/* freelist of fragment structures */
+static SgenFragment *fragment_freelist = NULL;
+
+/* Allocator cursors */
+/* End of the last pinned region seen by sgen_build_nursery_fragments (). */
+static char *nursery_last_pinned_end = NULL;
+
+/* Nursery address range [sgen_nursery_start, sgen_nursery_end). */
+char *sgen_nursery_start;
+char *sgen_nursery_end;
+
+#ifdef USER_CONFIG
+/* Defaults: 4 MB nursery (1 << 22), overridable at runtime when
+ * USER_CONFIG is enabled. */
+size_t sgen_nursery_size = (1 << 22);
+int sgen_nursery_bits = 22;
+#endif
+
+char *sgen_space_bitmap;
+size_t sgen_space_bitmap_size;
+
+#ifdef HEAVY_STATISTICS
+
+/* Counters for wasted nursery space and allocator retry behavior;
+ * registered with mono_counters in sgen_nursery_allocator_init_heavy_stats (). */
+static mword stat_wasted_bytes_trailer = 0;
+static mword stat_wasted_bytes_small_areas = 0;
+static mword stat_wasted_bytes_discarded_fragments = 0;
+static guint64 stat_nursery_alloc_requests = 0;
+static guint64 stat_alloc_iterations = 0;
+static guint64 stat_alloc_retries = 0;
+
+static guint64 stat_nursery_alloc_range_requests = 0;
+static guint64 stat_alloc_range_iterations = 0;
+static guint64 stat_alloc_range_retries = 0;
+
+#endif
+
+/************************************Nursery allocation debugging *********************************************/
+
+#ifdef NALLOC_DEBUG
+
+/* Reason codes for allocation flight records. */
+enum {
+       FIXED_ALLOC = 1,
+       RANGE_ALLOC,
+       PINNING,
+       BLOCK_ZEROING,
+       CLEAR_NURSERY_FRAGS
+};
+
+/* One flight-record entry describing a nursery allocation event. */
+typedef struct {
+       char *address;          /* start of the allocated/cleared range */
+       size_t size;            /* length of the range in bytes */
+       int reason;             /* one of the enum values above */
+       int seq;                /* global allocation order (record index) */
+       MonoNativeThreadId tid; /* thread that performed the event */
+} AllocRecord;
+
+#define ALLOC_RECORD_COUNT 128000
+
+
+static AllocRecord *alloc_records;
+/* Next free slot in alloc_records; advanced atomically. */
+static volatile int next_record;
+/* Number of allocation attempts (including retries). */
+static volatile int alloc_count;
+
+void dump_alloc_records (void);
+void verify_alloc_records (void);
+
+/* Map an AllocRecord's reason code to a human-readable label. */
+static const char*
+get_reason_name (AllocRecord *rec)
+{
+       int code = rec->reason;
+
+       if (code == FIXED_ALLOC)
+               return "fixed-alloc";
+       if (code == RANGE_ALLOC)
+               return "range-alloc";
+       if (code == PINNING)
+               return "pinning";
+       if (code == BLOCK_ZEROING)
+               return "block-zeroing";
+       if (code == CLEAR_NURSERY_FRAGS)
+               return "clear-nursery-frag";
+       return "invalid";
+}
+
+/* Drop all recorded allocation events and restart counting. */
+static void
+reset_alloc_records (void)
+{
+       next_record = 0;
+       alloc_count = 0;
+}
+
+/* Append one flight-record entry; safe to call from multiple threads
+ * (the slot index is claimed atomically).
+ * NOTE(review): idx is not checked against ALLOC_RECORD_COUNT — this
+ * assumes fewer than ALLOC_RECORD_COUNT events per cycle; confirm. */
+static void
+add_alloc_record (char *addr, size_t size, int reason)
+{
+       int idx = InterlockedIncrement (&next_record) - 1;
+       alloc_records [idx].address = addr;
+       alloc_records [idx].size = size;
+       alloc_records [idx].reason = reason;
+       alloc_records [idx].seq = idx;
+       alloc_records [idx].tid = mono_native_thread_id_get ();
+}
+
+/* qsort comparator: order records by address, breaking ties by
+ * allocation sequence number. */
+static int
+comp_alloc_record (const void *_a, const void *_b)
+{
+       const AllocRecord *a = _a;
+       const AllocRecord *b = _b;
+       if (a->address == b->address)
+               return a->seq - b->seq;
+       /* Compare instead of subtracting: a raw pointer difference
+        * narrowed to int can overflow and flip sign on 64-bit targets,
+        * yielding an inconsistent ordering. */
+       return a->address < b->address ? -1 : 1;
+}
+
+/* One-past-the-end address of the range covered by a record. */
+#define rec_end(REC) ((REC)->address + (REC)->size)
+
+/* Sort the flight records by address and print them all.  Debug aid,
+ * only compiled under NALLOC_DEBUG. */
+void
+dump_alloc_records (void)
+{
+       int i;
+       sgen_qsort (alloc_records, next_record, sizeof (AllocRecord), comp_alloc_record);
+
+       printf ("------------------------------------DUMP RECORDS----------------------------\n");
+       for (i = 0; i < next_record; ++i) {
+               AllocRecord *rec = alloc_records + i;
+               /* %zx for the (size_t)-cast tid — %x with a size_t argument is
+                * undefined behavior and inconsistent with verify_alloc_records (). */
+               printf ("obj [%p, %p] size %d reason %s seq %d tid %zx\n", rec->address, rec_end (rec), (int)rec->size, get_reason_name (rec), rec->seq, (size_t)rec->tid);
+       }
+}
+
+/* Sort the flight records by address and check pairwise invariants:
+ * report overlapping ranges, count inter-record holes of >= 8 bytes,
+ * and print a per-record dump plus a summary.  Debug aid, only
+ * compiled under NALLOC_DEBUG.
+ * NOTE(review): `total` is an int accumulating sizes — could overflow
+ * for very large record sets; acceptable for a debug dump. */
+void
+verify_alloc_records (void)
+{
+       int i;
+       int total = 0;
+       int holes = 0;
+       int max_hole = 0;
+       AllocRecord *prev = NULL;
+
+       sgen_qsort (alloc_records, next_record, sizeof (AllocRecord), comp_alloc_record);
+       printf ("------------------------------------DUMP RECORDS- %d %d---------------------------\n", next_record, alloc_count);
+       for (i = 0; i < next_record; ++i) {
+               AllocRecord *rec = alloc_records + i;
+               int hole_size = 0;
+               total += rec->size;
+               if (prev) {
+                       if (rec_end (prev) > rec->address)
+                               printf ("WE GOT OVERLAPPING objects %p and %p\n", prev->address, rec->address);
+                       if ((rec->address - rec_end (prev)) >= 8)
+                               ++holes;
+                       hole_size = rec->address - rec_end (prev);
+                       max_hole = MAX (max_hole, hole_size);
+               }
+               printf ("obj [%p, %p] size %d hole to prev %d reason %s seq %d tid %zx\n", rec->address, rec_end (rec), (int)rec->size, hole_size, get_reason_name (rec), rec->seq, (size_t)rec->tid);
+               prev = rec;
+       }
+       printf ("SUMMARY total alloc'd %d holes %d max_hole %d\n", total, holes, max_hole);
+}
+
+#endif
+
+/*********************************************************************************/
+
+
+/*
+ * The fragment list stores a deletion mark in the low bits of the
+ * `next` pointer (Michael-style lock-free list).  These helpers set,
+ * strip and test that mark.
+ */
+
+/* Set the given low bit(s) in a pointer. */
+static inline gpointer
+mask (gpointer n, uintptr_t bit)
+{
+       return (gpointer)(((uintptr_t)n) | bit);
+}
+
+/* Strip the low two tag bits, recovering the real pointer. */
+static inline gpointer
+unmask (gpointer p)
+{
+       return (gpointer)((uintptr_t)p & ~(uintptr_t)0x3);
+}
+
+/* Nonzero iff the deletion mark (bit 0) is set. */
+static inline uintptr_t
+get_mark (gpointer n)
+{
+       return (uintptr_t)n & 0x1;
+}
+
+/*MUST be called with world stopped*/
+/* Pop a fragment descriptor off the freelist, or allocate a fresh one
+ * from internal memory if the freelist is empty.  The freelist is
+ * threaded through next_in_order; both links are NULLed before return. */
+SgenFragment*
+sgen_fragment_allocator_alloc (void)
+{
+       SgenFragment *frag = fragment_freelist;
+       if (frag) {
+               fragment_freelist = frag->next_in_order;
+               frag->next = frag->next_in_order = NULL;
+               return frag;
+       }
+       frag = sgen_alloc_internal (INTERNAL_MEM_FRAGMENT);
+       frag->next = frag->next_in_order = NULL;
+       return frag;
+}
+
+/* Prepend the free range [start, end) to ALLOCATOR as a new fragment.
+ * The new fragment becomes the head of both the region list and the
+ * allocation list; END must be strictly greater than START. */
+void
+sgen_fragment_allocator_add (SgenFragmentAllocator *allocator, char *start, char *end)
+{
+       SgenFragment *fragment;
+
+       fragment = sgen_fragment_allocator_alloc ();
+       fragment->fragment_start = start;
+       fragment->fragment_next = start;
+       fragment->fragment_end = end;
+       /* unmask: the old head's pointer may carry a deletion mark. */
+       fragment->next_in_order = fragment->next = unmask (allocator->region_head);
+
+       allocator->region_head = allocator->alloc_head = fragment;
+       g_assert (fragment->fragment_end > fragment->fragment_start);
+}
+
+/* Return every fragment descriptor owned by ALLOCATOR to the global
+ * freelist (splicing via next_in_order) and empty the allocator. */
+void
+sgen_fragment_allocator_release (SgenFragmentAllocator *allocator)
+{
+       SgenFragment *last = allocator->region_head;
+       if (!last)
+               return;
+
+       /* Find the last fragment in insert order */
+       for (; last->next_in_order; last = last->next_in_order) ;
+
+       last->next_in_order = fragment_freelist;
+       fragment_freelist = allocator->region_head;
+       allocator->alloc_head = allocator->region_head = NULL;
+}
+
+/*
+ * Lock-free search (Michael-style) for the pointer that links to FRAG
+ * in ALLOCATOR's alloc list.  While traversing, physically unlinks any
+ * node whose `next` carries the deletion mark.  Returns the address of
+ * the predecessor's link slot, or NULL if FRAG was removed concurrently.
+ * Restarts from the head whenever a CAS or consistency check fails.
+ */
+static SgenFragment**
+find_previous_pointer_fragment (SgenFragmentAllocator *allocator, SgenFragment *frag)
+{
+       SgenFragment **prev;
+       SgenFragment *cur, *next;
+#ifdef NALLOC_DEBUG
+       int count = 0;
+#endif
+
+try_again:
+       prev = &allocator->alloc_head;
+#ifdef NALLOC_DEBUG
+       if (count++ > 5)
+               printf ("retry count for fppf is %d\n", count);
+#endif
+
+       cur = unmask (*prev);
+
+       while (1) {
+               if (cur == NULL)
+                       return NULL;
+               next = cur->next;
+
+               /*
+                * We need to make sure that we dereference prev below
+                * after reading cur->next above, so we need a read
+                * barrier.
+                */
+               mono_memory_read_barrier ();
+
+               /* Someone relinked the list under us: restart. */
+               if (*prev != cur)
+                       goto try_again;
+
+               if (!get_mark (next)) {
+                       if (cur == frag)
+                               return prev;
+                       prev = &cur->next;
+               } else {
+                       /* cur is marked deleted: try to splice it out. */
+                       next = unmask (next);
+                       if (InterlockedCompareExchangePointer ((volatile gpointer*)prev, next, cur) != cur)
+                               goto try_again;
+                       /*we must make sure that the next from cur->next happens after*/
+                       mono_memory_write_barrier ();
+               }
+
+               cur = unmask (next);
+       }
+       /* Unreachable: the loop above only exits via return/goto. */
+       return NULL;
+}
+
+/* Atomically claim everything left in FRAG past ALLOC_END (so nobody
+ * else can allocate from it).  Returns TRUE only if the CAS on
+ * fragment_next succeeded, i.e. no concurrent allocation intervened. */
+static gboolean
+claim_remaining_size (SgenFragment *frag, char *alloc_end)
+{
+       /* All space used, nothing to claim. */
+       if (frag->fragment_end <= alloc_end)
+               return FALSE;
+
+       /* Try to alloc all the remaining space. */
+       return InterlockedCompareExchangePointer ((volatile gpointer*)&frag->fragment_next, frag->fragment_end, alloc_end) == alloc_end;
+}
+
+/*
+ * Parallel bump-pointer allocation of SIZE bytes from FRAG.  Returns
+ * NULL if the fragment is too small or if a concurrent allocator won
+ * the CAS on fragment_next (caller should retry elsewhere).  When the
+ * fragment's remaining tail drops below SGEN_MAX_NURSERY_WASTE the
+ * fragment is retired: its tail is claimed and cleared, then the node
+ * is removed from the lock-free list via Michael's two-CAS delete.
+ */
+static void*
+par_alloc_from_fragment (SgenFragmentAllocator *allocator, SgenFragment *frag, size_t size)
+{
+       char *p = frag->fragment_next;
+       char *end = p + size;
+
+       if (end > frag->fragment_end)
+               return NULL;
+
+       /* p = frag->fragment_next must happen before */
+       mono_memory_barrier ();
+
+       if (InterlockedCompareExchangePointer ((volatile gpointer*)&frag->fragment_next, end, p) != p)
+               return NULL;
+
+       if (frag->fragment_end - end < SGEN_MAX_NURSERY_WASTE) {
+               SgenFragment *next, **prev_ptr;
+               
+               /*
+                * Before we clean the remaining nursery, we must claim the remaining space
+                * as it could end up been used by the range allocator since it can end up
+                * allocating from this dying fragment as it doesn't respect SGEN_MAX_NURSERY_WASTE
+                * when doing second chance allocation.
+                */
+               if ((sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION || sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG) && claim_remaining_size (frag, end)) {
+                       sgen_clear_range (end, frag->fragment_end);
+                       HEAVY_STAT (InterlockedExchangeAdd (&stat_wasted_bytes_trailer, frag->fragment_end - end));
+#ifdef NALLOC_DEBUG
+                       add_alloc_record (end, frag->fragment_end - end, BLOCK_ZEROING);
+#endif
+               }
+
+               prev_ptr = find_previous_pointer_fragment (allocator, frag);
+
+               /*Use Michaels linked list remove*/
+
+               /*prev_ptr will be null if the fragment was removed concurrently */
+               while (prev_ptr) {
+                       next = frag->next;
+
+                       /*already deleted*/
+                       if (!get_mark (next)) {
+                               /*frag->next read must happen before the first CAS*/
+                               mono_memory_write_barrier ();
+
+                               /*Fail if the next node is removed concurrently and its CAS wins */
+                               if (InterlockedCompareExchangePointer ((volatile gpointer*)&frag->next, mask (next, 1), next) != next) {
+                                       continue;
+                               }
+                       }
+
+                       /* The second CAS must happen after the first CAS or frag->next. */
+                       mono_memory_write_barrier ();
+
+                       /* Fail if the previous node was deleted and its CAS wins */
+                       if (InterlockedCompareExchangePointer ((volatile gpointer*)prev_ptr, unmask (next), frag) != frag) {
+                               prev_ptr = find_previous_pointer_fragment (allocator, frag);
+                               continue;
+                       }
+                       break;
+               }
+       }
+
+       return p;
+}
+
+/*
+ * Serial bump-pointer allocation of SIZE bytes from FRAG (no
+ * concurrency — world stopped or single-threaded caller).  Returns
+ * NULL if FRAG has fewer than SIZE bytes left.  When the leftover tail
+ * drops below SGEN_MAX_NURSERY_WASTE the fragment is retired: unlinked
+ * through PREVIOUS and its tail zeroed (pinning depends on cleared
+ * memory).
+ */
+static void*
+serial_alloc_from_fragment (SgenFragment **previous, SgenFragment *frag, size_t size)
+{
+       char *p = frag->fragment_next;
+       char *end = p + size;
+
+       if (end > frag->fragment_end)
+               return NULL;
+
+       frag->fragment_next = end;
+
+       if (frag->fragment_end - end < SGEN_MAX_NURSERY_WASTE) {
+               /* Unlink the dying fragment.  (This assignment used to be
+                * duplicated before and after the memset; once suffices.) */
+               *previous = frag->next;
+
+               /* Clear the remaining space, pinning depends on this. FIXME move this to use phony arrays */
+               memset (end, 0, frag->fragment_end - end);
+       }
+
+       return p;
+}
+
+/*
+ * Thread-safe allocation of SIZE bytes: walk the lock-free fragment
+ * list and take the first fragment with enough room.  On a CAS race
+ * inside par_alloc_from_fragment the whole walk restarts.  Returns
+ * NULL when no fragment can satisfy the request.
+ */
+void*
+sgen_fragment_allocator_par_alloc (SgenFragmentAllocator *allocator, size_t size)
+{
+       SgenFragment *frag;
+
+#ifdef NALLOC_DEBUG
+       InterlockedIncrement (&alloc_count);
+#endif
+
+restart:
+       for (frag = unmask (allocator->alloc_head); unmask (frag); frag = unmask (frag->next)) {
+               HEAVY_STAT (InterlockedIncrement (&stat_alloc_iterations));
+
+               if (size <= (size_t)(frag->fragment_end - frag->fragment_next)) {
+                       void *p = par_alloc_from_fragment (allocator, frag, size);
+                       if (!p) {
+                               HEAVY_STAT (InterlockedIncrement (&stat_alloc_retries));
+                               goto restart;
+                       }
+#ifdef NALLOC_DEBUG
+                       add_alloc_record (p, size, FIXED_ALLOC);
+#endif
+                       return p;
+               }
+       }
+       return NULL;
+}
+
+/*
+ * Single-threaded allocation of SIZE bytes: walk the fragment list and
+ * take the first fragment that fits, unlinking fragments that
+ * serial_alloc_from_fragment retires.  Returns NULL on failure.
+ */
+void*
+sgen_fragment_allocator_serial_alloc (SgenFragmentAllocator *allocator, size_t size)
+{
+       SgenFragment *frag;
+       SgenFragment **previous;
+#ifdef NALLOC_DEBUG
+       InterlockedIncrement (&alloc_count);
+#endif
+
+       previous = &allocator->alloc_head;
+
+       for (frag = *previous; frag; frag = *previous) {
+               char *p = serial_alloc_from_fragment (previous, frag, size);
+
+               HEAVY_STAT (InterlockedIncrement (&stat_alloc_iterations));
+
+               if (p) {
+#ifdef NALLOC_DEBUG
+                       add_alloc_record (p, size, FIXED_ALLOC);
+#endif
+                       return p;
+               }
+               previous = &frag->next;
+       }
+       return NULL;
+}
+
+/*
+ * Single-threaded range allocation: try to carve DESIRED_SIZE bytes
+ * from the first fragment that fits; otherwise remember the largest
+ * fragment of at least MINIMUM_SIZE and, as second chance, hand out
+ * that whole fragment.  The granted size is stored in *OUT_ALLOC_SIZE.
+ * Returns NULL when not even MINIMUM_SIZE is available.
+ */
+void*
+sgen_fragment_allocator_serial_range_alloc (SgenFragmentAllocator *allocator, size_t desired_size, size_t minimum_size, size_t *out_alloc_size)
+{
+       SgenFragment *frag, **previous, *min_frag = NULL, **prev_min_frag = NULL;
+       size_t current_minimum = minimum_size;
+
+#ifdef NALLOC_DEBUG
+       InterlockedIncrement (&alloc_count);
+#endif
+
+       previous = &allocator->alloc_head;
+
+       for (frag = *previous; frag; frag = *previous) {
+               size_t frag_size = frag->fragment_end - frag->fragment_next;
+
+               HEAVY_STAT (InterlockedIncrement (&stat_alloc_range_iterations));
+
+               if (desired_size <= frag_size) {
+                       void *p;
+                       *out_alloc_size = desired_size;
+
+                       p = serial_alloc_from_fragment (previous, frag, desired_size);
+#ifdef NALLOC_DEBUG
+                       add_alloc_record (p, desired_size, RANGE_ALLOC);
+#endif
+                       return p;
+               }
+               /* Track the best (largest) fallback candidate seen so far. */
+               if (current_minimum <= frag_size) {
+                       min_frag = frag;
+                       prev_min_frag = previous;
+                       current_minimum = frag_size;
+               }
+               previous = &frag->next;
+       }
+
+       if (min_frag) {
+               void *p;
+               /* Second chance: grant the fallback fragment in full. */
+               size_t frag_size = min_frag->fragment_end - min_frag->fragment_next;
+               *out_alloc_size = frag_size;
+
+               p = serial_alloc_from_fragment (prev_min_frag, min_frag, frag_size);
+
+#ifdef NALLOC_DEBUG
+               add_alloc_record (p, frag_size, RANGE_ALLOC);
+#endif
+               return p;
+       }
+
+       return NULL;
+}
+
+/*
+ * Thread-safe variant of the range allocator: same first-fit /
+ * largest-fallback policy as the serial version, but every allocation
+ * goes through par_alloc_from_fragment and any CAS race restarts the
+ * whole search.  The granted size is stored in *OUT_ALLOC_SIZE;
+ * returns NULL when not even MINIMUM_SIZE is available.
+ */
+void*
+sgen_fragment_allocator_par_range_alloc (SgenFragmentAllocator *allocator, size_t desired_size, size_t minimum_size, size_t *out_alloc_size)
+{
+       SgenFragment *frag, *min_frag;
+       size_t current_minimum;
+
+restart:
+       min_frag = NULL;
+       current_minimum = minimum_size;
+
+#ifdef NALLOC_DEBUG
+       InterlockedIncrement (&alloc_count);
+#endif
+
+       for (frag = unmask (allocator->alloc_head); frag; frag = unmask (frag->next)) {
+               size_t frag_size = frag->fragment_end - frag->fragment_next;
+
+               HEAVY_STAT (InterlockedIncrement (&stat_alloc_range_iterations));
+
+               if (desired_size <= frag_size) {
+                       void *p;
+                       *out_alloc_size = desired_size;
+
+                       p = par_alloc_from_fragment (allocator, frag, desired_size);
+                       if (!p) {
+                               HEAVY_STAT (InterlockedIncrement (&stat_alloc_range_retries));
+                               goto restart;
+                       }
+#ifdef NALLOC_DEBUG
+                       add_alloc_record (p, desired_size, RANGE_ALLOC);
+#endif
+                       return p;
+               }
+               /* Track the best (largest) fallback candidate seen so far. */
+               if (current_minimum <= frag_size) {
+                       min_frag = frag;
+                       current_minimum = frag_size;
+               }
+       }
+
+       /* The second fragment_next read should be ordered in respect to the first code block */
+       mono_memory_barrier ();
+
+       if (min_frag) {
+               void *p;
+               size_t frag_size;
+
+               /* Re-read: the fragment may have shrunk concurrently. */
+               frag_size = min_frag->fragment_end - min_frag->fragment_next;
+               if (frag_size < minimum_size)
+                       goto restart;
+
+               *out_alloc_size = frag_size;
+
+               mono_memory_barrier ();
+               p = par_alloc_from_fragment (allocator, min_frag, frag_size);
+
+               /*XXX restarting here is quite dubious given this is already second chance allocation. */
+               if (!p) {
+                       HEAVY_STAT (InterlockedIncrement (&stat_alloc_retries));
+                       goto restart;
+               }
+#ifdef NALLOC_DEBUG
+               add_alloc_record (p, frag_size, RANGE_ALLOC);
+#endif
+               return p;
+       }
+
+       return NULL;
+}
+
+/* Invalidate the unused tail of every fragment in ALLOCATOR (zero or
+ * phony-array fill via sgen_clear_range) so the heap stays walkable. */
+void
+sgen_clear_allocator_fragments (SgenFragmentAllocator *allocator)
+{
+       SgenFragment *frag;
+
+       for (frag = unmask (allocator->alloc_head); frag; frag = unmask (frag->next)) {
+               SGEN_LOG (4, "Clear nursery frag %p-%p", frag->fragment_next, frag->fragment_end);
+               sgen_clear_range (frag->fragment_next, frag->fragment_end);
+#ifdef NALLOC_DEBUG
+               add_alloc_record (frag->fragment_next, frag->fragment_end - frag->fragment_next, CLEAR_NURSERY_FRAGS);
+#endif
+       }       
+}
+
+/* Clear all remaining nursery fragments */
+/* Only needed under the TLAB-creation clear policies; under CLEAR_AT_GC
+ * the memory is zeroed when fragments are built instead. */
+void
+sgen_clear_nursery_fragments (void)
+{
+       if (sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION || sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG) {
+               sgen_clear_allocator_fragments (&mutator_allocator);
+               sgen_minor_collector.clear_fragments ();
+       }
+}
+
+/*
+ * Mark a given range of memory as invalid.
+ *
+ * This can be done either by zeroing memory or by placing
+ * a phony byte[] array. This keeps the heap forward walkable.
+ *
+ * This function ignores calls with a zero range, even if
+ * both start and end are NULL.
+ *
+ * NOTE(review): a NULL start with a non-NULL end is not rejected by the
+ * check below (NULL compares less than end) — confirm callers never do
+ * that.
+ */
+void
+sgen_clear_range (char *start, char *end)
+{
+       size_t size = end - start;
+
+       if ((start && !end) || (start > end))
+               g_error ("Invalid range [%p %p]", start, end);
+
+       /* sgen_client_array_fill_range returns whether it placed a phony
+        * array (vs. zeroing); only then is the size assertion meaningful. */
+       if (sgen_client_array_fill_range (start, size)) {
+               sgen_set_nursery_scan_start (start);
+               SGEN_ASSERT (0, start + sgen_safe_object_get_size ((GCObject*)start) == end, "Array fill produced wrong size");
+       }
+}
+
+/* Pinning scans the nursery linearly, so all unused fragment tails must
+ * be invalidated first to keep the heap walkable. */
+void
+sgen_nursery_allocator_prepare_for_pinning (void)
+{
+       sgen_clear_allocator_fragments (&mutator_allocator);
+       sgen_minor_collector.clear_fragments ();
+}
+
+/* Running total of free fragment bytes found by the current
+ * sgen_build_nursery_fragments () pass. */
+static mword fragment_total = 0;
+/*
+ * We found a fragment of free memory in the nursery: memzero it and if
+ * it is big enough, add it to the list of fragments that can be used for
+ * allocation.
+ */
+static void
+add_nursery_frag (SgenFragmentAllocator *allocator, size_t frag_size, char* frag_start, char* frag_end)
+{
+       SGEN_LOG (4, "Found empty fragment: %p-%p, size: %zd", frag_start, frag_end, frag_size);
+       binary_protocol_empty (frag_start, frag_size);
+       /* Not worth dealing with smaller fragments: need to tune */
+       if (frag_size >= SGEN_MAX_NURSERY_WASTE) {
+               /* memsetting just the first chunk start is bound to provide better cache locality */
+               if (sgen_get_nursery_clear_policy () == CLEAR_AT_GC)
+                       memset (frag_start, 0, frag_size);
+               else if (sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG)
+                       /* 0xff poison so use-before-clear bugs are visible. */
+                       memset (frag_start, 0xff, frag_size);
+
+#ifdef NALLOC_DEBUG
+               /* XXX convert this into a flight record entry
+               printf ("\tfragment [%p %p] size %zd\n", frag_start, frag_end, frag_size);
+               */
+#endif
+               sgen_fragment_allocator_add (allocator, frag_start, frag_end);
+               fragment_total += frag_size;
+       } else {
+               /* Clear unused fragments, pinning depends on this */
+               sgen_clear_range (frag_start, frag_end);
+               HEAVY_STAT (InterlockedExchangeAdd (&stat_wasted_bytes_small_areas, frag_size));
+       }
+}
+
+/* Reverse the allocator's fragment list in place, rebuilding both the
+ * allocation chain (next) and the address-order chain (next_in_order)
+ * in the new order, then point both heads at the new front. */
+static void
+fragment_list_reverse (SgenFragmentAllocator *allocator)
+{
+       SgenFragment *reversed = NULL;
+       SgenFragment *cur;
+
+       for (cur = allocator->region_head; cur;) {
+               SgenFragment *rest = cur->next;
+               cur->next = reversed;
+               cur->next_in_order = reversed;
+               reversed = cur;
+               cur = rest;
+       }
+
+       allocator->region_head = allocator->alloc_head = reversed;
+}
+
+/*
+ * Rebuild the mutator's fragment list after a nursery collection.
+ * Walks the sorted pin queue merged with the collector's exclude-range
+ * list, turning each gap between pinned objects / excluded ranges into
+ * a free fragment.  Pinned objects are either unpinned or enqueued on
+ * UNPIN_QUEUE when one is supplied.  Returns the total number of free
+ * bytes recovered.  Must run with the world stopped.
+ */
+mword
+sgen_build_nursery_fragments (GCMemSection *nursery_section, SgenGrayQueue *unpin_queue)
+{
+       char *frag_start, *frag_end;
+       size_t frag_size;
+       SgenFragment *frags_ranges;
+       void **pin_start, **pin_entry, **pin_end;
+
+#ifdef NALLOC_DEBUG
+       reset_alloc_records ();
+#endif
+       /*The mutator fragments are done. We no longer need them. */
+       sgen_fragment_allocator_release (&mutator_allocator);
+
+       frag_start = sgen_nursery_start;
+       fragment_total = 0;
+
+       /* The current nursery might give us a fragment list to exclude [start, next[*/
+       frags_ranges = sgen_minor_collector.build_fragments_get_exclude_head ();
+
+       /* clear scan starts */
+       memset (nursery_section->scan_starts, 0, nursery_section->num_scan_start * sizeof (gpointer));
+
+       pin_start = pin_entry = sgen_pinning_get_entry (nursery_section->pin_queue_first_entry);
+       pin_end = sgen_pinning_get_entry (nursery_section->pin_queue_last_entry);
+
+       /* Merge-walk pin entries and exclude ranges in address order. */
+       while (pin_entry < pin_end || frags_ranges) {
+               char *addr0, *addr1;
+               size_t size;
+
+               addr0 = addr1 = sgen_nursery_end;
+               if (pin_entry < pin_end)
+                       addr0 = *pin_entry;
+               if (frags_ranges)
+                       addr1 = frags_ranges->fragment_start;
+
+               if (addr0 < addr1) {
+                       /* Next obstacle is a pinned object: unpin or defer it. */
+                       if (unpin_queue)
+                               GRAY_OBJECT_ENQUEUE (unpin_queue, addr0, sgen_obj_get_descriptor_safe (addr0));
+                       else
+                               SGEN_UNPIN_OBJECT (addr0);
+                       size = SGEN_ALIGN_UP (sgen_safe_object_get_size ((GCObject*)addr0));
+                       CANARIFY_SIZE (size);
+                       sgen_set_nursery_scan_start (addr0);
+                       frag_end = addr0;
+                       ++pin_entry;
+               } else {
+                       /* Next obstacle is an excluded [start, next[ range. */
+                       frag_end = addr1;
+                       size = frags_ranges->fragment_next - addr1;
+                       frags_ranges = frags_ranges->next_in_order;
+               }
+
+               frag_size = frag_end - frag_start;
+
+               if (size == 0)
+                       continue;
+
+               g_assert (frag_size >= 0);
+               g_assert (size > 0);
+               if (frag_size && size)
+                       add_nursery_frag (&mutator_allocator, frag_size, frag_start, frag_end); 
+
+               frag_size = size;
+#ifdef NALLOC_DEBUG
+               /* NOTE(review): *pin_entry is used here even when this iteration
+                * consumed an exclude range (and pin_entry may equal pin_end) —
+                * debug-only code, but confirm. */
+               add_alloc_record (*pin_entry, frag_size, PINNING);
+#endif
+               frag_start = frag_end + frag_size;
+       }
+
+       nursery_last_pinned_end = frag_start;
+       frag_end = sgen_nursery_end;
+       frag_size = frag_end - frag_start;
+       if (frag_size)
+               add_nursery_frag (&mutator_allocator, frag_size, frag_start, frag_end);
+
+       /* Now it's safe to release the fragments exclude list. */
+       sgen_minor_collector.build_fragments_release_exclude_head ();
+
+       /* First we reorder the fragment list to be in ascending address order. This makes H/W prefetchers happier. */
+       fragment_list_reverse (&mutator_allocator);
+
+       /*The collector might want to do something with the final nursery fragment list.*/
+       sgen_minor_collector.build_fragments_finish (&mutator_allocator);
+
+       if (!unmask (mutator_allocator.alloc_head)) {
+               SGEN_LOG (1, "Nursery fully pinned");
+               for (pin_entry = pin_start; pin_entry < pin_end; ++pin_entry) {
+                       void *p = *pin_entry;
+                       SGEN_LOG (3, "Bastard pinning obj %p (%s), size: %zd", p, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (p)), sgen_safe_object_get_size (p));
+               }
+       }
+       return fragment_total;
+}
+
+/* Upper bound of addresses the nursery allocator may hand out. */
+char *
+sgen_nursery_alloc_get_upper_alloc_bound (void)
+{
+       /*FIXME we need to calculate the collector upper bound as well, but this must be done in the previous GC. */
+       return sgen_nursery_end;
+}
+
+/*** Nursery memory allocation ***/
+/* Account for a nursery region being discarded; currently only updates
+ * the heavy-statistics waste counter. */
+void
+sgen_nursery_retire_region (void *address, ptrdiff_t size)
+{
+       HEAVY_STAT (InterlockedExchangeAdd (&stat_wasted_bytes_discarded_fragments, size));
+}
+
+/* Check (without allocating) whether some mutator fragment could
+ * currently satisfy an allocation of SIZE bytes after alignment. */
+gboolean
+sgen_can_alloc_size (size_t size)
+{
+       SgenFragment *frag;
+
+       if (!SGEN_CAN_ALIGN_UP (size))
+               return FALSE;
+
+       size = SGEN_ALIGN_UP (size);
+
+       for (frag = unmask (mutator_allocator.alloc_head); frag; frag = unmask (frag->next)) {
+               if ((size_t)(frag->fragment_end - frag->fragment_next) >= size)
+                       return TRUE;
+       }
+       return FALSE;
+}
+
+void*
+sgen_nursery_alloc (size_t size)
+{
+       SGEN_ASSERT (1, size >= (SGEN_CLIENT_MINIMUM_OBJECT_SIZE + CANARY_SIZE) && size <= (SGEN_MAX_SMALL_OBJ_SIZE + CANARY_SIZE), "Invalid nursery object size");
+
+       SGEN_LOG (4, "Searching nursery for size: %zd", size);
+       size = SGEN_ALIGN_UP (size);
+
+       HEAVY_STAT (InterlockedIncrement (&stat_nursery_alloc_requests));
+
+       return sgen_fragment_allocator_par_alloc (&mutator_allocator, size);
+}
+
+void*
+sgen_nursery_alloc_range (size_t desired_size, size_t minimum_size, size_t *out_alloc_size)
+{
+       SGEN_LOG (4, "Searching for byte range desired size: %zd minimum size %zd", desired_size, minimum_size);
+
+       HEAVY_STAT (InterlockedIncrement (&stat_nursery_alloc_range_requests));
+
+       return sgen_fragment_allocator_par_range_alloc (&mutator_allocator, desired_size, minimum_size, out_alloc_size);
+}
+
+/*** Initialization ***/
+
+#ifdef HEAVY_STATISTICS
+
+/*
+ * Register the nursery allocator's heavy-statistics counters with the Mono
+ * counters subsystem.  Compiled only under HEAVY_STATISTICS; the counter
+ * variables themselves are defined earlier in this file under the same guard.
+ */
+void
+sgen_nursery_allocator_init_heavy_stats (void)
+{
+       mono_counters_register ("bytes wasted trailer fragments", MONO_COUNTER_GC | MONO_COUNTER_WORD | MONO_COUNTER_BYTES, &stat_wasted_bytes_trailer);
+       mono_counters_register ("bytes wasted small areas", MONO_COUNTER_GC | MONO_COUNTER_WORD | MONO_COUNTER_BYTES, &stat_wasted_bytes_small_areas);
+       mono_counters_register ("bytes wasted discarded fragments", MONO_COUNTER_GC | MONO_COUNTER_WORD | MONO_COUNTER_BYTES, &stat_wasted_bytes_discarded_fragments);
+
+       mono_counters_register ("# nursery alloc requests", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_alloc_requests);
+       mono_counters_register ("# nursery alloc iterations", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_alloc_iterations);
+       mono_counters_register ("# nursery alloc retries", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_alloc_retries);
+
+       mono_counters_register ("# nursery alloc range requests", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_alloc_range_requests);
+       mono_counters_register ("# nursery alloc range iterations", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_alloc_range_iterations);
+       /* Typo fix: was "restries"; name now matches the sibling "retries" counter above. */
+       mono_counters_register ("# nursery alloc range retries", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_alloc_range_retries);
+}
+
+#endif
+
+/* One-time setup: register the fragment allocator's internal memory type. */
+void
+sgen_init_nursery_allocator (void)
+{
+       sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FRAGMENT, sizeof (SgenFragment));
+#ifdef NALLOC_DEBUG
+       alloc_records = sgen_alloc_os_memory (sizeof (AllocRecord) * ALLOC_RECORD_COUNT, SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE, "debugging memory");
+#endif
+}
+
+/* Let the minor collector prepare its to-space bookkeeping before a minor GC. */
+void
+sgen_nursery_alloc_prepare_for_minor (void)
+{
+       sgen_minor_collector.prepare_to_space (sgen_space_bitmap, sgen_space_bitmap_size);
+}
+
+/* Same preparation before a major GC.  Currently identical to the minor
+ * case; kept as a separate entry point so the two paths can diverge. */
+void
+sgen_nursery_alloc_prepare_for_major (void)
+{
+       sgen_minor_collector.prepare_to_space (sgen_space_bitmap, sgen_space_bitmap_size);
+}
+
+/*
+ * Record the nursery's [start, end) bounds, allocate the to-space bitmap
+ * covering it, and hand the whole range to the minor collector as the
+ * initial single fragment.
+ */
+void
+sgen_nursery_allocator_set_nursery_bounds (char *start, char *end)
+{
+       sgen_nursery_start = start;
+       sgen_nursery_end = end;
+
+       /*
+        * This will not divide evenly for tiny nurseries (<4kb), so we make sure to be on
+        * the right side of things and round up.  We could just do a MIN(1,x) instead,
+        * since the nursery size must be a power of 2.
+        */
+       sgen_space_bitmap_size = (end - start + SGEN_TO_SPACE_GRANULE_IN_BYTES * 8 - 1) / (SGEN_TO_SPACE_GRANULE_IN_BYTES * 8);
+       sgen_space_bitmap = g_malloc0 (sgen_space_bitmap_size);
+
+       /* Setup the single first large fragment */
+       sgen_minor_collector.init_nursery (&mutator_allocator, start, end);
+}
+
+#endif
diff --git a/mono/sgen/sgen-pinning-stats.c b/mono/sgen/sgen-pinning-stats.c
new file mode 100644 (file)
index 0000000..3137283
--- /dev/null
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#include <string.h>
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-pinning.h"
+#include "mono/sgen/sgen-hash-table.h"
+#include "mono/sgen/sgen-client.h"
+
+/* Node in an (unbalanced) binary search tree of pinning addresses, keyed
+ * by `addr`; `pin_types` is a bitmask of 1 << PIN_TYPE_*. */
+typedef struct _PinStatAddress PinStatAddress;
+struct _PinStatAddress {
+       char *addr;
+       int pin_types;
+       PinStatAddress *left;
+       PinStatAddress *right;
+};
+
+/* Per-class pin counts, one slot per PIN_TYPE_*. */
+typedef struct {
+       size_t num_pins [PIN_TYPE_MAX];
+} PinnedClassEntry;
+
+/* Per-class count of global remset entries. */
+typedef struct {
+       gulong num_remsets;
+} GlobalRemsetClassEntry;
+
+/* Statistics gathering is off unless sgen_pin_stats_enable() was called. */
+static gboolean do_pin_stats = FALSE;
+
+static PinStatAddress *pin_stat_addresses = NULL;
+static size_t pinned_byte_counts [PIN_TYPE_MAX];
+
+/* All objects pinned during the current collection. */
+static SgenPointerQueue pinned_objects = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_STATISTICS);
+
+/* Keyed by "namespace.name" strings built in lookup_vtable_entry(). */
+static SgenHashTable pinned_class_hash_table = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_STATISTICS, INTERNAL_MEM_STAT_PINNED_CLASS, sizeof (PinnedClassEntry), g_str_hash, g_str_equal);
+static SgenHashTable global_remset_class_hash_table = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_STATISTICS, INTERNAL_MEM_STAT_REMSET_CLASS, sizeof (GlobalRemsetClassEntry), g_str_hash, g_str_equal);
+
+/* Turn on collection of pinning statistics (off by default). */
+void
+sgen_pin_stats_enable (void)
+{
+       do_pin_stats = TRUE;
+}
+
+/* Recursively free the pin-address BST rooted at `node`. */
+static void
+pin_stats_tree_free (PinStatAddress *node)
+{
+       if (!node)
+               return;
+       pin_stats_tree_free (node->left);
+       pin_stats_tree_free (node->right);
+       sgen_free_internal_dynamic (node, sizeof (PinStatAddress), INTERNAL_MEM_STATISTICS);
+}
+
+/* Drop all per-collection pin statistics: the address tree, the per-type
+ * byte counts and the pinned-object list.  Class tables are kept. */
+void
+sgen_pin_stats_reset (void)
+{
+       int i;
+       pin_stats_tree_free (pin_stat_addresses);
+       pin_stat_addresses = NULL;
+       for (i = 0; i < PIN_TYPE_MAX; ++i)
+               pinned_byte_counts [i] = 0;
+       sgen_pointer_queue_clear (&pinned_objects);
+}
+
+/*
+ * Record that `addr` acted as a pinning source of kind `pin_type`
+ * (a PIN_TYPE_* value).  Addresses live in a BST; registering the same
+ * address again only ORs in the new pin-type bit.
+ */
+void
+sgen_pin_stats_register_address (char *addr, int pin_type)
+{
+       PinStatAddress **node_ptr = &pin_stat_addresses;
+       PinStatAddress *node;
+       int pin_type_bit = 1 << pin_type;
+
+       while (*node_ptr) {
+               node = *node_ptr;
+               if (addr == node->addr) {
+                       node->pin_types |= pin_type_bit;
+                       return;
+               }
+               if (addr < node->addr)
+                       node_ptr = &node->left;
+               else
+                       node_ptr = &node->right;
+       }
+
+       node = sgen_alloc_internal_dynamic (sizeof (PinStatAddress), INTERNAL_MEM_STATISTICS, TRUE);
+       node->addr = addr;
+       node->pin_types = pin_type_bit;
+       node->left = node->right = NULL;
+
+       *node_ptr = node;
+}
+
+/*
+ * Walk the address tree and, for every pin type with a registered address
+ * inside [obj, obj+size), add `size` to that type's byte count.
+ * `*pin_types` accumulates the types already counted for this object so
+ * each type is charged at most once per object.
+ */
+static void
+pin_stats_count_object_from_tree (char *obj, size_t size, PinStatAddress *node, int *pin_types)
+{
+       if (!node)
+               return;
+       if (node->addr >= obj && node->addr < obj + size) {
+               int i;
+               for (i = 0; i < PIN_TYPE_MAX; ++i) {
+                       int pin_bit = 1 << i;
+                       if (!(*pin_types & pin_bit) && (node->pin_types & pin_bit)) {
+                               pinned_byte_counts [i] += size;
+                               *pin_types |= pin_bit;
+                       }
+               }
+       }
+       /* Both subtrees may overlap the object's range, so descend into each as needed. */
+       if (obj < node->addr)
+               pin_stats_count_object_from_tree (obj, size, node->left, pin_types);
+       if (obj + size - 1 > node->addr)
+               pin_stats_count_object_from_tree (obj, size, node->right, pin_types);
+}
+
+/*
+ * Look up the stats entry keyed by the vtable's "namespace.name" string,
+ * inserting a copy of `empty_entry` on first sight.  The returned pointer
+ * refers to storage owned by the hash table.
+ */
+static gpointer
+lookup_vtable_entry (SgenHashTable *hash_table, GCVTable *vtable, gpointer empty_entry)
+{
+       char *name = g_strdup_printf ("%s.%s", sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable));
+       gpointer entry = sgen_hash_table_lookup (hash_table, name);
+
+       if (entry) {
+               g_free (name);
+       } else {
+               /* The table takes ownership of `name` as the key. */
+               sgen_hash_table_replace (hash_table, name, empty_entry, NULL);
+               entry = sgen_hash_table_lookup (hash_table, name);
+       }
+
+       return entry;
+}
+
+/* Bump the per-class pin counters for every pin type set in `pin_types`. */
+static void
+register_vtable (GCVTable *vtable, int pin_types)
+{
+       PinnedClassEntry empty_entry;
+       PinnedClassEntry *entry;
+       int i;
+
+       memset (&empty_entry, 0, sizeof (PinnedClassEntry));
+       entry = lookup_vtable_entry (&pinned_class_hash_table, vtable, &empty_entry);
+
+       for (i = 0; i < PIN_TYPE_MAX; ++i) {
+               if (pin_types & (1 << i))
+                       ++entry->num_pins [i];
+       }
+}
+
+/*
+ * Record a pinned object: charge its bytes per pin type, remember it in
+ * the pinned-object list, and update per-class statistics.  No-op unless
+ * pin stats are enabled.
+ */
+void
+sgen_pin_stats_register_object (char *obj, size_t size)
+{
+       int pin_types = 0;
+
+       if (!do_pin_stats)
+               return;
+
+       pin_stats_count_object_from_tree (obj, size, pin_stat_addresses, &pin_types);
+       sgen_pointer_queue_add (&pinned_objects, obj);
+
+       if (pin_types)
+               register_vtable ((GCVTable*)SGEN_LOAD_VTABLE (obj), pin_types);
+}
+
+/* Count a global remset entry against the target object's class. */
+void
+sgen_pin_stats_register_global_remset (char *obj)
+{
+       GlobalRemsetClassEntry empty_entry;
+       GlobalRemsetClassEntry *entry;
+
+       if (!do_pin_stats)
+               return;
+
+       memset (&empty_entry, 0, sizeof (GlobalRemsetClassEntry));
+       entry = lookup_vtable_entry (&global_remset_class_hash_table, (GCVTable*)SGEN_LOAD_VTABLE (obj), &empty_entry);
+
+       ++entry->num_remsets;
+}
+
+/* Print the per-class pinning and global-remset tables (no-op when stats
+ * are disabled). */
+void
+sgen_pin_stats_print_class_stats (void)
+{
+       char *name;
+       PinnedClassEntry *pinned_entry;
+       GlobalRemsetClassEntry *remset_entry;
+
+       if (!do_pin_stats)
+               return;
+
+       g_print ("\n%-50s  %10s  %10s  %10s\n", "Class", "Stack", "Static", "Other");
+       SGEN_HASH_TABLE_FOREACH (&pinned_class_hash_table, name, pinned_entry) {
+               int i;
+               g_print ("%-50s", name);
+               /* NOTE(review): num_pins is size_t printed with %ld — fine on LP64,
+                * suspect on LLP64 (Win64); confirm or cast. */
+               for (i = 0; i < PIN_TYPE_MAX; ++i)
+                       g_print ("  %10ld", pinned_entry->num_pins [i]);
+               g_print ("\n");
+       } SGEN_HASH_TABLE_FOREACH_END;
+
+       g_print ("\n%-50s  %10s\n", "Class", "#Remsets");
+       SGEN_HASH_TABLE_FOREACH (&global_remset_class_hash_table, name, remset_entry) {
+               g_print ("%-50s  %10ld\n", name, remset_entry->num_remsets);
+       } SGEN_HASH_TABLE_FOREACH_END;
+}
+
+/* Bytes pinned in this collection attributable to `pin_type` (PIN_TYPE_*). */
+size_t
+sgen_pin_stats_get_pinned_byte_count (int pin_type)
+{
+       return pinned_byte_counts [pin_type];
+}
+
+/* The list of objects pinned during the current collection. */
+SgenPointerQueue*
+sgen_pin_stats_get_object_list (void)
+{
+       return &pinned_objects;
+}
+
+#endif /* HAVE_SGEN_GC */
diff --git a/mono/sgen/sgen-pinning.c b/mono/sgen/sgen-pinning.c
new file mode 100644 (file)
index 0000000..6d122f8
--- /dev/null
@@ -0,0 +1,298 @@
+/*
+ * sgen-pinning.c: The pin queue.
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#include <string.h>
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-pinning.h"
+#include "mono/sgen/sgen-protocol.h"
+#include "mono/sgen/sgen-pointer-queue.h"
+#include "mono/sgen/sgen-client.h"
+
+/* The global pin queue: candidate pinning addresses staged during the
+ * stop phase, later sorted and de-duplicated. */
+static SgenPointerQueue pin_queue;
+static size_t last_num_pinned = 0;
+
+/* Small filter used to cheaply drop recently-repeated staged pointers. */
+#define PIN_HASH_SIZE 1024
+static void *pin_hash_filter [PIN_HASH_SIZE];
+
+/* Reset the duplicate filter and set the queue's internal memory type. */
+void
+sgen_init_pinning (void)
+{
+       memset (pin_hash_filter, 0, sizeof (pin_hash_filter));
+       pin_queue.mem_type = INTERNAL_MEM_PIN_QUEUE;
+}
+
+/* Remember how many entries ended up pinned, then empty the queue. */
+void
+sgen_finish_pinning (void)
+{
+       last_num_pinned = pin_queue.next_slot;
+       sgen_pointer_queue_clear (&pin_queue);
+}
+
+/* Stage a potential pinning address.  The hash filter only suppresses some
+ * duplicates; exact de-duplication happens in sgen_optimize_pin_queue(). */
+void
+sgen_pin_stage_ptr (void *ptr)
+{
+       /* Very simple multiplicative hash function, tons better than simply AND'ing. */
+       int hash_idx = ((mword)ptr * 1737350767) & (PIN_HASH_SIZE - 1);
+       if (pin_hash_filter [hash_idx] == ptr)
+               return;
+
+       pin_hash_filter [hash_idx] = ptr;
+
+       sgen_pointer_queue_add (&pin_queue, ptr);
+}
+
+/*
+ * Binary-search the (sorted) pin queue for the index range
+ * [*first_out, *last_out) of entries inside [start, end).  Returns TRUE
+ * if that range is non-empty.
+ */
+gboolean
+sgen_find_optimized_pin_queue_area (void *start, void *end, size_t *first_out, size_t *last_out)
+{
+       size_t first = sgen_pointer_queue_search (&pin_queue, start);
+       size_t last = sgen_pointer_queue_search (&pin_queue, end);
+       SGEN_ASSERT (0, last == pin_queue.next_slot || pin_queue.data [last] >= end, "Pin queue search gone awry");
+       *first_out = first;
+       *last_out = last;
+       return first != last;
+}
+
+/* Pointer to pin-queue slot `index`; the one-past-the-end slot is allowed. */
+void**
+sgen_pinning_get_entry (size_t index)
+{
+       SGEN_ASSERT (0, index <= pin_queue.next_slot, "Pin queue entry out of range");
+       return &pin_queue.data [index];
+}
+
+/* Compute and store the pin-queue index range covering `section`'s data. */
+void
+sgen_find_section_pin_queue_start_end (GCMemSection *section)
+{
+       SGEN_LOG (6, "Pinning from section %p (%p-%p)", section, section->data, section->end_data);
+
+       sgen_find_optimized_pin_queue_area (section->data, section->end_data,
+                       &section->pin_queue_first_entry, &section->pin_queue_last_entry);
+
+       SGEN_LOG (6, "Found %zd pinning addresses in section %p",
+                       section->pin_queue_last_entry - section->pin_queue_first_entry, section);
+}
+
+/* This will set up the given section to cover the whole pin queue. */
+void
+sgen_pinning_setup_section (GCMemSection *section)
+{
+       section->pin_queue_first_entry = 0;
+       section->pin_queue_last_entry = pin_queue.next_slot;
+}
+
+/* Discard pin-queue entries past the section's range.  Assumes the nursery
+ * section owns the head of the queue (asserted). */
+void
+sgen_pinning_trim_queue_to_section (GCMemSection *section)
+{
+       SGEN_ASSERT (0, section->pin_queue_first_entry == 0, "Pin queue trimming assumes the whole pin queue is used by the nursery");
+       pin_queue.next_slot = section->pin_queue_last_entry;
+}
+
+/*
+ * This is called when we've run out of memory during a major collection.
+ *
+ * After collecting potential pin entries and sorting the array, this is what it looks like:
+ *
+ * +--------------------+---------------------------------------------+--------------------+
+ * | major heap entries |               nursery entries               | major heap entries |
+ * +--------------------+---------------------------------------------+--------------------+
+ *
+ * Of course there might not be major heap entries before and/or after the nursery entries,
+ * depending on where the major heap sections are in the address space, and whether there
+ * were any potential pointers there.
+ *
+ * When we pin nursery objects, we compact the nursery part of the pin array, which leaves
+ * discarded entries after the ones that actually pointed to nursery objects:
+ *
+ * +--------------------+-----------------+---------------------------+--------------------+
+ * | major heap entries | nursery entries | discarded nursery entries | major heap entries |
+ * +--------------------+-----------------+---------------------------+--------------------+
+ *
+ * When, due to being out of memory, we late pin more objects, the pin array looks like
+ * this:
+ *
+ * +--------------------+-----------------+---------------------------+--------------------+--------------+
+ * | major heap entries | nursery entries | discarded nursery entries | major heap entries | late entries |
+ * +--------------------+-----------------+---------------------------+--------------------+--------------+
+ *
+ * This function gets rid of the discarded nursery entries by nulling them out.  Note that
+ * we can late pin objects not only in the nursery but also in the major heap, which happens
+ * when evacuation fails.
+ */
+void
+sgen_pin_queue_clear_discarded_entries (GCMemSection *section, size_t max_pin_slot)
+{
+       void **start = sgen_pinning_get_entry (section->pin_queue_last_entry);
+       void **end = sgen_pinning_get_entry (max_pin_slot);
+       void *addr;
+
+       for (; start < end; ++start) {
+               addr = *start;
+               /* Stop at the first entry outside the section: discarded entries are contiguous. */
+               if ((char*)addr < section->data || (char*)addr > section->end_data)
+                       break;
+               *start = NULL;
+       }
+}
+
+/* reduce the info in the pin queue, removing duplicate pointers and sorting them */
+void
+sgen_optimize_pin_queue (void)
+{
+       sgen_pointer_queue_sort_uniq (&pin_queue);
+}
+
+/* Number of entries currently staged in the pin queue. */
+size_t
+sgen_get_pinned_count (void)
+{
+       return pin_queue.next_slot;
+}
+
+/* Log every object pinned in the last finished collection (debug aid). */
+void
+sgen_dump_pin_queue (void)
+{
+       int i;
+
+       /* NOTE(review): `i` is int while last_num_pinned is size_t — fine for
+        * realistic pin counts, but triggers sign-compare warnings. */
+       for (i = 0; i < last_num_pinned; ++i) {
+               void *ptr = pin_queue.data [i];
+               SGEN_LOG (3, "Bastard pinning obj %p (%s), size: %zd", ptr, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (ptr)), sgen_safe_object_get_size (ptr));
+       }
+}
+
+/* One slot of the cementing hash: a nursery object and how many times it
+ * was seen pinned.  count >= SGEN_CEMENT_THRESHOLD means "cemented". */
+typedef struct _CementHashEntry CementHashEntry;
+struct _CementHashEntry {
+       char *obj;
+       unsigned int count;
+};
+
+static CementHashEntry cement_hash [SGEN_CEMENT_HASH_SIZE];
+
+static gboolean cement_enabled = TRUE;
+
+/* Enable or disable cementing (defaults to enabled). */
+void
+sgen_cement_init (gboolean enabled)
+{
+       cement_enabled = enabled;
+}
+
+/* Forget all cementing candidates and notify the binary protocol. */
+void
+sgen_cement_reset (void)
+{
+       memset (cement_hash, 0, sizeof (cement_hash));
+       binary_protocol_cement_reset ();
+}
+
+/* Whether nursery object `obj` is currently cemented.  Read-only: does not
+ * modify the hash. */
+gboolean
+sgen_cement_lookup (char *obj)
+{
+       guint hv = sgen_aligned_addr_hash (obj);
+       int i = SGEN_CEMENT_HASH (hv);
+
+       SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Looking up cementing for non-nursery objects makes no sense");
+
+       if (!cement_enabled)
+               return FALSE;
+
+       if (!cement_hash [i].obj)
+               return FALSE;
+       if (cement_hash [i].obj != obj)
+               return FALSE;
+
+       return cement_hash [i].count >= SGEN_CEMENT_THRESHOLD;
+}
+
+/*
+ * Count a pinning of nursery object `obj` toward cementing.  Returns TRUE
+ * iff the object was already cemented before this call; the call that
+ * crosses SGEN_CEMENT_THRESHOLD marks the object cemented but still
+ * returns FALSE.  Hash collisions with a different object are ignored
+ * (the slot keeps its current occupant).
+ */
+gboolean
+sgen_cement_lookup_or_register (char *obj)
+{
+       guint hv;
+       int i;
+       CementHashEntry *hash = cement_hash;
+
+       if (!cement_enabled)
+               return FALSE;
+
+       hv = sgen_aligned_addr_hash (obj);
+       i = SGEN_CEMENT_HASH (hv);
+
+       SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Can only cement pointers to nursery objects");
+
+       if (!hash [i].obj) {
+               SGEN_ASSERT (5, !hash [i].count, "Cementing hash inconsistent");
+               hash [i].obj = obj;
+       } else if (hash [i].obj != obj) {
+               return FALSE;
+       }
+
+       if (hash [i].count >= SGEN_CEMENT_THRESHOLD)
+               return TRUE;
+
+       ++hash [i].count;
+       if (hash [i].count == SGEN_CEMENT_THRESHOLD) {
+               SGEN_ASSERT (9, sgen_get_current_collection_generation () >= 0, "We can only cement objects when we're in a collection pause.");
+               SGEN_ASSERT (9, SGEN_OBJECT_IS_PINNED (obj), "Can only cement pinned objects");
+               SGEN_CEMENT_OBJECT (obj);
+
+               binary_protocol_cement (obj, (gpointer)SGEN_LOAD_VTABLE (obj),
+                               (int)sgen_safe_object_get_size ((GCObject*)obj));
+       }
+
+       return FALSE;
+}
+
+/* Stage every object in `hash` for pinning and re-mark it cemented.  With
+ * `has_been_reset`, entries below the threshold are asserted away. */
+static void
+pin_from_hash (CementHashEntry *hash, gboolean has_been_reset)
+{
+       int i;
+       for (i = 0; i < SGEN_CEMENT_HASH_SIZE; ++i) {
+               if (!hash [i].count)
+                       continue;
+
+               if (has_been_reset)
+                       SGEN_ASSERT (5, hash [i].count >= SGEN_CEMENT_THRESHOLD, "Cementing hash inconsistent");
+
+               sgen_pin_stage_ptr (hash [i].obj);
+               binary_protocol_cement_stage (hash [i].obj);
+               /* FIXME: do pin stats if enabled */
+
+               SGEN_CEMENT_OBJECT (hash [i].obj);
+       }
+}
+
+/* Pin all currently-cemented objects for the upcoming collection. */
+void
+sgen_pin_cemented_objects (void)
+{
+       pin_from_hash (cement_hash, TRUE);
+}
+
+/* Drop cementing candidates that never reached the threshold. */
+void
+sgen_cement_clear_below_threshold (void)
+{
+       int i;
+       for (i = 0; i < SGEN_CEMENT_HASH_SIZE; ++i) {
+               if (cement_hash [i].count < SGEN_CEMENT_THRESHOLD) {
+                       cement_hash [i].obj = NULL;
+                       cement_hash [i].count = 0;
+               }
+       }
+}
+
+#endif /* HAVE_SGEN_GC */
diff --git a/mono/sgen/sgen-pinning.h b/mono/sgen/sgen-pinning.h
new file mode 100644 (file)
index 0000000..dd8051f
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * sgen-pinning.h: All about pinning objects.
+ *
+ * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __MONO_SGEN_PINNING_H__
+#define __MONO_SGEN_PINNING_H__
+
+#include "mono/sgen/sgen-pointer-queue.h"
+
+/* Where a pinning reference originated; used to attribute pin statistics. */
+enum {
+       PIN_TYPE_STACK,
+       PIN_TYPE_STATIC_DATA,
+       PIN_TYPE_OTHER,
+       PIN_TYPE_MAX
+};
+
+/* The global pin queue (see sgen-pinning.c). */
+void sgen_pin_stage_ptr (void *ptr);
+void sgen_optimize_pin_queue (void);
+void sgen_init_pinning (void);
+void sgen_finish_pinning (void);
+void sgen_pin_queue_clear_discarded_entries (GCMemSection *section, size_t max_pin_slot);
+size_t sgen_get_pinned_count (void);
+void sgen_pinning_setup_section (GCMemSection *section);
+void sgen_pinning_trim_queue_to_section (GCMemSection *section);
+
+void sgen_dump_pin_queue (void);
+
+gboolean sgen_find_optimized_pin_queue_area (void *start, void *end, size_t *first_out, size_t *last_out);
+void sgen_find_section_pin_queue_start_end (GCMemSection *section);
+void** sgen_pinning_get_entry (size_t index);
+void sgen_pin_objects_in_section (GCMemSection *section, ScanCopyContext ctx);
+
+/* Pinning stats */
+
+void sgen_pin_stats_register_address (char *addr, int pin_type);
+size_t sgen_pin_stats_get_pinned_byte_count (int pin_type);
+SgenPointerQueue *sgen_pin_stats_get_object_list (void);
+void sgen_pin_stats_reset (void);
+
+/* Perpetual pinning, aka cementing */
+
+void sgen_cement_init (gboolean enabled);
+void sgen_cement_reset (void);
+gboolean sgen_cement_lookup (char *obj);
+gboolean sgen_cement_lookup_or_register (char *obj);
+void sgen_pin_cemented_objects (void);
+void sgen_cement_clear_below_threshold (void);
+
+#endif
diff --git a/mono/sgen/sgen-pointer-queue.c b/mono/sgen/sgen-pointer-queue.c
new file mode 100644 (file)
index 0000000..0e3fea6
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * sgen-pointer-queue.c: A pointer queue that can be sorted.
+ *
+ * Copyright (C) 2014 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* config.h must be included before testing HAVE_SGEN_GC — without it the
+ * macro is never defined and this whole file compiles to nothing.  The
+ * other sgen sources in this library do the same. */
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#include <string.h>
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-pointer-queue.h"
+
+/* Empty the queue; the backing array is kept for reuse. */
+void
+sgen_pointer_queue_clear (SgenPointerQueue *queue)
+{
+       queue->next_slot = 0;
+}
+
+/* Initialize an empty queue whose storage is accounted to `mem_type`. */
+void
+sgen_pointer_queue_init (SgenPointerQueue *queue, int mem_type)
+{
+       queue->next_slot = 0;
+       queue->size = 0;
+       queue->data = NULL;
+       queue->mem_type = mem_type;
+}
+
+/* Grow the backing array by 1.5x (starting at 1024 slots), copying the
+ * live entries over. */
+static void
+realloc_queue (SgenPointerQueue *queue)
+{
+       size_t new_size = queue->size ? queue->size + queue->size/2 : 1024;
+       void **new_data = sgen_alloc_internal_dynamic (sizeof (void*) * new_size, queue->mem_type, TRUE);
+
+       memcpy (new_data, queue->data, sizeof (void*) * queue->next_slot);
+       sgen_free_internal_dynamic (queue->data, sizeof (void*) * queue->size, queue->mem_type);
+       queue->data = new_data;
+       queue->size = new_size;
+       SGEN_LOG (4, "Reallocated pointer queue to size: %lu", new_size);
+}
+
+/* Whether the next add will trigger a reallocation. */
+gboolean
+sgen_pointer_queue_will_grow (SgenPointerQueue *queue)
+{
+       return queue->next_slot >= queue->size;
+}
+
+/* Append `ptr`, growing the backing array if necessary. */
+void
+sgen_pointer_queue_add (SgenPointerQueue *queue, void *ptr)
+{
+       if (sgen_pointer_queue_will_grow (queue))
+               realloc_queue (queue);
+
+       queue->data [queue->next_slot++] = ptr;
+}
+
+/* Remove and return the last element; asserts the queue is non-empty. */
+void*
+sgen_pointer_queue_pop (SgenPointerQueue *queue)
+{
+       g_assert (queue->next_slot);
+
+       return queue->data [--queue->next_slot];
+}
+
+/*
+ * Binary search: returns the index of the first element >= `addr`, or
+ * next_slot if all elements are smaller.  Requires the queue to be sorted.
+ */
+size_t
+sgen_pointer_queue_search (SgenPointerQueue *queue, void *addr)
+{
+       size_t first = 0, last = queue->next_slot;
+       while (first < last) {
+               size_t middle = first + ((last - first) >> 1);
+               if (addr <= queue->data [middle])
+                       last = middle;
+               else
+                       first = middle + 1;
+       }
+       g_assert (first == last);
+       return first;
+}
+
+/*
+ * Removes all NULL pointers from the queue, compacting the remaining
+ * entries and shrinking next_slot accordingly.
+ */
+void
+sgen_pointer_queue_remove_nulls (SgenPointerQueue *queue)
+{
+       void **start, **cur, **end;
+       start = cur = queue->data;
+       end = queue->data + queue->next_slot;
+       while (cur < end) {
+               if (*cur)
+                       *start++ = *cur++;
+               else
+                       ++cur;
+       }
+       queue->next_slot = start - queue->data;
+}
+
+/*
+ * Sorts the pointers in the queue, then removes duplicates in place.
+ */
+void
+sgen_pointer_queue_sort_uniq (SgenPointerQueue *queue)
+{
+       void **start, **cur, **end;
+       /* sort and uniq pin_queue: we just sort and we let the rest discard multiple values */
+       /* it may be better to keep ranges of pinned memory instead of individually pinning objects */
+       /* NOTE(review): next_slot is size_t logged with %lu — OK on LP64, verify on LLP64. */
+       SGEN_LOG (5, "Sorting pointer queue, size: %lu", queue->next_slot);
+       if (queue->next_slot > 1)
+               sgen_sort_addresses (queue->data, queue->next_slot);
+       start = cur = queue->data;
+       end = queue->data + queue->next_slot;
+       while (cur < end) {
+               *start = *cur++;
+               while (cur < end && *start == *cur)
+                       cur++;
+               start++;
+       };
+       queue->next_slot = start - queue->data;
+       SGEN_LOG (5, "Pointer queue reduced to size: %lu", queue->next_slot);
+}
+
+/*
+ * Does a linear search through the pointer queue to find `ptr`.  Returns the index if
+ * found, otherwise (size_t)-1.
+ */
+size_t
+sgen_pointer_queue_find (SgenPointerQueue *queue, void *ptr)
+{
+       size_t i;
+       for (i = 0; i < queue->next_slot; ++i)
+               if (queue->data [i] == ptr)
+                       return i;
+       return (size_t)-1;
+}
+
+/* Whether the queue currently holds no elements. */
+gboolean
+sgen_pointer_queue_is_empty (SgenPointerQueue *queue)
+{
+       return !queue->next_slot;
+}
+
+/* Free the backing array.  NOTE(review): data/size are left stale — callers
+ * appear expected to re-init or drop the queue afterwards; confirm. */
+void
+sgen_pointer_queue_free (SgenPointerQueue *queue)
+{
+       sgen_free_internal_dynamic (queue->data, sizeof (void*) * queue->size, queue->mem_type);
+}
+
+#endif
diff --git a/mono/sgen/sgen-pointer-queue.h b/mono/sgen/sgen-pointer-queue.h
new file mode 100644 (file)
index 0000000..3352dab
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * sgen-pointer-queue.h: A pointer queue that can be sorted.
+ *
+ * Copyright (C) 2014 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MONO_SGEN_POINTER_QUEUE_H__
+#define __MONO_SGEN_POINTER_QUEUE_H__
+
+#include <glib.h>
+
+/* A growable array of pointers, usable as a stack and sortable in place. */
+typedef struct {
+       void **data;            /* backing array */
+       size_t size;            /* allocated slots */
+       size_t next_slot;       /* number of used slots */
+       int mem_type;           /* INTERNAL_MEM_* accounting type */
+} SgenPointerQueue;
+
+/* Static initializer for an empty queue with the given memory type. */
+#define SGEN_POINTER_QUEUE_INIT(mem_type)      { NULL, 0, 0, (mem_type) }
+
+void sgen_pointer_queue_add (SgenPointerQueue *queue, void *ptr);
+void sgen_pointer_queue_clear (SgenPointerQueue *queue);
+void sgen_pointer_queue_remove_nulls (SgenPointerQueue *queue);
+void sgen_pointer_queue_sort_uniq (SgenPointerQueue *queue);
+size_t sgen_pointer_queue_search (SgenPointerQueue *queue, void *addr);
+size_t sgen_pointer_queue_find (SgenPointerQueue *queue, void *ptr);
+void sgen_pointer_queue_init (SgenPointerQueue *queue, int mem_type);
+void* sgen_pointer_queue_pop (SgenPointerQueue *queue);
+gboolean sgen_pointer_queue_is_empty (SgenPointerQueue *queue);
+void sgen_pointer_queue_free (SgenPointerQueue *queue);
+gboolean sgen_pointer_queue_will_grow (SgenPointerQueue *queue);
+
+#endif
diff --git a/mono/sgen/sgen-protocol-def.h b/mono/sgen/sgen-protocol-def.h
new file mode 100644 (file)
index 0000000..0df783a
--- /dev/null
@@ -0,0 +1,387 @@
/*
 * X-macro definition list for the SGen binary debugging protocol.
 *
 * Each BEGIN_PROTOCOL_ENTRY<N> ... END_PROTOCOL_ENTRY group describes one
 * protocol event: the writer-function name followed by N (type, field) pairs.
 * The includer defines BEGIN_*/END_* and the per-entry macros (FLUSH,
 * DEFAULT_PRINT, CUSTOM_PRINT, IS_ALWAYS_MATCH, MATCH_INDEX, IS_VTABLE_MATCH)
 * before including this file; this file #undefs them all at the end so it can
 * be included repeatedly with different expansions (IDs and structs in
 * sgen-protocol.h, writer functions in sgen-protocol.c, printing/matching in
 * tools).
 *
 * NOTE(review): entry order is significant — protocol IDs are generated from
 * it (see the enum in sgen-protocol.h) — so entries must not be reordered or
 * removed.  _HEAVY entries are only compiled in when
 * SGEN_HEAVY_BINARY_PROTOCOL is defined.
 */

/* Collection lifecycle. */
BEGIN_PROTOCOL_ENTRY3 (binary_protocol_collection_requested, TYPE_INT, generation, TYPE_SIZE, requested_size, TYPE_BOOL, force)
FLUSH ()
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY2 (binary_protocol_collection_begin, TYPE_INT, index, TYPE_INT, generation)
FLUSH ()
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY4 (binary_protocol_collection_end, TYPE_INT, index, TYPE_INT, generation, TYPE_LONGLONG, num_scanned_objects, TYPE_LONGLONG, num_unique_scanned_objects)
FLUSH()
CUSTOM_PRINT (printf ("%d generation %d scanned %lld unique %lld %0.2f%%", entry->index, entry->generation, entry->num_scanned_objects, entry->num_unique_scanned_objects, entry->num_unique_scanned_objects ? (100.0 * (double) entry->num_scanned_objects / (double) entry->num_unique_scanned_objects) : 0.0))
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

/* Concurrent collection phases. */
BEGIN_PROTOCOL_ENTRY0 (binary_protocol_concurrent_start)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY0 (binary_protocol_concurrent_update)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY0 (binary_protocol_concurrent_finish)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY2 (binary_protocol_sweep_begin, TYPE_INT, generation, TYPE_BOOL, full_sweep)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY2 (binary_protocol_sweep_end, TYPE_INT, generation, TYPE_BOOL, full_sweep)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

/* Stop-the-world / restart-the-world events. */
BEGIN_PROTOCOL_ENTRY3 (binary_protocol_world_stopping, TYPE_INT, generation, TYPE_LONGLONG, timestamp, TYPE_POINTER, thread)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (ptr == entry->thread ? 2 : BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY6 (binary_protocol_world_stopped, TYPE_INT, generation, TYPE_LONGLONG, timestamp, TYPE_LONGLONG, total_major_cards, TYPE_LONGLONG, marked_major_cards, TYPE_LONGLONG, total_los_cards, TYPE_LONGLONG, marked_los_cards)
CUSTOM_PRINT (printf ("generation %d timestamp %lld total %lld marked %lld %0.2f%%", entry->generation, entry->timestamp, entry->total_major_cards + entry->total_los_cards, entry->marked_major_cards + entry->marked_los_cards, 100.0 * (double) (entry->marked_major_cards + entry->marked_los_cards) / (double) (entry->total_major_cards + entry->total_los_cards)))
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY6 (binary_protocol_world_restarting, TYPE_INT, generation, TYPE_LONGLONG, timestamp, TYPE_LONGLONG, total_major_cards, TYPE_LONGLONG, marked_major_cards, TYPE_LONGLONG, total_los_cards, TYPE_LONGLONG, marked_los_cards)
CUSTOM_PRINT (printf ("generation %d timestamp %lld total %lld marked %lld %0.2f%%", entry->generation, entry->timestamp, entry->total_major_cards + entry->total_los_cards, entry->marked_major_cards + entry->marked_los_cards, 100.0 * (double) (entry->marked_major_cards + entry->marked_los_cards) / (double) (entry->total_major_cards + entry->total_los_cards)))
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY2 (binary_protocol_world_restarted, TYPE_INT, generation, TYPE_LONGLONG, timestamp)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

/* Major-heap block management (heavy protocol only). */
BEGIN_PROTOCOL_ENTRY_HEAVY2 (binary_protocol_block_alloc, TYPE_POINTER, addr, TYPE_SIZE, size)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (matches_interval (ptr, entry->addr, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY2 (binary_protocol_block_free, TYPE_POINTER, addr, TYPE_SIZE, size)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (matches_interval (ptr, entry->addr, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY4 (binary_protocol_block_set_state, TYPE_POINTER, addr, TYPE_SIZE, size, TYPE_INT, old, TYPE_INT, new)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (matches_interval (ptr, entry->addr, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_HEAVY

/* Mark / reclaim phases. */
BEGIN_PROTOCOL_ENTRY1 (binary_protocol_mark_start, TYPE_INT, generation)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY1 (binary_protocol_mark_end, TYPE_INT, generation)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

/* FIXME: unify sweep and reclaim */
BEGIN_PROTOCOL_ENTRY1 (binary_protocol_reclaim_start, TYPE_INT, generation)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY1 (binary_protocol_reclaim_end, TYPE_INT, generation)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

/* Per-object events: allocation, copying, pinning, marking, scanning. */
BEGIN_PROTOCOL_ENTRY_HEAVY4 (binary_protocol_alloc, TYPE_POINTER, obj, TYPE_POINTER, vtable, TYPE_SIZE, size, TYPE_POINTER, provenance)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (matches_interval (ptr, entry->obj, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (ptr == entry->vtable)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY4 (binary_protocol_copy, TYPE_POINTER, from, TYPE_POINTER, to, TYPE_POINTER, vtable, TYPE_SIZE, size)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (matches_interval (ptr, entry->from, entry->size) ? 0 : matches_interval (ptr, entry->to, entry->size) ? 1 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (ptr == entry->vtable)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY2 (binary_protocol_pin_stage, TYPE_POINTER, addr_ptr, TYPE_POINTER, addr)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (ptr == entry->addr_ptr ? 0 : ptr == entry->addr ? 1 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY1 (binary_protocol_cement_stage, TYPE_POINTER, addr)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (ptr == entry->addr ? 0 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_pin, TYPE_POINTER, obj, TYPE_POINTER, vtable, TYPE_SIZE, size)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (matches_interval (ptr, entry->obj, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (ptr == entry->vtable)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_mark, TYPE_POINTER, obj, TYPE_POINTER, vtable, TYPE_SIZE, size)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (matches_interval (ptr, entry->obj, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_scan_begin, TYPE_POINTER, obj, TYPE_POINTER, vtable, TYPE_SIZE, size)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (matches_interval (ptr, entry->obj, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (ptr == entry->vtable)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY2 (binary_protocol_scan_vtype_begin, TYPE_POINTER, obj, TYPE_SIZE, size)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (matches_interval (ptr, entry->obj, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_scan_process_reference, TYPE_POINTER, obj, TYPE_POINTER, ptr, TYPE_POINTER, value)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (ptr == entry->obj ? 0 : ptr == entry->ptr ? 1 : ptr == entry->value ? 2 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY4 (binary_protocol_scan_stack, TYPE_POINTER, thread, TYPE_POINTER, stack_start, TYPE_POINTER, stack_end, TYPE_INT, skip_reason)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (ptr == entry->thread ? 0 : (ptr >= entry->stack_start && ptr < entry->stack_end) ? 1 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_HEAVY

/* Write barriers and remembered sets. */
BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_wbarrier, TYPE_POINTER, ptr, TYPE_POINTER, value, TYPE_POINTER, value_vtable)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (ptr == entry->ptr ? 0 : ptr == entry->value ? 1 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (ptr == entry->value_vtable)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_global_remset, TYPE_POINTER, ptr, TYPE_POINTER, value, TYPE_POINTER, value_vtable)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (ptr == entry->ptr ? 0 : ptr == entry->value ? 1 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (ptr == entry->value_vtable)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY5 (binary_protocol_ptr_update, TYPE_POINTER, ptr, TYPE_POINTER, old_value, TYPE_POINTER, new_value, TYPE_POINTER, vtable, TYPE_SIZE, size)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (ptr == entry->ptr ? 0 : matches_interval (ptr, entry->old_value, entry->size) ? 1 : matches_interval (ptr, entry->new_value, entry->size) ? 2 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (ptr == entry->vtable)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_cleanup, TYPE_POINTER, ptr, TYPE_POINTER, vtable, TYPE_SIZE, size)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (matches_interval (ptr, entry->ptr, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (ptr == entry->vtable)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY2 (binary_protocol_empty, TYPE_POINTER, start, TYPE_SIZE, size)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (matches_interval (ptr, entry->start, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_HEAVY

/* Thread lifecycle. */
BEGIN_PROTOCOL_ENTRY2 (binary_protocol_thread_suspend, TYPE_POINTER, thread, TYPE_POINTER, stopped_ip)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY1 (binary_protocol_thread_restart, TYPE_POINTER, thread)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY1 (binary_protocol_thread_register, TYPE_POINTER, thread)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY1 (binary_protocol_thread_unregister, TYPE_POINTER, thread)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY6 (binary_protocol_missing_remset, TYPE_POINTER, obj, TYPE_POINTER, obj_vtable, TYPE_INT, offset, TYPE_POINTER, value, TYPE_POINTER, value_vtable, TYPE_BOOL, value_pinned)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (ptr == entry->obj ? 0 : ptr == entry->value ? 3 : ptr == (char*)entry->obj + entry->offset ? BINARY_PROTOCOL_MATCH : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (ptr == entry->obj_vtable || ptr == entry->value_vtable)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY_HEAVY4 (binary_protocol_alloc_pinned, TYPE_POINTER, obj, TYPE_POINTER, vtable, TYPE_SIZE, size, TYPE_POINTER, provenance)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (matches_interval (ptr, entry->obj, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (ptr == entry->vtable)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY4 (binary_protocol_alloc_degraded, TYPE_POINTER, obj, TYPE_POINTER, vtable, TYPE_SIZE, size, TYPE_POINTER, provenance)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (matches_interval (ptr, entry->obj, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (ptr == entry->vtable)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY2 (binary_protocol_card_scan, TYPE_POINTER, start, TYPE_SIZE, size)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (matches_interval (ptr, entry->start, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_HEAVY

/* Cementing. */
BEGIN_PROTOCOL_ENTRY3 (binary_protocol_cement, TYPE_POINTER, obj, TYPE_POINTER, vtable, TYPE_SIZE, size)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (matches_interval (ptr, entry->obj, entry->size) ? 0 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (ptr == entry->vtable)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY0 (binary_protocol_cement_reset)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

/* Disappearing links (weak references). */
BEGIN_PROTOCOL_ENTRY_HEAVY4 (binary_protocol_dislink_update, TYPE_POINTER, link, TYPE_POINTER, obj, TYPE_BOOL, track, TYPE_BOOL, staged)
CUSTOM_PRINT(entry->obj ? printf ("link %p obj %p staged %d track %d", entry->link, entry->obj, entry->staged, entry->track) : printf ("link %p obj %p staged %d", entry->link, entry->obj, entry->staged))
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (ptr == entry->link ? 0 : ptr == entry->obj ? 1 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY4 (binary_protocol_dislink_update_staged, TYPE_POINTER, link, TYPE_POINTER, obj, TYPE_BOOL, track, TYPE_INT, index)
CUSTOM_PRINT(entry->obj ? printf ("link %p obj %p index %d track %d", entry->link, entry->obj, entry->index, entry->track) : printf ("link %p obj %p index %d", entry->link, entry->obj, entry->index))
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (ptr == entry->link ? 0 : ptr == entry->obj ? 1 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_dislink_process_staged, TYPE_POINTER, link, TYPE_POINTER, obj, TYPE_INT, index)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (ptr == entry->link ? 0 : ptr == entry->obj ? 1 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_HEAVY

/* Domain unloading. */
BEGIN_PROTOCOL_ENTRY1 (binary_protocol_domain_unload_begin, TYPE_POINTER, domain)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

BEGIN_PROTOCOL_ENTRY1 (binary_protocol_domain_unload_end, TYPE_POINTER, domain)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY

/* Gray queue operations. */
BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_gray_enqueue, TYPE_POINTER, queue, TYPE_POINTER, cursor, TYPE_POINTER, value)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (ptr == entry->cursor ? 1 : ptr == entry->value ? 2 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_HEAVY

BEGIN_PROTOCOL_ENTRY_HEAVY3 (binary_protocol_gray_dequeue, TYPE_POINTER, queue, TYPE_POINTER, cursor, TYPE_POINTER, value)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (FALSE)
MATCH_INDEX (ptr == entry->cursor ? 1 : ptr == entry->value ? 2 : BINARY_PROTOCOL_NO_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_HEAVY

/* Undefine every expansion macro so the includer can re-include this file
   with a different set of definitions. */
#undef BEGIN_PROTOCOL_ENTRY0
#undef BEGIN_PROTOCOL_ENTRY1
#undef BEGIN_PROTOCOL_ENTRY2
#undef BEGIN_PROTOCOL_ENTRY3
#undef BEGIN_PROTOCOL_ENTRY4
#undef BEGIN_PROTOCOL_ENTRY5
#undef BEGIN_PROTOCOL_ENTRY6
#undef BEGIN_PROTOCOL_ENTRY_HEAVY0
#undef BEGIN_PROTOCOL_ENTRY_HEAVY1
#undef BEGIN_PROTOCOL_ENTRY_HEAVY2
#undef BEGIN_PROTOCOL_ENTRY_HEAVY3
#undef BEGIN_PROTOCOL_ENTRY_HEAVY4
#undef BEGIN_PROTOCOL_ENTRY_HEAVY5
#undef BEGIN_PROTOCOL_ENTRY_HEAVY6

#undef FLUSH

#undef DEFAULT_PRINT
#undef CUSTOM_PRINT

#undef IS_ALWAYS_MATCH
#undef MATCH_INDEX
#undef IS_VTABLE_MATCH

#undef END_PROTOCOL_ENTRY
#undef END_PROTOCOL_ENTRY_HEAVY
diff --git a/mono/sgen/sgen-protocol.c b/mono/sgen/sgen-protocol.c
new file mode 100644 (file)
index 0000000..1b55b88
--- /dev/null
@@ -0,0 +1,434 @@
+/*
+ * sgen-protocol.c: Binary protocol of internal activity, to aid
+ * debugging.
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifdef HAVE_SGEN_GC
+
+#include "config.h"
+#include "sgen-conf.h"
+#include "sgen-gc.h"
+#include "sgen-protocol.h"
+#include "sgen-memory-governor.h"
+#include "sgen-thread-pool.h"
+#include "sgen-client.h"
+#include "mono/utils/mono-membar.h"
+
+#include <errno.h>
+#include <string.h>
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#include <fcntl.h>
+#endif
+
/* FIXME Implement binary protocol IO on systems that don't have unistd */
#ifdef HAVE_UNISTD_H
/* If valid, dump binary protocol to this file */
static int binary_protocol_file = -1;

/* We set this to -1 to indicate an exclusive lock */
static volatile int binary_protocol_use_count = 0;

/* Payload capacity per buffer; sized so a whole buffer (header fields + data)
   stays within 64 KiB. */
#define BINARY_PROTOCOL_BUFFER_SIZE	(65536 - 2 * 8)

typedef struct _BinaryProtocolBuffer BinaryProtocolBuffer;
struct _BinaryProtocolBuffer {
	BinaryProtocolBuffer * volatile next;	/* older buffer in the lock-free list */
	volatile int index;			/* bytes used in `buffer`; advanced via CAS */
	unsigned char buffer [BINARY_PROTOCOL_BUFFER_SIZE];
};

/* Head of the lock-free list of buffers, newest first. */
static BinaryProtocolBuffer * volatile binary_protocol_buffers = NULL;

/* Output filename, or the prefix for indexed files when a size limit is set. */
static char* filename_or_prefix = NULL;
static int current_file_index = 0;	/* suffix of the currently open indexed file */
static long long current_file_size = 0;	/* bytes written to the current file */
static long long file_size_limit;	/* > 0 enables rotation across indexed files */
+
+static char*
+filename_for_index (int index)
+{
+       char *filename;
+
+       SGEN_ASSERT (0, file_size_limit > 0, "Indexed binary protocol filename must only be used with file size limit");
+
+       filename = sgen_alloc_internal_dynamic (strlen (filename_or_prefix) + 32, INTERNAL_MEM_BINARY_PROTOCOL, TRUE);
+       sprintf (filename, "%s.%d", filename_or_prefix, index);
+
+       return filename;
+}
+
+static void
+free_filename (char *filename)
+{
+       SGEN_ASSERT (0, file_size_limit > 0, "Indexed binary protocol filename must only be used with file size limit");
+
+       sgen_free_internal_dynamic (filename, strlen (filename_or_prefix) + 32, INTERNAL_MEM_BINARY_PROTOCOL);
+}
+
+static void
+binary_protocol_open_file (void)
+{
+       char *filename;
+
+       if (file_size_limit > 0)
+               filename = filename_for_index (current_file_index);
+       else
+               filename = filename_or_prefix;
+
+       do {
+               binary_protocol_file = open (filename, O_CREAT|O_WRONLY|O_TRUNC, 0644);
+               if (binary_protocol_file == -1 && errno != EINTR)
+                       break; /* Failed */
+       } while (binary_protocol_file == -1);
+
+       if (file_size_limit > 0)
+               free_filename (filename);
+}
+#endif
+
/*
 * Enable binary protocol dumping to `filename`.  A positive `limit` turns on
 * file rotation: output goes to "<filename>.<n>" files of at most `limit`
 * bytes each.  No-op on systems without unistd.
 */
void
binary_protocol_init (const char *filename, long long limit)
{
#ifdef HAVE_UNISTD_H
	size_t len = strlen (filename);

	/* Keep our own copy of the name; it outlives the caller's string. */
	filename_or_prefix = sgen_alloc_internal_dynamic (len + 1, INTERNAL_MEM_BINARY_PROTOCOL, TRUE);
	memcpy (filename_or_prefix, filename, len + 1);

	file_size_limit = limit;
	binary_protocol_open_file ();
#endif
}
+
+gboolean
+binary_protocol_is_enabled (void)
+{
+#ifdef HAVE_UNISTD_H
+       return binary_protocol_file != -1;
+#else
+       return FALSE;
+#endif
+}
+
+#ifdef HAVE_UNISTD_H
+
+static void
+close_binary_protocol_file (void)
+{
+       while (close (binary_protocol_file) == -1 && errno == EINTR)
+               ;
+       binary_protocol_file = -1;
+}
+
/*
 * Try to take the use count exclusively (flushing vs. concurrent writers).
 * Returns TRUE and sets the count to -1 when no recursive holders exist;
 * returns FALSE without waiting if anybody currently holds the lock.
 */
static gboolean
try_lock_exclusive (void)
{
	do {
		if (binary_protocol_use_count)
			return FALSE;	/* readers/writers active - give up immediately */
	} while (InterlockedCompareExchange (&binary_protocol_use_count, -1, 0) != 0);
	mono_memory_barrier ();
	return TRUE;
}
+
/*
 * Release the exclusive lock taken by try_lock_exclusive ().  Asserts that
 * the count is still -1; any other value means the protocol was violated.
 */
static void
unlock_exclusive (void)
{
	mono_memory_barrier ();
	SGEN_ASSERT (0, binary_protocol_use_count == -1, "Exclusively locked count must be -1");
	if (InterlockedCompareExchange (&binary_protocol_use_count, 0, -1) != -1)
		SGEN_ASSERT (0, FALSE, "Somebody messed with the exclusive lock");
}
+
/*
 * Take the use count as a shared (recursive) holder by incrementing it with
 * CAS.  Spins while an exclusive holder (count == -1) is active.
 */
static void
lock_recursive (void)
{
	int old_count;
	do {
	retry:
		old_count = binary_protocol_use_count;
		if (old_count < 0) {
			/* Exclusively locked - retry */
			/* FIXME: short back-off */
			goto retry;
		}
	} while (InterlockedCompareExchange (&binary_protocol_use_count, old_count + 1, old_count) != old_count);
	mono_memory_barrier ();
}
+
/*
 * Drop one shared hold of the use count (CAS decrement).  Must pair with a
 * preceding lock_recursive ().
 */
static void
unlock_recursive (void)
{
	int old_count;
	mono_memory_barrier ();
	do {
		old_count = binary_protocol_use_count;
		SGEN_ASSERT (0, old_count > 0, "Locked use count must be at least 1");
	} while (InterlockedCompareExchange (&binary_protocol_use_count, old_count - 1, old_count) != old_count);
}
+
+static void
+binary_protocol_flush_buffer (BinaryProtocolBuffer *buffer)
+{
+       ssize_t ret;
+       size_t to_write = buffer->index;
+       size_t written = 0;
+       g_assert (buffer->index > 0);
+
+       while (written < to_write) {
+               ret = write (binary_protocol_file, buffer->buffer + written, to_write - written);
+               if (ret >= 0)
+                       written += ret;
+               else if (errno == EINTR)
+                       continue;
+               else
+                       close_binary_protocol_file ();
+       }
+
+       current_file_size += buffer->index;
+
+       sgen_free_os_memory (buffer, sizeof (BinaryProtocolBuffer), SGEN_ALLOC_INTERNAL);
+}
+
/*
 * Rotate the output file once the configured size limit is exceeded: close
 * the current file, delete the file two generations back so at most two
 * indexed files exist at a time, and open the next indexed file.
 */
static void
binary_protocol_check_file_overflow (void)
{
	if (file_size_limit <= 0 || current_file_size < file_size_limit)
		return;

	close_binary_protocol_file ();

	/* Keep only the previous file around; delete the one before it. */
	if (current_file_index > 0) {
		char *filename = filename_for_index (current_file_index - 1);
		unlink (filename);
		free_filename (filename);
	}

	++current_file_index;
	current_file_size = 0;

	binary_protocol_open_file ();
}
+#endif
+
+void
+binary_protocol_flush_buffers (gboolean force)
+{
+#ifdef HAVE_UNISTD_H
+       int num_buffers = 0, i;
+       BinaryProtocolBuffer *buf;
+       BinaryProtocolBuffer **bufs;
+
+       if (binary_protocol_file == -1)
+               return;
+
+       if (!force && !try_lock_exclusive ())
+               return;
+
+       for (buf = binary_protocol_buffers; buf != NULL; buf = buf->next)
+               ++num_buffers;
+       bufs = sgen_alloc_internal_dynamic (num_buffers * sizeof (BinaryProtocolBuffer*), INTERNAL_MEM_BINARY_PROTOCOL, TRUE);
+       for (buf = binary_protocol_buffers, i = 0; buf != NULL; buf = buf->next, i++)
+               bufs [i] = buf;
+       SGEN_ASSERT (0, i == num_buffers, "Binary protocol buffer count error");
+
+       binary_protocol_buffers = NULL;
+
+       for (i = num_buffers - 1; i >= 0; --i) {
+               binary_protocol_flush_buffer (bufs [i]);
+               binary_protocol_check_file_overflow ();
+       }
+
+       sgen_free_internal_dynamic (buf, num_buffers * sizeof (BinaryProtocolBuffer*), INTERNAL_MEM_BINARY_PROTOCOL);
+
+       if (!force)
+               unlock_exclusive ();
+#endif
+}
+
+#ifdef HAVE_UNISTD_H
/*
 * Return a buffer with at least `length` bytes of free space, allocating and
 * pushing a fresh one onto the lock-free list if the head is too full.  The
 * CAS push can race with other threads; on failure the new buffer is freed
 * and the whole attempt is retried.
 */
static BinaryProtocolBuffer*
binary_protocol_get_buffer (int length)
{
	BinaryProtocolBuffer *buffer, *new_buffer;
 retry:
	buffer = binary_protocol_buffers;
	if (buffer && buffer->index + length <= BINARY_PROTOCOL_BUFFER_SIZE)
		return buffer;

	new_buffer = sgen_alloc_os_memory (sizeof (BinaryProtocolBuffer), SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE, "debugging memory");
	new_buffer->next = buffer;
	new_buffer->index = 0;

	/* Publish the new head; if someone beat us to it, undo and retry. */
	if (InterlockedCompareExchangePointer ((void**)&binary_protocol_buffers, new_buffer, buffer) != buffer) {
		sgen_free_os_memory (new_buffer, sizeof (BinaryProtocolBuffer), SGEN_ALLOC_INTERNAL);
		goto retry;
	}

	return new_buffer;
}
+#endif
+
/*
 * Append one protocol entry -- a type byte followed by `size` payload bytes --
 * to the current buffer.  Writers take the recursive lock (so a flusher can
 * exclude them) and reserve their byte range with a CAS on the buffer index.
 */
static void
protocol_entry (unsigned char type, gpointer data, int size)
{
#ifdef HAVE_UNISTD_H
	int index;
	BinaryProtocolBuffer *buffer;

	if (binary_protocol_file == -1)
		return;

	/* The high bit of the type marks entries logged by thread-pool workers. */
	if (sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ()))
		type |= 0x80;

	lock_recursive ();

 retry:
	buffer = binary_protocol_get_buffer (size + 1);
 retry_same_buffer:
	index = buffer->index;
	/* Another writer may have filled the buffer since we fetched it. */
	if (index + 1 + size > BINARY_PROTOCOL_BUFFER_SIZE)
		goto retry;

	/* Reserve [index, index + 1 + size) by advancing the index with CAS. */
	if (InterlockedCompareExchange (&buffer->index, index + 1 + size, index) != index)
		goto retry_same_buffer;

	/* FIXME: if we're interrupted at this point, we have a buffer
	   entry that contains random data. */

	buffer->buffer [index++] = type;
	memcpy (buffer->buffer + index, data, size);
	index += size;

	g_assert (index <= BINARY_PROTOCOL_BUFFER_SIZE);

	unlock_recursive ();
#endif
}
+
/* Concrete C types for the protocol's field-type tokens. */
#define TYPE_INT int
#define TYPE_LONGLONG long long
#define TYPE_SIZE size_t
#define TYPE_POINTER gpointer
#define TYPE_BOOL gboolean

/*
 * Expansion of sgen-protocol-def.h into the writer functions: each entry
 * becomes a function that fills the generated PROTOCOL_STRUCT, notifies the
 * client hook, and (via END_PROTOCOL_ENTRY) hands the bytes to
 * protocol_entry ().  The print/match macros are no-ops here; they are only
 * meaningful to the protocol-dump tooling.
 */
#define BEGIN_PROTOCOL_ENTRY0(method) \
	void method (void) { \
		int __type = PROTOCOL_ID(method); \
		gpointer __data = NULL; \
		int __size = 0; \
		CLIENT_PROTOCOL_NAME (method) ();
#define BEGIN_PROTOCOL_ENTRY1(method,t1,f1) \
	void method (t1 f1) { \
		PROTOCOL_STRUCT(method) __entry = { f1 }; \
		int __type = PROTOCOL_ID(method); \
		gpointer __data = &__entry; \
		int __size = sizeof (PROTOCOL_STRUCT(method)); \
		CLIENT_PROTOCOL_NAME (method) (f1);
#define BEGIN_PROTOCOL_ENTRY2(method,t1,f1,t2,f2) \
	void method (t1 f1, t2 f2) { \
		PROTOCOL_STRUCT(method) __entry = { f1, f2 }; \
		int __type = PROTOCOL_ID(method); \
		gpointer __data = &__entry; \
		int __size = sizeof (PROTOCOL_STRUCT(method)); \
		CLIENT_PROTOCOL_NAME (method) (f1, f2);
#define BEGIN_PROTOCOL_ENTRY3(method,t1,f1,t2,f2,t3,f3) \
	void method (t1 f1, t2 f2, t3 f3) { \
		PROTOCOL_STRUCT(method) __entry = { f1, f2, f3 }; \
		int __type = PROTOCOL_ID(method); \
		gpointer __data = &__entry; \
		int __size = sizeof (PROTOCOL_STRUCT(method)); \
		CLIENT_PROTOCOL_NAME (method) (f1, f2, f3);
#define BEGIN_PROTOCOL_ENTRY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
	void method (t1 f1, t2 f2, t3 f3, t4 f4) { \
		PROTOCOL_STRUCT(method) __entry = { f1, f2, f3, f4 }; \
		int __type = PROTOCOL_ID(method); \
		gpointer __data = &__entry; \
		int __size = sizeof (PROTOCOL_STRUCT(method)); \
		CLIENT_PROTOCOL_NAME (method) (f1, f2, f3, f4);
#define BEGIN_PROTOCOL_ENTRY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
	void method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5) { \
		PROTOCOL_STRUCT(method) __entry = { f1, f2, f3, f4, f5 }; \
		int __type = PROTOCOL_ID(method); \
		gpointer __data = &__entry; \
		int __size = sizeof (PROTOCOL_STRUCT(method)); \
		CLIENT_PROTOCOL_NAME (method) (f1, f2, f3, f4, f5);
#define BEGIN_PROTOCOL_ENTRY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
	void method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5, t6 f6) { \
		PROTOCOL_STRUCT(method) __entry = { f1, f2, f3, f4, f5, f6 }; \
		int __type = PROTOCOL_ID(method); \
		gpointer __data = &__entry; \
		int __size = sizeof (PROTOCOL_STRUCT(method)); \
		CLIENT_PROTOCOL_NAME (method) (f1, f2, f3, f4, f5, f6);

/* Entries marked FLUSH write out all pending buffers immediately. */
#define FLUSH() \
		binary_protocol_flush_buffers (FALSE);

#define DEFAULT_PRINT()
#define CUSTOM_PRINT(_)

#define IS_ALWAYS_MATCH(_)
#define MATCH_INDEX(_)
#define IS_VTABLE_MATCH(_)

#define END_PROTOCOL_ENTRY \
		protocol_entry (__type, __data, __size); \
	}

/* Heavy entries are full writers only with SGEN_HEAVY_BINARY_PROTOCOL;
   otherwise they expand to nothing (the functions simply don't exist). */
#ifdef SGEN_HEAVY_BINARY_PROTOCOL
#define BEGIN_PROTOCOL_ENTRY_HEAVY0(method) \
	BEGIN_PROTOCOL_ENTRY0 (method)
#define BEGIN_PROTOCOL_ENTRY_HEAVY1(method,t1,f1) \
	BEGIN_PROTOCOL_ENTRY1 (method,t1,f1)
#define BEGIN_PROTOCOL_ENTRY_HEAVY2(method,t1,f1,t2,f2) \
	BEGIN_PROTOCOL_ENTRY2 (method,t1,f1,t2,f2)
#define BEGIN_PROTOCOL_ENTRY_HEAVY3(method,t1,f1,t2,f2,t3,f3) \
	BEGIN_PROTOCOL_ENTRY3 (method,t1,f1,t2,f2,t3,f3)
#define BEGIN_PROTOCOL_ENTRY_HEAVY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
	BEGIN_PROTOCOL_ENTRY4 (method,t1,f1,t2,f2,t3,f3,t4,f4)
#define BEGIN_PROTOCOL_ENTRY_HEAVY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
	BEGIN_PROTOCOL_ENTRY5 (method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5)
#define BEGIN_PROTOCOL_ENTRY_HEAVY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
	BEGIN_PROTOCOL_ENTRY6 (method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6)

#define END_PROTOCOL_ENTRY_HEAVY \
	END_PROTOCOL_ENTRY
#else
#define BEGIN_PROTOCOL_ENTRY_HEAVY0(method)
#define BEGIN_PROTOCOL_ENTRY_HEAVY1(method,t1,f1)
#define BEGIN_PROTOCOL_ENTRY_HEAVY2(method,t1,f1,t2,f2)
#define BEGIN_PROTOCOL_ENTRY_HEAVY3(method,t1,f1,t2,f2,t3,f3)
#define BEGIN_PROTOCOL_ENTRY_HEAVY4(method,t1,f1,t2,f2,t3,f3,t4,f4)
#define BEGIN_PROTOCOL_ENTRY_HEAVY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5)
#define BEGIN_PROTOCOL_ENTRY_HEAVY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6)

#define END_PROTOCOL_ENTRY_HEAVY
#endif

#include "sgen-protocol-def.h"

#undef TYPE_INT
#undef TYPE_LONGLONG
#undef TYPE_SIZE
#undef TYPE_POINTER
#undef TYPE_BOOL

#endif /* HAVE_SGEN_GC */
diff --git a/mono/sgen/sgen-protocol.h b/mono/sgen/sgen-protocol.h
new file mode 100644 (file)
index 0000000..2b7176e
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+ * sgen-protocol.h: Binary protocol of internal activity, to aid
+ * debugging.
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MONO_SGENPROTOCOL_H__
+#define __MONO_SGENPROTOCOL_H__
+
+#include "sgen-gc.h"
+
+/* Special indices returned by MATCH_INDEX. */
+#define BINARY_PROTOCOL_NO_MATCH (-1)
+#define BINARY_PROTOCOL_MATCH (-2)
+
+#define PROTOCOL_ID(method) method ## _id
+#define PROTOCOL_STRUCT(method) method ## _struct
+#define CLIENT_PROTOCOL_NAME(method) sgen_client_ ## method
+
+#define TYPE_INT int
+#define TYPE_LONGLONG long long
+#define TYPE_SIZE size_t
+#define TYPE_POINTER gpointer
+#define TYPE_BOOL gboolean
+
+enum {
+#define BEGIN_PROTOCOL_ENTRY0(method) PROTOCOL_ID(method),
+#define BEGIN_PROTOCOL_ENTRY1(method,t1,f1) PROTOCOL_ID(method),
+#define BEGIN_PROTOCOL_ENTRY2(method,t1,f1,t2,f2) PROTOCOL_ID(method),
+#define BEGIN_PROTOCOL_ENTRY3(method,t1,f1,t2,f2,t3,f3) PROTOCOL_ID(method),
+#define BEGIN_PROTOCOL_ENTRY4(method,t1,f1,t2,f2,t3,f3,t4,f4) PROTOCOL_ID(method),
+#define BEGIN_PROTOCOL_ENTRY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) PROTOCOL_ID(method),
+#define BEGIN_PROTOCOL_ENTRY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) PROTOCOL_ID(method),
+#define BEGIN_PROTOCOL_ENTRY_HEAVY0(method) PROTOCOL_ID(method),
+#define BEGIN_PROTOCOL_ENTRY_HEAVY1(method,t1,f1) PROTOCOL_ID(method),
+#define BEGIN_PROTOCOL_ENTRY_HEAVY2(method,t1,f1,t2,f2) PROTOCOL_ID(method),
+#define BEGIN_PROTOCOL_ENTRY_HEAVY3(method,t1,f1,t2,f2,t3,f3) PROTOCOL_ID(method),
+#define BEGIN_PROTOCOL_ENTRY_HEAVY4(method,t1,f1,t2,f2,t3,f3,t4,f4) PROTOCOL_ID(method),
+#define BEGIN_PROTOCOL_ENTRY_HEAVY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) PROTOCOL_ID(method),
+#define BEGIN_PROTOCOL_ENTRY_HEAVY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) PROTOCOL_ID(method),
+
+#define FLUSH()
+
+#define DEFAULT_PRINT()
+#define CUSTOM_PRINT(_)
+
+#define IS_ALWAYS_MATCH(_)
+#define MATCH_INDEX(_)
+#define IS_VTABLE_MATCH(_)
+
+#define END_PROTOCOL_ENTRY
+#define END_PROTOCOL_ENTRY_HEAVY
+
+#include "sgen-protocol-def.h"
+};
+
+#define BEGIN_PROTOCOL_ENTRY0(method)
+#define BEGIN_PROTOCOL_ENTRY1(method,t1,f1) \
+       typedef struct { \
+               t1 f1; \
+       } PROTOCOL_STRUCT(method);
+#define BEGIN_PROTOCOL_ENTRY2(method,t1,f1,t2,f2) \
+       typedef struct { \
+               t1 f1; \
+               t2 f2; \
+       } PROTOCOL_STRUCT(method);
+#define BEGIN_PROTOCOL_ENTRY3(method,t1,f1,t2,f2,t3,f3) \
+       typedef struct { \
+               t1 f1; \
+               t2 f2; \
+               t3 f3; \
+       } PROTOCOL_STRUCT(method);
+#define BEGIN_PROTOCOL_ENTRY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
+       typedef struct { \
+               t1 f1; \
+               t2 f2; \
+               t3 f3; \
+               t4 f4; \
+       } PROTOCOL_STRUCT(method);
+#define BEGIN_PROTOCOL_ENTRY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
+       typedef struct { \
+               t1 f1; \
+               t2 f2; \
+               t3 f3; \
+               t4 f4; \
+               t5 f5; \
+       } PROTOCOL_STRUCT(method);
+#define BEGIN_PROTOCOL_ENTRY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
+       typedef struct { \
+               t1 f1; \
+               t2 f2; \
+               t3 f3; \
+               t4 f4; \
+               t5 f5; \
+               t6 f6; \
+       } PROTOCOL_STRUCT(method);
+
+#define BEGIN_PROTOCOL_ENTRY_HEAVY0(method) \
+       BEGIN_PROTOCOL_ENTRY0 (method)
+#define BEGIN_PROTOCOL_ENTRY_HEAVY1(method,t1,f1) \
+       BEGIN_PROTOCOL_ENTRY1 (method,t1,f1)
+#define BEGIN_PROTOCOL_ENTRY_HEAVY2(method,t1,f1,t2,f2) \
+       BEGIN_PROTOCOL_ENTRY2 (method,t1,f1,t2,f2)
+#define BEGIN_PROTOCOL_ENTRY_HEAVY3(method,t1,f1,t2,f2,t3,f3) \
+       BEGIN_PROTOCOL_ENTRY3 (method,t1,f1,t2,f2,t3,f3)
+#define BEGIN_PROTOCOL_ENTRY_HEAVY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
+       BEGIN_PROTOCOL_ENTRY4 (method,t1,f1,t2,f2,t3,f3,t4,f4)
+#define BEGIN_PROTOCOL_ENTRY_HEAVY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
+       BEGIN_PROTOCOL_ENTRY5 (method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5)
+#define BEGIN_PROTOCOL_ENTRY_HEAVY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
+       BEGIN_PROTOCOL_ENTRY6 (method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6)
+
+#define FLUSH()
+
+#define DEFAULT_PRINT()
+#define CUSTOM_PRINT(_)
+
+#define IS_ALWAYS_MATCH(_)
+#define MATCH_INDEX(_)
+#define IS_VTABLE_MATCH(_)
+
+#define END_PROTOCOL_ENTRY
+#define END_PROTOCOL_ENTRY_HEAVY
+
+#include "sgen-protocol-def.h"
+
+/* missing: finalizers, roots, non-store wbarriers */
+
+void binary_protocol_init (const char *filename, long long limit);
+gboolean binary_protocol_is_enabled (void);
+
+void binary_protocol_flush_buffers (gboolean force);
+
+#define BEGIN_PROTOCOL_ENTRY0(method) \
+       void method (void);
+#define BEGIN_PROTOCOL_ENTRY1(method,t1,f1) \
+       void method (t1 f1);
+#define BEGIN_PROTOCOL_ENTRY2(method,t1,f1,t2,f2) \
+       void method (t1 f1, t2 f2);
+#define BEGIN_PROTOCOL_ENTRY3(method,t1,f1,t2,f2,t3,f3) \
+       void method (t1 f1, t2 f2, t3 f3);
+#define BEGIN_PROTOCOL_ENTRY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
+       void method (t1 f1, t2 f2, t3 f3, t4 f4);
+#define BEGIN_PROTOCOL_ENTRY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
+       void method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5);
+#define BEGIN_PROTOCOL_ENTRY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
+       void method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5, t6 f6);
+
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+#define binary_protocol_is_heavy_enabled()     binary_protocol_is_enabled ()
+
+#define BEGIN_PROTOCOL_ENTRY_HEAVY0(method) \
+       BEGIN_PROTOCOL_ENTRY0 (method)
+#define BEGIN_PROTOCOL_ENTRY_HEAVY1(method,t1,f1) \
+       BEGIN_PROTOCOL_ENTRY1 (method,t1,f1)
+#define BEGIN_PROTOCOL_ENTRY_HEAVY2(method,t1,f1,t2,f2) \
+       BEGIN_PROTOCOL_ENTRY2 (method,t1,f1,t2,f2)
+#define BEGIN_PROTOCOL_ENTRY_HEAVY3(method,t1,f1,t2,f2,t3,f3) \
+       BEGIN_PROTOCOL_ENTRY3 (method,t1,f1,t2,f2,t3,f3)
+#define BEGIN_PROTOCOL_ENTRY_HEAVY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
+       BEGIN_PROTOCOL_ENTRY4 (method,t1,f1,t2,f2,t3,f3,t4,f4)
+#define BEGIN_PROTOCOL_ENTRY_HEAVY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
+       BEGIN_PROTOCOL_ENTRY5 (method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5)
+#define BEGIN_PROTOCOL_ENTRY_HEAVY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
+       BEGIN_PROTOCOL_ENTRY6 (method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6)
+#else
+#define binary_protocol_is_heavy_enabled()     FALSE
+
+#define BEGIN_PROTOCOL_ENTRY_HEAVY0(method) \
+       static inline void method (void) {}
+#define BEGIN_PROTOCOL_ENTRY_HEAVY1(method,t1,f1) \
+       static inline void method (t1 f1) {}
+#define BEGIN_PROTOCOL_ENTRY_HEAVY2(method,t1,f1,t2,f2) \
+       static inline void method (t1 f1, t2 f2) {}
+#define BEGIN_PROTOCOL_ENTRY_HEAVY3(method,t1,f1,t2,f2,t3,f3) \
+       static inline void method (t1 f1, t2 f2, t3 f3) {}
+#define BEGIN_PROTOCOL_ENTRY_HEAVY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
+       static inline void method (t1 f1, t2 f2, t3 f3, t4 f4) {}
+#define BEGIN_PROTOCOL_ENTRY_HEAVY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
+       static inline void method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5) {}
+#define BEGIN_PROTOCOL_ENTRY_HEAVY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
+       static inline void method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5, t6 f6) {}
+#endif
+
+#define FLUSH()
+
+#define DEFAULT_PRINT()
+#define CUSTOM_PRINT(_)
+
+#define IS_ALWAYS_MATCH(_)
+#define MATCH_INDEX(_)
+#define IS_VTABLE_MATCH(_)
+
+#define END_PROTOCOL_ENTRY
+#define END_PROTOCOL_ENTRY_HEAVY
+
+#include "sgen-protocol-def.h"
+
+#undef TYPE_INT
+#undef TYPE_LONGLONG
+#undef TYPE_SIZE
+#undef TYPE_POINTER
+#undef TYPE_BOOL
+
+#endif
diff --git a/mono/sgen/sgen-qsort.c b/mono/sgen/sgen-qsort.c
new file mode 100644 (file)
index 0000000..7566bdd
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * sgen-qsort.c: Quicksort.
+ *
+ * Copyright (C) 2013 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "config.h"
+
+#ifdef HAVE_SGEN_GC
+
+#include "sgen/sgen-gc.h"
+
+#define ELEM(i)                (((unsigned char*)base) + ((i) * width))
+#define SWAP(i,j)      do {                                    \
+               size_t __i = (i), __j = (j);                    \
+               if (__i != __j) {                               \
+                       memcpy (swap_tmp, ELEM (__i), width);   \
+                       memcpy (ELEM (__i), ELEM (__j), width); \
+                       memcpy (ELEM (__j), swap_tmp, width);   \
+               }                                               \
+       } while (0)
+
+static size_t
+partition (void *base, size_t nel, size_t width, int (*compar) (const void*, const void*), unsigned char *pivot_tmp, unsigned char *swap_tmp)
+{
+       size_t pivot_idx = nel >> 1;
+       size_t s, i;
+
+       memcpy (pivot_tmp, ELEM (pivot_idx), width);
+       SWAP (pivot_idx, nel - 1);
+       s = 0;
+       for (i = 0; i < nel - 1; ++i) {
+               if (compar (ELEM (i), pivot_tmp) <= 0) {
+                       SWAP (i, s);
+                       ++s;
+               }
+       }
+       SWAP (s, nel - 1);
+       return s;
+}
+
+static void
+qsort_rec (void *base, size_t nel, size_t width, int (*compar) (const void*, const void*), unsigned char *pivot_tmp, unsigned char *swap_tmp)
+{
+       size_t pivot_idx;
+
+       if (nel <= 1)
+               return;
+
+       pivot_idx = partition (base, nel, width, compar, pivot_tmp, swap_tmp);
+       qsort_rec (base, pivot_idx, width, compar, pivot_tmp, swap_tmp);
+       if (pivot_idx < nel)
+               qsort_rec (ELEM (pivot_idx + 1), nel - pivot_idx - 1, width, compar, pivot_tmp, swap_tmp);
+}
+
+void
+sgen_qsort (void *base, size_t nel, size_t width, int (*compar) (const void*, const void*))
+{
+#ifndef _MSC_VER
+       unsigned char pivot_tmp [width];
+       unsigned char swap_tmp [width];
+#else
+       unsigned char* pivot_tmp = (unsigned char*) alloca(width);
+       unsigned char* swap_tmp = (unsigned char*) alloca(width);
+#endif
+
+       qsort_rec (base, nel, width, compar, pivot_tmp, swap_tmp);
+}
+
+#endif
diff --git a/mono/sgen/sgen-qsort.h b/mono/sgen/sgen-qsort.h
new file mode 100644 (file)
index 0000000..75577e5
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * sgen-qsort.h: Fast inline sorting
+ *
+ * Copyright (C) 2014 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __MONO_SGENQSORT_H__
+#define __MONO_SGENQSORT_H__
+
+#define DEF_QSORT_INLINE(NAME,ARRAY_TYPE,COMPARE_FUN)  \
+static size_t partition_##NAME (ARRAY_TYPE base[], size_t nel) {       \
+       size_t pivot_idx = nel >> 1;    \
+       size_t s, i;    \
+       ARRAY_TYPE pivot = base [pivot_idx];    \
+       { ARRAY_TYPE tmp = base [pivot_idx]; base [pivot_idx] = base [nel - 1]; base [nel - 1] = tmp; } \
+       s = 0;  \
+       for (i = 0; i < nel - 1; ++i) { \
+               if (COMPARE_FUN (base [i], pivot) <= 0) {       \
+                       { ARRAY_TYPE tmp = base [i]; base [i] = base [s]; base [s] = tmp; }     \
+                       ++s;    \
+               }       \
+       }       \
+       { ARRAY_TYPE tmp = base [s]; base [s] = base [nel - 1]; base [nel - 1] = tmp; } \
+       return s;       \
+}      \
+static void rec_##NAME (ARRAY_TYPE base[], size_t nel) {       \
+       size_t pivot_idx;       \
+       if (nel <= 1)   \
+               return; \
+       pivot_idx = partition_##NAME (base, nel); \
+       rec_##NAME (base, pivot_idx);   \
+       if (pivot_idx < nel)    \
+               rec_##NAME (&base[pivot_idx + 1], nel - pivot_idx - 1); \
+}      \
+static void qsort_##NAME (ARRAY_TYPE base[], size_t nel) {     \
+       rec_##NAME (base, nel); \
+}      \
+
+
+#endif
diff --git a/mono/sgen/sgen-scan-object.h b/mono/sgen/sgen-scan-object.h
new file mode 100644 (file)
index 0000000..9d1611c
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * sgen-scan-object.h: Generic object scan.
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright (C) 2013 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * Scans one object, using the OBJ_XXX macros.  The start of the
+ * object must be given in the variable "char* start".  Afterwards,
+ * "start" will point to the start of the next object, if the scanned
+ * object contained references.  If not, the value of "start" should
+ * be considered undefined after executing this code.  The object's
+ * GC descriptor must be in the variable "mword desc".
+ *
+ * The macro `HANDLE_PTR` will be invoked for every reference encountered while scanning the
+ * object.  It is called with two parameters: The pointer to the reference (not the
+ * reference itself!) as well as the pointer to the scanned object.
+ *
+ * Modifiers (automatically undefined):
+ *
+ * SCAN_OBJECT_NOSCAN - if defined, don't actually scan the object,
+ * i.e. don't invoke the OBJ_XXX macros.
+ *
+ * SCAN_OBJECT_NOVTABLE - desc is provided by the includer, instead of
+ * vt.  Complex arrays cannot be scanned.
+ *
+ * SCAN_OBJECT_PROTOCOL - if defined, binary protocol the scan.
+ * Should only be used for scanning that's done for the actual
+ * collection, not for debugging scans.
+ */
+
+{
+#ifndef SCAN_OBJECT_NOVTABLE
+#if defined(SGEN_HEAVY_BINARY_PROTOCOL) && defined(SCAN_OBJECT_PROTOCOL)
+       binary_protocol_scan_begin (start, SGEN_LOAD_VTABLE (start), sgen_safe_object_get_size ((GCObject*)start));
+#endif
+#else
+#if defined(SGEN_HEAVY_BINARY_PROTOCOL) && defined(SCAN_OBJECT_PROTOCOL)
+       binary_protocol_scan_vtype_begin (start + SGEN_CLIENT_OBJECT_HEADER_SIZE, size);
+#endif
+#endif
+       switch (desc & DESC_TYPE_MASK) {
+       case DESC_TYPE_RUN_LENGTH:
+#define SCAN OBJ_RUN_LEN_FOREACH_PTR (desc, start)
+#ifndef SCAN_OBJECT_NOSCAN
+               SCAN;
+#endif
+#undef SCAN
+               break;
+       case DESC_TYPE_VECTOR:
+#define SCAN OBJ_VECTOR_FOREACH_PTR (desc, start)
+#ifndef SCAN_OBJECT_NOSCAN
+               SCAN;
+#endif
+#undef SCAN
+               break;
+       case DESC_TYPE_BITMAP:
+#define SCAN OBJ_BITMAP_FOREACH_PTR (desc, start)
+#ifndef SCAN_OBJECT_NOSCAN
+               SCAN;
+#endif
+#undef SCAN
+               break;
+       case DESC_TYPE_COMPLEX:
+               /* this is a complex object */
+#define SCAN OBJ_COMPLEX_FOREACH_PTR (desc, start)
+#ifndef SCAN_OBJECT_NOSCAN
+               SCAN;
+#endif
+#undef SCAN
+               break;
+#ifndef SCAN_OBJECT_NOVTABLE
+       case DESC_TYPE_COMPLEX_ARR:
+               /* this is an array of complex structs */
+#define SCAN OBJ_COMPLEX_ARR_FOREACH_PTR (desc, start)
+#ifndef SCAN_OBJECT_NOSCAN
+               SCAN;
+#endif
+#undef SCAN
+               break;
+#endif
+       case DESC_TYPE_SMALL_PTRFREE:
+       case DESC_TYPE_COMPLEX_PTRFREE:
+               /*Nothing to do*/
+               break;
+       default:
+               g_assert_not_reached ();
+       }
+}
+
+#undef SCAN_OBJECT_NOSCAN
+#undef SCAN_OBJECT_NOVTABLE
+#undef SCAN_OBJECT_PROTOCOL
diff --git a/mono/sgen/sgen-simple-nursery.c b/mono/sgen/sgen-simple-nursery.c
new file mode 100644 (file)
index 0000000..5488775
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * sgen-simple-nursery.c: Simple always promote nursery.
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#include <string.h>
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-protocol.h"
+#include "mono/sgen/sgen-layout-stats.h"
+#include "mono/sgen/sgen-client.h"
+
+static inline char*
+alloc_for_promotion (GCVTable *vtable, char *obj, size_t objsize, gboolean has_references)
+{
+       return major_collector.alloc_object (vtable, objsize, has_references);
+}
+
+static SgenFragment*
+build_fragments_get_exclude_head (void)
+{
+       return NULL;
+}
+
+static void
+build_fragments_release_exclude_head (void)
+{
+}
+
+static void
+build_fragments_finish (SgenFragmentAllocator *allocator)
+{
+}
+
+static void
+prepare_to_space (char *to_space_bitmap, size_t space_bitmap_size)
+{
+}
+
+static void
+clear_fragments (void)
+{      
+}
+
+static void
+init_nursery (SgenFragmentAllocator *allocator, char *start, char *end)
+{
+       sgen_fragment_allocator_add (allocator, start, end);
+}
+
+
+/******************************************Copy/Scan functions ************************************************/
+
+#define SGEN_SIMPLE_NURSERY
+
+#define SERIAL_COPY_OBJECT simple_nursery_serial_copy_object
+#define SERIAL_COPY_OBJECT_FROM_OBJ simple_nursery_serial_copy_object_from_obj
+
+#include "sgen-minor-copy-object.h"
+#include "sgen-minor-scan-object.h"
+
+void
+sgen_simple_nursery_init (SgenMinorCollector *collector)
+{
+       collector->is_split = FALSE;
+
+       collector->alloc_for_promotion = alloc_for_promotion;
+
+       collector->prepare_to_space = prepare_to_space;
+       collector->clear_fragments = clear_fragments;
+       collector->build_fragments_get_exclude_head = build_fragments_get_exclude_head;
+       collector->build_fragments_release_exclude_head = build_fragments_release_exclude_head;
+       collector->build_fragments_finish = build_fragments_finish;
+       collector->init_nursery = init_nursery;
+
+       FILL_MINOR_COLLECTOR_COPY_OBJECT (collector);
+       FILL_MINOR_COLLECTOR_SCAN_OBJECT (collector);
+}
+
+
+#endif
diff --git a/mono/sgen/sgen-split-nursery.c b/mono/sgen/sgen-split-nursery.c
new file mode 100644 (file)
index 0000000..96d765f
--- /dev/null
@@ -0,0 +1,457 @@
+/*
+ * sgen-split-nursery.c: 3-space based nursery collector.
+ *
+ * Author:
+ *     Rodrigo Kumpera <kumpera@gmail.com>
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright 2011-2012 Xamarin Inc (http://www.xamarin.com)
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#include <string.h>
+#include <stdlib.h>
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-protocol.h"
+#include "mono/sgen/sgen-layout-stats.h"
+#include "mono/sgen/sgen-client.h"
+#include "mono/utils/mono-memory-model.h"
+
+/*
+The nursery is logically divided into 3 spaces: Allocator space and two Survivor spaces.
+
+Objects are born (allocated by the mutator) in the Allocator Space.
+
+The Survivor spaces are divided in a copying collector style From and To spaces.
+The role of each space switches on each collection.
+
+On each collection we process objects from the nursery this way:
+Objects from the Allocator Space are evacuated into the To Space.
+Objects from the Survivor From Space are evacuated into the old generation.
+
+
+The nursery is physically divided in two parts, set by the promotion barrier.
+
+The Allocator Space takes the bottom part of the nursery.
+
+The Survivor spaces are intermingled in the top part of the nursery. It's done
+this way since the required size for the To Space depends on the survivor rate
+of objects from the Allocator Space. 
+
+During a collection when the object scan function sees a nursery object it must
+determine if the object needs to be evacuated or left in place. Originally, this
+check was done by checking if a forwarding pointer is installed, but now an object
+can be in the To Space, it won't have a forwarding pointer and it must be left in place.
+
+In order to solve that we classify nursery memory as being either in the From Space or in
+the To Space. Since the Allocator Space has the same behavior as the Survivor From Space
+they are unified for this purpose - a bit confusing at first.
+
+This from/to classification is done on a larger granule than an object to make the check efficient
+and, due to that, we must make sure that all fragments used to allocate memory from the To Space
+are naturally aligned in both ends to that granule to avoid wrongly classifying a From Space object.
+
+TODO:
+-The promotion barrier is statically defined to 50% of the nursery, it should be dynamically adjusted based
+on survival rates;
+-We apply the same promotion policy to all objects, finalizable ones should age longer in the nursery;
+-We apply the same promotion policy to all stages of a collection, maybe we should promote more aggressively
+objects from non-stack roots, specially those found in the remembered set;
+-Fix our major collection trigger to happen before we do a minor GC and collect the nursery only once.
+-Make the serial fragment allocator fast path inlineable
+-Make aging threshold be based on survival rates and survivor occupancy;
+-Change promotion barrier to be size and not address based;
+-Pre allocate memory for young ages to make sure that on overflow only the older suffer;
+-Get rid of par_alloc_buffer_refill_mutex so the parallel collection of the nursery doesn't suck;
+*/
+
+/*FIXME Move this to a separate header. */
+#define _toi(ptr) ((size_t)ptr)
+#define make_ptr_mask(bits) ((1 << bits) - 1)
+#define align_down(ptr, bits) ((void*)(_toi(ptr) & ~make_ptr_mask (bits)))
+#define align_up(ptr, bits) ((void*) ((_toi(ptr) + make_ptr_mask (bits)) & ~make_ptr_mask (bits)))
+
+/*
+Even though the effective max age is 255, aging that much doesn't make sense.
+It might even make sense to use nimbles for age recording.
+*/
+#define MAX_AGE 15
+
+/*
+ * Each age has its allocation buffer.  Whenever an object is to be
+ * aged we try to fit it into its new age's allocation buffer.  If
+ * that is not possible we get new space from the fragment allocator
+ * and set the allocation buffer to that space (minus the space
+ * required for the object).
+ */
+
+typedef struct {
+       char *next;
+       char *end;
+} AgeAllocationBuffer;
+
+/* Limits the amount of memory the mutator can have. */
+static char *promotion_barrier;
+
+/*
+Promotion age and alloc ratio are the two nursery knobs to control
+how much effort we want to spend on young objects.
+
+Allocation ratio should be the inverse of the expected survivor rate.
+The more objects survive, the smaller the alloc ratio must be so we can
+age all objects.
+
+Promote age depends on how much effort we want to spend aging objects before
+we promote them to the old generation. If additional ages don't somewhat improve
+mortality, it's better to avoid them as they increase the cost of minor collections.
+
+*/
+
+
+/*
+If we're evacuating an object with this age or more, promote it.
+Age is the number of surviving collections of an object.
+*/
+static int promote_age = 2;
+
+/*
+Initial ratio of allocation and survivor spaces.
+This should be read as the fraction of the whole nursery dedicated
+for the allocator space.
+*/
+static float alloc_ratio = 60.f/100.f;
+
+
+static char *region_age;
+static size_t region_age_size;
+static AgeAllocationBuffer age_alloc_buffers [MAX_AGE];
+
+/* The collector allocs from here. */
+static SgenFragmentAllocator collector_allocator;
+
+static inline int
+get_object_age (char *object)
+{
+       size_t idx = (object - sgen_nursery_start) >> SGEN_TO_SPACE_GRANULE_BITS;
+       return region_age [idx];
+}
+
+static void
+set_age_in_range (char *start, char *end, int age)
+{
+       char *region_start;
+       size_t region_idx, length;
+       region_idx = (start - sgen_nursery_start) >> SGEN_TO_SPACE_GRANULE_BITS;
+       region_start = &region_age [region_idx];
+       length = (end - start) >> SGEN_TO_SPACE_GRANULE_BITS;
+       memset (region_start, age, length);
+}
+
+static inline void
+mark_bit (char *space_bitmap, char *pos)
+{
+       size_t idx = (pos - sgen_nursery_start) >> SGEN_TO_SPACE_GRANULE_BITS;
+       size_t byte = idx / 8;
+       int bit = idx & 0x7;
+
+       g_assert (byte < sgen_space_bitmap_size);
+       space_bitmap [byte] |= 1 << bit;
+}
+
+static void
+mark_bits_in_range (char *space_bitmap, char *start, char *end)
+{
+       start = align_down (start, SGEN_TO_SPACE_GRANULE_BITS);
+       end = align_up (end, SGEN_TO_SPACE_GRANULE_BITS);
+
+       for (;start < end; start += SGEN_TO_SPACE_GRANULE_IN_BYTES)
+               mark_bit (space_bitmap, start);
+}
+
+/*
+ * This splits the fragments at the point of the promotion barrier.
+ * Two allocators are actually involved here: The mutator allocator and
+ * the collector allocator.  This function is called with the
+ * collector, but it's a copy of the mutator allocator and contains
+ * all the fragments in the nursery.  The fragments below the
+ * promotion barrier are left with the mutator allocator and the ones
+ * above are put into the collector allocator.
+ */
+static void
+fragment_list_split (SgenFragmentAllocator *allocator)
+{
+       SgenFragment *prev = NULL, *list = allocator->region_head;
+
+       while (list) {
+               if (list->fragment_end > promotion_barrier) {
+                       if (list->fragment_start < promotion_barrier) {
+                               SgenFragment *res = sgen_fragment_allocator_alloc ();
+
+                               res->fragment_start = promotion_barrier;
+                               res->fragment_next = promotion_barrier;
+                               res->fragment_end = list->fragment_end;
+                               res->next = list->next;
+                               res->next_in_order = list->next_in_order;
+                               g_assert (res->fragment_end > res->fragment_start);
+
+                               list->fragment_end = promotion_barrier;
+                               list->next = list->next_in_order = NULL;
+                               set_age_in_range (list->fragment_start, list->fragment_end, 0);
+
+                               allocator->region_head = allocator->alloc_head = res;
+                               return;
+                       } else {
+                               if (prev)
+                                       prev->next = prev->next_in_order = NULL;
+                               allocator->region_head = allocator->alloc_head = list;
+                               return;
+                       }
+               }
+               set_age_in_range (list->fragment_start, list->fragment_end, 0);
+               prev = list;
+               list = list->next;
+       }
+       allocator->region_head = allocator->alloc_head = NULL;
+}
+
+/******************************************Minor Collector API ************************************************/
+
+#define AGE_ALLOC_BUFFER_MIN_SIZE SGEN_TO_SPACE_GRANULE_IN_BYTES
+#define AGE_ALLOC_BUFFER_DESIRED_SIZE (SGEN_TO_SPACE_GRANULE_IN_BYTES * 8)
+
+static char*
+alloc_for_promotion_slow_path (int age, size_t objsize)
+{
+       char *p;
+       size_t allocated_size;
+       size_t aligned_objsize = (size_t)align_up (objsize, SGEN_TO_SPACE_GRANULE_BITS);
+
+       p = sgen_fragment_allocator_serial_range_alloc (
+               &collector_allocator,
+               MAX (aligned_objsize, AGE_ALLOC_BUFFER_DESIRED_SIZE),
+               MAX (aligned_objsize, AGE_ALLOC_BUFFER_MIN_SIZE),
+               &allocated_size);
+       if (p) {
+               set_age_in_range (p, p + allocated_size, age);
+               sgen_clear_range (age_alloc_buffers [age].next, age_alloc_buffers [age].end);
+               age_alloc_buffers [age].next = p + objsize;
+               age_alloc_buffers [age].end = p + allocated_size;
+       }
+       return p;
+}
+
+/*
+ * Allocate promotion space for OBJ (OBJSIZE bytes, vtable VTABLE).
+ * Objects that have already reached promote_age go to the major collector;
+ * younger objects are copied into the alloc buffer for age + 1.
+ */
+static inline char*
+alloc_for_promotion (GCVTable *vtable, char *obj, size_t objsize, gboolean has_references)
+{
+	char *p = NULL;
+	int age;
+
+	age = get_object_age (obj);
+	if (age >= promote_age)
+		return major_collector.alloc_object (vtable, objsize, has_references);
+
+	/* Promote! */
+	++age;
+
+	/* Fast path: bump-pointer allocate from this age's buffer. */
+	p = age_alloc_buffers [age].next;
+	if (G_LIKELY (p + objsize <= age_alloc_buffers [age].end)) {
+		age_alloc_buffers [age].next += objsize;
+	} else {
+		/* Buffer exhausted: grab a new one, or fall back to the major space on OOM. */
+		p = alloc_for_promotion_slow_path (age, objsize);
+		if (!p)
+			return major_collector.alloc_object (vtable, objsize, has_references);
+	}
+
+	/* FIXME: assumes object layout */
+	*(GCVTable**)p = vtable;
+
+	return p;
+}
+
+/* Entry point used as SgenMinorCollector.alloc_for_promotion. */
+static char*
+minor_alloc_for_promotion (GCVTable *vtable, char *obj, size_t objsize, gboolean has_references)
+{
+	/*
+	A non-nursery object can only show up here during a major collection;
+	such objects are allocated directly in the major space.
+	*/
+	if (sgen_ptr_in_nursery (obj))
+		return alloc_for_promotion (vtable, obj, objsize, has_references);
+
+	return major_collector.alloc_object (vtable, objsize, has_references);
+}
+
+/*
+ * Clear the unused tails of all per-age alloc buffers and return the
+ * collector allocator's region list, which the fragment build step excludes.
+ */
+static SgenFragment*
+build_fragments_get_exclude_head (void)
+{
+	int i;
+	for (i = 0; i < MAX_AGE; ++i) {
+		/*If we OOM'd on the last collection ->end might be null while ->next not.*/
+		if (age_alloc_buffers [i].end)
+			sgen_clear_range (age_alloc_buffers [i].next, age_alloc_buffers [i].end);
+	}
+
+	return collector_allocator.region_head;
+}
+
+/* Release all regions held by the collector allocator after fragment building. */
+static void
+build_fragments_release_exclude_head (void)
+{
+	sgen_fragment_allocator_release (&collector_allocator);
+}
+
+/* Take ownership of the freshly built fragment list and split it at the
+ * promotion barrier (mutator side vs. collector side). */
+static void
+build_fragments_finish (SgenFragmentAllocator *allocator)
+{
+	/* We split the fragment list based on the promotion barrier. */
+	collector_allocator = *allocator;
+	fragment_list_split (&collector_allocator);
+}
+
+/*
+ * Prepare the collector's fragments for use as promotion to-space:
+ * reset the to-space bitmap and per-age alloc buffers, trim each fragment
+ * to granule alignment, drop fragments too small to be useful, and mark
+ * the usable ranges in the bitmap.
+ */
+static void
+prepare_to_space (char *to_space_bitmap, size_t space_bitmap_size)
+{
+	SgenFragment **previous, *frag;
+
+	memset (to_space_bitmap, 0, space_bitmap_size);
+	memset (age_alloc_buffers, 0, sizeof (age_alloc_buffers));
+
+	previous = &collector_allocator.alloc_head;
+
+	for (frag = *previous; frag; frag = *previous) {
+		char *start = align_up (frag->fragment_next, SGEN_TO_SPACE_GRANULE_BITS);
+		char *end = align_down (frag->fragment_end, SGEN_TO_SPACE_GRANULE_BITS);
+
+		/* Fragment is too small to be usable. */
+		if ((end - start) < SGEN_MAX_NURSERY_WASTE) {
+			sgen_clear_range (frag->fragment_next, frag->fragment_end);
+			frag->fragment_next = frag->fragment_end = frag->fragment_start;
+			/* Unlink the fragment; `previous` stays put so the next iteration sees its successor. */
+			*previous = frag->next;
+			continue;
+		}
+
+		/*
+		We need to insert 3 phony objects so the fragments build step can correctly
+		walk the nursery.
+		*/
+
+		/* Clean the fragment range. */
+		sgen_clear_range (start, end);
+		/* We need a phony object in between the original fragment start and the effective one. */
+		if (start != frag->fragment_next)
+			sgen_clear_range (frag->fragment_next, start);
+		/* We need a phony object in between the new fragment end and the original fragment end. */
+		if (end != frag->fragment_end)
+			sgen_clear_range (end, frag->fragment_end);
+
+		frag->fragment_start = frag->fragment_next = start;
+		frag->fragment_end = end;
+		mark_bits_in_range (to_space_bitmap, start, end);
+		previous = &frag->next;
+	}
+}
+
+/* Zero the memory of all fragments currently held by the collector allocator. */
+static void
+clear_fragments (void)
+{
+	sgen_clear_allocator_fragments (&collector_allocator);
+}
+
+/*
+ * Split the nursery at the promotion barrier: [start, barrier) goes to the
+ * mutator allocator, [barrier, end) to the collector's promotion space.
+ * Also allocates the per-granule age table for the whole nursery.
+ */
+static void
+init_nursery (SgenFragmentAllocator *allocator, char *start, char *end)
+{
+	int alloc_quote = (int)((end - start) * alloc_ratio);
+	/* NOTE(review): align_down is called with 3 -- presumably a bit count (8-byte
+	 * alignment); confirm against align_down's definition. */
+	promotion_barrier = align_down (start + alloc_quote, 3);
+	sgen_fragment_allocator_add (allocator, start, promotion_barrier);
+	sgen_fragment_allocator_add (&collector_allocator, promotion_barrier, end);
+
+	region_age_size = (end - start) >> SGEN_TO_SPACE_GRANULE_BITS;
+	region_age = g_malloc0 (region_age_size);
+}
+
+/* Parse one split-nursery GC parameter.  Returns TRUE if OPT was recognized;
+ * exits the process with an error message on an out-of-range value. */
+static gboolean
+handle_gc_param (const char *opt)
+{
+	if (g_str_has_prefix (opt, "alloc-ratio=")) {
+		const char *value = strchr (opt, '=') + 1;
+		int ratio_percent = atoi (value);
+		if (ratio_percent < 1 || ratio_percent > 100) {
+			fprintf (stderr, "alloc-ratio must be an integer in the range 1-100.\n");
+			exit (1);
+		}
+		alloc_ratio = (float)ratio_percent / 100.0f;
+		return TRUE;
+	}
+
+	if (g_str_has_prefix (opt, "promotion-age=")) {
+		const char *value = strchr (opt, '=') + 1;
+		promote_age = atoi (value);
+		if (promote_age < 1 || promote_age >= MAX_AGE) {
+			fprintf (stderr, "promotion-age must be an integer in the range 1-%d.\n", MAX_AGE - 1);
+			exit (1);
+		}
+		return TRUE;
+	}
+	return FALSE;
+}
+
+/* Print usage help for the parameters accepted by handle_gc_param above. */
+static void
+print_gc_param_usage (void)
+{
+	fprintf (stderr,
+			""
+			"  alloc-ratio=P (where P is a percentage, an integer in 1-100)\n"
+			"  promotion-age=P (where P is a number, an integer in 1-%d)\n",
+			MAX_AGE - 1
+			);
+}
+
+/****************************************** Copy/Scan functions ************************************************/
+
+#define SGEN_SPLIT_NURSERY
+
+#define SERIAL_COPY_OBJECT split_nursery_serial_copy_object
+#define SERIAL_COPY_OBJECT_FROM_OBJ split_nursery_serial_copy_object_from_obj
+
+#include "sgen-minor-copy-object.h"
+#include "sgen-minor-scan-object.h"
+
+/*
+ * Install the split-nursery minor collector: wire up all vtable entries on
+ * COLLECTOR and fill in the copy/scan functions generated by the
+ * sgen-minor-*-object.h templates included above.
+ */
+void
+sgen_split_nursery_init (SgenMinorCollector *collector)
+{
+	collector->is_split = TRUE;
+
+	collector->alloc_for_promotion = minor_alloc_for_promotion;
+
+	collector->prepare_to_space = prepare_to_space;
+	collector->clear_fragments = clear_fragments;
+	collector->build_fragments_get_exclude_head = build_fragments_get_exclude_head;
+	collector->build_fragments_release_exclude_head = build_fragments_release_exclude_head;
+	collector->build_fragments_finish = build_fragments_finish;
+	collector->init_nursery = init_nursery;
+	collector->handle_gc_param = handle_gc_param;
+	collector->print_gc_param_usage = print_gc_param_usage;
+
+	FILL_MINOR_COLLECTOR_COPY_OBJECT (collector);
+	FILL_MINOR_COLLECTOR_SCAN_OBJECT (collector);
+}
+
+
+#endif
diff --git a/mono/sgen/sgen-tagged-pointer.h b/mono/sgen/sgen-tagged-pointer.h
new file mode 100644 (file)
index 0000000..2d55abb
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * sgen-tagged-pointer.h: Macros for tagging and untagging pointers.
+ *
+ * Copyright (C) 2014 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MONO_SGEN_TAGGED_POINTER_H__
+#define __MONO_SGEN_TAGGED_POINTER_H__
+
+/* Tags live in the low 3 bits of a pointer (bits 1, 2 and 4), which are free
+ * because SGen pointers are at least 8-byte aligned. */
+#define SGEN_TAGGED_POINTER_MASK	7
+
+/* Bit 1 */
+#define SGEN_POINTER_IS_TAGGED_1(p)	((mword)(p) & 1)
+#define SGEN_POINTER_TAG_1(p)		((void*)((mword)(p) | 1))
+#define SGEN_POINTER_UNTAG_1(p)		((void*)((mword)(p) & ~1))
+
+/* Bit 2 */
+#define SGEN_POINTER_IS_TAGGED_2(p)	((mword)(p) & 2)
+#define SGEN_POINTER_TAG_2(p)		((void*)((mword)(p) | 2))
+#define SGEN_POINTER_UNTAG_2(p)		((void*)((mword)(p) & ~2))
+
+/* Bits 1+2 as a two-bit field: TAG_12 *reads* the field, SET_TAG_12 stores T into it. */
+#define SGEN_POINTER_TAG_12(p)		((mword)(p) & 3)
+#define SGEN_POINTER_SET_TAG_12(p,t)	((void*)(((mword)(p) & ~3) | (t)))
+
+/* Bit 4 */
+#define SGEN_POINTER_IS_TAGGED_4(p)	((mword)(p) & 4)
+#define SGEN_POINTER_TAG_4(p)		((void*)((mword)(p) | 4))
+#define SGEN_POINTER_UNTAG_4(p)		((void*)((mword)(p) & ~4))
+
+#define SGEN_POINTER_UNTAG_12(p)	((void*)((mword)(p) & ~3))
+#define SGEN_POINTER_UNTAG_24(p)	((void*)((mword)(p) & ~6))
+
+#define SGEN_POINTER_IS_TAGGED_ANY(p)	((mword)(p) & SGEN_TAGGED_POINTER_MASK)
+#define SGEN_POINTER_UNTAG_ALL(p)	((void*)((mword)(p) & ~SGEN_TAGGED_POINTER_MASK))
+
+#endif
diff --git a/mono/sgen/sgen-thread-pool.c b/mono/sgen/sgen-thread-pool.c
new file mode 100644 (file)
index 0000000..305a423
--- /dev/null
@@ -0,0 +1,259 @@
+/*
+ * sgen-thread-pool.c: Threadpool for all concurrent GC work.
+ *
+ * Copyright (C) 2015 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-thread-pool.h"
+#include "mono/sgen/sgen-pointer-queue.h"
+#include "mono/utils/mono-mutex.h"
+#ifndef SGEN_WITHOUT_MONO
+#include "mono/utils/mono-threads.h"
+#endif
+
+/* Single lock protecting the job queue and both condition variables. */
+static mono_mutex_t lock;
+/* Signalled when new work (a job or idle work) becomes available. */
+static mono_cond_t work_cond;
+/* Signalled when a job finishes or idle work runs out. */
+static mono_cond_t done_cond;
+
+/* The single pool thread (see the assertion in sgen_thread_pool_init). */
+static MonoNativeThreadId thread;
+
+/* Only accessed with the lock held. */
+static SgenPointerQueue job_queue;
+
+static SgenThreadPoolThreadInitFunc thread_init_func;
+static SgenThreadPoolIdleJobFunc idle_job_func;
+static SgenThreadPoolContinueIdleJobFunc continue_idle_job_func;
+
+/* Values for SgenThreadPoolJob.state. */
+enum {
+	STATE_WAITING,
+	STATE_IN_PROGRESS,
+	STATE_DONE
+};
+
+/* Assumes that the lock is held. */
+static SgenThreadPoolJob*
+get_job_and_set_in_progress (void)
+{
+       for (size_t i = 0; i < job_queue.next_slot; ++i) {
+               SgenThreadPoolJob *job = job_queue.data [i];
+               if (job->state == STATE_WAITING) {
+                       job->state = STATE_IN_PROGRESS;
+                       return job;
+               }
+       }
+       return NULL;
+}
+
+/* Assumes that the lock is held. */
+static ssize_t
+find_job_in_queue (SgenThreadPoolJob *job)
+{
+       for (ssize_t i = 0; i < job_queue.next_slot; ++i) {
+               if (job_queue.data [i] == job)
+                       return i;
+       }
+       return -1;
+}
+
+/* Assumes that the lock is held. */
+static void
+remove_job (SgenThreadPoolJob *job)
+{
+       ssize_t index;
+       SGEN_ASSERT (0, job->state == STATE_DONE, "Why are we removing a job that's not done?");
+       index = find_job_in_queue (job);
+       SGEN_ASSERT (0, index >= 0, "Why is the job we're trying to remove not in the queue?");
+       job_queue.data [index] = NULL;
+       sgen_pointer_queue_remove_nulls (&job_queue);
+       sgen_thread_pool_job_free (job);
+}
+
+/* TRUE if the idle job still has work to do; FALSE when no idle function was registered. */
+static gboolean
+continue_idle_job (void)
+{
+	return continue_idle_job_func ? continue_idle_job_func () : FALSE;
+}
+
+/*
+ * Body of the single pool thread: run client init, then loop forever
+ * alternating between queued jobs and idle work, sleeping on work_cond
+ * when there is neither.  The lock is held except while running job or
+ * idle functions.
+ */
+static mono_native_thread_return_t
+thread_func (void *thread_data)
+{
+	thread_init_func (thread_data);
+
+	mono_mutex_lock (&lock);
+	for (;;) {
+		/*
+		 * It's important that we check the continue idle flag with the lock held.
+		 * Suppose we didn't check with the lock held, and the result is FALSE.  The
+		 * main thread might then set continue idle and signal us before we can take
+		 * the lock, and we'd lose the signal.
+		 */
+		gboolean do_idle = continue_idle_job ();
+		SgenThreadPoolJob *job = get_job_and_set_in_progress ();
+
+		if (!job && !do_idle) {
+			/*
+			 * pthread_cond_wait() can return successfully despite the condition
+			 * not being signalled, so we have to run this in a loop until we
+			 * really have work to do.
+			 */
+			mono_cond_wait (&work_cond, &lock);
+			continue;
+		}
+
+		/* Drop the lock while running the job or idle function. */
+		mono_mutex_unlock (&lock);
+
+		if (job) {
+			job->func (thread_data, job);
+
+			mono_mutex_lock (&lock);
+
+			SGEN_ASSERT (0, job->state == STATE_IN_PROGRESS, "The job should still be in progress.");
+			job->state = STATE_DONE;
+			remove_job (job);
+			/*
+			 * Only the main GC thread will ever wait on the done condition, so we don't
+			 * have to broadcast.
+			 */
+			mono_cond_signal (&done_cond);
+		} else {
+			SGEN_ASSERT (0, do_idle, "Why did we unlock if we still have to wait for idle?");
+			SGEN_ASSERT (0, idle_job_func, "Why do we have idle work when there's no idle job function?");
+			/* Keep doing idle work (unlocked) until it runs out or a job is enqueued. */
+			do {
+				idle_job_func (thread_data);
+				do_idle = continue_idle_job ();
+			} while (do_idle && !job_queue.next_slot);
+
+			mono_mutex_lock (&lock);
+
+			if (!do_idle)
+				mono_cond_signal (&done_cond);
+		}
+	}
+
+	/* NOTE(review): unreachable -- the loop above never exits. */
+	return 0;
+}
+
+/*
+ * Initialize the pool and start its (single) worker thread.
+ * init_func runs first on the new thread; idle_func/continue_idle_func may
+ * be NULL if there is no idle work.  thread_datas, if non-NULL, supplies
+ * the per-thread argument passed to all callbacks.
+ */
+void
+sgen_thread_pool_init (int num_threads, SgenThreadPoolThreadInitFunc init_func, SgenThreadPoolIdleJobFunc idle_func, SgenThreadPoolContinueIdleJobFunc continue_idle_func, void **thread_datas)
+{
+	SGEN_ASSERT (0, num_threads == 1, "We only support 1 thread pool thread for now.");
+
+	mono_mutex_init (&lock);
+	mono_cond_init (&work_cond, NULL);
+	mono_cond_init (&done_cond, NULL);
+
+	thread_init_func = init_func;
+	idle_job_func = idle_func;
+	continue_idle_job_func = continue_idle_func;
+
+	mono_native_thread_create (&thread, thread_func, thread_datas ? thread_datas [0] : NULL);
+}
+
+/* Allocate a job of SIZE bytes (including the SgenThreadPoolJob header) in the
+ * WAITING state.  The caller either enqueues it or frees it with
+ * sgen_thread_pool_job_free. */
+SgenThreadPoolJob*
+sgen_thread_pool_job_alloc (const char *name, SgenThreadPoolJobFunc func, size_t size)
+{
+	SgenThreadPoolJob *new_job = sgen_alloc_internal_dynamic (size, INTERNAL_MEM_THREAD_POOL_JOB, TRUE);
+	new_job->state = STATE_WAITING;
+	new_job->func = func;
+	new_job->name = name;
+	new_job->size = size;
+	return new_job;
+}
+
+/* Free a job that is not (or no longer) enqueued. */
+void
+sgen_thread_pool_job_free (SgenThreadPoolJob *job)
+{
+	sgen_free_internal_dynamic (job, job->size, INTERNAL_MEM_THREAD_POOL_JOB);
+}
+
+/* Queue JOB for execution on the pool thread and wake it up.
+ * Ownership of JOB passes to the pool; it is freed after it finishes. */
+void
+sgen_thread_pool_job_enqueue (SgenThreadPoolJob *job)
+{
+	mono_mutex_lock (&lock);
+
+	sgen_pointer_queue_add (&job_queue, job);
+	/*
+	 * FIXME: We could check whether there is a job in progress.  If there is, there's
+	 * no need to signal the condition, at least as long as we have only one thread.
+	 */
+	mono_cond_signal (&work_cond);
+
+	mono_mutex_unlock (&lock);
+}
+
+/* Block until JOB (which must have been enqueued) has finished and been
+ * removed from the queue. */
+void
+sgen_thread_pool_job_wait (SgenThreadPoolJob *job)
+{
+	SGEN_ASSERT (0, job, "Where's the job?");
+
+	mono_mutex_lock (&lock);
+
+	/* Finished jobs are removed from the queue, so absence means done. */
+	while (find_job_in_queue (job) >= 0)
+		mono_cond_wait (&done_cond, &lock);
+
+	mono_mutex_unlock (&lock);
+}
+
+/* Wake the pool thread if the client's continue-idle predicate says there
+ * is idle work to do. */
+void
+sgen_thread_pool_idle_signal (void)
+{
+	SGEN_ASSERT (0, idle_job_func, "Why are we signaling idle without an idle function?");
+
+	mono_mutex_lock (&lock);
+
+	if (continue_idle_job_func ())
+		mono_cond_signal (&work_cond);
+
+	mono_mutex_unlock (&lock);
+}
+
+/* Block until the idle job reports no more work (continue-idle returns FALSE). */
+void
+sgen_thread_pool_idle_wait (void)
+{
+	SGEN_ASSERT (0, idle_job_func, "Why are we waiting for idle without an idle function?");
+
+	mono_mutex_lock (&lock);
+
+	while (continue_idle_job_func ())
+		mono_cond_wait (&done_cond, &lock);
+
+	mono_mutex_unlock (&lock);
+}
+
+/* Block until the job queue is empty, i.e. all enqueued jobs have finished. */
+void
+sgen_thread_pool_wait_for_all_jobs (void)
+{
+	mono_mutex_lock (&lock);
+
+	while (!sgen_pointer_queue_is_empty (&job_queue))
+		mono_cond_wait (&done_cond, &lock);
+
+	mono_mutex_unlock (&lock);
+}
+
+/* TRUE if SOME_THREAD is the pool's (single) worker thread.
+ * NOTE(review): compares MonoNativeThreadId with `==` -- assumes the id is a
+ * scalar type on all supported platforms; confirm or use an equality helper. */
+gboolean
+sgen_thread_pool_is_thread_pool_thread (MonoNativeThreadId some_thread)
+{
+	return some_thread == thread;
+}
+
+#endif
diff --git a/mono/sgen/sgen-thread-pool.h b/mono/sgen/sgen-thread-pool.h
new file mode 100644 (file)
index 0000000..4dcb3a9
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * sgen-thread-pool.h: Threadpool for all concurrent GC work.
+ *
+ * Copyright (C) 2015 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MONO_SGEN_THREAD_POOL_H__
+#define __MONO_SGEN_THREAD_POOL_H__
+
+typedef struct _SgenThreadPoolJob SgenThreadPoolJob;
+
+typedef void (*SgenThreadPoolJobFunc) (void *thread_data, SgenThreadPoolJob *job);
+
+/* A unit of work for the pool thread.  Allocated with
+ * sgen_thread_pool_job_alloc; SIZE records the total allocation size so the
+ * job can be freed; STATE holds a job-state constant (see sgen-thread-pool.c). */
+struct _SgenThreadPoolJob {
+	const char *name;	/* identifies the job; the string is not copied */
+	SgenThreadPoolJobFunc func;
+	size_t size;		/* total allocated size of this job struct */
+	volatile gint32 state;
+};
+
+typedef void (*SgenThreadPoolThreadInitFunc) (void*);
+typedef void (*SgenThreadPoolIdleJobFunc) (void*);
+typedef gboolean (*SgenThreadPoolContinueIdleJobFunc) (void);
+
+void sgen_thread_pool_init (int num_threads, SgenThreadPoolThreadInitFunc init_func, SgenThreadPoolIdleJobFunc idle_func, SgenThreadPoolContinueIdleJobFunc continue_idle_func, void **thread_datas);
+
+SgenThreadPoolJob* sgen_thread_pool_job_alloc (const char *name, SgenThreadPoolJobFunc func, size_t size);
+/* This only needs to be called on jobs that are not enqueued. */
+void sgen_thread_pool_job_free (SgenThreadPoolJob *job);
+
+void sgen_thread_pool_job_enqueue (SgenThreadPoolJob *job);
+/* This must only be called after the job has been enqueued. */
+void sgen_thread_pool_job_wait (SgenThreadPoolJob *job);
+
+void sgen_thread_pool_idle_signal (void);
+void sgen_thread_pool_idle_wait (void);
+
+void sgen_thread_pool_wait_for_all_jobs (void);
+
+gboolean sgen_thread_pool_is_thread_pool_thread (MonoNativeThreadId thread);
+
+#endif
diff --git a/mono/sgen/sgen-workers.c b/mono/sgen/sgen-workers.c
new file mode 100644 (file)
index 0000000..53eedc0
--- /dev/null
@@ -0,0 +1,400 @@
+/*
+ * sgen-workers.c: Worker threads for parallel and concurrent GC.
+ *
+ * Copyright 2001-2003 Ximian, Inc
+ * Copyright 2003-2010 Novell, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#include <string.h>
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-workers.h"
+#include "mono/sgen/sgen-thread-pool.h"
+#include "mono/utils/mono-membar.h"
+#include "mono/sgen/sgen-client.h"
+
+static int workers_num;
+/* One WorkerData per worker; allocated in sgen_workers_init. */
+static WorkerData *workers_data;
+
+/* Queue from which workers steal gray sections during concurrent marking. */
+static SgenSectionGrayQueue workers_distribute_gray_queue;
+static gboolean workers_distribute_gray_queue_inited;
+
+/*
+ * Allowed transitions:
+ *
+ * | from \ to          | NOT WORKING | WORKING | WORK ENQUEUED | NURSERY COLLECTION |
+ * |--------------------+-------------+---------+---------------+--------------------|
+ * | NOT WORKING        | -           | -       | main          | main               |
+ * | WORKING            | worker      | -       | main          | main               |
+ * | WORK ENQUEUED      | -           | worker  | -             | main               |
+ * | NURSERY COLLECTION | -           | -       | main          | -                  |
+ *
+ * The WORK ENQUEUED state guarantees that the worker thread will inspect the queue again at
+ * least once.  Only after looking at the queue will it go back to WORKING, and then,
+ * eventually, to NOT WORKING.  After enqueuing work the main thread transitions the state
+ * to WORK ENQUEUED.  Signalling the worker thread to wake up is only necessary if the old
+ * state was NOT WORKING.
+ */
+
+/* NOTE(review): this declares a file-scope variable of the anonymous enum
+ * type, not a typedef -- presumably only to give the constants a name in
+ * debuggers; confirm this is intentional. */
+enum {
+	STATE_NOT_WORKING,
+	STATE_WORKING,
+	STATE_WORK_ENQUEUED,
+	STATE_NURSERY_COLLECTION
+} WorkersStateName;
+
+typedef gint32 State;
+
+static volatile State workers_state;
+
+/* Object operations used by the idle (marking) job; set before starting workers. */
+static SgenObjectOperations * volatile idle_func_object_ops;
+
+static guint64 stat_workers_num_finished;
+
+/* Atomically transition workers_state from OLD_STATE to NEW_STATE.
+ * Returns FALSE if the state changed concurrently (CAS failed).
+ * Asserts that the transition is one allowed by the table above. */
+static gboolean
+set_state (State old_state, State new_state)
+{
+	SGEN_ASSERT (0, old_state != new_state, "Why are we transitioning to the same state?");
+	if (new_state == STATE_NOT_WORKING)
+		SGEN_ASSERT (0, old_state == STATE_WORKING, "We can only transition to NOT WORKING from WORKING");
+	else if (new_state == STATE_WORKING)
+		SGEN_ASSERT (0, old_state == STATE_WORK_ENQUEUED, "We can only transition to WORKING from WORK ENQUEUED");
+	if (new_state == STATE_NOT_WORKING || new_state == STATE_WORKING)
+		SGEN_ASSERT (6, sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ()), "Only the worker thread is allowed to transition to NOT_WORKING or WORKING");
+
+	return InterlockedCompareExchange (&workers_state, new_state, old_state) == old_state;
+}
+
+/* Assert helper: STATE must be NURSERY COLLECTION. */
+static void
+assert_nursery_collection (State state)
+{
+	SGEN_ASSERT (0, state == STATE_NURSERY_COLLECTION, "Must be in the nursery collection state");
+}
+
+/* TRUE while the worker has (or will shortly pick up) work to do. */
+static gboolean
+state_is_working_or_enqueued (State state)
+{
+	switch (state) {
+	case STATE_WORKING:
+	case STATE_WORK_ENQUEUED:
+		return TRUE;
+	default:
+		return FALSE;
+	}
+}
+
+/* Move workers_state to WORK ENQUEUED (CAS loop) and wake the worker if it
+ * was not already working.  FROM_NURSERY_COLLECTION selects which state
+ * invariants are asserted. */
+static void
+workers_signal_enqueue_work (gboolean from_nursery_collection)
+{
+	State old_state;
+	gboolean did_set_state;
+
+	do {
+		old_state = workers_state;
+
+		if (from_nursery_collection)
+			assert_nursery_collection (old_state);
+		else
+			SGEN_ASSERT (0, old_state != STATE_NURSERY_COLLECTION, "If we're not in a nursery collection, how come the state is NURSERY COLLECTION?");
+
+		/* Already enqueued: the worker is guaranteed to look at the queue again. */
+		if (old_state == STATE_WORK_ENQUEUED)
+			break;
+
+		did_set_state = set_state (old_state, STATE_WORK_ENQUEUED);
+		if (from_nursery_collection)
+			SGEN_ASSERT (0, did_set_state, "Nobody else should be mutating the state");
+	} while (!did_set_state);
+
+	/* Only a sleeping worker needs a wake-up signal (see the state table). */
+	if (!state_is_working_or_enqueued (old_state))
+		sgen_thread_pool_idle_signal ();
+}
+
+/* Public wrapper: make sure the worker is (or will be) running idle work. */
+void
+sgen_workers_ensure_awake (void)
+{
+	SGEN_ASSERT (0, workers_state != STATE_NURSERY_COLLECTION, "Can't wake workers during nursery collection");
+	workers_signal_enqueue_work (FALSE);
+}
+
+/* Called by the worker when it has run out of work: try to transition
+ * WORKING -> NOT WORKING.  Bails out (without changing state) if the main
+ * thread meanwhile enqueued work or started a nursery collection. */
+static void
+worker_try_finish (void)
+{
+	State old_state;
+
+	++stat_workers_num_finished;
+
+	do {
+		old_state = workers_state;
+
+		SGEN_ASSERT (0, old_state != STATE_NOT_WORKING, "How did we get from doing idle work to NOT WORKING without setting it ourselves?");
+		if (old_state == STATE_NURSERY_COLLECTION)
+			return;
+		if (old_state == STATE_WORK_ENQUEUED)
+			return;
+		SGEN_ASSERT (0, old_state == STATE_WORKING, "What other possibility is there?");
+
+		/* We are the last thread to go to sleep. */
+	} while (!set_state (old_state, STATE_NOT_WORKING));
+}
+
+/* Workers are only used for concurrent collections. */
+static gboolean
+collection_needs_workers (void)
+{
+	return sgen_collection_is_concurrent ();
+}
+
+/* Hand JOB to the worker pool, or run it synchronously on this thread when
+ * the current collection does not use workers. */
+void
+sgen_workers_enqueue_job (SgenThreadPoolJob *job)
+{
+	if (!collection_needs_workers ()) {
+		job->func (NULL, job);
+		sgen_thread_pool_job_free (job);
+		return;
+	}
+
+	sgen_thread_pool_job_enqueue (job);
+}
+
+/* Wait until every enqueued job has run, then re-trigger the idle task. */
+void
+sgen_workers_wait_for_jobs_finished (void)
+{
+	sgen_thread_pool_wait_for_all_jobs ();
+	/*
+	 * If the idle task was never triggered or it finished before the last job did and
+	 * then didn't get triggered again, we might end up in the situation of having
+	 * something in the gray queue yet the idle task not working.  The easiest way to
+	 * make sure this doesn't stay that way is to just trigger it again after all jobs
+	 * have finished.
+	 */
+	sgen_workers_ensure_awake ();
+}
+
+/* Transition to NURSERY COLLECTION and block until the worker has stopped
+ * doing idle work, so a nursery collection can proceed safely. */
+void
+sgen_workers_signal_start_nursery_collection_and_wait (void)
+{
+	State old_state;
+
+	do {
+		old_state = workers_state;
+
+		if (old_state != STATE_NOT_WORKING)
+			SGEN_ASSERT (0, old_state != STATE_NURSERY_COLLECTION, "Why are we transitioning to NURSERY COLLECTION when we're already there?");
+	} while (!set_state (old_state, STATE_NURSERY_COLLECTION));
+
+	sgen_thread_pool_idle_wait ();
+
+	assert_nursery_collection (workers_state);
+}
+
+/* Leave the NURSERY COLLECTION state by re-enqueueing work for the worker. */
+void
+sgen_workers_signal_finish_nursery_collection (void)
+{
+	assert_nursery_collection (workers_state);
+	workers_signal_enqueue_work (TRUE);
+}
+
+/* Try to refill DATA's (empty) private gray queue by stealing a section from
+ * the shared distribute queue.  Returns TRUE if work was obtained. */
+static gboolean
+workers_get_work (WorkerData *data)
+{
+	SgenMajorCollector *major;
+
+	g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
+
+	/* If we're concurrent, steal from the workers distribute gray queue. */
+	major = sgen_get_major_collector ();
+	if (major->is_concurrent) {
+		GrayQueueSection *section = sgen_section_gray_queue_dequeue (&workers_distribute_gray_queue);
+		if (section) {
+			sgen_gray_object_enqueue_section (&data->private_gray_queue, section);
+			return TRUE;
+		}
+	}
+
+	/* Nobody to steal from */
+	g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
+	return FALSE;
+}
+
+/* Debug hook run on every enqueued object during concurrent marking:
+ * it must be a non-nursery object with a valid vtable. */
+static void
+concurrent_enqueue_check (char *obj)
+{
+	g_assert (sgen_concurrent_collection_in_progress ());
+	g_assert (!sgen_ptr_in_nursery (obj));
+	g_assert (SGEN_LOAD_VTABLE (obj));
+}
+
+/* Initialize a worker's private gray queue, installing the enqueue-check
+ * hook only for concurrent major collectors. */
+static void
+init_private_gray_queue (WorkerData *data)
+{
+	sgen_gray_object_queue_init (&data->private_gray_queue,
+			sgen_get_major_collector ()->is_concurrent ? concurrent_enqueue_check : NULL);
+}
+
+/* Per-thread init run by the pool thread: register with the client, and set
+ * up the private gray queue when the major collector is concurrent. */
+static void
+thread_pool_init_func (void *data_untyped)
+{
+	WorkerData *data = data_untyped;
+	SgenMajorCollector *major = sgen_get_major_collector ();
+
+	sgen_client_thread_register_worker ();
+
+	if (!major->is_concurrent)
+		return;
+
+	init_private_gray_queue (data);
+}
+
+/* Thread-pool predicate: keep running the idle (marking) job while there is work. */
+static gboolean
+continue_idle_func (void)
+{
+	return state_is_working_or_enqueued (workers_state);
+}
+
+/* The pool's idle job: drain a slice of the worker's gray queue (refilling
+ * it from the shared queue as needed), or finish if no work remains. */
+static void
+marker_idle_func (void *data_untyped)
+{
+	WorkerData *data = data_untyped;
+
+	if (!continue_idle_func ())
+		return;
+
+	SGEN_ASSERT (0, sgen_concurrent_collection_in_progress (), "The worker should only mark in concurrent collections.");
+	SGEN_ASSERT (0, sgen_get_current_collection_generation () != GENERATION_NURSERY, "Why are we doing work while there's a nursery collection happening?");
+
+	/* Acknowledge newly enqueued work by moving WORK ENQUEUED -> WORKING. */
+	if (workers_state == STATE_WORK_ENQUEUED) {
+		set_state (STATE_WORK_ENQUEUED, STATE_WORKING);
+		SGEN_ASSERT (0, workers_state != STATE_NOT_WORKING, "How did we get from WORK ENQUEUED to NOT WORKING?");
+	}
+
+	if (!sgen_gray_object_queue_is_empty (&data->private_gray_queue) || workers_get_work (data)) {
+		ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (idle_func_object_ops, &data->private_gray_queue);
+
+		SGEN_ASSERT (0, !sgen_gray_object_queue_is_empty (&data->private_gray_queue), "How is our gray queue empty if we just got work?");
+
+		/* Drain only a bounded slice so we re-check state regularly. */
+		sgen_drain_gray_stack (32, ctx);
+	} else {
+		worker_try_finish ();
+	}
+}
+
+/* Initialize the shared distribute gray queue once; on subsequent calls only
+ * assert that it is empty and locked. */
+static void
+init_distribute_gray_queue (void)
+{
+	if (workers_distribute_gray_queue_inited) {
+		g_assert (sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue));
+		g_assert (workers_distribute_gray_queue.locked);
+		return;
+	}
+
+	sgen_section_gray_queue_init (&workers_distribute_gray_queue, TRUE,
+			sgen_get_major_collector ()->is_concurrent ? concurrent_enqueue_check : NULL);
+	workers_distribute_gray_queue_inited = TRUE;
+}
+
+/* Public entry point for initializing the distribute gray queue; only valid
+ * for concurrent collections that actually use workers. */
+void
+sgen_workers_init_distribute_gray_queue (void)
+{
+	SGEN_ASSERT (0, sgen_get_major_collector ()->is_concurrent && collection_needs_workers (),
+			"Why should we init the distribute gray queue if we don't need it?");
+	init_distribute_gray_queue ();
+}
+
+/* Set up worker state and start the underlying thread pool.  For
+ * non-concurrent major collectors the pool is started without an idle job. */
+void
+sgen_workers_init (int num_workers)
+{
+	int i;
+	/* NOTE(review): C99 variable-length array -- fine here since num_workers is small. */
+	void *workers_data_ptrs [num_workers];
+
+	if (!sgen_get_major_collector ()->is_concurrent) {
+		sgen_thread_pool_init (num_workers, thread_pool_init_func, NULL, NULL, NULL);
+		return;
+	}
+
+	//g_print ("initing %d workers\n", num_workers);
+
+	workers_num = num_workers;
+
+	workers_data = sgen_alloc_internal_dynamic (sizeof (WorkerData) * num_workers, INTERNAL_MEM_WORKER_DATA, TRUE);
+	memset (workers_data, 0, sizeof (WorkerData) * num_workers);
+
+	init_distribute_gray_queue ();
+
+	for (i = 0; i < workers_num; ++i)
+		workers_data_ptrs [i] = &workers_data [i];
+
+	sgen_thread_pool_init (num_workers, thread_pool_init_func, marker_idle_func, continue_idle_func, workers_data_ptrs);
+
+	mono_counters_register ("# workers finished", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_workers_num_finished);
+}
+
+/* Publish the object operations for the idle job (with a write barrier so
+ * the worker sees them) and wake the workers. */
+void
+sgen_workers_start_all_workers (SgenObjectOperations *object_ops)
+{
+	if (!collection_needs_workers ())
+		return;
+
+	idle_func_object_ops = object_ops;
+	mono_memory_write_barrier ();
+
+	workers_signal_enqueue_work (FALSE);
+}
+
+/* Wait for all jobs and idle work to finish; on return all worker gray
+ * queues and the distribute queue are empty. */
+void
+sgen_workers_join (void)
+{
+	int i;
+
+	SGEN_ASSERT (0, workers_state != STATE_NURSERY_COLLECTION, "Can't be in nursery collection when joining");
+
+	if (!collection_needs_workers ())
+		return;
+
+	sgen_thread_pool_wait_for_all_jobs ();
+	sgen_thread_pool_idle_wait ();
+	SGEN_ASSERT (0, workers_state == STATE_NOT_WORKING, "Can only signal enqueue work when in no work state");
+
+	/* At this point all the workers have stopped. */
+
+	SGEN_ASSERT (0, sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue), "Why is there still work left to do?");
+	for (i = 0; i < workers_num; ++i)
+		SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (&workers_data [i].private_gray_queue), "Why is there still work left to do?");
+}
+
+/* TRUE once the worker has gone back to the NOT WORKING state. */
+gboolean
+sgen_workers_all_done (void)
+{
+	return workers_state == STATE_NOT_WORKING;
+}
+
+/* Must only be used for debugging */
+/* TRUE while the worker is working or has work enqueued. */
+gboolean
+sgen_workers_are_working (void)
+{
+	return state_is_working_or_enqueued (workers_state);
+}
+
+/* Block until the worker's idle work is done. */
+void
+sgen_workers_wait (void)
+{
+	sgen_thread_pool_idle_wait ();
+	SGEN_ASSERT (0, sgen_workers_all_done (), "Why are the workers not done after we wait for them?");
+}
+
+/* Accessor for the shared distribute gray queue. */
+SgenSectionGrayQueue*
+sgen_workers_get_distribute_section_gray_queue (void)
+{
+	return &workers_distribute_gray_queue;
+}
+
+#endif
diff --git a/mono/sgen/sgen-workers.h b/mono/sgen/sgen-workers.h
new file mode 100644 (file)
index 0000000..82cf26d
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * sgen-workers.h: Worker threads for parallel and concurrent GC.
+ *
+ * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MONO_SGEN_WORKER_H__
+#define __MONO_SGEN_WORKER_H__
+
+#include "mono/sgen/sgen-thread-pool.h"
+
+/* Per-worker state; one instance per worker thread. */
+typedef struct _WorkerData WorkerData;
+struct _WorkerData {
+       SgenGrayQueue private_gray_queue; /* only read/written by worker thread */
+};
+
+void sgen_workers_init (int num_workers);
+void sgen_workers_start_all_workers (SgenObjectOperations *object_ops);
+void sgen_workers_ensure_awake (void);
+void sgen_workers_init_distribute_gray_queue (void);
+void sgen_workers_enqueue_job (SgenThreadPoolJob *job);
+void sgen_workers_wait_for_jobs_finished (void);
+void sgen_workers_distribute_gray_queue_sections (void);
+void sgen_workers_reset_data (void);
+void sgen_workers_join (void);
+gboolean sgen_workers_all_done (void);
+gboolean sgen_workers_are_working (void);
+void sgen_workers_wait (void);
+SgenSectionGrayQueue* sgen_workers_get_distribute_section_gray_queue (void);
+
+/* Nursery-collection handshake between the collector and the workers. */
+void sgen_workers_signal_start_nursery_collection_and_wait (void);
+void sgen_workers_signal_finish_nursery_collection (void);
+
+#endif
index aee3344f537e96931d4d2aca00ed611b773af01a..ce9958e5946a0b393e375257ecea4a24e9596c25 100644 (file)
@@ -5,7 +5,7 @@
 /*.la
 /*.lo
 /*.trs
-/test-gc-memfuncs
+/test-memfuncs
 /test-mono-linked-list-set
 /test-sgen-qsort
 /test-conc-hashtable
index 50cec63f24ea8fd10d72f34c9dcf08e8bc8ccd04..8ff5eef6684851b2e7e30b3fac8c652339d87e93 100644 (file)
@@ -1,7 +1,7 @@
 AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/mono $(LIBGC_CPPFLAGS) $(GLIB_CFLAGS) -DMONO_BINDIR=\"$(bindir)/\" -DMONO_ASSEMBLIES=\"$(assembliesdir)\" -DMONO_CFG_DIR=\"$(confdir)\"
 
 test_cflags = $(AM_CFLAGS) $(SGEN_DEFINES)
-test_ldadd = ../metadata/libmonoruntimesgen.la ../io-layer/libwapi.la ../utils/libmonoutils.la \
+test_ldadd = ../metadata/libmonoruntimesgen.la ../sgen/libmonosgen.la ../io-layer/libwapi.la ../utils/libmonoutils.la \
        $(LIBGC_LIBS) $(GLIB_LIBS) -lm $(LIBICONV)
 if PLATFORM_DARWIN
 test_ldflags = -framework CoreFoundation -framework Foundation
@@ -18,10 +18,10 @@ test_sgen_qsort_CFLAGS = $(test_cflags)
 test_sgen_qsort_LDADD = $(test_ldadd)
 test_sgen_qsort_LDFLAGS = $(test_ldflags)
 
-test_gc_memfuncs_SOURCES = test-gc-memfuncs.c
-test_gc_memfuncs_CFLAGS = $(test_cflags)
-test_gc_memfuncs_LDADD = $(test_ldadd)
-test_gc_memfuncs_LDFLAGS = $(test_ldflags)
+test_memfuncs_SOURCES = test-memfuncs.c
+test_memfuncs_CFLAGS = $(test_cflags)
+test_memfuncs_LDADD = $(test_ldadd)
+test_memfuncs_LDFLAGS = $(test_ldflags)
 
 test_mono_linked_list_set_SOURCES = test-mono-linked-list-set.c
 test_mono_linked_list_set_CFLAGS = $(test_cflags)
@@ -33,9 +33,9 @@ test_conc_hashtable_CFLAGS = $(test_cflags)
 test_conc_hashtable_LDADD = $(test_ldadd)
 test_conc_hashtable_LDFLAGS = $(test_ldflags)
 
-noinst_PROGRAMS = test-sgen-qsort test-gc-memfuncs test-mono-linked-list-set test-conc-hashtable
+noinst_PROGRAMS = test-sgen-qsort test-memfuncs test-mono-linked-list-set test-conc-hashtable
 
-TESTS = test-sgen-qsort test-gc-memfuncs test-mono-linked-list-set test-conc-hashtable
+TESTS = test-sgen-qsort test-memfuncs test-mono-linked-list-set test-conc-hashtable
 
 endif !PLATFORM_GNU
 endif SUPPORT_BOEHM
diff --git a/mono/unit-tests/test-gc-memfuncs.c b/mono/unit-tests/test-gc-memfuncs.c
deleted file mode 100644 (file)
index 37c6c26..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * test-sgen-qsort.c: Unit test for our own bzero/memmove.
- *
- * Copyright (C) 2013 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "config.h"
-
-#include "metadata/gc-internal.h"
-
-#include <stdlib.h>
-#include <string.h>
-#include <time.h>
-#include <assert.h>
-
-#define POOL_SIZE      2048
-#define START_OFFSET   128
-
-#define BZERO_OFFSETS  64
-#define BZERO_SIZES    256
-
-#define MEMMOVE_SRC_OFFSETS            32
-#define MEMMOVE_DEST_OFFSETS           32
-#define MEMMOVE_SIZES                  256
-#define MEMMOVE_NONOVERLAP_START       1024
-
-int
-main (void)
-{
-       unsigned char *random_mem = malloc (POOL_SIZE);
-       unsigned char *reference = malloc (POOL_SIZE);
-       unsigned char *playground = malloc (POOL_SIZE);
-       long *long_random_mem;
-       int i, offset, size, src_offset, dest_offset;
-
-       srandom (time (NULL));
-
-       /* init random memory */
-       long_random_mem = (long*)random_mem;
-       for (i = 0; i < POOL_SIZE / sizeof (long); ++i)
-               long_random_mem [i] = random ();
-
-       /* test bzero */
-       for (offset = 0; offset <= BZERO_OFFSETS; ++offset) {
-               for (size = 0; size <= BZERO_SIZES; ++size) {
-                       memcpy (reference, random_mem, POOL_SIZE);
-                       memcpy (playground, random_mem, POOL_SIZE);
-
-                       memset (reference + START_OFFSET + offset, 0, size);
-                       mono_gc_bzero_atomic (playground + START_OFFSET + offset, size);
-
-                       assert (!memcmp (reference, playground, POOL_SIZE));
-               }
-       }
-
-       /* test memmove */
-       for (src_offset = -MEMMOVE_SRC_OFFSETS; src_offset <= MEMMOVE_SRC_OFFSETS; ++src_offset) {
-               for (dest_offset = -MEMMOVE_DEST_OFFSETS; dest_offset <= MEMMOVE_DEST_OFFSETS; ++dest_offset) {
-                       for (size = 0; size <= MEMMOVE_SIZES; ++size) {
-                               /* overlapping */
-                               memcpy (reference, random_mem, POOL_SIZE);
-                               memcpy (playground, random_mem, POOL_SIZE);
-
-                               memmove (reference + START_OFFSET + dest_offset, reference + START_OFFSET + src_offset, size);
-                               mono_gc_memmove_atomic (playground + START_OFFSET + dest_offset, playground + START_OFFSET + src_offset, size);
-
-                               assert (!memcmp (reference, playground, POOL_SIZE));
-
-                               /* non-overlapping with dest < src */
-                               memcpy (reference, random_mem, POOL_SIZE);
-                               memcpy (playground, random_mem, POOL_SIZE);
-
-                               memmove (reference + START_OFFSET + dest_offset, reference + MEMMOVE_NONOVERLAP_START + src_offset, size);
-                               mono_gc_memmove_atomic (playground + START_OFFSET + dest_offset, playground + MEMMOVE_NONOVERLAP_START + src_offset, size);
-
-                               assert (!memcmp (reference, playground, POOL_SIZE));
-
-                               /* non-overlapping with dest > src */
-                               memcpy (reference, random_mem, POOL_SIZE);
-                               memcpy (playground, random_mem, POOL_SIZE);
-
-                               memmove (reference + MEMMOVE_NONOVERLAP_START + dest_offset, reference + START_OFFSET + src_offset, size);
-                               mono_gc_memmove_atomic (playground + MEMMOVE_NONOVERLAP_START + dest_offset, playground + START_OFFSET + src_offset, size);
-
-                               assert (!memcmp (reference, playground, POOL_SIZE));
-                       }
-               }
-       }
-
-       return 0;
-}
diff --git a/mono/unit-tests/test-memfuncs.c b/mono/unit-tests/test-memfuncs.c
new file mode 100644 (file)
index 0000000..a2b86bb
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * test-memfuncs.c: Unit test for our own bzero/memmove.
+ *
+ * Copyright (C) 2013 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "config.h"
+
+#include "utils/memfuncs.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <assert.h>
+
+#define POOL_SIZE      2048
+#define START_OFFSET   128
+
+#define BZERO_OFFSETS  64
+#define BZERO_SIZES    256
+
+#define MEMMOVE_SRC_OFFSETS            32
+#define MEMMOVE_DEST_OFFSETS           32
+#define MEMMOVE_SIZES                  256
+#define MEMMOVE_NONOVERLAP_START       1024
+
+int
+main (void)
+{
+       unsigned char *random_mem = malloc (POOL_SIZE);
+       unsigned char *reference = malloc (POOL_SIZE);
+       unsigned char *playground = malloc (POOL_SIZE);
+       long *long_random_mem;
+       int i, offset, size, src_offset, dest_offset;
+
+       srandom (time (NULL));
+
+       /* init random memory */
+       long_random_mem = (long*)random_mem;
+       for (i = 0; i < POOL_SIZE / sizeof (long); ++i)
+               long_random_mem [i] = random ();
+
+       /* test bzero: compare mono_gc_bzero_atomic () against libc memset () over many offset/size combinations */
+       for (offset = 0; offset <= BZERO_OFFSETS; ++offset) {
+               for (size = 0; size <= BZERO_SIZES; ++size) {
+                       memcpy (reference, random_mem, POOL_SIZE);
+                       memcpy (playground, random_mem, POOL_SIZE);
+
+                       memset (reference + START_OFFSET + offset, 0, size);
+                       mono_gc_bzero_atomic (playground + START_OFFSET + offset, size);
+
+                       assert (!memcmp (reference, playground, POOL_SIZE));
+               }
+       }
+
+       /* test memmove: compare mono_gc_memmove_atomic () against libc memmove () */
+       for (src_offset = -MEMMOVE_SRC_OFFSETS; src_offset <= MEMMOVE_SRC_OFFSETS; ++src_offset) {
+               for (dest_offset = -MEMMOVE_DEST_OFFSETS; dest_offset <= MEMMOVE_DEST_OFFSETS; ++dest_offset) {
+                       for (size = 0; size <= MEMMOVE_SIZES; ++size) {
+                               /* overlapping */
+                               memcpy (reference, random_mem, POOL_SIZE);
+                               memcpy (playground, random_mem, POOL_SIZE);
+
+                               memmove (reference + START_OFFSET + dest_offset, reference + START_OFFSET + src_offset, size);
+                               mono_gc_memmove_atomic (playground + START_OFFSET + dest_offset, playground + START_OFFSET + src_offset, size);
+
+                               assert (!memcmp (reference, playground, POOL_SIZE));
+
+                               /* non-overlapping with dest < src */
+                               memcpy (reference, random_mem, POOL_SIZE);
+                               memcpy (playground, random_mem, POOL_SIZE);
+
+                               memmove (reference + START_OFFSET + dest_offset, reference + MEMMOVE_NONOVERLAP_START + src_offset, size);
+                               mono_gc_memmove_atomic (playground + START_OFFSET + dest_offset, playground + MEMMOVE_NONOVERLAP_START + src_offset, size);
+
+                               assert (!memcmp (reference, playground, POOL_SIZE));
+
+                               /* non-overlapping with dest > src */
+                               memcpy (reference, random_mem, POOL_SIZE);
+                               memcpy (playground, random_mem, POOL_SIZE);
+
+                               memmove (reference + MEMMOVE_NONOVERLAP_START + dest_offset, reference + START_OFFSET + src_offset, size);
+                               mono_gc_memmove_atomic (playground + MEMMOVE_NONOVERLAP_START + dest_offset, playground + START_OFFSET + src_offset, size);
+
+                               assert (!memcmp (reference, playground, POOL_SIZE));
+                       }
+               }
+       }
+
+       return 0;
+}
index 2ee1afa95c9e755aba6715a69e9f08c942c9da78..40ca64cb01f1d64db54abc270b47d5c2f652aa07 100644 (file)
@@ -19,8 +19,8 @@
 
 #include "config.h"
 
-#include <metadata/sgen-gc.h>
-#include <metadata/sgen-qsort.h>
+#include <sgen/sgen-gc.h>
+#include <sgen/sgen-qsort.h>
 
 #include <stdlib.h>
 #include <string.h>
index f5cb440f46861bcf9bffcebc96dadff9c66d4707..f0a09cb3868151620006a3de3543f6b31c0b48bf 100644 (file)
@@ -135,7 +135,11 @@ monoutils_sources = \
        networking-windows.c    \
        networking.h    \
        mono-rand.c     \
-       mono-rand.h
+       mono-rand.h \
+       memfuncs.c \
+       memfuncs.h \
+       parse.c \
+       parse.h
 
 arch_sources = 
 
diff --git a/mono/utils/memfuncs.c b/mono/utils/memfuncs.c
new file mode 100644 (file)
index 0000000..c2a39d2
--- /dev/null
@@ -0,0 +1,232 @@
+/*
+ * memfuncs.c: Our own bzero/memmove.
+ *
+ * Copyright (C) 2013-2015 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * SGen cannot deal with invalid pointers on the heap or in registered roots.  Sometimes we
+ * need to copy or zero out memory in code that might be interrupted by collections.  To
+ * guarantee that those operations will not result in invalid pointers, we must do it
+ * word-atomically.
+ *
+ * libc's bzero() and memcpy()/memmove() functions do not guarantee word-atomicity, even in
+ * cases where one would assume so.  For instance, some implementations (like Darwin's on
+ * x86) have variants of memcpy() using vector instructions.  Those may copy bytewise for
+ * the region preceding the first vector-aligned address.  That region could be
+ * word-aligned, but it would still be copied byte-wise.
+ *
+ * All our memory writes here are to "volatile" locations.  This is so that C compilers
+ * don't "optimize" our code back to calls to bzero()/memmove().  LLVM, specifically, will
+ * do that.
+ */
+
+#include <config.h>
+#include <glib.h>
+#include <string.h>
+
+#include "memfuncs.h"
+
+#define ptr_mask ((sizeof (void*) - 1))
+#define _toi(ptr) ((size_t)ptr)
+#define unaligned_bytes(ptr) (_toi(ptr) & ptr_mask)
+#define align_down(ptr) ((void*)(_toi(ptr) & ~ptr_mask))
+#define align_up(ptr) ((void*) ((_toi(ptr) + ptr_mask) & ~ptr_mask))
+#if SIZEOF_VOID_P == 4
+#define bytes_to_words(n)      ((size_t)(n) >> 2)
+#elif SIZEOF_VOID_P == 8
+#define bytes_to_words(n)      ((size_t)(n) >> 3)
+#else
+#error We only support 32 and 64 bit architectures.
+#endif
+
+/* Store NULL into `words` consecutive pointer slots at `dest`.  The volatile
+ * stores keep compilers (notably LLVM) from lowering the loop back to memset(). */
+#define BZERO_WORDS(dest,words) do {                   \
+               void * volatile *__d = (void* volatile*)(dest);         \
+               int __n = (words);                      \
+               int __i;                                \
+               for (__i = 0; __i < __n; ++__i)         \
+                       __d [__i] = NULL;               \
+       } while (0)
+
+
+/**
+ * mono_gc_bzero_aligned:
+ * @dest: address to start to clear
+ * @size: size of the region to clear
+ *
+ * Zero @size bytes starting at @dest.
+ * The address of @dest MUST be aligned to word boundaries
+ *
+ * FIXME borrow faster code from some BSD libc or bionic
+ */
+void
+mono_gc_bzero_aligned (void *dest, size_t size)
+{
+       volatile char *d = (char*)dest;
+       size_t tail_bytes, word_bytes;
+
+       g_assert (unaligned_bytes (dest) == 0);
+
+       /* zero all whole words; common small sizes get unrolled cases */
+       word_bytes = (size_t)align_down (size);
+       switch (word_bytes) {
+       case sizeof (void*) * 1:
+               BZERO_WORDS (d, 1);
+               break;
+       case sizeof (void*) * 2:
+               BZERO_WORDS (d, 2);
+               break;
+       case sizeof (void*) * 3:
+               BZERO_WORDS (d, 3);
+               break;
+       case sizeof (void*) * 4:
+               BZERO_WORDS (d, 4);
+               break;
+       default:
+               BZERO_WORDS (d, bytes_to_words (word_bytes));
+       }
+
+       /* zero the sub-word tail byte-by-byte (no pointer can live there) */
+       tail_bytes = unaligned_bytes (size);
+       if (tail_bytes) {
+               d += word_bytes;
+               do {
+                       *d++ = 0;
+               } while (--tail_bytes);
+       }
+}
+
+/**
+ * mono_gc_bzero_atomic:
+ * @dest: address to start to clear
+ * @size: size of the region to clear
+ *
+ * Zero @size bytes starting at @dest.
+ *
+ * Zeroes word-at-a-time (no word tearing) when @dest is word-aligned;
+ * otherwise falls back to plain memset ().
+ */
+void
+mono_gc_bzero_atomic (void *dest, size_t size)
+{
+       if (unaligned_bytes (dest))
+               memset (dest, 0, size);
+       else
+               mono_gc_bzero_aligned (dest, size);
+}
+
+#define MEMMOVE_WORDS_UPWARD(dest,src,words) do {      \
+               void * volatile *__d = (void* volatile*)(dest);         \
+               void **__s = (void**)(src);             \
+               int __n = (int)(words);                 \
+               int __i;                                \
+               for (__i = 0; __i < __n; ++__i)         \
+                       __d [__i] = __s [__i];          \
+       } while (0)
+
+#define MEMMOVE_WORDS_DOWNWARD(dest,src,words) do {    \
+               void * volatile *__d = (void* volatile*)(dest);         \
+               void **__s = (void**)(src);             \
+               int __n = (int)(words);                 \
+               int __i;                                \
+               for (__i = __n - 1; __i >= 0; --__i)    \
+                       __d [__i] = __s [__i];          \
+       } while (0)
+
+
+/**
+ * mono_gc_memmove_aligned:
+ * @dest: destination of the move
+ * @src: source
+ * @size: size of the block to move
+ *
+ * Move @size bytes from @src to @dest.
+ *
+ * Use this to copy memory without word tearing when both pointers are aligned
+ */void
+mono_gc_memmove_aligned (void *dest, const void *src, size_t size)
+{
+       g_assert (unaligned_bytes (dest) == 0);
+       g_assert (unaligned_bytes (src) == 0);
+
+       /*
+       If we're copying less than a word we don't need to worry about word tearing
+       so we bailout to memmove early.
+       */
+       if (size < sizeof(void*)) {
+               memmove (dest, src, size);
+               return;
+       }
+
+       /*
+        * A bit of explanation on why we align only dest before doing word copies.
+        * Pointers to managed objects must always be stored in word aligned addresses, so
+        * even if dest is misaligned, src will be by the same amount - this ensure proper atomicity of reads.
+        *
+        * We don't need to case when source and destination have different alignments since we only do word stores
+        * using memmove, which must handle it.
+        */
+       if (dest > src && ((size_t)((char*)dest - (char*)src) < size)) { /*backward copy*/
+                       volatile char *p = (char*)dest + size;
+                       char *s = (char*)src + size;
+                       char *start = (char*)dest;
+                       char *align_end = MAX((char*)dest, (char*)align_down (p));
+                       char *word_start;
+                       size_t bytes_to_memmove;
+
+                       while (p > align_end)
+                               *--p = *--s;
+
+                       word_start = align_up (start);
+                       bytes_to_memmove = p - word_start;
+                       p -= bytes_to_memmove;
+                       s -= bytes_to_memmove;
+                       MEMMOVE_WORDS_DOWNWARD (p, s, bytes_to_words (bytes_to_memmove));
+       } else {
+               volatile char *d = (char*)dest;
+               const char *s = (const char*)src;
+               size_t tail_bytes;
+
+               /* copy all words with memmove */
+               MEMMOVE_WORDS_UPWARD (d, s, bytes_to_words (align_down (size)));
+
+               tail_bytes = unaligned_bytes (size);
+               if (tail_bytes) {
+                       d += (size_t)align_down (size);
+                       s += (size_t)align_down (size);
+                       do {
+                               *d++ = *s++;
+                       } while (--tail_bytes);
+               }
+       }
+}
+
+/**
+ * mono_gc_memmove_atomic:
+ * @dest: destination of the move
+ * @src: source
+ * @size: size of the block to move
+ *
+ * Move @size bytes from @src to @dest.
+ *
+ * Copies word-by-word (no word tearing) when both pointers are word-aligned;
+ * otherwise falls back to plain memmove ().
+ */
+void
+mono_gc_memmove_atomic (void *dest, const void *src, size_t size)
+{
+       /* OR-ing the addresses: any low bit set means at least one pointer is unaligned. */
+       if (unaligned_bytes (_toi (dest) | _toi (src)))
+               memmove (dest, src, size);
+       else
+               mono_gc_memmove_aligned (dest, src, size);
+}
diff --git a/mono/utils/memfuncs.h b/mono/utils/memfuncs.h
new file mode 100644 (file)
index 0000000..51a3618
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * memfuncs.h: Our own bzero/memmove.
+ *
+ * Copyright (C) 2015 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MONO_UTILS_MEMFUNCS_H__
+#define __MONO_UTILS_MEMFUNCS_H__
+
+#include <stdlib.h>
+
+/*
+These functions must be used when it's possible that either destination is not
+word aligned or size is not a multiple of word size.
+The *_aligned variants require word-aligned pointers (and assert on it); the
+*_atomic variants accept any alignment and fall back to plain libc calls.
+*/
+void mono_gc_bzero_atomic (void *dest, size_t size);
+void mono_gc_bzero_aligned (void *dest, size_t size);
+void mono_gc_memmove_atomic (void *dest, const void *src, size_t size);
+void mono_gc_memmove_aligned (void *dest, const void *src, size_t size);
+
+#endif
diff --git a/mono/utils/parse.c b/mono/utils/parse.c
new file mode 100644 (file)
index 0000000..0c44c3f
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * parse.c: Parsing for GC options.
+ *
+ * Copyright (C) 2015 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <config.h>
+#include <glib.h>
+#include <string.h>
+#include <errno.h>
+#include <ctype.h>
+#include <stdlib.h>
+
+#include "parse.h"
+
+/**
+ * mono_gc_parse_environment_string_extract_number:
+ *
+ * @str: points to the first digit of the number
+ * @out: pointer to the variable that will receive the value
+ *
+ * Tries to extract a number from the passed string, taking into account m, k
+ * and g suffixes
+ *
+ * Returns true if parsing was successful
+ */
+gboolean
+mono_gc_parse_environment_string_extract_number (const char *str, size_t *out)
+{
+       char *endptr;
+       int len = strlen (str), shift = 0;
+       size_t val;
+       gboolean is_suffix = FALSE;
+       char suffix;
+
+       if (!len)
+               return FALSE;
+
+       suffix = str [len - 1];
+
+       /* Intentional fall-throughs: each level adds another factor of 1024 (10 shift bits). */
+       switch (suffix) {
+               case 'g':
+               case 'G':
+                       shift += 10;
+                       /* fall through */
+               case 'm':
+               case 'M':
+                       shift += 10;
+                       /* fall through */
+               case 'k':
+               case 'K':
+                       shift += 10;
+                       is_suffix = TRUE;
+                       break;
+               default:
+                       if (!isdigit (suffix))
+                               return FALSE;
+                       break;
+       }
+
+       errno = 0;
+       val = strtol (str, &endptr, 10);
+
+       /* NOTE(review): val is size_t but strtol returns long; a negative input like
+        * "-1k" wraps to a huge value — confirm callers never pass negative numbers. */
+       if ((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN))
+                       || (errno != 0 && val == 0) || (endptr == str))
+               return FALSE;
+
+       if (is_suffix) {
+               size_t unshifted;
+
+               /* endptr points at the suffix char; anything after it is garbage. */
+               if (*(endptr + 1)) /* Invalid string. */
+                       return FALSE;
+
+               unshifted = (size_t)val;
+               val <<= shift;
+               /* Shifting back must reproduce the original value, otherwise we overflowed. */
+               if (((size_t)val >> shift) != unshifted) /* value too large */
+                       return FALSE;
+       }
+
+       *out = val;
+       return TRUE;
+}
diff --git a/mono/utils/parse.h b/mono/utils/parse.h
new file mode 100644 (file)
index 0000000..a899908
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * parse.h: Parsing for GC options.
+ *
+ * Copyright (C) 2015 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MONO_UTILS_PARSE_H__
+#define __MONO_UTILS_PARSE_H__
+
+#include <glib.h>
+#include <stdlib.h>
+
+/* Parse a decimal number with an optional k/m/g (or K/M/G) binary suffix into *out.
+ * Returns TRUE on success, FALSE on malformed input or overflow. */
+gboolean mono_gc_parse_environment_string_extract_number (const char *str, size_t *out);
+
+#endif
index 9cda6ae31d916c1537dd3aeb44ff6dc4139a048a..5643e9344471623a80f96341911133e6b91221cb 100644 (file)
@@ -53,7 +53,6 @@
     <ClCompile Include="..\mono\metadata\file-io.c" />\r
     <ClCompile Include="..\mono\metadata\file-mmap-windows.c" />\r
     <ClCompile Include="..\mono\metadata\filewatcher.c" />\r
-    <ClCompile Include="..\mono\metadata\gc-memfuncs.c" />\r
     <ClCompile Include="..\mono\metadata\gc.c" />\r
     <ClCompile Include="..\mono\metadata\icall.c" />\r
     <ClCompile Include="..\mono\metadata\image.c" />\r
     <ClCompile Include="..\mono\metadata\security-core-clr.c" />\r
     <ClCompile Include="..\mono\metadata\security-manager.c" />\r
     <ClCompile Include="..\mono\metadata\mono-security.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-alloc.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-alloc.c" />\r
     <ClCompile Include="..\mono\metadata\sgen-bridge.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-cardtable.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-debug.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-descriptor.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-gc.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-gray.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-hash-table.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-internal.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-los.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-marksweep.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-memory-governor.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-cardtable.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-debug.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-descriptor.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-gc.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-gray.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-hash-table.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-internal.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-los.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-marksweep.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-memory-governor.c" />\r
     <ClCompile Include="..\mono\metadata\sgen-new-bridge.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-nursery-allocator.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-nursery-allocator.c" />\r
     <ClCompile Include="..\mono\metadata\sgen-old-bridge.c" />\r
     <ClCompile Include="..\mono\metadata\sgen-os-mach.c" />\r
     <ClCompile Include="..\mono\metadata\sgen-os-posix.c" />\r
     <ClCompile Include="..\mono\metadata\sgen-os-win32.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-pinning-stats.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-pinning.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-pointer-queue.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-protocol.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-qsort.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-simple-nursery.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-split-nursery.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-pinning-stats.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-pinning.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-pointer-queue.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-protocol.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-qsort.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-simple-nursery.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-split-nursery.c" />\r
     <ClCompile Include="..\mono\metadata\sgen-tarjan-bridge.c" />\r
     <ClCompile Include="..\mono\metadata\sgen-toggleref.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-workers.c" />\r
-    <ClCompile Include="..\mono\metadata\sgen-fin-weak-hash.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-workers.c" />\r
+    <ClCompile Include="..\mono\sgen\sgen-fin-weak-hash.c" />\r
     <ClCompile Include="..\mono\metadata\sgen-stw.c" />\r
     <ClCompile Include="..\mono\metadata\socket-io.c" />\r
     <ClCompile Include="..\mono\metadata\string-icalls.c" />\r
     <ClInclude Include="..\mono\metadata\file-io.h" />\r
     <ClInclude Include="..\mono\metadata\filewatcher.h" />\r
     <ClInclude Include="..\mono\metadata\gc-internal.h" />\r
+    <ClInclude Include="..\mono\sgen\gc-internal-agnostic.h" />\r
     <ClInclude Include="..\mono\metadata\icall-def.h" />\r
     <ClInclude Include="..\mono\metadata\image.h" />\r
     <ClInclude Include="..\mono\metadata\loader.h" />\r
     <ClInclude Include="..\mono\metadata\security-core-clr.h" />\r
     <ClInclude Include="..\mono\metadata\security-manager.h" />\r
     <ClInclude Include="..\mono\metadata\security.h" />\r
-    <ClInclude Include="..\mono\metadata\sgen-archdep.h" />\r
+    <ClInclude Include="..\mono\sgen\sgen-archdep.h" />\r
     <ClInclude Include="..\mono\metadata\sgen-bridge.h" />\r
-    <ClInclude Include="..\mono\metadata\sgen-cardtable.h" />\r
-    <ClInclude Include="..\mono\metadata\sgen-conf.h" />\r
-    <ClInclude Include="..\mono\metadata\sgen-copy-object.h" />\r
-    <ClInclude Include="..\mono\metadata\sgen-descriptor.h" />\r
-    <ClInclude Include="..\mono\metadata\sgen-gc.h" />\r
-    <ClInclude Include="..\mono\metadata\sgen-gray.h" />\r
-    <ClInclude Include="..\mono\metadata\sgen-major-copy-object.h" />\r
-    <ClInclude Include="..\mono\metadata\sgen-major-scan-object.h" />\r
-    <ClInclude Include="..\mono\metadata\sgen-memory-governor.h" />\r
-    <ClInclude Include="..\mono\metadata\sgen-minor-copy-object.h" />\r
-    <ClInclude Include="..\mono\metadata\sgen-minor-scan-object.h" />\r
-    <ClInclude Include="..\mono\metadata\sgen-pinning.h" />\r
-    <ClInclude Include="..\mono\metadata\sgen-protocol.h" />\r
-    <ClInclude Include="..\mono\metadata\sgen-scan-object.h" />\r
+    <ClInclude Include="..\mono\sgen\sgen-cardtable.h" />\r
+    <ClInclude Include="..\mono\sgen\sgen-conf.h" />\r
+    <ClInclude Include="..\mono\sgen\sgen-copy-object.h" />\r
+    <ClInclude Include="..\mono\sgen\sgen-descriptor.h" />\r
+    <ClInclude Include="..\mono\sgen\sgen-gc.h" />\r
+    <ClInclude Include="..\mono\sgen\sgen-gray.h" />\r
+    <ClInclude Include="..\mono\sgen\sgen-major-copy-object.h" />\r
+    <ClInclude Include="..\mono\sgen\sgen-major-scan-object.h" />\r
+    <ClInclude Include="..\mono\sgen\sgen-memory-governor.h" />\r
+    <ClInclude Include="..\mono\sgen\sgen-minor-copy-object.h" />\r
+    <ClInclude Include="..\mono\sgen\sgen-minor-scan-object.h" />\r
+    <ClInclude Include="..\mono\sgen\sgen-pinning.h" />\r
+    <ClInclude Include="..\mono\sgen\sgen-protocol.h" />\r
+    <ClInclude Include="..\mono\sgen\sgen-qsort.h" />\r
+    <ClInclude Include="..\mono\sgen\sgen-scan-object.h" />\r
     <ClInclude Include="..\mono\metadata\sgen-toggleref.h" />\r
-    <ClInclude Include="..\mono\metadata\sgen-workers.h" />\r
+    <ClInclude Include="..\mono\sgen\sgen-workers.h" />\r
     <ClInclude Include="..\mono\metadata\socket-io.h" />\r
     <ClInclude Include="..\mono\metadata\string-icalls.h" />\r
     <ClInclude Include="..\mono\metadata\sysmath.h" />\r
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\r
   <ImportGroup Label="ExtensionTargets">\r
   </ImportGroup>\r
-</Project>
\ No newline at end of file
+</Project>\r
index 074b868e1eeada4e58912ae627d962a7c6bc2965..dee2de94794651ce5188c5375cf96c6559377f4d 100644 (file)
@@ -74,6 +74,8 @@
     <ClCompile Include="..\mono\utils\mono-hwcap.c" />\r
     <ClCompile Include="..\mono\utils\mono-hwcap-x86.c" />\r
     <ClCompile Include="..\mono\utils\bsearch.c" />\r
+    <ClCompile Include="..\mono\utils\memfuncs.c" />\r
+    <ClCompile Include="..\mono\utils\parse.c" />\r
   </ItemGroup>\r
   <ItemGroup>\r
     <ClInclude Include="..\mono\utils\dlmalloc.h" />\r
     <ClInclude Include="..\mono\utils\mono-hwcap.h" />\r
     <ClInclude Include="..\mono\utils\mono-hwcap-x86.h" />\r
     <ClInclude Include="..\mono\utils\bsearch.h" />\r
+    <ClInclude Include="..\mono\utils\memfuncs.h" />\r
+    <ClInclude Include="..\mono\utils\parse.h" />\r
   </ItemGroup>\r
   <ItemGroup>\r
     <MASM Include="..\mono\utils\win64.asm">\r