Merge branch 'master' into msbuilddll2
[mono.git] / mono / metadata / sgen-marksweep.c
old mode 100644 (file)
new mode 100755 (executable)
index 01da3c9..1ef999b
@@ -1,29 +1,24 @@
 /*
- * sgen-marksweep.c: Simple generational GC.
+ * sgen-marksweep.c: The Mark & Sweep major collector.
  *
  * Author:
  *     Mark Probst <mark.probst@gmail.com>
  *
  * Copyright 2009-2010 Novell, Inc.
- * 
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include "config.h"
 #include "metadata/sgen-protocol.h"
 #include "metadata/sgen-cardtable.h"
 #include "metadata/sgen-memory-governor.h"
+#include "metadata/sgen-layout-stats.h"
 #include "metadata/gc-internal.h"
 
+#if !defined(SGEN_PARALLEL_MARK) && !defined(FIXED_HEAP)
+#define SGEN_HAVE_CONCURRENT_MARK
+#endif
+
 #define MS_BLOCK_SIZE  (16*1024)
 #define MS_BLOCK_SIZE_SHIFT    14
 #define MAJOR_SECTION_SIZE     MS_BLOCK_SIZE
@@ -56,7 +56,7 @@
 
 /*
  * Don't allocate single blocks, but alloc a contingent of this many
- * blocks in one swoop.
+ * blocks in one swoop.  This must be a power of two.
  */
 #define MS_BLOCK_ALLOC_NUM     32
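/*
 * The power-of-two requirement above matters because ms_get_empty_block ()
 * (changed further down in this diff) halves the contingent size on each
 * allocation failure until it is down to a single block.  A standalone sketch
 * of that strategy, with a hypothetical try_alloc () standing in for
 * sgen_alloc_os_memory_aligned ():
 */
#include <stdlib.h>

#define SKETCH_BLOCK_SIZE      (16 * 1024)
#define SKETCH_BLOCK_ALLOC_NUM 32      /* power of two, so halving ends at exactly 1 */

/* may return NULL, like a failed OS allocation */
static void *
try_alloc (size_t size)
{
	return malloc (size);
}

static void *
alloc_block_contingent (int *num_allocated)
{
	int num = SKETCH_BLOCK_ALLOC_NUM;
	void *p;

	for (;;) {
		p = try_alloc ((size_t)SKETCH_BLOCK_SIZE * num);
		if (p || num == 1)
			break;
		num >>= 1;      /* retry with half as many blocks */
	}
	*num_allocated = p ? num : 0;
	return p;
}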
 
@@ -88,6 +88,7 @@ struct _MSBlockInfo {
        unsigned int has_references : 1;
        unsigned int has_pinned : 1;    /* means cannot evacuate */
        unsigned int is_to_space : 1;
+       unsigned int swept : 1;
 #ifdef FIXED_HEAP
        unsigned int used : 1;
        unsigned int zeroed : 1;
@@ -97,11 +98,14 @@ struct _MSBlockInfo {
        void **free_list;
        MSBlockInfo *next_free;
        void **pin_queue_start;
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+       guint8 *cardtable_mod_union;
+#endif
        mword mark_words [MS_NUM_MARK_WORDS];
 };
 
 #ifdef FIXED_HEAP
-static int ms_heap_num_blocks = MS_DEFAULT_HEAP_NUM_BLOCKS;
+static mword ms_heap_num_blocks = MS_DEFAULT_HEAP_NUM_BLOCKS;
 
 static char *ms_heap_start;
 static char *ms_heap_end;
@@ -113,6 +117,7 @@ static MSBlockInfo *block_infos;
 #endif
 
 #define MS_BLOCK_OBJ(b,i)              ((b)->block + MS_BLOCK_SKIP + (b)->obj_size * (i))
+#define MS_BLOCK_OBJ_FOR_SIZE(b,i,obj_size)            ((b)->block + MS_BLOCK_SKIP + (obj_size) * (i))
 #define MS_BLOCK_DATA_FOR_OBJ(o)       ((char*)((mword)(o) & ~(mword)(MS_BLOCK_SIZE - 1)))
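/*
 * Illustration of the address arithmetic behind the two macros above: blocks
 * are MS_BLOCK_SIZE (16 KB) large and 16 KB-aligned, so clearing the low
 * MS_BLOCK_SIZE_SHIFT bits of any interior pointer yields the start of its
 * block, and object slot i sits at a fixed offset past the per-block header.
 * Standalone sketch with hypothetical names; uintptr_t stands in for mword.
 */
#include <stddef.h>
#include <stdint.h>

#define SKETCH_BLOCK_SIZE (16 * 1024)

static char *
sketch_block_for_ptr (void *ptr)
{
	return (char *)((uintptr_t)ptr & ~(uintptr_t)(SKETCH_BLOCK_SIZE - 1));
}

static void *
sketch_block_obj (char *block_start, size_t header_skip, size_t obj_size, int i)
{
	/* objects of one size class are laid out back to back after the header */
	return block_start + header_skip + obj_size * (size_t)i;
}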
 
 #ifdef FIXED_HEAP
@@ -183,10 +188,18 @@ static LOCK_DECLARE (ms_block_list_mutex);
 
 static gboolean *evacuate_block_obj_sizes;
 static float evacuation_threshold = 0.666;
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+static float concurrent_evacuation_threshold = 0.666;
+static gboolean want_evacuation = FALSE;
+#endif
 
-static gboolean concurrent_sweep = FALSE;
+static gboolean lazy_sweep = TRUE;
 static gboolean have_swept;
 
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+static gboolean concurrent_mark;
+#endif
+
 /* all allocated blocks in the system */
 static MSBlockInfo *all_blocks;
 
@@ -216,64 +229,31 @@ static MonoNativeTlsKey workers_free_block_lists_key;
 
 static long long stat_major_blocks_alloced = 0;
 static long long stat_major_blocks_freed = 0;
+static long long stat_major_blocks_lazy_swept = 0;
 static long long stat_major_objects_evacuated = 0;
-static long long stat_time_wait_for_sweep = 0;
-
-static gboolean ms_sweep_in_progress = FALSE;
-static MonoNativeThreadId ms_sweep_thread;
-static MonoSemType ms_sweep_cmd_semaphore;
-static MonoSemType ms_sweep_done_semaphore;
 
-static void
-ms_signal_sweep_command (void)
-{
-       if (!concurrent_sweep)
-               return;
-
-       g_assert (!ms_sweep_in_progress);
-       ms_sweep_in_progress = TRUE;
-       MONO_SEM_POST (&ms_sweep_cmd_semaphore);
-}
-
-static void
-ms_signal_sweep_done (void)
-{
-       if (!concurrent_sweep)
-               return;
+#if SIZEOF_VOID_P != 8
+static long long stat_major_blocks_freed_ideal = 0;
+static long long stat_major_blocks_freed_less_ideal = 0;
+static long long stat_major_blocks_freed_individual = 0;
+static long long stat_major_blocks_alloced_less_ideal = 0;
+#endif
 
-       MONO_SEM_POST (&ms_sweep_done_semaphore);
-}
+#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
+static long long num_major_objects_marked = 0;
+#define INC_NUM_MAJOR_OBJECTS_MARKED() (++num_major_objects_marked)
+#else
+#define INC_NUM_MAJOR_OBJECTS_MARKED()
+#endif
 
 static void
-ms_wait_for_sweep_done (void)
-{
-       SGEN_TV_DECLARE (atv);
-       SGEN_TV_DECLARE (btv);
-       int result;
-
-       if (!concurrent_sweep)
-               return;
-
-       if (!ms_sweep_in_progress)
-               return;
-
-       SGEN_TV_GETTIME (atv);
-       while ((result = MONO_SEM_WAIT (&ms_sweep_done_semaphore)) != 0) {
-               if (errno != EINTR)
-                       g_error ("MONO_SEM_WAIT");
-       }
-       SGEN_TV_GETTIME (btv);
-       stat_time_wait_for_sweep += SGEN_TV_ELAPSED (atv, btv);
-
-       g_assert (ms_sweep_in_progress);
-       ms_sweep_in_progress = FALSE;
-}
+sweep_block (MSBlockInfo *block, gboolean during_major_collection);
 
 static int
 ms_find_block_obj_size_index (int size)
 {
        int i;
-       DEBUG (9, g_assert (size <= SGEN_MAX_SMALL_OBJ_SIZE));
+       SGEN_ASSERT (9, size <= SGEN_MAX_SMALL_OBJ_SIZE, "size %d is bigger than max small object size %d", size, SGEN_MAX_SMALL_OBJ_SIZE);
        for (i = 0; i < num_block_obj_sizes; ++i)
                if (block_obj_sizes [i] >= size)
                        return i;
@@ -304,18 +284,18 @@ major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
        char *nursery_start;
        mword major_heap_size = ms_heap_num_blocks * MS_BLOCK_SIZE;
        mword alloc_size = nursery_size + major_heap_size;
-       int i;
+       mword i;
 
        g_assert (ms_heap_num_blocks > 0);
        g_assert (nursery_size % MS_BLOCK_SIZE == 0);
        if (nursery_align)
                g_assert (nursery_align % MS_BLOCK_SIZE == 0);
 
-       nursery_start = sgen_alloc_os_memory_aligned (alloc_size, nursery_align ? nursery_align : MS_BLOCK_SIZE, TRUE);
+       nursery_start = sgen_alloc_os_memory_aligned (alloc_size, nursery_align ? nursery_align : MS_BLOCK_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "heap");
        ms_heap_start = nursery_start + nursery_size;
        ms_heap_end = ms_heap_start + major_heap_size;
 
-       block_infos = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo) * ms_heap_num_blocks, INTERNAL_MEM_MS_BLOCK_INFO);
+       block_infos = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo) * ms_heap_num_blocks, INTERNAL_MEM_MS_BLOCK_INFO, TRUE);
 
        for (i = 0; i < ms_heap_num_blocks; ++i) {
                block_infos [i].block = ms_heap_start + i * MS_BLOCK_SIZE;
@@ -336,9 +316,9 @@ major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
 {
        char *start;
        if (nursery_align)
-               start = sgen_alloc_os_memory_aligned (nursery_size, nursery_align, TRUE);
+               start = sgen_alloc_os_memory_aligned (nursery_size, nursery_align, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");
        else
-               start = sgen_alloc_os_memory (nursery_size, TRUE);
+               start = sgen_alloc_os_memory (nursery_size, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");
 
        return start;
 }
@@ -389,9 +369,21 @@ ms_get_empty_block (void)
 
  retry:
        if (!empty_blocks) {
-               p = sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * MS_BLOCK_ALLOC_NUM, MS_BLOCK_SIZE, TRUE);
+               /*
+                * We try allocating MS_BLOCK_ALLOC_NUM blocks first.  If that's
+                * unsuccessful, we halve the number of blocks and try again, until we're at
+                * 1.  If that doesn't work either, we assert.
+                */
+               int alloc_num = MS_BLOCK_ALLOC_NUM;
+               for (;;) {
+                       p = sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * alloc_num, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE,
+                                       alloc_num == 1 ? "major heap section" : NULL);
+                       if (p)
+                               break;
+                       alloc_num >>= 1;
+               }
 
-               for (i = 0; i < MS_BLOCK_ALLOC_NUM; ++i) {
+               for (i = 0; i < alloc_num; ++i) {
                        block = p;
                        /*
                         * We do the free list update one after the
@@ -405,9 +397,13 @@ ms_get_empty_block (void)
                        p += MS_BLOCK_SIZE;
                }
 
-               SGEN_ATOMIC_ADD (num_empty_blocks, MS_BLOCK_ALLOC_NUM);
+               SGEN_ATOMIC_ADD (num_empty_blocks, alloc_num);
 
-               stat_major_blocks_alloced += MS_BLOCK_ALLOC_NUM;
+               stat_major_blocks_alloced += alloc_num;
+#if SIZEOF_VOID_P != 8
+               if (alloc_num != MS_BLOCK_ALLOC_NUM)
+                       stat_major_blocks_alloced_less_ideal += alloc_num;
+#endif
        }
 
        do {
@@ -458,7 +454,8 @@ check_block_free_list (MSBlockInfo *block, int size, gboolean pinned)
 
                /* blocks in the free lists must have at least
                   one free slot */
-               g_assert (block->free_list);
+               if (block->swept)
+                       g_assert (block->free_list);
 
 #ifdef FIXED_HEAP
                /* the block must not be in the empty_blocks list */
@@ -518,8 +515,10 @@ consistency_check (void)
                g_assert (num_free == 0);
 
                /* check all mark words are zero */
-               for (i = 0; i < MS_NUM_MARK_WORDS; ++i)
-                       g_assert (block->mark_words [i] == 0);
+               if (block->swept) {
+                       for (i = 0; i < MS_NUM_MARK_WORDS; ++i)
+                               g_assert (block->mark_words [i] == 0);
+               }
        } END_FOREACH_BLOCK;
 
        /* check free blocks */
@@ -558,20 +557,30 @@ ms_alloc_block (int size_index, gboolean pinned, gboolean has_references)
        info = sgen_alloc_internal (INTERNAL_MEM_MS_BLOCK_INFO);
 #endif
 
-       DEBUG (9, g_assert (count >= 2));
+       SGEN_ASSERT (9, count >= 2, "block with %d objects, it must hold at least 2", count);
 
        info->obj_size = size;
        info->obj_size_index = size_index;
        info->pinned = pinned;
        info->has_references = has_references;
        info->has_pinned = pinned;
-       info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD); /*FIXME WHY??? */
+       /*
+        * Blocks that are to-space are not evacuated from.  During a major collection
+        * blocks are allocated for two reasons: evacuating objects from the nursery and
+        * evacuating them from major blocks marked for evacuation.  In both cases we don't
+        * want further evacuation.
+        */
+       info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD);
+       info->swept = 1;
 #ifndef FIXED_HEAP
        info->block = ms_get_empty_block ();
 
        header = (MSBlockHeader*) info->block;
        header->info = info;
 #endif
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+       info->cardtable_mod_union = NULL;
+#endif
 
        update_heap_boundaries_for_block (info);
 
@@ -626,10 +635,15 @@ unlink_slot_from_free_list_uncontested (MSBlockInfo **free_blocks, int size_inde
        void *obj;
 
        block = free_blocks [size_index];
-       DEBUG (9, g_assert (block));
+       SGEN_ASSERT (9, block, "no free block to unlink from free_blocks %p size_index %d", free_blocks, size_index);
+
+       if (G_UNLIKELY (!block->swept)) {
+               stat_major_blocks_lazy_swept ++;
+               sweep_block (block, FALSE);
+       }
 
        obj = block->free_list;
-       DEBUG (9, g_assert (obj));
+       SGEN_ASSERT (9, obj, "block %p in free list had no available object to alloc from", block);
 
        block->free_list = *(void**)obj;
        if (!block->free_list) {
@@ -662,15 +676,19 @@ try_remove_block_from_free_list (MSBlockInfo *block, MSBlockInfo **free_blocks,
 }
 
 static void*
-alloc_obj_par (int size, gboolean pinned, gboolean has_references)
+alloc_obj_par (MonoVTable *vtable, int size, gboolean pinned, gboolean has_references)
 {
        int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
        MSBlockInfo **free_blocks_local = FREE_BLOCKS_LOCAL (pinned, has_references);
        MSBlockInfo *block;
        void *obj;
 
-       DEBUG (9, g_assert (!ms_sweep_in_progress));
-       DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+       if (concurrent_mark)
+               g_assert_not_reached ();
+#endif
+
+       SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);
 
        if (free_blocks_local [size_index]) {
        get_slot:
@@ -704,34 +722,29 @@ alloc_obj_par (int size, gboolean pinned, gboolean has_references)
                }
        }
 
-       /*
-        * FIXME: This should not be necessary because it'll be
-        * overwritten by the vtable immediately.
-        */
-       *(void**)obj = NULL;
+       *(MonoVTable**)obj = vtable;
 
        return obj;
 }
 
 static void*
-major_par_alloc_object (int size, gboolean has_references)
+major_par_alloc_object (MonoVTable *vtable, int size, gboolean has_references)
 {
-       return alloc_obj_par (size, FALSE, has_references);
+       return alloc_obj_par (vtable, size, FALSE, has_references);
 }
 #endif
 
 static void*
-alloc_obj (int size, gboolean pinned, gboolean has_references)
+alloc_obj (MonoVTable *vtable, int size, gboolean pinned, gboolean has_references)
 {
        int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
        MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
        void *obj;
 
 #ifdef SGEN_PARALLEL_MARK
-       DEBUG (9, g_assert (current_collection_generation != GENERATION_OLD));
-#endif
+       SGEN_ASSERT (9, current_collection_generation != GENERATION_OLD, "serial allocator called from a %d collection with parallel mark enabled", current_collection_generation);
 
-       DEBUG (9, g_assert (!ms_sweep_in_progress));
+#endif
 
        if (!free_blocks [size_index]) {
                if (G_UNLIKELY (!ms_alloc_block (size_index, pinned, has_references)))
@@ -740,19 +753,15 @@ alloc_obj (int size, gboolean pinned, gboolean has_references)
 
        obj = unlink_slot_from_free_list_uncontested (free_blocks, size_index);
 
-       /*
-        * FIXME: This should not be necessary because it'll be
-        * overwritten by the vtable immediately.
-        */
-       *(void**)obj = NULL;
+       *(MonoVTable**)obj = vtable;
 
        return obj;
 }
 
 static void*
-major_alloc_object (int size, gboolean has_references)
+major_alloc_object (MonoVTable *vtable, int size, gboolean has_references)
 {
-       return alloc_obj (size, FALSE, has_references);
+       return alloc_obj (vtable, size, FALSE, has_references);
 }
 
 /*
@@ -767,14 +776,17 @@ free_object (char *obj, size_t size, gboolean pinned)
 {
        MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
        int word, bit;
-       DEBUG (9, g_assert ((pinned && block->pinned) || (!pinned && !block->pinned)));
-       DEBUG (9, g_assert (MS_OBJ_ALLOCED (obj, block)));
+
+       if (!block->swept)
+               sweep_block (block, FALSE);
+       SGEN_ASSERT (9, (pinned && block->pinned) || (!pinned && !block->pinned), "free-object pinning mixup object %p pinned %d block %p pinned %d", obj, pinned, block, block->pinned);
+       SGEN_ASSERT (9, MS_OBJ_ALLOCED (obj, block), "object %p is already free", obj);
        MS_CALC_MARK_BIT (word, bit, obj);
-       DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
+       SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p has mark bit set", obj);
        if (!block->free_list) {
                MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, block->has_references);
                int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
-               DEBUG (9, g_assert (!block->next_free));
+               SGEN_ASSERT (9, !block->next_free, "block %p doesn't have a free-list of objects but belongs to a free-list of blocks", block);
                block->next_free = free_blocks [size_index];
                free_blocks [size_index] = block;
        }
@@ -791,19 +803,17 @@ major_free_non_pinned_object (char *obj, size_t size)
 
 /* size is a multiple of SGEN_ALLOC_ALIGN */
 static void*
-major_alloc_small_pinned_obj (size_t size, gboolean has_references)
+major_alloc_small_pinned_obj (MonoVTable *vtable, size_t size, gboolean has_references)
 {
        void *res;
 
-       ms_wait_for_sweep_done ();
-
-       res = alloc_obj (size, TRUE, has_references);
+       res = alloc_obj (vtable, size, TRUE, has_references);
         /*If we failed to alloc memory, we better try releasing memory
          *as pinned alloc is requested by the runtime.
          */
         if (!res) {
-               sgen_perform_collection (0, GENERATION_OLD, "pinned alloc failure");
-               res = alloc_obj (size, TRUE, has_references);
+               sgen_perform_collection (0, GENERATION_OLD, "pinned alloc failure", TRUE);
+               res = alloc_obj (vtable, size, TRUE, has_references);
         }
         return res;
 }
@@ -823,13 +833,10 @@ major_alloc_degraded (MonoVTable *vtable, size_t size)
        void *obj;
        int old_num_sections;
 
-       ms_wait_for_sweep_done ();
-
        old_num_sections = num_major_sections;
 
-       obj = alloc_obj (size, FALSE, SGEN_VTABLE_HAS_REFERENCES (vtable));
+       obj = alloc_obj (vtable, size, FALSE, SGEN_VTABLE_HAS_REFERENCES (vtable));
        if (G_LIKELY (obj)) {
-               *(MonoVTable**)obj = vtable;
                HEAVY_STAT (++stat_objects_alloced_degraded);
                HEAVY_STAT (stat_bytes_alloced_degraded += size);
                g_assert (num_major_sections >= old_num_sections);
@@ -871,19 +878,30 @@ major_is_object_live (char *obj)
 
        /* now we know it's in a major block */
        block = MS_BLOCK_FOR_OBJ (obj);
-       DEBUG (9, g_assert (!block->pinned));
+       SGEN_ASSERT (9, !block->pinned, "block %p is pinned, BTW why is this bad?", block);
        MS_CALC_MARK_BIT (word, bit, obj);
        return MS_MARK_BIT (block, word, bit) ? TRUE : FALSE;
 }
 
 static gboolean
-major_ptr_is_in_non_pinned_space (char *ptr)
+major_ptr_is_in_non_pinned_space (char *ptr, char **start)
 {
        MSBlockInfo *block;
 
        FOREACH_BLOCK (block) {
-               if (ptr >= block->block && ptr <= block->block + MS_BLOCK_SIZE)
+               if (ptr >= block->block && ptr <= block->block + MS_BLOCK_SIZE) {
+                       int count = MS_BLOCK_FREE / block->obj_size;
+                       int i;
+
+                       *start = NULL;
+                       for (i = 0; i <= count; ++i) {
+                               if (ptr >= MS_BLOCK_OBJ (block, i) && ptr < MS_BLOCK_OBJ (block, i + 1)) {
+                                       *start = MS_BLOCK_OBJ (block, i);
+                                       break;
+                               }
+                       }
                        return !block->pinned;
+               }
        } END_FOREACH_BLOCK;
        return FALSE;
 }
@@ -893,8 +911,6 @@ major_iterate_objects (gboolean non_pinned, gboolean pinned, IterateObjectCallba
 {
        MSBlockInfo *block;
 
-       ms_wait_for_sweep_done ();
-
        FOREACH_BLOCK (block) {
                int count = MS_BLOCK_FREE / block->obj_size;
                int i;
@@ -903,6 +919,8 @@ major_iterate_objects (gboolean non_pinned, gboolean pinned, IterateObjectCallba
                        continue;
                if (!block->pinned && !non_pinned)
                        continue;
+               if (lazy_sweep)
+                       sweep_block (block, FALSE);
 
                for (i = 0; i < count; ++i) {
                        void **obj = (void**) MS_BLOCK_OBJ (block, i);
@@ -917,7 +935,6 @@ major_is_valid_object (char *object)
 {
        MSBlockInfo *block;
 
-       ms_wait_for_sweep_done ();
        FOREACH_BLOCK (block) {
                int idx;
                char *obj;
@@ -936,7 +953,7 @@ major_is_valid_object (char *object)
 }
 
 
-static gboolean
+static MonoVTable*
 major_describe_pointer (char *ptr)
 {
        MSBlockInfo *block;
@@ -946,37 +963,42 @@ major_describe_pointer (char *ptr)
                char *obj;
                gboolean live;
                MonoVTable *vtable;
+               int w, b;
+               gboolean marked;
 
                if ((block->block > ptr) || ((block->block + MS_BLOCK_SIZE) <= ptr))
                        continue;
 
-               fprintf (gc_debug_file, "major-ptr (block %p sz %d pin %d ref %d) ",
+               SGEN_LOG (0, "major-ptr (block %p sz %d pin %d ref %d)\n",
                        block->block, block->obj_size, block->pinned, block->has_references);
 
                idx = MS_BLOCK_OBJ_INDEX (ptr, block);
                obj = (char*)MS_BLOCK_OBJ (block, idx);
                live = MS_OBJ_ALLOCED (obj, block);
                vtable = live ? (MonoVTable*)SGEN_LOAD_VTABLE (obj) : NULL;
-               
+
+               MS_CALC_MARK_BIT (w, b, obj);
+               marked = MS_MARK_BIT (block, w, b);
+
                if (obj == ptr) {
+                       SGEN_LOG (0, "\t(");
                        if (live)
-                               fprintf (gc_debug_file, "(object %s.%s)", vtable->klass->name_space, vtable->klass->name);
+                               SGEN_LOG (0, "object");
                        else
-                               fprintf (gc_debug_file, "(dead-object)");
+                               SGEN_LOG (0, "dead-object");
                } else {
                        if (live)
-                               fprintf (gc_debug_file, "(interior-ptr offset %td of %p %s.%s)",
-                                       ptr - obj,
-                                       obj, vtable->klass->name_space, vtable->klass->name);
+                               SGEN_LOG (0, "interior-ptr offset %td", ptr - obj);
                        else
-                               fprintf (gc_debug_file, "(dead-interior-ptr to %td to %p)",
-                                       ptr - obj, obj);
+                               SGEN_LOG (0, "dead-interior-ptr offset %td", ptr - obj);
                }
 
-               return TRUE;
+               SGEN_LOG (0, " marked %d)\n", marked ? 1 : 0);
+
+               return vtable;
        } END_FOREACH_BLOCK;
 
-       return FALSE;
+       return NULL;
 }
 
 static void
@@ -1046,36 +1068,46 @@ major_dump_heap (FILE *heap_dump_file)
                        if ((block)->has_references)                    \
                                GRAY_OBJECT_ENQUEUE ((queue), (obj));   \
                        binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
+                       INC_NUM_MAJOR_OBJECTS_MARKED ();                \
                }                                                       \
        } while (0)
 #define MS_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do {               \
                int __word, __bit;                                      \
                MS_CALC_MARK_BIT (__word, __bit, (obj));                \
-               DEBUG (9, g_assert (MS_OBJ_ALLOCED ((obj), (block))));  \
+               SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj);       \
                if (!MS_MARK_BIT ((block), __word, __bit)) {            \
                        MS_SET_MARK_BIT ((block), __word, __bit);       \
                        if ((block)->has_references)                    \
                                GRAY_OBJECT_ENQUEUE ((queue), (obj));   \
                        binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
+                       INC_NUM_MAJOR_OBJECTS_MARKED ();                \
                }                                                       \
        } while (0)
 #define MS_PAR_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do {           \
                int __word, __bit;                                      \
                gboolean __was_marked;                                  \
-               DEBUG (9, g_assert (MS_OBJ_ALLOCED ((obj), (block))));  \
+               SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj);       \
                MS_CALC_MARK_BIT (__word, __bit, (obj));                \
                MS_PAR_SET_MARK_BIT (__was_marked, (block), __word, __bit); \
                if (!__was_marked) {                                    \
                        if ((block)->has_references)                    \
                                GRAY_OBJECT_ENQUEUE ((queue), (obj));   \
                        binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
+                       INC_NUM_MAJOR_OBJECTS_MARKED ();                \
                }                                                       \
        } while (0)
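/*
 * The MS_CALC_MARK_BIT / MS_MARK_BIT / MS_SET_MARK_BIT macros used above are
 * not shown in this hunk; conceptually they map an object to a (word, bit)
 * position in the block's mark_words bitmap.  A standalone sketch of such a
 * bitmap, assuming one bit per minimum-aligned slot -- the slot granularity
 * and the names are assumptions, not taken from this diff:
 */
#include <stddef.h>
#include <stdint.h>

#define SKETCH_SLOT_ALIGN_BITS 3                         /* assumed 8-byte slots */
#define SKETCH_BITS_PER_WORD   (8 * sizeof (uintptr_t))

static void
sketch_calc_mark_bit (size_t obj_offset_in_block, size_t *word, size_t *bit)
{
	size_t slot = obj_offset_in_block >> SKETCH_SLOT_ALIGN_BITS;
	*word = slot / SKETCH_BITS_PER_WORD;
	*bit = slot % SKETCH_BITS_PER_WORD;
}

/* returns whether the bit was already set, then sets it (non-atomic variant) */
static int
sketch_test_and_set_mark (uintptr_t *mark_words, size_t word, size_t bit)
{
	uintptr_t mask = (uintptr_t)1 << bit;
	int was_set = (mark_words [word] & mask) != 0;

	mark_words [word] |= mask;
	return was_set;
}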
 
 static void
 pin_major_object (char *obj, SgenGrayQueue *queue)
 {
-       MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
+       MSBlockInfo *block;
+
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+       if (concurrent_mark)
+               g_assert_not_reached ();
+#endif
+
+       block = MS_BLOCK_FOR_OBJ (obj);
        block->has_pinned = TRUE;
        MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
 }
@@ -1084,17 +1116,16 @@ pin_major_object (char *obj, SgenGrayQueue *queue)
 
 #ifdef SGEN_PARALLEL_MARK
 static void
-major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
+major_copy_or_mark_object (void **ptr, void *obj, SgenGrayQueue *queue)
 {
-       void *obj = *ptr;
        mword objsize;
        MSBlockInfo *block;
        MonoVTable *vt;
 
        HEAVY_STAT (++stat_copy_object_called_major);
 
-       DEBUG (9, g_assert (obj));
-       DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));
+       SGEN_ASSERT (9, obj, "null object from pointer %p", ptr);
+       SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "parallel major copy-or-mark called from a %d collection", current_collection_generation);
 
        if (sgen_ptr_in_nursery (obj)) {
                int word, bit;
@@ -1121,7 +1152,7 @@ major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
                objsize = SGEN_ALIGN_UP (sgen_par_object_get_size (vt, (MonoObject*)obj));
                has_references = SGEN_VTABLE_HAS_REFERENCES (vt);
 
-               destination = sgen_minor_collector.par_alloc_for_promotion (obj, objsize, has_references);
+               destination = sgen_minor_collector.par_alloc_for_promotion (vt, obj, objsize, has_references);
                if (G_UNLIKELY (!destination)) {
                        if (!sgen_ptr_in_nursery (obj)) {
                                int size_index;
@@ -1135,14 +1166,6 @@ major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
                        return;
                }
 
-               /*
-                * We do this before the CAS because we want to make
-                * sure that if another thread sees the destination
-                * pointer the VTable is already in place.  Not doing
-                * this can crash binary protocols.
-                */
-               *(MonoVTable**)destination = vt;
-
                if (SGEN_CAS_PTR (obj, (void*)((mword)destination | SGEN_FORWARDED_BIT), vt) == vt) {
                        gboolean was_marked;
 
@@ -1166,8 +1189,9 @@ major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
                        if (!sgen_ptr_in_nursery (obj)) {
                                block = MS_BLOCK_FOR_OBJ (obj);
                                MS_CALC_MARK_BIT (word, bit, obj);
-                               DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
+                               SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p already marked", obj);
                                MS_PAR_SET_MARK_BIT (was_marked, block, word, bit);
+                               binary_protocol_mark (obj, vt, sgen_safe_object_get_size ((MonoObject*)obj));
                        }
                } else {
                        /*
@@ -1230,34 +1254,73 @@ major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
 
                        MS_PAR_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
                } else {
+                       LOSObject *bigobj = sgen_los_header_for_object (obj);
+                       mword size_word = bigobj->size;
 #ifdef FIXED_HEAP
                        mword vtable_word = *(mword*)obj;
                        vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
 #endif
-
-                       if (vtable_word & SGEN_PINNED_BIT)
+                       if (size_word & 1)
                                return;
                        binary_protocol_pin (obj, vt, sgen_safe_object_get_size ((MonoObject*)obj));
-                       if (SGEN_CAS_PTR (obj, (void*)(vtable_word | SGEN_PINNED_BIT), (void*)vtable_word) == (void*)vtable_word) {
+                       if (SGEN_CAS_PTR ((void*)&bigobj->size, (void*)(size_word | 1), (void*)size_word) == (void*)size_word) {
                                if (SGEN_VTABLE_HAS_REFERENCES (vt))
                                        GRAY_OBJECT_ENQUEUE (queue, obj);
                        } else {
-                               g_assert (SGEN_OBJECT_IS_PINNED (obj));
+                               g_assert (sgen_los_object_is_pinned (obj));
                        }
                }
        }
 }
 #else
+#ifdef SGEN_HAVE_CONCURRENT_MARK
 static void
-major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
+major_copy_or_mark_object_concurrent (void **ptr, void *obj, SgenGrayQueue *queue)
+{
+       g_assert (!SGEN_OBJECT_IS_FORWARDED (obj));
+
+       if (!sgen_ptr_in_nursery (obj)) {
+#ifdef FIXED_HEAP
+               if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
+#else
+               mword objsize;
+
+               objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));
+
+               if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
+#endif
+               {
+                       MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
+                       MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
+               } else {
+                       if (sgen_los_object_is_pinned (obj))
+                               return;
+
+#ifdef ENABLE_DTRACE
+                       if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
+                               MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
+                               MONO_GC_OBJ_PINNED ((mword)obj, sgen_safe_object_get_size (obj), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
+                       }
+#endif
+
+                       sgen_los_pin_object (obj);
+                       if (SGEN_OBJECT_HAS_REFERENCES (obj))
+                               GRAY_OBJECT_ENQUEUE (queue, obj);
+                       INC_NUM_MAJOR_OBJECTS_MARKED ();
+               }
+       }
+}
+#endif
+
+static void
+major_copy_or_mark_object (void **ptr, void *obj, SgenGrayQueue *queue)
 {
-       void *obj = *ptr;
        MSBlockInfo *block;
 
        HEAVY_STAT (++stat_copy_object_called_major);
 
-       DEBUG (9, g_assert (obj));
-       DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));
+       SGEN_ASSERT (9, obj, "null object from pointer %p", ptr);
+       SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "major copy-or-mark called from a %d collection", current_collection_generation);
 
        if (sgen_ptr_in_nursery (obj)) {
                int word, bit;
@@ -1308,8 +1371,9 @@ major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
                if (!sgen_ptr_in_nursery (obj)) {
                        block = MS_BLOCK_FOR_OBJ (obj);
                        MS_CALC_MARK_BIT (word, bit, obj);
-                       DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
+                       SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p already marked", obj);
                        MS_SET_MARK_BIT (block, word, bit);
+                       binary_protocol_mark (obj, (gpointer)LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));
                }
        } else {
                char *forwarded;
@@ -1367,19 +1431,59 @@ major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
                                MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
                        }
                } else {
-                       if (SGEN_OBJECT_IS_PINNED (obj))
+                       if (sgen_los_object_is_pinned (obj))
                                return;
                        binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));
-                       SGEN_PIN_OBJECT (obj);
-                       /* FIXME: only enqueue if object has references */
-                       GRAY_OBJECT_ENQUEUE (queue, obj);
+
+#ifdef ENABLE_DTRACE
+                       if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
+                               MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
+                               MONO_GC_OBJ_PINNED ((mword)obj, sgen_safe_object_get_size (obj), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
+                       }
+#endif
+
+                       sgen_los_pin_object (obj);
+                       if (SGEN_OBJECT_HAS_REFERENCES (obj))
+                               GRAY_OBJECT_ENQUEUE (queue, obj);
                }
        }
 }
 #endif
 
+static void
+major_copy_or_mark_object_canonical (void **ptr, SgenGrayQueue *queue)
+{
+       major_copy_or_mark_object (ptr, *ptr, queue);
+}
+
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+static void
+major_copy_or_mark_object_concurrent_canonical (void **ptr, SgenGrayQueue *queue)
+{
+       major_copy_or_mark_object_concurrent (ptr, *ptr, queue);
+}
+
+static long long
+major_get_and_reset_num_major_objects_marked (void)
+{
+#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
+       long long num = num_major_objects_marked;
+       num_major_objects_marked = 0;
+       return num;
+#else
+       return 0;
+#endif
+}
+#endif
+
 #include "sgen-major-scan-object.h"
 
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+#define SCAN_FOR_CONCURRENT_MARK
+#include "sgen-major-scan-object.h"
+#undef SCAN_FOR_CONCURRENT_MARK
+#endif
+
 static void
 mark_pinned_objects_in_block (MSBlockInfo *block, SgenGrayQueue *queue)
 {
@@ -1393,7 +1497,7 @@ mark_pinned_objects_in_block (MSBlockInfo *block, SgenGrayQueue *queue)
 
        for (i = 0; i < block->pin_queue_num_entries; ++i) {
                int index = MS_BLOCK_OBJ_INDEX (block->pin_queue_start [i], block);
-               DEBUG (9, g_assert (index >= 0 && index < MS_BLOCK_FREE / block->obj_size));
+               SGEN_ASSERT (9, index >= 0 && index < MS_BLOCK_FREE / block->obj_size, "invalid object %p index %d max-index %d", block->pin_queue_start [i], index, MS_BLOCK_FREE / block->obj_size);
                if (index == last_index)
                        continue;
                MS_MARK_OBJECT_AND_ENQUEUE_CHECKED (MS_BLOCK_OBJ (block, index), block, queue);
@@ -1401,6 +1505,104 @@ mark_pinned_objects_in_block (MSBlockInfo *block, SgenGrayQueue *queue)
        }
 }
 
+static inline void
+sweep_block_for_size (MSBlockInfo *block, int count, int obj_size)
+{
+       int obj_index;
+
+       for (obj_index = 0; obj_index < count; ++obj_index) {
+               int word, bit;
+               void *obj = MS_BLOCK_OBJ_FOR_SIZE (block, obj_index, obj_size);
+
+               MS_CALC_MARK_BIT (word, bit, obj);
+               if (MS_MARK_BIT (block, word, bit)) {
+                       SGEN_ASSERT (9, MS_OBJ_ALLOCED (obj, block), "object %p not allocated", obj);
+               } else {
+                       /* an unmarked object */
+                       if (MS_OBJ_ALLOCED (obj, block)) {
+                               /*
+                                * FIXME: Merge consecutive
+                                * slots for lower reporting
+                                * overhead.  Maybe memset
+                                * will also benefit?
+                                */
+                               binary_protocol_empty (obj, obj_size);
+                               MONO_GC_MAJOR_SWEPT ((mword)obj, obj_size);
+                               memset (obj, 0, obj_size);
+                       }
+                       *(void**)obj = block->free_list;
+                       block->free_list = obj;
+               }
+       }
+}
+
+/*
+ * sweep_block:
+ *
+ *   Traverse BLOCK, freeing and zeroing unused objects.
+ */
+static void
+sweep_block (MSBlockInfo *block, gboolean during_major_collection)
+{
+       int count;
+       void *reversed = NULL;
+
+       if (!during_major_collection)
+               g_assert (!sgen_concurrent_collection_in_progress ());
+
+       if (block->swept)
+               return;
+
+       count = MS_BLOCK_FREE / block->obj_size;
+
+       block->free_list = NULL;
+
+       /* Use inline instances specialized to constant sizes; this allows the compiler to replace the memset calls with inline code */
+       // FIXME: Add more sizes
+       switch (block->obj_size) {
+       case 16:
+               sweep_block_for_size (block, count, 16);
+               break;
+       default:
+               sweep_block_for_size (block, count, block->obj_size);
+               break;
+       }
+
+       /* reset mark bits */
+       memset (block->mark_words, 0, sizeof (mword) * MS_NUM_MARK_WORDS);
+
+       /* Reverse free list so that it's in address order */
+       reversed = NULL;
+       while (block->free_list) {
+               void *next = *(void**)block->free_list;
+               *(void**)block->free_list = reversed;
+               reversed = block->free_list;
+               block->free_list = next;
+       }
+       block->free_list = reversed;
+
+       block->swept = 1;
+}
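/*
 * Sketch of the lazy-sweep protocol that replaces the old concurrent-sweep
 * thread: ms_sweep () below only counts mark bits and flags blocks as
 * unswept, and sweep_block () runs on demand the first time a block is
 * touched again -- by the allocator, by free_object () or by the iterators.
 * Simplified, hypothetical types, not the GC's real structures.
 */
typedef struct sketch_block {
	int swept;
	void **free_list;
} SketchBlock;

static void
sketch_sweep_block (SketchBlock *b)
{
	/* the real sweep_block () rebuilds b->free_list from the mark bits here */
	b->swept = 1;
}

static void *
sketch_alloc_from_block (SketchBlock *b)
{
	void *obj;

	if (!b->swept)
		sketch_sweep_block (b);         /* pay the sweep cost lazily, on first use */

	obj = b->free_list;
	if (obj)
		b->free_list = *(void **)obj;   /* pop the first free slot */
	return obj;
}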
+
+static inline int
+bitcount (mword d)
+{
+       int count = 0;
+
+#ifdef __GNUC__
+       if (sizeof (mword) == sizeof (unsigned long))
+               count += __builtin_popcountl (d);
+       else
+               count += __builtin_popcount (d);
+#else
+       while (d) {
+               count ++;
+               d &= (d - 1);
+       }
+#endif
+       return count;
+}
+
 static void
 ms_sweep (void)
 {
@@ -1412,6 +1614,11 @@ ms_sweep (void)
        int *slots_used = alloca (sizeof (int) * num_block_obj_sizes);
        int *num_blocks = alloca (sizeof (int) * num_block_obj_sizes);
 
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+       mword total_evacuate_heap = 0;
+       mword total_evacuate_saved = 0;
+#endif
+
        for (i = 0; i < num_block_obj_sizes; ++i)
                slots_available [i] = slots_used [i] = num_blocks [i] = 0;
 
@@ -1430,8 +1637,9 @@ ms_sweep (void)
                int count;
                gboolean have_live = FALSE;
                gboolean has_pinned;
-               int obj_index;
+               gboolean have_free = FALSE;
                int obj_size_index;
+               int nused = 0;
 
                obj_size_index = block->obj_size_index;
 
@@ -1439,42 +1647,34 @@ ms_sweep (void)
                block->has_pinned = block->pinned;
 
                block->is_to_space = FALSE;
+               block->swept = 0;
 
                count = MS_BLOCK_FREE / block->obj_size;
-               block->free_list = NULL;
-
-               for (obj_index = 0; obj_index < count; ++obj_index) {
-                       int word, bit;
-                       void *obj = MS_BLOCK_OBJ (block, obj_index);
 
-                       MS_CALC_MARK_BIT (word, bit, obj);
-                       if (MS_MARK_BIT (block, word, bit)) {
-                               DEBUG (9, g_assert (MS_OBJ_ALLOCED (obj, block)));
-                               have_live = TRUE;
-                               if (!has_pinned)
-                                       ++slots_used [obj_size_index];
-                       } else {
-                               /* an unmarked object */
-                               if (MS_OBJ_ALLOCED (obj, block)) {
-                                       binary_protocol_empty (obj, block->obj_size);
-                                       memset (obj, 0, block->obj_size);
-                               }
-                               *(void**)obj = block->free_list;
-                               block->free_list = obj;
-                       }
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+               if (block->cardtable_mod_union) {
+                       sgen_free_internal_dynamic (block->cardtable_mod_union, CARDS_PER_BLOCK, INTERNAL_MEM_CARDTABLE_MOD_UNION);
+                       block->cardtable_mod_union = NULL;
                }
+#endif
 
-               /* reset mark bits */
-               memset (block->mark_words, 0, sizeof (mword) * MS_NUM_MARK_WORDS);
+               /* Count marked objects in the block */
+               for (i = 0; i < MS_NUM_MARK_WORDS; ++i) {
+                       nused += bitcount (block->mark_words [i]);
+               }
+               if (nused) {
+                       have_live = TRUE;
+               }
+               if (nused < count)
+                       have_free = TRUE;
 
-               /*
-                * FIXME: reverse free list so that it's in address
-                * order
-                */
+               if (!lazy_sweep)
+                       sweep_block (block, TRUE);
 
                if (have_live) {
                        if (!has_pinned) {
                                ++num_blocks [obj_size_index];
+                               slots_used [obj_size_index] += nused;
                                slots_available [obj_size_index] += count;
                        }
 
@@ -1484,7 +1684,7 @@ ms_sweep (void)
                         * If there are free slots in the block, add
                         * the block to the corresponding free list.
                         */
-                       if (block->free_list) {
+                       if (have_free) {
                                MSBlockInfo **free_blocks = FREE_BLOCKS (block->pinned, block->has_references);
                                int index = MS_BLOCK_OBJ_SIZE_INDEX (block->obj_size);
                                block->next_free = free_blocks [index];
@@ -1522,41 +1722,27 @@ ms_sweep (void)
                } else {
                        evacuate_block_obj_sizes [i] = FALSE;
                }
-       }
-
-       have_swept = TRUE;
-}
-
-static mono_native_thread_return_t
-ms_sweep_thread_func (void *dummy)
-{
-       g_assert (concurrent_sweep);
-
-       for (;;) {
-               int result;
-
-               while ((result = MONO_SEM_WAIT (&ms_sweep_cmd_semaphore)) != 0) {
-                       if (errno != EINTR)
-                               g_error ("MONO_SEM_WAIT FAILED with %d errno %d (%s)", result, errno, strerror (errno));
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+               {
+                       mword total_bytes = block_obj_sizes [i] * slots_available [i];
+                       total_evacuate_heap += total_bytes;
+                       if (evacuate_block_obj_sizes [i])
+                               total_evacuate_saved += total_bytes - block_obj_sizes [i] * slots_used [i];
                }
-
-               ms_sweep ();
-
-               ms_signal_sweep_done ();
+#endif
        }
 
-       return NULL;
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+       want_evacuation = (float)total_evacuate_saved / (float)total_evacuate_heap > (1 - concurrent_evacuation_threshold);
+#endif
+
+       have_swept = TRUE;
 }
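/*
 * Worked example of the want_evacuation computation above: with the default
 * concurrent_evacuation_threshold of 0.666, evacuation is requested once the
 * bytes that evacuation would save exceed roughly a third of the candidate
 * heap, e.g. 40 MB saved out of 100 MB scanned.  The numbers are hypothetical.
 */
static int
sketch_want_evacuation (double bytes_saved, double bytes_heap, double threshold)
{
	/* mirrors: saved / heap > (1 - threshold) */
	return bytes_heap > 0 && bytes_saved / bytes_heap > (1.0 - threshold);
}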
 
 static void
 major_sweep (void)
 {
-       if (concurrent_sweep) {
-               g_assert (ms_sweep_thread);
-               ms_signal_sweep_command ();
-       } else {
-               ms_sweep ();
-       }
+       ms_sweep ();
 }
 
 static int count_pinned_ref;
@@ -1586,7 +1772,7 @@ count_pinned_callback (char *obj, size_t size, void *data)
                ++count_pinned_nonref;
 }
 
-static void __attribute__ ((unused))
+static G_GNUC_UNUSED void
 count_ref_nonref_objs (void)
 {
        int total;
@@ -1637,8 +1823,6 @@ static int old_num_major_sections;
 static void
 major_start_nursery_collection (void)
 {
-       ms_wait_for_sweep_done ();
-
 #ifdef MARKSWEEP_CONSISTENCY_CHECK
        consistency_check ();
 #endif
@@ -1660,8 +1844,6 @@ major_start_major_collection (void)
 {
        int i;
 
-       ms_wait_for_sweep_done ();
-
        /* clear the free lists */
        for (i = 0; i < num_block_obj_sizes; ++i) {
                if (!evacuate_block_obj_sizes [i])
@@ -1670,6 +1852,24 @@ major_start_major_collection (void)
                free_block_lists [0][i] = NULL;
                free_block_lists [MS_BLOCK_FLAG_REFS][i] = NULL;
        }
+
+       // Sweep all unswept blocks
+       if (lazy_sweep) {
+               MSBlockInfo **iter;
+
+               MONO_GC_SWEEP_BEGIN (GENERATION_OLD, TRUE);
+
+               iter = &all_blocks;
+               while (*iter) {
+                       MSBlockInfo *block = *iter;
+
+                       sweep_block (block, TRUE);
+
+                       iter = &block->next;
+               }
+
+               MONO_GC_SWEEP_END (GENERATION_OLD, TRUE);
+       }
 }
 
 static void
@@ -1677,6 +1877,18 @@ major_finish_major_collection (void)
 {
 }
 
+#if !defined(FIXED_HEAP) && SIZEOF_VOID_P != 8
+static int
+compare_pointers (const void *va, const void *vb) {
+       char *a = *(char**)va, *b = *(char**)vb;
+       if (a < b)
+               return -1;
+       if (a > b)
+               return 1;
+       return 0;
+}
+#endif
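/*
 * compare_pointers () above is used further down with sgen_qsort () to bring
 * the empty-block list into address order before searching for contiguous
 * runs.  Equivalent standalone usage with the C library's qsort ():
 */
#include <stdlib.h>

static int
sketch_compare_pointers (const void *va, const void *vb)
{
	char *a = *(char **)va, *b = *(char **)vb;
	return (a > b) - (a < b);
}

static void
sketch_sort_blocks (void **blocks, size_t num_blocks)
{
	qsort (blocks, num_blocks, sizeof (void *), sketch_compare_pointers);
}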
+
 static void
 major_have_computer_minor_collection_allowance (void)
 {
@@ -1684,20 +1896,140 @@ major_have_computer_minor_collection_allowance (void)
        int section_reserve = sgen_get_minor_collection_allowance () / MS_BLOCK_SIZE;
 
        g_assert (have_swept);
-       ms_wait_for_sweep_done ();
-       g_assert (!ms_sweep_in_progress);
 
+#if SIZEOF_VOID_P != 8
+       {
+               int i, num_empty_blocks_orig, num_blocks, arr_length;
+               void *block;
+               void **empty_block_arr;
+               void **rebuild_next;
+
+#ifdef TARGET_WIN32
+               /*
+                * sgen_free_os_memory () asserts in mono_vfree () because windows doesn't like freeing the middle of
+                * a VirtualAlloc ()-ed block.
+                */
+               return;
+#endif
+
+               if (num_empty_blocks <= section_reserve)
+                       return;
+               SGEN_ASSERT (0, num_empty_blocks > 0, "section reserve can't be negative");
+
+               num_empty_blocks_orig = num_empty_blocks;
+               empty_block_arr = (void**)sgen_alloc_internal_dynamic (sizeof (void*) * num_empty_blocks_orig,
+                               INTERNAL_MEM_MS_BLOCK_INFO_SORT, FALSE);
+               if (!empty_block_arr)
+                       goto fallback;
+
+               i = 0;
+               for (block = empty_blocks; block; block = *(void**)block)
+                       empty_block_arr [i++] = block;
+               SGEN_ASSERT (0, i == num_empty_blocks, "empty block count wrong");
+
+               sgen_qsort (empty_block_arr, num_empty_blocks, sizeof (void*), compare_pointers);
+
+               /*
+                * We iterate over the free blocks, trying to find MS_BLOCK_ALLOC_NUM
+                * contiguous ones.  If we do, we free them.  If that's not enough to get to
+                * section_reserve, we halve the number of contiguous blocks we're looking
+                * for and have another go, until we're done with looking for pairs of
+                * blocks, at which point we give up and go to the fallback.
+                */
+               arr_length = num_empty_blocks_orig;
+               num_blocks = MS_BLOCK_ALLOC_NUM;
+               while (num_empty_blocks > section_reserve && num_blocks > 1) {
+                       int first = -1;
+                       int dest = 0;
+
+                       dest = 0;
+                       for (i = 0; i < arr_length; ++i) {
+                               int d = dest;
+                               void *block = empty_block_arr [i];
+                               SGEN_ASSERT (0, block, "we're not shifting correctly");
+                               if (i != dest) {
+                                       empty_block_arr [dest] = block;
+                                       /*
+                                        * This is not strictly necessary, but we're
+                                        * cautious.
+                                        */
+                                       empty_block_arr [i] = NULL;
+                               }
+                               ++dest;
+
+                               if (first < 0) {
+                                       first = d;
+                                       continue;
+                               }
+
+                               SGEN_ASSERT (0, first >= 0 && d > first, "algorithm is wrong");
+
+                               if ((char*)block != ((char*)empty_block_arr [d-1]) + MS_BLOCK_SIZE) {
+                                       first = d;
+                                       continue;
+                               }
+
+                               if (d + 1 - first == num_blocks) {
+                                       /*
+                                        * We found num_blocks contiguous blocks.  Free them
+                                        * and null their array entries.  As an optimization
+                                        * we could, instead of nulling the entries, shift
+                                        * the following entries over to the left, while
+                                        * we're iterating.
+                                        */
+                                       int j;
+                                       sgen_free_os_memory (empty_block_arr [first], MS_BLOCK_SIZE * num_blocks, SGEN_ALLOC_HEAP);
+                                       for (j = first; j <= d; ++j)
+                                               empty_block_arr [j] = NULL;
+                                       dest = first;
+                                       first = -1;
+
+                                       num_empty_blocks -= num_blocks;
+
+                                       stat_major_blocks_freed += num_blocks;
+                                       if (num_blocks == MS_BLOCK_ALLOC_NUM)
+                                               stat_major_blocks_freed_ideal += num_blocks;
+                                       else
+                                               stat_major_blocks_freed_less_ideal += num_blocks;
+
+                               }
+                       }
+
+                       SGEN_ASSERT (0, dest <= i && dest <= arr_length, "array length is off");
+                       arr_length = dest;
+                       SGEN_ASSERT (0, arr_length == num_empty_blocks, "array length is off");
+
+                       num_blocks >>= 1;
+               }
+
+               /* rebuild empty_blocks free list */
+               rebuild_next = (void**)&empty_blocks;
+               for (i = 0; i < arr_length; ++i) {
+                       void *block = empty_block_arr [i];
+                       SGEN_ASSERT (0, block, "we're missing blocks");
+                       *rebuild_next = block;
+                       rebuild_next = (void**)block;
+               }
+               *rebuild_next = NULL;
+
+               /* free array */
+               sgen_free_internal_dynamic (empty_block_arr, sizeof (void*) * num_empty_blocks_orig, INTERNAL_MEM_MS_BLOCK_INFO_SORT);
+       }
+
+       SGEN_ASSERT (0, num_empty_blocks >= 0, "we freed more blocks than we had in the first place?");
+
+ fallback:
        /*
-        * FIXME: We don't free blocks on 32 bit platforms because it
-        * can lead to address space fragmentation, since we're
-        * allocating blocks in larger contingents.
+        * This is our threshold.  If there aren't more empty blocks than used ones, we won't
+        * release non-contiguous blocks, for fear of fragmenting the address space.
         */
-       if (sizeof (mword) < 8)
+       if (num_empty_blocks <= num_major_sections)
                return;
+#endif
 
        while (num_empty_blocks > section_reserve) {
                void *next = *(void**)empty_blocks;
-               sgen_free_os_memory (empty_blocks, MS_BLOCK_SIZE);
+               sgen_free_os_memory (empty_blocks, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP);
                empty_blocks = next;
                /*
                 * Needs not be atomic because this is running
@@ -1706,6 +2038,9 @@ major_have_computer_minor_collection_allowance (void)
                --num_empty_blocks;
 
                ++stat_major_blocks_freed;
+#if SIZEOF_VOID_P != 8
+               ++stat_major_blocks_freed_individual;
+#endif
        }
 #endif
 }
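/*
 * Compact sketch of the contiguous-run search implemented above: given the
 * empty blocks sorted by address, release every run of exactly run_len
 * adjacent blocks until enough have been freed.  Names are hypothetical; the
 * real code additionally compacts the array, keeps statistics and halves
 * run_len between passes until it is down to pairs.
 */
#include <stddef.h>

static int
sketch_free_runs (char **blocks, int num_blocks, size_t block_size, int run_len,
		int to_free, void (*release) (char *start, size_t size))
{
	int freed = 0;
	int run_start = 0;      /* index where the current contiguous run begins */
	int i;

	for (i = 1; i <= num_blocks; ++i) {
		/* does block i extend the run starting at run_start? */
		int contiguous = i < num_blocks &&
				blocks [i] == blocks [i - 1] + block_size;

		if (contiguous && i - run_start + 1 < run_len)
			continue;

		if (contiguous) {
			/* the run reached the requested length: release it */
			release (blocks [run_start], block_size * (size_t)run_len);
			freed += run_len;
			if (freed >= to_free)
				break;
			run_start = i + 1;
		} else {
			run_start = i;
		}
	}
	return freed;
}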
@@ -1788,11 +2123,11 @@ major_handle_gc_param (const char *opt)
                }
                evacuation_threshold = (float)percentage / 100.0;
                return TRUE;
-       } else if (!strcmp (opt, "concurrent-sweep")) {
-               concurrent_sweep = TRUE;
+       } else if (!strcmp (opt, "lazy-sweep")) {
+               lazy_sweep = TRUE;
                return TRUE;
-       } else if (!strcmp (opt, "no-concurrent-sweep")) {
-               concurrent_sweep = FALSE;
+       } else if (!strcmp (opt, "no-lazy-sweep")) {
+               lazy_sweep = FALSE;
                return TRUE;
        }
 
@@ -1808,11 +2143,10 @@ major_print_gc_param_usage (void)
                        "  major-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n"
 #endif
                        "  evacuation-threshold=P (where P is a percentage, an integer in 0-100)\n"
-                       "  (no-)concurrent-sweep\n"
+                       "  (no-)lazy-sweep\n"
                        );
 }
 
-#ifdef SGEN_HAVE_CARDTABLE
 static void
 major_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
 {
@@ -1881,11 +2215,18 @@ skip_card (guint8 *card_data, guint8 *card_data_end)
 #define MS_OBJ_ALLOCED_FAST(o,b)               (*(void**)(o) && (*(char**)(o) < (b) || *(char**)(o) >= (b) + MS_BLOCK_SIZE))
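/*
 * Why MS_OBJ_ALLOCED_FAST works: a free slot holds either NULL or a free-list
 * link, and free-list links always point at other slots within the same
 * block (see sweep_block () above), whereas an allocated object starts with a
 * vtable pointer, which is not allocated inside major blocks.  Standalone
 * sketch of the same test with hypothetical names:
 */
#define SKETCH_BLOCK_SIZE (16 * 1024)

static int
sketch_obj_is_allocated_fast (void *slot, char *block_start)
{
	char *first_word = *(char **)slot;

	/* non-NULL and pointing outside the block => vtable, not a free-list link */
	return first_word && (first_word < block_start ||
			first_word >= block_start + SKETCH_BLOCK_SIZE);
}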
 
 static void
-major_scan_card_table (SgenGrayQueue *queue)
+major_scan_card_table (gboolean mod_union, SgenGrayQueue *queue)
 {
        MSBlockInfo *block;
        ScanObjectFunc scan_func = sgen_get_current_object_ops ()->scan_object;
 
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+       if (!concurrent_mark)
+               g_assert (!mod_union);
+#else
+       g_assert (!mod_union);
+#endif
+
        FOREACH_BLOCK (block) {
                int block_obj_size;
                char *block_start;
@@ -1903,24 +2244,54 @@ major_scan_card_table (SgenGrayQueue *queue)
 #endif
                        char *obj, *end, *base;
 
+                       if (mod_union) {
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+                               cards = block->cardtable_mod_union;
+                               /*
+                                * This happens when the nursery
+                                * collection that precedes finishing
+                                * the concurrent collection allocates
+                                * new major blocks.
+                                */
+                               if (!cards)
+                                       continue;
+#endif
+                       } else {
                        /* We can avoid the extra copy since the remark cardtable was cleaned before. */
 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
-                       cards = sgen_card_table_get_card_scan_address ((mword)block_start);
+                               cards = sgen_card_table_get_card_scan_address ((mword)block_start);
 #else
-                       cards = cards_data;
-                       if (!sgen_card_table_get_card_data (cards_data, (mword)block_start, CARDS_PER_BLOCK))
-                               continue;
+                               cards = cards_data;
+                               if (!sgen_card_table_get_card_data (cards_data, (mword)block_start, CARDS_PER_BLOCK))
+                                       continue;
 #endif
+                       }
 
                        obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, 0);
                        end = block_start + MS_BLOCK_SIZE;
                        base = sgen_card_table_align_pointer (obj);
 
                        while (obj < end) {
-                               if (MS_OBJ_ALLOCED_FAST (obj, block_start)) {
-                                       int card_offset = (obj - base) >> CARD_BITS;
-                                       sgen_cardtable_scan_object (obj, block_obj_size, cards + card_offset, queue);
+                               int card_offset;
+
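+                               /* With lazy sweeping the block may not have been swept yet; sweep it before inspecting its objects. */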
+                               if (!block->swept)
+                                       sweep_block (block, FALSE);
+
+                               if (!MS_OBJ_ALLOCED_FAST (obj, block_start))
+                                       goto next_large;
+
+                               if (mod_union) {
+                                       /* FIXME: do this more efficiently */
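+                                       /* Only objects already marked by the concurrent mark are scanned from the mod-union table. */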
+                                       int w, b;
+                                       MS_CALC_MARK_BIT (w, b, obj);
+                                       if (!MS_MARK_BIT (block, w, b))
+                                               goto next_large;
                                }
+
+                               card_offset = (obj - base) >> CARD_BITS;
+                               sgen_cardtable_scan_object (obj, block_obj_size, cards + card_offset, mod_union, queue);
+
+                       next_large:
                                obj += block_obj_size;
                        }
                } else {
@@ -1934,7 +2305,24 @@ major_scan_card_table (SgenGrayQueue *queue)
                         * Card aliasing happens in powers of two, so as long as major blocks are aligned to their
                         * size, they won't overflow the cardtable overlap modulus.
                         */
-                       card_data = card_base = sgen_card_table_get_card_scan_address ((mword)block_start);
+                       if (mod_union) {
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+                               card_data = card_base = block->cardtable_mod_union;
+                               /*
+                                * This happens when the nursery
+                                * collection that precedes finishing
+                                * the concurrent collection allocates
+                                * new major blocks.
+                                */
+                               if (!card_data)
+                                       continue;
+#else
+                               g_assert_not_reached ();
+                               card_data = NULL;
+#endif
+                       } else {
+                               card_data = card_base = sgen_card_table_get_card_scan_address ((mword)block_start);
+                       }
                        card_data_end = card_data + CARDS_PER_BLOCK;
 
                        for (card_data = initial_skip_card (card_data); card_data < card_data_end; ++card_data) { //card_data = skip_card (card_data + 1, card_data_end)) {
@@ -1942,13 +2330,16 @@ major_scan_card_table (SgenGrayQueue *queue)
                                int idx = card_data - card_base;
                                char *start = (char*)(block_start + idx * CARD_SIZE_IN_BYTES);
                                char *end = start + CARD_SIZE_IN_BYTES;
-                               char *obj;
+                               char *first_obj, *obj;
 
                                HEAVY_STAT (++scanned_cards);
 
                                if (!*card_data)
                                        continue;
 
+                               if (!block->swept)
+                                       sweep_block (block, FALSE);
+
                                HEAVY_STAT (++marked_cards);
 
                                sgen_card_table_prepare_card_for_scanning (card_data);
@@ -1958,36 +2349,61 @@ major_scan_card_table (SgenGrayQueue *queue)
                                else
                                        index = MS_BLOCK_OBJ_INDEX_FAST (start, block_start, block_obj_size);
 
-                               obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, index);
+                               obj = first_obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, index);
                                while (obj < end) {
-                                       if (MS_OBJ_ALLOCED_FAST (obj, block_start)) {
-                                               HEAVY_STAT (++scanned_objects);
-                                               scan_func (obj, queue);
+                                       if (!MS_OBJ_ALLOCED_FAST (obj, block_start))
+                                               goto next_small;
+
+                                       if (mod_union) {
+                                               /* FIXME: do this more efficiently */
+                                               int w, b;
+                                               MS_CALC_MARK_BIT (w, b, obj);
+                                               if (!MS_MARK_BIT (block, w, b))
+                                                       goto next_small;
                                        }
+
+                                       HEAVY_STAT (++scanned_objects);
+                                       scan_func (obj, queue);
+                               next_small:
                                        obj += block_obj_size;
                                }
                                HEAVY_STAT (if (*card_data) ++remarked_cards);
+                               binary_protocol_card_scan (first_obj, obj - first_obj);
                        }
                }
        } END_FOREACH_BLOCK;
 }
-#endif
 
-static gboolean
-major_is_worker_thread (MonoNativeThreadId thread)
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+static void
+update_cardtable_mod_union (void)
 {
-       if (concurrent_sweep)
-               return thread == ms_sweep_thread;
-       else
-               return FALSE;
+       MSBlockInfo *block;
+
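+       /* Fold the current card table contents for every block into that block's mod-union table. */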
+       FOREACH_BLOCK (block) {
+               size_t num_cards;
+
+               block->cardtable_mod_union = sgen_card_table_update_mod_union (block->cardtable_mod_union,
+                               block->block, MS_BLOCK_SIZE, &num_cards);
+
+               SGEN_ASSERT (0, num_cards == CARDS_PER_BLOCK, "Number of cards calculation is wrong");
+       } END_FOREACH_BLOCK;
+}
+
+static guint8*
+major_get_cardtable_mod_union_for_object (char *obj)
+{
+       MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
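+       /* Return the mod-union entry for the card that covers obj, indexed by its offset within the block. */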
+       return &block->cardtable_mod_union [(obj - (char*)sgen_card_table_align_pointer (block->block)) >> CARD_BITS];
 }
+#endif
 
 static void
 alloc_free_block_lists (MSBlockInfo ***lists)
 {
        int i;
        for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i)
-               lists [i] = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES);
+               lists [i] = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
 }
 
 #ifdef SGEN_PARALLEL_MARK
@@ -2036,31 +2452,32 @@ major_reset_worker_data (void *data)
 #undef pthread_create
 
 static void
-post_param_init (void)
+post_param_init (SgenMajorCollector *collector)
 {
-       if (concurrent_sweep) {
-               if (!mono_native_thread_create (&ms_sweep_thread, ms_sweep_thread_func, NULL)) {
-                       fprintf (stderr, "Error: Could not create sweep thread.\n");
-                       exit (1);
-               }
-       }
+       collector->sweeps_lazily = lazy_sweep;
 }
 
-void
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+static void
+sgen_marksweep_init_internal (SgenMajorCollector *collector, gboolean is_concurrent)
+#else // SGEN_HAVE_CONCURRENT_MARK
 #ifdef SGEN_PARALLEL_MARK
 #ifdef FIXED_HEAP
-sgen_marksweep_fixed_par_init
-#else
-sgen_marksweep_par_init
-#endif
-#else
+void
+sgen_marksweep_fixed_par_init (SgenMajorCollector *collector)
+#else // FIXED_HEAP
+void
+sgen_marksweep_par_init (SgenMajorCollector *collector)
+#endif // FIXED_HEAP
+#else // SGEN_PARALLEL_MARK
 #ifdef FIXED_HEAP
-sgen_marksweep_fixed_init
-#else
-sgen_marksweep_init
-#endif
-#endif
-       (SgenMajorCollector *collector)
+void
+sgen_marksweep_fixed_init (SgenMajorCollector *collector)
+#else // FIXED_HEAP
+#error unknown configuration
+#endif // FIXED_HEAP
+#endif // SGEN_PARALLEL_MARK
+#endif // SGEN_HAVE_CONCURRENT_MARK
 {
        int i;
 
@@ -2069,10 +2486,10 @@ sgen_marksweep_init
 #endif
 
        num_block_obj_sizes = ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, NULL);
-       block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (int) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES);
+       block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (int) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
        ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, block_obj_sizes);
 
-       evacuate_block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (gboolean) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES);
+       evacuate_block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (gboolean) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
        for (i = 0; i < num_block_obj_sizes; ++i)
                evacuate_block_obj_sizes [i] = FALSE;
 
@@ -2098,21 +2515,21 @@ sgen_marksweep_init
 
        mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_alloced);
        mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed);
+       mono_counters_register ("# major blocks lazy swept", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_lazy_swept);
        mono_counters_register ("# major objects evacuated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_objects_evacuated);
-       mono_counters_register ("Wait for sweep time", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &stat_time_wait_for_sweep);
+#if SIZEOF_VOID_P != 8
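+       /* Extra statistics for the 32-bit block allocation and release paths, which prefer contiguous runs of blocks. */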
+       mono_counters_register ("# major blocks freed ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_ideal);
+       mono_counters_register ("# major blocks freed less ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_less_ideal);
+       mono_counters_register ("# major blocks freed individually", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_individual);
+       mono_counters_register ("# major blocks allocated less ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_alloced_less_ideal);
+#endif
+
 #ifdef SGEN_PARALLEL_MARK
 #ifndef HAVE_KW_THREAD
        mono_native_tls_alloc (&workers_free_block_lists_key, NULL);
 #endif
 #endif
 
-       /*
-        * FIXME: These are superfluous if concurrent sweep is
-        * disabled.  We might want to create them lazily.
-        */
-       MONO_SEM_INIT (&ms_sweep_cmd_semaphore, 0);
-       MONO_SEM_INIT (&ms_sweep_done_semaphore, 0);
-
        collector->section_size = MAJOR_SECTION_SIZE;
 #ifdef SGEN_PARALLEL_MARK
        collector->is_parallel = TRUE;
@@ -2122,6 +2539,18 @@ sgen_marksweep_init
 #else
        collector->is_parallel = FALSE;
 #endif
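+       /* Only the concurrent variant provides the synchronous-collection hint and the marked-object counter. */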
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+       concurrent_mark = is_concurrent;
+       if (is_concurrent) {
+               collector->is_concurrent = TRUE;
+               collector->want_synchronous_collection = &want_evacuation;
+               collector->get_and_reset_num_major_objects_marked = major_get_and_reset_num_major_objects_marked;
+       } else
+#endif
+       {
+               collector->is_concurrent = FALSE;
+               collector->want_synchronous_collection = NULL;
+       }
        collector->supports_cardtable = TRUE;
 
        collector->have_swept = &have_swept;
@@ -2141,9 +2570,13 @@ sgen_marksweep_init
        collector->find_pin_queue_start_ends = major_find_pin_queue_start_ends;
        collector->pin_objects = major_pin_objects;
        collector->pin_major_object = pin_major_object;
-#ifdef SGEN_HAVE_CARDTABLE
        collector->scan_card_table = major_scan_card_table;
        collector->iterate_live_block_ranges = (void*)(void*) major_iterate_live_block_ranges;
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+       if (is_concurrent) {
+               collector->update_cardtable_mod_union = update_cardtable_mod_union;
+               collector->get_cardtable_mod_union_for_object = major_get_cardtable_mod_union_for_object;
+       }
 #endif
        collector->init_to_space = major_init_to_space;
        collector->sweep = major_sweep;
@@ -2161,18 +2594,36 @@ sgen_marksweep_init
        collector->get_num_major_sections = get_num_major_sections;
        collector->handle_gc_param = major_handle_gc_param;
        collector->print_gc_param_usage = major_print_gc_param_usage;
-       collector->is_worker_thread = major_is_worker_thread;
        collector->post_param_init = post_param_init;
        collector->is_valid_object = major_is_valid_object;
        collector->describe_pointer = major_describe_pointer;
 
-       collector->major_ops.copy_or_mark_object = major_copy_or_mark_object;
+       collector->major_ops.copy_or_mark_object = major_copy_or_mark_object_canonical;
        collector->major_ops.scan_object = major_scan_object;
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+       if (is_concurrent) {
+               collector->major_concurrent_ops.copy_or_mark_object = major_copy_or_mark_object_concurrent_canonical;
+               collector->major_concurrent_ops.scan_object = major_scan_object_concurrent;
+               collector->major_concurrent_ops.scan_vtype = major_scan_vtype_concurrent;
+       }
+#endif
 
-#ifdef SGEN_HAVE_CARDTABLE
        /*cardtable requires major pages to be 8 cards aligned*/
        g_assert ((MS_BLOCK_SIZE % (8 * CARD_SIZE_IN_BYTES)) == 0);
-#endif
 }
 
+#ifdef SGEN_HAVE_CONCURRENT_MARK
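+/* Both entry points below share the same initialization; only the concurrency flag differs. */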
+void
+sgen_marksweep_init (SgenMajorCollector *collector)
+{
+       sgen_marksweep_init_internal (collector, FALSE);
+}
+
+void
+sgen_marksweep_conc_init (SgenMajorCollector *collector)
+{
+       sgen_marksweep_init_internal (collector, TRUE);
+}
+#endif
+
 #endif